repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/map_benchmark.rs | lib/segment/benches/map_benchmark.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::collections::{BTreeMap, HashMap};
use criterion::{Criterion, criterion_group, criterion_main};
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::data_types::tiny_map::TinyMap;
use segment::fixtures::index_fixtures::random_vector;
const DIM: usize = 100;
fn small_map_obj(c: &mut Criterion) {
let mut group = c.benchmark_group("small-map-obj-group");
let mut rng = StdRng::seed_from_u64(42);
let default_key = "vector".to_string();
let default_key_2 = "vector1".to_string();
let default_key_3 = "vector2".to_string();
let random_vector = random_vector(&mut rng, DIM);
group.bench_function("hash-map", |b| {
b.iter(|| {
let mut map = HashMap::new();
map.insert(default_key.clone(), random_vector.clone());
map.insert(default_key_2.clone(), random_vector.clone());
map.insert(default_key_3.clone(), random_vector.clone());
let _ = map.get(&default_key_3);
});
});
group.bench_function("btree-map", |b| {
b.iter(|| {
let mut map = BTreeMap::new();
map.insert(default_key.clone(), random_vector.clone());
map.insert(default_key_2.clone(), random_vector.clone());
map.insert(default_key_3.clone(), random_vector.clone());
let _ = map.get(&default_key_3);
});
});
#[allow(clippy::vec_init_then_push)]
group.bench_function("vec-map", |b| {
b.iter(|| {
let mut map = Vec::with_capacity(3);
map.push((default_key.clone(), random_vector.clone()));
map.push((default_key_2.clone(), random_vector.clone()));
map.push((default_key_3.clone(), random_vector.clone()));
let _ = map.iter().find(|(k, _)| k == &default_key_3);
});
});
group.bench_function("tiny-map", |b| {
b.iter(|| {
let mut map = TinyMap::new();
map.insert(default_key.clone(), random_vector.clone());
map.insert(default_key_2.clone(), random_vector.clone());
map.insert(default_key_3.clone(), random_vector.clone());
let _ = map.get(&default_key_3);
});
});
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = small_map_obj
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/serde_formats.rs | lib/segment/benches/serde_formats.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use criterion::{Criterion, criterion_group, criterion_main};
use itertools::Itertools;
use segment::payload_json;
use segment::types::Payload;
fn serde_formats_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("serde-formats-group");
let payloads = (0..1000)
.map(|x| {
let payload = payload_json! {"val": format!("val_{x}")};
payload
})
.collect_vec();
let cbor_bytes = payloads
.iter()
.map(|p| serde_cbor::to_vec(p).unwrap())
.collect_vec();
let rmp_bytes = payloads
.iter()
.map(|p| rmp_serde::to_vec(p).unwrap())
.collect_vec();
group.bench_function("serde-serialize-cbor", |b| {
b.iter(|| {
for payload in &payloads {
let vec = serde_cbor::to_vec(payload);
vec.unwrap();
}
});
});
group.bench_function("serde-deserialize-cbor", |b| {
b.iter(|| {
for bytes in &cbor_bytes {
let _payload: Payload = serde_cbor::from_slice(bytes).unwrap();
}
});
});
group.bench_function("serde-serialize-rmp", |b| {
b.iter(|| {
for payload in &payloads {
let vec = rmp_serde::to_vec(payload);
vec.unwrap();
}
});
});
group.bench_function("serde-deserialize-rmp", |b| {
b.iter(|| {
for bytes in &rmp_bytes {
let _payload: Payload = rmp_serde::from_slice(bytes).unwrap();
}
});
});
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = serde_formats_bench
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/metrics.rs | lib/segment/benches/metrics.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use criterion::{Criterion, criterion_group, criterion_main};
use half::f16;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::vectors::{VectorElementTypeByte, VectorElementTypeHalf};
use segment::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::avx::dot::avx_dot_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::avx::euclid::avx_euclid_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::avx::manhattan::avx_manhattan_similarity_half;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_f16::neon::dot::neon_dot_similarity_half;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_f16::neon::euclid::neon_euclid_similarity_half;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_f16::neon::manhattan::neon_manhattan_similarity_half;
use segment::spaces::metric_f16::simple_dot::dot_similarity_half;
use segment::spaces::metric_f16::simple_euclid::euclid_similarity_half;
use segment::spaces::metric_f16::simple_manhattan::manhattan_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::sse::dot::sse_dot_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::sse::euclid::sse_euclid_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_f16::sse::manhattan::sse_manhattan_similarity_half;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::avx2::cosine::avx_cosine_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::avx2::dot::avx_dot_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::avx2::euclid::avx_euclid_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::avx2::manhattan::avx_manhattan_similarity_bytes;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_uint::neon::cosine::neon_cosine_similarity_bytes;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_uint::neon::dot::neon_dot_similarity_bytes;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_uint::neon::euclid::neon_euclid_similarity_bytes;
#[cfg(target_arch = "aarch64")]
use segment::spaces::metric_uint::neon::manhattan::neon_manhattan_similarity_bytes;
use segment::spaces::metric_uint::simple_cosine::cosine_similarity_bytes;
use segment::spaces::metric_uint::simple_dot::dot_similarity_bytes;
use segment::spaces::metric_uint::simple_euclid::euclid_similarity_bytes;
use segment::spaces::metric_uint::simple_manhattan::manhattan_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::sse2::cosine::sse_cosine_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::sse2::dot::sse_dot_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::sse2::euclid::sse_euclid_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use segment::spaces::metric_uint::sse2::manhattan::sse_manhattan_similarity_bytes;
use segment::spaces::simple::{CosineMetric, DotProductMetric, EuclidMetric, ManhattanMetric};
const DIM: usize = 1024;
const COUNT: usize = 100_000;
fn byte_metrics_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("byte-metrics-bench-group");
let mut rng = StdRng::seed_from_u64(42);
let random_vectors_1: Vec<Vec<u8>> = (0..COUNT)
.map(|_| (0..DIM).map(|_| rng.random_range(0..=255)).collect())
.collect();
let random_vectors_2: Vec<Vec<u8>> = (0..COUNT)
.map(|_| (0..DIM).map(|_| rng.random_range(0..=255)).collect())
.collect();
group.bench_function("byte-dot", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<DotProductMetric as Metric<VectorElementTypeByte>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("byte-dot-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
dot_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-dot-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_dot_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-dot-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_dot_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("byte-dot-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_dot_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
group.bench_function("byte-cosine", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<CosineMetric as Metric<VectorElementTypeByte>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("byte-cosine-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
cosine_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-cosine-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_cosine_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-cosine-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_cosine_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("byte-cosine-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_cosine_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
group.bench_function("byte-euclid", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<EuclidMetric as Metric<VectorElementTypeByte>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("byte-euclid-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
euclid_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-euclid-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_euclid_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-euclid-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_euclid_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("byte-euclid-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_euclid_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
group.bench_function("byte-manhattan", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<ManhattanMetric as Metric<VectorElementTypeByte>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("byte-manhattan-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
manhattan_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-manhattan-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_manhattan_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("byte-manhattan-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_manhattan_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("byte-manhattan-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_manhattan_similarity_bytes(&random_vectors_1[i], &random_vectors_2[i])
});
});
}
fn half_metrics_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("half-metrics-bench-group");
let mut rng = StdRng::seed_from_u64(42);
let random_vectors_1: Vec<Vec<f16>> = (0..COUNT)
.map(|_| {
(0..DIM)
.map(|_| f16::from_f32(rng.random_range(0.0..=1.0)))
.collect()
})
.collect();
let random_vectors_2: Vec<Vec<f16>> = (0..COUNT)
.map(|_| {
(0..DIM)
.map(|_| f16::from_f32(rng.random_range(0.0..=1.0)))
.collect()
})
.collect();
group.bench_function("half-dot", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<DotProductMetric as Metric<VectorElementTypeHalf>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("half-dot-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
dot_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-dot-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_dot_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-dot-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_dot_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("half-dot-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_dot_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
group.bench_function("half-euclid", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<EuclidMetric as Metric<VectorElementTypeHalf>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("half-euclid-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
euclid_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-euclid-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_euclid_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-euclid-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_euclid_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("half-euclid-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_euclid_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
group.bench_function("half-manhattan", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
<ManhattanMetric as Metric<VectorElementTypeHalf>>::similarity(
&random_vectors_1[i],
&random_vectors_2[i],
)
});
});
group.bench_function("half-manhattan-no-simd", |b| {
let mut i = 0;
b.iter(|| {
i = (i + 1) % COUNT;
manhattan_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-manhattan-avx", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
avx_manhattan_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("half-manhattan-sse", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
sse_manhattan_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("half-manhattan-neon", |b| {
let mut i = 0;
b.iter(|| unsafe {
i = (i + 1) % COUNT;
neon_manhattan_similarity_half(&random_vectors_1[i], &random_vectors_2[i])
});
});
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = byte_metrics_bench, half_metrics_bench
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/multi_vector_search.rs | lib/segment/benches/multi_vector_search.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_multi_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::random_multi_vector;
use segment::index::VectorIndex;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::segment_constructor::{VectorIndexBuildArgs, build_segment};
use segment::types::Distance::{Dot, Euclid};
use segment::types::{
Distance, HnswConfig, HnswGlobalConfig, Indexes, MultiVectorConfig, SegmentConfig,
SeqNumberType, VectorDataConfig, VectorStorageType,
};
use tempfile::Builder;
#[cfg(not(target_os = "windows"))]
mod prof;
const NUM_POINTS: usize = 10_000;
const NUM_VECTORS_PER_POINT: usize = 16;
const VECTOR_DIM: usize = 128;
const TOP: usize = 10;
// intent: bench `search` without filter
fn multi_vector_search_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("multi-vector-search-group");
let mut rnd = StdRng::seed_from_u64(42);
let hnsw_index = make_segment_index(&mut rnd, Dot);
group.bench_function("hnsw-multivec-search-dot", |b| {
b.iter_batched(
|| random_multi_vector(&mut rnd, VECTOR_DIM, NUM_VECTORS_PER_POINT).into(),
|query| {
let results = hnsw_index
.search(&[&query], None, TOP, None, &Default::default())
.unwrap();
assert_eq!(results[0].len(), TOP);
},
BatchSize::SmallInput,
)
});
let hnsw_index = make_segment_index(&mut rnd, Euclid);
group.bench_function("hnsw-multivec-search-euclidean", |b| {
b.iter_batched(
|| random_multi_vector(&mut rnd, VECTOR_DIM, NUM_VECTORS_PER_POINT).into(),
|query| {
let results = hnsw_index
.search(&[&query], None, TOP, None, &Default::default())
.unwrap();
assert_eq!(results[0].len(), TOP);
},
BatchSize::SmallInput,
)
});
group.finish();
}
fn make_segment_index<R: Rng + ?Sized>(rng: &mut R, distance: Distance) -> HNSWIndex {
let stopped = AtomicBool::new(false);
let segment_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let segment_config = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: VECTOR_DIM,
distance,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: Some(MultiVectorConfig::default()), // uses multivec config
datatype: None,
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
};
let hw_counter = HardwareCounterCell::new();
let mut segment = build_segment(segment_dir.path(), &segment_config, true).unwrap();
for n in 0..NUM_POINTS {
let idx = (n as u64).into();
let multi_vec = random_multi_vector(rng, VECTOR_DIM, NUM_VECTORS_PER_POINT);
let named_vectors = only_default_multi_vector(&multi_vec);
segment
.upsert_point(n as SeqNumberType, idx, named_vectors, &hw_counter)
.unwrap();
}
// build HNSW index
let hnsw_config = HnswConfig {
m: 8,
ef_construct: 16,
full_scan_threshold: 10, // low value to trigger index usage by default
max_indexing_threads: 0,
on_disk: None,
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
quantized_vectors: quantized_vectors.clone(),
payload_index: segment.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
stopped: &stopped,
rng,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
hnsw_index.populate().unwrap();
hnsw_index
}
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = multi_vector_search_benchmark
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/boolean_filtering.rs | lib/segment/benches/boolean_filtering.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use segment::fixtures::payload_context_fixture::{
FixtureIdTracker, create_payload_storage_fixture, create_plain_payload_index,
create_struct_payload_index,
};
use segment::fixtures::payload_fixtures::BOOL_KEY;
use segment::index::PayloadIndex;
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::types::{Condition, FieldCondition, Filter, Match, PayloadSchemaType, ValueVariants};
use tempfile::Builder;
mod prof;
const NUM_POINTS: usize = 100000;
fn random_bool_filter<R: Rng + ?Sized>(rng: &mut R) -> Filter {
Filter::new_must(Condition::Field(FieldCondition::new_match(
BOOL_KEY.parse().unwrap(),
Match::new_value(ValueVariants::Bool(rng.random_bool(0.5))),
)))
}
pub fn plain_boolean_query_points(c: &mut Criterion) {
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let mut group = c.benchmark_group("boolean-query-points");
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let plain_index = create_plain_payload_index(dir.path(), NUM_POINTS, seed);
let mut result_size = 0;
let mut query_count = 0;
let is_stopped = AtomicBool::new(false);
let hw_counter = HardwareCounterCell::new();
group.bench_function("plain", |b| {
b.iter(|| {
let filter = random_bool_filter(&mut rng);
result_size += plain_index
.query_points(&filter, &hw_counter, &is_stopped)
.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
}
pub fn struct_boolean_query_points(c: &mut Criterion) {
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let struct_index = create_struct_payload_index(dir.path(), NUM_POINTS, seed);
let mut group = c.benchmark_group("boolean-query-points");
let hw_counter = HardwareCounterCell::new();
let is_stopped = AtomicBool::new(false);
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("binary-index", |b| {
b.iter(|| {
let filter = random_bool_filter(&mut rng);
result_size += struct_index
.query_points(&filter, &hw_counter, &is_stopped)
.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
group.finish();
}
pub fn keyword_index_boolean_query_points(c: &mut Criterion) {
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let payload_storage = Arc::new(AtomicRefCell::new(
create_payload_storage_fixture(NUM_POINTS, seed).into(),
));
let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(NUM_POINTS)));
let hw_counter = HardwareCounterCell::new();
let mut index = StructPayloadIndex::open(
payload_storage,
id_tracker,
std::collections::HashMap::new(),
dir.path(),
true,
true,
)
.unwrap();
index
.set_indexed(
&BOOL_KEY.parse().unwrap(),
PayloadSchemaType::Keyword,
&hw_counter,
)
.unwrap();
let is_stopped = AtomicBool::new(false);
let mut group = c.benchmark_group("boolean-query-points");
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("keyword-index", |b| {
b.iter(|| {
let filter = random_bool_filter(&mut rng);
result_size += index.query_points(&filter, &hw_counter, &is_stopped).len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = plain_boolean_query_points, struct_boolean_query_points, keyword_index_boolean_query_points
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = plain_boolean_query_points, struct_boolean_query_points, keyword_index_boolean_query_points
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/hnsw_build_graph.rs | lib/segment/benches/hnsw_build_graph.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::fixtures::index_fixtures::TestRawScorerProducer;
use segment::index::hnsw_index::HnswM;
use segment::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;
use segment::types::Distance;
const NUM_VECTORS: usize = 10000;
const DIM: usize = 32;
const M: usize = 16;
const EF_CONSTRUCT: usize = 64;
const USE_HEURISTIC: bool = true;
fn hnsw_benchmark(c: &mut Criterion) {
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::new(DIM, Distance::Cosine, NUM_VECTORS, false, &mut rng);
let mut group = c.benchmark_group("hnsw-index-build-group");
group.sample_size(10);
group.bench_function("hnsw_index", |b| {
b.iter(|| {
let mut rng = rand::rng();
let mut graph_layers_builder = GraphLayersBuilder::new(
NUM_VECTORS,
HnswM::new2(M),
EF_CONSTRUCT,
10,
USE_HEURISTIC,
);
for idx in 0..(NUM_VECTORS as PointOffsetType) {
let level = graph_layers_builder.get_random_layer(&mut rng);
graph_layers_builder.set_levels(idx, level);
graph_layers_builder.link_new_point(idx, vector_holder.internal_scorer(idx));
}
})
});
group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = hnsw_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = hnsw_benchmark
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/numeric_index_check_values.rs | lib/segment/benches/numeric_index_check_values.rs | use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::common::operation_error::OperationResult;
use segment::index::field_index::numeric_index::mmap_numeric_index::MmapNumericIndex;
use segment::index::field_index::numeric_index::mutable_numeric_index::InMemoryNumericIndex;
use tempfile::Builder;
mod prof;
const NUM_POINTS: usize = 100000;
const VALUES_PER_POINT: usize = 2;
fn get_random_payloads(rng: &mut StdRng, num_points: usize) -> Vec<(PointOffsetType, f64)> {
let mut payloads = Vec::with_capacity(num_points);
for i in 0..num_points {
for _ in 0..VALUES_PER_POINT {
let value: f64 = rng.random_range(0.0..1.0f64);
payloads.push((i as PointOffsetType, value));
}
}
payloads
}
pub fn struct_numeric_check_values(c: &mut Criterion) {
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut group = c.benchmark_group("numeric-check-values");
let payloads: Vec<(PointOffsetType, f64)> = get_random_payloads(&mut rng, NUM_POINTS);
let mutable_index: InMemoryNumericIndex<f64> = payloads
.into_iter()
.map(Ok)
.collect::<OperationResult<InMemoryNumericIndex<_>>>()
.unwrap();
let hw_counter = HardwareCounterCell::new();
let mut count = 0;
group.bench_function("numeric-index", |b| {
b.iter(|| {
let random_index = rng.random_range(0..NUM_POINTS) as PointOffsetType;
if mutable_index.check_values_any(random_index, |value| *value > 0.5) {
count += 1;
}
})
});
let mmap_index = MmapNumericIndex::build(mutable_index, dir.path(), false).unwrap();
group.bench_function("mmap-numeric-index", |b| {
b.iter(|| {
let random_index = rng.random_range(0..NUM_POINTS) as PointOffsetType;
if mmap_index.check_values_any(random_index, |value| *value > 0.5, &hw_counter) {
count += 1;
}
})
});
group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = struct_numeric_check_values
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = struct_numeric_check_values
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/config.rs | lib/gridstore/src/config.rs | use serde::{Deserialize, Serialize};
/// Expect JSON values to have roughly 3–5 fields with mostly small values.
/// For 1M values, this would require 128MB of memory.
pub const DEFAULT_BLOCK_SIZE_BYTES: usize = 128;
/// Default page size used when not specified
pub const DEFAULT_PAGE_SIZE_BYTES: usize = 32 * 1024 * 1024; // 32MB
pub const DEFAULT_REGION_SIZE_BLOCKS: usize = 8_192;
pub const DEFAULT_USE_COMPRESSION: bool = true;
#[derive(Debug, Copy, Clone, Serialize, Deserialize, Default)]
pub enum Compression {
None,
#[default]
LZ4,
}
/// Configuration options for the storage
#[derive(Debug, Default)]
pub struct StorageOptions {
/// Size of a page in bytes. Must be a multiple of (`block_size_bytes` * `region_size_blocks`).
///
/// Default is 32MB
pub page_size_bytes: Option<usize>,
/// Size of a block in bytes
///
/// Default is 128 bytes
pub block_size_bytes: Option<usize>,
/// Size of a region in blocks
///
/// Default is 8192 blocks
pub region_size_blocks: Option<u16>,
/// Use compression
///
/// Default is LZ4
pub compression: Option<Compression>,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub(crate) struct StorageConfig {
/// Size of a page in bytes
///
/// Default is 32MB
pub page_size_bytes: usize,
/// Size of a block in bytes
///
/// Default is 128 bytes
pub block_size_bytes: usize,
/// Size of a region in blocks
///
/// Default is 8192 blocks
pub region_size_blocks: usize,
/// Use compression
///
/// Default is true
#[serde(default)]
pub compression: Compression,
}
impl TryFrom<StorageOptions> for StorageConfig {
type Error = &'static str;
fn try_from(options: StorageOptions) -> Result<Self, Self::Error> {
let page_size_bytes = options.page_size_bytes.unwrap_or(DEFAULT_PAGE_SIZE_BYTES);
let block_size_bytes = options.block_size_bytes.unwrap_or(DEFAULT_BLOCK_SIZE_BYTES);
let region_size_blocks = options
.region_size_blocks
.map(|x| x as usize)
.unwrap_or(DEFAULT_REGION_SIZE_BLOCKS);
if block_size_bytes == 0 {
return Err("Block size must be greater than 0");
}
if region_size_blocks == 0 {
return Err("Region size must be greater than 0");
}
if page_size_bytes == 0 {
return Err("Page size must be greater than 0");
}
let region_size_bytes = block_size_bytes * region_size_blocks;
if page_size_bytes < region_size_bytes {
return Err("Page size must be greater than or equal to (block size * region size)");
}
if !page_size_bytes.is_multiple_of(region_size_bytes) {
return Err("Page size must be a multiple of (block size * region size)");
}
Ok(Self {
page_size_bytes,
block_size_bytes,
region_size_blocks,
compression: options.compression.unwrap_or_default(),
})
}
}
impl From<StorageConfig> for StorageOptions {
fn from(config: StorageConfig) -> Self {
Self {
page_size_bytes: Some(config.page_size_bytes),
block_size_bytes: Some(config.block_size_bytes),
region_size_blocks: Some(config.region_size_blocks as u16),
compression: Some(config.compression),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/page.rs | lib/gridstore/src/page.rs | use std::path::{Path, PathBuf};
use fs_err as fs;
use memmap2::{Mmap, MmapMut};
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting, Madviseable};
use memory::mmap_ops::{
MULTI_MMAP_IS_SUPPORTED, create_and_ensure_length, open_read_mmap, open_write_mmap,
};
use crate::Result;
use crate::error::GridstoreError;
use crate::tracker::BlockOffset;
#[derive(Debug)]
pub(crate) struct Page {
path: PathBuf,
/// Main data mmap for read/write
///
/// Best suited for random reads.
mmap: MmapMut,
/// Read-only mmap best suited for sequential reads
///
/// `None` on platforms that do not support multiple memory maps to the same file.
/// Use [`mmap_seq`] utility function to access this mmap if available.
_mmap_seq: Option<Mmap>,
}
impl Page {
/// Flushes outstanding memory map modifications to disk.
pub(crate) fn flush(&self) -> std::io::Result<()> {
self.mmap.flush()
}
/// Create a new page at the given path
pub fn new(path: &Path, size: usize) -> Result<Page> {
create_and_ensure_length(path, size)?;
let mmap = open_write_mmap(path, AdviceSetting::from(Advice::Random), false)?;
// Only open second mmap for sequential reads if supported
let mmap_seq = if *MULTI_MMAP_IS_SUPPORTED {
Some(open_read_mmap(
path,
AdviceSetting::from(Advice::Sequential),
false,
)?)
} else {
None
};
let path = path.to_path_buf();
Ok(Page {
path,
mmap,
_mmap_seq: mmap_seq,
})
}
/// Open an existing page at the given path
/// If the file does not exist, return None
pub fn open(path: &Path) -> Result<Page> {
if !path.exists() {
return Err(GridstoreError::service_error(format!(
"Page file does not exist: {}",
path.display()
)));
}
let mmap = open_write_mmap(path, AdviceSetting::from(Advice::Random), false)?;
// Only open second mmap for sequential reads if supported
let mmap_seq = if *MULTI_MMAP_IS_SUPPORTED {
Some(open_read_mmap(
path,
AdviceSetting::from(Advice::Sequential),
false,
)?)
} else {
None
};
let path = path.to_path_buf();
Ok(Page {
path,
mmap,
_mmap_seq: mmap_seq,
})
}
/// Helper to get a slice suited for sequential reads if available, otherwise use the main mmap
#[inline]
fn mmap_seq(&self) -> &[u8] {
#[expect(clippy::used_underscore_binding)]
self._mmap_seq
.as_ref()
.map(|m| m.as_ref())
.unwrap_or(self.mmap.as_ref())
}
/// Write a value into the page
///
/// # Returns
/// Amount of bytes that didn't fit into the page
///
/// # Corruption
///
/// If the block_offset and length of the value are already taken, this function will still overwrite the data.
pub fn write_value(
&mut self,
block_offset: u32,
value: &[u8],
block_size_bytes: usize,
) -> usize {
// The size of the data cell containing the value
let value_size = value.len();
let value_start = block_offset as usize * block_size_bytes;
let value_end = value_start + value_size;
// only write what fits in the page
let unwritten_tail = value_end.saturating_sub(self.mmap.len());
// set value region
self.mmap[value_start..value_end - unwritten_tail]
.copy_from_slice(&value[..value_size - unwritten_tail]);
unwritten_tail
}
/// Read a value from the page
///
/// # Arguments
/// - block_offset: The offset of the value in blocks
/// - length: The number of blocks the value occupies
/// - READ_SEQUENTIAL: Whether to read mmap pages ahead to optimize sequential access
///
/// # Returns
/// - None if the value is not within the page
/// - Some(slice) if the value was successfully read
///
/// # Panics
///
/// If the `block_offset` starts after the page ends.
pub fn read_value<const READ_SEQUENTIAL: bool>(
&self,
block_offset: BlockOffset,
length: u32,
block_size_bytes: usize,
) -> (&[u8], usize) {
if READ_SEQUENTIAL {
Self::read_value_with_generic_storage(
self.mmap_seq(),
block_offset,
length,
block_size_bytes,
)
} else {
Self::read_value_with_generic_storage(
&self.mmap,
block_offset,
length,
block_size_bytes,
)
}
}
fn read_value_with_generic_storage(
mmap: &[u8],
block_offset: BlockOffset,
length: u32,
block_size_bytes: usize,
) -> (&[u8], usize) {
let value_start = block_offset as usize * block_size_bytes;
let mmap_len = mmap.len();
assert!(value_start < mmap_len);
let value_end = value_start + length as usize;
let unread_tail = value_end.saturating_sub(mmap_len);
// read value region
(&mmap[value_start..value_end - unread_tail], unread_tail)
}
/// Delete the page from the filesystem.
#[allow(dead_code)]
pub fn delete_page(self) {
#[expect(clippy::used_underscore_binding)]
drop((self.mmap, self._mmap_seq));
fs::remove_file(&self.path).unwrap();
}
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) {
#[expect(clippy::used_underscore_binding)]
if let Some(mmap_seq) = &self._mmap_seq {
mmap_seq.populate();
}
}
/// Drop disk cache.
pub fn clear_cache(&self) -> std::io::Result<()> {
clear_disk_cache(&self.path)?;
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/lib.rs | lib/gridstore/src/lib.rs | pub mod bitmask;
pub mod blob;
pub mod config;
pub mod error;
pub mod fixtures;
mod gridstore;
mod page;
mod tracker;
pub use blob::Blob;
pub use gridstore::Gridstore;
use crate::error::GridstoreError;
pub(crate) type Result<T> = std::result::Result<T, GridstoreError>;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/tracker.rs | lib/gridstore/src/tracker.rs | use std::path::{Path, PathBuf};
use ahash::{AHashMap, AHashSet};
use memmap2::MmapMut;
use memory::madvise::{Advice, AdviceSetting, Madviseable};
use memory::mmap_ops::{
create_and_ensure_length, open_write_mmap, transmute_from_u8, transmute_to_u8,
};
use smallvec::SmallVec;
use crate::Result;
use crate::error::GridstoreError;
pub type PointOffset = u32;
pub type BlockOffset = u32;
pub type PageId = u32;
const TRACKER_MEM_ADVICE: Advice = Advice::Random;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct ValuePointer {
/// Which page the value is stored in
pub page_id: PageId,
/// Start offset (in blocks) of the value
pub block_offset: BlockOffset,
/// Length in bytes of the value
pub length: u32,
}
impl ValuePointer {
pub fn new(page_id: PageId, block_offset: BlockOffset, length: u32) -> Self {
Self {
page_id,
block_offset,
length,
}
}
}
/// Pointer updates for a given point offset
///
/// Keeps track of the places where the value for a point offset have been written, until we persist them.
///
/// In context of Gridstore, for each point offset this means:
///
/// - `current` is the value the tracker should report and become persisted when flushing.
/// If exists, `Some`; otherwise, `None`.
///
/// - `to_free` is the list of pointers that should be freed in the bitmask during flush, so that
/// the space in the pages can be reused.
///
/// When flushing, we persist all changes we have currently collected. It is possible that new changes
/// come in between preparing the flusher and executing it. After we've written to disk, we remove (drain),
/// the now persisted changes from these pointer updates. With this mechanism we write each update to
/// disk exactly once.
#[derive(Debug, Default, Clone, PartialEq)]
pub(super) struct PointerUpdates {
/// Pointer to write in tracker when persisting
current: Option<ValuePointer>,
/// List of pointers to free in bitmask when persisting
to_free: SmallVec<[ValuePointer; 1]>,
}
impl PointerUpdates {
/// Mark this pointer as set
///
/// It will mark the pointer as used on disk on flush, and will free all previous pending
/// pointers
fn set(&mut self, pointer: ValuePointer) {
if self.current == Some(pointer) {
debug_assert!(false, "we should not set the same point twice");
return;
}
// Move the current pointer to the pointers to free, if it exists
if let Some(old_pointer) = self.current.replace(pointer) {
self.to_free.push(old_pointer);
debug_assert_eq!(
self.to_free.iter().copied().collect::<AHashSet<_>>().len(),
self.to_free.len(),
"should not have duplicate pointers to free",
);
}
debug_assert!(
!self.to_free.contains(&pointer),
"old list cannot contain pointer we just set",
);
}
/// Mark this pointer as unset
///
/// It will completely free the pointer on disk on flush including all it's previous pending
/// pointers
fn unset(&mut self, pointer: ValuePointer) {
let old_pointer = self.current.take();
// Fallback: if the pointer to unset is not the current one, free both pointers, though this shouldn't happen
debug_assert!(
old_pointer.is_none_or(|p| p == pointer),
"new unset pointer should match with current one, if any",
);
if let Some(old_pointer) = old_pointer
&& old_pointer != pointer
{
self.to_free.push(old_pointer);
}
self.to_free.push(pointer);
debug_assert_eq!(
self.to_free.iter().copied().collect::<AHashSet<_>>().len(),
self.to_free.len(),
"should not have duplicate pointers to free",
);
}
/// Pointer is empty if there is no set nor unsets
fn is_empty(&self) -> bool {
self.current.is_none() && self.to_free.is_empty()
}
/// Remove all pointers from self that have been persisted
///
/// After calling this self may end up being empty. The caller is responsible for dropping
/// empty structures if desired.
///
/// Unknown pointers in `persisted` are ignored.
///
/// Returns if the structure is empty after this operation
fn drain_persisted(&mut self, persisted: &Self) -> bool {
debug_assert!(!self.is_empty(), "must have at least one pointer");
debug_assert!(
!persisted.is_empty(),
"persisted must have at least one pointer",
);
// Shortcut: we persisted everything if both are equal, we can empty this structure
if self == persisted {
*self = Self::default();
return true;
}
let Self {
current: previous_current,
to_free: freed,
} = persisted;
// Remove self set if persisted
if let (Some(current), Some(previous_current)) = (self.current, *previous_current)
&& current == previous_current
{
self.current.take();
}
// Only keep unsets that are not persisted
self.to_free.retain(|pointer| !freed.contains(pointer));
self.is_empty()
}
}
#[derive(Debug, Default, Clone)]
struct TrackerHeader {
next_pointer_offset: u32,
}
#[derive(Debug)]
pub struct Tracker {
/// Path to the file
path: PathBuf,
/// Header of the file
header: TrackerHeader,
/// Mmap of the file
mmap: MmapMut,
/// Updates that haven't been flushed
///
/// When flushing, these updates get written into the mmap and flushed at once.
pub(super) pending_updates: AHashMap<PointOffset, PointerUpdates>,
/// The maximum pointer offset in the tracker (updated in memory).
next_pointer_offset: PointOffset,
}
impl Tracker {
const FILE_NAME: &'static str = "tracker.dat";
const DEFAULT_SIZE: usize = 1024 * 1024; // 1MB
pub fn files(&self) -> Vec<PathBuf> {
vec![self.path.clone()]
}
fn tracker_file_name(path: &Path) -> PathBuf {
path.join(Self::FILE_NAME)
}
/// Create a new PageTracker at the given dir path
/// The file is created with the default size if no size hint is given
pub fn new(path: &Path, size_hint: Option<usize>) -> Self {
let path = Self::tracker_file_name(path);
let size = size_hint.unwrap_or(Self::DEFAULT_SIZE).next_power_of_two();
assert!(size > size_of::<TrackerHeader>(), "Size hint is too small");
create_and_ensure_length(&path, size).expect("Failed to create page tracker file");
let mmap = open_write_mmap(&path, AdviceSetting::from(TRACKER_MEM_ADVICE), false)
.expect("Failed to open page tracker mmap");
let header = TrackerHeader::default();
let pending_updates = AHashMap::new();
let mut page_tracker = Self {
path,
header,
mmap,
pending_updates,
next_pointer_offset: 0,
};
page_tracker.write_header();
page_tracker
}
/// Open an existing PageTracker at the given path
/// If the file does not exist, return None
pub fn open(path: &Path) -> Result<Self> {
let path = Self::tracker_file_name(path);
if !path.exists() {
return Err(GridstoreError::service_error(format!(
"Tracker file does not exist: {}",
path.display()
)));
}
let mmap = open_write_mmap(&path, AdviceSetting::from(TRACKER_MEM_ADVICE), false)?;
let header: &TrackerHeader = transmute_from_u8(&mmap[0..size_of::<TrackerHeader>()]);
let pending_updates = AHashMap::new();
Ok(Self {
next_pointer_offset: header.next_pointer_offset,
path,
header: header.clone(),
mmap,
pending_updates,
})
}
/// Writes the accumulated pending updates to mmap and flushes it
///
/// Changes should be captured from [`self.pending_updates`]. This method may therefore flush
/// an earlier version of changes.
///
/// This updates the list of pending updates inside this tracker for each given update that is
/// processed.
///
/// Returns the old pointers that were overwritten, so that they can be freed in the bitmask.
#[must_use = "The old pointers need to be freed in the bitmask"]
pub fn write_pending_and_flush(
&mut self,
pending_updates: AHashMap<PointOffset, PointerUpdates>,
) -> std::io::Result<Vec<ValuePointer>> {
let mut old_pointers = Vec::new();
for (point_offset, updates) in pending_updates {
match updates.current {
// Write to store a new pointer
Some(new_pointer) => {
// Mark any existing pointer for removal to free its blocks
if let Some(&Some(old_pointer)) = self.get_raw(point_offset) {
old_pointers.push(old_pointer);
}
self.persist_pointer(point_offset, Some(new_pointer));
}
// Write to empty the pointer
None => self.persist_pointer(point_offset, None),
}
// Mark all old pointers for removal to free its blocks
old_pointers.extend(&updates.to_free);
// Remove all persisted updates from the latest updates, drop if no changes are left
if let Some(latest_updates) = self.pending_updates.get_mut(&point_offset) {
let is_empty = latest_updates.drain_persisted(&updates);
if is_empty {
let prev = self.pending_updates.remove(&point_offset);
if let Some(prev) = prev {
debug_assert!(
prev.is_empty(),
"remove pending element should be empty but got {prev:?}"
);
}
}
}
}
// Increment header count if necessary
self.persist_pointer_count();
// Flush the mmap
self.mmap.flush()?;
Ok(old_pointers)
}
#[cfg(test)]
pub fn write_pending_and_flush_internal(&mut self) -> std::io::Result<Vec<ValuePointer>> {
let pending_updates = std::mem::take(&mut self.pending_updates);
self.write_pending_and_flush(pending_updates)
}
/// Return the size of the underlying mmapped file
#[cfg(test)]
pub fn mmap_file_size(&self) -> usize {
self.mmap.len()
}
pub fn pointer_count(&self) -> u32 {
self.next_pointer_offset
}
/// Write the current page header to the memory map
fn write_header(&mut self) {
self.mmap[0..size_of::<TrackerHeader>()].copy_from_slice(transmute_to_u8(&self.header));
}
/// Save the mapping at the given offset
/// The file is resized if necessary
fn persist_pointer(&mut self, point_offset: PointOffset, pointer: Option<ValuePointer>) {
if pointer.is_none() && point_offset as usize >= self.mmap.len() {
return;
}
let point_offset = point_offset as usize;
let start_offset =
size_of::<TrackerHeader>() + point_offset * size_of::<Option<ValuePointer>>();
let end_offset = start_offset + size_of::<Option<ValuePointer>>();
// Grow tracker file if it isn't big enough
if self.mmap.len() < end_offset {
self.mmap.flush().unwrap();
let new_size = end_offset.next_power_of_two();
create_and_ensure_length(&self.path, new_size).unwrap();
self.mmap = open_write_mmap(&self.path, AdviceSetting::from(TRACKER_MEM_ADVICE), false)
.unwrap();
}
self.mmap[start_offset..end_offset].copy_from_slice(transmute_to_u8(&pointer));
}
#[cfg(test)]
pub fn is_empty(&self) -> bool {
self.mapping_len() == 0
}
/// Get the length of the mapping
/// Excludes None values
/// Warning: performs a full scan of the tracker.
#[cfg(test)]
pub fn mapping_len(&self) -> usize {
(0..self.next_pointer_offset)
.filter(|i| self.get(*i).is_some())
.count()
}
/// Iterate over the pointers in the tracker
pub fn iter_pointers(&self) -> impl Iterator<Item = (PointOffset, Option<ValuePointer>)> + '_ {
(0..self.next_pointer_offset).map(move |i| (i, self.get(i as PointOffset)))
}
/// Get the raw value at the given point offset
fn get_raw(&self, point_offset: PointOffset) -> Option<&Option<ValuePointer>> {
let start_offset =
size_of::<TrackerHeader>() + point_offset as usize * size_of::<Option<ValuePointer>>();
let end_offset = start_offset + size_of::<Option<ValuePointer>>();
if end_offset > self.mmap.len() {
return None;
}
let page_pointer = transmute_from_u8(&self.mmap[start_offset..end_offset]);
Some(page_pointer)
}
/// Get the page pointer at the given point offset
pub fn get(&self, point_offset: PointOffset) -> Option<ValuePointer> {
match self.pending_updates.get(&point_offset) {
// Pending update exists but is empty, should not happen, fall back to real data
Some(pending) if pending.is_empty() => {
debug_assert!(false, "pending updates must not be empty");
self.get_raw(point_offset).copied().flatten()
}
// Use set from pending updates
Some(pending) => pending.current,
// No pending update, use real data
None => self.get_raw(point_offset).copied().flatten(),
}
}
/// Increment the header count if the given point offset is larger than the current count
fn persist_pointer_count(&mut self) {
self.header.next_pointer_offset = self.next_pointer_offset;
self.write_header();
}
pub fn has_pointer(&self, point_offset: PointOffset) -> bool {
self.get(point_offset).is_some()
}
pub fn set(&mut self, point_offset: PointOffset, value_pointer: ValuePointer) {
self.pending_updates
.entry(point_offset)
.or_default()
.set(value_pointer);
self.next_pointer_offset = self.next_pointer_offset.max(point_offset + 1);
}
/// Unset the value at the given point offset and return its previous value
pub fn unset(&mut self, point_offset: PointOffset) -> Option<ValuePointer> {
let pointer_opt = self.get(point_offset);
if let Some(pointer) = pointer_opt {
self.pending_updates
.entry(point_offset)
.or_default()
.unset(pointer);
}
pointer_opt
}
pub fn populate(&self) {
self.mmap.populate();
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use rstest::rstest;
use tempfile::Builder;
use super::{PointerUpdates, Tracker, ValuePointer};
#[test]
fn test_file_name() {
let path: PathBuf = "/tmp/test".into();
let file_name = Tracker::tracker_file_name(&path);
assert_eq!(file_name, path.join(Tracker::FILE_NAME));
}
#[test]
fn test_page_tracker_files() {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let tracker = Tracker::new(path, None);
let files = tracker.files();
assert_eq!(files.len(), 1);
assert_eq!(files[0], path.join(Tracker::FILE_NAME));
}
#[test]
fn test_new_tracker() {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let tracker = Tracker::new(path, None);
assert!(tracker.is_empty());
assert_eq!(tracker.mapping_len(), 0);
assert_eq!(tracker.pointer_count(), 0);
}
#[rstest]
#[case(10)]
#[case(100)]
#[case(1000)]
fn test_mapping_len_tracker(#[case] initial_tracker_size: usize) {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let mut tracker = Tracker::new(path, Some(initial_tracker_size));
assert!(tracker.is_empty());
tracker.set(0, ValuePointer::new(1, 1, 1));
tracker.write_pending_and_flush_internal().unwrap();
assert!(!tracker.is_empty());
assert_eq!(tracker.mapping_len(), 1);
tracker.set(100, ValuePointer::new(2, 2, 2));
tracker.write_pending_and_flush_internal().unwrap();
assert_eq!(tracker.pointer_count(), 101);
assert_eq!(tracker.mapping_len(), 2);
}
#[rstest]
#[case(10)]
#[case(100)]
#[case(1000)]
fn test_set_get_clear_tracker(#[case] initial_tracker_size: usize) {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let mut tracker = Tracker::new(path, Some(initial_tracker_size));
tracker.set(0, ValuePointer::new(1, 1, 1));
tracker.set(1, ValuePointer::new(2, 2, 2));
tracker.set(2, ValuePointer::new(3, 3, 3));
tracker.set(10, ValuePointer::new(10, 10, 10));
tracker.write_pending_and_flush_internal().unwrap();
assert!(!tracker.is_empty());
assert_eq!(tracker.mapping_len(), 4);
assert_eq!(tracker.pointer_count(), 11); // accounts for empty slots
assert_eq!(tracker.get_raw(0), Some(&Some(ValuePointer::new(1, 1, 1))));
assert_eq!(tracker.get_raw(1), Some(&Some(ValuePointer::new(2, 2, 2))));
assert_eq!(tracker.get_raw(2), Some(&Some(ValuePointer::new(3, 3, 3))));
assert_eq!(tracker.get_raw(3), Some(&None)); // intermediate empty slot
assert_eq!(
tracker.get_raw(10),
Some(&Some(ValuePointer::new(10, 10, 10)))
);
assert_eq!(tracker.get_raw(100_000), None); // out of bounds
tracker.unset(1);
tracker.write_pending_and_flush_internal().unwrap();
// the value has been cleared but the entry is still there
assert_eq!(tracker.get_raw(1), Some(&None));
assert_eq!(tracker.get(1), None);
assert_eq!(tracker.mapping_len(), 3);
assert_eq!(tracker.pointer_count(), 11);
// overwrite some values
tracker.set(0, ValuePointer::new(10, 10, 10));
tracker.set(2, ValuePointer::new(30, 30, 30));
tracker.write_pending_and_flush_internal().unwrap();
assert_eq!(tracker.get(0), Some(ValuePointer::new(10, 10, 10)));
assert_eq!(tracker.get(2), Some(ValuePointer::new(30, 30, 30)));
}
#[rstest]
#[case(10)]
#[case(100)]
#[case(1000)]
fn test_persist_and_open_tracker(#[case] initial_tracker_size: usize) {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let value_count: usize = 1000;
let mut tracker = Tracker::new(path, Some(initial_tracker_size));
for i in 0..value_count {
// save only half of the values
if i % 2 == 0 {
tracker.set(i as u32, ValuePointer::new(i as u32, i as u32, i as u32));
}
}
tracker.write_pending_and_flush_internal().unwrap();
assert_eq!(tracker.mapping_len(), value_count / 2);
assert_eq!(tracker.pointer_count(), value_count as u32 - 1);
// drop the tracker
drop(tracker);
// reopen the tracker
let tracker = Tracker::open(path).unwrap();
assert_eq!(tracker.mapping_len(), value_count / 2);
assert_eq!(tracker.pointer_count(), value_count as u32 - 1);
// check the values
for i in 0..value_count {
if i % 2 == 0 {
assert_eq!(
tracker.get(i as u32),
Some(ValuePointer::new(i as u32, i as u32, i as u32))
);
} else {
assert_eq!(tracker.get(i as u32), None);
}
}
}
#[rstest]
#[case(10, 16)]
#[case(100, 128)]
#[case(1000, 1024)]
#[case(1024, 1024)]
fn test_page_tracker_resize(
#[case] desired_tracker_size: usize,
#[case] actual_tracker_size: usize,
) {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let mut tracker = Tracker::new(path, Some(desired_tracker_size));
assert_eq!(tracker.mapping_len(), 0);
assert_eq!(tracker.mmap_file_size(), actual_tracker_size);
for i in 0..100_000 {
tracker.set(i, ValuePointer::new(i, i, i));
}
tracker.write_pending_and_flush_internal().unwrap();
assert_eq!(tracker.mapping_len(), 100_000);
assert!(tracker.mmap_file_size() > actual_tracker_size);
}
#[test]
fn test_track_non_sequential_large_offset() {
let file = Builder::new().prefix("test-tracker").tempdir().unwrap();
let path = file.path();
let mut tracker = Tracker::new(path, None);
assert_eq!(tracker.mapping_len(), 0);
let page_pointer = ValuePointer::new(1, 1, 1);
let key = 1_000_000;
tracker.set(key, page_pointer);
assert_eq!(tracker.get(key), Some(page_pointer));
}
#[test]
fn test_value_pointer_drain() {
let mut updates = PointerUpdates::default();
updates.set(ValuePointer::new(1, 1, 1));
// When all updates are persisted, drop the entry
assert!(updates.clone().drain_persisted(&updates), "must drop entry");
updates.set(ValuePointer::new(1, 2, 1));
// When all updates are persisted, drop the entry
assert!(updates.clone().drain_persisted(&updates), "must drop entry");
let persisted = updates.clone();
updates.set(ValuePointer::new(1, 3, 1));
// Last pointer was not persisted, only keep it for the next flush
{
let mut updates = updates.clone();
assert!(!updates.drain_persisted(&persisted));
assert_eq!(updates.current, Some(ValuePointer::new(1, 3, 1))); // set block offset 3
assert_eq!(
updates.to_free.as_slice(),
&[
ValuePointer::new(1, 2, 1), // unset block offset 2 (last persisted was set)
]
);
}
updates.set(ValuePointer::new(1, 4, 1));
// Last two pointers were not persisted, only keep them for the next flush
{
let mut updates = updates.clone();
assert!(!updates.drain_persisted(&persisted));
assert_eq!(updates.current, Some(ValuePointer::new(1, 4, 1))); // set block offset 4
assert_eq!(
updates.to_free.as_slice(),
&[
ValuePointer::new(1, 2, 1), // unset block offset 2 (last persisted was set)
ValuePointer::new(1, 3, 1), // unset block offset 3
]
);
}
let persisted = updates.clone();
updates.unset(ValuePointer::new(1, 4, 1));
// Last pointer write is persisted, but the delete of the last pointer is not
// Then we keep the last pointer with set=false to flush the delete next time
{
let mut updates = updates.clone();
assert!(!updates.drain_persisted(&persisted));
assert_eq!(updates.current, None);
assert_eq!(updates.to_free.as_slice(), &[ValuePointer::new(1, 4, 1)]);
}
// Even if the history would somehow be shuffled we'd still properly drain
{
let mut updates = updates.clone();
let mut persisted = updates.clone();
persisted.to_free.swap(0, 1);
persisted.to_free.swap(1, 3);
assert!(updates.drain_persisted(&persisted));
}
}
/// Test pointer drain edge case that was previously broken.
///
/// See: <https://github.com/qdrant/qdrant/pull/7741>
#[test]
fn test_value_pointer_drain_bug_7741() {
// current:
// - latest: true
// - history: [block_offset:1, block_offset:2]
//
// persisted:
// - latest: false
// - history: [block_offset:1]
//
// expected current after drain:
// - latest: true
// - history: [block_offset:2]
let mut updates = PointerUpdates::default();
// Put and delete block offset 1
updates.set(ValuePointer::new(1, 1, 1));
updates.unset(ValuePointer::new(1, 1, 1));
// Clone this set of updates to flush later
let persisted = updates.clone();
// Put block offset 2
updates.set(ValuePointer::new(1, 2, 1));
// Drain persisted updates and don't drop, still need to persist block offset 2 later
let do_drop = updates.drain_persisted(&persisted);
assert!(!do_drop, "must not drop entry");
// Pending updates must only have set for block offset 2
let expected = {
let mut expected = PointerUpdates::default();
expected.set(ValuePointer::new(1, 2, 1));
expected
};
assert_eq!(
updates, expected,
"must have one pending update to set block offset 2",
);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/error.rs | lib/gridstore/src/error.rs | use memory::mmap_type;
#[derive(thiserror::Error, Debug)]
pub enum GridstoreError {
#[error("{0}")]
Mmap(#[from] mmap_type::Error),
#[error("{0}")]
Io(#[from] std::io::Error),
#[error("{0}")]
SerdeJson(#[from] serde_json::error::Error),
#[error("Service error: {description}")]
ServiceError { description: String },
#[error("Flush was cancelled")]
FlushCancelled,
#[error("Validation error: {message}")]
ValidationError { message: String },
}
impl GridstoreError {
pub fn service_error(description: impl Into<String>) -> Self {
GridstoreError::ServiceError {
description: description.into(),
}
}
pub fn validation_error(message: impl Into<String>) -> Self {
GridstoreError::ValidationError {
message: message.into(),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/fixtures.rs | lib/gridstore/src/fixtures.rs | use rand::Rng;
use rand::distr::{Distribution, Uniform};
use serde::{Deserialize, Serialize};
use serde_json::Map;
use tempfile::{Builder, TempDir};
use crate::config::{Compression, StorageOptions};
use crate::{Blob, Gridstore};
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct Payload(pub Map<String, serde_json::Value>);
impl Default for Payload {
fn default() -> Self {
Payload(serde_json::Map::new())
}
}
impl Blob for Payload {
fn to_bytes(&self) -> Vec<u8> {
serde_json::to_vec(self).unwrap()
}
fn from_bytes(data: &[u8]) -> Self {
serde_json::from_slice(data).unwrap()
}
}
/// Create an empty storage with the default configuration
pub fn empty_storage() -> (TempDir, Gridstore<Payload>) {
let dir = Builder::new().prefix("test-storage").tempdir().unwrap();
let storage = Gridstore::new(dir.path().to_path_buf(), Default::default()).unwrap();
(dir, storage)
}
/// Create an empty storage with a specific page size
pub fn empty_storage_sized(
page_size: usize,
compression: Compression,
) -> (TempDir, Gridstore<Payload>) {
let dir = Builder::new().prefix("test-storage").tempdir().unwrap();
let options = StorageOptions {
page_size_bytes: Some(page_size),
compression: Some(compression),
..Default::default()
};
let storage = Gridstore::new(dir.path().to_path_buf(), options).unwrap();
(dir, storage)
}
pub fn random_word(rng: &mut impl Rng) -> String {
let len = rng.random_range(1..10);
let mut word = String::with_capacity(len);
for _ in 0..len {
word.push(rng.random_range(b'a'..=b'z') as char);
}
word
}
pub fn random_payload(rng: &mut impl Rng, size_factor: usize) -> Payload {
let mut payload = Payload::default();
let word = random_word(rng);
let sentence = (0..rng.random_range(1..20 * size_factor))
.map(|_| random_word(rng))
.collect::<Vec<_>>()
.join(" ");
let distr = Uniform::new(0, 100000).unwrap();
let indices = (0..rng.random_range(1..100 * size_factor))
.map(|_| distr.sample(rng))
.collect::<Vec<_>>();
payload.0 = serde_json::json!(
{
"word": word, // string
"sentence": sentence, // string
"number": rng.random_range(0..1000), // number
"indices": indices, // array of numbers
"bool": rng.random_bool(0.5), // boolean
"null": serde_json::Value::Null, // null
"object": {
"bool": rng.random_bool(0.5),
}, // object
}
)
.as_object()
.unwrap()
.clone();
payload
}
pub fn minimal_payload() -> Payload {
Payload(serde_json::json!({"a": 1}).as_object().unwrap().clone())
}
pub const HM_FIELDS: [&str; 23] = [
"article_id",
"product_code",
"prod_name",
"product_type_no",
"product_type_name",
"product_group_name",
"graphical_appearance_no",
"graphical_appearance_name",
"colour_group_code",
"colour_group_name",
"perceived_colour_value_id",
"perceived_colour_value_name",
"perceived_colour_master_id",
"perceived_colour_master_name",
"department_no",
"department_name",
"index_code,index_name",
"index_group_no",
"index_group_name",
"section_no,section_name",
"garment_group_no",
"garment_group_name",
"detail_desc",
];
#[cfg(test)]
mod tests {
use crate::Blob;
use crate::fixtures::Payload;
#[test]
fn test_serde_symmetry() {
let mut payload = Payload::default();
payload.0.insert(
"key".to_string(),
serde_json::Value::String("value".to_string()),
);
let bytes = payload.to_bytes();
let deserialized = Payload::from_bytes(&bytes);
assert_eq!(payload, deserialized);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/blob.rs | lib/gridstore/src/blob.rs | use zerocopy::{FromBytes, Immutable, IntoBytes};
/// Serialization contract for values stored in gridstore.
pub trait Blob {
    /// Serialize the value into an owned byte buffer.
    fn to_bytes(&self) -> Vec<u8>;
    /// Deserialize a value from `bytes`.
    ///
    /// Implementations in this module panic on malformed input rather than
    /// returning an error.
    fn from_bytes(bytes: &[u8]) -> Self;
}
/// Raw byte buffers round-trip unchanged: both directions are a plain copy.
impl Blob for Vec<u8> {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_vec()
    }

    fn from_bytes(bytes: &[u8]) -> Self {
        Vec::from(bytes)
    }
}
/// String lists are serialized with CBOR (variable-length encoding).
impl Blob for Vec<ecow::EcoString> {
    fn to_bytes(&self) -> Vec<u8> {
        serde_cbor::to_vec(self).expect("Failed to serialize Vec<ecow::EcoString>")
    }
    fn from_bytes(bytes: &[u8]) -> Self {
        serde_cbor::from_slice(bytes).expect("Failed to deserialize Vec<ecow::EcoString>")
    }
}

/// Pairs of floats are serialized as a flat sequence of little-endian `f64`
/// values, two per pair, with no header.
impl Blob for Vec<(f64, f64)> {
    fn to_bytes(&self) -> Vec<u8> {
        self.iter()
            .flat_map(|(a, b)| a.to_le_bytes().into_iter().chain(b.to_le_bytes()))
            .collect()
    }
    fn from_bytes(bytes: &[u8]) -> Self {
        // Panics (rather than truncating) if the buffer is not a whole number of pairs.
        assert!(
            bytes.len().is_multiple_of(size_of::<f64>() * 2),
            "unexpected number of bytes for Vec<(f64, f64)>",
        );
        bytes
            .chunks(size_of::<f64>() * 2)
            .map(|v| {
                // Each 16-byte chunk holds the two f64 components back to back.
                let (a, b) = v.split_at(size_of::<f64>());
                (
                    f64::read_from_bytes(a).expect("invalid number of bytes for type f64"),
                    f64::read_from_bytes(b).expect("invalid number of bytes for type f64"),
                )
            })
            .collect()
    }
}
/// Implements [`Blob`] for `Vec<$type>` using zerocopy: values are written as
/// their contiguous in-memory byte representation with no header.
/// `from_bytes` asserts the buffer length is a whole number of elements.
macro_rules! impl_blob_vec_zerocopy {
    ($type:ty) => {
        impl Blob for Vec<$type>
        where
            $type: FromBytes + IntoBytes + Immutable,
        {
            fn to_bytes(&self) -> Vec<u8> {
                self.iter()
                    .flat_map(|item| item.as_bytes())
                    .copied()
                    .collect()
            }
            fn from_bytes(bytes: &[u8]) -> Self {
                assert!(
                    bytes.len().is_multiple_of(size_of::<$type>()),
                    "unexpected number of bytes for Vec<{}>",
                    stringify!($type),
                );
                bytes
                    .chunks(size_of::<$type>())
                    .map(|v| <$type>::read_from_bytes(v).expect("invalid chunk size for type T"))
                    .collect()
            }
        }
    };
}

// Concrete element types stored by gridstore via the zerocopy layout.
impl_blob_vec_zerocopy!(i64);
impl_blob_vec_zerocopy!(u128);
impl_blob_vec_zerocopy!(f64);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/gridstore.rs | lib/gridstore/src/gridstore.rs | use std::io::BufReader;
use std::path::PathBuf;
use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::counter::referenced_counter::HwMetricRefCounter;
use common::is_alive_lock::IsAliveLock;
use fs_err as fs;
use fs_err::File;
use io::file_operations::atomic_save_json;
use itertools::Itertools;
use lz4_flex::compress_prepend_size;
use parking_lot::RwLock;
use crate::Result;
use crate::bitmask::Bitmask;
use crate::blob::Blob;
use crate::config::{Compression, StorageConfig, StorageOptions};
use crate::error::GridstoreError;
use crate::page::Page;
use crate::tracker::{BlockOffset, PageId, PointOffset, Tracker, ValuePointer};
/// Name of the JSON config file inside the storage directory; its presence
/// signals that the storage was created successfully.
const CONFIG_FILENAME: &str = "config.json";

/// A deferred flush operation: captures the pending state at creation time and
/// persists it when invoked.
pub type Flusher = Box<dyn FnOnce() -> std::result::Result<(), GridstoreError> + Send>;

/// Storage for values of type `V`.
///
/// Assumes sequential IDs to the values (0, 1, 2, 3, ...)
#[derive(Debug)]
pub struct Gridstore<V> {
    /// Configuration of the storage.
    pub(super) config: StorageConfig,
    /// Holds mapping from `PointOffset` -> `ValuePointer`
    ///
    /// Stored in a separate file
    pub(super) tracker: Arc<RwLock<Tracker>>,
    /// Mapping from page_id -> mmap page
    pub(super) pages: Arc<RwLock<Vec<Page>>>,
    /// Bitmask to represent which "blocks" of data in the pages are used and which are free.
    ///
    /// 0 is free, 1 is used.
    bitmask: Arc<RwLock<Bitmask>>,
    /// Path of the directory where the storage files are stored
    base_path: PathBuf,
    // Zero-sized marker tying the storage to its value type `V`.
    _value_type: std::marker::PhantomData<V>,
    /// Lock to prevent concurrent flushes and used for waiting for ongoing flushes to finish.
    is_alive_flush_lock: IsAliveLock,
}
/// Compress `value` with LZ4, prepending the uncompressed size to the output
/// (required by `decompress_lz4` to size its buffer).
#[inline]
fn compress_lz4(value: &[u8]) -> Vec<u8> {
    compress_prepend_size(value)
}
/// Decompress a size-prepended LZ4 buffer produced by [`compress_lz4`].
///
/// # Panics
///
/// Panics if the buffer is not valid size-prepended LZ4 data, which indicates
/// corrupted or truncated storage. The message carries context, unlike a bare
/// `unwrap()`, so on-disk corruption is diagnosable from the panic alone.
#[inline]
fn decompress_lz4(value: &[u8]) -> Vec<u8> {
    lz4_flex::decompress_size_prepended(value)
        .expect("failed to decompress LZ4 data: gridstore value is corrupted or truncated")
}
impl<V: Blob> Gridstore<V> {
    /// Compress a serialized value according to the configured codec.
    /// With `Compression::None` the input is returned untouched.
    fn compress(&self, value: Vec<u8>) -> Vec<u8> {
        match self.config.compression {
            Compression::None => value,
            Compression::LZ4 => compress_lz4(&value),
        }
    }

    /// Inverse of [`Self::compress`]: decompress raw bytes read from the pages
    /// according to the configured codec.
    fn decompress(&self, value: Vec<u8>) -> Vec<u8> {
        match self.config.compression {
            Compression::None => value,
            Compression::LZ4 => decompress_lz4(&value),
        }
    }
    /// List every file that belongs to this storage: tracker file(s), one file
    /// per page, bitmask file(s), and the config file.
    pub fn files(&self) -> Vec<PathBuf> {
        let mut paths = Vec::with_capacity(self.pages.read().len() + 1);
        // page tracker file
        for tracker_file in self.tracker.read().files() {
            paths.push(tracker_file);
        }
        // pages files
        for page_id in 0..self.next_page_id() {
            paths.push(self.page_path(page_id));
        }
        // bitmask files
        for bitmask_file in self.bitmask.read().files() {
            paths.push(bitmask_file);
        }
        // config file
        paths.push(self.base_path.join(CONFIG_FILENAME));
        paths
    }

    /// Files that never change after creation (only the config file).
    pub fn immutable_files(&self) -> Vec<PathBuf> {
        vec![self.base_path.join(CONFIG_FILENAME)]
    }

    /// Id the next page would get; pages are numbered sequentially from 0.
    fn next_page_id(&self) -> PageId {
        self.pages.read().len() as PageId
    }

    /// Upper bound (exclusive) of point offsets tracked by this storage.
    pub fn max_point_id(&self) -> PointOffset {
        self.tracker.read().pointer_count()
    }
    /// Opens an existing storage, or initializes a new one.
    /// Depends on the existence of the config file at the `base_path`.
    ///
    /// In case of opening, it ignores the `create_options` parameter.
    pub fn open_or_create(base_path: PathBuf, create_options: StorageOptions) -> Result<Self> {
        let config_path = base_path.join(CONFIG_FILENAME);
        // The config file is written last during creation, so its presence
        // means a previous creation completed successfully.
        if config_path.exists() {
            Self::open(base_path)
        } else {
            // create folder if it does not exist
            fs::create_dir_all(&base_path).map_err(|err| {
                GridstoreError::service_error(format!(
                    "Failed to create gridstore storage directory: {err}"
                ))
            })?;
            Self::new(base_path, create_options)
        }
    }
    /// Initializes a new storage with a single empty page.
    ///
    /// `base_path` is the directory where the storage files will be stored.
    /// It should exist already.
    ///
    /// # Errors
    ///
    /// Fails if `base_path` is missing or not a directory, if the options are
    /// invalid, or on any I/O error while creating the initial files.
    pub fn new(base_path: PathBuf, options: StorageOptions) -> Result<Self> {
        if !base_path.exists() {
            return Err(GridstoreError::service_error("Base path does not exist"));
        }
        if !base_path.is_dir() {
            return Err(GridstoreError::service_error(
                "Base path is not a directory",
            ));
        }
        let config = StorageConfig::try_from(options).map_err(GridstoreError::service_error)?;
        let config_path = base_path.join(CONFIG_FILENAME);
        let storage = Self {
            tracker: Arc::new(RwLock::new(Tracker::new(&base_path, None))),
            pages: Default::default(),
            bitmask: Arc::new(RwLock::new(Bitmask::create(&base_path, config)?)),
            base_path,
            config,
            _value_type: std::marker::PhantomData,
            is_alive_flush_lock: IsAliveLock::new(),
        };
        // create first page to be covered by the bitmask
        let new_page_id = storage.next_page_id();
        let path = storage.page_path(new_page_id);
        let page = Page::new(&path, storage.config.page_size_bytes)?;
        storage.pages.write().push(page);
        // lastly, write config to disk to use as a signal that the storage has been created correctly
        atomic_save_json(&config_path, &config)
            .map_err(|err| GridstoreError::service_error(err.to_string()))?;
        Ok(storage)
    }
    /// Open an existing storage at the given path.
    ///
    /// # Errors
    ///
    /// Returns an error if the path is missing or not a directory, or if any of
    /// the storage files (config, tracker, bitmask, pages) cannot be read.
    pub fn open(base_path: PathBuf) -> Result<Self> {
        if !base_path.exists() {
            return Err(GridstoreError::service_error(format!(
                "Path '{base_path:?}' does not exist"
            )));
        }
        if !base_path.is_dir() {
            return Err(GridstoreError::service_error(format!(
                "Path '{base_path:?}' is not a directory"
            )));
        }
        // read config file first
        let config_path = base_path.join(CONFIG_FILENAME);
        let config_file = BufReader::new(File::open(&config_path)?);
        let config: StorageConfig = serde_json::from_reader(config_file)?;
        let page_tracker = Tracker::open(&base_path)?;
        let bitmask = Bitmask::open(&base_path, config)?;
        // The number of pages is not stored explicitly; it is derived from the
        // bitmask extent.
        let num_pages = bitmask.infer_num_pages();
        let storage = Self {
            tracker: Arc::new(RwLock::new(page_tracker)),
            config,
            pages: Arc::new(RwLock::new(Vec::with_capacity(num_pages))),
            bitmask: Arc::new(RwLock::new(bitmask)),
            base_path,
            _value_type: std::marker::PhantomData,
            is_alive_flush_lock: IsAliveLock::new(),
        };
        // load pages
        let mut pages = storage.pages.write();
        for page_id in 0..num_pages as PageId {
            let page_path = storage.page_path(page_id);
            let page = Page::open(&page_path)?;
            pages.push(page);
        }
        drop(pages);
        Ok(storage)
    }
    /// Get the path for a given page id
    fn page_path(&self, page_id: u32) -> PathBuf {
        self.base_path.join(format!("page_{page_id}.dat"))
    }

    /// Read raw value from the pages. Considering that they can span more than one page.
    ///
    /// # Arguments
    ///
    /// - `start_page_id` - The id of the first page to read from.
    /// - `block_offset` - The offset within the first page to start reading from.
    /// - `length` - The total length of the value to read.
    /// - READ_SEQUENTIAL - Whether to optimize sequential reads by fetching next mmap pages.
    fn read_from_pages<const READ_SEQUENTIAL: bool>(
        &self,
        start_page_id: PageId,
        mut block_offset: BlockOffset,
        mut length: u32,
    ) -> Vec<u8> {
        let mut raw_sections = Vec::with_capacity(length as usize);
        let pages = self.pages.read();
        // Walk consecutive pages until the whole value has been read. A value
        // that spills over a page continues at block offset 0 of the next page.
        for page_id in start_page_id.. {
            let page = &pages[page_id as usize];
            let (raw, unread_bytes) = page.read_value::<READ_SEQUENTIAL>(
                block_offset,
                length,
                self.config.block_size_bytes,
            );
            raw_sections.extend(raw);
            if unread_bytes == 0 {
                break;
            }
            block_offset = 0;
            length = unread_bytes as u32;
        }
        raw_sections
    }
    /// Get the value for a given point offset
    ///
    /// Returns `None` if no value was ever stored (or it was deleted) for this offset.
    ///
    /// # Arguments
    /// - point_offset: The ID of the value.
    /// - hw_counter: The hardware counter cell.
    /// - READ_SEQUENTIAL: Whether to read mmap pages ahead to optimize sequential access
    pub fn get_value<const READ_SEQUENTIAL: bool>(
        &self,
        point_offset: PointOffset,
        hw_counter: &HardwareCounterCell,
    ) -> Option<V> {
        let ValuePointer {
            page_id,
            block_offset,
            length,
        } = self.get_pointer(point_offset)?;
        let raw = self.read_from_pages::<READ_SEQUENTIAL>(page_id, block_offset, length);
        // IO metric accounts for the compressed on-disk size, not the decompressed one.
        hw_counter.payload_io_read_counter().incr_delta(raw.len());
        let decompressed = self.decompress(raw);
        let value = V::from_bytes(&decompressed);
        Some(value)
    }
    /// Create a new page and return its id.
    /// If size is None, the page will have the default size
    ///
    /// Returns the new page id
    #[allow(clippy::needless_pass_by_ref_mut)]
    fn create_new_page(&mut self) -> Result<u32> {
        let new_page_id = self.next_page_id();
        let path = self.page_path(new_page_id);
        let page = Page::new(&path, self.config.page_size_bytes)?;
        self.pages.write().push(page);
        // The bitmask must be extended so the new page's blocks are addressable.
        self.bitmask.write().cover_new_page()?;
        Ok(new_page_id)
    }

    /// Get the mapping for a given point offset
    fn get_pointer(&self, point_offset: PointOffset) -> Option<ValuePointer> {
        self.tracker.read().get(point_offset)
    }
    /// Find a contiguous run of `num_blocks` free blocks, creating as many new
    /// pages as needed if no existing gap fits.
    ///
    /// Returns the page id and block offset where the run starts.
    fn find_or_create_available_blocks(
        &mut self,
        num_blocks: u32,
    ) -> Result<(PageId, BlockOffset)> {
        debug_assert!(num_blocks > 0, "num_blocks must be greater than 0");
        let bitmask_guard = self.bitmask.read();
        if let Some((page_id, block_offset)) = bitmask_guard.find_available_blocks(num_blocks) {
            return Ok((page_id, block_offset));
        }
        // else we need new page(s)
        let trailing_free_blocks = bitmask_guard.trailing_free_blocks();
        // release the lock before creating new pages
        drop(bitmask_guard);
        // Trailing free blocks at the end of the last page can be combined with
        // blocks of the new pages, so only the shortfall needs fresh pages.
        let missing_blocks = num_blocks.saturating_sub(trailing_free_blocks) as usize;
        let num_pages =
            (missing_blocks * self.config.block_size_bytes).div_ceil(self.config.page_size_bytes);
        for _ in 0..num_pages {
            self.create_new_page()?;
        }
        // At this point we are sure that we have enough free pages to allocate the blocks
        let available = self
            .bitmask
            .read()
            .find_available_blocks(num_blocks)
            .unwrap();
        Ok(available)
    }
    /// Write value into a new cell, considering that it can span more than one page
    ///
    /// The caller must have reserved enough contiguous blocks beforehand
    /// (see `find_or_create_available_blocks`).
    #[allow(clippy::needless_pass_by_ref_mut)]
    fn write_into_pages(
        &mut self,
        value: &[u8],
        start_page_id: PageId,
        mut block_offset: BlockOffset,
    ) {
        let value_size = value.len();
        // Track the number of bytes that still need to be written
        let mut unwritten_tail = value_size;
        let mut pages = self.pages.write();
        for page_id in start_page_id.. {
            let page = &mut pages[page_id as usize];
            // Write the remaining suffix of the value into the current page.
            let range = (value_size - unwritten_tail)..;
            unwritten_tail =
                page.write_value(block_offset, &value[range], self.config.block_size_bytes);
            if unwritten_tail == 0 {
                break;
            }
            // Spill over: continue at the very start of the next page.
            block_offset = 0;
        }
    }
    /// Put a value in the storage.
    ///
    /// Returns true if the value existed previously and was updated, false if it was newly inserted.
    pub fn put_value(
        &mut self,
        point_offset: PointOffset,
        value: &V,
        hw_counter: HwMetricRefCounter,
    ) -> Result<bool> {
        // This function needs to NOT corrupt data in case of a crash.
        //
        // Since we cannot know deterministically when a write is persisted without
        // flushing explicitly, and we don't want to flush on every write, the update
        // is always written to a FRESH spot and the old spot is only reclaimed later:
        //
        //   1. Ask the bitmask for a free run of blocks (page id + offset).
        //   2. Write the compressed value into those blocks.
        //   3. Mark the blocks as used in the bitmask.
        //   4. Update the tracker pointer in RAM only.
        //
        // Persistence happens later, in `flusher()`: it flushes bitmask and pages,
        // then writes the pending tracker updates, and only afterwards marks the
        // OLD blocks as free again and flushes the bitmask once more.
        //
        // In case of crashing somewhere in the middle of this operation, the worst
        // that should happen is that we mark more cells as used than they actually are,
        // so will never reuse such space, but data will not be corrupted.
        let value_bytes = value.to_bytes();
        let comp_value = self.compress(value_bytes);
        let value_size = comp_value.len();
        // IO metric accounts for the compressed size actually written.
        hw_counter.incr_delta(value_size);
        let required_blocks = Self::blocks_for_value(value_size, self.config.block_size_bytes);
        let (start_page_id, block_offset) =
            self.find_or_create_available_blocks(required_blocks)?;
        // insert into a new cell, considering that it can span more than one page
        self.write_into_pages(&comp_value, start_page_id, block_offset);
        // mark new cell as used in the bitmask
        self.bitmask
            .write()
            .mark_blocks(start_page_id, block_offset, required_blocks, true);
        // update the pointer
        let mut tracker_guard = self.tracker.write();
        let is_update = tracker_guard.has_pointer(point_offset);
        tracker_guard.set(
            point_offset,
            ValuePointer::new(start_page_id, block_offset, value_size as u32),
        );
        // return whether it was an update or not
        Ok(is_update)
    }
    /// Delete a value from the storage
    ///
    /// Returns None if the point_offset, page, or value was not found
    ///
    /// Returns the deleted value otherwise
    ///
    /// Only the tracker pointer is removed here; the underlying blocks are
    /// reclaimed later during flush, when the old pointers are processed.
    pub fn delete_value(&mut self, point_offset: PointOffset) -> Option<V> {
        let ValuePointer {
            page_id,
            block_offset,
            length,
        } = self.tracker.write().unset(point_offset)?;
        // Read the value back so the caller receives what was deleted.
        let raw = self.read_from_pages::<false>(page_id, block_offset, length);
        let decompressed = self.decompress(raw);
        let value = V::from_bytes(&decompressed);
        Some(value)
    }
    /// Clear the storage, going back to the initial state
    ///
    /// Completely wipes the storage, and recreates it with a single empty page.
    pub fn clear(&mut self) -> Result<()> {
        let create_options = StorageOptions::from(self.config);
        let base_path = self.base_path.clone();
        // Wait for all background flush operations to finish and abort pending
        // flushes. Below we create a new Gridstore instance with a new flush lock,
        // so flushers created on the new instance work as expected.
        self.is_alive_flush_lock.blocking_mark_dead();
        // Wipe
        self.pages.write().clear();
        fs::remove_dir_all(&base_path).map_err(|err| {
            GridstoreError::service_error(format!(
                "Failed to remove gridstore storage directory: {err}"
            ))
        })?;
        // Recreate
        fs::create_dir_all(&base_path).map_err(|err| {
            GridstoreError::service_error(format!(
                "Failed to create gridstore storage directory: {err}"
            ))
        })?;
        *self = Self::new(base_path, create_options)?;
        Ok(())
    }

    /// Wipe the storage, drop all pages and delete the base directory
    ///
    /// Takes ownership because this function leaves Gridstore in an inconsistent state which does
    /// not allow further usage. Use [`clear`](Self::clear) instead to clear and reuse the storage.
    pub fn wipe(self) -> Result<()> {
        let base_path = self.base_path.clone();
        // Wait for all background flush operations to finish, abort pending flushes
        self.is_alive_flush_lock.blocking_mark_dead();
        // Make sure strong references are dropped, to avoid starting another flush
        drop(self);
        // delete the base directory
        fs::remove_dir_all(base_path).map_err(|err| {
            GridstoreError::service_error(format!(
                "Failed to remove gridstore storage directory: {err}"
            ))
        })
    }
    /// Iterate over all the values in the storage
    ///
    /// Stops when the callback returns Ok(false)
    ///
    /// Holds the tracker read lock for the duration of the iteration.
    pub fn iter<F, E>(
        &self,
        mut callback: F,
        hw_counter: HwMetricRefCounter,
    ) -> std::result::Result<(), E>
    where
        F: FnMut(PointOffset, V) -> std::result::Result<bool, E>,
    {
        // Skip offsets that currently have no value (deleted / never set).
        for (point_offset, pointer) in
            self.tracker
                .read()
                .iter_pointers()
                .filter_map(|(point_offset, opt_pointer)| {
                    opt_pointer.map(|pointer| (point_offset, pointer))
                })
        {
            let ValuePointer {
                page_id,
                block_offset,
                length,
            } = pointer;
            // Sequential read mode: prefetch upcoming mmap pages.
            let raw = self.read_from_pages::<true>(page_id, block_offset, length);
            hw_counter.incr_delta(raw.len());
            let decompressed = self.decompress(raw);
            let value = V::from_bytes(&decompressed);
            if !callback(point_offset, value)? {
                return Ok(());
            }
        }
        Ok(())
    }

    /// Return the storage size in bytes
    pub fn get_storage_size_bytes(&self) -> usize {
        self.bitmask.read().get_storage_size_bytes()
    }
}
impl<V> Gridstore<V> {
/// The number of blocks needed for a given value bytes size
#[inline]
fn blocks_for_value(value_size: usize, block_size: usize) -> u32 {
value_size.div_ceil(block_size).try_into().unwrap()
}
    /// Create flusher that durably persists all pending changes when invoked
    ///
    /// The closure captures only the pending tracker updates and WEAK references
    /// to the storage internals, so a queued flush does not keep a dropped
    /// Gridstore alive and becomes a no-op once the storage is gone or cleared.
    pub fn flusher(&self) -> Flusher {
        let pending_updates = self.tracker.read().pending_updates.clone();
        let pages = Arc::downgrade(&self.pages);
        let tracker = Arc::downgrade(&self.tracker);
        let bitmask = Arc::downgrade(&self.bitmask);
        let block_size_bytes = self.config.block_size_bytes;
        let is_alive_flush_lock = self.is_alive_flush_lock.handle();
        Box::new(move || {
            let Some(is_alive_flush_guard) = is_alive_flush_lock.lock_if_alive() else {
                // Gridstore is cleared, cancel flush
                return Ok(());
            };
            // If any component was already dropped, there is nothing to persist.
            let (Some(pages), Some(tracker), Some(bitmask)) =
                (pages.upgrade(), tracker.upgrade(), bitmask.upgrade())
            else {
                return Ok(());
            };
            // Flush order matters for crash safety: bitmask and pages first,
            // then the tracker pointers, and only then reclaim old blocks.
            let mut bitmask_guard = bitmask.upgradable_read();
            bitmask_guard.flush()?;
            for page in pages.read().iter() {
                page.flush()?;
            }
            let old_pointers = tracker.write().write_pending_and_flush(pending_updates)?;
            if old_pointers.is_empty() {
                return Ok(());
            }
            // Update all free blocks in the bitmask
            bitmask_guard.with_upgraded(|guard| {
                // Group superseded pointers by page to free their blocks in batches.
                for (page_id, pointer_group) in
                    &old_pointers.into_iter().chunk_by(|pointer| pointer.page_id)
                {
                    let local_ranges = pointer_group.map(|pointer| {
                        let start = pointer.block_offset;
                        let end = pointer.block_offset
                            + Self::blocks_for_value(pointer.length as usize, block_size_bytes);
                        start as usize..end as usize
                    });
                    guard.mark_blocks_batch(page_id, local_ranges, false);
                }
            });
            bitmask_guard.flush()?;
            // Keep the guard till the end of the flush to prevent concurrent drop/flushes
            drop(is_alive_flush_guard);
            Ok(())
        })
    }
    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> std::io::Result<()> {
        for page in self.pages.read().iter() {
            page.populate();
        }
        self.tracker.read().populate();
        self.bitmask.read().populate()?;
        Ok(())
    }

    /// Drop disk cache.
    ///
    /// NOTE(review): the tracker cache is not cleared here, unlike in
    /// `populate` where it is populated — confirm whether that is intentional.
    pub fn clear_cache(&self) -> std::io::Result<()> {
        for page in self.pages.read().iter() {
            page.clear_cache()?;
        }
        self.bitmask.read().clear_cache()?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use std::thread;
use std::time::Duration;
use fs_err::File;
use itertools::Itertools;
use rand::distr::Uniform;
use rand::prelude::Distribution;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use tempfile::Builder;
use super::*;
use crate::blob::Blob;
use crate::config::{
DEFAULT_BLOCK_SIZE_BYTES, DEFAULT_PAGE_SIZE_BYTES, DEFAULT_REGION_SIZE_BLOCKS,
};
use crate::fixtures::{HM_FIELDS, Payload, empty_storage, empty_storage_sized, random_payload};
#[test]
fn test_empty_payload_storage() {
let hw_counter = HardwareCounterCell::new();
let (_dir, storage) = empty_storage();
let payload = storage.get_value::<false>(0, &hw_counter);
assert!(payload.is_none());
assert_eq!(storage.get_storage_size_bytes(), 0);
}
#[test]
fn test_put_single_empty_value() {
let (_dir, mut storage) = empty_storage();
let hw_counter = HardwareCounterCell::new();
let hw_counter = hw_counter.ref_payload_io_write_counter();
// TODO: should we actually use the pages for empty values?
let payload = Payload::default();
storage.put_value(0, &payload, hw_counter).unwrap();
assert_eq!(storage.pages.read().len(), 1);
assert_eq!(storage.tracker.read().mapping_len(), 1);
let hw_counter = HardwareCounterCell::new();
let stored_payload = storage.get_value::<false>(0, &hw_counter);
assert!(stored_payload.is_some());
assert_eq!(stored_payload.unwrap(), Payload::default());
assert_eq!(storage.get_storage_size_bytes(), DEFAULT_BLOCK_SIZE_BYTES);
}
#[test]
fn test_put_single_payload() {
let (_dir, mut storage) = empty_storage();
let mut payload = Payload::default();
payload.0.insert(
"key".to_string(),
serde_json::Value::String("value".to_string()),
);
let hw_counter = HardwareCounterCell::new();
let hw_counter = hw_counter.ref_payload_io_write_counter();
storage.put_value(0, &payload, hw_counter).unwrap();
assert_eq!(storage.pages.read().len(), 1);
assert_eq!(storage.tracker.read().mapping_len(), 1);
let page_mapping = storage.get_pointer(0).unwrap();
assert_eq!(page_mapping.page_id, 0); // first page
assert_eq!(page_mapping.block_offset, 0); // first cell
let hw_counter = HardwareCounterCell::new();
let stored_payload = storage.get_value::<false>(0, &hw_counter);
assert!(stored_payload.is_some());
assert_eq!(stored_payload.unwrap(), payload);
assert_eq!(storage.get_storage_size_bytes(), DEFAULT_BLOCK_SIZE_BYTES);
}
#[test]
fn test_storage_files() {
let (dir, mut storage) = empty_storage();
let mut payload = Payload::default();
payload.0.insert(
"key".to_string(),
serde_json::Value::String("value".to_string()),
);
let hw_counter = HardwareCounterCell::new();
let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
storage.put_value(0, &payload, hw_counter_ref).unwrap();
assert_eq!(storage.pages.read().len(), 1);
assert_eq!(storage.tracker.read().mapping_len(), 1);
let files = storage.files();
let actual_files: Vec<_> = fs::read_dir(dir.path()).unwrap().try_collect().unwrap();
assert_eq!(
files.len(),
actual_files.len(),
"The directory has {} files, but we are reporting {}\nreported: {files:?}\n actual: {actual_files:?}",
actual_files.len(),
files.len()
);
assert_eq!(files.len(), 5, "Expected 5 files, got {files:?}");
assert_eq!(files[0].file_name().unwrap(), "tracker.dat");
assert_eq!(files[1].file_name().unwrap(), "page_0.dat");
assert_eq!(files[2].file_name().unwrap(), "bitmask.dat");
assert_eq!(files[3].file_name().unwrap(), "gaps.dat");
assert_eq!(files[4].file_name().unwrap(), "config.json");
}
#[rstest]
#[case(100000, 2)]
#[case(100, 2000)]
fn test_put_payload(#[case] num_payloads: u32, #[case] payload_size_factor: usize) {
let (_dir, mut storage) = empty_storage();
let rng = &mut rand::rngs::SmallRng::from_os_rng();
let mut payloads = (0..num_payloads)
.map(|point_offset| (point_offset, random_payload(rng, payload_size_factor)))
.collect::<Vec<_>>();
let hw_counter = HardwareCounterCell::new();
let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
for (point_offset, payload) in payloads.iter() {
storage
.put_value(*point_offset, payload, hw_counter_ref)
.unwrap();
let stored_payload = storage.get_value::<false>(*point_offset, &hw_counter);
assert!(stored_payload.is_some());
assert_eq!(&stored_payload.unwrap(), payload);
}
// read randomly
payloads.shuffle(rng);
for (point_offset, payload) in payloads.iter() {
let stored_payload = storage.get_value::<false>(*point_offset, &hw_counter);
assert!(stored_payload.is_some());
assert_eq!(stored_payload.unwrap(), payload.clone());
}
}
#[test]
fn test_delete_single_payload() {
let (_dir, mut storage) = empty_storage();
let mut payload = Payload::default();
payload.0.insert(
"key".to_string(),
serde_json::Value::String("value".to_string()),
);
let hw_counter = HardwareCounterCell::new();
let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
storage.put_value(0, &payload, hw_counter_ref).unwrap();
assert_eq!(storage.pages.read().len(), 1);
let page_mapping = storage.get_pointer(0).unwrap();
assert_eq!(page_mapping.page_id, 0); // first page
assert_eq!(page_mapping.block_offset, 0); // first cell
let stored_payload = storage.get_value::<false>(0, &hw_counter);
assert_eq!(stored_payload, Some(payload));
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/bitmask/gaps.rs | lib/gridstore/src/bitmask/gaps.rs | use std::ops::Range;
use std::path::{Path, PathBuf};
use itertools::Itertools;
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_ops::{create_and_ensure_length, open_write_mmap};
use memory::mmap_type::MmapSlice;
use super::{RegionId, StorageConfig};
use crate::Result;
/// Gaps of contiguous zeros in a bitmask region.
///
/// Stored in an mmap'ed slice, hence the fixed `repr(C)` layout.
#[derive(Debug, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct RegionGaps {
    /// Length of the longest run of free blocks anywhere in the region.
    pub max: u16,
    /// Number of free blocks at the start of the region.
    pub leading: u16,
    /// Number of free blocks at the end of the region.
    pub trailing: u16,
}
impl RegionGaps {
    /// Build a gap summary, validating its internal consistency in debug builds:
    /// `max` bounds both `leading` and `trailing`, no gap exceeds the region
    /// size, and a fully-free region has `leading == trailing == max`.
    pub fn new(
        leading: u16,
        trailing: u16,
        max: u16,
        #[cfg(debug_assertions)] region_size_blocks: u16,
    ) -> Self {
        #[cfg(debug_assertions)]
        {
            let maximum_possible = region_size_blocks;
            assert!(max <= maximum_possible, "Unexpected max gap size");
            assert!(
                leading <= max,
                "Invalid gaps: leading is {leading}, but max is {max}",
            );
            assert!(
                trailing <= max,
                "Invalid gaps: trailing is {trailing}, but max is {max}",
            );
            if leading == maximum_possible || trailing == maximum_possible {
                assert_eq!(leading, trailing);
            }
        }
        Self {
            max,
            leading,
            trailing,
        }
    }

    /// Summary for a completely free region of `blocks` blocks: one gap
    /// spanning the whole region.
    pub fn all_free(blocks: u16) -> Self {
        Self {
            max: blocks,
            leading: blocks,
            trailing: blocks,
        }
    }

    /// Check if the region is completely empty.
    /// That is a single large gap
    pub fn is_empty(&self, region_size_blocks: u16) -> bool {
        self.max == region_size_blocks
    }

    /// Check if the region is completely full.
    /// That is no gaps in the region.
    pub fn is_full(&self) -> bool {
        self.max == 0
    }
}
/// An overview of contiguous free blocks covered by the bitmask.
///
/// One [`RegionGaps`] entry per bitmask region, memory-mapped from `gaps.dat`.
#[derive(Debug)]
pub(super) struct BitmaskGaps {
    /// Location of the backing `gaps.dat` file.
    pub path: PathBuf,
    // Storage geometry (region/block sizes) used to interpret the gap entries.
    config: StorageConfig,
    // Mmap-backed slice: index == region id.
    mmap_slice: MmapSlice<RegionGaps>,
}
impl BitmaskGaps {
    /// Location of the gaps file inside the storage directory.
    fn file_path(dir: &Path) -> PathBuf {
        dir.join("gaps.dat")
    }

    /// Path of the backing file for this instance.
    pub fn path(&self) -> PathBuf {
        self.path.clone()
    }

    /// Create a new gaps file in `dir`, sized and filled from `iter`
    /// (one entry per region).
    pub fn create(
        dir: &Path,
        mut iter: impl ExactSizeIterator<Item = RegionGaps>,
        config: StorageConfig,
    ) -> Self {
        let path = Self::file_path(dir);
        let length_in_bytes = iter.len() * size_of::<RegionGaps>();
        create_and_ensure_length(&path, length_in_bytes).unwrap();
        let mmap = open_write_mmap(&path, AdviceSetting::from(Advice::Normal), true).unwrap();
        let mut mmap_slice = unsafe { MmapSlice::from(mmap) };
        debug_assert_eq!(mmap_slice.len(), iter.len());
        // Populate every region entry straight into the mmap.
        mmap_slice.fill_with(|| iter.next().unwrap());
        Self {
            path,
            config,
            mmap_slice,
        }
    }
    /// Open an existing gaps file in `dir`.
    pub fn open(dir: &Path, config: StorageConfig) -> Result<Self> {
        let path = Self::file_path(dir);
        let mmap = open_write_mmap(&path, AdviceSetting::from(Advice::Normal), false)?;
        let mmap_slice = unsafe { MmapSlice::try_from(mmap) }?;
        Ok(Self {
            path,
            config,
            mmap_slice,
        })
    }

    /// Flush pending mmap writes to disk.
    pub fn flush(&self) -> Result<()> {
        Ok(self.mmap_slice.flusher()()?)
    }

    /// Extends the mmap file to fit the new regions
    pub fn extend(&mut self, mut iter: impl ExactSizeIterator<Item = RegionGaps>) -> Result<()> {
        if iter.len() == 0 {
            return Ok(());
        }
        // reopen the file with a larger size
        let prev_len = self.mmap_slice.len();
        let new_slice_len = prev_len + iter.len();
        let new_length_in_bytes = new_slice_len * size_of::<RegionGaps>();
        create_and_ensure_length(&self.path, new_length_in_bytes).unwrap();
        let mmap = open_write_mmap(&self.path, AdviceSetting::from(Advice::Normal), false)?;
        self.mmap_slice = unsafe { MmapSlice::try_from(mmap) }?;
        debug_assert_eq!(self.mmap_slice[prev_len..].len(), iter.len());
        // Only the newly appended tail is initialized; existing entries are kept.
        self.mmap_slice[prev_len..].fill_with(|| iter.next().unwrap());
        Ok(())
    }
    /// Total number of free blocks at the very end of the bitmask: sums the
    /// trailing gap of the last region plus every fully-free region before it
    /// (scanning backwards, stopping after the first partially-used region).
    pub fn trailing_free_blocks(&self) -> u32 {
        self.mmap_slice
            .iter()
            .rev()
            .take_while_inclusive(|gap| gap.trailing == self.config.region_size_blocks as u16)
            .map(|gap| u32::from(gap.trailing))
            .sum()
    }

    /// Number of regions covered.
    pub fn len(&self) -> usize {
        self.mmap_slice.len()
    }

    /// Gap summary of region `idx`, if it exists.
    pub fn get(&self, idx: usize) -> Option<&RegionGaps> {
        self.mmap_slice.get(idx)
    }

    /// Mutable gap summary of region `idx`; panics if out of bounds.
    pub fn get_mut(&mut self, idx: usize) -> &mut RegionGaps {
        &mut self.mmap_slice[idx]
    }

    /// All region summaries as a slice (index == region id).
    pub fn as_slice(&self) -> &[RegionGaps] {
        &self.mmap_slice
    }
    /// Find a gap in the bitmask that is large enough to fit `num_blocks` blocks.
    /// Returns the range of regions where the gap is.
    pub fn find_fitting_gap(&self, num_blocks: u32) -> Option<Range<RegionId>> {
        // Single-region bitmask: only the `max` field of that region matters.
        if self.mmap_slice.len() == 1 {
            return if self.get(0).unwrap().max as usize >= num_blocks as usize {
                Some(0..1)
            } else {
                None
            };
        }
        // try to find gap in the minimum regions needed
        let regions_needed = num_blocks.div_ceil(self.config.region_size_blocks as u32) as usize;
        let fits_in_min_regions = match regions_needed {
            0 => unreachable!("num_blocks should be at least 1"),
            // we might not need to merge any regions, just check the `max` field
            1 => self
                .as_slice()
                .iter()
                .enumerate()
                .find_map(|(region_id, gap)| {
                    if gap.max as usize >= num_blocks as usize {
                        Some(region_id as RegionId..(region_id + 1) as RegionId)
                    } else {
                        None
                    }
                }),
            // we need to merge at least 2 regions
            window_size => self.find_merged_gap(window_size, num_blocks),
        };
        if fits_in_min_regions.is_some() {
            return fits_in_min_regions;
        }
        // try to find gap by merging one more region (which is the maximum regions we may need for the value)
        let window_size = regions_needed + 1;
        self.find_merged_gap(window_size, num_blocks)
    }

    /// Find a gap in the bitmask that is large enough to fit `num_blocks` blocks, in a merged window of regions.
    ///
    /// A merged gap is: trailing gap of the first region + all fully-free
    /// middle regions + leading gap of the last region.
    fn find_merged_gap(&self, window_size: usize, num_blocks: u32) -> Option<Range<RegionId>> {
        debug_assert!(window_size >= 2, "window size must be at least 2");
        self.as_slice()
            .windows(window_size)
            .enumerate()
            .find_map(|(start_region_id, gaps)| {
                // make sure the middle regions are all free
                let middle_regions = &gaps[1..window_size - 1];
                if middle_regions
                    .iter()
                    .any(|gap| gap.max as usize != self.config.region_size_blocks)
                {
                    return None;
                }
                let first_trailing = gaps[0].trailing;
                let last_leading = gaps[window_size - 1].leading;
                let merged_gap = (first_trailing + last_leading) as usize
                    + (window_size - 2) * self.config.region_size_blocks;
                if merged_gap as u32 >= num_blocks {
                    Some(start_region_id as RegionId..(start_region_id + window_size) as RegionId)
                } else {
                    None
                }
            })
    }
    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> std::io::Result<()> {
        self.mmap_slice.populate()?;
        Ok(())
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> std::io::Result<()> {
        clear_disk_cache(&self.path)?;
        Ok(())
    }
}
#[cfg(test)]
// These tests rely on debug-only invariant checks (e.g. the 4-argument `RegionGaps::new`),
// so they are only compiled in debug builds.
#[cfg(debug_assertions)]
mod tests {
    use proptest::prelude::*;
    use tempfile::tempdir;

    use super::*;
    use crate::config::{DEFAULT_REGION_SIZE_BLOCKS, StorageOptions};

    prop_compose! {
        // Strategy producing a single `RegionGaps` whose leading/trailing/max fields are
        // mutually consistent for a region of `region_size_blocks` blocks.
        fn arbitrary_region_gaps(region_size_blocks: u16)(
            leading in 0..=region_size_blocks,
            trailing in 0..=region_size_blocks,
            max in 0..=region_size_blocks,
        ) -> RegionGaps {
            if leading + trailing >= region_size_blocks {
                // The two edge gaps would overlap: the whole region must be free.
                return RegionGaps::all_free(region_size_blocks);
            }
            let in_between = region_size_blocks - leading - trailing;
            // Clamp `max` so a middle gap of that size fits strictly between the edge gaps
            // (one used block on each side), while `max` is never below leading/trailing.
            let max = max.min(in_between.saturating_sub(2)).max(leading).max(trailing);
            RegionGaps::new(leading, trailing, max, region_size_blocks)
        }
    }

    impl Arbitrary for RegionGaps {
        type Parameters = ();
        type Strategy = BoxedStrategy<Self>;

        fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
            arbitrary_region_gaps(DEFAULT_REGION_SIZE_BLOCKS as u16).boxed()
        }
    }

    // Build a bitvec (1 = used, 0 = free) that is consistent with the given per-region
    // gap summaries, to cross-check `find_fitting_gap` against the raw bit layout.
    fn regions_gaps_to_bitvec(
        gaps: &[RegionGaps],
        region_size_blocks: usize,
    ) -> bitvec::vec::BitVec {
        let total_bits = gaps.len() * region_size_blocks;
        let mut bv = bitvec::vec::BitVec::repeat(true, total_bits);
        for (region_idx, gap) in gaps.iter().enumerate() {
            let region_start = region_idx * region_size_blocks;
            // Handle leading zeros
            if gap.leading > 0 {
                for i in 0..gap.leading as usize {
                    bv.set(region_start + i, false);
                }
            }
            // Handle trailing zeros
            if gap.trailing > 0 {
                let trailing_start = region_start + region_size_blocks - gap.trailing as usize;
                for i in 0..gap.trailing as usize {
                    bv.set(trailing_start + i, false);
                }
            }
            // Handle max zeros if bigger than both leading and trailing
            if gap.max > gap.leading && gap.max > gap.trailing {
                // start after leading, but leave one bit in between to create a separate gap
                let zeros_start = region_start + gap.leading as usize + 1;
                let zeros_end = zeros_start + gap.max as usize;
                // Put remaining zeros in middle
                for i in zeros_start..zeros_end {
                    bv.set(i, false);
                }
            }
        }
        bv
    }

    proptest! {
        // Property: any range returned by `find_fitting_gap` is in-bounds, spans no more
        // regions than necessary, and really contains a free run of `num_blocks` blocks.
        #[test]
        fn test_find_fitting_gap(
            gaps in prop::collection::vec(any::<RegionGaps>(), 1..100),
            num_blocks in 1..=(DEFAULT_REGION_SIZE_BLOCKS as u32 * 3)
        ) {
            let temp_dir = tempdir().unwrap();
            let config = StorageOptions::default().try_into().unwrap();
            let bitmask_gaps = BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);

            let bitvec = regions_gaps_to_bitvec(&gaps, DEFAULT_REGION_SIZE_BLOCKS);

            if let Some(range) = bitmask_gaps.find_fitting_gap(num_blocks) {
                // Range should be within bounds
                prop_assert!(range.start <= bitmask_gaps.len() as u32);
                prop_assert!(range.end <= bitmask_gaps.len() as u32);
                prop_assert!(range.start <= range.end);

                // check that range is as constrained as possible
                let total_regions = range.end - range.start;
                let max_needed_regions = num_blocks.div_ceil(DEFAULT_REGION_SIZE_BLOCKS as u32) + 1;
                prop_assert!(total_regions <= max_needed_regions);

                // Range should actually have a gap with enough blocks
                let regions_start = range.start as usize * DEFAULT_REGION_SIZE_BLOCKS;
                let regions_end = range.end as usize * DEFAULT_REGION_SIZE_BLOCKS;

                // Longest run of free (0) bits within the returned region range.
                let max_gap = bitvec[regions_start..regions_end].iter().chunk_by(|b| **b).into_iter()
                    .filter(|(used, _group)| !*used)
                    .map(|(_, group)| group.count() as u32)
                    .max()
                    .unwrap_or(0);

                // Verify the gap is large enough
                prop_assert!(max_gap >= num_blocks, "max_gap: {}, num_blocks: {}", max_gap, num_blocks);
            }
        }
    }

    /// Tests that it is possible to find a large gap in the end of the gaps list
    #[test]
    fn test_find_fitting_gap_large() {
        // A value that spans more than one full region.
        let large_value_blocks = DEFAULT_REGION_SIZE_BLOCKS + 20;
        let gaps = [
            RegionGaps {
                max: 0,
                leading: 0,
                trailing: 0,
            },
            RegionGaps {
                max: 500,
                leading: 0,
                trailing: 500,
            },
            RegionGaps::all_free(DEFAULT_REGION_SIZE_BLOCKS as u16),
        ];

        let temp_dir = tempdir().unwrap();
        let config = StorageOptions::default().try_into().unwrap();
        let mut bitmask_gaps =
            BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);
        assert!(bitmask_gaps.mmap_slice.len() >= 3);
        // Overwrite the stored gaps directly to pin the exact fixture layout.
        bitmask_gaps.mmap_slice[0..3].clone_from_slice(&gaps[..]);

        // trailing(500) + all-free last region > large_value_blocks, so a merged window fits.
        assert!(
            bitmask_gaps
                .find_fitting_gap(large_value_blocks as u32)
                .is_some(),
        );
    }

    #[test]
    fn test_find_fitting_gap_windows_end() {
        const REGION_SIZE_BLOCKS: u32 = DEFAULT_REGION_SIZE_BLOCKS as u32;

        let temp_dir = tempdir().unwrap();
        let config: StorageConfig = StorageOptions::default().try_into().unwrap();

        // 3 regions, all empty
        let gaps = vec![
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
        ];
        let bitmask_gaps = BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);

        // Find space for blocks covering up to 2 regions
        assert!(bitmask_gaps.find_fitting_gap(1).is_some());
        assert!(bitmask_gaps.find_fitting_gap(REGION_SIZE_BLOCKS).is_some());
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 2)
                .is_some(),
        );

        // Find space for blocks covering 3 regions
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 2 + 1)
                .is_some(),
        );
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 3)
                .is_some(),
        );

        // No space for blocks covering 4 or more regions
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 4)
                .is_none(),
        );

        // 3 regions with first 0.5 regions occupied and last 2.5 regions available
        let gaps = vec![
            RegionGaps {
                max: (REGION_SIZE_BLOCKS / 2) as u16,
                leading: 0,
                trailing: (REGION_SIZE_BLOCKS / 2) as u16,
            },
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
        ];
        let bitmask_gaps = BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);

        // Find space for blocks covering up to 2 regions
        assert!(bitmask_gaps.find_fitting_gap(REGION_SIZE_BLOCKS).is_some());
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 2)
                .is_some(),
        );

        // Find space for blocks covering more than 2 up to 2.5 regions
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS * 2 + 1)
                .is_some(),
        );
        assert!(
            bitmask_gaps
                .find_fitting_gap((REGION_SIZE_BLOCKS * 2) + (REGION_SIZE_BLOCKS / 2))
                .is_some(),
        );

        // No space for blocks covering more than 2.5 regions
        assert!(
            bitmask_gaps
                .find_fitting_gap((REGION_SIZE_BLOCKS * 2) + (REGION_SIZE_BLOCKS / 2) + 1)
                .is_none(),
        );

        // 3 regions with first 1.5 regions occupied and last 1.5 regions available
        let gaps = vec![
            RegionGaps {
                max: 0,
                leading: 0,
                trailing: 0,
            },
            RegionGaps {
                max: (REGION_SIZE_BLOCKS / 2) as u16,
                leading: 0,
                trailing: (REGION_SIZE_BLOCKS / 2) as u16,
            },
            RegionGaps::all_free(REGION_SIZE_BLOCKS as u16),
        ];
        let bitmask_gaps = BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);

        // Find space for blocks covering more than 1 to 1.5 regions
        assert!(bitmask_gaps.find_fitting_gap(REGION_SIZE_BLOCKS).is_some());
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + 1)
                .is_some(),
        );
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + (REGION_SIZE_BLOCKS / 2))
                .is_some(),
        );

        // No space for blocks covering more than 1.5 regions
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + REGION_SIZE_BLOCKS / 2 + 1)
                .is_none(),
        );
    }

    #[test]
    fn test_find_fitting_gap_windows_middle() {
        const REGION_SIZE_BLOCKS: u32 = DEFAULT_REGION_SIZE_BLOCKS as u32;

        let temp_dir = tempdir().unwrap();
        let config = StorageOptions::default().try_into().unwrap();

        // 3 regions with 1.5 regions occupied and 1.5 regions available
        let gaps = vec![
            // First region: occupied
            RegionGaps {
                max: 0,
                leading: 0,
                trailing: 0,
            },
            // Second region: first 25% is occupied
            RegionGaps {
                max: (REGION_SIZE_BLOCKS / 4) as u16 * 3,
                leading: 0,
                trailing: (REGION_SIZE_BLOCKS / 4) as u16 * 3,
            },
            // Third region: last 25% is occupied
            RegionGaps {
                max: (REGION_SIZE_BLOCKS / 4) as u16 * 3,
                leading: (REGION_SIZE_BLOCKS / 4) as u16 * 3,
                trailing: 0,
            },
        ];
        let bitmask_gaps = BitmaskGaps::create(temp_dir.path(), gaps.clone().into_iter(), config);

        // Find space for blocks covering up to 1.5 region
        assert!(bitmask_gaps.find_fitting_gap(REGION_SIZE_BLOCKS).is_some());
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + 1)
                .is_some(),
        );
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + REGION_SIZE_BLOCKS / 2)
                .is_some(),
        );

        // No space for blocks covering more than 1.5 regions
        assert!(
            bitmask_gaps
                .find_fitting_gap(REGION_SIZE_BLOCKS + REGION_SIZE_BLOCKS / 2 + 1)
                .is_none(),
        );
    }

    #[test]
    fn test_region_gaps_persistence() {
        use fs_err as fs;
        use tempfile::tempdir;

        let dir = tempdir().unwrap();
        let dir_path = dir.path();

        let region_size_blocks = DEFAULT_REGION_SIZE_BLOCKS as u16;

        let gaps = vec![
            RegionGaps::new(1, 2, 3, region_size_blocks),
            RegionGaps::new(4, 5, 6, region_size_blocks),
            RegionGaps::new(7, 8, 9, region_size_blocks),
        ];

        // Create RegionGaps and write gaps
        {
            let config = StorageOptions::default().try_into().unwrap();
            let region_gaps = BitmaskGaps::create(dir_path, gaps.clone().into_iter(), config);
            assert_eq!(region_gaps.len(), gaps.len());
            for (i, gap) in gaps.iter().enumerate() {
                assert_eq!(region_gaps.get(i).unwrap(), gap);
            }
        }

        // Reopen RegionGaps and verify gaps
        {
            let config = StorageOptions::default().try_into().unwrap();
            let region_gaps = BitmaskGaps::open(dir_path, config).unwrap();
            assert_eq!(region_gaps.len(), gaps.len());
            for (i, gap) in gaps.iter().enumerate() {
                assert_eq!(region_gaps.get(i).unwrap(), gap);
            }
        }

        // Extend RegionGaps with more gaps
        let more_gaps = vec![
            RegionGaps::new(10, 11, 12, region_size_blocks),
            RegionGaps::new(13, 14, 15, region_size_blocks),
        ];
        {
            let config = StorageOptions::default().try_into().unwrap();
            let mut region_gaps = BitmaskGaps::open(dir_path, config).unwrap();
            region_gaps.extend(more_gaps.clone().into_iter()).unwrap();
            assert_eq!(region_gaps.len(), gaps.len() + more_gaps.len());
            for (i, gap) in gaps.iter().chain(more_gaps.iter()).enumerate() {
                assert_eq!(region_gaps.get(i).unwrap(), gap);
            }
        }

        // Reopen RegionGaps and verify all gaps
        {
            let config = StorageOptions::default().try_into().unwrap();
            let region_gaps = BitmaskGaps::open(dir_path, config).unwrap();
            assert_eq!(region_gaps.len(), gaps.len() + more_gaps.len());
            for (i, gap) in gaps.iter().chain(more_gaps.iter()).enumerate() {
                assert_eq!(region_gaps.get(i).unwrap(), gap);
            }
        }

        // Clean up
        fs::remove_file(BitmaskGaps::file_path(dir_path)).unwrap();
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/src/bitmask/mod.rs | lib/gridstore/src/bitmask/mod.rs | mod gaps;
use std::ops::Range;
use std::path::{Path, PathBuf};
use ahash::AHashSet;
use bitvec::slice::BitSlice;
use gaps::{BitmaskGaps, RegionGaps};
use itertools::Itertools;
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_ops::{create_and_ensure_length, open_write_mmap};
use memory::mmap_type::MmapBitSlice;
use crate::Result;
use crate::config::StorageConfig;
use crate::error::GridstoreError;
use crate::tracker::{BlockOffset, PageId};
/// File name of the on-disk bitmask within the storage directory.
const BITMASK_NAME: &str = "bitmask.dat";

/// Index of a fixed-size region of blocks within the bitmask.
type RegionId = u32;
/// Tracks which blocks of the storage are used, backed by a memory-mapped bit vector,
/// together with a per-region summary of free-block gaps for fast search.
#[derive(Debug)]
pub struct Bitmask {
    /// Storage layout parameters (block size, page size, region size in blocks).
    config: StorageConfig,

    /// A summary of every 1KB (8_192 bits) of contiguous zeros in the bitmask, or less if it is the last region.
    regions_gaps: BitmaskGaps,

    /// The actual bitmask. Each bit represents a block. A 1 means the block is used, a 0 means it is free.
    bitslice: MmapBitSlice,

    /// The path to the file containing the bitmask.
    path: PathBuf,
}
/// Access pattern to the bitmask is always random reads by the already calculated page id.
/// We never need to iterate over multiple bitmask file pages in a row, therefore we can use random access.
/// Applied as the madvise setting when the bitmask mmap is opened.
const DEFAULT_ADVICE: Advice = Advice::Random;
impl Bitmask {
pub fn files(&self) -> Vec<PathBuf> {
vec![self.path.clone(), self.regions_gaps.path()]
}
/// Calculate the amount of trailing free blocks in the bitmask.
pub fn trailing_free_blocks(&self) -> u32 {
let trailing_gap = self.regions_gaps.trailing_free_blocks();
#[cfg(debug_assertions)]
{
let num_trailing_zeros = self.bitslice.trailing_zeros();
debug_assert_eq!(num_trailing_zeros, trailing_gap as usize);
}
trailing_gap
}
/// Calculate the amount of bytes needed for covering the blocks of a page.
fn length_for_page(config: &StorageConfig) -> usize {
assert_eq!(
config.page_size_bytes % config.block_size_bytes,
0,
"Page size must be a multiple of block size"
);
// one bit per block
let bits = config.page_size_bytes / config.block_size_bytes;
// length in bytes
bits / u8::BITS as usize
}
/// Create a bitmask for one page
pub(crate) fn create(dir: &Path, config: StorageConfig) -> Result<Self> {
debug_assert!(
config.page_size_bytes % config.block_size_bytes * config.region_size_blocks == 0,
"Page size must be a multiple of block size * region size"
);
let length = Self::length_for_page(&config);
// create bitmask mmap
let path = Self::bitmask_path(dir);
create_and_ensure_length(&path, length).unwrap();
let mmap = open_write_mmap(&path, AdviceSetting::from(DEFAULT_ADVICE), false)?;
let mmap_bitslice = MmapBitSlice::try_from(mmap, 0)?;
assert_eq!(mmap_bitslice.len(), length * 8, "Bitmask length mismatch");
// create regions gaps mmap
let num_regions = mmap_bitslice.len() / config.region_size_blocks;
let region_gaps = vec![RegionGaps::all_free(config.region_size_blocks as u16); num_regions];
let mmap_region_gaps = BitmaskGaps::create(dir, region_gaps.into_iter(), config);
Ok(Self {
config,
regions_gaps: mmap_region_gaps,
bitslice: mmap_bitslice,
path,
})
}
pub(crate) fn open(dir: &Path, config: StorageConfig) -> Result<Self> {
debug_assert!(
config
.page_size_bytes
.is_multiple_of(config.block_size_bytes),
"Page size must be a multiple of block size"
);
let path = Self::bitmask_path(dir);
if !path.exists() {
return Err(GridstoreError::service_error(format!(
"Bitmask file does not exist: {}",
path.display()
)));
}
let mmap = open_write_mmap(&path, AdviceSetting::from(DEFAULT_ADVICE), false)?;
let mmap_bitslice = MmapBitSlice::from(mmap, 0);
let bitmask_gaps = BitmaskGaps::open(dir, config)?;
Ok(Self {
config,
regions_gaps: bitmask_gaps,
bitslice: mmap_bitslice,
path,
})
}
fn bitmask_path(dir: &Path) -> PathBuf {
dir.join(BITMASK_NAME)
}
pub fn flush(&self) -> Result<()> {
self.bitslice.flusher()()?;
self.regions_gaps.flush()?;
Ok(())
}
/// Compute the size of the storage in bytes.
/// Does not include the metadata information (e.g. the regions gaps, bitmask...).
pub fn get_storage_size_bytes(&self) -> usize {
let mut size = 0;
let region_size_blocks = self.config.region_size_blocks;
let block_size_bytes = self.config.block_size_bytes;
let region_size_bytes = region_size_blocks * block_size_bytes;
for (gap_id, gap) in self.regions_gaps.as_slice().iter().enumerate() {
// skip empty regions
if gap.is_empty(region_size_blocks as u16) {
continue;
}
// fast path for full regions
if gap.is_full() {
size += region_size_bytes;
} else {
// compute the size of the occupied blocks for the region
let gap_offset_start = gap_id * region_size_blocks;
let gap_offset_end = gap_offset_start + region_size_blocks;
let occupied_blocks = self.bitslice[gap_offset_start..gap_offset_end].count_ones();
size += occupied_blocks * block_size_bytes
}
}
size
}
pub fn infer_num_pages(&self) -> usize {
let bits = self.bitslice.len();
let covered_bytes = bits * self.config.block_size_bytes;
covered_bytes.div_euclid(self.config.page_size_bytes)
}
/// Extend the bitslice to cover another page
pub fn cover_new_page(&mut self) -> Result<()> {
let extra_length = Self::length_for_page(&self.config);
// flush outstanding changes
self.bitslice.flusher()().unwrap();
// reopen the file with a larger size
let previous_bitslice_len = self.bitslice.len();
let new_length = (previous_bitslice_len / u8::BITS as usize) + extra_length;
create_and_ensure_length(&self.path, new_length).unwrap();
let mmap = open_write_mmap(&self.path, AdviceSetting::from(DEFAULT_ADVICE), false)?;
self.bitslice = MmapBitSlice::try_from(mmap, 0)?;
// extend the region gaps
let current_total_regions = self.regions_gaps.len();
let expected_total_full_regions = self
.bitslice
.len()
.div_euclid(self.config.region_size_blocks);
debug_assert!(
self.bitslice
.len()
.is_multiple_of(self.config.region_size_blocks),
"Bitmask length must be a multiple of region size"
);
let new_regions = expected_total_full_regions.saturating_sub(current_total_regions);
let new_gaps =
vec![RegionGaps::all_free(self.config.region_size_blocks as u16); new_regions];
self.regions_gaps.extend(new_gaps.into_iter())?;
assert_eq!(
self.regions_gaps.len() * self.config.region_size_blocks,
self.bitslice.len(),
"Bitmask length mismatch",
);
Ok(())
}
fn range_of_page(&self, page_id: PageId) -> Range<usize> {
let page_blocks = self.config.page_size_bytes / self.config.block_size_bytes;
let start = page_id as usize * page_blocks;
let end = start + page_blocks;
start..end
}
/// The amount of blocks that have never been used in the page.
#[cfg(test)]
pub(crate) fn free_blocks_for_page(&self, page_id: PageId) -> usize {
let range_of_page = self.range_of_page(page_id);
self.bitslice[range_of_page].trailing_zeros()
}
/// The amount of blocks that are available for reuse in the page.
#[allow(dead_code)]
pub(crate) fn fragmented_blocks_for_page(&self, page_id: PageId) -> usize {
let range_of_page = self.range_of_page(page_id);
let bitslice = &self.bitslice[range_of_page];
bitslice.count_zeros() - bitslice.trailing_zeros()
}
pub(crate) fn find_available_blocks(&self, num_blocks: u32) -> Option<(PageId, BlockOffset)> {
let region_id_range = self.regions_gaps.find_fitting_gap(num_blocks)?;
let regions_start_offset = region_id_range.start as usize * self.config.region_size_blocks;
let regions_end_offset = region_id_range.end as usize * self.config.region_size_blocks;
let translate_to_answer = |local_index: u32| {
let page_size_in_blocks = self.config.page_size_bytes / self.config.block_size_bytes;
let global_cursor_offset = local_index as usize + regions_start_offset;
// Calculate the page id and the block offset within the page
let page_id = global_cursor_offset.div_euclid(page_size_in_blocks);
let page_block_offset = global_cursor_offset.rem_euclid(page_size_in_blocks);
(page_id as PageId, page_block_offset as BlockOffset)
};
let regions_bitslice = &self.bitslice[regions_start_offset..regions_end_offset];
Self::find_available_blocks_in_slice(regions_bitslice, num_blocks, translate_to_answer)
}
pub fn find_available_blocks_in_slice<F>(
bitslice: &BitSlice,
num_blocks: u32,
translate_local_index: F,
) -> Option<(PageId, BlockOffset)>
where
F: FnOnce(u32) -> (PageId, BlockOffset),
{
// Get raw memory region
let (head, raw_region, tail) = bitslice
.domain()
.region()
.expect("Regions cover more than one usize");
// We expect the regions to not use partial usizes
debug_assert!(head.is_none());
debug_assert!(tail.is_none());
let mut current_size: u32 = 0;
let mut current_start: u32 = 0;
let mut num_shifts = 0;
// Iterate over the integers that compose the bitvec. So that we can perform bitwise operations.
const BITS_IN_CHUNK: u32 = usize::BITS;
for (chunk_idx, chunk) in raw_region.iter().enumerate() {
let mut chunk = *chunk;
// case of all zeros
if chunk == 0 {
current_size += BITS_IN_CHUNK;
continue;
}
if chunk == !0 {
// case of all ones
if current_size >= num_blocks {
// bingo - we found a free cell of num_blocks
return Some(translate_local_index(current_start));
}
current_size = 0;
current_start = (chunk_idx as u32 + 1) * BITS_IN_CHUNK;
continue;
}
// At least one non-zero bit
let leading = chunk.trailing_zeros();
let trailing = chunk.leading_zeros();
let max_possible_middle_gap = (BITS_IN_CHUNK - leading - trailing).saturating_sub(2);
// Skip looking for local max if it won't improve global max
if num_blocks > max_possible_middle_gap {
current_size += leading;
if current_size >= num_blocks {
// bingo - we found a free cell of num_blocks
return Some(translate_local_index(current_start));
}
current_size = trailing;
current_start = (chunk_idx as u32) * BITS_IN_CHUNK + BITS_IN_CHUNK - trailing;
continue;
}
while chunk != 0 {
let num_zeros = chunk.trailing_zeros();
current_size += num_zeros;
if current_size >= num_blocks {
// bingo - we found a free cell of num_blocks
return Some(translate_local_index(current_start));
}
// shift by the number of zeros
chunk >>= num_zeros as usize;
num_shifts += num_zeros;
// skip consecutive ones
let num_ones = chunk.trailing_ones();
if num_ones < BITS_IN_CHUNK {
chunk >>= num_ones;
} else {
// all ones
debug_assert!(chunk == !0);
chunk = 0;
}
num_shifts += num_ones;
current_size = 0;
current_start = chunk_idx as u32 * BITS_IN_CHUNK + num_shifts;
}
// no more ones in the chunk
current_size += BITS_IN_CHUNK - num_shifts;
num_shifts = 0;
}
if current_size >= num_blocks {
// bingo - we found a free cell of num_blocks
return Some(translate_local_index(current_start));
}
None
}
pub(crate) fn mark_blocks(
&mut self,
page_id: PageId,
block_offset: BlockOffset,
num_blocks: u32,
used: bool,
) {
let relative_range = block_offset as usize..(block_offset as usize + num_blocks as usize);
self.mark_blocks_batch(page_id, std::iter::once(relative_range), used);
}
/// Marks blocks sharing the same page in batch. First updates all ranges in the bitmask, then updates the region gaps a single time.
///
/// # Arguments
/// * `page_id` - The ID of the page to mark blocks on.
/// * `block_ranges` - An iterator over the ranges of blocks to mark, relative to the page start.
/// * `used` - Whether the blocks should be marked as used or free.
pub(crate) fn mark_blocks_batch(
&mut self,
page_id: PageId,
local_block_ranges: impl Iterator<Item = Range<usize>>,
used: bool,
) {
let page_start = self.range_of_page(page_id).start;
let est_num_ranges = local_block_ranges.size_hint().1.unwrap_or(1);
let mut dirty_regions = AHashSet::with_capacity(est_num_ranges);
for range in local_block_ranges {
let bitmask_range = (range.start + page_start)..(range.end + page_start);
self.bitslice[bitmask_range.clone()].fill(used);
let start_region_id =
(bitmask_range.start / self.config.region_size_blocks) as RegionId;
let end_region_id =
bitmask_range.end.div_ceil(self.config.region_size_blocks) as RegionId;
dirty_regions.extend(start_region_id..end_region_id);
}
self.update_region_gaps(dirty_regions);
}
fn update_region_gaps(&mut self, dirty_regions: AHashSet<RegionId>) {
for region_id in dirty_regions {
let region_id = region_id as usize;
let region_start = region_id * self.config.region_size_blocks;
let region_end = region_start + self.config.region_size_blocks;
let bitslice = &self.bitslice[region_start..region_end];
let gaps = Self::calculate_gaps(bitslice, self.config.region_size_blocks);
*self.regions_gaps.get_mut(region_id) = gaps;
}
}
pub fn calculate_gaps(region: &BitSlice, region_size_blocks: usize) -> RegionGaps {
debug_assert_eq!(region.len(), region_size_blocks, "Unexpected region size");
// Get raw memory region
let (head, raw_region, tail) = region
.domain()
.region()
.expect("Region covers more than one usize");
// We expect the region to not use partial usizes
debug_assert!(head.is_none());
debug_assert!(tail.is_none());
// Iterate over the integers that compose the bitslice. So that we can perform bitwise operations.
let mut max = 0;
let mut current = 0;
const BITS_IN_CHUNK: u32 = usize::BITS;
let mut num_shifts = 0;
// In reverse, because we expect the regions to be filled start to end.
// So starting from the end should give us bigger `max` earlier.
for chunk in raw_region.iter().rev() {
// Ensure that the chunk is little-endian.
let mut chunk = chunk.to_le();
// case of all zeros
if chunk == 0 {
current += BITS_IN_CHUNK;
continue;
}
if chunk == !0 {
// case of all ones
max = max.max(current);
current = 0;
continue;
}
// At least one non-zero bit
let leading = chunk.leading_zeros();
let trailing = chunk.trailing_zeros();
let max_possible_middle_gap = (BITS_IN_CHUNK - leading - trailing).saturating_sub(2);
// Skip looking for local max if it won't improve global max
if max > max_possible_middle_gap {
current += leading;
max = max.max(current);
current = trailing;
continue;
}
// Otherwise, look for the actual maximum in the chunk
while chunk != 0 {
// count consecutive zeros
let num_zeros = chunk.leading_zeros();
current += num_zeros;
max = max.max(current);
current = 0;
// shift by the number of zeros
chunk <<= num_zeros as usize;
num_shifts += num_zeros;
// skip consecutive ones
let num_ones = chunk.leading_ones();
if num_ones < BITS_IN_CHUNK {
chunk <<= num_ones;
} else {
// all ones
debug_assert!(chunk == !0);
chunk = 0;
}
num_shifts += num_ones;
}
// no more ones in the chunk
current += BITS_IN_CHUNK - num_shifts;
num_shifts = 0;
}
max = max.max(current);
let leading;
let trailing;
if max == region_size_blocks as u32 {
leading = max;
trailing = max;
} else {
leading = raw_region
.iter()
.take_while_inclusive(|chunk| chunk == &&0)
.map(|chunk| chunk.trailing_zeros())
.sum::<u32>();
trailing = raw_region
.iter()
.rev()
.take_while_inclusive(|chunk| chunk == &&0)
.map(|chunk| chunk.leading_zeros())
.sum::<u32>();
}
#[cfg(debug_assertions)]
{
RegionGaps::new(
leading as u16,
trailing as u16,
max as u16,
region_size_blocks as u16,
)
}
#[cfg(not(debug_assertions))]
{
RegionGaps::new(leading as u16, trailing as u16, max as u16)
}
}
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) -> std::io::Result<()> {
self.bitslice.populate()?;
self.regions_gaps.populate()?;
Ok(())
}
/// Drop disk cache.
pub fn clear_cache(&self) -> std::io::Result<()> {
clear_disk_cache(&self.path)?;
self.regions_gaps.clear_cache()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use bitvec::bits;
    use bitvec::vec::BitVec;
    use proptest::prelude::*;
    use rand::{Rng, rng};

    use crate::config::{DEFAULT_BLOCK_SIZE_BYTES, DEFAULT_REGION_SIZE_BLOCKS, StorageOptions};

    #[test]
    fn test_length_for_page() {
        // 8192 bytes per page / 1 block per region config: one bit per block, packed into bytes.
        let config = &StorageOptions {
            page_size_bytes: Some(8192),
            region_size_blocks: Some(1),
            ..Default::default()
        }
        .try_into()
        .unwrap();
        assert_eq!(super::Bitmask::length_for_page(config), 8);
    }

    #[test]
    fn test_find_available_blocks() {
        let page_size = DEFAULT_BLOCK_SIZE_BYTES * DEFAULT_REGION_SIZE_BLOCKS;
        let blocks_per_page = (page_size / DEFAULT_BLOCK_SIZE_BYTES) as u32;

        let dir = tempfile::tempdir().unwrap();
        let options = StorageOptions {
            page_size_bytes: Some(page_size),
            ..Default::default()
        };

        let mut bitmask = super::Bitmask::create(dir.path(), options.try_into().unwrap()).unwrap();
        bitmask.cover_new_page().unwrap();

        assert_eq!(bitmask.bitslice.len() as u32, blocks_per_page * 2);

        // Occupy several disjoint ranges, leaving known free gaps in between.
        // 1..10
        bitmask.mark_blocks(0, 1, 9, true);

        // 15..20
        bitmask.mark_blocks(0, 15, 5, true);

        // 30..blocks_per_page
        bitmask.mark_blocks(0, 30, blocks_per_page - 30, true);

        // blocks_per_page..blocks_per_page + 1
        bitmask.mark_blocks(1, 0, 1, true);

        let (page_id, block_offset) = bitmask.find_available_blocks(1).unwrap();
        assert_eq!(block_offset, 0);
        assert_eq!(page_id, 0);

        let (page_id, block_offset) = bitmask.find_available_blocks(2).unwrap();
        assert_eq!(block_offset, 10);
        assert_eq!(page_id, 0);

        let (page_id, block_offset) = bitmask.find_available_blocks(5).unwrap();
        assert_eq!(block_offset, 10);
        assert_eq!(page_id, 0);

        let (page_id, block_offset) = bitmask.find_available_blocks(6).unwrap();
        assert_eq!(block_offset, 20);
        assert_eq!(page_id, 0);

        // first free block of the next page
        let (page_id, block_offset) = bitmask.find_available_blocks(30).unwrap();
        assert_eq!(block_offset, 1);
        assert_eq!(page_id, 1);

        // not fitting cell
        let found_large = bitmask.find_available_blocks(blocks_per_page);
        assert_eq!(found_large, None);
    }

    #[test]
    fn test_raw_bitvec() {
        // Sanity-check the Lsb0 bit order assumptions used by the chunk-wise scanners:
        // bit 0 of the slice maps to the least significant bit of the raw usize.
        use bitvec::prelude::Lsb0;
        let bits = bits![
            0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1
        ];

        let mut bitvec = BitVec::<usize, Lsb0>::new();
        bitvec.extend_from_bitslice(bits);

        assert_eq!(bitvec.len(), 64);

        let raw = bitvec.as_raw_slice();
        assert_eq!(raw.len() as u32, 64 / usize::BITS);

        assert_eq!(raw[0].trailing_zeros(), 4);
        assert_eq!(raw[0].leading_zeros(), 0);
        assert_eq!((raw[0] >> 1).trailing_zeros(), 3)
    }

    prop_compose! {
        /// Creates a fixture bitvec which has gaps of a specific size
        fn regions_bitvec_with_max_gap(max_gap_size: usize) (len in 0..DEFAULT_REGION_SIZE_BLOCKS*4) -> (BitVec, usize) {
            assert!(max_gap_size > 0);
            let len = len.next_multiple_of(DEFAULT_REGION_SIZE_BLOCKS);
            let mut bitvec = BitVec::new();
            bitvec.resize(len, true);
            let mut rng = rng();
            let mut i = 0;
            let mut max_gap = 0;
            // Alternate random free runs and used runs, tracking the largest free run.
            while i < len {
                let run = rng.random_range(1..max_gap_size).min(len - i);
                let skip = rng.random_range(1..max_gap_size);
                for j in 0..run {
                    bitvec.set(i + j, false);
                }
                if run > max_gap {
                    max_gap = run;
                }
                i += run + skip;
            }
            (bitvec, max_gap)
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(1000))]

        // Property: the scanner finds a free run for every size up to the known maximum
        // gap, each returned position is actually free, and one block more than the
        // maximum gap is never found.
        #[test]
        fn test_find_available_blocks_properties((bitvec, max_gap) in regions_bitvec_with_max_gap(120)) {
            let bitslice = bitvec.as_bitslice();

            // Helper to check if a range is all zeros
            let is_free_range = |start: usize, len: usize| {
                let range = start..(start + len);
                bitslice.get(range)
                    .map(|slice| slice.not_any())
                    .unwrap_or(false)
            };

            // For different requested block sizes
            for req_blocks in 1..=max_gap {
                if let Some((_, block_offset)) = super::Bitmask::find_available_blocks_in_slice(
                    bitslice,
                    req_blocks as u32,
                    |idx| (0, idx),
                ) {
                    // The found position should have enough free blocks
                    prop_assert!(is_free_range(block_offset as usize, req_blocks));
                } else {
                    prop_assert!(false, "Should've found a free range")
                }
            }

            // For a block size that doesn't fit
            let req_blocks = max_gap + 1;
            prop_assert!(super::Bitmask::find_available_blocks_in_slice(
                bitslice,
                req_blocks as u32,
                |idx| (0, idx),
            ).is_none());
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/real_data_bench.rs | lib/gridstore/benches/real_data_bench.rs | use std::hint::black_box;
use std::io::BufReader;
use std::path::Path;
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use fs_err as fs;
use fs_err::File;
use gridstore::fixtures::{HM_FIELDS, Payload, empty_storage};
use rand::Rng;
use serde_json::Value;
/// Insert CSV data into the storage
///
/// Reads every record of the CSV at `csv_path` and stores one payload per record,
/// mapping each `HM_FIELDS` column name to the record's string value. Point ids
/// continue from the storage's current `max_point_id()`. Panics on I/O or parse errors.
fn append_csv_data(storage: &mut gridstore::Gridstore<Payload>, csv_path: &Path) {
    let csv_file = BufReader::new(File::open(csv_path).expect("file should open"));
    let mut rdr = csv::Reader::from_reader(csv_file);
    // Continue numbering after the highest point id already present.
    let mut point_offset = storage.max_point_id();
    let hw_counter = HardwareCounterCell::new();
    let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
    for result in rdr.records() {
        let record = result.unwrap();
        let mut payload = Payload::default();
        // Column order in the CSV is assumed to match HM_FIELDS; panics if a column is missing.
        for (i, &field) in HM_FIELDS.iter().enumerate() {
            payload.0.insert(
                field.to_string(),
                Value::String(record.get(i).unwrap().to_string()),
            );
        }
        storage
            .put_value(point_offset, &payload, hw_counter_ref)
            .unwrap();
        point_offset += 1;
    }
}
/// Recursively compute the total size of all files under `path`, in bytes.
/// Panics on I/O errors, matching the benchmark-fixture style of this file.
fn compute_folder_size_bytes(path: &Path) -> u64 {
    let mut size = 0;
    for entry in fs::read_dir(path).unwrap() {
        let entry = entry.unwrap();
        let metadata = entry.metadata().unwrap();
        if metadata.is_dir() {
            // Recurse in *bytes* so nested directories are counted correctly.
            size += compute_folder_size_bytes(&entry.path());
        } else {
            size += metadata.len();
        }
    }
    size
}

/// Recursively compute the size of a directory in megabytes (rounded up).
///
/// Fixes two defects of the previous implementation:
/// - subdirectory sizes were returned in *megabytes* by the recursive call but added
///   into a *byte* accumulator, grossly undercounting nested directories;
/// - the byte total went through `f32`, which cannot represent sizes above ~16MB exactly.
fn compute_folder_size_mb<P: AsRef<Path>>(path: P) -> u64 {
    // Integer ceiling division: exact for any u64 size.
    compute_folder_size_bytes(path.as_ref()).div_ceil(1_000_000)
}
/// End-to-end benchmark over the H&M articles CSV dataset: bulk insert, storage-size
/// computation, full scans, inflation by repeated re-insertion, and sparse deletes.
/// Downloads the dataset on first run; the hard-coded size assertions pin the expected
/// dataset revision and storage layout.
pub fn real_data_data_bench(c: &mut Criterion) {
    let (dir, mut storage) = empty_storage();
    let csv_path = dataset::Dataset::HMArticles
        .download()
        .expect("download should succeed");
    // check source file size
    let file_size_bytes = fs::metadata(csv_path.clone())
        .expect("file should exist")
        .len();
    assert_eq!(file_size_bytes, 36_127_865); // 36MB
    // the CSV file has 105_542 rows
    let expected_point_count = 105_542;
    // insert data once & flush
    append_csv_data(&mut storage, &csv_path);
    storage.flusher()().unwrap();
    assert_eq!(storage.max_point_id(), expected_point_count);
    // flush to get a consistent bitmask
    storage.flusher()().unwrap();
    // sanity check of storage size
    let storage_size = storage.get_storage_size_bytes();
    assert_eq!(storage_size, 54_034_048); // 54MB
    // check storage folder size
    let file_size_mb = compute_folder_size_mb(dir.path());
    assert_eq!(file_size_mb, 70); // 70MB (includes metadata)
    c.bench_function("compute storage size", |b| {
        b.iter(|| black_box(storage.get_storage_size_bytes()));
    });
    c.bench_function("scan storage", |b| {
        let hw_counter = HardwareCounterCell::new();
        b.iter(|| {
            // Sequentially read every stored payload back.
            for i in 0..storage.max_point_id() {
                let res = storage.get_value::<false>(i, &hw_counter).unwrap();
                assert!(res.0.contains_key("article_id"));
            }
        });
    });
    // append the same data again to increase storage size
    for _ in 0..10 {
        append_csv_data(&mut storage, &csv_path);
        storage.flusher()().unwrap();
    }
    let inflated_storage_size = storage.get_storage_size_bytes();
    assert_eq!(inflated_storage_size, 594_374_528); // 594 MB (close to 10x54MB)
    c.bench_function("compute storage size (large)", |b| {
        b.iter(|| black_box(storage.get_storage_size_bytes()));
    });
    // delete 30% of the points
    let mut rng = rand::rng();
    for i in 0..storage.max_point_id() {
        if rng.random_bool(0.3) {
            storage.delete_value(i).unwrap();
        }
    }
    // flush to get a consistent bitmask
    storage.flusher()().unwrap();
    c.bench_function("compute storage size (large sparse)", |b| {
        b.iter(|| black_box(storage.get_storage_size_bytes()));
    });
    c.bench_function("insert real payload (large)", |b| {
        b.iter(|| {
            append_csv_data(&mut storage, &csv_path);
            // do not always flush to build up pending updates
            if rng.random_bool(0.3) {
                storage.flusher()().unwrap();
            }
        });
    });
}
criterion_group!(benches, real_data_data_bench);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/random_data_bench.rs | lib/gridstore/benches/random_data_bench.rs | use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use gridstore::fixtures::{empty_storage, random_payload};
/// sized similarly to the real dataset for a fair comparison
const PAYLOAD_COUNT: u32 = 100_000;
/// Benchmarks writing and then reading back `PAYLOAD_COUNT` payloads of
/// random data against a fresh gridstore instance.
pub fn random_data_bench(c: &mut Criterion) {
    let (_dir, mut storage) = empty_storage();
    let mut rng = rand::rng();

    c.bench_function("write random payload", |b| {
        let hw_counter = HardwareCounterCell::new();
        let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
        b.iter_batched_ref(
            // Setup (untimed): one fresh random payload per batch.
            || random_payload(&mut rng, 2),
            // Timed: store that payload under every point id.
            |payload| {
                (0..PAYLOAD_COUNT).for_each(|point_id| {
                    storage.put_value(point_id, payload, hw_counter_ref).unwrap();
                });
            },
            BatchSize::SmallInput,
        )
    });

    c.bench_function("read random payload", |b| {
        let hw_counter = HardwareCounterCell::new();
        b.iter(|| {
            // Every id was written above, so each lookup must succeed.
            for point_id in 0..PAYLOAD_COUNT {
                let value = storage.get_value::<false>(point_id, &hw_counter);
                assert!(value.is_some());
            }
        });
    });
}
criterion_group!(benches, random_data_bench);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/flush_bench.rs | lib/gridstore/benches/flush_bench.rs | use std::time::{Duration, Instant};
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use gridstore::fixtures::{empty_storage, random_payload};
use rand::Rng;
/// Benchmarks how long `flusher()` takes after accumulating a number of
/// unflushed updates, for both sequential and random write patterns.
///
/// The two halves of the original function were copy-paste duplicates
/// differing only in how the target id is chosen; the shared logic now
/// lives in `bench_flush`, parameterized by an id-picking closure.
pub fn flush_bench(c: &mut Criterion) {
    let prepopulation_size = 10_000;

    // Test sequential updates' flushing performance: rewrite ids
    // 0..unflushed_updates in order before each flush.
    for unflushed_updates in [100, 1_000, prepopulation_size] {
        let bench_name = format!("flush after {unflushed_updates} sequential writes");
        bench_flush(c, &bench_name, prepopulation_size, unflushed_updates, |_, i| i);
    }

    // Test random updates' flushing performance: rewrite uniformly random
    // ids from the prepopulated range before each flush.
    for unflushed_updates in [100, 1_000, prepopulation_size] {
        let bench_name = format!("flush after {unflushed_updates} random writes");
        bench_flush(c, &bench_name, prepopulation_size, unflushed_updates, |rng, _| {
            rng.random_range(0..prepopulation_size)
        });
    }
}

/// Shared benchmark driver.
///
/// Prepopulates a fresh storage with `prepopulation_size` small payloads,
/// then per measured iteration applies `unflushed_updates` writes — the
/// target id of the i-th write is `pick_id(rng, i)` — and measures ONLY the
/// time spent inside the flush itself (via `iter_custom`).
fn bench_flush(
    c: &mut Criterion,
    bench_name: &str,
    prepopulation_size: u32,
    unflushed_updates: u32,
    pick_id: impl Fn(&mut rand::rngs::ThreadRng, u32) -> u32,
) {
    c.bench_function(bench_name, |b| {
        // Setup: create a storage with a specified number of records.
        let (_dir, mut storage) = empty_storage();
        let mut rng = rand::rng();
        let hw_counter = HardwareCounterCell::new();
        let hw_counter_ref = hw_counter.ref_payload_io_write_counter();
        // Pre-populate storage with random data.
        for i in 0..prepopulation_size {
            let payload = random_payload(&mut rng, 1); // Small payload to speed up setup
            storage.put_value(i, &payload, hw_counter_ref).unwrap();
        }
        b.iter_custom(|iters| {
            let mut total_elapsed = Duration::ZERO;
            for _ in 0..iters {
                // Accumulate pending (unflushed) updates.
                for i in 0..unflushed_updates {
                    let id = pick_id(&mut rng, i);
                    let payload = random_payload(&mut rng, 1);
                    storage.put_value(id, &payload, hw_counter_ref).unwrap();
                }
                // Benchmark the flush operation after accumulating updates.
                let instant = Instant::now();
                storage.flusher()().unwrap();
                total_elapsed += instant.elapsed();
            }
            total_elapsed
        });
    });
}
criterion_group!(benches, flush_bench);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/bitmask_bench.rs | lib/gridstore/benches/bitmask_bench.rs | use std::hint::black_box;
use bitvec::vec::BitVec;
use criterion::{Criterion, criterion_group, criterion_main};
use gridstore::bitmask::Bitmask;
use gridstore::config::DEFAULT_REGION_SIZE_BLOCKS;
use rand::Rng;
/// Benchmarks `Bitmask::calculate_gaps` and
/// `Bitmask::find_available_blocks_in_slice` over region-sized windows of a
/// single large random bitvector. The windows iterator `cycle()`s so every
/// benchmark iteration sees a different (but reproducible-in-sequence) slice.
pub fn bench_bitmask_ops(c: &mut Criterion) {
    let distr = rand::distr::StandardUniform;
    let rng = rand::rng();
    // 1000 regions worth of random bits to draw windows from.
    let random_bitvec = rng
        .sample_iter::<bool, _>(distr)
        .take(1000 * DEFAULT_REGION_SIZE_BLOCKS)
        .collect::<BitVec>();
    let mut bitslice_iter = random_bitvec.windows(DEFAULT_REGION_SIZE_BLOCKS).cycle();
    c.bench_function("calculate_gaps", |b| {
        b.iter(|| {
            let bitslice = bitslice_iter.next().unwrap();
            Bitmask::calculate_gaps(black_box(bitslice), DEFAULT_REGION_SIZE_BLOCKS)
        })
    });
    c.bench_function("find_available_blocks_in_slice", |b| {
        let mut rng = rand::rng();
        b.iter(|| {
            let bitslice = bitslice_iter.next().unwrap();
            let num_blocks = rng.random_range(1..10);
            // NOTE(review): the `|_| (0, 0)` callback stubs out the
            // neighboring-gap lookup — confirm against the signature of
            // `Bitmask::find_available_blocks_in_slice` if this bench should
            // exercise that path too.
            Bitmask::find_available_blocks_in_slice(black_box(bitslice), num_blocks, |_| (0, 0))
        })
    });
}
criterion_group!(benches, bench_bitmask_ops);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/bustle_bench/payload_storage.rs | lib/gridstore/benches/bustle_bench/payload_storage.rs | use std::sync::Arc;
use bustle::Collection;
use common::counter::hardware_counter::HardwareCounterCell;
use gridstore::fixtures::{Payload, empty_storage};
use parking_lot::RwLock;
use crate::PayloadStorage;
use crate::fixture::{ArcStorage, SequentialCollectionHandle, StorageProxy};
// Bustle `Collection` adapter for the gridstore-backed storage: a fresh
// temp-dir storage wrapped in `Arc<RwLock<StorageProxy>>` so multiple bustle
// worker threads can share one instance via `pin()`.
impl Collection for ArcStorage<PayloadStorage> {
    type Handle = Self;
    // `_capacity` is ignored: gridstore grows on demand.
    fn with_capacity(_capacity: usize) -> Self {
        let (dir, storage) = empty_storage();
        let proxy = StorageProxy::new(storage);
        ArcStorage {
            proxy: Arc::new(RwLock::new(proxy)),
            dir: Arc::new(dir),
        }
    }
    // Cheap per-thread handle: just clones the Arcs (the temp dir stays
    // alive as long as any handle does).
    fn pin(&self) -> Self::Handle {
        Self {
            proxy: self.proxy.clone(),
            dir: self.dir.clone(),
        }
    }
}
// Gridstore backend for the bench's storage interface. Fresh throwaway
// `HardwareCounterCell`s are used per call since benches don't consume the
// measurements.
impl SequentialCollectionHandle for PayloadStorage {
    fn get(&self, key: &u32) -> bool {
        self.get_value::<false>(*key, &HardwareCounterCell::new()) // No measurements needed in benches
            .is_some()
    }
    // NOTE(review): the result of `put_value` is negated — presumably it
    // returns `true` when an existing value was replaced, so `insert`
    // reports `true` only for brand-new keys. Confirm against
    // `Gridstore::put_value` docs.
    fn insert(&mut self, key: u32, payload: &Payload) -> bool {
        !self
            .put_value(
                key,
                payload,
                HardwareCounterCell::new().ref_payload_io_write_counter(),
            )
            .unwrap()
    }
    // `true` iff a value actually existed for `key`.
    fn remove(&mut self, key: &u32) -> bool {
        self.delete_value(*key).is_some()
    }
    // Unlike `insert`, the raw `put_value` result is returned unmodified.
    fn update(&mut self, key: &u32, payload: &Payload) -> bool {
        self.put_value(
            *key,
            payload,
            HardwareCounterCell::new().ref_payload_io_write_counter(),
        )
        .unwrap()
    }
    fn flush(&self) -> bool {
        self.flusher()().is_ok()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/bustle_bench/fixture.rs | lib/gridstore/benches/bustle_bench/fixture.rs | use std::collections::HashMap;
use std::sync::{Arc, OnceLock};
use bustle::CollectionHandle;
use gridstore::fixtures::Payload;
use parking_lot::RwLock;
use serde_json::json;
use tempfile::TempDir;
#[derive(Clone)]
pub struct ArcStorage<S> {
pub proxy: Arc<RwLock<StorageProxy<S>>>,
pub dir: Arc<TempDir>,
}
/// A storage that includes an external to internal id tracker, and a generator of payloads.
pub struct StorageProxy<S> {
storage: S,
id_tracker: HashMap<u64, u32>,
max_internal_id: u32,
payload_picker: PayloadPicker,
/// Amount of writes without a flush.
write_count: u32,
}
impl<S> StorageProxy<S> {
pub fn new(storage: S) -> Self {
Self {
storage,
id_tracker: HashMap::new(),
max_internal_id: 0,
payload_picker: PayloadPicker::new(),
write_count: 0,
}
}
}
impl<S: SequentialCollectionHandle> StorageProxy<S> {
    /// Flush the underlying storage once per this many writes.
    const FLUSH_INTERVAL: u32 = 10000;

    /// Account for one write; when the counter reaches
    /// [`Self::FLUSH_INTERVAL`], reset it and flush the storage
    /// (panicking if the flush reports failure).
    fn maybe_flush(&mut self) {
        if self.write_count < Self::FLUSH_INTERVAL {
            self.write_count += 1;
            return;
        }
        self.write_count = 0;
        assert!(self.storage.flush());
    }
}
impl<S: SequentialCollectionHandle> CollectionHandle for ArcStorage<S> {
type Key = u64;
fn get(&mut self, key: &Self::Key) -> bool {
// eprintln!("GET {}", key);
let proxy = self.proxy.read();
let Some(internal) = proxy.id_tracker.get(key) else {
return false;
};
proxy.storage.get(internal)
}
fn insert(&mut self, key: &Self::Key) -> bool {
// eprintln!("INSERT {}", key);
let mut proxy = self.proxy.write();
proxy.maybe_flush();
let internal_id = if let Some(id) = proxy.id_tracker.get(key) {
*id
} else {
let internal_id = proxy.max_internal_id;
proxy.max_internal_id += 1;
proxy.id_tracker.insert(*key, internal_id);
internal_id
};
let payload = proxy.payload_picker.pick(internal_id);
proxy.storage.insert(internal_id, &payload)
}
fn remove(&mut self, key: &Self::Key) -> bool {
// eprintln!("REMOVE {}", key);
let mut proxy = self.proxy.write();
proxy.maybe_flush();
let internal_id = match proxy.id_tracker.get(key) {
Some(internal_id) => *internal_id,
None => return false,
};
proxy.storage.remove(&internal_id)
}
fn update(&mut self, key: &Self::Key) -> bool {
// eprintln!("UPDATE {}", key);
let mut proxy = self.proxy.write();
proxy.maybe_flush();
let Some(&internal_id) = proxy.id_tracker.get(key) else {
return false;
};
let payload = proxy.payload_picker.pick(internal_id);
proxy.storage.update(&internal_id, &payload)
}
}
/// Storage operations keyed by an internal, sequential `u32` id.
///
/// Implemented for both the gridstore payload storage and RocksDB so the
/// bustle workloads can drive either backend through the same interface.
pub trait SequentialCollectionHandle {
    /// Returns `true` if a value exists for `key`.
    fn get(&self, key: &u32) -> bool;
    /// Stores `payload` under `key`; returns `true` on a fresh insert.
    fn insert(&mut self, key: u32, payload: &Payload) -> bool;
    /// Removes the value for `key`; returns `true` if something was removed.
    fn remove(&mut self, key: &u32) -> bool;
    /// Overwrites the value for `key`.
    fn update(&mut self, key: &u32, payload: &Payload) -> bool;
    /// Persists pending writes; returns `true` on success.
    fn flush(&self) -> bool;
}
pub struct PayloadPicker {
payloads: OnceLock<Vec<Arc<Payload>>>,
}
impl PayloadPicker {
fn new() -> Self {
Self {
payloads: OnceLock::new(),
}
}
fn pick(&self, internal_id: u32) -> Arc<Payload> {
let payloads = self.payloads.get_or_init(|| {
[
json!({"name": "Alice", "age": 30, "city": "Wonderland"}),
json!({"name": "Bob", "age": 25, "city": "Builderland", "occupation": "Builder"}),
json!({"name": "Charlie", "age": 35, "city": "Chocolate Factory", "hobbies": ["Inventing", "Exploring"]}),
json!({"name": "Dave", "age": 40, "city": "Dinosaur Land", "favorite_dinosaur": "T-Rex"}),
json!({"name": "Eve", "age": 28, "city": "Eden", "skills": ["Gardening", "Cooking", "Singing"]}),
json!({"name": "Frank", "age": 33, "city": "Fantasy Island", "adventures": ["Treasure Hunt", "Dragon Slaying", "Rescue Mission"]}),
json!({"name": "Grace", "age": 29, "city": "Gotham", "alias": "Batwoman", "gadgets": ["Batarang", "Grapple Gun", "Smoke Bomb"]}),
json!({"name": "Hank", "age": 45, "city": "Hogwarts", "house": "Gryffindor", "patronus": "Stag", "wand": {"wood": "Holly", "core": "Phoenix Feather", "length": 11}}),
json!({"name": "Ivy", "age": 27, "city": "Ivory Tower", "profession": "Scholar", "publications": ["Theories of Magic", "History of the Ancients", "Alchemy and Potions"]}),
json!({"name": "Jack", "age": 32, "city": "Jack's Beanstalk", "adventures": ["Climbing the Beanstalk", "Meeting the Giant", "Stealing the Golden Goose", "Escaping the Giant", "Living Happily Ever After"]}),
].into_iter().map(|value| Arc::new(Payload(value.as_object().unwrap().clone()))).collect()
});
let pick_idx = internal_id as usize % payloads.len();
payloads[pick_idx].clone()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/bustle_bench/rocksdb.rs | lib/gridstore/benches/bustle_bench/rocksdb.rs | use std::sync::Arc;
use bustle::Collection;
use gridstore::Blob;
use gridstore::fixtures::Payload;
use parking_lot::RwLock;
use rocksdb::{DB, DBRecoveryMode, LogLevel, Options, WriteOptions};
use crate::fixture::{ArcStorage, SequentialCollectionHandle, StorageProxy};
const DB_CACHE_SIZE: usize = 10 * 1024 * 1024; // 10 mb
const DB_MAX_LOG_SIZE: usize = 1024 * 1024; // 1 mb
const DB_MAX_OPEN_FILES: usize = 256;
const DB_DELETE_OBSOLETE_FILES_PERIOD: u64 = 3 * 60 * 1_000_000; // 3 minutes in microseconds
pub const DB_PAYLOAD_CF: &str = "payload";
/// RocksDB options (both global and for column families)
///
/// Tuned for the benchmark: small write buffer and log files, LZ4
/// compression, and lenient WAL recovery.
pub fn db_options() -> Options {
    let mut options: Options = Options::default();
    options.set_write_buffer_size(DB_CACHE_SIZE); // write_buffer_size is enforced per column family.
    options.create_if_missing(true);
    options.set_log_level(LogLevel::Error);
    options.set_recycle_log_file_num(1);
    options.set_keep_log_file_num(1); // must be greater than zero
    options.set_max_log_file_size(DB_MAX_LOG_SIZE);
    options.set_delete_obsolete_files_period_micros(DB_DELETE_OBSOLETE_FILES_PERIOD);
    options.create_missing_column_families(true);
    options.set_max_open_files(DB_MAX_OPEN_FILES as i32);
    options.set_compression_type(rocksdb::DBCompressionType::Lz4);
    // Qdrant relies on its own WAL for durability
    options.set_wal_recovery_mode(DBRecoveryMode::TolerateCorruptedTailRecords);
    // Extra consistency checking is cheap insurance in debug builds only.
    #[cfg(debug_assertions)]
    {
        options.set_paranoid_checks(true);
    }
    options
}
/// Write options used for every benchmark write: no per-write fsync, but
/// with the WAL left enabled.
fn get_write_options() -> WriteOptions {
    let mut options = WriteOptions::default();
    // Do not fsync on every single write.
    options.set_sync(false);
    // RocksDB WAL is required for durability even if data is flushed
    options.disable_wal(false);
    options
}
impl Collection for ArcStorage<rocksdb::DB> {
type Handle = Self;
fn with_capacity(_capacity: usize) -> Self {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("rocksdb");
let column_families = vec![DB_PAYLOAD_CF];
let options = db_options();
// Make sure that all column families have the same options
let column_with_options = column_families
.into_iter()
.map(|cf| (cf, options.clone()))
.collect::<Vec<_>>();
let db = DB::open_cf_with_opts(&options, path, column_with_options).unwrap();
let proxy = StorageProxy::new(db);
ArcStorage {
proxy: Arc::new(RwLock::new(proxy)),
dir: Arc::new(dir),
}
}
fn pin(&self) -> Self::Handle {
Self {
proxy: self.proxy.clone(),
dir: self.dir.clone(),
}
}
}
// RocksDB backend for the bench's storage interface. Keys are stored as
// big-endian byte strings (`to_be_bytes`), so the lexicographic key order in
// RocksDB matches numeric order of the u32 ids.
impl SequentialCollectionHandle for DB {
    fn get(&self, key: &u32) -> bool {
        let cf_handle = self.cf_handle(DB_PAYLOAD_CF).unwrap();
        self.get_cf(cf_handle, key.to_be_bytes()).unwrap().is_some()
    }
    // Always reports `true`: `put_cf_opt` does not distinguish fresh inserts
    // from overwrites.
    fn insert(&mut self, key: u32, payload: &Payload) -> bool {
        let cf_handle = self.cf_handle(DB_PAYLOAD_CF).unwrap();
        let value = payload.to_bytes();
        self.put_cf_opt(cf_handle, key.to_be_bytes(), value, &get_write_options())
            .unwrap();
        true
    }
    // Always reports `true`: `delete_cf_opt` succeeds for absent keys too.
    fn remove(&mut self, key: &u32) -> bool {
        let cf_handle = self.cf_handle(DB_PAYLOAD_CF).unwrap();
        self.delete_cf_opt(cf_handle, key.to_be_bytes(), &get_write_options())
            .unwrap();
        true
    }
    // Identical to `insert` at the RocksDB level (put == upsert).
    fn update(&mut self, key: &u32, payload: &Payload) -> bool {
        let cf_handle = self.cf_handle(DB_PAYLOAD_CF).unwrap();
        let value = payload.to_bytes();
        self.put_cf_opt(cf_handle, key.to_be_bytes(), value, &get_write_options())
            .unwrap();
        true
    }
    // Calls the *inherent* `DB::flush` — inherent methods take precedence
    // over trait methods in Rust's method resolution, so this is not
    // self-recursion.
    fn flush(&self) -> bool {
        self.flush().is_ok()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gridstore/benches/bustle_bench/main.rs | lib/gridstore/benches/bustle_bench/main.rs | //! Implements Bustle traits for comparing performance against other kv stores.
#[cfg(feature = "rocksdb")]
use ::rocksdb::DB;
use bustle::{Mix, Workload};
use fixture::ArcStorage;
use gridstore::Gridstore;
use gridstore::fixtures::Payload;
mod fixture;
mod payload_storage;
#[cfg(feature = "rocksdb")]
mod rocksdb;
type PayloadStorage = Gridstore<Payload>;
fn default_opts(workload: &mut Workload) -> &mut Workload {
let seed = [42; 32];
workload.initial_capacity_log2(21).seed(seed)
}
fn main() {
for num_threads in [1, 2] {
println!("------------ {num_threads} thread(s) -------------");
// Read heavy
println!("**read_heavy** with prefill_fraction 0.95");
let mut workload = Workload::new(num_threads, Mix::read_heavy());
default_opts(&mut workload).prefill_fraction(0.95);
println!("ValueStorage:");
workload.run::<ArcStorage<PayloadStorage>>();
#[cfg(feature = "rocksdb")]
{
println!("RocksDB:");
workload.run::<ArcStorage<DB>>();
}
println!(" ");
// Insert heavy
println!("**insert_heavy** with prefill_fraction 0.2");
let mut workload = Workload::new(num_threads, Mix::insert_heavy());
default_opts(&mut workload).prefill_fraction(0.2);
println!("ValueStorage:");
workload.run::<ArcStorage<PayloadStorage>>();
#[cfg(feature = "rocksdb")]
{
println!("RocksDB:");
workload.run::<ArcStorage<DB>>();
}
println!(" ");
// Update heavy
println!("**update_heavy** with prefill_fraction 0.95");
let mut workload = Workload::new(num_threads, Mix::update_heavy());
default_opts(&mut workload).prefill_fraction(0.95);
println!("ValueStorage:");
workload.run::<ArcStorage<PayloadStorage>>();
#[cfg(feature = "rocksdb")]
{
println!("RocksDB:");
workload.run::<ArcStorage<DB>>();
}
println!(" ");
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/lib.rs | lib/sparse/src/lib.rs | pub mod common;
pub mod index;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/search_context.rs | lib/sparse/src/index/search_context.rs | use std::cmp::{Ordering, max, min};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::Relaxed;
use common::counter::hardware_counter::HardwareCounterCell;
use common::top_k::TopK;
use common::types::{PointOffsetType, ScoredPointOffset};
use super::posting_list_common::PostingListIter;
use crate::common::scores_memory_pool::PooledScoresHandle;
use crate::common::sparse_vector::{RemappedSparseVector, score_vectors};
use crate::common::types::{DimId, DimWeight};
use crate::index::inverted_index::InvertedIndex;
use crate::index::posting_list::PostingListIterator;
/// Iterator over posting lists with a reference to the corresponding query index and weight
pub struct IndexedPostingListIterator<T: PostingListIter> {
posting_list_iterator: T,
query_index: DimId,
query_weight: DimWeight,
}
/// Making this larger makes the search faster but uses more (pooled) memory
const ADVANCE_BATCH_SIZE: usize = 10_000;
pub struct SearchContext<'a, 'b, T: PostingListIter = PostingListIterator<'a>> {
postings_iterators: Vec<IndexedPostingListIterator<T>>,
query: RemappedSparseVector,
top: usize,
is_stopped: &'a AtomicBool,
top_results: TopK,
min_record_id: Option<PointOffsetType>, // min_record_id ids across all posting lists
max_record_id: PointOffsetType, // max_record_id ids across all posting lists
pooled: PooledScoresHandle<'b>, // handle to pooled scores
use_pruning: bool,
hardware_counter: &'a HardwareCounterCell,
}
impl<'a, 'b, T: PostingListIter> SearchContext<'a, 'b, T> {
pub fn new(
query: RemappedSparseVector,
top: usize,
inverted_index: &'a impl InvertedIndex<Iter<'a> = T>,
pooled: PooledScoresHandle<'b>,
is_stopped: &'a AtomicBool,
hardware_counter: &'a HardwareCounterCell,
) -> SearchContext<'a, 'b, T> {
let mut postings_iterators = Vec::new();
// track min and max record ids across all posting lists
let mut max_record_id = 0;
let mut min_record_id = u32::MAX;
// iterate over query indices
for (query_weight_offset, id) in query.indices.iter().enumerate() {
if let Some(mut it) = inverted_index.get(*id, hardware_counter)
&& let (Some(first), Some(last_id)) = (it.peek(), it.last_id())
{
// check if new min
let min_record_id_posting = first.record_id;
min_record_id = min(min_record_id, min_record_id_posting);
// check if new max
let max_record_id_posting = last_id;
max_record_id = max(max_record_id, max_record_id_posting);
// capture query info
let query_index = *id;
let query_weight = query.values[query_weight_offset];
postings_iterators.push(IndexedPostingListIterator {
posting_list_iterator: it,
query_index,
query_weight,
});
}
}
let top_results = TopK::new(top);
// Query vectors with negative values can NOT use the pruning mechanism which relies on the pre-computed `max_next_weight`.
// The max contribution per posting list that we calculate is not made to compute the max value of two negative numbers.
// This is a limitation of the current pruning implementation.
let use_pruning = T::reliable_max_next_weight() && query.values.iter().all(|v| *v >= 0.0);
let min_record_id = Some(min_record_id);
SearchContext {
postings_iterators,
query,
top,
is_stopped,
top_results,
min_record_id,
max_record_id,
pooled,
use_pruning,
hardware_counter,
}
}
const DEFAULT_SCORE: f32 = 0.0;
/// Plain search against the given ids without any pruning
pub fn plain_search(&mut self, ids: &[PointOffsetType]) -> Vec<ScoredPointOffset> {
// sort ids to fully leverage posting list iterator traversal
let mut sorted_ids = ids.to_vec();
sorted_ids.sort_unstable();
let cpu_counter = self.hardware_counter.cpu_counter();
let mut indices = Vec::with_capacity(self.query.indices.len());
let mut values = Vec::with_capacity(self.query.values.len());
for id in sorted_ids {
// check for cancellation
if self.is_stopped.load(Relaxed) {
break;
}
indices.clear();
values.clear();
// collect indices and values for the current record id from the query's posting lists *only*
for posting_iterator in self.postings_iterators.iter_mut() {
// rely on underlying binary search as the posting lists are sorted by record id
match posting_iterator.posting_list_iterator.skip_to(id) {
None => {} // no match for posting list
Some(element) => {
// match for posting list
indices.push(posting_iterator.query_index);
values.push(element.weight);
}
}
}
if values.is_empty() {
continue;
}
// Accumulate the sum of the length of the retrieved sparse vector and the query vector length
// as measurement for CPU usage of plain search.
cpu_counter
.incr_delta(self.query.indices.len() + values.len() * size_of::<DimWeight>());
// reconstruct sparse vector and score against query
let sparse_score =
score_vectors(&indices, &values, &self.query.indices, &self.query.values)
.unwrap_or(Self::DEFAULT_SCORE);
self.top_results.push(ScoredPointOffset {
score: sparse_score,
idx: id,
});
}
let top = std::mem::take(&mut self.top_results);
top.into_vec()
}
/// Advance posting lists iterators in a batch fashion.
fn advance_batch<F: Fn(PointOffsetType) -> bool>(
&mut self,
batch_start_id: PointOffsetType,
batch_last_id: PointOffsetType,
filter_condition: &F,
) {
// init batch scores
let batch_len = batch_last_id - batch_start_id + 1;
self.pooled.scores.clear(); // keep underlying allocated memory
self.pooled.scores.resize(batch_len as usize, 0.0);
for posting in self.postings_iterators.iter_mut() {
posting.posting_list_iterator.for_each_till_id(
batch_last_id,
self.pooled.scores.as_mut_slice(),
#[inline(always)]
|scores, id, weight| {
let element_score = weight * posting.query_weight;
let local_id = (id - batch_start_id) as usize;
// SAFETY: `id` is within `batch_start_id..=batch_last_id`
// Thus, `local_id` is within `0..batch_len`.
*unsafe { scores.get_unchecked_mut(local_id) } += element_score;
},
);
}
for (local_index, &score) in self.pooled.scores.iter().enumerate() {
// publish only the non-zero scores above the current min to beat
if score != 0.0 && score > self.top_results.threshold() {
let real_id = batch_start_id + local_index as PointOffsetType;
// do not score if filter condition is not satisfied
if !filter_condition(real_id) {
continue;
}
let score_point_offset = ScoredPointOffset {
score,
idx: real_id,
};
self.top_results.push(score_point_offset);
}
}
}
/// Compute scores for the last posting list quickly
///
/// When a single posting list remains, every element of it scores
/// independently (no other lists can contribute to the same record id),
/// so it can be drained to the end in one pass without batching.
fn process_last_posting_list<F: Fn(PointOffsetType) -> bool>(&mut self, filter_condition: &F) {
    debug_assert_eq!(self.postings_iterators.len(), 1);
    let posting = &mut self.postings_iterators[0];
    posting.posting_list_iterator.for_each_till_id(
        // Drain to the very end of the posting list.
        PointOffsetType::MAX,
        &mut (),
        |_, id, weight| {
            // do not score if filter condition is not satisfied
            if !filter_condition(id) {
                return;
            }
            let score = weight * posting.query_weight;
            self.top_results.push(ScoredPointOffset { score, idx: id });
        },
    );
}
/// Returns the next min record id from all posting list iterators
///
/// returns None if all posting list iterators are exhausted
fn next_min_id(to_inspect: &mut [IndexedPostingListIterator<T>]) -> Option<PointOffsetType> {
    // The peeked head of every non-exhausted posting list is a candidate;
    // the smallest of them is the next record id to process.
    to_inspect
        .iter_mut()
        .filter_map(|posting_iterator| posting_iterator.posting_list_iterator.peek())
        .map(|element| element.record_id)
        .min()
}
/// Make sure the longest posting list is at the head of the posting list iterators
pub(crate) fn promote_longest_posting_lists_to_the_front(&mut self) {
    // Locate the posting list with the most elements left to traverse.
    // (`max_by_key`, like the previous `max_by`+`cmp`, keeps the last
    // maximum on ties.)
    let longest_index = self
        .postings_iterators
        .iter()
        .enumerate()
        .max_by_key(|(_, posting)| posting.posting_list_iterator.len_to_end())
        .map(|(index, _)| index);
    // Swap it into position 0 unless it is already there.
    if let Some(index) = longest_index {
        if index != 0 {
            self.postings_iterators.swap(0, index);
        }
    }
}
/// How many elements are left in the posting list iterator
#[cfg(test)]
pub(crate) fn posting_list_len(&self, idx: usize) -> usize {
self.postings_iterators[idx]
.posting_list_iterator
.len_to_end()
}
/// Search for the top k results that satisfy the filter condition
pub fn search<F: Fn(PointOffsetType) -> bool>(
&mut self,
filter_condition: &F,
) -> Vec<ScoredPointOffset> {
if self.postings_iterators.is_empty() {
return Vec::new();
}
{
// Measure CPU usage of indexed sparse search.
// Assume the complexity of the search as total volume of the posting lists
// that are traversed in the batched search.
let mut cpu_cost = 0;
for posting in self.postings_iterators.iter() {
cpu_cost += posting.posting_list_iterator.len_to_end()
* posting.posting_list_iterator.element_size();
}
self.hardware_counter.cpu_counter().incr_delta(cpu_cost);
}
let mut best_min_score = f32::MIN;
loop {
// check for cancellation (atomic amortized by batch)
if self.is_stopped.load(Relaxed) {
break;
}
// prepare next iterator of batched ids
let Some(start_batch_id) = self.min_record_id else {
break;
};
// compute batch range of contiguous ids for the next batch
let last_batch_id = min(
start_batch_id + ADVANCE_BATCH_SIZE as u32,
self.max_record_id,
);
// advance and score posting lists iterators
self.advance_batch(start_batch_id, last_batch_id, filter_condition);
// remove empty posting lists if necessary
self.postings_iterators.retain(|posting_iterator| {
posting_iterator.posting_list_iterator.len_to_end() != 0
});
// update min_record_id
self.min_record_id = Self::next_min_id(&mut self.postings_iterators);
// check if all posting lists are exhausted
if self.postings_iterators.is_empty() {
break;
}
// if only one posting list left, we can score it quickly
if self.postings_iterators.len() == 1 {
self.process_last_posting_list(filter_condition);
break;
}
// we potentially have enough results to prune low performing posting lists
if self.use_pruning && self.top_results.len() >= self.top {
// current min score
let new_min_score = self.top_results.threshold();
if new_min_score == best_min_score {
// no improvement in lowest best score since last pruning - skip pruning
continue;
} else {
best_min_score = new_min_score;
}
// make sure the first posting list is the longest for pruning
self.promote_longest_posting_lists_to_the_front();
// prune posting list that cannot possibly contribute to the top results
let pruned = self.prune_longest_posting_list(new_min_score);
if pruned {
// update min_record_id
self.min_record_id = Self::next_min_id(&mut self.postings_iterators);
}
}
}
// posting iterators exhausted, return result queue
let queue = std::mem::take(&mut self.top_results);
queue.into_vec()
}
/// Prune posting lists that cannot possibly contribute to the top results
/// Assumes longest posting list is at the head of the posting list iterators
/// Returns true if the longest posting list was pruned
///
/// "Pruning" means skipping the head posting list forward past ids that are
/// provably unable to beat `min_score` (the current worst score in the
/// top-k), using the pre-computed `max_next_weight` upper bound.
pub fn prune_longest_posting_list(&mut self, min_score: f32) -> bool {
    if self.postings_iterators.is_empty() {
        return false;
    }
    // peek first element of longest posting list
    let (longest_posting_iterator, rest_iterators) = self.postings_iterators.split_at_mut(1);
    let longest_posting_iterator = &mut longest_posting_iterator[0];
    if let Some(element) = longest_posting_iterator.posting_list_iterator.peek() {
        let next_min_id_in_others = Self::next_min_id(rest_iterators);
        match next_min_id_in_others {
            Some(next_min_id) => {
                match next_min_id.cmp(&element.record_id) {
                    Ordering::Equal => {
                        // if the next min id in the other posting lists is the same as the current one,
                        // we can't prune the current element as it needs to be scored properly across posting lists
                        return false;
                    }
                    Ordering::Less => {
                        // we can't prune, as the other posting lists contain smaller ids that need to be scored first
                        return false;
                    }
                    Ordering::Greater => {
                        // next_min_id is > element.record_id there is a chance to prune up to `next_min_id`
                        // check against the max possible score using the `max_next_weight`
                        // we can under prune as we should actually check the best score up to `next_min_id` - 1 only
                        // instead of the max possible score but it is not possible to know the best score up to `next_min_id` - 1
                        let max_weight_from_list = element.weight.max(element.max_next_weight);
                        let max_score_contribution =
                            max_weight_from_list * longest_posting_iterator.query_weight;
                        if max_score_contribution <= min_score {
                            // prune to next_min_id
                            let longest_posting_iterator =
                                &mut self.postings_iterators[0].posting_list_iterator;
                            let position_before_pruning =
                                longest_posting_iterator.current_index();
                            longest_posting_iterator.skip_to(next_min_id);
                            let position_after_pruning =
                                longest_posting_iterator.current_index();
                            // check if pruning took place
                            return position_before_pruning != position_after_pruning;
                        }
                    }
                }
            }
            None => {
                // the current posting list is the only one left, we can potentially skip it to the end
                // check against the max possible score using the `max_next_weight`
                let max_weight_from_list = element.weight.max(element.max_next_weight);
                let max_score_contribution =
                    max_weight_from_list * longest_posting_iterator.query_weight;
                if max_score_contribution <= min_score {
                    // prune to the end!
                    let longest_posting_iterator = &mut self.postings_iterators[0];
                    longest_posting_iterator.posting_list_iterator.skip_to_end();
                    return true;
                }
            }
        }
    }
    // no pruning took place
    false
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/compressed_posting_list.rs | lib/sparse/src/index/compressed_posting_list.rs | use std::cmp::Ordering;
use std::fmt::Debug;
use std::mem::size_of;
use bitpacking::BitPacker as _;
use common::counter::hardware_counter::HardwareCounterCell;
use common::counter::iterator_hw_measurement::HwMeasurementIteratorExt;
use common::types::PointOffsetType;
#[cfg(debug_assertions)]
use itertools::Itertools as _;
use super::posting_list_common::{
GenericPostingElement, PostingElement, PostingElementEx, PostingListIter,
};
use crate::common::types::{DimWeight, Weight};
type BitPackerImpl = bitpacking::BitPacker4x;
/// How many elements are packed in a single chunk.
const CHUNK_SIZE: usize = BitPackerImpl::BLOCK_LEN;
/// Compressed posting list for one sparse-vector dimension.
///
/// Ids are bit-packed in fixed-size chunks; weights are stored quantized
/// (per the `Weight` implementation) alongside each chunk.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct CompressedPostingList<W: Weight> {
    /// Compressed ids data. Chunks refer to subslices of this data.
    id_data: Vec<u8>,
    /// Fixed-size chunks.
    chunks: Vec<CompressedPostingChunk<W>>,
    /// Remainder elements that do not fit into chunks.
    remainders: Vec<GenericPostingElement<W>>,
    /// Id of the last element in the list. Used to avoid unpacking the last chunk.
    last_id: Option<PointOffsetType>,
    /// Quantization parameters.
    quantization_params: W::QuantizationParams,
}
/// A non-owning view of [`CompressedPostingList`].
#[derive(Debug, Clone)]
pub struct CompressedPostingListView<'a, W: Weight> {
    /// Compressed ids data; chunks index subslices of it.
    id_data: &'a [u8],
    /// Fixed-size chunks of the posting list.
    chunks: &'a [CompressedPostingChunk<W>],
    /// Trailing elements that did not fill a whole chunk.
    remainders: &'a [GenericPostingElement<W>],
    /// Id of the last element, if any; avoids unpacking the last chunk.
    last_id: Option<PointOffsetType>,
    /// Quantization parameters used to restore `f32` weights.
    multiplier: W::QuantizationParams,
    /// Counter used to account I/O performed through this view.
    hw_counter: &'a HardwareCounterCell,
}
/// One fixed-size block of a compressed posting list: `CHUNK_SIZE` ids
/// (bit-packed inside `id_data`) plus their quantized weights.
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct CompressedPostingChunk<W> {
    /// Initial data point id. Used for decompression.
    initial: PointOffsetType,
    /// An offset within id_data
    offset: u32,
    /// Weight values for the chunk.
    weights: [W; CHUNK_SIZE],
}
impl<W: Weight> CompressedPostingList<W> {
    /// Borrow this list as a [`CompressedPostingListView`], attaching the
    /// hardware counter used to account reads.
    pub(super) fn view<'a>(
        &'a self,
        hw_counter: &'a HardwareCounterCell,
    ) -> CompressedPostingListView<'a, W> {
        CompressedPostingListView {
            id_data: &self.id_data,
            chunks: &self.chunks,
            remainders: &self.remainders,
            last_id: self.last_id,
            multiplier: self.quantization_params,
            hw_counter,
        }
    }

    /// Iterate over the posting list, decompressing chunks lazily.
    pub fn iter<'a>(
        &'a self,
        hw_counter: &'a HardwareCounterCell,
    ) -> CompressedPostingListIterator<'a, W> {
        self.view(hw_counter).iter()
    }

    /// Test helper: build a compressed list from `(id, weight)` records.
    #[cfg(test)]
    pub fn from(records: Vec<(PointOffsetType, DimWeight)>) -> CompressedPostingList<W> {
        let mut posting_list = CompressedPostingBuilder::new();
        for (id, weight) in records {
            posting_list.add(id, weight);
        }
        posting_list.build()
    }
}
/// Byte-size breakdown of a stored [`CompressedPostingList`].
pub struct CompressedPostingListStoreSize {
    /// Total size in bytes: id data + chunk metadata + remainder elements.
    pub total: usize,
    /// Size of the compressed id data in bytes.
    pub id_data_bytes: usize,
    /// Number of fixed-size chunks.
    pub chunks_count: usize,
}
impl CompressedPostingListStoreSize {
    /// Compute the size breakdown from component counts.
    /// `total` is the raw id bytes plus the in-memory size of chunk metadata
    /// and remainder elements.
    fn new<W: Weight>(id_data_bytes: usize, chunks_count: usize, remainders_count: usize) -> Self {
        CompressedPostingListStoreSize {
            total: id_data_bytes
                + chunks_count * size_of::<CompressedPostingChunk<W>>()
                + remainders_count * size_of::<GenericPostingElement<W>>(),
            id_data_bytes,
            chunks_count,
        }
    }
}
/// Defines possible results of the search for the chunk by the ID.
enum IdChunkPosition {
    /// The Id is smaller than any data in chunks and therefore
    /// not in the posting list.
    Before,
    /// The Id is possibly in the chunk, but it is not guaranteed.
    Chunk(usize),
    /// The Id is greater than any data in chunks, but may be in the remainder
    After,
}
impl<'a, W: Weight> CompressedPostingListView<'a, W> {
    /// Assemble a view from its raw parts (see [`CompressedPostingList`] fields).
    pub(super) fn new(
        id_data: &'a [u8],
        chunks: &'a [CompressedPostingChunk<W>],
        remainders: &'a [GenericPostingElement<W>],
        last_id: Option<PointOffsetType>,
        multiplier: W::QuantizationParams,
        hw_counter: &'a HardwareCounterCell,
    ) -> Self {
        CompressedPostingListView {
            id_data,
            chunks,
            remainders,
            last_id,
            multiplier,
            hw_counter,
        }
    }

    /// Decompose the view back into its raw borrowed parts.
    pub(super) fn parts(
        &self,
    ) -> (
        &'a [u8],
        &'a [CompressedPostingChunk<W>],
        &'a [GenericPostingElement<W>],
    ) {
        (self.id_data, self.chunks, self.remainders)
    }

    /// Id of the last element in the list, if any.
    pub fn last_id(&self) -> Option<PointOffsetType> {
        self.last_id
    }

    /// Quantization parameters used to restore `f32` weights.
    pub fn multiplier(&self) -> W::QuantizationParams {
        self.multiplier
    }

    /// Byte-size breakdown of this posting list when stored.
    pub(super) fn store_size(&self) -> CompressedPostingListStoreSize {
        CompressedPostingListStoreSize::new::<W>(
            self.id_data.len(),
            self.chunks.len(),
            self.remainders.len(),
        )
    }

    /// Copy the borrowed data into an owned [`CompressedPostingList`].
    pub fn to_owned(&self) -> CompressedPostingList<W> {
        CompressedPostingList {
            id_data: self.id_data.to_vec(),
            chunks: self.chunks.to_vec(),
            remainders: self.remainders.to_vec(),
            last_id: self.last_id,
            quantization_params: self.multiplier,
        }
    }

    /// Total number of elements (full chunks plus remainder).
    pub fn len(&self) -> usize {
        self.chunks.len() * CHUNK_SIZE + self.remainders.len()
    }

    pub fn is_empty(&self) -> bool {
        self.chunks.is_empty() && self.remainders.is_empty()
    }

    /// Unpack the ids of chunk `chunk_index` into `decompressed_chunk`,
    /// accounting the bytes read on the hardware counter.
    fn decompress_chunk(
        &self,
        chunk_index: usize,
        decompressed_chunk: &mut [PointOffsetType; CHUNK_SIZE],
    ) {
        let chunk = &self.chunks[chunk_index];
        let chunk_size = Self::get_chunk_size(self.chunks, self.id_data, chunk_index);
        self.hw_counter.vector_io_read().incr_delta(chunk_size);
        // Bits per id: the chunk packs CHUNK_SIZE ids into `chunk_size` bytes.
        let chunk_bits = chunk_size * u8::BITS as usize / CHUNK_SIZE;
        BitPackerImpl::new().decompress_strictly_sorted(
            chunk.initial.checked_sub(1),
            &self.id_data[chunk.offset as usize..chunk.offset as usize + chunk_size],
            decompressed_chunk,
            chunk_bits as u8,
        );
    }

    /// Chunk index that contains the element at `position`, or `None` when the
    /// position falls into the remainder.
    fn chunk_id_by_position(&self, position: usize) -> Option<usize> {
        let chunk_index = position / CHUNK_SIZE;
        if chunk_index < self.chunks.len() {
            Some(chunk_index)
        } else {
            None
        }
    }

    /// Finds the chunk index by the point id.
    /// It doesn't guarantee that the ID is inside the chunk,
    /// but if the ID exists, it would be in the chunk.
    fn chunk_id_by_id(&self, id: PointOffsetType) -> IdChunkPosition {
        // Binary search over each chunk's first id.
        let chunk_index_result = self.chunks.binary_search_by(|c| c.initial.cmp(&id));

        match chunk_index_result {
            Ok(chunk_id) => {
                // Found chunk with the first element exactly equal to the id.
                IdChunkPosition::Chunk(chunk_id)
            }
            Err(position) => {
                // ┌────── position 0, before any chunk
                // │       Means first chunk is already greater than required
                // ▼
                // ┌─────────┬────────┬─────────────────┐
                // │Chunk-1  │Chunk-2 │....             │
                // └─────────┴────────┴─────────────────┘ ▲
                //                                        │
                //                                        │
                //                          It might────────┘
                //                          be inside position-1, if Position == length
                //                          position < length it might be either in the chunk (position-1)
                //                          Or be inside the remainder
                if position == self.chunks.len() {
                    if let Some(first_remainder) = self.remainders.first() {
                        // If first element of remainder is greater than id,
                        // then Id might still be in the last chunk, if it exists
                        if id < first_remainder.record_id {
                            if position > 0 {
                                IdChunkPosition::Chunk(position - 1)
                            } else {
                                IdChunkPosition::Before
                            }
                        } else {
                            IdChunkPosition::After
                        }
                    } else {
                        // There are no remainder, so we don't know the last id of the last chunk
                        // Therefore, it is still possible that the id is in the last chunk
                        IdChunkPosition::Chunk(position - 1)
                    }
                } else if position == 0 {
                    // The id is smaller than the first element of the first chunk
                    IdChunkPosition::Before
                } else {
                    // The id is between two chunks
                    IdChunkPosition::Chunk(position - 1)
                }
            }
        }
    }

    /// Get byte size of the compressed chunk.
    fn get_chunk_size(
        chunks: &[CompressedPostingChunk<W>],
        data: &[u8],
        chunk_index: usize,
    ) -> usize {
        if chunk_index + 1 < chunks.len() {
            // Size is the distance to the next chunk's offset.
            chunks[chunk_index + 1].offset as usize - chunks[chunk_index].offset as usize
        } else {
            // Last chunk
            data.len() - chunks[chunk_index].offset as usize
        }
    }

    /// Remainder element at `index` (if any), with I/O accounting.
    #[inline]
    fn get_remainder_id(&self, index: usize) -> Option<&GenericPostingElement<W>> {
        self.hw_counter
            .vector_io_read()
            .incr_delta(size_of::<GenericPostingElement<W>>());
        self.remainders.get(index)
    }

    /// Iterate remainder elements starting at `index`, accounting each read.
    #[inline]
    fn iter_remainder_from(
        &self,
        index: usize,
    ) -> impl Iterator<Item = &'_ GenericPostingElement<W>> + '_ {
        self.remainders[index..].iter().measure_hw_with_cell(
            self.hw_counter,
            size_of::<GenericPostingElement<W>>(),
            |hw_counter| hw_counter.vector_io_read(),
        )
    }

    #[inline]
    fn remainder_len(&self) -> usize {
        self.remainders.len()
    }

    #[inline]
    fn chunks_len(&self) -> usize {
        self.chunks.len()
    }

    /// Quantized weight of the element at overall position `pos`.
    /// Warning: This function panics if the index is out of bounds.
    #[inline]
    fn get_weight(&self, pos: usize) -> W {
        self.hw_counter.vector_io_read().incr_delta(size_of::<W>());
        let chunk = &self.chunks[pos / CHUNK_SIZE];
        chunk.weights[pos % CHUNK_SIZE]
    }

    /// Slice of `count` quantized weights starting at overall position `pos`.
    /// The range must not cross a chunk boundary (`count <= CHUNK_SIZE`).
    #[inline]
    fn weights_range(&self, pos: usize, count: usize) -> &[W] {
        debug_assert!(count <= CHUNK_SIZE);
        self.hw_counter
            .vector_io_read()
            .incr_delta(size_of::<W>() * count);
        let chunk = &self.chunks[pos / CHUNK_SIZE];
        let start = pos % CHUNK_SIZE;
        chunk.weights[start..start + count].as_ref()
    }

    /// Iterator positioned at the start of this view.
    pub fn iter(&self) -> CompressedPostingListIterator<'a, W> {
        CompressedPostingListIterator::new(self)
    }
}
/// Accumulates `(id, weight)` pairs and builds a [`CompressedPostingList`].
pub struct CompressedPostingBuilder {
    // Collected elements; sorted, quantized and packed in `build`.
    elements: Vec<PostingElement>,
}
impl CompressedPostingBuilder {
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        CompressedPostingBuilder {
            elements: Vec::new(),
        }
    }

    /// Add a new record to the posting list.
    pub fn add(&mut self, record_id: PointOffsetType, weight: DimWeight) {
        self.elements.push(PostingElement { record_id, weight });
    }

    /// Sort, quantize and bit-pack the accumulated elements.
    ///
    /// Full `CHUNK_SIZE` groups are delta-encoded and packed; the trailing
    /// partial group is kept as uncompressed remainders (weights still
    /// quantized). Panics on duplicate ids in debug builds.
    pub fn build<W: Weight>(mut self) -> CompressedPostingList<W> {
        self.elements.sort_unstable_by_key(|e| e.record_id);

        let quantization_params =
            W::quantization_params_for(self.elements.iter().map(|e| e.weight));

        // Check for duplicates
        #[cfg(debug_assertions)]
        if let Some(e) = self.elements.iter().duplicates_by(|e| e.record_id).next() {
            panic!("Duplicate id {} in posting list", e.record_id);
        }

        // Reusable scratch buffer of ids for the chunk being packed.
        let mut this_chunk = Vec::with_capacity(CHUNK_SIZE);

        let bitpacker = BitPackerImpl::new();
        let mut chunks = Vec::with_capacity(self.elements.len() / CHUNK_SIZE);
        let mut data_size = 0;
        let mut remainders = Vec::with_capacity(self.elements.len() % CHUNK_SIZE);
        // First pass: compute each chunk's packed size and offset, quantize weights.
        for chunk in self.elements.chunks(CHUNK_SIZE) {
            if chunk.len() == CHUNK_SIZE {
                this_chunk.clear();
                this_chunk.extend(chunk.iter().map(|e| e.record_id));

                let initial = this_chunk[0];
                let chunk_bits =
                    bitpacker.num_bits_strictly_sorted(initial.checked_sub(1), &this_chunk);
                let chunk_size = BitPackerImpl::compressed_block_size(chunk_bits);
                chunks.push(CompressedPostingChunk {
                    initial,
                    offset: data_size as u32,
                    weights: chunk
                        .iter()
                        .map(|e| Weight::from_f32(quantization_params, e.weight))
                        .collect::<Vec<_>>()
                        .try_into()
                        .expect("Invalid chunk size"),
                });
                data_size += chunk_size;
            } else {
                // Partial trailing group: store as plain (quantized) elements.
                for e in chunk {
                    remainders.push(GenericPostingElement {
                        record_id: e.record_id,
                        weight: Weight::from_f32(quantization_params, e.weight),
                    });
                }
            }
        }

        // Second pass: bit-pack the ids into the now fully-sized buffer.
        let mut id_data = vec![0u8; data_size];
        for (chunk_index, chunk_data) in self.elements.chunks_exact(CHUNK_SIZE).enumerate() {
            this_chunk.clear();
            this_chunk.extend(chunk_data.iter().map(|e| e.record_id));

            let chunk = &chunks[chunk_index];
            let chunk_size =
                CompressedPostingListView::get_chunk_size(&chunks, &id_data, chunk_index);
            let chunk_bits = chunk_size * u8::BITS as usize / CHUNK_SIZE;
            bitpacker.compress_strictly_sorted(
                chunk.initial.checked_sub(1),
                &this_chunk,
                &mut id_data[chunk.offset as usize..chunk.offset as usize + chunk_size],
                chunk_bits as u8,
            );
        }

        CompressedPostingList {
            id_data,
            chunks,
            remainders,
            last_id: self.elements.last().map(|e| e.record_id),
            quantization_params,
        }
    }
}
/// Iterator over a [`CompressedPostingListView`] that lazily decompresses
/// one chunk at a time.
#[derive(Clone)]
pub struct CompressedPostingListIterator<'a, W: Weight> {
    list: CompressedPostingListView<'a, W>,
    /// If true, then `decompressed_chunk` contains the unpacked chunk for the current position.
    unpacked: bool,
    // Scratch buffer holding the ids of the currently unpacked chunk.
    decompressed_chunk: [PointOffsetType; CHUNK_SIZE],
    /// Offset inside the posting list along with optional current element.
    /// Defined as a tuple to ensure that we won't forget to update the element
    pos: (usize, Option<PointOffsetType>),
}
impl<'a, W: Weight> CompressedPostingListIterator<'a, W> {
    /// Create an iterator positioned at the start of `list`.
    #[inline]
    fn new(list: &CompressedPostingListView<'a, W>) -> Self {
        Self {
            list: list.clone(),
            unpacked: false,
            decompressed_chunk: [0; CHUNK_SIZE],
            pos: (0, None),
        }
    }

    /// Advance past the already-peeked element `peek` and return it as a
    /// plain [`PostingElement`].
    #[inline]
    fn next_from(&mut self, peek: PostingElementEx) -> PostingElement {
        if self.pos.0 / CHUNK_SIZE < self.list.chunks.len() {
            self.pos = (self.pos.0 + 1, None);
            // Crossing a chunk boundary invalidates the decompressed buffer.
            if self.pos.0.is_multiple_of(CHUNK_SIZE) {
                self.unpacked = false;
            }
        } else {
            self.pos = (self.pos.0 + 1, None);
        }

        peek.into()
    }

    /// Pop the next element, if any.
    #[inline]
    fn next(&mut self) -> Option<PostingElement> {
        let result = self.peek()?;
        Some(self.next_from(result))
    }
}
impl<W: Weight> PostingListIter for CompressedPostingListIterator<'_, W> {
    /// Current element without advancing; decompresses the current chunk on
    /// demand. `max_next_weight` is not stored in the compressed format and is
    /// returned as the default (see `reliable_max_next_weight`).
    #[inline]
    fn peek(&mut self) -> Option<PostingElementEx> {
        let pos = self.pos.0;
        if pos / CHUNK_SIZE < self.list.chunks_len() {
            if !self.unpacked {
                self.list
                    .decompress_chunk(pos / CHUNK_SIZE, &mut self.decompressed_chunk);
                self.unpacked = true;
            }

            return Some(PostingElementEx {
                record_id: self.decompressed_chunk[pos % CHUNK_SIZE],
                weight: self.list.get_weight(pos).to_f32(self.list.multiplier),
                max_next_weight: Default::default(),
            });
        }

        // Past the chunks: read from the remainder elements.
        self.list
            .get_remainder_id(pos - self.list.chunks_len() * CHUNK_SIZE)
            .map(|e| PostingElementEx {
                record_id: e.record_id,
                weight: e.weight.to_f32(self.list.multiplier),
                max_next_weight: Default::default(),
            })
    }

    #[inline]
    fn last_id(&self) -> Option<PointOffsetType> {
        self.list.last_id
    }

    /// Per-element storage size (quantized weight only).
    fn element_size(&self) -> usize {
        size_of::<W>()
    }

    /// Advance to the element with `record_id`; returns it if present,
    /// otherwise leaves the iterator on the first element with a greater id.
    fn skip_to(&mut self, record_id: PointOffsetType) -> Option<PostingElementEx> {
        // 1. Define which chunk we need to unpack (maybe it is current)
        // 2. If current, change the position to the element and do peek

        // Shortcut peeking into memory
        if let Some(current_record_id) = self.pos.1.as_ref()
            && record_id < *current_record_id
        {
            // We are already ahead
            return None;
        }

        // If None, we are already reading remainder
        let current_chunk_id_opt = self.list.chunk_id_by_position(self.pos.0);
        // Required chunk id
        let required_chunk_id = self.list.chunk_id_by_id(record_id);

        match (required_chunk_id, current_chunk_id_opt) {
            (IdChunkPosition::Chunk(chunk_id), Some(current_chunk_id)) => {
                match chunk_id.cmp(&current_chunk_id) {
                    Ordering::Less => {
                        // Chunk is already skipped
                        // Return None, don't change the position
                        return None;
                    }
                    Ordering::Equal => {
                        // Stay in the current (already unpacked) chunk, but never move backwards.
                        let min_pos = chunk_id * CHUNK_SIZE;
                        self.pos = (std::cmp::max(self.pos.0, min_pos), None);
                    }
                    Ordering::Greater => {
                        // Chunk is ahead, move to it
                        self.pos = (chunk_id * CHUNK_SIZE, None);
                        self.unpacked = false;
                    }
                }
            }
            (IdChunkPosition::Chunk(_), None) => {
                // We are already in the remainder, and we can't go back
                return None;
            }
            (IdChunkPosition::Before, _) => {
                // Don't change anything, as current `pos` is by definition higher
                return None;
            }
            (IdChunkPosition::After, _) => {
                // Go to after the chunks
                let min_pos = self.list.chunks_len() * CHUNK_SIZE;
                self.pos = (std::cmp::max(self.pos.0, min_pos), None);
                self.unpacked = false;
            }
        };

        // Linear scan within the target chunk/remainder.
        while let Some(current_element) = self.peek() {
            // Save the current element to avoid further peeking
            self.pos = (self.pos.0, Some(current_element.record_id));
            match current_element.record_id.cmp(&record_id) {
                Ordering::Equal => return Some(current_element),
                Ordering::Greater => return None,
                Ordering::Less => {
                    // Go to the next element
                    self.next_from(current_element);
                }
            }
        }

        None
    }

    #[inline]
    fn skip_to_end(&mut self) {
        self.pos = (
            self.list.chunks_len() * CHUNK_SIZE + self.list.remainder_len(),
            None,
        );
    }

    #[inline]
    fn len_to_end(&self) -> usize {
        self.list.len() - self.pos.0
    }

    #[inline]
    fn current_index(&self) -> usize {
        self.pos.0
    }

    /// Call `f` for every element with id <= `id`, starting at the current
    /// position; leaves the iterator on the first element past `id`.
    #[inline]
    fn for_each_till_id<Ctx: ?Sized>(
        &mut self,
        id: PointOffsetType,
        ctx: &mut Ctx,
        mut f: impl FnMut(&mut Ctx, PointOffsetType, DimWeight),
    ) {
        let mut pos = self.pos.0;

        // Iterate over compressed chunks
        let mut weights_buf = [0.0; CHUNK_SIZE];

        let mut need_unpack = !self.unpacked;
        while pos / CHUNK_SIZE < self.list.chunks_len() {
            if need_unpack {
                self.list
                    .decompress_chunk(pos / CHUNK_SIZE, &mut self.decompressed_chunk);
            }
            // All chunks after the first one must be unpacked.
            need_unpack = true;

            let start = pos % CHUNK_SIZE;
            let count = count_le_sorted(id, &self.decompressed_chunk[start..]);
            let weights = self.list.weights_range(pos, count);
            let weights =
                W::into_f32_slice(self.list.multiplier, weights, &mut weights_buf[..count]);

            for (idx, weight) in
                std::iter::zip(&self.decompressed_chunk[start..start + count], weights)
            {
                f(ctx, *idx, *weight);
            }
            pos += count;
            if start + count != CHUNK_SIZE {
                // Stopped mid-chunk: the decompressed buffer stays valid for `pos`.
                self.unpacked = true;
                self.pos = (pos, None);
                return;
            }
        }

        // Iterate over remainders
        for e in self
            .list
            .iter_remainder_from(pos - self.list.chunks_len() * CHUNK_SIZE)
        {
            if e.record_id > id {
                self.pos = (pos, None);
                return;
            }
            f(ctx, e.record_id, e.weight.to_f32(self.list.multiplier));
            pos += 1;
        }

        self.pos = (pos, None);
    }

    /// The compressed format does not store `max_next_weight`.
    fn reliable_max_next_weight() -> bool {
        false
    }

    fn into_std_iter(self) -> impl Iterator<Item = PostingElement> {
        CompressedPostingListStdIterator(self)
    }
}
/// Adapter exposing [`CompressedPostingListIterator`] as a standard [`Iterator`].
#[derive(Clone)]
pub struct CompressedPostingListStdIterator<'a, W: Weight>(CompressedPostingListIterator<'a, W>);
impl<W: Weight> Iterator for CompressedPostingListStdIterator<'_, W> {
    type Item = PostingElement;

    // Delegates to the inherent, chunk-aware `next`.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
/// Find the amount of elements in the sorted array that are less or equal to `val`. In other words,
/// the first index `i` such that `data[i] > val`, or `data.len()` if all elements are less or equal
/// to `val`.
fn count_le_sorted<T: Copy + Eq + Ord>(val: T, data: &[T]) -> usize {
    match data.last() {
        // Happy case: even the largest element is below `val`, so all count.
        Some(&largest) if largest < val => data.len(),
        // Empty slice: nothing to count.
        None => 0,
        // General case: first index whose element exceeds `val`.
        _ => data.partition_point(|&x| x <= val),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Element counts covering empty, partial-chunk, exact-chunk and
    // multi-chunk-plus-remainder layouts.
    const CASES: [usize; 6] = [0, 64, 128, 192, 256, 320];

    // Build `(id, weight)` records with ids offset by 10000.
    fn mk_case(count: usize) -> Vec<(PointOffsetType, DimWeight)> {
        (0..count)
            .map(|i| (i as u32 + 10000, i as DimWeight))
            .collect()
    }

    fn cases() -> Vec<Vec<(PointOffsetType, DimWeight)>> {
        CASES.iter().copied().map(mk_case).collect()
    }

    // Full round-trip: compress, then iterate and compare with the input.
    #[test]
    fn test_iter() {
        for case in cases() {
            let list = CompressedPostingList::<f32>::from(case.clone());

            let hw_counter = HardwareCounterCell::new();

            let mut iter = list.iter(&hw_counter);

            let mut count = 0;

            assert_eq!(iter.len_to_end(), case.len(), "len_to_end");

            while let Some(e) = iter.next() {
                assert_eq!(e.record_id, case[count].0);
                assert_eq!(e.weight, case[count].1);
                assert_eq!(iter.len_to_end(), case.len() - count - 1);
                count += 1;
            }
        }
    }

    // Exercise `for_each_till_id` over two consecutive ranges (i..j, j..k)
    // for all combinations of boundary cases.
    #[test]
    #[allow(clippy::needless_range_loop)] // for consistency
    fn test_try_till_id() {
        let hw_counter = HardwareCounterCell::new();

        for i in 0..CASES.len() {
            for j in i..CASES.len() {
                for k in j..CASES.len() {
                    eprintln!("\n\n\n{} {} {}", CASES[i], CASES[j], CASES[k]);
                    let case = mk_case(CASES[k]);
                    let pl = CompressedPostingList::<f32>::from(case.clone());
                    let mut iter = pl.iter(&hw_counter);

                    let mut data = Vec::new();
                    let mut counter = 0;

                    iter.for_each_till_id(
                        case.get(CASES[i]).map_or(PointOffsetType::MAX, |x| x.0) - 1,
                        &mut (),
                        |_, id, weight| {
                            eprintln!("  {id}");
                            data.push((id, weight));
                            counter += 1;
                        },
                    );
                    assert_eq!(data, &case[..CASES[i]]);
                    eprintln!(" ;");

                    let mut data = Vec::new();
                    let mut counter = 0;
                    iter.for_each_till_id(
                        case.get(CASES[j]).map_or(PointOffsetType::MAX, |x| x.0) - 1,
                        &mut (),
                        |_, id, weight| {
                            eprintln!("  {id}");
                            data.push((id, weight));
                            counter += 1;
                        },
                    );
                    assert_eq!(data, &case[CASES[i]..CASES[j]]);
                }
            }
        }
    }

    // `count_le_sorted` partitions the slice at the first element > val.
    #[test]
    fn test_count_le_sorted() {
        let data = [1, 2, 4, 5];
        for val in 0..9 {
            let pos = count_le_sorted(val, &data);
            assert!(data[..pos].iter().all(|&x| x <= val));
            assert!(data[pos..].iter().all(|&x| x > val));
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/posting_list.rs | lib/sparse/src/index/posting_list.rs | use std::cmp::max;
use common::types::PointOffsetType;
use ordered_float::OrderedFloat;
use super::posting_list_common::{
DEFAULT_MAX_NEXT_WEIGHT, PostingElement, PostingElementEx, PostingListIter,
};
use crate::common::types::DimWeight;
/// Uncompressed posting list for one sparse-vector dimension, kept sorted by id.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct PostingList {
    /// List of the posting elements ordered by id
    pub elements: Vec<PostingElementEx>,
}
impl PostingList {
    /// Test helper: build a posting list from `(id, weight)` records.
    #[cfg(test)]
    pub fn from(records: Vec<(PointOffsetType, DimWeight)>) -> PostingList {
        let mut posting_list = PostingBuilder::new();
        for (id, weight) in records {
            posting_list.add(id, weight);
        }
        posting_list.build()
    }

    /// Creates a new posting list with a single element.
    pub fn new_one(record_id: PointOffsetType, weight: DimWeight) -> PostingList {
        PostingList {
            elements: vec![PostingElementEx::new(record_id, weight)],
        }
    }

    /// Remove the element with `record_id` (if present) and repair the
    /// `max_next_weight` values of the elements before the removal point.
    pub fn delete(&mut self, record_id: PointOffsetType) {
        let index = self
            .elements
            .binary_search_by_key(&record_id, |e| e.record_id);
        if let Ok(found_index) = index {
            self.elements.remove(found_index);
            // The (possibly new) last element has no successor by definition.
            if let Some(last) = self.elements.last_mut() {
                last.max_next_weight = DEFAULT_MAX_NEXT_WEIGHT;
            }
            if found_index < self.elements.len() {
                // Removed from the middle: re-propagate from the successor slot.
                self.propagate_max_next_weight_to_the_left(found_index);
            } else if !self.elements.is_empty() {
                // Removed the last element: re-propagate from the new last.
                self.propagate_max_next_weight_to_the_left(self.elements.len() - 1);
            }
        }
    }

    /// Upsert a posting element into the posting list.
    ///
    /// Worst case is adding a new element at the end of the list with a very large weight.
    /// This forces to propagate it as potential max_next_weight to all the previous elements.
    pub fn upsert(&mut self, posting_element: PostingElementEx) {
        // find insertion point in sorted posting list (most expensive operation for large posting list)
        let index = self
            .elements
            .binary_search_by_key(&posting_element.record_id, |e| e.record_id);

        let modified_index = match index {
            Ok(found_index) => {
                // Update existing element for the same id
                let element = &mut self.elements[found_index];
                if element.weight == posting_element.weight {
                    // no need to update anything
                    None
                } else {
                    // the structure of the posting list is not changed, no need to update max_next_weight
                    element.weight = posting_element.weight;
                    Some(found_index)
                }
            }
            Err(insert_index) => {
                // Insert new element by shifting elements to the right
                self.elements.insert(insert_index, posting_element);
                // the structure of the posting list is changed, need to update max_next_weight
                if insert_index == self.elements.len() - 1 {
                    // inserted at the end
                    Some(insert_index)
                } else {
                    // inserted in the middle - need to propagated max_next_weight from the right
                    Some(insert_index + 1)
                }
            }
        };
        // Propagate max_next_weight update to the previous entries
        if let Some(modified_index) = modified_index {
            self.propagate_max_next_weight_to_the_left(modified_index);
        }
    }

    /// Propagates `max_next_weight` from the entry at `up_to_index` to previous entries.
    /// If an entry has a weight larger than `max_next_weight`, the propagation stops.
    fn propagate_max_next_weight_to_the_left(&mut self, up_to_index: usize) {
        // used element at `up_to_index` as the starting point
        let starting_element = &self.elements[up_to_index];
        // OrderedFloat gives a total order for the f32 comparison.
        let mut max_next_weight = max(
            OrderedFloat(starting_element.max_next_weight),
            OrderedFloat(starting_element.weight),
        )
        .0;

        // propagate max_next_weight update to the previous entries
        for element in self.elements[..up_to_index].iter_mut().rev() {
            // update max_next_weight for element
            element.max_next_weight = max_next_weight;
            max_next_weight = max_next_weight.max(element.weight);
        }
    }

    /// Iterator positioned at the start of the list.
    pub fn iter(&self) -> PostingListIterator<'_> {
        PostingListIterator::new(&self.elements)
    }
}
/// Accumulates `(id, weight)` pairs and builds a sorted [`PostingList`].
pub struct PostingBuilder {
    // Collected elements; sorted and annotated in `build`.
    elements: Vec<PostingElementEx>,
}
// `Default` simply delegates to the empty constructor.
impl Default for PostingBuilder {
    fn default() -> Self {
        Self::new()
    }
}
impl PostingBuilder {
    pub fn new() -> PostingBuilder {
        PostingBuilder {
            elements: Vec::new(),
        }
    }

    /// Add a new record to the posting list.
    pub fn add(&mut self, record_id: PointOffsetType, weight: DimWeight) {
        self.elements.push(PostingElementEx::new(record_id, weight));
    }

    /// Consume the builder and return the posting list.
    ///
    /// Sorts elements by id and fills in `max_next_weight` for each element
    /// (the maximum weight among all later elements). Panics on duplicate
    /// ids in debug builds.
    pub fn build(mut self) -> PostingList {
        // Sort by id
        self.elements.sort_unstable_by_key(|e| e.record_id);

        // Check for duplicates
        #[cfg(debug_assertions)]
        {
            if let Some(e) = self
                .elements
                .windows(2)
                .find(|e| e[0].record_id == e[1].record_id)
            {
                panic!("Duplicate id {} in posting list", e[0].record_id);
            }
        }

        // Calculate the `max_next_weight` for all elements starting from the end
        let mut max_next_weight = f32::NEG_INFINITY;
        for element in self.elements.iter_mut().rev() {
            element.max_next_weight = max_next_weight;
            max_next_weight = max_next_weight.max(element.weight);
        }

        PostingList {
            elements: self.elements,
        }
    }
}
/// Iterator over posting list elements offering skipping abilities to avoid full iteration.
#[derive(Debug, Clone)]
pub struct PostingListIterator<'a> {
    /// Borrowed, id-sorted elements being iterated.
    pub elements: &'a [PostingElementEx],
    /// Index of the next element to yield; equals `elements.len()` when exhausted.
    pub current_index: usize,
}
impl PostingListIter for PostingListIterator<'_> {
    /// Current element without advancing (cloned out of the slice).
    #[inline]
    fn peek(&mut self) -> Option<PostingElementEx> {
        self.elements.get(self.current_index).cloned()
    }

    #[inline]
    fn last_id(&self) -> Option<PointOffsetType> {
        self.elements.last().map(|e| e.record_id)
    }

    /// Per-element storage size (raw `DimWeight`).
    fn element_size(&self) -> usize {
        size_of::<DimWeight>()
    }

    #[inline]
    fn skip_to(&mut self, record_id: PointOffsetType) -> Option<PostingElementEx> {
        // Delegates to the inherent binary-search implementation.
        self.skip_to(record_id)
    }

    #[inline]
    fn skip_to_end(&mut self) {
        // Delegates to the inherent implementation.
        self.skip_to_end();
    }

    #[inline]
    fn len_to_end(&self) -> usize {
        self.len_to_end()
    }

    #[inline]
    fn current_index(&self) -> usize {
        self.current_index
    }

    /// Call `f` for every element with id <= `id`, starting from the current
    /// position; leaves the iterator on the first element past `id`.
    fn for_each_till_id<Ctx: ?Sized>(
        &mut self,
        id: PointOffsetType,
        ctx: &mut Ctx,
        mut f: impl FnMut(&mut Ctx, PointOffsetType, DimWeight),
    ) {
        let mut current_index = self.current_index;
        for element in &self.elements[current_index..] {
            if element.record_id > id {
                break;
            }
            f(ctx, element.record_id, element.weight);
            current_index += 1;
        }
        self.current_index = current_index;
    }

    /// The uncompressed format stores an exact `max_next_weight` per element.
    fn reliable_max_next_weight() -> bool {
        true
    }

    fn into_std_iter(self) -> impl Iterator<Item = PostingElement> {
        self.elements.iter().cloned().map(PostingElement::from)
    }
}
impl<'a> PostingListIterator<'a> {
    /// Create an iterator positioned at the start of `elements`.
    pub fn new(elements: &'a [PostingElementEx]) -> PostingListIterator<'a> {
        PostingListIterator {
            elements,
            current_index: 0,
        }
    }

    /// Advances the iterator to the next element.
    pub fn advance(&mut self) {
        if self.current_index < self.elements.len() {
            self.current_index += 1;
        }
    }

    /// Advances the iterator by `count` elements.
    pub fn advance_by(&mut self, count: usize) {
        self.current_index = (self.current_index + count).min(self.elements.len());
    }

    /// Returns the next element without advancing the iterator.
    pub fn peek(&self) -> Option<&PostingElementEx> {
        self.elements.get(self.current_index)
    }

    /// Returns the number of elements from the current position to the end of the list.
    pub fn len_to_end(&self) -> usize {
        self.elements.len() - self.current_index
    }

    /// Tries to find the element with ID == id and returns it.
    /// If the element is not found, the iterator is advanced to the next element with ID > id
    /// and None is returned.
    /// If the iterator is already at the end, None is returned.
    /// If the iterator skipped to the end, None is returned and current index is set to the length of the list.
    /// Uses binary search.
    pub fn skip_to(&mut self, id: PointOffsetType) -> Option<PostingElementEx> {
        // Check if we are already at the end
        if self.current_index >= self.elements.len() {
            return None;
        }

        // Use binary search to find the next element with ID > id
        let next_element =
            self.elements[self.current_index..].binary_search_by(|e| e.record_id.cmp(&id));

        match next_element {
            Ok(found_offset) => {
                // Exact match: land on it and return a clone.
                self.current_index += found_offset;
                Some(self.elements[self.current_index].clone())
            }
            Err(insert_index) => {
                // No exact match: land on the first element with a greater id.
                self.current_index += insert_index;
                None
            }
        }
    }

    /// Skips to the end of the posting list and returns None.
    pub fn skip_to_end(&mut self) -> Option<&PostingElementEx> {
        self.current_index = self.elements.len();
        None
    }
}
#[cfg(test)]
mod tests {
use itertools::Itertools;
use super::*;
use crate::index::posting_list_common::DEFAULT_MAX_NEXT_WEIGHT;
#[test]
fn test_posting_operations() {
let mut builder = PostingBuilder::new();
builder.add(1, 1.0);
builder.add(2, 2.1);
builder.add(5, 5.0);
builder.add(3, 2.0);
builder.add(8, 3.4);
builder.add(10, 3.0);
builder.add(20, 3.0);
builder.add(7, 4.0);
builder.add(11, 3.0);
let posting_list = builder.build();
let mut iter = PostingListIterator::new(&posting_list.elements);
assert_eq!(iter.peek().unwrap().record_id, 1);
iter.advance();
assert_eq!(iter.peek().unwrap().record_id, 2);
iter.advance();
assert_eq!(iter.peek().unwrap().record_id, 3);
assert_eq!(iter.skip_to(7).unwrap().record_id, 7);
assert_eq!(iter.peek().unwrap().record_id, 7);
assert!(iter.skip_to(9).is_none());
assert_eq!(iter.peek().unwrap().record_id, 10);
assert!(iter.skip_to(20).is_some());
assert_eq!(iter.peek().unwrap().record_id, 20);
assert!(iter.skip_to(21).is_none());
assert!(iter.peek().is_none());
}
#[test]
fn test_upsert_insert_last() {
let mut builder = PostingBuilder::new();
builder.add(1, 1.0);
builder.add(3, 3.0);
builder.add(2, 2.0);
let mut posting_list = builder.build();
// sorted by id
assert_eq!(posting_list.elements[0].record_id, 1);
assert_eq!(posting_list.elements[0].weight, 1.0);
assert_eq!(posting_list.elements[0].max_next_weight, 3.0);
assert_eq!(posting_list.elements[1].record_id, 2);
assert_eq!(posting_list.elements[1].weight, 2.0);
assert_eq!(posting_list.elements[1].max_next_weight, 3.0);
assert_eq!(posting_list.elements[2].record_id, 3);
assert_eq!(posting_list.elements[2].weight, 3.0);
assert_eq!(
posting_list.elements[2].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
// insert mew last element
posting_list.upsert(PostingElementEx::new(4, 4.0));
assert_eq!(posting_list.elements[3].record_id, 4);
assert_eq!(posting_list.elements[3].weight, 4.0);
assert_eq!(
posting_list.elements[3].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
// must update max_next_weight of previous elements if necessary
for element in posting_list.elements.iter().take(3) {
assert_eq!(element.max_next_weight, 4.0);
}
}
#[test]
fn test_upsert_insert_in_gap() {
let mut builder = PostingBuilder::new();
builder.add(1, 1.0);
builder.add(3, 3.0);
builder.add(2, 2.0);
// no entry for 4
builder.add(5, 5.0);
let mut posting_list = builder.build();
// sorted by id
assert_eq!(posting_list.elements[0].record_id, 1);
assert_eq!(posting_list.elements[0].weight, 1.0);
assert_eq!(posting_list.elements[0].max_next_weight, 5.0);
assert_eq!(posting_list.elements[1].record_id, 2);
assert_eq!(posting_list.elements[1].weight, 2.0);
assert_eq!(posting_list.elements[1].max_next_weight, 5.0);
assert_eq!(posting_list.elements[2].record_id, 3);
assert_eq!(posting_list.elements[2].weight, 3.0);
assert_eq!(posting_list.elements[2].max_next_weight, 5.0);
assert_eq!(posting_list.elements[3].record_id, 5);
assert_eq!(posting_list.elements[3].weight, 5.0);
assert_eq!(
posting_list.elements[3].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
// insert mew last element
posting_list.upsert(PostingElementEx::new(4, 4.0));
// `4` is shifted to the right
assert_eq!(posting_list.elements[4].record_id, 5);
assert_eq!(posting_list.elements[4].weight, 5.0);
assert_eq!(
posting_list.elements[4].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
// new element
assert_eq!(posting_list.elements[3].record_id, 4);
assert_eq!(posting_list.elements[3].weight, 4.0);
// must update max_next_weight of previous elements
for element in posting_list.elements.iter().take(4) {
assert_eq!(element.max_next_weight, 5.0);
}
}
#[test]
fn test_upsert_update() {
let mut builder = PostingBuilder::new();
builder.add(1, 1.0);
builder.add(3, 3.0);
builder.add(2, 2.0);
let mut posting_list = builder.build();
// sorted by id
assert_eq!(posting_list.elements[0].record_id, 1);
assert_eq!(posting_list.elements[0].weight, 1.0);
assert_eq!(posting_list.elements[0].max_next_weight, 3.0);
assert_eq!(posting_list.elements[1].record_id, 2);
assert_eq!(posting_list.elements[1].weight, 2.0);
assert_eq!(posting_list.elements[1].max_next_weight, 3.0);
assert_eq!(posting_list.elements[2].record_id, 3);
assert_eq!(posting_list.elements[2].weight, 3.0);
assert_eq!(
posting_list.elements[2].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
// increase weight of existing element
posting_list.upsert(PostingElementEx::new(2, 4.0));
assert_eq!(posting_list.elements[0].record_id, 1);
assert_eq!(posting_list.elements[0].weight, 1.0);
assert_eq!(posting_list.elements[0].max_next_weight, 4.0); // update propagated
assert_eq!(posting_list.elements[1].record_id, 2);
assert_eq!(posting_list.elements[1].weight, 4.0); // updated
assert_eq!(posting_list.elements[1].max_next_weight, 3.0);
assert_eq!(posting_list.elements[2].record_id, 3);
assert_eq!(posting_list.elements[2].weight, 3.0);
assert_eq!(
posting_list.elements[2].max_next_weight,
DEFAULT_MAX_NEXT_WEIGHT
);
}
#[test]
fn test_random_delete() {
use rand::Rng;
use rand::seq::SliceRandom;
let mut rng = rand::rng();
for _ in 0..1000 {
let mut ids = Vec::new();
let mut cur_id = 0;
for _ in 0..32 {
cur_id += rng.random_range(1..10);
ids.push(cur_id);
}
ids.shuffle(&mut rng);
let random_id = ids[rng.random_range(0..ids.len())];
let mut builder1 = PostingBuilder::new();
let mut builder2 = PostingBuilder::new();
for id in ids {
let val = rng.random_range(0..100) as f32 / 10.0;
builder1.add(id, val);
if id != random_id {
builder2.add(id, val);
}
}
let mut posting_list1 = builder1.build();
posting_list1.delete(random_id);
let posting_list2 = builder2.build();
// Ok
assert_eq!(
posting_list1
.elements
.iter()
.map(|e| e.record_id)
.collect_vec(),
posting_list2
.elements
.iter()
.map(|e| e.record_id)
.collect_vec(),
);
assert_eq!(
posting_list1
.elements
.iter()
.map(|e| e.weight)
.collect_vec(),
posting_list2
.elements
.iter()
.map(|e| e.weight)
.collect_vec(),
);
// Fail
assert_eq!(
posting_list1
.elements
.iter()
.map(|e| e.max_next_weight)
.collect_vec(),
posting_list2
.elements
.iter()
.map(|e| e.max_next_weight)
.collect_vec(),
);
// Ok (at least they won't break pruning logic)
assert!(
std::iter::zip(&posting_list1.elements, &posting_list2.elements,)
.all(|(e1, e2)| e1.max_next_weight >= e2.max_next_weight),
);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/mod.rs | lib/sparse/src/index/mod.rs | pub mod compressed_posting_list;
pub mod inverted_index;
pub mod loaders;
pub mod posting_list;
pub mod posting_list_common;
pub mod search_context;
#[cfg(test)]
mod tests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/posting_list_common.rs | lib/sparse/src/index/posting_list_common.rs | use common::types::PointOffsetType;
use crate::common::types::DimWeight;
pub const DEFAULT_MAX_NEXT_WEIGHT: DimWeight = f32::NEG_INFINITY;
#[derive(Debug, Clone, PartialEq)]
pub struct GenericPostingElement<W> {
/// Record ID
pub record_id: PointOffsetType,
/// Weight of the record in the dimension
pub weight: W,
}
#[derive(Debug, Clone, PartialEq)]
pub struct PostingElement {
/// Record ID
pub record_id: PointOffsetType,
/// Weight of the record in the dimension
pub weight: DimWeight,
}
#[derive(Debug, Clone, PartialEq)]
pub struct PostingElementEx {
/// Record ID
pub record_id: PointOffsetType,
/// Weight of the record in the dimension
pub weight: DimWeight,
/// Max weight of the next elements in the posting list.
pub max_next_weight: DimWeight,
}
impl PostingElementEx {
/// Initialize negative infinity as max_next_weight.
/// Needs to be updated at insertion time.
pub(crate) fn new(record_id: PointOffsetType, weight: DimWeight) -> PostingElementEx {
PostingElementEx {
record_id,
weight,
max_next_weight: DEFAULT_MAX_NEXT_WEIGHT,
}
}
}
impl From<PostingElementEx> for PostingElement {
fn from(element: PostingElementEx) -> PostingElement {
PostingElement {
record_id: element.record_id,
weight: element.weight,
}
}
}
pub trait PostingListIter {
fn peek(&mut self) -> Option<PostingElementEx>;
fn last_id(&self) -> Option<PointOffsetType>;
/// Size of the weight element
fn element_size(&self) -> usize;
/// Tries to find the element with ID == id and returns it.
/// If the element is not found, the iterator is advanced to the next element with ID > id
/// and None is returned.
/// If the iterator is already at the end, None is returned.
fn skip_to(&mut self, record_id: PointOffsetType) -> Option<PostingElementEx>;
fn skip_to_end(&mut self);
fn len_to_end(&self) -> usize;
fn current_index(&self) -> usize;
/// Iterate over the posting list until `id` is reached (inclusive).
fn for_each_till_id<Ctx: ?Sized>(
&mut self,
id: PointOffsetType,
ctx: &mut Ctx,
f: impl FnMut(&mut Ctx, PointOffsetType, DimWeight),
);
/// Whether the max_next_weight is reliable.
fn reliable_max_next_weight() -> bool;
fn into_std_iter(self) -> impl Iterator<Item = PostingElement>;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/loaders.rs | lib/sparse/src/index/loaders.rs | use std::collections::HashMap;
use std::io::{self, BufRead as _, BufReader, Lines};
use std::mem::size_of;
use std::path::Path;
use fs_err::File;
use memmap2::Mmap;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_ops::{open_read_mmap, transmute_from_u8, transmute_from_u8_to_slice};
use validator::ValidationErrors;
use crate::common::sparse_vector::SparseVector;
/// Compressed Sparse Row matrix, backed by memory-mapped file.
///
/// The layout of the memory-mapped file is as follows:
///
/// | name | type | size | start |
/// |---------|---------------|------------|---------------------|
/// | nrow | `u64` | 8 | 0 |
/// | ncol | `u64` | 8 | 8 |
/// | nnz | `u64` | 8 | 16 |
/// | indptr | `u64[nrow+1]` | 8*(nrow+1) | 24 |
/// | indices | `u32[nnz]` | 4*nnz | 24+8*(nrow+1) |
/// | data | `u32[nnz]` | 4*nnz | 24+8*(nrow+1)+4*nnz |
pub struct Csr {
mmap: Mmap,
nrow: usize,
nnz: usize,
intptr: Vec<u64>,
}
const CSR_HEADER_SIZE: usize = size_of::<u64>() * 3;
impl Csr {
pub fn open(path: impl AsRef<Path>) -> io::Result<Self> {
Self::from_mmap(open_read_mmap(
path.as_ref(),
AdviceSetting::from(Advice::Normal),
false,
)?)
}
#[inline]
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> usize {
self.nrow
}
pub fn iter(&self) -> CsrIter<'_> {
CsrIter { csr: self, row: 0 }
}
fn from_mmap(mmap: Mmap) -> io::Result<Self> {
let (nrow, ncol, nnz) =
transmute_from_u8::<(u64, u64, u64)>(&mmap.as_ref()[..CSR_HEADER_SIZE]);
let (nrow, _ncol, nnz) = (*nrow as usize, *ncol as usize, *nnz as usize);
let indptr = Vec::from(transmute_from_u8_to_slice::<u64>(
&mmap.as_ref()[CSR_HEADER_SIZE..CSR_HEADER_SIZE + size_of::<u64>() * (nrow + 1)],
));
if !indptr.windows(2).all(|w| w[0] <= w[1]) || indptr.last() != Some(&(nnz as u64)) {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Invalid indptr array",
));
}
Ok(Self {
mmap,
nrow,
nnz,
intptr: indptr,
})
}
#[inline]
unsafe fn vec(&self, row: usize) -> Result<SparseVector, ValidationErrors> {
unsafe {
let start = *self.intptr.get_unchecked(row) as usize;
let end = *self.intptr.get_unchecked(row + 1) as usize;
let mut pos = CSR_HEADER_SIZE + size_of::<u64>() * (self.nrow + 1);
let indices = transmute_from_u8_to_slice::<u32>(
self.mmap
.as_ref()
.get_unchecked(pos + size_of::<u32>() * start..pos + size_of::<u32>() * end),
);
pos += size_of::<u32>() * self.nnz;
let data = transmute_from_u8_to_slice::<f32>(
self.mmap
.as_ref()
.get_unchecked(pos + size_of::<f32>() * start..pos + size_of::<f32>() * end),
);
SparseVector::new(indices.to_vec(), data.to_vec())
}
}
}
/// Iterator over the rows of a CSR matrix.
pub struct CsrIter<'a> {
csr: &'a Csr,
row: usize,
}
impl Iterator for CsrIter<'_> {
type Item = Result<SparseVector, ValidationErrors>;
fn next(&mut self) -> Option<Self::Item> {
(self.row < self.csr.nrow).then(|| {
let vec = unsafe { self.csr.vec(self.row) };
self.row += 1;
vec
})
}
}
impl ExactSizeIterator for CsrIter<'_> {
fn len(&self) -> usize {
self.csr.nrow - self.row
}
}
pub fn load_csr_vecs(path: impl AsRef<Path>) -> io::Result<Vec<SparseVector>> {
Csr::open(path)?
.iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}
/// Stream of sparse vectors in JSON format.
pub struct JsonReader(Lines<BufReader<File>>);
impl JsonReader {
pub fn open(path: impl AsRef<Path>) -> io::Result<Self> {
let path = path.as_ref().to_path_buf();
Ok(JsonReader(BufReader::new(File::open(path)?).lines()))
}
}
impl Iterator for JsonReader {
type Item = Result<SparseVector, io::Error>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|line| {
line.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|line| {
let data: HashMap<String, f32> = serde_json::from_str(&line)?;
SparseVector::new(
data.keys()
.map(|k| k.parse())
.collect::<Result<Vec<_>, _>>()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?,
data.values().copied().collect(),
)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
})
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_compressed_mmap.rs | lib/sparse/src/index/inverted_index/inverted_index_compressed_mmap.rs | use std::borrow::Cow;
use std::io::{BufWriter, Write as _};
use std::marker::PhantomData;
use std::mem::size_of;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use io::file_operations::{atomic_save_json, read_json};
use io::storage_version::StorageVersion;
use memmap2::Mmap;
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting, Madviseable};
use memory::mmap_ops::{
create_and_ensure_length, open_read_mmap, transmute_from_u8_to_slice, transmute_to_u8,
transmute_to_u8_slice,
};
use serde::{Deserialize, Serialize};
use super::INDEX_FILE_NAME;
use super::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimId, DimOffset, Weight};
use crate::index::compressed_posting_list::{
CompressedPostingChunk, CompressedPostingListIterator, CompressedPostingListView,
};
use crate::index::inverted_index::InvertedIndex;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use crate::index::posting_list_common::GenericPostingElement;
const INDEX_CONFIG_FILE_NAME: &str = "inverted_index_config.json";
pub struct Version;
impl StorageVersion for Version {
fn current_raw() -> &'static str {
"0.2.0"
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct InvertedIndexFileHeader {
/// Number of posting lists
pub posting_count: usize,
/// Number of unique vectors indexed
pub vector_count: usize,
/// Total size of all searchable sparse vectors in bytes
// This is an option because earlier versions of the index did not store this information.
// In case it is not present, it will be calculated on load.
#[serde(skip_serializing_if = "Option::is_none")]
pub total_sparse_size: Option<usize>,
}
/// Inverted flatten index from dimension id to posting list
#[derive(Debug)]
pub struct InvertedIndexCompressedMmap<W> {
path: PathBuf,
mmap: Arc<Mmap>,
pub file_header: InvertedIndexFileHeader,
_phantom: PhantomData<W>,
}
#[derive(Debug, Default, Clone)]
#[repr(C)]
struct PostingListFileHeader<W: Weight> {
pub ids_start: u64,
pub last_id: u32,
/// Possible values: 0, 4, 8, ..., 512.
/// Step = 4 = `BLOCK_LEN / u32::BITS` = `128 / 32`.
/// Max = 512 = `BLOCK_LEN * size_of::<u32>()` = `128 * 4`.
pub ids_len: u32,
pub chunks_count: u32,
pub quantization_params: W::QuantizationParams,
}
impl<W: Weight> InvertedIndex for InvertedIndexCompressedMmap<W> {
type Iter<'a> = CompressedPostingListIterator<'a, W>;
type Version = Version;
fn is_on_disk(&self) -> bool {
true
}
fn open(path: &Path) -> std::io::Result<Self> {
Self::load(path)
}
fn save(&self, path: &Path) -> std::io::Result<()> {
debug_assert_eq!(path, self.path);
// If Self instance exists, it's either constructed by using `open()` (which reads index
// files), or using `from_ram_index()` (which writes them). Both assume that the files
// exist. If any of the files are missing, then something went wrong.
for file in Self::files(path) {
debug_assert!(file.exists());
}
Ok(())
}
fn get<'a>(
&'a self,
id: DimOffset,
hw_counter: &'a HardwareCounterCell,
) -> Option<CompressedPostingListIterator<'a, W>> {
self.get(id, hw_counter)
.map(|posting_list| posting_list.iter())
}
fn len(&self) -> usize {
self.file_header.posting_count
}
fn posting_list_len(&self, id: &DimOffset, hw_counter: &HardwareCounterCell) -> Option<usize> {
self.get(*id, hw_counter)
.map(|posting_list| posting_list.len())
}
fn files(path: &Path) -> Vec<PathBuf> {
vec![
Self::index_file_path(path),
Self::index_config_file_path(path),
]
}
fn immutable_files(path: &Path) -> Vec<PathBuf> {
// `InvertedIndexCompressedMmap` is always immutable
Self::files(path)
}
fn remove(&mut self, _id: PointOffsetType, _old_vector: RemappedSparseVector) {
panic!("Cannot remove from a read-only Mmap inverted index")
}
fn upsert(
&mut self,
_id: PointOffsetType,
_vector: RemappedSparseVector,
_old_vector: Option<RemappedSparseVector>,
) {
panic!("Cannot upsert into a read-only Mmap inverted index")
}
fn from_ram_index<P: AsRef<Path>>(
ram_index: Cow<InvertedIndexRam>,
path: P,
) -> std::io::Result<Self> {
let index = InvertedIndexCompressedImmutableRam::from_ram_index(ram_index, &path)?;
Self::convert_and_save(&index, path)
}
fn vector_count(&self) -> usize {
self.file_header.vector_count
}
fn total_sparse_vectors_size(&self) -> usize {
debug_assert!(
self.file_header.total_sparse_size.is_some(),
"The field should be populated from the file, or on load"
);
self.file_header.total_sparse_size.unwrap_or(0)
}
fn max_index(&self) -> Option<DimId> {
match self.file_header.posting_count {
0 => None,
len => Some(len as DimId - 1),
}
}
}
impl<W: Weight> InvertedIndexCompressedMmap<W> {
const HEADER_SIZE: usize = size_of::<PostingListFileHeader<W>>();
pub fn index_file_path(path: &Path) -> PathBuf {
path.join(INDEX_FILE_NAME)
}
pub fn index_config_file_path(path: &Path) -> PathBuf {
path.join(INDEX_CONFIG_FILE_NAME)
}
pub fn get<'a>(
&'a self,
id: DimId,
hw_counter: &'a HardwareCounterCell,
) -> Option<CompressedPostingListView<'a, W>> {
// check that the id is not out of bounds (posting_count includes the empty zeroth entry)
if id >= self.file_header.posting_count as DimId {
return None;
}
let header: PostingListFileHeader<W> = self
.slice_part::<PostingListFileHeader<W>>(u64::from(id) * Self::HEADER_SIZE as u64, 1u32)
[0]
.clone();
hw_counter.vector_io_read().incr_delta(Self::HEADER_SIZE);
let remainders_start = header.ids_start
+ u64::from(header.ids_len)
+ u64::from(header.chunks_count) * size_of::<CompressedPostingChunk<W>>() as u64;
let remainders_end = if id + 1 < self.file_header.posting_count as DimId {
self.slice_part::<PostingListFileHeader<W>>(
u64::from(id + 1) * Self::HEADER_SIZE as u64,
1u32,
)[0]
.ids_start
} else {
self.mmap.len() as u64
};
if remainders_end
.checked_sub(remainders_start)
.is_some_and(|len| len % size_of::<GenericPostingElement<W>>() as u64 != 0)
{
return None;
}
Some(CompressedPostingListView::new(
self.slice_part(header.ids_start, header.ids_len),
self.slice_part(
header.ids_start + u64::from(header.ids_len),
header.chunks_count,
),
transmute_from_u8_to_slice(
&self.mmap[remainders_start as usize..remainders_end as usize],
),
header.last_id.checked_sub(1),
header.quantization_params,
hw_counter,
))
}
fn slice_part<T>(&self, start: impl Into<u64>, count: impl Into<u64>) -> &[T] {
let start = start.into() as usize;
let end = start + count.into() as usize * size_of::<T>();
transmute_from_u8_to_slice(&self.mmap[start..end])
}
pub fn convert_and_save<P: AsRef<Path>>(
index: &InvertedIndexCompressedImmutableRam<W>,
path: P,
) -> std::io::Result<Self> {
let total_posting_headers_size =
index.postings.as_slice().len() * size_of::<PostingListFileHeader<W>>();
// Ignore HW on load
let hw_counter = HardwareCounterCell::disposable();
let file_length = total_posting_headers_size
+ index
.postings
.as_slice()
.iter()
.map(|p| p.view(&hw_counter).store_size().total)
.sum::<usize>();
let file_path = Self::index_file_path(path.as_ref());
let file = create_and_ensure_length(file_path.as_ref(), file_length)?;
let mut buf = BufWriter::new(file);
// Save posting headers
let mut offset: usize = total_posting_headers_size;
for posting in index.postings.as_slice() {
let store_size = posting.view(&hw_counter).store_size();
let posting_header = PostingListFileHeader::<W> {
ids_start: offset as u64,
ids_len: store_size.id_data_bytes as u32,
chunks_count: store_size.chunks_count as u32,
last_id: posting.view(&hw_counter).last_id().map_or(0, |id| id + 1),
quantization_params: posting.view(&hw_counter).multiplier(),
};
buf.write_all(transmute_to_u8(&posting_header))?;
offset += store_size.total;
}
// Save posting elements
for posting in index.postings.as_slice() {
let posting_view = posting.view(&hw_counter);
let (id_data, chunks, remainders) = posting_view.parts();
buf.write_all(id_data)?;
buf.write_all(transmute_to_u8_slice(chunks))?;
buf.write_all(transmute_to_u8_slice(remainders))?;
}
// Explicitly fsync file contents to ensure durability
buf.flush()?;
let file = buf.into_inner().unwrap();
file.sync_all()?;
// save header properties
let file_header = InvertedIndexFileHeader {
posting_count: index.postings.as_slice().len(),
vector_count: index.vector_count,
total_sparse_size: Some(index.total_sparse_size),
};
atomic_save_json(&Self::index_config_file_path(path.as_ref()), &file_header)?;
Ok(Self {
path: path.as_ref().to_owned(),
mmap: Arc::new(open_read_mmap(
file_path.as_ref(),
AdviceSetting::Global,
false,
)?),
file_header,
_phantom: PhantomData,
})
}
pub fn load<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
// read index config file
let config_file_path = Self::index_config_file_path(path.as_ref());
// if the file header does not exist, the index is malformed
let file_header: InvertedIndexFileHeader = read_json(&config_file_path)?;
// read index data into mmap
let file_path = Self::index_file_path(path.as_ref());
let mmap = open_read_mmap(
file_path.as_ref(),
AdviceSetting::from(Advice::Normal),
false,
)?;
let mut index = Self {
path: path.as_ref().to_owned(),
mmap: Arc::new(mmap),
file_header,
_phantom: PhantomData,
};
let hw_counter = HardwareCounterCell::disposable();
if index.file_header.total_sparse_size.is_none() {
index.file_header.total_sparse_size =
Some(index.calculate_total_sparse_size(&hw_counter));
atomic_save_json(&config_file_path, &index.file_header)?;
}
Ok(index)
}
fn calculate_total_sparse_size(&self, hw_counter: &HardwareCounterCell) -> usize {
(0..self.file_header.posting_count as DimId)
.filter_map(|id| {
self.get(id, hw_counter)
.map(|posting| posting.store_size().total)
})
.sum()
}
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) -> std::io::Result<()> {
self.mmap.populate();
Ok(())
}
/// Drop disk cache.
pub fn clear_cache(&self) -> std::io::Result<()> {
clear_disk_cache(&self.path)
}
}
#[cfg(test)]
mod tests {
use tempfile::Builder;
use super::*;
use crate::common::types::QuantizedU8;
use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
fn compare_indexes<W: Weight>(
inverted_index_ram: &InvertedIndexCompressedImmutableRam<W>,
inverted_index_mmap: &InvertedIndexCompressedMmap<W>,
) {
let hw_counter = HardwareCounterCell::new();
for id in 0..inverted_index_ram.postings.len() as DimId {
let posting_list_ram = inverted_index_ram
.postings
.get(id as usize)
.unwrap()
.view(&hw_counter);
let posting_list_mmap = inverted_index_mmap.get(id, &hw_counter).unwrap();
let mmap_parts = posting_list_mmap.parts();
let ram_parts = posting_list_ram.parts();
assert_eq!(mmap_parts, ram_parts);
}
}
#[test]
fn test_inverted_index_mmap() {
check_inverted_index_mmap::<f32>();
check_inverted_index_mmap::<half::f16>();
check_inverted_index_mmap::<u8>();
check_inverted_index_mmap::<QuantizedU8>();
}
fn check_inverted_index_mmap<W: Weight>() {
let hw_counter = HardwareCounterCell::new();
// skip 4th dimension
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0), (5, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0), (5, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
builder.add(4, [(1, 1.0), (2, 1.0)].into());
builder.add(5, [(1, 2.0)].into());
builder.add(6, [(1, 3.0)].into());
builder.add(7, [(1, 4.0)].into());
builder.add(8, [(1, 5.0)].into());
builder.add(9, [(1, 6.0)].into());
let inverted_index_ram = builder.build();
let tmp_dir_path = Builder::new().prefix("test_index_dir1").tempdir().unwrap();
let inverted_index_ram = InvertedIndexCompressedImmutableRam::from_ram_index(
Cow::Borrowed(&inverted_index_ram),
&tmp_dir_path,
)
.unwrap();
let tmp_dir_path = Builder::new().prefix("test_index_dir2").tempdir().unwrap();
{
let inverted_index_mmap = InvertedIndexCompressedMmap::<W>::convert_and_save(
&inverted_index_ram,
&tmp_dir_path,
)
.unwrap();
compare_indexes(&inverted_index_ram, &inverted_index_mmap);
}
let inverted_index_mmap = InvertedIndexCompressedMmap::<W>::load(&tmp_dir_path).unwrap();
// posting_count: 0th entry is always empty + 1st + 2nd + 3rd + 4th empty + 5th
assert_eq!(inverted_index_mmap.file_header.posting_count, 6);
assert_eq!(inverted_index_mmap.file_header.vector_count, 9);
compare_indexes(&inverted_index_ram, &inverted_index_mmap);
assert!(inverted_index_mmap.get(0, &hw_counter).unwrap().is_empty()); // the first entry is always empty as dimension ids start at 1
assert_eq!(inverted_index_mmap.get(1, &hw_counter).unwrap().len(), 9);
assert_eq!(inverted_index_mmap.get(2, &hw_counter).unwrap().len(), 4);
assert_eq!(inverted_index_mmap.get(3, &hw_counter).unwrap().len(), 3);
assert!(inverted_index_mmap.get(4, &hw_counter).unwrap().is_empty()); // return empty posting list info for intermediary empty ids
assert_eq!(inverted_index_mmap.get(5, &hw_counter).unwrap().len(), 2);
// index after the last values are None
assert!(inverted_index_mmap.get(6, &hw_counter).is_none());
assert!(inverted_index_mmap.get(7, &hw_counter).is_none());
assert!(inverted_index_mmap.get(100, &hw_counter).is_none());
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_ram.rs | lib/sparse/src/index/inverted_index/inverted_index_ram.rs | use std::borrow::Cow;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use io::storage_version::StorageVersion;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimId, DimOffset};
use crate::index::inverted_index::InvertedIndex;
use crate::index::posting_list::{PostingList, PostingListIterator};
use crate::index::posting_list_common::PostingElementEx;
pub struct Version;
impl StorageVersion for Version {
fn current_raw() -> &'static str {
panic!("InvertedIndexRam is not supposed to be versioned");
}
}
/// Inverted flatten index from dimension id to posting list
#[derive(Debug, Clone, PartialEq)]
pub struct InvertedIndexRam {
/// Posting lists for each dimension flattened (dimension id -> posting list)
/// Gaps are filled with empty posting lists
pub postings: Vec<PostingList>,
/// Number of unique indexed vectors
/// pre-computed on build and upsert to avoid having to traverse the posting lists.
pub vector_count: usize,
/// Total size of all searchable sparse vectors in bytes
pub total_sparse_size: usize,
}
impl InvertedIndex for InvertedIndexRam {
type Iter<'a> = PostingListIterator<'a>;
type Version = Version;
fn is_on_disk(&self) -> bool {
false
}
fn open(_path: &Path) -> std::io::Result<Self> {
panic!("InvertedIndexRam is not supposed to be loaded");
}
fn save(&self, _path: &Path) -> std::io::Result<()> {
panic!("InvertedIndexRam is not supposed to be saved");
}
fn get<'a>(
&'a self,
id: DimOffset,
_hw_counter: &'a HardwareCounterCell,
) -> Option<PostingListIterator<'a>> {
self.get(&id).map(|posting_list| posting_list.iter())
}
fn len(&self) -> usize {
self.postings.len()
}
fn posting_list_len(&self, id: &DimId, _hw_counter: &HardwareCounterCell) -> Option<usize> {
self.get(id).map(|posting_list| posting_list.elements.len())
}
fn files(_path: &Path) -> Vec<PathBuf> {
Vec::new()
}
fn immutable_files(_path: &Path) -> Vec<PathBuf> {
// `InvertedIndexRam` has no files
Vec::new()
}
fn remove(&mut self, id: PointOffsetType, old_vector: RemappedSparseVector) {
let old_vector_size = old_vector.len() * size_of::<PostingElementEx>();
for dim_id in old_vector.indices {
if let Some(posting) = self.postings.get_mut(dim_id as usize) {
posting.delete(id);
} else {
log::debug!("Posting list for dimension {dim_id} not found");
}
}
self.total_sparse_size = self.total_sparse_size.saturating_sub(old_vector_size);
self.vector_count = self.vector_count.saturating_sub(1);
}
fn upsert(
&mut self,
id: PointOffsetType,
vector: RemappedSparseVector,
old_vector: Option<RemappedSparseVector>,
) {
self.upsert(id, vector, old_vector);
}
fn from_ram_index<P: AsRef<Path>>(
ram_index: Cow<InvertedIndexRam>,
_path: P,
) -> std::io::Result<Self> {
Ok(ram_index.into_owned())
}
fn vector_count(&self) -> usize {
self.vector_count
}
fn total_sparse_vectors_size(&self) -> usize {
self.total_sparse_size
}
fn max_index(&self) -> Option<DimId> {
match self.postings.len() {
0 => None,
len => Some(len as DimId - 1),
}
}
}
impl InvertedIndexRam {
/// New empty inverted index
pub fn empty() -> InvertedIndexRam {
InvertedIndexRam {
postings: Vec::new(),
vector_count: 0,
total_sparse_size: 0,
}
}
/// Get posting list for dimension id
pub fn get(&self, id: &DimId) -> Option<&PostingList> {
self.postings.get((*id) as usize)
}
/// Upsert a vector into the inverted index.
pub fn upsert(
&mut self,
id: PointOffsetType,
vector: RemappedSparseVector,
old_vector: Option<RemappedSparseVector>,
) {
// Find elements of the old vector that are not in the new vector
if let Some(old_vector) = &old_vector {
let elements_to_delete = old_vector
.indices
.iter()
.filter(|&dim_id| !vector.indices.contains(dim_id))
.map(|&dim_id| dim_id as usize);
for dim_id in elements_to_delete {
if let Some(posting) = self.postings.get_mut(dim_id) {
posting.delete(id);
} else {
log::debug!("Posting list for dimension {dim_id} not found");
}
}
}
let new_vector_size = vector.len() * size_of::<PostingElementEx>();
for (dim_id, weight) in vector.indices.into_iter().zip(vector.values.into_iter()) {
let dim_id = dim_id as usize;
match self.postings.get_mut(dim_id) {
Some(posting) => {
// update existing posting list
let posting_element = PostingElementEx::new(id, weight);
posting.upsert(posting_element);
}
None => {
// resize postings vector (fill gaps with empty posting lists)
self.postings.resize_with(dim_id + 1, PostingList::default);
// initialize new posting for dimension
self.postings[dim_id] = PostingList::new_one(id, weight);
}
}
}
if let Some(old) = old_vector {
self.total_sparse_size = self
.total_sparse_size
.saturating_sub(old.len() * size_of::<PostingElementEx>());
} else {
self.vector_count += 1;
}
self.total_sparse_size += new_vector_size
}
pub fn total_posting_elements_size(&self) -> usize {
self.postings
.iter()
.map(|posting| posting.elements.len() * size_of::<PostingElementEx>())
.sum()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
#[test]
fn upsert_same_dimension_inverted_index_ram() {
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
let mut inverted_index_ram = builder.build();
assert_eq!(inverted_index_ram.vector_count, 3);
inverted_index_ram.upsert(
4,
RemappedSparseVector::new(vec![1, 2, 3], vec![40.0, 40.0, 40.0]).unwrap(),
None,
);
for i in 1..4 {
let posting_list = inverted_index_ram.get(&i).unwrap();
let posting_list = posting_list.elements.as_slice();
assert_eq!(posting_list.len(), 4);
assert_eq!(posting_list.first().unwrap().weight, 10.0);
assert_eq!(posting_list.get(1).unwrap().weight, 20.0);
assert_eq!(posting_list.get(2).unwrap().weight, 30.0);
assert_eq!(posting_list.get(3).unwrap().weight, 40.0);
}
}
#[test]
fn upsert_new_dimension_inverted_index_ram() {
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
let mut inverted_index_ram = builder.build();
assert_eq!(inverted_index_ram.vector_count, 3);
// 4 postings, 0th empty
assert_eq!(inverted_index_ram.postings.len(), 4);
inverted_index_ram.upsert(
4,
RemappedSparseVector::new(vec![1, 2, 30], vec![40.0, 40.0, 40.0]).unwrap(),
None,
);
// new dimension resized postings
assert_eq!(inverted_index_ram.postings.len(), 31);
// updated existing dimension
for i in 1..3 {
let posting_list = inverted_index_ram.get(&i).unwrap();
let posting_list = posting_list.elements.as_slice();
assert_eq!(posting_list.len(), 4);
assert_eq!(posting_list.first().unwrap().weight, 10.0);
assert_eq!(posting_list.get(1).unwrap().weight, 20.0);
assert_eq!(posting_list.get(2).unwrap().weight, 30.0);
assert_eq!(posting_list.get(3).unwrap().weight, 40.0);
}
// fetch 30th posting
let postings = inverted_index_ram.get(&30).unwrap();
let postings = postings.elements.as_slice();
assert_eq!(postings.len(), 1);
let posting = postings.first().unwrap();
assert_eq!(posting.record_id, 4);
assert_eq!(posting.weight, 40.0);
}
#[test]
fn test_upsert_insert_equivalence() {
let first_vec: RemappedSparseVector = [(1, 10.0), (2, 10.0), (3, 10.0)].into();
let second_vec: RemappedSparseVector = [(1, 20.0), (2, 20.0), (3, 20.0)].into();
let third_vec: RemappedSparseVector = [(1, 30.0), (2, 30.0), (3, 30.0)].into();
let mut builder = InvertedIndexBuilder::new();
builder.add(1, first_vec.clone());
builder.add(2, second_vec.clone());
builder.add(3, third_vec.clone());
let inverted_index_ram_built = builder.build();
assert_eq!(inverted_index_ram_built.vector_count, 3);
let mut inverted_index_ram_upserted = InvertedIndexRam::empty();
inverted_index_ram_upserted.upsert(1, first_vec, None);
inverted_index_ram_upserted.upsert(2, second_vec, None);
inverted_index_ram_upserted.upsert(3, third_vec, None);
assert_eq!(
inverted_index_ram_built.postings.len(),
inverted_index_ram_upserted.postings.len()
);
assert_eq!(inverted_index_ram_built, inverted_index_ram_upserted);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_immutable_ram.rs | lib/sparse/src/index/inverted_index/inverted_index_immutable_ram.rs | use std::borrow::Cow;
use std::path::Path;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use super::InvertedIndex;
use super::inverted_index_mmap::InvertedIndexMmap;
use super::inverted_index_ram::InvertedIndexRam;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimId, DimOffset};
use crate::index::posting_list::{PostingList, PostingListIterator};
/// A wrapper around [`InvertedIndexRam`].
/// Will be replaced with the new compressed implementation eventually.
// TODO: Remove this inverted index implementation, it is no longer used
#[derive(Debug, Clone, PartialEq)]
pub struct InvertedIndexImmutableRam {
    // The wrapped RAM index; all read operations delegate to it.
    inner: InvertedIndexRam,
}
impl InvertedIndex for InvertedIndexImmutableRam {
    type Iter<'a> = PostingListIterator<'a>;
    // Shares the on-disk format (and thus the storage version) with the mmap index.
    type Version = <InvertedIndexMmap as InvertedIndex>::Version;
    // Fully resident in RAM once loaded.
    fn is_on_disk(&self) -> bool {
        false
    }
    /// Load the mmap-backed index at `path` and copy every posting list into RAM.
    ///
    /// Returns `InvalidData` if a posting list recorded in the file header cannot
    /// be read back.
    fn open(path: &Path) -> std::io::Result<Self> {
        let mmap_inverted_index = InvertedIndexMmap::load(path)?;
        let mut inverted_index = InvertedIndexRam {
            postings: Default::default(),
            vector_count: mmap_inverted_index.file_header.vector_count,
            // Calculated after reading mmap
            total_sparse_size: 0,
        };
        for i in 0..mmap_inverted_index.file_header.posting_count as DimId {
            let posting_list = mmap_inverted_index.get(&i).ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    format!("Posting list {i} not found"),
                )
            })?;
            // Copy the mmap-backed slice into an owned, in-RAM posting list.
            inverted_index.postings.push(PostingList {
                elements: posting_list.to_owned(),
            });
        }
        inverted_index.total_sparse_size = inverted_index.total_posting_elements_size();
        Ok(InvertedIndexImmutableRam {
            inner: inverted_index,
        })
    }
    /// Persist by converting the inner RAM index into the mmap file format.
    fn save(&self, path: &Path) -> std::io::Result<()> {
        InvertedIndexMmap::convert_and_save(&self.inner, path)?;
        Ok(())
    }
    // The read-only accessors below simply delegate to the inner RAM index.
    fn get<'a>(
        &'a self,
        id: DimOffset,
        hw_counter: &'a HardwareCounterCell,
    ) -> Option<PostingListIterator<'a>> {
        InvertedIndex::get(&self.inner, id, hw_counter)
    }
    fn len(&self) -> usize {
        self.inner.len()
    }
    fn posting_list_len(&self, id: &DimOffset, hw_counter: &HardwareCounterCell) -> Option<usize> {
        self.inner.posting_list_len(id, hw_counter)
    }
    // On-disk representation is the mmap format, so file listing is delegated to it.
    fn files(path: &Path) -> Vec<std::path::PathBuf> {
        InvertedIndexMmap::files(path)
    }
    fn immutable_files(path: &Path) -> Vec<std::path::PathBuf> {
        // `InvertedIndexImmutableRam` is always immutable
        InvertedIndexMmap::immutable_files(path)
    }
    // Mutation is intentionally unsupported on this immutable index.
    fn remove(&mut self, _id: PointOffsetType, _old_vector: RemappedSparseVector) {
        panic!("Cannot remove from a read-only RAM inverted index")
    }
    fn upsert(
        &mut self,
        _id: PointOffsetType,
        _vector: RemappedSparseVector,
        _old_vector: Option<RemappedSparseVector>,
    ) {
        panic!("Cannot upsert into a read-only RAM inverted index")
    }
    /// Wrap a RAM index directly; note that nothing is written to `_path` here.
    fn from_ram_index<P: AsRef<Path>>(
        ram_index: Cow<InvertedIndexRam>,
        _path: P,
    ) -> std::io::Result<Self> {
        Ok(InvertedIndexImmutableRam {
            inner: ram_index.into_owned(),
        })
    }
    fn vector_count(&self) -> usize {
        self.inner.vector_count()
    }
    fn total_sparse_vectors_size(&self) -> usize {
        self.inner.total_sparse_vectors_size()
    }
    fn max_index(&self) -> Option<DimOffset> {
        self.inner.max_index()
    }
}
#[cfg(test)]
mod tests {
    use tempfile::Builder;
    use super::*;
    use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
    // Round-trip test: build an index, save it through the mmap format,
    // reload it, and verify the loaded copy is identical.
    #[test]
    fn inverted_index_ram_save_load() {
        let mut builder = InvertedIndexBuilder::new();
        builder.add(1, vec![(1, 10.0), (2, 10.0), (3, 10.0)].try_into().unwrap());
        builder.add(2, vec![(1, 20.0), (2, 20.0), (3, 20.0)].try_into().unwrap());
        builder.add(3, vec![(1, 30.0), (2, 30.0), (3, 30.0)].try_into().unwrap());
        let inverted_index_ram = builder.build();
        let tmp_dir_path = Builder::new().prefix("test_index_dir").tempdir().unwrap();
        // `from_ram_index` for this type only wraps the index; `save` writes files.
        let inverted_index_immutable_ram = InvertedIndexImmutableRam::from_ram_index(
            Cow::Borrowed(&inverted_index_ram),
            tmp_dir_path.path(),
        )
        .unwrap();
        inverted_index_immutable_ram
            .save(tmp_dir_path.path())
            .unwrap();
        let loaded_inverted_index = InvertedIndexImmutableRam::open(tmp_dir_path.path()).unwrap();
        assert_eq!(inverted_index_immutable_ram, loaded_inverted_index);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_compressed_immutable_ram.rs | lib/sparse/src/index/inverted_index/inverted_index_compressed_immutable_ram.rs | use std::borrow::Cow;
use std::path::Path;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use super::InvertedIndex;
use super::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use super::inverted_index_ram::InvertedIndexRam;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimId, DimOffset, Weight};
use crate::index::compressed_posting_list::{
CompressedPostingBuilder, CompressedPostingList, CompressedPostingListIterator,
};
use crate::index::posting_list_common::PostingListIter as _;
/// Immutable, fully in-RAM inverted index with compressed posting lists.
///
/// `W` is the stored weight representation (e.g. `f32`, `half::f16`, `u8`,
/// `QuantizedU8`).
#[derive(Debug, Clone, PartialEq)]
pub struct InvertedIndexCompressedImmutableRam<W: Weight> {
    // One compressed posting list per dimension id (index == dimension id).
    pub(super) postings: Vec<CompressedPostingList<W>>,
    // Number of unique vectors indexed.
    pub(super) vector_count: usize,
    // Total stored size of all posting data, in bytes.
    pub(super) total_sparse_size: usize,
}
impl<W: Weight> InvertedIndexCompressedImmutableRam<W> {
    /// Consume the index and return its posting lists.
    #[allow(dead_code)]
    pub(super) fn into_postings(self) -> Vec<CompressedPostingList<W>> {
        self.postings
    }
}
impl<W: Weight> InvertedIndex for InvertedIndexCompressedImmutableRam<W> {
    type Iter<'a> = CompressedPostingListIterator<'a, W>;
    // Shares the on-disk format (and storage version) with the compressed mmap index.
    type Version = <InvertedIndexCompressedMmap<W> as InvertedIndex>::Version;
    // Fully resident in RAM once loaded.
    fn is_on_disk(&self) -> bool {
        false
    }
    /// Load the compressed mmap index at `path` and copy every posting list into RAM.
    ///
    /// Returns `InvalidData` if a posting list recorded in the file header cannot
    /// be read back.
    fn open(path: &Path) -> std::io::Result<Self> {
        let mmap_inverted_index = InvertedIndexCompressedMmap::load(path)?;
        let mut inverted_index = InvertedIndexCompressedImmutableRam {
            postings: Vec::with_capacity(mmap_inverted_index.file_header.posting_count),
            vector_count: mmap_inverted_index.file_header.vector_count,
            total_sparse_size: mmap_inverted_index.total_sparse_vectors_size(),
        };
        // Disposable counter: this load is not attributed to any request.
        let hw_counter = HardwareCounterCell::disposable();
        for i in 0..mmap_inverted_index.file_header.posting_count as DimId {
            let posting_list = mmap_inverted_index.get(i, &hw_counter).ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    format!("Posting list {i} not found"),
                )
            })?;
            inverted_index.postings.push(posting_list.to_owned());
        }
        Ok(inverted_index)
    }
    /// Persist by converting into the compressed mmap file format.
    fn save(&self, path: &Path) -> std::io::Result<()> {
        InvertedIndexCompressedMmap::convert_and_save(self, path)?;
        Ok(())
    }
    fn get<'a>(
        &'a self,
        id: DimOffset,
        hw_counter: &'a HardwareCounterCell, // Ignored for in-ram index
    ) -> Option<Self::Iter<'a>> {
        self.postings
            .get(id as usize)
            .map(|posting_list| posting_list.iter(hw_counter))
    }
    fn len(&self) -> usize {
        self.postings.len()
    }
    fn posting_list_len(&self, id: &DimOffset, hw_counter: &HardwareCounterCell) -> Option<usize> {
        self.get(*id, hw_counter)
            .map(|posting_list| posting_list.len_to_end())
    }
    // On-disk representation is the compressed mmap format; delegate file listing.
    fn files(path: &Path) -> Vec<std::path::PathBuf> {
        InvertedIndexCompressedMmap::<W>::files(path)
    }
    fn immutable_files(path: &Path) -> Vec<std::path::PathBuf> {
        // `InvertedIndexCompressedImmutableRam` is always immutable
        InvertedIndexCompressedMmap::<W>::immutable_files(path)
    }
    // Mutation is intentionally unsupported on this immutable index.
    fn remove(&mut self, _id: PointOffsetType, _old_vector: RemappedSparseVector) {
        panic!("Cannot remove from a read-only RAM inverted index")
    }
    fn upsert(
        &mut self,
        _id: PointOffsetType,
        _vector: RemappedSparseVector,
        _old_vector: Option<RemappedSparseVector>,
    ) {
        panic!("Cannot upsert into a read-only RAM inverted index")
    }
    /// Re-encode an uncompressed RAM index into compressed posting lists.
    /// Nothing is written to `_path` here.
    fn from_ram_index<P: AsRef<Path>>(
        ram_index: Cow<InvertedIndexRam>,
        _path: P,
    ) -> std::io::Result<Self> {
        let mut postings = Vec::with_capacity(ram_index.postings.len());
        for old_posting_list in &ram_index.postings {
            let mut new_posting_list = CompressedPostingBuilder::new();
            for elem in &old_posting_list.elements {
                new_posting_list.add(elem.record_id, elem.weight);
            }
            postings.push(new_posting_list.build());
        }
        // Recompute the total size from the compressed representation.
        let hw_counter = HardwareCounterCell::disposable();
        let total_sparse_size = postings
            .iter()
            .map(|p| p.view(&hw_counter).store_size().total)
            .sum();
        Ok(InvertedIndexCompressedImmutableRam {
            postings,
            vector_count: ram_index.vector_count,
            total_sparse_size,
        })
    }
    fn vector_count(&self) -> usize {
        self.vector_count
    }
    fn total_sparse_vectors_size(&self) -> usize {
        self.total_sparse_size
    }
    // Highest addressable dimension id, i.e. last posting-list index.
    fn max_index(&self) -> Option<DimOffset> {
        self.postings
            .len()
            .checked_sub(1)
            .map(|len| len as DimOffset)
    }
}
#[cfg(test)]
mod tests {
    use tempfile::Builder;
    use super::*;
    use crate::common::sparse_vector_fixture::random_sparse_vector;
    use crate::common::types::QuantizedU8;
    use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
    // Small fixed index: exercises save/load for every weight representation.
    #[test]
    fn test_save_load_tiny() {
        let mut builder = InvertedIndexBuilder::new();
        builder.add(1, vec![(1, 10.0), (2, 10.0), (3, 10.0)].try_into().unwrap());
        builder.add(2, vec![(1, 20.0), (2, 20.0), (3, 20.0)].try_into().unwrap());
        builder.add(3, vec![(1, 30.0), (2, 30.0), (3, 30.0)].try_into().unwrap());
        let inverted_index_ram = builder.build();
        check_save_load::<f32>(&inverted_index_ram);
        check_save_load::<half::f16>(&inverted_index_ram);
        check_save_load::<u8>(&inverted_index_ram);
        check_save_load::<QuantizedU8>(&inverted_index_ram);
    }
    // Larger random index: same round trip, with posting lists big enough to chunk.
    #[test]
    fn test_save_load_large() {
        let mut rnd_gen = rand::rng();
        let mut builder = InvertedIndexBuilder::new();
        // Enough elements to put some of them into chunks
        for i in 0..1024 {
            builder.add(i, random_sparse_vector(&mut rnd_gen, 3).into_remapped());
        }
        let inverted_index_ram = builder.build();
        check_save_load::<f32>(&inverted_index_ram);
        check_save_load::<half::f16>(&inverted_index_ram);
        check_save_load::<u8>(&inverted_index_ram);
        check_save_load::<QuantizedU8>(&inverted_index_ram);
    }
    // Convert to the compressed in-RAM index, save, reload, and compare.
    fn check_save_load<W: Weight>(inverted_index_ram: &InvertedIndexRam) {
        let tmp_dir_path = Builder::new().prefix("test_index_dir").tempdir().unwrap();
        let inverted_index_immutable_ram =
            InvertedIndexCompressedImmutableRam::<W>::from_ram_index(
                Cow::Borrowed(inverted_index_ram),
                tmp_dir_path.path(),
            )
            .unwrap();
        inverted_index_immutable_ram
            .save(tmp_dir_path.path())
            .unwrap();
        let loaded_inverted_index =
            InvertedIndexCompressedImmutableRam::<W>::open(tmp_dir_path.path()).unwrap();
        assert_eq!(inverted_index_immutable_ram, loaded_inverted_index);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_mmap.rs | lib/sparse/src/index/inverted_index/inverted_index_mmap.rs | use std::borrow::Cow;
use std::mem::size_of;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use io::file_operations::{atomic_save_json, read_json};
use io::storage_version::StorageVersion;
use memmap2::{Mmap, MmapMut};
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting, Madviseable};
use memory::mmap_ops::{
create_and_ensure_length, open_read_mmap, open_write_mmap, transmute_from_u8,
transmute_from_u8_to_slice, transmute_to_u8, transmute_to_u8_slice,
};
use serde::{Deserialize, Serialize};
use super::INDEX_FILE_NAME;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimId, DimOffset};
use crate::index::inverted_index::InvertedIndex;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use crate::index::posting_list::PostingListIterator;
use crate::index::posting_list_common::PostingElementEx;
// Size in bytes of one serialized posting-list header record in the index file.
const POSTING_HEADER_SIZE: usize = size_of::<PostingListFileHeader>();
// JSON sidecar file holding the `InvertedIndexFileHeader`.
const INDEX_CONFIG_FILE_NAME: &str = "inverted_index_config.json";
/// Storage-version marker for the mmap inverted-index format.
pub struct Version;
impl StorageVersion for Version {
    fn current_raw() -> &'static str {
        "0.1.0"
    }
}
// Serialized as JSON into the index config sidecar file.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct InvertedIndexFileHeader {
    /// Number of posting lists
    pub posting_count: usize,
    /// Number of unique vectors indexed
    pub vector_count: usize,
}
/// Inverted flatten index from dimension id to posting list
#[derive(Debug)]
pub struct InvertedIndexMmap {
    // Directory the index was opened from / saved to.
    path: PathBuf,
    // Read-only mapping of the index data file (headers followed by elements).
    mmap: Arc<Mmap>,
    // Counts loaded from the JSON config sidecar.
    pub file_header: InvertedIndexFileHeader,
}
/// Per-posting-list header record: byte range of the list's elements
/// within the index data file.
#[derive(Debug, Default, Clone)]
struct PostingListFileHeader {
    pub start_offset: u64,
    pub end_offset: u64,
}
impl InvertedIndex for InvertedIndexMmap {
    type Iter<'a> = PostingListIterator<'a>;
    type Version = Version;
    // Backed by a file mapping, so reads count as disk I/O.
    fn is_on_disk(&self) -> bool {
        true
    }
    fn open(path: &Path) -> std::io::Result<Self> {
        Self::load(path)
    }
    /// No-op: the files were already written by `open()`/`from_ram_index()`.
    fn save(&self, path: &Path) -> std::io::Result<()> {
        debug_assert_eq!(path, self.path);
        // If Self instance exists, it's either constructed by using `open()` (which reads index
        // files), or using `from_ram_index()` (which writes them). Both assume that the files
        // exist. If any of the files are missing, then something went wrong.
        for file in Self::files(path) {
            debug_assert!(file.exists());
        }
        Ok(())
    }
    /// Trait-level `get`: wraps the inherent `get` and charges the read
    /// (whole posting list, in bytes) to the hardware counter.
    fn get<'a>(
        &'a self,
        id: DimOffset,
        hw_counter: &'a HardwareCounterCell,
    ) -> Option<PostingListIterator<'a>> {
        let posting_list = self.get(&id);
        hw_counter
            .vector_io_read()
            .incr_delta(posting_list.map(|x| x.len()).unwrap_or(0) * size_of::<PostingElementEx>());
        posting_list.map(PostingListIterator::new)
    }
    fn len(&self) -> usize {
        self.file_header.posting_count
    }
    fn posting_list_len(&self, id: &DimOffset, _hw_counter: &HardwareCounterCell) -> Option<usize> {
        self.get(id).map(|posting_list| posting_list.len())
    }
    fn files(path: &Path) -> Vec<PathBuf> {
        vec![
            Self::index_file_path(path),
            Self::index_config_file_path(path),
        ]
    }
    fn immutable_files(path: &Path) -> Vec<PathBuf> {
        // `InvertedIndexMmap` is always immutable
        Self::files(path)
    }
    // Mutation is intentionally unsupported on this mmap-backed index.
    fn remove(&mut self, _id: PointOffsetType, _old_vector: RemappedSparseVector) {
        panic!("Cannot remove from a read-only Mmap inverted index")
    }
    fn upsert(
        &mut self,
        _id: PointOffsetType,
        _vector: RemappedSparseVector,
        _old_vector: Option<RemappedSparseVector>,
    ) {
        panic!("Cannot upsert into a read-only Mmap inverted index")
    }
    /// Serialize a RAM index into the mmap file format at `path` and open it.
    fn from_ram_index<P: AsRef<Path>>(
        ram_index: Cow<InvertedIndexRam>,
        path: P,
    ) -> std::io::Result<Self> {
        Self::convert_and_save(&ram_index, path)
    }
    fn vector_count(&self) -> usize {
        self.file_header.vector_count
    }
    // Deliberately unimplemented: this index type is superseded (see struct docs).
    fn total_sparse_vectors_size(&self) -> usize {
        debug_assert!(
            false,
            "This index is already substituted by the compressed version, no need to maintain new features",
        );
        0
    }
    fn max_index(&self) -> Option<DimId> {
        match self.file_header.posting_count {
            0 => None,
            len => Some(len as DimId - 1),
        }
    }
}
impl InvertedIndexMmap {
    pub fn index_file_path(path: &Path) -> PathBuf {
        path.join(INDEX_FILE_NAME)
    }
    pub fn index_config_file_path(path: &Path) -> PathBuf {
        path.join(INDEX_CONFIG_FILE_NAME)
    }
    /// Return the posting list for dimension `id` as a slice directly into the
    /// mapped file, or `None` if `id` is out of range.
    pub fn get(&self, id: &DimId) -> Option<&[PostingElementEx]> {
        // check that the id is not out of bounds (posting_count includes the empty zeroth entry)
        if *id >= self.file_header.posting_count as DimId {
            return None;
        }
        // Headers are laid out back-to-back at the start of the file; read the
        // id-th record to find the element byte range.
        let header_start = *id as usize * POSTING_HEADER_SIZE;
        let header = transmute_from_u8::<PostingListFileHeader>(
            &self.mmap[header_start..header_start + POSTING_HEADER_SIZE],
        )
        .clone();
        let elements_bytes = &self.mmap[header.start_offset as usize..header.end_offset as usize];
        Some(transmute_from_u8_to_slice(elements_bytes))
    }
    /// Serialize `inverted_index_ram` into `path` (data file + JSON config)
    /// and return the opened read-only mmap index.
    pub fn convert_and_save<P: AsRef<Path>>(
        inverted_index_ram: &InvertedIndexRam,
        path: P,
    ) -> std::io::Result<Self> {
        // File layout: all posting headers first, then all posting elements.
        let total_posting_headers_size = Self::total_posting_headers_size(inverted_index_ram);
        let total_posting_elements_size = inverted_index_ram.total_posting_elements_size();
        let file_length = total_posting_headers_size + total_posting_elements_size;
        let file_path = Self::index_file_path(path.as_ref());
        create_and_ensure_length(file_path.as_ref(), file_length)?;
        let mut mmap = open_write_mmap(
            file_path.as_ref(),
            AdviceSetting::from(Advice::Normal),
            false,
        )?;
        // file index data
        Self::save_posting_headers(&mut mmap, inverted_index_ram, total_posting_headers_size);
        Self::save_posting_elements(&mut mmap, inverted_index_ram, total_posting_headers_size);
        if file_length > 0 {
            mmap.flush()?;
        }
        // save header properties
        let posting_count = inverted_index_ram.postings.len();
        let vector_count = inverted_index_ram.vector_count();
        // finalize data with index file.
        let file_header = InvertedIndexFileHeader {
            posting_count,
            vector_count,
        };
        let config_file_path = Self::index_config_file_path(path.as_ref());
        atomic_save_json(&config_file_path, &file_header)?;
        Ok(Self {
            path: path.as_ref().to_owned(),
            mmap: Arc::new(mmap.make_read_only()?),
            file_header,
        })
    }
    /// Open an existing index directory: JSON config first, then the data file.
    pub fn load<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
        // read index config file
        let config_file_path = Self::index_config_file_path(path.as_ref());
        // if the file header does not exist, the index is malformed
        let file_header: InvertedIndexFileHeader = read_json(&config_file_path)?;
        // read index data into mmap
        let file_path = Self::index_file_path(path.as_ref());
        let mmap = open_read_mmap(
            file_path.as_ref(),
            AdviceSetting::from(Advice::Normal),
            false,
        )?;
        Ok(Self {
            path: path.as_ref().to_owned(),
            mmap: Arc::new(mmap),
            file_header,
        })
    }
    // Size of the header region: one fixed-size record per posting list.
    fn total_posting_headers_size(inverted_index_ram: &InvertedIndexRam) -> usize {
        inverted_index_ram.postings.len() * POSTING_HEADER_SIZE
    }
    /// Write one `PostingListFileHeader` per posting list, assigning each list a
    /// contiguous element byte range starting right after the header region.
    fn save_posting_headers(
        mmap: &mut MmapMut,
        inverted_index_ram: &InvertedIndexRam,
        total_posting_headers_size: usize,
    ) {
        let mut elements_offset: usize = total_posting_headers_size;
        for (id, posting) in inverted_index_ram.postings.iter().enumerate() {
            let posting_elements_size = posting.elements.len() * size_of::<PostingElementEx>();
            let posting_header = PostingListFileHeader {
                start_offset: elements_offset as u64,
                end_offset: (elements_offset + posting_elements_size) as u64,
            };
            elements_offset = posting_header.end_offset as usize;
            // save posting header
            let posting_header_bytes = transmute_to_u8(&posting_header);
            let start_posting_offset = id * POSTING_HEADER_SIZE;
            let end_posting_offset = (id + 1) * POSTING_HEADER_SIZE;
            mmap[start_posting_offset..end_posting_offset].copy_from_slice(posting_header_bytes);
        }
    }
    /// Write the raw element bytes of every posting list, in the same order the
    /// headers were assigned, starting after the header region.
    fn save_posting_elements(
        mmap: &mut MmapMut,
        inverted_index_ram: &InvertedIndexRam,
        total_posting_headers_size: usize,
    ) {
        let mut offset = total_posting_headers_size;
        for posting in &inverted_index_ram.postings {
            // save posting element
            let posting_elements_bytes = transmute_to_u8_slice(&posting.elements);
            mmap[offset..offset + posting_elements_bytes.len()]
                .copy_from_slice(posting_elements_bytes);
            offset += posting_elements_bytes.len();
        }
    }
    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> std::io::Result<()> {
        self.mmap.populate();
        Ok(())
    }
    /// Drop disk cache.
    pub fn clear_cache(&self) -> std::io::Result<()> {
        clear_disk_cache(&self.path)
    }
}
#[cfg(test)]
mod tests {
    use tempfile::Builder;
    use super::*;
    use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
    // Assert every posting list in the mmap index matches the RAM original.
    fn compare_indexes(
        inverted_index_ram: &InvertedIndexRam,
        inverted_index_mmap: &InvertedIndexMmap,
    ) {
        for id in 0..inverted_index_ram.postings.len() as DimId {
            let posting_list_ram = inverted_index_ram.get(&id).unwrap().elements.as_slice();
            let posting_list_mmap = inverted_index_mmap.get(&id).unwrap();
            assert_eq!(posting_list_ram.len(), posting_list_mmap.len());
            for i in 0..posting_list_ram.len() {
                assert_eq!(posting_list_ram[i], posting_list_mmap[i]);
            }
        }
    }
    #[test]
    fn test_inverted_index_mmap() {
        // skip 4th dimension
        let mut builder = InvertedIndexBuilder::new();
        builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0), (5, 10.0)].into());
        builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0), (5, 20.0)].into());
        builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
        builder.add(4, [(1, 1.0), (2, 1.0)].into());
        builder.add(5, [(1, 2.0)].into());
        builder.add(6, [(1, 3.0)].into());
        builder.add(7, [(1, 4.0)].into());
        builder.add(8, [(1, 5.0)].into());
        builder.add(9, [(1, 6.0)].into());
        let inverted_index_ram = builder.build();
        let tmp_dir_path = Builder::new().prefix("test_index_dir").tempdir().unwrap();
        // First pass: the handle returned directly by convert_and_save.
        {
            let inverted_index_mmap =
                InvertedIndexMmap::convert_and_save(&inverted_index_ram, &tmp_dir_path).unwrap();
            compare_indexes(&inverted_index_ram, &inverted_index_mmap);
        }
        // Second pass: reload from disk and verify again.
        let inverted_index_mmap = InvertedIndexMmap::load(&tmp_dir_path).unwrap();
        // posting_count: 0th entry is always empty + 1st + 2nd + 3rd + 4th empty + 5th
        assert_eq!(inverted_index_mmap.file_header.posting_count, 6);
        assert_eq!(inverted_index_mmap.file_header.vector_count, 9);
        compare_indexes(&inverted_index_ram, &inverted_index_mmap);
        assert!(inverted_index_mmap.get(&0).unwrap().is_empty()); // the first entry is always empty as dimension ids start at 1
        assert_eq!(inverted_index_mmap.get(&1).unwrap().len(), 9);
        assert_eq!(inverted_index_mmap.get(&2).unwrap().len(), 4);
        assert_eq!(inverted_index_mmap.get(&3).unwrap().len(), 3);
        assert!(inverted_index_mmap.get(&4).unwrap().is_empty()); // return empty posting list info for intermediary empty ids
        assert_eq!(inverted_index_mmap.get(&5).unwrap().len(), 2);
        // index after the last values are None
        assert!(inverted_index_mmap.get(&6).is_none());
        assert!(inverted_index_mmap.get(&7).is_none());
        assert!(inverted_index_mmap.get(&100).is_none());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/inverted_index_ram_builder.rs | lib/sparse/src/index/inverted_index/inverted_index_ram_builder.rs | use std::cmp::max;
use common::types::PointOffsetType;
use log::debug;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use crate::index::posting_list::PostingBuilder;
use crate::index::posting_list_common::PostingElementEx;
/// Builder for InvertedIndexRam
pub struct InvertedIndexBuilder {
pub posting_builders: Vec<PostingBuilder>,
pub vector_count: usize,
pub total_sparse_size: usize,
}
impl Default for InvertedIndexBuilder {
fn default() -> Self {
Self::new()
}
}
impl InvertedIndexBuilder {
    /// Create an empty builder: no posting lists, no vectors, zero size.
    pub fn new() -> InvertedIndexBuilder {
        InvertedIndexBuilder {
            posting_builders: Vec::new(),
            vector_count: 0,
            total_sparse_size: 0,
        }
    }
    /// Add a vector to the inverted index builder
    pub fn add(&mut self, id: PointOffsetType, vector: RemappedSparseVector) {
        // Account for the vector's storage footprint before consuming it.
        let vector_bytes = vector.len() * size_of::<PostingElementEx>();
        for (dim_id, weight) in vector.indices.into_iter().zip(vector.values.into_iter()) {
            let dim_id = dim_id as usize;
            // Grow the per-dimension table on demand so `dim_id` is addressable.
            if self.posting_builders.len() <= dim_id {
                self.posting_builders
                    .resize_with(dim_id + 1, PostingBuilder::new);
            }
            self.posting_builders[dim_id].add(id, weight);
        }
        self.vector_count += 1;
        self.total_sparse_size = self.total_sparse_size.saturating_add(vector_bytes);
    }
    /// Consumes the builder and returns an InvertedIndexRam
    pub fn build(self) -> InvertedIndexRam {
        let InvertedIndexBuilder {
            posting_builders,
            vector_count,
            total_sparse_size,
        } = self;
        if posting_builders.is_empty() {
            return InvertedIndexRam {
                postings: vec![],
                total_sparse_size,
                vector_count,
            };
        }
        debug!(
            "building inverted index with {} sparse vectors in {} posting lists",
            vector_count,
            posting_builders.len(),
        );
        // Finalize every per-dimension posting builder into a posting list.
        let postings: Vec<_> = posting_builders
            .into_iter()
            .map(|posting_builder| posting_builder.build())
            .collect();
        InvertedIndexRam {
            postings,
            vector_count,
            total_sparse_size,
        }
    }
    /// Creates an [InvertedIndexRam] from an iterator of (id, vector) pairs.
    pub fn build_from_iterator(
        iter: impl Iterator<Item = (PointOffsetType, RemappedSparseVector)>,
    ) -> InvertedIndexRam {
        let mut builder = InvertedIndexBuilder::new();
        iter.for_each(|(id, vector)| builder.add(id, vector));
        builder.build()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/inverted_index/mod.rs | lib/sparse/src/index/inverted_index/mod.rs | use std::borrow::Cow;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use io::storage_version::StorageVersion;
use super::posting_list_common::PostingListIter;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::DimOffset;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
pub mod inverted_index_compressed_immutable_ram;
pub mod inverted_index_compressed_mmap;
pub mod inverted_index_immutable_ram;
pub mod inverted_index_mmap;
pub mod inverted_index_ram;
pub mod inverted_index_ram_builder;
// Legacy index data file name, kept for reference/migration purposes.
pub const OLD_INDEX_FILE_NAME: &str = "inverted_index.data";
// Current index data file name.
pub const INDEX_FILE_NAME: &str = "inverted_index.dat";
/// Common interface of all sparse inverted-index implementations
/// (RAM, immutable RAM, mmap, compressed variants).
pub trait InvertedIndex: Sized + Debug + 'static {
    /// Iterator type over one posting list.
    type Iter<'a>: PostingListIter + Clone
    where
        Self: 'a;
    /// Storage-format version marker for this implementation.
    type Version: StorageVersion;
    /// Whether reads hit disk (mmap-backed) rather than RAM.
    fn is_on_disk(&self) -> bool;
    /// Open existing index based on path
    fn open(path: &Path) -> std::io::Result<Self>;
    /// Save index
    fn save(&self, path: &Path) -> std::io::Result<()>;
    /// Get posting list for dimension id
    fn get<'a>(
        &'a self,
        id: DimOffset,
        hw_counter: &'a HardwareCounterCell,
    ) -> Option<Self::Iter<'a>>;
    /// Get number of posting lists
    fn len(&self) -> usize;
    /// Check if the index is empty
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Get number of posting lists for dimension id
    fn posting_list_len(&self, id: &DimOffset, hw_counter: &HardwareCounterCell) -> Option<usize>;
    /// Files used by this index
    fn files(path: &Path) -> Vec<PathBuf>;
    /// Subset of `files` that never changes after creation.
    fn immutable_files(path: &Path) -> Vec<PathBuf>;
    /// Remove a vector from the index. May panic on immutable implementations.
    fn remove(&mut self, id: PointOffsetType, old_vector: RemappedSparseVector);
    /// Upsert a vector into the inverted index.
    fn upsert(
        &mut self,
        id: PointOffsetType,
        vector: RemappedSparseVector,
        old_vector: Option<RemappedSparseVector>,
    );
    /// Create inverted index from ram index
    fn from_ram_index<P: AsRef<Path>>(
        ram_index: Cow<InvertedIndexRam>,
        path: P,
    ) -> std::io::Result<Self>;
    /// Number of indexed vectors
    fn vector_count(&self) -> usize;
    /// Total size of all the sparse vectors in bytes
    fn total_sparse_vectors_size(&self) -> usize;
    /// Get max existed index
    fn max_index(&self) -> Option<DimOffset>;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/tests/search_context_tests.rs | lib/sparse/src/index/tests/search_context_tests.rs | #[cfg(test)]
#[generic_tests::define]
mod tests {
use std::any::TypeId;
use std::borrow::Cow;
use std::sync::OnceLock;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, ScoredPointOffset};
use rand::Rng;
use tempfile::TempDir;
use crate::common::scores_memory_pool::{PooledScoresHandle, ScoresMemoryPool};
use crate::common::sparse_vector::{RemappedSparseVector, SparseVector};
use crate::common::sparse_vector_fixture::random_sparse_vector;
use crate::common::types::QuantizedU8;
use crate::index::inverted_index::InvertedIndex;
use crate::index::inverted_index::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam;
use crate::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use crate::index::inverted_index::inverted_index_immutable_ram::InvertedIndexImmutableRam;
use crate::index::inverted_index::inverted_index_mmap::InvertedIndexMmap;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
use crate::index::posting_list_common::PostingListIter;
use crate::index::search_context::SearchContext;
    // ---- Test instantiations ----
    // Each `instantiate_tests` attribute stamps out the generic test functions
    // below for one concrete `InvertedIndex` implementation (via `generic_tests`).
    #[instantiate_tests(<InvertedIndexRam>)]
    mod ram {}
    #[instantiate_tests(<InvertedIndexMmap>)]
    mod mmap {}
    #[instantiate_tests(<InvertedIndexImmutableRam>)]
    mod iram {}
    #[instantiate_tests(<InvertedIndexCompressedImmutableRam<f32>>)]
    mod iram_f32 {}
    #[instantiate_tests(<InvertedIndexCompressedImmutableRam<half::f16>>)]
    mod iram_f16 {}
    #[instantiate_tests(<InvertedIndexCompressedImmutableRam<u8>>)]
    mod iram_u8 {}
    #[instantiate_tests(<InvertedIndexCompressedImmutableRam<QuantizedU8>>)]
    mod iram_q8 {}
    #[instantiate_tests(<InvertedIndexCompressedMmap<f32>>)]
    mod mmap_f32 {}
    #[instantiate_tests(<InvertedIndexCompressedMmap<half::f16>>)]
    mod mmap_f16 {}
    #[instantiate_tests(<InvertedIndexCompressedMmap<u8>>)]
    mod mmap_u8 {}
    #[instantiate_tests(<InvertedIndexCompressedMmap<QuantizedU8>>)]
    mod mmap_q8 {}
    // Shared scores memory pool, lazily initialized once and reused by all tests.
    static TEST_SCORES_POOL: OnceLock<ScoresMemoryPool> = OnceLock::new();
    // Borrow a pooled scores buffer from the shared pool.
    fn get_pooled_scores() -> PooledScoresHandle<'static> {
        TEST_SCORES_POOL
            .get_or_init(ScoresMemoryPool::default)
            .get()
    }
    /// Match all filter condition for testing
    fn match_all(_p: PointOffsetType) -> bool {
        true
    }
    /// Helper struct to store both an index and a temporary directory
    struct TestIndex<I: InvertedIndex> {
        index: I,
        // Kept alive so the backing files outlive the index handle.
        _temp_dir: TempDir,
    }
    impl<I: InvertedIndex> TestIndex<I> {
        // Build an index of type `I` from a RAM index inside a fresh temp dir.
        fn from_ram(ram_index: InvertedIndexRam) -> Self {
            let temp_dir = tempfile::Builder::new()
                .prefix("test_index_dir")
                .tempdir()
                .unwrap();
            TestIndex {
                index: I::from_ram_index(Cow::Owned(ram_index), &temp_dir).unwrap(),
                _temp_dir: temp_dir,
            }
        }
    }
/// Round scores to allow some quantization errors
fn round_scores<I: 'static>(mut scores: Vec<ScoredPointOffset>) -> Vec<ScoredPointOffset> {
let errors_allowed_for = [
TypeId::of::<InvertedIndexCompressedImmutableRam<QuantizedU8>>(),
TypeId::of::<InvertedIndexCompressedMmap<QuantizedU8>>(),
];
if errors_allowed_for.contains(&TypeId::of::<I>()) {
let precision = 0.25;
scores.iter_mut().for_each(|score| {
score.score = (score.score / precision).round() * precision;
});
scores
} else {
scores
}
}
#[test]
fn test_empty_query<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram(InvertedIndexRam::empty());
let hw_counter = HardwareCounterCell::disposable();
let is_stopped = AtomicBool::new(false);
let mut search_context = SearchContext::new(
RemappedSparseVector::default(), // empty query vector
10,
&index.index,
get_pooled_scores(),
&is_stopped,
&hw_counter,
);
assert_eq!(search_context.search(&match_all), Vec::new());
}
#[test]
fn search_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
10,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(
round_scores::<I>(search_context.search(&match_all)),
vec![
ScoredPointOffset {
score: 90.0,
idx: 3
},
ScoredPointOffset {
score: 60.0,
idx: 2
},
ScoredPointOffset {
score: 30.0,
idx: 1
},
]
);
drop(search_context);
drop(hardware_counter);
// len(QueryVector)=3 * len(vector)=3 => 3*3 => 9
assert!(accumulator.get_cpu() > 0);
if index.index.is_on_disk() {
assert!(accumulator.get_vector_io_read() > 0);
}
}
#[test]
fn search_with_update_test<I: InvertedIndex + 'static>() {
if TypeId::of::<I>() != TypeId::of::<InvertedIndexRam>() {
// Only InvertedIndexRam supports upserts
return;
}
let mut index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
10,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(
round_scores::<I>(search_context.search(&match_all)),
vec![
ScoredPointOffset {
score: 90.0,
idx: 3
},
ScoredPointOffset {
score: 60.0,
idx: 2
},
ScoredPointOffset {
score: 30.0,
idx: 1
},
]
);
drop(search_context);
drop(hardware_counter);
// update index with new point
index.index.upsert(
4,
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![40.0, 40.0, 40.0],
},
None,
);
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
10,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(
search_context.search(&match_all),
vec![
ScoredPointOffset {
score: 120.0,
idx: 4
},
ScoredPointOffset {
score: 90.0,
idx: 3
},
ScoredPointOffset {
score: 60.0,
idx: 2
},
ScoredPointOffset {
score: 30.0,
idx: 1
},
]
);
}
#[test]
fn search_with_hot_key_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (2, 30.0), (3, 30.0)].into());
builder.add(4, [(1, 1.0)].into());
builder.add(5, [(1, 2.0)].into());
builder.add(6, [(1, 3.0)].into());
builder.add(7, [(1, 4.0)].into());
builder.add(8, [(1, 5.0)].into());
builder.add(9, [(1, 6.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
3,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(
round_scores::<I>(search_context.search(&match_all)),
vec![
ScoredPointOffset {
score: 90.0,
idx: 3
},
ScoredPointOffset {
score: 60.0,
idx: 2
},
ScoredPointOffset {
score: 30.0,
idx: 1
},
]
);
drop(search_context);
drop(hardware_counter);
let cpu_cost = accumulator.get_cpu();
assert!(cpu_cost > 0);
if index.index.is_on_disk() {
assert!(accumulator.get_vector_io_read() > 0);
}
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
4,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(
round_scores::<I>(search_context.search(&match_all)),
vec![
ScoredPointOffset {
score: 90.0,
idx: 3
},
ScoredPointOffset {
score: 60.0,
idx: 2
},
ScoredPointOffset {
score: 30.0,
idx: 1
},
ScoredPointOffset { score: 6.0, idx: 9 },
]
);
drop(search_context);
drop(hardware_counter);
// No difference to previous calculation because it's the same amount of score
// calculations when increasing the "top" parameter.
assert_eq!(accumulator.get_cpu(), cpu_cost);
}
#[test]
fn pruning_single_to_end_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0)].into());
builder.add(2, [(1, 20.0)].into());
builder.add(3, [(1, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
1,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
// assuming we have gathered enough results and want to prune the longest posting list
assert!(search_context.prune_longest_posting_list(30.0));
// the longest posting list was pruned to the end
assert_eq!(search_context.posting_list_len(0), 0);
}
#[test]
fn pruning_multi_to_end_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0)].into());
builder.add(2, [(1, 20.0)].into());
builder.add(3, [(1, 30.0)].into());
builder.add(5, [(3, 10.0)].into());
builder.add(6, [(2, 20.0), (3, 20.0)].into());
builder.add(7, [(2, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
1,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
// assuming we have gathered enough results and want to prune the longest posting list
assert!(search_context.prune_longest_posting_list(30.0));
// the longest posting list was pruned to the end
assert_eq!(search_context.posting_list_len(0), 0);
}
#[test]
fn pruning_multi_under_prune_test<I: InvertedIndex>() {
if !I::Iter::reliable_max_next_weight() {
return;
}
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0)].into());
builder.add(2, [(1, 20.0)].into());
builder.add(3, [(1, 20.0)].into());
builder.add(4, [(1, 10.0)].into());
builder.add(5, [(3, 10.0)].into());
builder.add(6, [(1, 20.0), (2, 20.0), (3, 20.0)].into());
builder.add(7, [(1, 40.0), (2, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
1,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
// one would expect this to prune up to `6` but it does not happen it practice because we are under pruning by design
// we should actually check the best score up to `6` - 1 only instead of the max possible score (40.0)
assert!(!search_context.prune_longest_posting_list(30.0));
assert!(search_context.prune_longest_posting_list(40.0));
// the longest posting list was pruned to the end
assert_eq!(
search_context.posting_list_len(0),
2 // 6, 7
);
}
/// Generates a random inverted index with `num_vectors` vectors
#[allow(dead_code)]
fn random_inverted_index<R: Rng + ?Sized>(
rnd_gen: &mut R,
num_vectors: u32,
max_sparse_dimension: usize,
) -> InvertedIndexRam {
let mut inverted_index_ram = InvertedIndexRam::empty();
for i in 1..=num_vectors {
let SparseVector { indices, values } =
random_sparse_vector(rnd_gen, max_sparse_dimension);
let vector = RemappedSparseVector::new(indices, values).unwrap();
inverted_index_ram.upsert(i, vector, None);
}
inverted_index_ram
}
#[test]
fn promote_longest_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (3, 20.0)].into());
builder.add(3, [(2, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
3,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
assert_eq!(search_context.posting_list_len(0), 2);
search_context.promote_longest_posting_lists_to_the_front();
assert_eq!(search_context.posting_list_len(0), 3);
}
#[test]
fn plain_search_all_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (3, 20.0)].into());
builder.add(3, [(1, 30.0), (3, 30.0)].into());
builder.build()
});
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 2, 3],
values: vec![1.0, 1.0, 1.0],
},
3,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let scores = search_context.plain_search(&[1, 3, 2]);
assert_eq!(
round_scores::<I>(scores),
vec![
ScoredPointOffset {
idx: 3,
score: 60.0
},
ScoredPointOffset {
idx: 2,
score: 40.0
},
ScoredPointOffset {
idx: 1,
score: 30.0
},
]
);
drop(search_context);
drop(hardware_counter);
assert!(accumulator.get_cpu() > 0);
if index.index.is_on_disk() {
assert!(accumulator.get_vector_io_read() > 0);
}
}
#[test]
fn plain_search_gap_test<I: InvertedIndex>() {
let index = TestIndex::<I>::from_ram({
let mut builder = InvertedIndexBuilder::new();
builder.add(1, [(1, 10.0), (2, 10.0), (3, 10.0)].into());
builder.add(2, [(1, 20.0), (3, 20.0)].into());
builder.add(3, [(2, 30.0), (3, 30.0)].into());
builder.build()
});
// query vector has a gap for dimension 2
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let mut search_context = SearchContext::new(
RemappedSparseVector {
indices: vec![1, 3],
values: vec![1.0, 1.0],
},
3,
&index.index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let scores = search_context.plain_search(&[1, 2, 3]);
assert_eq!(
round_scores::<I>(scores),
vec![
ScoredPointOffset {
idx: 2,
score: 40.0
},
ScoredPointOffset {
idx: 3,
score: 30.0 // the dimension 2 did not contribute to the score
},
ScoredPointOffset {
idx: 1,
score: 20.0 // the dimension 2 did not contribute to the score
},
]
);
drop(search_context);
drop(hardware_counter);
assert!(accumulator.get_cpu() > 0);
if index.index.is_on_disk() {
assert!(accumulator.get_vector_io_read() > 0);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/tests/mod.rs | lib/sparse/src/index/tests/mod.rs | mod common;
mod hw_counter_test;
mod indexed_vs_plain_test;
mod search_context_tests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/tests/indexed_vs_plain_test.rs | lib/sparse/src/index/tests/indexed_vs_plain_test.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::index::inverted_index::InvertedIndex;
use crate::index::search_context::SearchContext;
use crate::index::tests::common::{build_index, get_pooled_scores, match_all};
fn query<I: InvertedIndex>(index: &I, query: RemappedSparseVector) {
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let top = 10;
let mut search_context = SearchContext::new(
query.clone(),
top,
index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let result = search_context.search(&match_all);
let docs: Vec<_> = result.iter().map(|x| x.idx).collect();
let mut search_context = SearchContext::new(
query,
top,
index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let plain_result = search_context.plain_search(&docs);
assert_eq!(result, plain_result);
}
/// Run regular search and then palin search and compare the results
#[test]
fn test_search_vs_plain() {
let count = 1000;
let density = 32;
let vocab1 = 32;
let vocab2 = 512;
// Expected posting length = count * density / vocab1 / 2 = 1000 * 32 / 32 / 2 = 500
// Expected posting length = count * density / vocab2 / 2 = 1000 * 32 / 512 / 2 = 62.5
let index = build_index::<f32>(count, density, vocab1, vocab2);
let freq_query = RemappedSparseVector {
indices: vec![1],
values: vec![1.0],
};
let freq_query2 = RemappedSparseVector {
indices: vec![0, 1, 2],
values: vec![1.0, 1.0, 1.0],
};
let infreq_query = RemappedSparseVector {
indices: vec![100],
values: vec![1.0],
};
let infreq_query2 = RemappedSparseVector {
indices: vec![101, 102, 103],
values: vec![1.0, 1.0, 1.0],
};
query(&index.index, freq_query);
query(&index.index, freq_query2);
query(&index.index, infreq_query);
query(&index.index, infreq_query2);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/tests/common.rs | lib/sparse/src/index/tests/common.rs | use std::borrow::Cow;
use std::sync::OnceLock;
use common::types::PointOffsetType;
use rand::SeedableRng;
use tempfile::TempDir;
use crate::common::scores_memory_pool::{PooledScoresHandle, ScoresMemoryPool};
use crate::common::sparse_vector::RemappedSparseVector;
use crate::common::types::{DimOffset, Weight};
use crate::index::inverted_index::InvertedIndex;
use crate::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use crate::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use crate::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
static TEST_SCORES_POOL: OnceLock<ScoresMemoryPool> = OnceLock::new();
pub fn get_pooled_scores() -> PooledScoresHandle<'static> {
TEST_SCORES_POOL
.get_or_init(ScoresMemoryPool::default)
.get()
}
pub struct TestIndex<I: InvertedIndex> {
pub index: I,
_temp_dir: TempDir,
}
impl<I: InvertedIndex> TestIndex<I> {
fn from_ram(ram_index: InvertedIndexRam) -> Self {
let temp_dir = tempfile::Builder::new()
.prefix("test_index_dir")
.tempdir()
.unwrap();
TestIndex {
index: I::from_ram_index(Cow::Owned(ram_index), &temp_dir).unwrap(),
_temp_dir: temp_dir,
}
}
}
pub fn random_sparse_vector<R: rand::Rng>(
rnd_gen: &mut R,
density: usize,
vocab1: usize,
vocab2: usize,
) -> RemappedSparseVector {
let mut indices = vec![];
let mut values = vec![];
let value_range = -0.0..2.0;
for _ in 0..density {
loop {
let index = if rnd_gen.random_bool(0.5) {
rnd_gen.random_range(0..vocab1)
} else {
rnd_gen.random_range(vocab1..(vocab1 + vocab2))
};
if indices.contains(&(index as DimOffset)) {
continue;
}
let value = rnd_gen.random_range(value_range.clone());
indices.push(index as DimOffset);
values.push(value);
break;
}
}
RemappedSparseVector { indices, values }
}
pub fn generate_sparse_index<W, R>(
rnd: &mut R,
count: usize,
density: usize,
vocab1: usize,
vocab2: usize,
) -> TestIndex<InvertedIndexCompressedMmap<W>>
where
W: Weight + 'static,
R: rand::Rng,
{
let mut builder = InvertedIndexBuilder::new();
for i in 0..count {
let vector = random_sparse_vector(rnd, density, vocab1, vocab2);
builder.add(i as PointOffsetType, vector);
}
TestIndex::from_ram(builder.build())
}
pub fn build_index<W>(
count: usize,
density: usize,
vocab1: usize,
vocab2: usize,
) -> TestIndex<InvertedIndexCompressedMmap<W>>
where
W: Weight + 'static,
{
let seed = 42;
let mut rnd_gen = rand::rngs::StdRng::seed_from_u64(seed);
generate_sparse_index::<W, _>(&mut rnd_gen, count, density, vocab1, vocab2)
}
pub fn match_all(_p: PointOffsetType) -> bool {
true
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/index/tests/hw_counter_test.rs | lib/sparse/src/index/tests/hw_counter_test.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::types::PointOffsetType;
use itertools::Itertools;
use crate::common::sparse_vector::RemappedSparseVector;
use crate::index::inverted_index::InvertedIndex;
use crate::index::search_context::SearchContext;
use crate::index::tests::common::{build_index, get_pooled_scores, match_all};
fn do_search<I: InvertedIndex>(index: &I, query: RemappedSparseVector) -> HwMeasurementAcc {
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let top = 10;
let mut search_context = SearchContext::new(
query,
top,
index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let result = search_context.search(&match_all);
// there might be less than `top` result
// happens if index contains less than `top` sparse vectors with indices overlapping the query indices
assert!(result.len() <= top);
accumulator
}
fn do_plain_search<I: InvertedIndex>(
index: &I,
query: RemappedSparseVector,
docs: &[PointOffsetType],
) -> HwMeasurementAcc {
let is_stopped = AtomicBool::new(false);
let accumulator = HwMeasurementAcc::new();
let hardware_counter = accumulator.get_counter_cell();
let top = 10;
let mut search_context = SearchContext::new(
query,
top,
index,
get_pooled_scores(),
&is_stopped,
&hardware_counter,
);
let result = search_context.plain_search(docs);
// there might be less than `top` result
// happens if index contains less than `top` sparse vectors with indices overlapping the query indices
assert!(result.len() <= top);
accumulator
}
#[test]
fn test_hw_counter_for_sparse_search() {
let count = 1000;
let density = 32;
let vocab1 = 32;
let vocab2 = 512;
// Expected posting length = count * density / vocab1 / 2 = 1000 * 32 / 32 / 2 = 500
// Expected posting length = count * density / vocab2 / 2 = 1000 * 32 / 512 / 2 = 62.5
let index_f32 = build_index::<f32>(count, density, vocab1, vocab2);
let index_f16 = build_index::<half::f16>(count, density, vocab1, vocab2);
let index_u8 = build_index::<u8>(count, density, vocab1, vocab2);
let freq_query = RemappedSparseVector {
indices: vec![1],
values: vec![1.0],
};
let freq_query2 = RemappedSparseVector {
indices: vec![0, 1, 2],
values: vec![1.0, 1.0, 1.0],
};
let infrequent_query = RemappedSparseVector {
indices: vec![100],
values: vec![1.0],
};
let infrequent_query2 = RemappedSparseVector {
indices: vec![0, 100, 200],
values: vec![1.0, 1.0, 1.0],
};
let acc_f32_freq_1 = do_search(&index_f32.index, freq_query.clone());
let acc_f16_freq_1 = do_search(&index_f16.index, freq_query.clone());
let acc_u8_freq_1 = do_search(&index_u8.index, freq_query.clone());
let acc_f32_freq_3 = do_search(&index_f32.index, freq_query2.clone());
let acc_f16_freq_3 = do_search(&index_f16.index, freq_query2.clone());
let acc_u8_freq_3 = do_search(&index_u8.index, freq_query2.clone());
let acc_f32_infreq_1 = do_search(&index_f32.index, infrequent_query.clone());
let acc_f16_infreq_1 = do_search(&index_f16.index, infrequent_query.clone());
let acc_u8_infreq_1 = do_search(&index_u8.index, infrequent_query.clone());
let acc_f32_infreq_3 = do_search(&index_f32.index, infrequent_query2.clone());
let acc_f16_infreq_3 = do_search(&index_f16.index, infrequent_query2.clone());
let acc_u8_infreq_3 = do_search(&index_u8.index, infrequent_query2.clone());
// Higher precision floats cost more CPU and IO
assert!(acc_f32_freq_1.get_cpu() > acc_f16_freq_1.get_cpu());
assert!(acc_f16_freq_1.get_cpu() > acc_u8_freq_1.get_cpu());
assert!(acc_f32_freq_1.get_vector_io_read() > acc_f16_freq_1.get_vector_io_read());
assert!(acc_f16_freq_1.get_vector_io_read() > acc_u8_freq_1.get_vector_io_read());
// More indices are more expensive than less indices
assert!(acc_f32_freq_3.get_cpu() > acc_f32_freq_1.get_cpu());
assert!(acc_f16_freq_3.get_cpu() > acc_f16_freq_1.get_cpu());
assert!(acc_u8_freq_3.get_cpu() > acc_u8_freq_1.get_cpu());
assert!(acc_f32_freq_3.get_vector_io_read() > acc_f32_freq_1.get_vector_io_read());
assert!(acc_f16_freq_3.get_vector_io_read() > acc_f16_freq_1.get_vector_io_read());
assert!(acc_u8_freq_3.get_vector_io_read() > acc_u8_freq_1.get_vector_io_read());
// Frequent terms are more expensive than infrequent terms
assert!(acc_f32_freq_1.get_cpu() > acc_f32_infreq_1.get_cpu());
assert!(acc_f16_freq_1.get_cpu() > acc_f16_infreq_1.get_cpu());
assert!(acc_u8_freq_1.get_cpu() > acc_u8_infreq_1.get_cpu());
assert!(acc_f32_freq_1.get_vector_io_read() > acc_f32_infreq_1.get_vector_io_read());
assert!(acc_f16_freq_1.get_vector_io_read() > acc_f16_infreq_1.get_vector_io_read());
assert!(acc_u8_freq_1.get_vector_io_read() > acc_u8_infreq_1.get_vector_io_read());
// More indices are more expensive than less indices
assert!(acc_f32_infreq_3.get_cpu() > acc_f32_infreq_1.get_cpu());
assert!(acc_f16_infreq_3.get_cpu() > acc_f16_infreq_1.get_cpu());
assert!(acc_u8_infreq_3.get_cpu() > acc_u8_infreq_1.get_cpu());
assert!(acc_f32_infreq_3.get_vector_io_read() > acc_f32_infreq_1.get_vector_io_read());
assert!(acc_f16_infreq_3.get_vector_io_read() > acc_f16_infreq_1.get_vector_io_read());
assert!(acc_u8_infreq_3.get_vector_io_read() > acc_u8_infreq_1.get_vector_io_read());
}
#[test]
fn test_hw_counter_for_plain_sparse_search() {
let count = 1000;
let density = 32;
let vocab1 = 32;
let vocab2 = 512;
// Expected posting length = count * density / vocab1 / 2 = 1000 * 32 / 32 / 2 = 500
// Expected posting length = count * density / vocab2 / 2 = 1000 * 32 / 512 / 2 = 62.5
let index_f32 = build_index::<f32>(count, density, vocab1, vocab2);
let index_f16 = build_index::<half::f16>(count, density, vocab1, vocab2);
let index_u8 = build_index::<u8>(count, density, vocab1, vocab2);
let documents_to_score = 500;
// Plain search requires list of document ids
// Generate random document ids
let document_ids: Vec<PointOffsetType> = (0..documents_to_score)
.map(|_| rand::random::<PointOffsetType>() % count as PointOffsetType)
.unique()
.collect();
let freq_query = RemappedSparseVector {
indices: vec![1],
values: vec![1.0],
};
let freq_query2 = RemappedSparseVector {
indices: vec![0, 1, 2],
values: vec![1.0, 1.0, 1.0],
};
let infreq_query = RemappedSparseVector {
indices: vec![100],
values: vec![1.0],
};
let infreq_query2 = RemappedSparseVector {
indices: vec![101, 102, 103],
values: vec![1.0, 1.0, 1.0],
};
let acc_f32_freq_1 = do_plain_search(&index_f32.index, freq_query.clone(), &document_ids);
let acc_f16_freq_1 = do_plain_search(&index_f16.index, freq_query.clone(), &document_ids);
let acc_u8_freq_1 = do_plain_search(&index_u8.index, freq_query.clone(), &document_ids);
let acc_f32_freq_3 = do_plain_search(&index_f32.index, freq_query2.clone(), &document_ids);
let acc_f16_freq_3 = do_plain_search(&index_f16.index, freq_query2.clone(), &document_ids);
let acc_u8_freq_3 = do_plain_search(&index_u8.index, freq_query2.clone(), &document_ids);
let acc_f32_infreq_1 = do_plain_search(&index_f32.index, infreq_query.clone(), &document_ids);
let acc_f16_infreq_1 = do_plain_search(&index_f16.index, infreq_query.clone(), &document_ids);
let acc_u8_infreq_1 = do_plain_search(&index_u8.index, infreq_query.clone(), &document_ids);
let acc_f32_infreq_3 = do_plain_search(&index_f32.index, infreq_query2.clone(), &document_ids);
let acc_f16_infreq_3 = do_plain_search(&index_f16.index, infreq_query2.clone(), &document_ids);
let acc_u8_infreq_3 = do_plain_search(&index_u8.index, infreq_query2.clone(), &document_ids);
// Higher precision floats cost more IO, but might have same CPU
assert!(acc_f32_freq_1.get_cpu() >= acc_f16_freq_1.get_cpu());
assert!(acc_f16_freq_1.get_cpu() >= acc_u8_freq_1.get_cpu());
assert!(acc_f32_freq_1.get_vector_io_read() > acc_f16_freq_1.get_vector_io_read());
assert!(acc_f16_freq_1.get_vector_io_read() > acc_u8_freq_1.get_vector_io_read());
// More indices are more expensive than less indices
assert!(acc_f32_freq_3.get_cpu() > acc_f32_freq_1.get_cpu());
assert!(acc_f16_freq_3.get_cpu() > acc_f16_freq_1.get_cpu());
assert!(acc_u8_freq_3.get_cpu() > acc_u8_freq_1.get_cpu());
assert!(acc_f32_freq_3.get_vector_io_read() > acc_f32_freq_1.get_vector_io_read());
assert!(acc_f16_freq_3.get_vector_io_read() > acc_f16_freq_1.get_vector_io_read());
assert!(acc_u8_freq_3.get_vector_io_read() > acc_u8_freq_1.get_vector_io_read());
// Frequent terms are more expensive than infrequent terms
assert!(acc_f32_freq_1.get_cpu() > acc_f32_infreq_1.get_cpu());
assert!(acc_f16_freq_1.get_cpu() > acc_f16_infreq_1.get_cpu());
assert!(acc_u8_freq_1.get_cpu() > acc_u8_infreq_1.get_cpu());
assert!(acc_f32_freq_1.get_vector_io_read() > acc_f32_infreq_1.get_vector_io_read());
assert!(acc_f16_freq_1.get_vector_io_read() > acc_f16_infreq_1.get_vector_io_read());
assert!(acc_u8_freq_1.get_vector_io_read() > acc_u8_infreq_1.get_vector_io_read());
// More indices are more expensive than less indices
assert!(acc_f32_infreq_3.get_cpu() > acc_f32_infreq_1.get_cpu());
assert!(acc_f16_infreq_3.get_cpu() > acc_f16_infreq_1.get_cpu());
assert!(acc_u8_infreq_3.get_cpu() > acc_u8_infreq_1.get_cpu());
assert!(acc_f32_infreq_3.get_vector_io_read() > acc_f32_infreq_1.get_vector_io_read());
assert!(acc_f16_infreq_3.get_vector_io_read() > acc_f16_infreq_1.get_vector_io_read());
assert!(acc_u8_infreq_3.get_vector_io_read() > acc_u8_infreq_1.get_vector_io_read());
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/common/scores_memory_pool.rs | lib/sparse/src/common/scores_memory_pool.rs | use common::defaults::POOL_KEEP_LIMIT;
use common::types::ScoreType;
use parking_lot::Mutex;
type PooledScores = Vec<ScoreType>;
#[derive(Debug)]
pub struct PooledScoresHandle<'a> {
pool: &'a ScoresMemoryPool,
pub scores: PooledScores,
}
impl<'a> PooledScoresHandle<'a> {
fn new(pool: &'a ScoresMemoryPool, scores: PooledScores) -> Self {
PooledScoresHandle { pool, scores }
}
}
impl Drop for PooledScoresHandle<'_> {
fn drop(&mut self) {
self.pool.return_back(std::mem::take(&mut self.scores));
}
}
#[derive(Debug)]
pub struct ScoresMemoryPool {
pool: Mutex<Vec<PooledScores>>,
}
impl ScoresMemoryPool {
pub fn new() -> Self {
ScoresMemoryPool {
pool: Mutex::new(Vec::with_capacity(*POOL_KEEP_LIMIT)),
}
}
pub fn get(&self) -> PooledScoresHandle<'_> {
match self.pool.lock().pop() {
None => PooledScoresHandle::new(self, vec![]),
Some(data) => PooledScoresHandle::new(self, data),
}
}
fn return_back(&self, data: PooledScores) {
let mut pool = self.pool.lock();
if pool.len() < *POOL_KEEP_LIMIT {
pool.push(data);
}
}
}
impl Default for ScoresMemoryPool {
fn default() -> Self {
Self::new()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/common/types.rs | lib/sparse/src/common/types.rs | use std::fmt::Debug;
use half::slice::HalfFloatSliceExt;
use itertools::{Itertools, MinMaxResult};
pub type DimOffset = u32;
pub type DimId = u32;
pub type DimId64 = u64;
pub type DimWeight = f32;
pub trait Weight: PartialEq + Copy + Debug + 'static {
type QuantizationParams: Copy + PartialEq + Debug;
fn quantization_params_for(
values: impl ExactSizeIterator<Item = DimWeight> + Clone,
) -> Self::QuantizationParams;
fn from_f32(params: Self::QuantizationParams, value: f32) -> Self;
fn to_f32(self, params: Self::QuantizationParams) -> f32;
fn into_f32_slice<'a>(
params: Self::QuantizationParams,
weights: &'a [Self],
buffer: &'a mut [f32],
) -> &'a [f32];
}
impl Weight for f32 {
type QuantizationParams = ();
#[inline]
fn quantization_params_for(_values: impl ExactSizeIterator<Item = DimWeight> + Clone) {}
#[inline]
fn from_f32(_: (), value: f32) -> Self {
value
}
#[inline]
fn to_f32(self, _: ()) -> f32 {
self
}
#[inline]
fn into_f32_slice<'a>(_: (), weights: &'a [Self], _buffer: &'a mut [f32]) -> &'a [f32] {
// Zero-copy conversion, ignore buffer
weights
}
}
impl Weight for half::f16 {
type QuantizationParams = ();
#[inline]
fn quantization_params_for(_values: impl ExactSizeIterator<Item = DimWeight> + Clone) {}
#[inline]
fn from_f32(_: (), value: f32) -> Self {
half::f16::from_f32(value)
}
fn to_f32(self, _: ()) -> f32 {
self.to_f32()
}
#[inline]
fn into_f32_slice<'a>(_: (), weights: &'a [Self], buffer: &'a mut [f32]) -> &'a [f32] {
weights.convert_to_f32_slice(buffer);
buffer
}
}
#[cfg(feature = "testing")]
impl Weight for u8 {
type QuantizationParams = ();
#[inline]
fn quantization_params_for(_values: impl ExactSizeIterator<Item = DimWeight> + Clone) {}
#[inline]
fn from_f32(_: (), value: f32) -> Self {
value as u8
}
#[inline]
fn to_f32(self, _: ()) -> f32 {
f32::from(self)
}
#[inline]
fn into_f32_slice<'a>(_: (), weights: &'a [Self], buffer: &'a mut [f32]) -> &'a [f32] {
for (i, &weight) in weights.iter().enumerate() {
buffer[i] = f32::from(weight);
}
buffer
}
}
#[derive(PartialEq, Copy, Clone, Debug)]
pub struct QuantizedU8(u8);
impl From<QuantizedU8> for DimWeight {
fn from(val: QuantizedU8) -> Self {
f32::from(val.0)
}
}
#[derive(PartialEq, Default, Copy, Clone, Debug)]
pub struct QuantizedU8Params {
/// Minimum value in the range
min: f32,
/// Difference divided by 256, aka `(max - min) / 255`
diff256: f32,
}
impl Weight for QuantizedU8 {
type QuantizationParams = QuantizedU8Params;
#[inline]
fn quantization_params_for(
values: impl Iterator<Item = DimWeight>,
) -> Self::QuantizationParams {
let (min, max) = match values.minmax() {
MinMaxResult::NoElements => return QuantizedU8Params::default(),
MinMaxResult::OneElement(e) => (e, e),
MinMaxResult::MinMax(min, max) => (min, max),
};
QuantizedU8Params {
min,
diff256: (max - min) / 255.0,
}
}
#[inline]
fn from_f32(params: QuantizedU8Params, value: f32) -> Self {
QuantizedU8(
((value - params.min) / params.diff256)
.round()
.clamp(0.0, 255.0) as u8,
)
}
#[inline]
fn to_f32(self, params: QuantizedU8Params) -> f32 {
params.min + f32::from(self.0) * params.diff256
}
#[inline]
fn into_f32_slice<'a>(
params: QuantizedU8Params,
weights: &'a [Self],
buffer: &'a mut [f32],
) -> &'a [f32] {
assert_eq!(weights.len(), buffer.len());
for (i, &weight) in weights.iter().enumerate() {
buffer[i] = weight.to_f32(params);
}
buffer
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/common/mod.rs | lib/sparse/src/common/mod.rs | pub mod scores_memory_pool;
pub mod sparse_vector;
#[cfg(feature = "testing")]
pub mod sparse_vector_fixture;
pub mod types;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/common/sparse_vector_fixture.rs | lib/sparse/src/common/sparse_vector_fixture.rs | use std::ops::Range;
use rand::Rng;
use crate::common::sparse_vector::SparseVector;
const VALUE_RANGE: Range<f64> = -100.0..100.0;
// Realistic sizing based on experiences with SPLADE
const MAX_VALUES_PER_VECTOR: usize = 300;
/// Generates a non empty sparse vector
pub fn random_sparse_vector<R: Rng + ?Sized>(rnd_gen: &mut R, max_dim_size: usize) -> SparseVector {
let size = rnd_gen.random_range(1..max_dim_size);
let mut tuples: Vec<(u32, f32)> = vec![];
for i in 1..=size {
// make sure the vector is not too large (for performance reasons)
if tuples.len() == MAX_VALUES_PER_VECTOR {
break;
}
// high probability of skipping a dimension to make the vectors more sparse
let skip = rnd_gen.random_bool(0.98);
if !skip {
tuples.push((i as u32, rnd_gen.random_range(VALUE_RANGE) as f32));
}
}
// make sure we have at least one vector
if tuples.is_empty() {
tuples.push((
rnd_gen.random_range(1..max_dim_size) as u32,
rnd_gen.random_range(VALUE_RANGE) as f32,
));
}
SparseVector::try_from(tuples).unwrap()
}
/// Generates a sparse vector with all dimensions filled
pub fn random_full_sparse_vector<R: Rng + ?Sized>(
    rnd_gen: &mut R,
    max_size: usize,
) -> SparseVector {
    // One entry per dimension, indices 1..=max_size, random weights.
    let tuples: Vec<(u32, f32)> = (1..=max_size)
        .map(|dim| (dim as u32, rnd_gen.random_range(VALUE_RANGE) as f32))
        .collect();
    SparseVector::try_from(tuples).unwrap()
}
/// Generates a sparse vector with only positive values
pub fn random_positive_sparse_vector<R: Rng + ?Sized>(
    rnd_gen: &mut R,
    max_dim_size: usize,
) -> SparseVector {
    let mut vector = random_sparse_vector(rnd_gen, max_dim_size);
    // Flip every weight to its absolute value in place.
    for weight in vector.values.iter_mut() {
        *weight = weight.abs();
    }
    vector
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/src/common/sparse_vector.rs | lib/sparse/src/common/sparse_vector.rs | use std::borrow::Cow;
use std::hash::Hash;
use common::types::ScoreType;
use gridstore::Blob;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use validator::{Validate, ValidationError, ValidationErrors};
use crate::common::types::{DimId, DimOffset, DimWeight};
/// Sparse vector structure
///
/// Stored as two parallel arrays: entry `i` of the vector is the pair
/// `(indices[i], values[i])`.
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct SparseVector {
    /// Indices must be unique
    pub indices: Vec<DimId>,
    /// Values and indices must be the same length
    pub values: Vec<DimWeight>,
}
impl Hash for SparseVector {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        let Self { indices, values } = self;
        indices.hash(state);
        // Floats are not `Hash`; feed each value through the total-order wrapper.
        values
            .iter()
            .for_each(|&value| OrderedFloat(value).hash(state));
    }
}
/// Same as `SparseVector` but with `DimOffset` indices.
/// Meaning that is uses internal segment-specific indices.
///
/// Layout mirrors [`SparseVector`]: two parallel arrays of equal length.
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
pub struct RemappedSparseVector {
    /// indices must be unique
    pub indices: Vec<DimOffset>,
    /// values and indices must be the same length
    pub values: Vec<DimWeight>,
}
/// Sort two parallel arrays in place, ordered by the first array.
pub fn double_sort<T: Ord + Copy, V: Copy>(indices: &mut [T], values: &mut [V]) {
    // Fast path: nothing to do when indices are already strictly ascending.
    if indices.windows(2).all(|pair| pair[0] < pair[1]) {
        return;
    }
    // Pair up both arrays, order the pairs by index, then write both back.
    let mut pairs: Vec<(T, V)> = indices
        .iter()
        .copied()
        .zip(values.iter().copied())
        .collect();
    pairs.sort_unstable_by_key(|&(index, _)| index);
    for (slot, (index, value)) in pairs.into_iter().enumerate() {
        indices[slot] = index;
        values[slot] = value;
    }
}
/// Dot product of two index-sorted sparse vectors given as parallel slices.
///
/// Returns `None` when the vectors share no dimension at all.
pub fn score_vectors<T: Ord + Eq>(
    self_indices: &[T],
    self_values: &[DimWeight],
    other_indices: &[T],
    other_values: &[DimWeight],
) -> Option<ScoreType> {
    let mut dot_product = 0.0;
    // Becomes true once at least one common dimension was found.
    let mut found_common_dim = false;
    let (mut left, mut right) = (0, 0);
    // Two-pointer walk over both sorted index lists.
    while left < self_indices.len() && right < other_indices.len() {
        match self_indices[left].cmp(&other_indices[right]) {
            std::cmp::Ordering::Less => left += 1,
            std::cmp::Ordering::Greater => right += 1,
            std::cmp::Ordering::Equal => {
                found_common_dim = true;
                dot_product += self_values[left] * other_values[right];
                left += 1;
                right += 1;
            }
        }
    }
    found_common_dim.then_some(dot_product)
}
impl RemappedSparseVector {
    /// Build a vector, validating the length/uniqueness invariants.
    pub fn new(indices: Vec<DimId>, values: Vec<DimWeight>) -> Result<Self, ValidationErrors> {
        let vector = Self { indices, values };
        vector.validate().map(|()| vector)
    }

    /// Sort indices (and their paired values) ascending, in place.
    pub fn sort_by_indices(&mut self) {
        double_sort(&mut self.indices, &mut self.values);
    }

    /// Check if this vector is sorted by indices.
    pub fn is_sorted(&self) -> bool {
        self.indices.windows(2).all(|pair| pair[0] < pair[1])
    }

    /// Score this vector against another vector using dot product.
    /// Warning: Expects both vectors to be sorted by indices.
    ///
    /// Return None if the vectors do not overlap.
    pub fn score(&self, other: &RemappedSparseVector) -> Option<ScoreType> {
        debug_assert!(self.is_sorted());
        debug_assert!(other.is_sorted());
        score_vectors(&self.indices, &self.values, &other.indices, &other.values)
    }

    /// Returns the number of elements in the vector.
    pub fn len(&self) -> usize {
        self.indices.len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl SparseVector {
    /// Build a vector, validating the length/uniqueness invariants.
    pub fn new(indices: Vec<DimId>, values: Vec<DimWeight>) -> Result<Self, ValidationErrors> {
        let vector = SparseVector { indices, values };
        vector.validate().map(|()| vector)
    }

    /// Sort this vector by indices.
    ///
    /// Sorting is required for scoring and overlap checks.
    pub fn sort_by_indices(&mut self) {
        double_sort(&mut self.indices, &mut self.values);
    }

    /// Check if this vector is sorted by indices.
    pub fn is_sorted(&self) -> bool {
        self.indices.windows(2).all(|pair| pair[0] < pair[1])
    }

    /// Check if this vector is empty.
    pub fn is_empty(&self) -> bool {
        self.indices.is_empty() && self.values.is_empty()
    }

    /// Returns the number of elements in the vector.
    pub fn len(&self) -> usize {
        self.indices.len()
    }

    /// Score this vector against another vector using dot product.
    /// Warning: Expects both vectors to be sorted by indices.
    ///
    /// Return None if the vectors do not overlap.
    pub fn score(&self, other: &SparseVector) -> Option<ScoreType> {
        debug_assert!(self.is_sorted());
        debug_assert!(other.is_sorted());
        score_vectors(&self.indices, &self.values, &other.indices, &other.values)
    }

    /// Construct a new vector that is the result of performing all indices-wise operations.
    /// Automatically sort input vectors if necessary.
    pub fn combine_aggregate(
        &self,
        other: &SparseVector,
        op: impl Fn(DimWeight, DimWeight) -> DimWeight,
    ) -> Self {
        // Work on sorted views; clone only when a side needs sorting.
        let lhs: Cow<SparseVector> = if self.is_sorted() {
            Cow::Borrowed(self)
        } else {
            let mut sorted = self.clone();
            sorted.sort_by_indices();
            Cow::Owned(sorted)
        };
        assert!(lhs.is_sorted());
        let rhs: Cow<SparseVector> = if other.is_sorted() {
            Cow::Borrowed(other)
        } else {
            let mut sorted = other.clone();
            sorted.sort_by_indices();
            Cow::Owned(sorted)
        };
        assert!(rhs.is_sorted());

        // Two-pointer merge; a dimension missing on one side contributes 0.0.
        let mut result = SparseVector::default();
        let (mut a, mut b) = (0, 0);
        while a < lhs.indices.len() && b < rhs.indices.len() {
            match lhs.indices[a].cmp(&rhs.indices[b]) {
                std::cmp::Ordering::Less => {
                    result.indices.push(lhs.indices[a]);
                    result.values.push(op(lhs.values[a], 0.0));
                    a += 1;
                }
                std::cmp::Ordering::Greater => {
                    result.indices.push(rhs.indices[b]);
                    result.values.push(op(0.0, rhs.values[b]));
                    b += 1;
                }
                std::cmp::Ordering::Equal => {
                    result.indices.push(lhs.indices[a]);
                    result.values.push(op(lhs.values[a], rhs.values[b]));
                    a += 1;
                    b += 1;
                }
            }
        }
        // Drain whichever side still has entries left.
        while a < lhs.indices.len() {
            result.indices.push(lhs.indices[a]);
            result.values.push(op(lhs.values[a], 0.0));
            a += 1;
        }
        while b < rhs.indices.len() {
            result.indices.push(rhs.indices[b]);
            result.values.push(op(0.0, rhs.values[b]));
            b += 1;
        }
        debug_assert!(result.is_sorted());
        debug_assert!(result.validate().is_ok());
        result
    }

    /// Create [RemappedSparseVector] from this vector in a naive way. Only suitable for testing.
    #[cfg(feature = "testing")]
    pub fn into_remapped(self) -> RemappedSparseVector {
        let SparseVector { indices, values } = self;
        RemappedSparseVector { indices, values }
    }
}
impl TryFrom<Vec<(u32, f32)>> for RemappedSparseVector {
    type Error = ValidationErrors;

    /// Split `(index, value)` pairs into parallel arrays, then validate.
    fn try_from(tuples: Vec<(u32, f32)>) -> Result<Self, Self::Error> {
        let mut indices = Vec::with_capacity(tuples.len());
        let mut values = Vec::with_capacity(tuples.len());
        for (index, value) in tuples {
            indices.push(index);
            values.push(value);
        }
        RemappedSparseVector::new(indices, values)
    }
}
impl TryFrom<Vec<(u32, f32)>> for SparseVector {
    type Error = ValidationErrors;

    /// Split `(index, value)` pairs into parallel arrays, then validate.
    fn try_from(tuples: Vec<(u32, f32)>) -> Result<Self, Self::Error> {
        let mut indices = Vec::with_capacity(tuples.len());
        let mut values = Vec::with_capacity(tuples.len());
        for (index, value) in tuples {
            indices.push(index);
            values.push(value);
        }
        SparseVector::new(indices, values)
    }
}
impl Blob for SparseVector {
    /// Serialize with bincode; panics only on serializer failure.
    fn to_bytes(&self) -> Vec<u8> {
        bincode::serialize(self).expect("Sparse vector serialization should not fail")
    }

    /// Deserialize bytes previously produced by [`Blob::to_bytes`].
    fn from_bytes(data: &[u8]) -> Self {
        bincode::deserialize(data).expect("Sparse vector deserialization should not fail")
    }
}
#[cfg(test)]
impl<const N: usize> From<[(u32, f32); N]> for SparseVector {
    /// Test-only convenience: build a vector straight from index/value pairs.
    fn from(value: [(u32, f32); N]) -> Self {
        Self::try_from(Vec::from(value)).unwrap()
    }
}
#[cfg(test)]
impl<const N: usize> From<[(u32, f32); N]> for RemappedSparseVector {
    /// Test-only convenience: build a vector straight from index/value pairs.
    fn from(value: [(u32, f32); N]) -> Self {
        Self::try_from(Vec::from(value)).unwrap()
    }
}
impl Validate for SparseVector {
    // Delegates to the validation logic shared with `RemappedSparseVector`.
    fn validate(&self) -> Result<(), ValidationErrors> {
        validate_sparse_vector_impl(&self.indices, &self.values)
    }
}
impl Validate for RemappedSparseVector {
    // Delegates to the validation logic shared with `SparseVector`.
    fn validate(&self) -> Result<(), ValidationErrors> {
        validate_sparse_vector_impl(&self.indices, &self.values)
    }
}
/// Shared validation: indices and values must have equal length, and
/// indices must be unique. Note that *sorted* order is NOT required here.
pub fn validate_sparse_vector_impl<T: Clone + Eq + Hash>(
    indices: &[T],
    values: &[DimWeight],
) -> Result<(), ValidationErrors> {
    let mut errors = ValidationErrors::default();
    // Parallel-array invariant: exactly one value per index.
    if values.len() != indices.len() {
        errors.add(
            "values",
            ValidationError::new("must be the same length as indices"),
        );
    }
    // Duplicate detection: fewer distinct indices than entries means repeats.
    let distinct = indices.iter().unique().count();
    if distinct != indices.len() {
        errors.add("indices", ValidationError::new("must be unique"));
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_score_aligned_same_size() {
        let left = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        let right = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        // 1*1 + 2*2 + 3*3
        assert_eq!(left.score(&right), Some(14.0));
    }

    #[test]
    fn test_score_not_aligned_same_size() {
        let left = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        let right = RemappedSparseVector::new(vec![2, 3, 4], vec![2.0, 3.0, 4.0]).unwrap();
        // Overlap only on dims 2 and 3: 2*2 + 3*3
        assert_eq!(left.score(&right), Some(13.0));
    }

    #[test]
    fn test_score_aligned_different_size() {
        let left = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        let right = RemappedSparseVector::new(vec![1, 2, 3, 4], vec![1.0, 2.0, 3.0, 4.0]).unwrap();
        assert_eq!(left.score(&right), Some(14.0));
    }

    #[test]
    fn test_score_not_aligned_different_size() {
        let left = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        let right = RemappedSparseVector::new(vec![2, 3, 4, 5], vec![2.0, 3.0, 4.0, 5.0]).unwrap();
        assert_eq!(left.score(&right), Some(13.0));
    }

    #[test]
    fn test_score_no_overlap() {
        let left = RemappedSparseVector::new(vec![1, 2, 3], vec![1.0, 2.0, 3.0]).unwrap();
        let right = RemappedSparseVector::new(vec![4, 5, 6], vec![2.0, 3.0, 4.0]).unwrap();
        assert!(left.score(&right).is_none());
    }

    #[test]
    fn validation_test() {
        let fully_empty = SparseVector::new(vec![], vec![]);
        assert!(fully_empty.is_ok());
        assert!(fully_empty.unwrap().is_empty());

        // Mismatched lengths are rejected.
        let different_length = SparseVector::new(vec![1, 2, 3], vec![1.0, 2.0]);
        assert!(different_length.is_err());

        // Unsorted input is accepted (sorting is the caller's responsibility)...
        let not_sorted = SparseVector::new(vec![1, 3, 2], vec![1.0, 2.0, 3.0]);
        assert!(not_sorted.is_ok());

        // ...but duplicate indices are not.
        let not_unique = SparseVector::new(vec![1, 2, 3, 2], vec![1.0, 2.0, 3.0, 4.0]);
        assert!(not_unique.is_err());
    }

    #[test]
    fn sorting_test() {
        let mut vector = SparseVector::new(vec![1, 3, 2], vec![1.0, 2.0, 3.0]).unwrap();
        assert!(!vector.is_sorted());
        vector.sort_by_indices();
        assert!(vector.is_sorted());
    }

    #[test]
    fn combine_aggregate_test() {
        // Test with missing index
        let first = SparseVector::new(vec![1, 2, 3], vec![0.1, 0.2, 0.3]).unwrap();
        let second = SparseVector::new(vec![2, 3, 4], vec![2.0, 3.0, 4.0]).unwrap();
        let sum = first.combine_aggregate(&second, |x, y| x + 2.0 * y);
        assert_eq!(sum.indices, vec![1, 2, 3, 4]);
        assert_eq!(sum.values, vec![0.1, 4.2, 6.3, 8.0]);

        // reverse arguments
        let sum = second.combine_aggregate(&first, |x, y| x + 2.0 * y);
        assert_eq!(sum.indices, vec![1, 2, 3, 4]);
        assert_eq!(sum.values, vec![0.2, 2.4, 3.6, 4.0]);

        // Test with non-sorted input
        let first = SparseVector::new(vec![1, 2, 3], vec![0.1, 0.2, 0.3]).unwrap();
        let second = SparseVector::new(vec![4, 2, 3], vec![4.0, 2.0, 3.0]).unwrap();
        let sum = first.combine_aggregate(&second, |x, y| x + 2.0 * y);
        assert_eq!(sum.indices, vec![1, 2, 3, 4]);
        assert_eq!(sum.values, vec![0.1, 4.2, 6.3, 8.0]);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/benches/prof.rs | lib/sparse/benches/prof.rs | use std::io::Write;
use std::os::raw::c_int;
use std::path::Path;
use criterion::profiler::Profiler;
use fs_err as fs;
use fs_err::File;
use pprof::ProfilerGuard;
use pprof::flamegraph::TextTruncateDirection;
use pprof::protos::Message;
/// Small custom profiler that can be used with Criterion to create a flamegraph for benchmarks.
/// Also see [the Criterion documentation on this][custom-profiler].
///
/// ## Example on how to enable the custom profiler:
///
/// ```
/// mod perf;
/// use perf::FlamegraphProfiler;
///
/// fn fibonacci_profiled(criterion: &mut Criterion) {
/// // Use the criterion struct as normal here.
/// }
///
/// fn custom() -> Criterion {
/// Criterion::default().with_profiler(FlamegraphProfiler::new())
/// }
///
/// criterion_group! {
/// name = benches;
/// config = custom();
/// targets = fibonacci_profiled
/// }
/// ```
///
/// The neat thing about this is that it will sample _only_ the benchmark, and not other stuff like
/// the setup process.
///
/// Further, it will only kick in if `--profile-time <time>` is passed to the benchmark binary.
/// A flamegraph will be created for each individual benchmark in its report directory under
/// `profile/flamegraph.svg`.
///
/// [custom-profiler]: https://bheisler.github.io/criterion.rs/book/user_guide/profiling.html#implementing-in-process-profiling-hooks
pub struct FlamegraphProfiler<'a> {
    /// Sampling frequency (Hz) handed to `ProfilerGuard::new` on start.
    frequency: c_int,
    /// Guard of the profiling session in progress, if one is running.
    active_profiler: Option<ProfilerGuard<'a>>,
}
impl FlamegraphProfiler<'_> {
pub fn new(frequency: c_int) -> Self {
FlamegraphProfiler {
frequency,
active_profiler: None,
}
}
}
impl Profiler for FlamegraphProfiler<'_> {
    fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {
        // Begin sampling; the guard stays alive until `stop_profiling` takes it.
        self.active_profiler = Some(ProfilerGuard::new(self.frequency).unwrap());
    }

    fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) {
        fs::create_dir_all(benchmark_dir).unwrap();
        let pprof_path = benchmark_dir.join("profile.pb");
        let flamegraph_path = benchmark_dir.join("flamegraph.svg");
        eprintln!("\nflamegraph_path = {flamegraph_path:#?}");
        let svg_file = File::create(&flamegraph_path)
            .expect("File system error while creating flamegraph.svg");
        // Tune flamegraph rendering for wide, dense benchmark graphs.
        let mut options = pprof::flamegraph::Options::default();
        options.hash = true;
        options.image_width = Some(2500);
        options.text_truncate_direction = TextTruncateDirection::Left;
        options.font_size /= 3;
        if let Some(profiler) = self.active_profiler.take() {
            let report = profiler.report().build().unwrap();
            // Write the pprof protobuf dump first, then render the SVG.
            let mut protobuf_file = File::create(pprof_path).unwrap();
            let profile = report.pprof().unwrap();
            let mut encoded = Vec::new();
            profile.encode(&mut encoded).unwrap();
            protobuf_file.write_all(&encoded).unwrap();
            report
                .flamegraph_with_options(svg_file, &mut options)
                .expect("Error writing flamegraph");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/sparse/benches/search.rs | lib/sparse/benches/search.rs | use std::borrow::Cow;
use std::io;
use std::path::Path;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::measurement::Measurement;
use criterion::{Criterion, criterion_group, criterion_main};
use dataset::Dataset;
use indicatif::{ProgressBar, ProgressDrawTarget};
use itertools::Itertools;
use rand::SeedableRng as _;
use rand::rngs::StdRng;
use sparse::common::scores_memory_pool::ScoresMemoryPool;
use sparse::common::sparse_vector::{RemappedSparseVector, SparseVector};
use sparse::common::sparse_vector_fixture::{random_positive_sparse_vector, random_sparse_vector};
use sparse::common::types::QuantizedU8;
use sparse::index::inverted_index::InvertedIndex;
use sparse::index::inverted_index::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam;
use sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use sparse::index::inverted_index::inverted_index_mmap::InvertedIndexMmap;
use sparse::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use sparse::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder;
use sparse::index::loaders::{self, Csr};
use sparse::index::search_context::SearchContext;
mod prof;
/// Number of query vectors prepared for each benchmark.
const NUM_QUERIES: usize = 2048;
/// Upper bound on dimension ids of randomly generated sparse vectors.
const MAX_SPARSE_DIM: usize = 30_000;
/// Result limit passed to `SearchContext::new` for every search.
const TOP: usize = 10;
/// Entry point: benchmark search over synthetic and real-world datasets.
pub fn bench_search(c: &mut Criterion) {
    // Synthetic datasets of uniformly random sparse vectors, at two scales.
    bench_uniform_random(c, "random-50k", 50_000);
    bench_uniform_random(c, "random-500k", 500_000);
    {
        // The two NeurIPS 2023 datasets share a single set of query vectors.
        let queries =
            loaders::load_csr_vecs(Dataset::NeurIps2023Queries.download().unwrap()).unwrap();
        let neurips_1m = load_csr_index(Dataset::NeurIps2023_1M.download().unwrap(), 1.0).unwrap();
        run_bench(c, "neurips2023-1M", neurips_1m, &queries);
        let neurips_full =
            load_csr_index(Dataset::NeurIps2023Full.download().unwrap(), 0.25).unwrap();
        run_bench(c, "neurips2023-full-25pct", neurips_full, &queries);
    }
    bench_movies(c);
}
/// Benchmark against an index of `num_vectors` uniformly random sparse vectors.
fn bench_uniform_random(c: &mut Criterion, name: &str, num_vectors: usize) {
    // Fixed seed keeps runs comparable.
    let mut rng = StdRng::seed_from_u64(42);
    let index = InvertedIndexBuilder::build_from_iterator((0..num_vectors).map(|point_id| {
        (
            point_id as PointOffsetType,
            random_sparse_vector(&mut rng, MAX_SPARSE_DIM).into_remapped(),
        )
    }));
    // Positive-only queries drawn from the same seeded generator.
    let queries: Vec<_> = (0..NUM_QUERIES)
        .map(|_| random_positive_sparse_vector(&mut rng, MAX_SPARSE_DIM))
        .collect();
    run_bench(c, name, index, &queries);
}
/// Benchmark against the SPLADE Wikipedia-movies dataset.
pub fn bench_movies(c: &mut Criterion) {
    let mut reader =
        loaders::JsonReader::open(Dataset::SpladeWikiMovies.download().unwrap()).unwrap();
    // Use the first NUM_QUERIES vectors as queries, and the rest as index.
    let queries: Vec<_> = (0..NUM_QUERIES)
        .map(|_| reader.next().unwrap().unwrap())
        .collect();
    let index = InvertedIndexBuilder::build_from_iterator(
        reader
            .enumerate()
            .map(|(point_id, vec)| (point_id as PointOffsetType, vec.unwrap().into_remapped())),
    );
    run_bench(c, "movies", index, &queries);
}
/// Run the full benchmark matrix for one dataset: `basic` and `hottest`
/// query sets, across RAM, mmap, and compressed index representations.
pub fn run_bench(
    c: &mut Criterion,
    name: &str,
    index: InvertedIndexRam,
    query_vectors: &[SparseVector],
) {
    // Position of the posting list with the most elements — treated as the
    // "hottest" dimension id below.
    let hottest_id = index
        .postings
        .iter()
        .enumerate()
        .map(|(i, p)| (i, p.elements.len()))
        .max_by_key(|(_, len)| *len)
        .unwrap()
        .0 as u32;
    let average_elements = index
        .postings
        .iter()
        .map(|p| p.elements.len())
        .sum::<usize>() as f64
        / index.postings.len() as f64;
    eprintln!(
        "Hottest id: {hottest_id} (elements: {}), average elements: {average_elements}",
        index.postings[hottest_id as usize].elements.len(),
    );
    // Build worst-case queries: truncate each query to 4 dimensions and make
    // sure the hottest dimension is among them.
    let hottest_query_vectors = query_vectors
        .iter()
        .cloned()
        .map(|mut vec| {
            vec.indices.truncate(4);
            vec.values.truncate(4);
            // Err(idx) means the hottest id is absent; idx is its sorted
            // insertion point.
            if let Err(idx) = vec.indices.binary_search(&hottest_id) {
                if idx < vec.indices.len() {
                    // Overwriting the element at the insertion point keeps
                    // the indices sorted and unique.
                    vec.indices[idx] = hottest_id;
                    vec.values[idx] = 1.0;
                } else {
                    vec.indices.push(hottest_id);
                    vec.values.push(1.0);
                }
            }
            vec.into_remapped()
        })
        .collect::<Vec<_>>();
    // Plain in-RAM index.
    run_bench2(
        c.benchmark_group(format!("search/ram/{name}")),
        &index,
        query_vectors,
        &hottest_query_vectors,
    );
    // Memory-mapped index, backed by a temporary directory.
    run_bench2(
        c.benchmark_group(format!("search/mmap/{name}")),
        &InvertedIndexMmap::from_ram_index(
            Cow::Borrowed(&index),
            tempfile::Builder::new()
                .prefix("test_index_dir")
                .tempdir()
                .unwrap()
                .path(),
        )
        .unwrap(),
        query_vectors,
        &hottest_query_vectors,
    );
    // Compressed variants (immutable RAM + mmap) for each weight type.
    // NOTE: the macro shadows the `run_bench2` function name but expands to
    // calls of that same function.
    macro_rules! run_bench2 {
        ($name:literal, $type:ty) => {
            run_bench2(
                c.benchmark_group(format!("search/ram_{}/{name}", $name)),
                &InvertedIndexCompressedImmutableRam::<$type>::from_ram_index(
                    Cow::Borrowed(&index),
                    "nonexistent/path",
                )
                .unwrap(),
                query_vectors,
                &hottest_query_vectors,
            );
            run_bench2(
                c.benchmark_group(format!("search/mmap_{}/{name}", $name)),
                &InvertedIndexCompressedMmap::<$type>::from_ram_index(
                    Cow::Borrowed(&index),
                    tempfile::Builder::new()
                        .prefix("test_index_dir")
                        .tempdir()
                        .unwrap()
                        .path(),
                )
                .unwrap(),
                query_vectors,
                &hottest_query_vectors,
            );
        };
    }
    run_bench2!("c32", f32);
    run_bench2!("c16", half::f16);
    // run_bench2!("c8", u8);
    run_bench2!("q8", QuantizedU8);
}
/// Run the `basic` and `hottest` benchmark functions for a single index
/// representation.
fn run_bench2(
    mut group: criterion::BenchmarkGroup<'_, impl Measurement>,
    index: &impl InvertedIndex,
    query_vectors: &[SparseVector],
    hottest_query_vectors: &[RemappedSparseVector],
) {
    let pool = ScoresMemoryPool::new();
    let stopped = AtomicBool::new(false);
    // Cycle so every benchmark iteration gets a (possibly repeated) query.
    let mut basic_queries = query_vectors.iter().cycle();
    let hardware_counter = HardwareCounterCell::new();
    group.bench_function("basic", |b| {
        b.iter_batched(
            || basic_queries.next().unwrap().clone().into_remapped(),
            |query| {
                SearchContext::new(query, TOP, index, pool.get(), &stopped, &hardware_counter)
                    .search(&|_| true)
            },
            criterion::BatchSize::SmallInput,
        )
    });
    let hardware_counter = HardwareCounterCell::new();
    let mut hottest_queries = hottest_query_vectors.iter().cycle();
    group.bench_function("hottest", |b| {
        b.iter_batched(
            || hottest_queries.next().unwrap().clone(),
            |query| {
                SearchContext::new(query, TOP, index, pool.get(), &stopped, &hardware_counter)
                    .search(&|_| true)
            },
            criterion::BatchSize::SmallInput,
        )
    });
}
/// Load the leading `ratio` fraction of a CSR dataset into a RAM inverted index.
fn load_csr_index(path: impl AsRef<Path>, ratio: f32) -> io::Result<InvertedIndexRam> {
    let csr = Csr::open(path.as_ref())?;
    let mut builder = InvertedIndexBuilder::new();
    assert!(ratio > 0.0 && ratio <= 1.0);
    let count = (csr.len() as f32 * ratio) as usize;
    // Report loading progress on stderr, capped at 12 redraws per second.
    let progress =
        ProgressBar::with_draw_target(Some(count as u64), ProgressDrawTarget::stderr_with_hz(12));
    for (row, vec) in progress.wrap_iter(csr.iter().take(count).enumerate()) {
        builder.add(
            row as u32,
            vec.map(|v| v.into_remapped())
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?,
        );
    }
    progress.finish_and_clear();
    Ok(builder.build())
}
// On non-Windows targets, attach the pprof-based flamegraph profiler
// (100 Hz sampling); per its docs it only activates when `--profile-time`
// is passed to the benchmark binary.
#[cfg(not(target_os = "windows"))]
criterion_group! {
    name = benches;
    config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
    targets = bench_search,
}
// The `prof` module is compiled out on Windows (see `mod prof` cfg), so run
// without a profiler there.
#[cfg(target_os = "windows")]
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = bench_search,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/config.rs | lib/collection/src/config.rs | use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::{Read, Write as _};
use std::num::{NonZeroU32, NonZeroUsize};
use std::path::Path;
use atomicwrites::AtomicFile;
use atomicwrites::OverwriteBehavior::AllowOverwrite;
use fs_err::File;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::types::{
Distance, HnswConfig, Indexes, Payload, PayloadStorageType, QuantizationConfig, SegmentConfig,
SparseVectorDataConfig, StrictModeConfig, VectorDataConfig, VectorName, VectorNameBuf,
VectorStorageDatatype, VectorStorageType,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use validator::Validate;
use wal::WalOptions;
use crate::operations::config_diff::{DiffConfig, QuantizationConfigDiff};
use crate::operations::types::{
CollectionError, CollectionResult, CollectionWarning, SparseVectorParams, SparseVectorsConfig,
VectorParams, VectorParamsDiff, VectorsConfig, VectorsConfigDiff,
};
use crate::operations::validation;
use crate::optimizers_builder::OptimizersConfig;
/// File name under which the collection config is persisted inside the collection directory.
pub const COLLECTION_CONFIG_FILE: &str = "config.json";
/// Write-ahead-log settings for a collection.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Anonymize, Clone, PartialEq, Eq)]
#[anonymize(false)]
pub struct WalConfig {
    /// Size of a single WAL segment in MB
    #[validate(range(min = 1))]
    pub wal_capacity_mb: usize,
    /// Number of WAL segments to create ahead of actually used ones
    pub wal_segments_ahead: usize,
    /// Number of closed WAL segments to keep
    ///
    /// Defaults to 1 when missing from a persisted config.
    #[validate(range(min = 1))]
    #[serde(default = "default_wal_retain_closed")]
    pub wal_retain_closed: usize,
}
/// Serde default for `WalConfig::wal_retain_closed`.
fn default_wal_retain_closed() -> usize {
    1
}
impl From<&WalConfig> for WalOptions {
    /// Translate the collection-level WAL config into the `wal` crate's options.
    fn from(config: &WalConfig) -> Self {
        // All fields are `usize` (Copy), so destructure by value.
        let &WalConfig {
            wal_capacity_mb,
            wal_segments_ahead,
            wal_retain_closed,
        } = config;
        WalOptions {
            // Config is in MB; the WAL crate expects bytes.
            segment_capacity: wal_capacity_mb * 1024 * 1024,
            segment_queue_len: wal_segments_ahead,
            retain_closed: NonZeroUsize::new(wal_retain_closed).unwrap(),
        }
    }
}
impl Default for WalConfig {
    fn default() -> Self {
        Self {
            wal_capacity_mb: 32,
            wal_segments_ahead: 0,
            wal_retain_closed: default_wal_retain_closed(),
        }
    }
}
/// How points are assigned to shards.
#[derive(
    Debug, Deserialize, Serialize, JsonSchema, Anonymize, PartialEq, Eq, Hash, Clone, Copy, Default,
)]
#[serde(rename_all = "snake_case")]
pub enum ShardingMethod {
    /// Points are distributed across all available shards automatically.
    #[default]
    Auto,
    /// Points are distributed across shards according to a user-provided shard key.
    Custom,
}
/// User-configurable parameters of a collection: vector layouts, sharding,
/// replication, and payload storage behavior.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Anonymize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub struct CollectionParams {
    /// Configuration of the vector storage
    #[validate(nested)]
    #[serde(default)]
    pub vectors: VectorsConfig,
    /// Number of shards the collection has
    #[serde(default = "default_shard_number")]
    #[anonymize(false)]
    pub shard_number: NonZeroU32,
    /// Sharding method
    /// Default is Auto - points are distributed across all available shards
    /// Custom - points are distributed across shards according to shard key
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sharding_method: Option<ShardingMethod>,
    /// Number of replicas for each shard
    #[serde(default = "default_replication_factor")]
    #[anonymize(false)]
    pub replication_factor: NonZeroU32,
    /// Defines how many replicas should apply the operation for us to consider it successful.
    /// Increasing this number will make the collection more resilient to inconsistencies, but will
    /// also make it fail if not enough replicas are available.
    /// Does not have any performance impact.
    #[serde(default = "default_write_consistency_factor")]
    #[anonymize(false)]
    pub write_consistency_factor: NonZeroU32,
    /// Defines how many additional replicas should be processing read request at the same time.
    /// Default value is Auto, which means that fan-out will be determined automatically based on
    /// the busyness of the local replica.
    /// Having more than 0 might be useful to smooth latency spikes of individual nodes.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[anonymize(false)]
    pub read_fan_out_factor: Option<u32>,
    /// If true - point's payload will not be stored in memory.
    /// It will be read from the disk every time it is requested.
    /// This setting saves RAM by (slightly) increasing the response time.
    /// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
    ///
    /// Default: true
    #[serde(default = "default_on_disk_payload")]
    pub on_disk_payload: bool,
    /// Configuration of the sparse vector storage
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[validate(nested)]
    pub sparse_vectors: Option<BTreeMap<VectorNameBuf, SparseVectorParams>>,
}
impl CollectionParams {
    /// Resolve the payload storage backend.
    ///
    /// `on_disk_payload == true` always maps to mmap storage. The in-memory
    /// backend otherwise depends on the `rocksdb` compile-time feature and,
    /// with rocksdb, on the `payload_storage_skip_rocksdb` feature flag.
    pub fn payload_storage_type(&self) -> PayloadStorageType {
        #[cfg(feature = "rocksdb")]
        if self.on_disk_payload {
            PayloadStorageType::Mmap
        } else if common::flags::feature_flags().payload_storage_skip_rocksdb {
            PayloadStorageType::InRamMmap
        } else {
            PayloadStorageType::InMemory
        }
        #[cfg(not(feature = "rocksdb"))]
        if self.on_disk_payload {
            PayloadStorageType::Mmap
        } else {
            PayloadStorageType::InRamMmap
        }
    }
    /// Check whether `other` could replace these params: dense vector configs
    /// must be compatible, and the set of sparse vector *names* and the
    /// sharding method must be identical. All other fields may change freely.
    pub fn check_compatible(&self, other: &CollectionParams) -> CollectionResult<()> {
        let CollectionParams {
            vectors,
            shard_number: _, // Maybe be updated by resharding, assume local shards needs to be dropped
            sharding_method, // Not changeable
            replication_factor: _, // May be changed
            write_consistency_factor: _, // May be changed
            read_fan_out_factor: _, // May be changed
            on_disk_payload: _, // May be changed
            sparse_vectors, // Parameters may be changes, but not the structure
        } = other;
        self.vectors.check_compatible(vectors)?;
        // Only the names are compared; sparse vector parameters may differ.
        let this_sparse_vectors: HashSet<_> = if let Some(sparse_vectors) = &self.sparse_vectors {
            sparse_vectors.keys().collect()
        } else {
            HashSet::new()
        };
        let other_sparse_vectors: HashSet<_> = if let Some(sparse_vectors) = sparse_vectors {
            sparse_vectors.keys().collect()
        } else {
            HashSet::new()
        };
        if this_sparse_vectors != other_sparse_vectors {
            return Err(CollectionError::bad_input(format!(
                "sparse vectors are incompatible: \
                 origin sparse vectors: {this_sparse_vectors:?}, \
                 while other sparse vectors: {other_sparse_vectors:?}",
            )));
        }
        // Unset sharding method counts as the default (Auto).
        let this_sharding_method = self.sharding_method.unwrap_or_default();
        let other_sharding_method = sharding_method.unwrap_or_default();
        if this_sharding_method != other_sharding_method {
            return Err(CollectionError::bad_input(format!(
                "sharding method is incompatible: \
                 origin sharding method: {this_sharding_method:?}, \
                 while other sharding method: {other_sharding_method:?}",
            )));
        }
        Ok(())
    }
}
/// Default number of shards: a single shard.
pub fn default_shard_number() -> NonZeroU32 {
    NonZeroU32::MIN
}
/// Default replication factor: a single replica per shard.
pub fn default_replication_factor() -> NonZeroU32 {
    NonZeroU32::MIN
}
/// Default write consistency factor: one replica must acknowledge each write.
pub fn default_write_consistency_factor() -> NonZeroU32 {
    NonZeroU32::MIN
}
/// Payloads are stored on disk by default (see `CollectionParams::on_disk_payload`).
pub const fn default_on_disk_payload() -> bool {
    true
}
/// Full persisted collection configuration, stored as `COLLECTION_CONFIG_FILE`
/// inside the collection directory.
#[derive(Debug, Deserialize, Serialize, Validate, Clone, PartialEq)]
pub struct CollectionConfigInternal {
    /// Core collection parameters (vectors, sharding, replication, payload storage).
    #[validate(nested)]
    pub params: CollectionParams,
    /// Collection-level HNSW settings.
    #[validate(nested)]
    pub hnsw_config: HnswConfig,
    /// Optimizer settings.
    #[validate(nested)]
    pub optimizer_config: OptimizersConfig,
    /// Write-ahead-log settings.
    #[validate(nested)]
    pub wal_config: WalConfig,
    /// Optional collection-level quantization settings.
    #[serde(default)]
    #[validate(nested)]
    pub quantization_config: Option<QuantizationConfig>,
    /// Optional strict mode settings.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[validate(nested)]
    pub strict_mode_config: Option<StrictModeConfig>,
    /// Unique id of the collection, if one was assigned.
    #[serde(default)]
    pub uuid: Option<Uuid>,
    /// Arbitrary JSON metadata for the collection
    /// This can be used to store application-specific information
    /// such as creation time, migration data, inference model info, etc.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Payload>,
}
impl CollectionConfigInternal {
    /// Serialize the config as JSON bytes.
    pub fn to_bytes(&self) -> CollectionResult<Vec<u8>> {
        serde_json::to_vec(self).map_err(|err| CollectionError::service_error(err.to_string()))
    }

    /// Atomically write the config to `<path>/config.json`.
    ///
    /// Uses an atomic overwrite so readers never observe a partially written
    /// file.
    pub fn save(&self, path: &Path) -> CollectionResult<()> {
        let config_path = path.join(COLLECTION_CONFIG_FILE);
        let af = AtomicFile::new(&config_path, AllowOverwrite);
        // Reuse `to_bytes` so a serialization failure surfaces as a
        // `CollectionError` instead of panicking (previously an `unwrap` on
        // `serde_json::to_vec`).
        let state_bytes = self.to_bytes()?;
        af.write(|f| f.write_all(&state_bytes)).map_err(|err| {
            CollectionError::service_error(format!("Can't write {config_path:?}, error: {err}"))
        })?;
        Ok(())
    }

    /// Load the config from `<path>/config.json`.
    pub fn load(path: &Path) -> CollectionResult<Self> {
        let config_path = path.join(COLLECTION_CONFIG_FILE);
        let mut contents = String::new();
        let mut file = File::open(config_path)?;
        file.read_to_string(&mut contents)?;
        Ok(serde_json::from_str(&contents)?)
    }

    /// Check if collection config exists
    pub fn check(path: &Path) -> bool {
        let config_path = path.join(COLLECTION_CONFIG_FILE);
        config_path.exists()
    }

    /// Validate the config, logging (but not failing on) validation errors.
    pub fn validate_and_warn(&self) {
        if let Err(ref errs) = self.validate() {
            validation::warn_validation_errors("Collection configuration file", errs);
        }
    }

    /// Get warnings related to this configuration
    pub fn get_warnings(&self) -> Vec<CollectionWarning> {
        let mut warnings = Vec::new();
        for (vector_name, vector_config) in self.params.vectors.params_iter() {
            // Per-vector HNSW settings override the collection-level config.
            let vector_hnsw = self
                .hnsw_config
                .update_opt(vector_config.hnsw_config.as_ref());
            let vector_quantization =
                vector_config.quantization_config.is_some() || self.quantization_config.is_some();
            // `inline_storage` is ignored for multivectors and for vectors
            // without quantization; warn in both cases.
            if vector_hnsw.inline_storage.unwrap_or_default() {
                if vector_config.multivector_config.is_some() {
                    warnings.push(CollectionWarning {
                        message: format!(
                            "The `hnsw_config.inline_storage` option for vector '{vector_name}' \
                             is not compatible with multivectors. This option will be ignored."
                        ),
                    });
                } else if !vector_quantization {
                    warnings.push(CollectionWarning {
                        message: format!(
                            "The `hnsw_config.inline_storage` option for vector '{vector_name}' \
                             requires quantization to be enabled. This option will be ignored."
                        ),
                    });
                }
            }
        }
        warnings
    }

    /// Derive the base segment configuration from the collection parameters.
    pub fn to_base_segment_config(&self) -> CollectionResult<SegmentConfig> {
        self.params
            .to_base_segment_config(self.quantization_config.as_ref())
    }
}
impl CollectionParams {
/// Minimal parameter set: no vectors configured, all scalar knobs at their
/// default values.
pub fn empty() -> Self {
    CollectionParams {
        vectors: Default::default(),
        sparse_vectors: None,
        shard_number: default_shard_number(),
        sharding_method: None,
        replication_factor: default_replication_factor(),
        write_consistency_factor: default_write_consistency_factor(),
        read_fan_out_factor: None,
        on_disk_payload: default_on_disk_payload(),
    }
}
/// Build a `BadInput` error for a vector name that is not configured in this
/// collection, listing the names that *are* available (dense and sparse) so
/// the caller can correct the request.
fn missing_vector_error(&self, vector_name: &VectorName) -> CollectionError {
    // Collect all configured vector names: dense first...
    let mut available_names = vec![];
    match &self.vectors {
        VectorsConfig::Single(_) => {
            available_names.push(DEFAULT_VECTOR_NAME.to_owned());
        }
        VectorsConfig::Multi(vectors) => {
            for name in vectors.keys() {
                available_names.push(name.clone());
            }
        }
    }
    // ...then sparse.
    if let Some(sparse_vectors) = &self.sparse_vectors {
        for name in sparse_vectors.keys() {
            available_names.push(name.clone());
        }
    }
    if available_names.is_empty() {
        // Nothing is configured at all.
        CollectionError::BadInput {
            description: "Vectors are not configured in this collection".into(),
        }
    } else if available_names == vec![DEFAULT_VECTOR_NAME] {
        // Only the single unnamed (default) vector exists; listing the
        // available names would not help here.
        CollectionError::BadInput {
            description: format!(
                "Vector with name {vector_name} is not configured in this collection"
            ),
        }
    } else {
        let available_names = available_names.join(", ");
        if vector_name == DEFAULT_VECTOR_NAME {
            // The request omitted the vector name, but this collection
            // requires an explicit one.
            return CollectionError::BadInput {
                description: format!(
                    "Collection requires specified vector name in the request, available names: {available_names}"
                ),
            };
        }
        CollectionError::BadInput {
            description: format!(
                "Vector with name `{vector_name}` is not configured in this collection, available names: {available_names}"
            ),
        }
    }
}
/// Resolve the distance metric for the named vector.
///
/// Dense vectors carry an explicit metric; sparse vectors always use the
/// dot product. Unknown names produce a `BadInput` error.
pub fn get_distance(&self, vector_name: &VectorName) -> CollectionResult<Distance> {
    if let Some(params) = self.vectors.get_params(vector_name) {
        return Ok(params.distance);
    }
    let is_sparse = self
        .sparse_vectors
        .as_ref()
        .is_some_and(|sparse_vectors| sparse_vectors.get(vector_name).is_some());
    if is_sparse {
        Ok(Distance::Dot)
    } else {
        Err(self.missing_vector_error(vector_name))
    }
}
/// Mutable access to the named dense vector's params, or a `BadInput` error
/// if the vector is not configured.
fn get_vector_params_mut(
    &mut self,
    vector_name: &VectorName,
) -> CollectionResult<&mut VectorParams> {
    match self.vectors.get_params_mut(vector_name) {
        Some(params) => Ok(params),
        None => {
            // Tailor the message for the unnamed (default) vector case.
            let description = if vector_name == DEFAULT_VECTOR_NAME {
                "Default vector params are not specified in config".to_string()
            } else {
                format!("Vector params for {vector_name} are not specified in config")
            };
            Err(CollectionError::BadInput { description })
        }
    }
}
/// Look up the named sparse vector's params, if sparse vectors are
/// configured and the name exists.
pub fn get_sparse_vector_params_opt(
    &self,
    vector_name: &VectorName,
) -> Option<&SparseVectorParams> {
    match &self.sparse_vectors {
        Some(sparse_vectors) => sparse_vectors.get(vector_name),
        None => None,
    }
}
/// Mutable access to the named sparse vector's params.
///
/// Returns a `BadInput` error if sparse vectors are not configured at all,
/// or if the given name is not among them.
pub fn get_sparse_vector_params_mut(
    &mut self,
    vector_name: &VectorName,
) -> CollectionResult<&mut SparseVectorParams> {
    // Both failure modes surface the same error; build it in one place.
    // The closure only captures `vector_name` by reference, so it is `Copy`
    // and can be used twice.
    let missing = || CollectionError::BadInput {
        description: format!(
            "Sparse vector `{vector_name}` is not specified in collection config"
        ),
    };
    self.sparse_vectors
        .as_mut()
        .ok_or_else(missing)?
        .get_mut(vector_name)
        .ok_or_else(missing)
}
/// Update collection vectors from the given update vectors config
pub fn update_vectors_from_diff(
    &mut self,
    update_vectors_diff: &VectorsConfigDiff,
) -> CollectionResult<()> {
    for (vector_name, update_params) in update_vectors_diff.0.iter() {
        // Fails if the named vector is not configured in this collection.
        let vector_params = self.get_vector_params_mut(vector_name)?;
        let VectorParamsDiff {
            hnsw_config,
            quantization_config,
            on_disk,
        } = update_params.clone();
        if let Some(hnsw_diff) = hnsw_config {
            // HNSW settings are merged into any existing per-vector
            // overrides; otherwise the diff becomes the initial overrides.
            if let Some(existing_hnsw) = &vector_params.hnsw_config {
                vector_params.hnsw_config = Some(existing_hnsw.update(&hnsw_diff));
            } else {
                vector_params.hnsw_config = Some(hnsw_diff);
            }
        }
        if let Some(quantization_diff) = quantization_config {
            // Quantization is replaced wholesale (not merged); the
            // `Disabled` variant clears it.
            vector_params.quantization_config = match quantization_diff.clone() {
                QuantizationConfigDiff::Scalar(scalar) => {
                    Some(QuantizationConfig::Scalar(scalar))
                }
                QuantizationConfigDiff::Product(product) => {
                    Some(QuantizationConfig::Product(product))
                }
                QuantizationConfigDiff::Binary(binary) => {
                    Some(QuantizationConfig::Binary(binary))
                }
                QuantizationConfigDiff::Disabled(_) => None,
            }
        }
        if let Some(on_disk) = on_disk {
            vector_params.on_disk = Some(on_disk);
        }
    }
    Ok(())
}
/// Update collection sparse vectors from the given sparse vectors config
pub fn update_sparse_vectors_from_other(
    &mut self,
    update_vectors: &SparseVectorsConfig,
) -> CollectionResult<()> {
    for (vector_name, update_params) in &update_vectors.0 {
        // Fails if the sparse vector is not part of this collection.
        let sparse_vector_params = self.get_sparse_vector_params_mut(vector_name)?;
        let SparseVectorParams { index, modifier } = update_params.clone();
        if modifier.is_some() {
            sparse_vector_params.modifier = modifier;
        }
        if let Some(new_index) = index {
            match &mut sparse_vector_params.index {
                // Merge into the existing index settings.
                Some(existing_index) => existing_index.update_from_other(new_index),
                // No index settings yet: adopt the new ones wholesale.
                empty => *empty = Some(new_index),
            }
        }
    }
    Ok(())
}
/// Convert into unoptimized named vector data configs
///
/// It is the job of the segment optimizer to change this configuration with optimized settings
/// based on threshold configurations.
pub fn to_base_vector_data(
    &self,
    collection_quantization: Option<&QuantizationConfig>,
) -> CollectionResult<HashMap<VectorNameBuf, VectorDataConfig>> {
    // Resolve the effective quantization for one vector: the per-vector
    // config wins over the collection-level one, and only configs that
    // support appendable segments are kept.
    let quantization_fn = |quantization_config: Option<&QuantizationConfig>| {
        quantization_config
            // Only if there is no `quantization_config` we may start using `collection_quantization` (to avoid mixing quantizations between segments)
            .or(collection_quantization)
            .filter(|c| c.supports_appendable())
            .cloned()
    };
    Ok(self
        .vectors
        .params_iter()
        .map(|(name, params)| {
            (
                name.into(),
                VectorDataConfig {
                    size: params.size.get() as usize,
                    distance: params.distance,
                    // Plain (disabled) index
                    index: Indexes::Plain {},
                    // Quantizaton config in appendable segment if runtime feature flag is set
                    quantization_config: common::flags::feature_flags()
                        .appendable_quantization
                        .then(|| quantization_fn(params.quantization_config.as_ref()))
                        .flatten(),
                    // Default to in memory storage
                    storage_type: if params.on_disk.unwrap_or_default() {
                        VectorStorageType::ChunkedMmap
                    } else {
                        VectorStorageType::InRamChunkedMmap
                    },
                    multivector_config: params.multivector_config,
                    datatype: params.datatype.map(VectorStorageDatatype::from),
                },
            )
        })
        .collect())
}
/// Convert into unoptimized sparse vector data configs
///
/// It is the job of the segment optimizer to change this configuration with optimized settings
/// based on threshold configurations.
pub fn to_sparse_vector_data(
    &self,
) -> CollectionResult<HashMap<VectorNameBuf, SparseVectorDataConfig>> {
    if let Some(sparse_vectors) = &self.sparse_vectors {
        sparse_vectors
            .iter()
            .map(|(name, params)| {
                Ok((
                    name.clone(),
                    SparseVectorDataConfig {
                        index: SparseIndexConfig {
                            // Optional per-vector index overrides; `None`
                            // fields fall back to index defaults.
                            full_scan_threshold: params
                                .index
                                .and_then(|index| index.full_scan_threshold),
                            // Unoptimized segments always start with a
                            // mutable in-RAM sparse index.
                            index_type: SparseIndexType::MutableRam,
                            datatype: params
                                .index
                                .and_then(|index| index.datatype)
                                .map(VectorStorageDatatype::from),
                        },
                        storage_type: params.storage_type(),
                        modifier: params.modifier,
                    },
                ))
            })
            .collect()
    } else {
        // No sparse vectors configured: empty map.
        Ok(Default::default())
    }
}
/// Convert into unoptimized segment config
///
/// It is the job of the segment optimizer to change this configuration with optimized settings
/// based on threshold configurations.
pub fn to_base_segment_config(
    &self,
    collection_quantization: Option<&QuantizationConfig>,
) -> CollectionResult<SegmentConfig> {
    // Dense vector configs, with any failure wrapped as a service error.
    let dense = self.to_base_vector_data(collection_quantization);
    let vector_data = dense.map_err(|err| {
        CollectionError::service_error(format!(
            "Failed to source dense vector configuration from collection parameters: {err:?}"
        ))
    })?;
    // Sparse vector configs, likewise wrapped.
    let sparse = self.to_sparse_vector_data();
    let sparse_vector_data = sparse.map_err(|err| {
        CollectionError::service_error(format!(
            "Failed to source sparse vector configuration from collection parameters: {err:?}"
        ))
    })?;
    Ok(SegmentConfig {
        vector_data,
        sparse_vector_data,
        payload_storage_type: self.payload_storage_type(),
    })
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/discovery.rs | lib/collection/src/discovery.rs | use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::Future;
use itertools::Itertools;
use segment::data_types::vectors::NamedQuery;
use segment::types::{Condition, Filter, HasIdCondition, ScoredPoint};
use segment::vector_storage::query::{ContextPair, ContextQuery, DiscoveryQuery};
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequestBatch;
use tokio::sync::RwLockReadGuard;
use crate::collection::Collection;
use crate::common::batching::batch_requests;
use crate::common::fetch_vectors::{
ReferencedVectors, convert_to_vectors, resolve_referenced_vectors_batch,
};
use crate::common::retrieve_request_trait::RetrieveRequest;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{
CollectionError, CollectionResult, CoreSearchRequest, DiscoverRequestInternal,
};
/// Convert a discovery request into a core search request, resolving any
/// point-id references (target and context pairs) into concrete vectors via
/// the pre-fetched `all_vectors_records_map`.
fn discovery_into_core_search(
    collection_name: &str,
    request: DiscoverRequestInternal,
    all_vectors_records_map: &ReferencedVectors,
) -> CollectionResult<CoreSearchRequest> {
    let lookup_collection_name = request.get_lookup_collection();
    let lookup_vector_name = request.get_lookup_vector_name();
    let using = request.using.as_ref().map(|using| using.as_name());
    // Check we actually fetched all referenced vectors in this request
    let referenced_ids = request.get_referenced_point_ids();
    for &point_id in &referenced_ids {
        if all_vectors_records_map
            .get(lookup_collection_name, point_id)
            .is_none()
        {
            return Err(CollectionError::PointNotFound {
                missed_point_id: point_id,
            });
        }
    }
    // Resolve the (optional) discovery target into a concrete vector.
    let target = convert_to_vectors(
        request.target.iter(),
        all_vectors_records_map,
        &lookup_vector_name,
        lookup_collection_name,
    )
    .next()
    .map(|v| v.to_owned());
    // Resolve each positive/negative example pair into concrete vectors.
    let context_pairs = request
        .context
        .iter()
        .flatten()
        .map(|pair| {
            let mut vector_pair = convert_to_vectors(
                pair.iter(),
                all_vectors_records_map,
                &lookup_vector_name,
                lookup_collection_name,
            )
            .map(|v| v.to_owned());
            ContextPair {
                // SAFETY: we know there are two elements in the iterator
                positive: vector_pair.next().unwrap(),
                negative: vector_pair.next().unwrap(),
            }
        })
        .collect_vec();
    let query: QueryEnum = match (target, context_pairs) {
        // Target with/without pairs => Discovery
        (Some(target), pairs) => QueryEnum::Discover(NamedQuery {
            query: DiscoveryQuery::new(target, pairs),
            using,
        }),
        // Only pairs => Context
        (None, pairs) => QueryEnum::Context(NamedQuery {
            query: ContextQuery::new(pairs),
            using,
        }),
    };
    // do not exclude vector ids from different lookup collection
    let reference_vectors_ids_to_exclude = match lookup_collection_name {
        Some(lookup_collection_name) if lookup_collection_name != collection_name => vec![],
        _ => referenced_ids,
    };
    // Exclude the reference points themselves from the results by merging a
    // `must_not` ids condition into the request filter.
    let filter = if reference_vectors_ids_to_exclude.is_empty() {
        request.filter
    } else {
        let not_ids = Filter::new_must_not(Condition::HasId(HasIdCondition {
            has_id: reference_vectors_ids_to_exclude.into_iter().collect(),
        }));
        match &request.filter {
            None => Some(not_ids),
            Some(filter) => Some(not_ids.merge(filter)),
        }
    };
    let core_search = CoreSearchRequest {
        query,
        filter,
        params: request.params,
        limit: request.limit,
        offset: request.offset.unwrap_or_default(),
        with_payload: request.with_payload,
        with_vector: request.with_vector,
        score_threshold: None,
    };
    Ok(core_search)
}
/// Run a single discovery request against the collection.
///
/// Thin wrapper over [`discover_batch`] with a one-element batch.
pub async fn discover<'a, F, Fut>(
    request: DiscoverRequestInternal,
    collection: &Collection,
    collection_by_name: F,
    read_consistency: Option<ReadConsistency>,
    shard_selector: ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>>
where
    F: Fn(String) -> Fut,
    Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
    // A zero limit cannot produce any points; skip all work.
    if request.limit == 0 {
        return Ok(Vec::new());
    }
    let results = discover_batch(
        vec![(request, shard_selector)],
        collection,
        collection_by_name,
        read_consistency,
        timeout,
        hw_measurement_acc,
    )
    .await?;
    // The non-empty single-request batch yields exactly one result.
    Ok(results.into_iter().next().unwrap())
}
/// Run a batch of discovery requests against the collection.
///
/// Resolves all point-id references across the batch in one pass, converts
/// each request into a core search request, groups consecutive requests by
/// shard selector, and awaits the grouped searches concurrently.
pub async fn discover_batch<'a, F, Fut>(
    request_batch: Vec<(DiscoverRequestInternal, ShardSelectorInternal)>,
    collection: &Collection,
    collection_by_name: F,
    read_consistency: Option<ReadConsistency>,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>>
where
    F: Fn(String) -> Fut,
    Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
    let start = std::time::Instant::now();
    // shortcuts batch if all requests with limit=0
    if request_batch.iter().all(|(s, _)| s.limit == 0) {
        return Ok(vec![]);
    }
    // Validate context_pairs and/or target have value(s)
    request_batch.iter().try_for_each(|(request, _)| {
        let no_pairs = request.context.is_none()
            || request
                .context
                .as_ref()
                .is_some_and(|pairs| pairs.is_empty());
        let no_target = request.target.is_none();
        if no_pairs && no_target {
            return Err(CollectionError::bad_request(
                "target and/or context_pairs must be specified".to_string(),
            ));
        }
        Ok(())
    })?;
    // Fetch every vector referenced by id anywhere in the batch (possibly
    // from a different lookup collection) before converting the requests.
    let all_vectors_records_map = resolve_referenced_vectors_batch(
        &request_batch,
        collection,
        collection_by_name,
        read_consistency,
        timeout,
        hw_measurement_acc.clone(),
    )
    .await?;
    // update timeout
    let timeout = timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
    // Group consecutive requests sharing a shard selector into one core
    // search batch each; `res` collects one future per group.
    let res = batch_requests::<
        (DiscoverRequestInternal, ShardSelectorInternal),
        ShardSelectorInternal,
        Vec<CoreSearchRequest>,
        Vec<_>,
    >(
        request_batch,
        |(_req, shard)| shard,
        |(req, _), acc| {
            discovery_into_core_search(collection.name(), req, &all_vectors_records_map).map(
                |core_req| {
                    acc.push(core_req);
                },
            )
        },
        |shard_selector, core_searches, requests| {
            if core_searches.is_empty() {
                return Ok(());
            }
            let core_search_batch_request = CoreSearchRequestBatch {
                searches: core_searches,
            };
            requests.push(collection.core_search_batch(
                core_search_batch_request,
                read_consistency,
                shard_selector,
                timeout,
                hw_measurement_acc.clone(),
            ));
            Ok(())
        },
    )?;
    // Await all grouped searches and flatten back into one result per
    // original request.
    let results = futures::future::try_join_all(res).await?;
    let flatten_results: Vec<Vec<_>> = results.into_iter().flatten().collect();
    Ok(flatten_results)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/lib.rs | lib/collection/src/lib.rs | pub mod collection;
pub mod collection_manager;
pub mod collection_state;
pub mod common;
pub mod config;
pub mod discovery;
pub mod grouping;
pub mod hash_ring;
pub mod lookup;
pub mod operations;
pub mod optimizers_builder;
pub mod problems;
pub mod recommendations;
pub mod shards;
pub mod telemetry;
mod update_handler;
pub mod wal_delta;
pub mod events;
#[cfg(test)]
mod tests;
pub mod profiling;
pub mod update_workers;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/optimizers_builder.rs | lib/collection/src/optimizers_builder.rs | use std::path::Path;
use std::sync::Arc;
use fs_err as fs;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use segment::index::hnsw_index::num_rayon_threads;
use segment::types::{HnswConfig, HnswGlobalConfig, QuantizationConfig};
use serde::{Deserialize, Serialize};
use validator::Validate;
use crate::collection_manager::optimizers::config_mismatch_optimizer::ConfigMismatchOptimizer;
use crate::collection_manager::optimizers::indexing_optimizer::IndexingOptimizer;
use crate::collection_manager::optimizers::merge_optimizer::MergeOptimizer;
use crate::collection_manager::optimizers::segment_optimizer::OptimizerThresholds;
use crate::collection_manager::optimizers::vacuum_optimizer::VacuumOptimizer;
use crate::config::CollectionParams;
use crate::update_handler::Optimizer;
/// Default maximum segment size contribution per indexing thread, in kilobytes
/// (multiplied by the number of indexing threads when no explicit limit is set).
const DEFAULT_MAX_SEGMENT_PER_CPU_KB: usize = 256_000;
/// Default indexing threshold in kilobytes; segments above it get a vector index.
pub const DEFAULT_INDEXING_THRESHOLD_KB: usize = 10_000;
/// Subdirectory of a shard that holds its segments.
const SEGMENTS_PATH: &str = "segments";
/// Subdirectory of a shard used for in-progress (temporary) segments.
const TEMP_SEGMENTS_PATH: &str = "temp_segments";
/// Configuration of the segment optimizers for a collection.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Anonymize, Clone, PartialEq)]
#[anonymize(false)]
pub struct OptimizersConfig {
    /// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
    #[validate(range(min = 0.0, max = 1.0))]
    pub deleted_threshold: f64,
    /// The minimal number of vectors in a segment, required to perform segment optimization
    #[validate(range(min = 100))]
    pub vacuum_min_vector_number: usize,
    /// Target amount of segments optimizer will try to keep.
    /// Real amount of segments may vary depending on multiple parameters:
    /// - Amount of stored points
    /// - Current write RPS
    ///
    /// It is recommended to select default number of segments as a factor of the number of search threads,
    /// so that each segment would be handled evenly by one of the threads.
    /// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs.
    pub default_segment_number: usize,
    /// Do not create segments larger this size (in kilobytes).
    /// Large segments might require disproportionately long indexation times,
    /// therefore it makes sense to limit the size of segments.
    ///
    /// If indexing speed is more important - make this parameter lower.
    /// If search speed is more important - make this parameter higher.
    /// Note: 1Kb = 1 vector of size 256
    /// If not set, will be automatically selected considering the number of available CPUs.
    #[serde(alias = "max_segment_size_kb")]
    #[serde(default)]
    #[validate(range(min = 1))]
    pub max_segment_size: Option<usize>,
    /// Maximum size (in kilobytes) of vectors to store in-memory per segment.
    /// Segments larger than this threshold will be stored as read-only memmapped file.
    ///
    /// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
    ///
    /// To disable memmap storage, set this to `0`. Internally it will use the largest threshold possible.
    ///
    /// Note: 1Kb = 1 vector of size 256
    #[serde(alias = "memmap_threshold_kb")]
    #[serde(default)]
    #[deprecated(since = "1.15.0", note = "Use `on_disk` flags instead")]
    pub memmap_threshold: Option<usize>,
    /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
    ///
    /// Default value is 10,000, based on experiments and observations.
    ///
    /// To disable vector indexing, set to `0`.
    ///
    /// Note: 1kB = 1 vector of size 256.
    #[serde(alias = "indexing_threshold_kb")]
    #[serde(default)]
    pub indexing_threshold: Option<usize>,
    /// Minimum interval between forced flushes.
    pub flush_interval_sec: u64,
    /// Max number of threads (jobs) for running optimizations per shard.
    /// Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
    /// If null - have no limit and choose dynamically to saturate CPU.
    /// If 0 - no optimization threads, optimizations will be disabled.
    #[serde(default)]
    pub max_optimization_threads: Option<usize>,
}
impl OptimizersConfig {
/// Test-only fixture configuration with optimizations disabled
/// (`max_optimization_threads: Some(0)` — see the field docs).
#[cfg(test)]
pub fn fixture() -> Self {
    Self {
        deleted_threshold: 0.1,
        vacuum_min_vector_number: 1000,
        default_segment_number: 0,
        max_segment_size: None,
        #[expect(deprecated)]
        memmap_threshold: None,
        indexing_threshold: Some(100_000),
        flush_interval_sec: 60,
        max_optimization_threads: Some(0),
    }
}
/// Resolve the target segment count, auto-selecting from CPU count when
/// `default_segment_number` is zero.
pub fn get_number_segments(&self) -> usize {
    if self.default_segment_number != 0 {
        return self.default_segment_number;
    }
    // Auto mode: aim for one segment per two CPUs as a middle ground
    // between latency and RPS, but never fewer than 2 or more than 8
    // segments unless explicitly requested.
    (common::cpu::get_num_cpus() / 2).clamp(2, 8)
}
/// Derive concrete optimizer thresholds from this config.
///
/// `None` falls back to defaults; an explicit `0` disables the respective
/// mechanism by raising the threshold to `usize::MAX`.
pub fn optimizer_thresholds(&self, num_indexing_threads: usize) -> OptimizerThresholds {
    let indexing_threshold_kb = match self.indexing_threshold {
        Some(0) => usize::MAX, // explicit zero disables the vector index
        Some(custom) => custom,
        None => DEFAULT_INDEXING_THRESHOLD_KB,
    };
    #[expect(deprecated)]
    let memmap_threshold_kb = match self.memmap_threshold {
        Some(custom) if custom != 0 => custom,
        // unset or zero: memmap effectively disabled
        _ => usize::MAX,
    };
    OptimizerThresholds {
        indexing_threshold_kb,
        memmap_threshold_kb,
        max_segment_size_kb: self.get_max_segment_size_in_kilobytes(num_indexing_threads),
    }
}
/// Maximum segment size in kilobytes: the configured value, or a
/// per-indexing-thread default when unset.
pub fn get_max_segment_size_in_kilobytes(&self, num_indexing_threads: usize) -> usize {
    self.max_segment_size
        .unwrap_or_else(|| num_indexing_threads.saturating_mul(DEFAULT_MAX_SEGMENT_PER_CPU_KB))
}
}
/// Remove the shard's temporary segments directory, if present.
///
/// A failure to remove is only logged, never propagated.
pub fn clear_temp_segments(shard_path: &Path) {
    let temp_segments_path = shard_path.join(TEMP_SEGMENTS_PATH);
    log::debug!("Removing temp_segments directory: {temp_segments_path:?}");
    // Attempt removal directly instead of checking `exists()` first to
    // avoid a TOCTOU race; a missing directory is not an error.
    if let Err(err) = fs::remove_dir_all(&temp_segments_path) {
        if err.kind() != std::io::ErrorKind::NotFound {
            log::warn!(
                "Failed to remove temp_segments directory: {temp_segments_path:?}, error: {err:?}"
            );
        }
    }
}
/// Build the full optimizer set for a shard: merge, indexing, vacuum and
/// config-mismatch optimizers, all sharing the same thresholds, segment
/// paths and collection/HNSW/quantization configuration.
pub fn build_optimizers(
    shard_path: &Path,
    collection_params: &CollectionParams,
    optimizers_config: &OptimizersConfig,
    hnsw_config: &HnswConfig,
    hnsw_global_config: &HnswGlobalConfig,
    quantization_config: &Option<QuantizationConfig>,
) -> Arc<Vec<Arc<Optimizer>>> {
    let num_indexing_threads = num_rayon_threads(hnsw_config.max_indexing_threads);
    let segments_path = shard_path.join(SEGMENTS_PATH);
    let temp_segments_path = shard_path.join(TEMP_SEGMENTS_PATH);
    let threshold_config = optimizers_config.optimizer_thresholds(num_indexing_threads);
    Arc::new(vec![
        // Keeps the number of segments near the configured target.
        Arc::new(MergeOptimizer::new(
            optimizers_config.get_number_segments(),
            threshold_config,
            segments_path.clone(),
            temp_segments_path.clone(),
            collection_params.clone(),
            *hnsw_config,
            hnsw_global_config.clone(),
            quantization_config.clone(),
        )),
        // Builds vector indexes for segments above the indexing threshold.
        Arc::new(IndexingOptimizer::new(
            optimizers_config.get_number_segments(),
            threshold_config,
            segments_path.clone(),
            temp_segments_path.clone(),
            collection_params.clone(),
            *hnsw_config,
            hnsw_global_config.clone(),
            quantization_config.clone(),
        )),
        // Rewrites segments with too many deleted vectors.
        Arc::new(VacuumOptimizer::new(
            optimizers_config.deleted_threshold,
            optimizers_config.vacuum_min_vector_number,
            threshold_config,
            segments_path.clone(),
            temp_segments_path.clone(),
            collection_params.clone(),
            *hnsw_config,
            hnsw_global_config.clone(),
            quantization_config.clone(),
        )),
        // Rebuilds segments whose config drifted from the collection config.
        Arc::new(ConfigMismatchOptimizer::new(
            threshold_config,
            segments_path,
            temp_segments_path,
            collection_params.clone(),
            *hnsw_config,
            hnsw_global_config.clone(),
            quantization_config.clone(),
        )),
    ])
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/update_handler.rs | lib/collection/src/update_handler.rs | use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use parking_lot::Mutex;
use segment::types::SeqNumberType;
use tokio::runtime::Handle;
use tokio::sync::mpsc::{self, Receiver};
use tokio::sync::{Mutex as TokioMutex, oneshot};
use tokio::task::JoinHandle;
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::collection_manager::holders::segment_holder::LockedSegmentHolder;
use crate::collection_manager::optimizers::TrackerLog;
use crate::collection_manager::optimizers::segment_optimizer::SegmentOptimizer;
use crate::common::stoppable_task::StoppableTaskHandle;
use crate::operations::CollectionUpdateOperations;
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::CollectionResult;
use crate::shards::CollectionId;
use crate::shards::local_shard::LocalShardClocks;
use crate::shards::update_tracker::UpdateTracker;
use crate::update_workers::UpdateWorkers;
use crate::wal_delta::LockedWal;
pub type Optimizer = dyn SegmentOptimizer + Sync + Send;
/// Information, required to perform operation and notify regarding the result
#[derive(Debug)]
pub struct OperationData {
    /// Sequential number of the operation
    pub op_num: SeqNumberType,
    /// Operation
    pub operation: CollectionUpdateOperations,
    /// If operation was requested to wait for result
    pub wait: bool,
    /// Callback notification channel
    pub sender: Option<oneshot::Sender<CollectionResult<usize>>>,
    /// Accumulator for hardware usage measured while applying this operation
    pub hw_measurements: HwMeasurementAcc,
}
/// Signal, used to inform Updater process
#[derive(Debug)]
// NOTE(review): `Operation(OperationData)` is much larger than the other
// variants; the lint is silenced here — confirm that boxing the payload
// was deliberately avoided.
#[allow(clippy::large_enum_variant)]
pub enum UpdateSignal {
    /// Requested operation to perform
    Operation(OperationData),
    /// Stop all optimizers and listening
    Stop,
    /// Empty signal used to trigger optimizers
    Nop,
    /// Ensures that previous updates are applied
    Plunger(oneshot::Sender<()>),
}
/// Signal, used to inform Optimization process
///
/// Carries at most an operation sequence number, so it can be `Copy`.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum OptimizerSignal {
    /// Sequential number of the operation
    Operation(SeqNumberType),
    /// Stop all optimizers and listening
    Stop,
    /// Empty signal used to trigger optimizers
    Nop,
}
/// Structure, which holds object, required for processing updates of the collection
pub struct UpdateHandler {
    /// Name of the collection this handler belongs to
    collection_name: CollectionId,
    /// Storage-level configuration shared across collections
    /// (e.g. provides the update queue size for the optimizer channel)
    shared_storage_config: Arc<SharedStorageConfig>,
    /// Payload index schema of the collection, persisted on disk
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    /// List of used optimizers
    pub optimizers: Arc<Vec<Arc<Optimizer>>>,
    /// Log of optimizer statuses
    optimizers_log: Arc<Mutex<TrackerLog>>,
    /// Total number of optimized points since last start
    total_optimized_points: Arc<AtomicUsize>,
    /// Global CPU budget in number of cores for all optimization tasks.
    /// Assigns CPU permits to tasks to limit overall resource utilization.
    optimizer_resource_budget: ResourceBudget,
    /// How frequent can we flush data
    /// This parameter depends on the optimizer config and should be updated accordingly.
    pub flush_interval_sec: u64,
    /// Segments of the shard, shared with the worker tasks
    segments: LockedSegmentHolder,
    /// Process, that listens updates signals and perform updates
    update_worker: Option<JoinHandle<()>>,
    /// Process, that listens for post-update signals and performs optimization
    optimizer_worker: Option<JoinHandle<()>>,
    /// Process that periodically flushes segments and tries to truncate wal
    flush_worker: Option<JoinHandle<()>>,
    /// Sender to stop flush worker
    flush_stop: Option<oneshot::Sender<()>>,
    /// Tokio runtime the worker tasks are spawned on
    runtime_handle: Handle,
    /// WAL, required for operations
    wal: LockedWal,
    /// Always keep this WAL version and later and prevent acknowledging/truncating from the WAL.
    /// This is used when other bits of code still depend on information in the WAL, such as the
    /// queue proxy shard.
    /// Defaults to `u64::MAX` to allow acknowledging all confirmed versions.
    pub(super) wal_keep_from: Arc<AtomicU64>,
    /// Handles of currently running optimization tasks
    optimization_handles: Arc<TokioMutex<Vec<StoppableTaskHandle<bool>>>>,
    /// Maximum number of concurrent optimization jobs in this update handler.
    /// This parameter depends on the optimizer config and should be updated accordingly.
    pub max_optimization_threads: Option<usize>,
    /// Highest and cutoff clocks for the shard WAL.
    clocks: LocalShardClocks,
    /// Path of the shard on disk
    shard_path: PathBuf,
    /// Whether we have ever triggered optimizers since starting.
    has_triggered_optimizers: Arc<AtomicBool>,
    /// Scroll read lock
    /// The lock, which must prevent updates during scroll + retrieve operations
    /// Consistency of scroll operations is especially important for internal processes like
    /// re-sharding and shard transfer, so explicit lock for those operations is required.
    ///
    /// Write lock must be held for updates, while read lock must be held for scroll
    scroll_read_lock: Arc<tokio::sync::RwLock<()>>,
    /// Tracker for update operations, shared with the worker tasks
    update_tracker: UpdateTracker,
}
impl UpdateHandler {
/// Create a new update handler.
///
/// Workers are not started here; call [`Self::run_workers`] to spawn the
/// update, optimizer and flush workers.
#[allow(clippy::too_many_arguments)]
pub fn new(
    collection_name: CollectionId,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    optimizers: Arc<Vec<Arc<Optimizer>>>,
    optimizers_log: Arc<Mutex<TrackerLog>>,
    total_optimized_points: Arc<AtomicUsize>,
    optimizer_resource_budget: ResourceBudget,
    runtime_handle: Handle,
    segments: LockedSegmentHolder,
    wal: LockedWal,
    flush_interval_sec: u64,
    max_optimization_threads: Option<usize>,
    clocks: LocalShardClocks,
    shard_path: PathBuf,
    scroll_read_lock: Arc<tokio::sync::RwLock<()>>,
    update_tracker: UpdateTracker,
) -> UpdateHandler {
    UpdateHandler {
        collection_name,
        shared_storage_config,
        payload_index_schema,
        optimizers,
        segments,
        update_worker: None,
        optimizer_worker: None,
        optimizers_log,
        total_optimized_points,
        optimizer_resource_budget,
        flush_worker: None,
        flush_stop: None,
        runtime_handle,
        wal,
        // u64::MAX allows acknowledging all confirmed WAL versions (see field docs).
        wal_keep_from: Arc::new(u64::MAX.into()),
        flush_interval_sec,
        optimization_handles: Arc::new(TokioMutex::new(vec![])),
        max_optimization_threads,
        clocks,
        shard_path,
        has_triggered_optimizers: Default::default(),
        scroll_read_lock,
        update_tracker,
    }
}
/// Spawn the three background workers on the runtime:
/// the optimization worker (consumes post-update signals), the update
/// worker (applies operations from `update_receiver` and forwards signals
/// to the optimizer), and the flush worker (periodic flush/WAL truncation,
/// stoppable via `flush_stop`).
pub fn run_workers(&mut self, update_receiver: Receiver<UpdateSignal>) {
    // Channel from the update worker to the optimization worker.
    let (tx, rx) = mpsc::channel(self.shared_storage_config.update_queue_size);
    self.optimizer_worker = Some(self.runtime_handle.spawn(
        UpdateWorkers::optimization_worker_fn(
            self.optimizers.clone(),
            tx.clone(),
            rx,
            self.segments.clone(),
            self.wal.clone(),
            self.optimization_handles.clone(),
            self.optimizers_log.clone(),
            self.total_optimized_points.clone(),
            self.optimizer_resource_budget.clone(),
            self.max_optimization_threads,
            self.has_triggered_optimizers.clone(),
            self.payload_index_schema.clone(),
            self.scroll_read_lock.clone(),
            self.update_tracker.clone(),
        ),
    ));
    let wal = self.wal.clone();
    let segments = self.segments.clone();
    let scroll_read_lock = self.scroll_read_lock.clone();
    let update_tracker = self.update_tracker.clone();
    let collection_name = self.collection_name.clone();
    self.update_worker = Some(self.runtime_handle.spawn(UpdateWorkers::update_worker_fn(
        collection_name,
        update_receiver,
        tx,
        wal,
        segments,
        scroll_read_lock,
        update_tracker,
    )));
    let segments = self.segments.clone();
    let wal = self.wal.clone();
    let wal_keep_from = self.wal_keep_from.clone();
    let clocks = self.clocks.clone();
    let flush_interval_sec = self.flush_interval_sec;
    let shard_path = self.shard_path.clone();
    // One-shot channel used by `stop_flush_worker` to stop the flush loop.
    let (flush_tx, flush_rx) = oneshot::channel();
    self.flush_worker = Some(self.runtime_handle.spawn(UpdateWorkers::flush_worker_fn(
        segments,
        wal,
        wal_keep_from,
        clocks,
        flush_interval_sec,
        flush_rx,
        shard_path,
    )));
    self.flush_stop = Some(flush_tx);
}
pub fn stop_flush_worker(&mut self) {
if let Some(flush_stop) = self.flush_stop.take()
&& let Err(()) = flush_stop.send(())
{
log::warn!("Failed to stop flush worker as it is already stopped.");
}
}
/// Notify optimization handles to stop *without* waiting
///
/// Blocking operation
pub fn notify_optimization_handles_to_stop(&self) {
    log::trace!("notify optimization handles to stop");
    self.optimization_handles
        .blocking_lock()
        .iter()
        .for_each(|handle| handle.ask_to_stop());
}
/// Gracefully wait before all optimizations stop
/// If some optimization is in progress - it will be finished before shutdown.
pub async fn wait_workers_stops(&mut self) -> CollectionResult<()> {
    // Join the three long-running workers, in order: update, optimizer, flush.
    let workers = [
        self.update_worker.take(),
        self.optimizer_worker.take(),
        self.flush_worker.take(),
    ];
    for handle in workers.into_iter().flatten() {
        handle.await?;
    }
    let mut opt_handles_guard = self.optimization_handles.lock().await;
    // First ask every optimization task to stop cooperatively...
    for handle in opt_handles_guard.iter() {
        handle.ask_to_stop();
    }
    // ...then join them one by one. Popping keeps the remaining handles
    // in the list if an await fails.
    while let Some(handle) = opt_handles_guard.pop() {
        if let Some(join_handle) = handle.stop() {
            join_handle.await?;
        }
    }
    Ok(())
}
/// Checks whether all update-related workers have stopped.
pub fn is_stopped(&self) -> bool {
self.update_worker.is_none()
&& self.optimizer_worker.is_none()
&& self.flush_worker.is_none()
&& self.optimization_handles.blocking_lock().is_empty()
}
/// Checks the optimizer conditions.
///
/// This function returns a tuple of two booleans:
/// - The first indicates if any optimizers have been triggered since startup.
/// - The second indicates if there are any pending/suboptimal optimizers.
pub(crate) fn check_optimizer_conditions(&self) -> (bool, bool) {
// Check if Qdrant triggered any optimizations since starting at all
let has_triggered_any_optimizers = self.has_triggered_optimizers.load(Ordering::Relaxed);
let excluded_ids = HashSet::<_>::default();
let has_suboptimal_optimizers = self.optimizers.iter().any(|optimizer| {
let nonoptimal_segment_ids =
optimizer.check_condition(self.segments.clone(), &excluded_ids);
!nonoptimal_segment_ids.is_empty()
});
(has_triggered_any_optimizers, has_suboptimal_optimizers)
}
pub async fn store_clocks_if_changed(&self) -> CollectionResult<()> {
let clocks = self.clocks.clone();
let segments = self.segments.clone();
let shard_path = self.shard_path.clone();
self.runtime_handle
.spawn_blocking(move || {
if let Err(err) = clocks.store_if_changed(&shard_path) {
log::warn!("Failed to store clock maps to disk: {err}");
segments.write().report_optimizer_error(err);
}
})
.await?;
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/telemetry.rs | lib/collection/src/telemetry.rs | use std::collections::HashMap;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use segment::data_types::tiny_map::TinyMap;
use segment::types::{
HnswConfig, Payload, QuantizationConfig, StrictModeConfigOutput, VectorNameBuf,
};
use serde::Serialize;
use uuid::Uuid;
use crate::collection_manager::optimizers::TrackerStatus;
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::types::{OptimizersStatus, ReshardingInfo, ShardTransferInfo};
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::shard::ShardId;
use crate::shards::telemetry::ReplicaSetTelemetry;
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct CollectionTelemetry {
pub id: String,
#[anonymize(false)]
pub init_time_ms: u64,
pub config: CollectionConfigTelemetry,
#[serde(skip_serializing_if = "Option::is_none")]
pub shards: Option<Vec<ReplicaSetTelemetry>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub transfers: Option<Vec<ShardTransferInfo>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub resharding: Option<Vec<ReshardingInfo>>,
#[serde(skip_serializing_if = "Option::is_none")]
#[anonymize(false)]
pub shard_clean_tasks: Option<HashMap<ShardId, ShardCleanStatusTelemetry>>,
}
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct CollectionSnapshotTelemetry {
pub id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub running_snapshots: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub running_snapshot_recovery: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub total_snapshot_creations: Option<usize>,
}
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct CollectionsAggregatedTelemetry {
pub vectors: usize,
pub optimizers_status: OptimizersStatus,
pub params: CollectionParams,
}
impl CollectionTelemetry {
pub fn count_vectors(&self) -> usize {
self.shards
.iter()
.flatten()
.filter_map(|shard| shard.local.as_ref())
.map(|x| x.num_vectors.unwrap_or(0))
.sum()
}
/// Amount of optimizers currently running.
///
/// Note: A `DetailsLevel` of 4 or setting `telemetry_detail.optimizer_logs` to true is required.
/// Otherwise, this function will return 0, which may not be correct.
pub fn count_optimizers_running(&self) -> usize {
self.shards
.iter()
.flatten()
.filter_map(|replica_set| replica_set.local.as_ref())
.flat_map(|local_shard| local_shard.optimizations.log.iter().flatten())
.filter(|log| log.status == TrackerStatus::Optimizing)
.count()
}
pub fn count_points(&self) -> usize {
self.shards
.iter()
.flatten()
.filter_map(|shard| shard.local.as_ref())
.map(|local_shard| local_shard.num_points.unwrap_or(0))
.sum()
}
pub fn count_points_per_vector(&self) -> TinyMap<VectorNameBuf, usize> {
self.shards
.iter()
.flatten()
.filter_map(|shard| shard.local.as_ref())
.map(|local_shard| {
local_shard
.num_vectors_by_name
.as_ref()
.into_iter()
.flatten()
})
.fold(
TinyMap::<VectorNameBuf, usize>::new(),
|mut acc, shard_vectors| {
for (name, count) in shard_vectors {
*acc.get_or_insert_default(name) += count;
}
acc
},
)
}
}
#[derive(Serialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ShardCleanStatusTelemetry {
Started,
Progress(ShardCleanStatusProgressTelemetry),
Done,
Failed(ShardCleanStatusFailedTelemetry),
Cancelled,
}
#[derive(Serialize, Clone, Debug, JsonSchema)]
pub struct ShardCleanStatusProgressTelemetry {
pub deleted_points: usize,
}
#[derive(Serialize, Clone, Debug, JsonSchema)]
pub struct ShardCleanStatusFailedTelemetry {
pub reason: String,
}
#[derive(Debug, Serialize, JsonSchema, Anonymize, Clone, PartialEq)]
pub struct CollectionConfigTelemetry {
pub params: CollectionParams,
pub hnsw_config: HnswConfig,
pub optimizer_config: OptimizersConfig,
pub wal_config: WalConfig,
#[serde(default)]
pub quantization_config: Option<QuantizationConfig>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub strict_mode_config: Option<StrictModeConfigOutput>,
#[serde(default)]
#[anonymize(value = None)]
pub uuid: Option<Uuid>,
/// Arbitrary JSON metadata for the collection
#[serde(default, skip_serializing_if = "Option::is_none")]
#[anonymize(value = None)]
pub metadata: Option<Payload>,
}
impl From<CollectionConfigInternal> for CollectionConfigTelemetry {
fn from(config: CollectionConfigInternal) -> Self {
let CollectionConfigInternal {
params,
hnsw_config,
optimizer_config,
wal_config,
quantization_config,
strict_mode_config,
uuid,
metadata,
} = config;
CollectionConfigTelemetry {
params,
hnsw_config,
optimizer_config,
wal_config,
quantization_config,
strict_mode_config: strict_mode_config.map(StrictModeConfigOutput::from),
uuid,
metadata,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/hash_ring.rs | lib/collection/src/hash_ring.rs | use std::collections::HashSet;
use std::fmt;
use std::hash::{BuildHasherDefault, Hash};
use bytemuck::TransparentWrapper as _;
use common::stable_hash::{StableHash, StableHashed};
use itertools::Itertools as _;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{CustomIdCheckerCondition, PointIdType};
use smallvec::SmallVec;
use crate::operations::cluster_ops::ReshardingDirection;
use crate::shards::shard::ShardId;
pub const HASH_RING_SHARD_SCALE: u32 = 100;
#[derive(Clone, Debug, PartialEq)]
pub enum HashRingRouter<T: Eq + StableHash + Hash = ShardId> {
/// Single hashring
Single(HashRing<T>),
/// Two hashrings when transitioning during resharding
/// Depending on the current resharding state, points may be in either or both shards.
Resharding { old: HashRing<T>, new: HashRing<T> },
}
impl<T: Copy + Eq + StableHash + Hash> HashRingRouter<T> {
/// Create a new single hashring.
///
/// The hashring is created with a fair distribution of points and `HASH_RING_SHARD_SCALE` scale.
pub fn single() -> Self {
Self::Single(HashRing::fair(HASH_RING_SHARD_SCALE))
}
pub fn add(&mut self, shard: T) -> bool {
match self {
Self::Single(ring) => ring.add(shard),
Self::Resharding { old, new } => {
// When resharding is in progress:
// - either `new` hashring contains a shard, that is not in `old` (when resharding *up*)
// - or `old` contains a shard, that is not in `new` (when resharding *down*)
//
// This check ensures, that we don't accidentally break this invariant when adding
// nodes to `Resharding` hashring.
if !old.contains(&shard) && !new.contains(&shard) {
old.add(shard);
new.add(shard);
true
} else {
false
}
}
}
}
pub fn start_resharding(&mut self, shard: T, direction: ReshardingDirection) {
if let Self::Single(ring) = self {
let (old, new) = (ring.clone(), ring.clone());
*self = Self::Resharding { old, new };
}
let Self::Resharding { old, new } = self else {
unreachable!();
};
match direction {
ReshardingDirection::Up => {
old.remove(&shard);
new.add(shard);
}
ReshardingDirection::Down => {
assert!(new.len() > 1, "cannot remove last shard from hash ring");
old.add(shard);
new.remove(&shard);
}
}
}
pub fn commit_resharding(&mut self) -> bool {
let Self::Resharding { new, .. } = self else {
log::warn!("committing resharding hashring, but hashring is not in resharding mode");
return false;
};
*self = Self::Single(new.clone());
true
}
pub fn abort_resharding(&mut self, shard: T, direction: ReshardingDirection) -> bool
where
T: fmt::Display,
{
let context = match direction {
ReshardingDirection::Up => "reverting scale-up hashring into single mode",
ReshardingDirection::Down => "reverting scale-down hashring into single mode",
};
let Self::Resharding { old, new } = self else {
log::warn!("{context}, but hashring is not in resharding mode");
return false;
};
let mut old = old.clone();
let mut new = new.clone();
let (expected_in_old, expected_in_new) = match direction {
ReshardingDirection::Up => (old.remove(&shard), new.remove(&shard)),
ReshardingDirection::Down => (old.add(shard), new.add(shard)),
};
match (expected_in_old, expected_in_new) {
(false, true) => (),
(true, false) => {
log::error!("{context}, but expected state of hashrings is reversed");
}
(true, true) => {
log::error!("{context}, but {shard} is not a target shard");
}
(false, false) => {
log::warn!("{context}, but shard {shard} does not exist in the hashring");
}
};
if old == new {
log::debug!("{context}, because the rerouting for resharding is done");
*self = Self::Single(old.clone());
true
} else {
log::warn!("{context}, but rerouting for resharding is not done yet");
false
}
}
pub fn get<U: StableHash>(&self, key: &U) -> ShardIds<T> {
match self {
Self::Single(ring) => ring.get(key).into_iter().copied().collect(),
Self::Resharding { old, new } => old
.get(key)
.into_iter()
.chain(new.get(key))
.copied()
.dedup() // Both hash rings may return the same shard ID, take it once
.collect(),
}
}
/// Check whether the given point is in the given shard
///
/// In case of resharding, the new hashring is checked.
pub fn is_in_shard<U: StableHash>(&self, key: &U, shard: T) -> bool {
let ring = match self {
Self::Resharding { new, .. } => new,
Self::Single(ring) => ring,
};
ring.get(key) == Some(&shard)
}
}
impl<T: Eq + StableHash + Hash> HashRingRouter<T> {
pub fn is_resharding(&self) -> bool {
matches!(self, Self::Resharding { .. })
}
pub fn is_empty(&self) -> bool {
match self {
Self::Single(ring) => ring.is_empty(),
Self::Resharding { old, new } => old.is_empty() && new.is_empty(),
}
}
/// Get unique nodes from the hashring
pub fn nodes(&self) -> &HashSet<T> {
match self {
HashRingRouter::Single(ring) => ring.nodes(),
HashRingRouter::Resharding { new, .. } => new.nodes(),
}
}
}
type StableHashBuilder = BuildHasherDefault<siphasher::sip::SipHasher24>;
/// List type for shard IDs
///
/// Uses a `SmallVec` putting two IDs on the stack. That's the maximum number of shards we expect
/// with the current resharding implementation.
pub type ShardIds<T = ShardId> = SmallVec<[T; 2]>;
#[derive(Clone, Debug, PartialEq)]
pub enum HashRing<T: Eq + StableHash + Hash> {
Raw {
nodes: HashSet<T>,
ring: hashring::HashRing<StableHashed<T>, StableHashBuilder>,
},
Fair {
nodes: HashSet<T>,
ring: hashring::HashRing<StableHashed<(T, u32)>, StableHashBuilder>,
scale: u32,
},
}
impl<T: Copy + Eq + StableHash + Hash> HashRing<T> {
pub fn raw() -> Self {
Self::Raw {
nodes: HashSet::new(),
ring: hashring::HashRing::with_hasher(StableHashBuilder::new()),
}
}
/// Constructs a HashRing that tries to give all shards equal space on the ring.
/// The higher the `scale` - the more equal the distribution of points on the shards will be,
/// but shard search might be slower.
pub fn fair(scale: u32) -> Self {
Self::Fair {
nodes: HashSet::new(),
ring: hashring::HashRing::with_hasher(StableHashBuilder::new()),
scale,
}
}
pub fn add(&mut self, shard: T) -> bool {
if !self.nodes_mut().insert(shard) {
return false;
}
match self {
HashRing::Raw { ring, .. } => {
ring.add(StableHashed(shard));
}
HashRing::Fair { ring, scale, .. } => {
for idx in 0..*scale {
ring.add(StableHashed((shard, idx)));
}
}
}
true
}
pub fn remove(&mut self, shard: &T) -> bool {
if !self.nodes_mut().remove(shard) {
return false;
}
match self {
HashRing::Raw { ring, .. } => {
ring.remove(&StableHashed(*shard));
}
HashRing::Fair { ring, scale, .. } => {
for idx in 0..*scale {
ring.remove(&StableHashed((*shard, idx)));
}
}
}
true
}
}
impl<T: Eq + StableHash + Hash> HashRing<T> {
pub fn get<U: StableHash>(&self, key: &U) -> Option<&T> {
let key = StableHashed::wrap_ref(key);
match self {
HashRing::Raw { ring, .. } => ring.get(key).map(|StableHashed(shard)| shard),
HashRing::Fair { ring, .. } => ring.get(key).map(|StableHashed((shard, _))| shard),
}
}
pub fn is_empty(&self) -> bool {
self.nodes().is_empty()
}
pub fn len(&self) -> usize {
self.nodes().len()
}
pub fn contains(&self, shard: &T) -> bool {
self.nodes().contains(shard)
}
pub fn nodes(&self) -> &HashSet<T> {
match self {
HashRing::Raw { nodes, .. } => nodes,
HashRing::Fair { nodes, .. } => nodes,
}
}
fn nodes_mut(&mut self) -> &mut HashSet<T> {
match self {
HashRing::Raw { nodes, .. } => nodes,
HashRing::Fair { nodes, .. } => nodes,
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct HashRingFilter {
ring: HashRing<ShardId>,
expected_shard_id: ShardId,
}
impl HashRingFilter {
pub fn new(ring: HashRing<ShardId>, expected_shard_id: ShardId) -> Self {
Self {
ring,
expected_shard_id,
}
}
}
impl CustomIdCheckerCondition for HashRingFilter {
fn estimate_cardinality(&self, points: usize) -> CardinalityEstimation {
CardinalityEstimation {
primary_clauses: vec![],
min: 0,
exp: points / self.ring.len(),
max: points,
}
}
fn check(&self, point_id: PointIdType) -> bool {
self.ring.get(&point_id) == Some(&self.expected_shard_id)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_non_seq_keys() {
let mut ring = HashRing::fair(100);
ring.add(5);
ring.add(7);
ring.add(8);
ring.add(20);
for i in 0..20 {
match ring.get(&i) {
None => panic!("Key {i} has no shard"),
Some(x) => assert!([5, 7, 8, 20].contains(x)),
}
}
}
#[test]
fn test_repartition() {
let mut ring = HashRing::fair(100);
ring.add(1);
ring.add(2);
ring.add(3);
let mut pre_split = Vec::new();
let mut post_split = Vec::new();
for i in 0..100 {
match ring.get(&i) {
None => panic!("Key {i} has no shard"),
Some(x) => pre_split.push(*x),
}
}
ring.add(4);
for i in 0..100 {
match ring.get(&i) {
None => panic!("Key {i} has no shard"),
Some(x) => post_split.push(*x),
}
}
assert_ne!(pre_split, post_split);
for (x, y) in pre_split.iter().zip(post_split.iter()) {
if x != y {
assert_eq!(*y, 4);
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/recommendations.rs | lib/collection/src/recommendations.rs | use std::future::Future;
use std::iter::Peekable;
use std::time::Duration;
use api::rest::RecommendStrategy;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use itertools::Itertools;
use segment::data_types::vectors::{
DenseVector, NamedQuery, TypedMultiDenseVector, VectorElementType, VectorInternal, VectorRef,
};
use segment::types::{
Condition, ExtendedPointId, Filter, HasIdCondition, PointIdType, ScoredPoint,
};
use segment::vector_storage::query::RecoQuery;
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequestBatch;
use sparse::common::sparse_vector::SparseVector;
use tokio::sync::RwLockReadGuard;
use crate::collection::Collection;
use crate::common::batching::batch_requests;
use crate::common::fetch_vectors::{
ReferencedVectors, convert_to_vectors, convert_to_vectors_owned,
resolve_referenced_vectors_batch,
};
use crate::common::retrieve_request_trait::RetrieveRequest;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{
CollectionError, CollectionResult, CoreSearchRequest, RecommendRequestInternal, UsingVector,
};
fn avg_vectors<'a>(
vectors: impl IntoIterator<Item = VectorRef<'a>>,
) -> CollectionResult<VectorInternal> {
let mut avg_dense = DenseVector::default();
let mut avg_sparse = SparseVector::default();
let mut avg_multi: Option<TypedMultiDenseVector<VectorElementType>> = None;
let mut dense_count = 0;
let mut sparse_count = 0;
let mut multi_count = 0;
for vector in vectors {
match vector {
VectorRef::Dense(vector) => {
dense_count += 1;
for i in 0..vector.len() {
if i >= avg_dense.len() {
avg_dense.push(vector[i])
} else {
avg_dense[i] += vector[i];
}
}
}
VectorRef::Sparse(vector) => {
sparse_count += 1;
avg_sparse = vector.combine_aggregate(&avg_sparse, |v1, v2| v1 + v2);
}
VectorRef::MultiDense(vector) => {
multi_count += 1;
avg_multi = Some(avg_multi.map_or_else(
|| vector.to_owned(),
|mut avg_multi| {
avg_multi
.flattened_vectors
.extend_from_slice(vector.flattened_vectors);
avg_multi
},
));
}
}
}
match (dense_count, sparse_count, multi_count) {
// TODO(sparse): what if vectors iterator is empty? We added CollectionError::BadRequest,
// but it's not clear if it's the best solution.
// Currently it's hard to return an zeroed vector, because we don't know its type: dense or sparse.
(0, 0, 0) => Err(CollectionError::bad_input(
"Positive vectors should not be empty with `average` strategy".to_owned(),
)),
(_, 0, 0) => {
for item in &mut avg_dense {
*item /= dense_count as VectorElementType;
}
Ok(VectorInternal::from(avg_dense))
}
(0, _, 0) => {
for item in &mut avg_sparse.values {
*item /= sparse_count as VectorElementType;
}
Ok(VectorInternal::from(avg_sparse))
}
(0, 0, _) => match avg_multi {
Some(avg_multi) => Ok(VectorInternal::from(avg_multi)),
None => Err(CollectionError::bad_input(
"Positive vectors should not be empty with `average` strategy".to_owned(),
)),
},
(_, _, _) => Err(CollectionError::bad_input(
"Can't average vectors with different types".to_owned(),
)),
}
}
fn merge_positive_and_negative_avg(
positive: VectorInternal,
negative: VectorInternal,
) -> CollectionResult<VectorInternal> {
match (positive, negative) {
(VectorInternal::Dense(positive), VectorInternal::Dense(negative)) => {
let vector: DenseVector = positive
.iter()
.zip(negative.iter())
.map(|(pos, neg)| pos + pos - neg)
.collect();
Ok(vector.into())
}
(VectorInternal::Sparse(positive), VectorInternal::Sparse(negative)) => Ok(positive
.combine_aggregate(&negative, |pos, neg| pos + pos - neg)
.into()),
(VectorInternal::MultiDense(mut positive), VectorInternal::MultiDense(negative)) => {
// merge positive and negative vectors as concatenated vectors with negative vectors negated
positive.flattened_vectors.extend(negative.flattened_vectors.into_iter().map(|x| -x));
Ok(VectorInternal::MultiDense(positive))
},
_ => Err(CollectionError::bad_input(
"Positive and negative vectors should be of the same type, either all dense or all sparse or all multi".to_owned(),
)),
}
}
pub fn avg_vector_for_recommendation<'a>(
positive: impl IntoIterator<Item = VectorRef<'a>>,
mut negative: Peekable<impl Iterator<Item = VectorRef<'a>>>,
) -> CollectionResult<VectorInternal> {
let avg_positive = avg_vectors(positive)?;
let search_vector = if negative.peek().is_none() {
avg_positive
} else {
let avg_negative = avg_vectors(negative)?;
merge_positive_and_negative_avg(avg_positive, avg_negative)?
};
Ok(search_vector)
}
pub async fn recommend_by<'a, F, Fut>(
request: RecommendRequestInternal,
collection: &Collection,
collection_by_name: F,
read_consistency: Option<ReadConsistency>,
shard_selector: ShardSelectorInternal,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>>
where
F: Fn(String) -> Fut,
Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
if request.limit == 0 {
return Ok(vec![]);
}
// `recommend_by` is a special case of recommend_by_batch with a single batch
let request_batch = vec![(request, shard_selector)];
let results = recommend_batch_by(
request_batch,
collection,
collection_by_name,
read_consistency,
timeout,
hw_measurement_acc,
)
.await?;
Ok(results.into_iter().next().unwrap())
}
pub fn recommend_into_core_search(
collection_name: &str,
request: RecommendRequestInternal,
all_vectors_records_map: &ReferencedVectors,
) -> CollectionResult<CoreSearchRequest> {
let reference_vectors_ids = request
.positive
.iter()
.chain(&request.negative)
.filter_map(|example| example.as_point_id())
.collect_vec();
let lookup_collection_name = request.lookup_from.as_ref().map(|x| &x.collection);
for &point_id in &reference_vectors_ids {
if all_vectors_records_map
.get(lookup_collection_name, point_id)
.is_none()
{
return Err(CollectionError::PointNotFound {
missed_point_id: point_id,
});
}
}
// do not exclude vector ids from different lookup collection
let reference_vectors_ids_to_exclude = match lookup_collection_name {
Some(lookup_collection_name) if lookup_collection_name != collection_name => vec![],
_ => reference_vectors_ids,
};
match request.strategy.unwrap_or_default() {
RecommendStrategy::AverageVector => recommend_by_avg_vector(
request,
reference_vectors_ids_to_exclude,
all_vectors_records_map,
),
RecommendStrategy::BestScore => Ok(recommend_by_custom_score(
request,
reference_vectors_ids_to_exclude,
all_vectors_records_map,
QueryEnum::RecommendBestScore,
)),
RecommendStrategy::SumScores => Ok(recommend_by_custom_score(
request,
reference_vectors_ids_to_exclude,
all_vectors_records_map,
QueryEnum::RecommendSumScores,
)),
}
}
/// Search points in a collection by already existing points in this or another collection.
///
/// Function works in following stages:
///
/// - Constructs queries to retrieve points from the existing collections
/// - Executes queries in parallel
/// - Converts retrieve results into lookup table
/// - Constructs regular search queries, execute them as single batch
///
/// # Arguments
///
/// * `request_batch` - batch recommendations request
/// * `collection` - collection to search in
/// * `collection_by_name` - function to retrieve collection by name, used to retrieve points from other collections
/// * `timeout` - timeout for the whole batch, in the searching stage. E.g. time in preprocessing won't be counted
///
pub async fn recommend_batch_by<'a, F, Fut>(
request_batch: Vec<(RecommendRequestInternal, ShardSelectorInternal)>,
collection: &Collection,
collection_by_name: F,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>>
where
F: Fn(String) -> Fut,
Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
let start = std::time::Instant::now();
// shortcuts batch if all requests with limit=0
if request_batch.iter().all(|(s, _)| s.limit == 0) {
return Ok(vec![]);
}
// Validate amount of examples
request_batch.iter().try_for_each(|(request, _)| {
match request.strategy.unwrap_or_default() {
RecommendStrategy::AverageVector => {
if request.positive.is_empty() {
return Err(CollectionError::BadRequest {
description: "At least one positive vector ID required with this strategy"
.to_owned(),
});
}
}
RecommendStrategy::BestScore | RecommendStrategy::SumScores => {
if request.positive.is_empty() && request.negative.is_empty() {
return Err(CollectionError::BadRequest {
description: "At least one positive or negative vector ID required with this strategy"
.to_owned(),
});
}
}
}
Ok(())
})?;
let all_vectors_records_map = resolve_referenced_vectors_batch(
&request_batch,
collection,
collection_by_name,
read_consistency,
timeout,
hw_measurement_acc.clone(),
)
.await?;
// update timeout
let timeout = timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
let res = batch_requests::<
(RecommendRequestInternal, ShardSelectorInternal),
ShardSelectorInternal,
Vec<CoreSearchRequest>,
Vec<_>,
>(
request_batch,
|(_req, shard)| shard,
|(req, _), acc| {
recommend_into_core_search(&collection.id, req, &all_vectors_records_map).map(
|core_req| {
acc.push(core_req);
},
)
},
|shard_selector, core_searches, requests| {
if core_searches.is_empty() {
return Ok(());
}
let core_search_batch_request = CoreSearchRequestBatch {
searches: core_searches,
};
requests.push(collection.core_search_batch(
core_search_batch_request,
read_consistency,
shard_selector,
timeout,
hw_measurement_acc.clone(),
));
Ok(())
},
)?;
let results = futures::future::try_join_all(res).await?;
let flatten_results: Vec<Vec<_>> = results.into_iter().flatten().collect();
Ok(flatten_results)
}
fn recommend_by_avg_vector(
request: RecommendRequestInternal,
reference_vectors_ids_to_exclude: Vec<ExtendedPointId>,
all_vectors_records_map: &ReferencedVectors,
) -> CollectionResult<CoreSearchRequest> {
let lookup_vector_name = request.get_lookup_vector_name();
let RecommendRequestInternal {
filter,
with_payload,
with_vector,
params,
limit,
score_threshold,
offset,
using,
positive,
negative,
lookup_from,
..
} = request;
let lookup_collection_name = lookup_from.as_ref().map(|x| &x.collection);
let positive_vectors = convert_to_vectors(
positive.iter(),
all_vectors_records_map,
&lookup_vector_name,
lookup_collection_name,
);
let negative_vectors = convert_to_vectors(
negative.iter(),
all_vectors_records_map,
&lookup_vector_name,
lookup_collection_name,
);
let search_vector =
avg_vector_for_recommendation(positive_vectors, negative_vectors.peekable())?;
Ok(CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery {
query: search_vector,
using: using.map(|name| name.as_name()),
}),
filter: Some(Filter {
should: None,
min_should: None,
must: filter.map(|filter| vec![Condition::Filter(filter)]),
// Exclude vector ids from the same collection given as lookup params
must_not: Some(vec![Condition::HasId(HasIdCondition {
has_id: reference_vectors_ids_to_exclude.into_iter().collect(),
})]),
}),
with_payload,
with_vector,
params,
limit,
score_threshold,
offset: offset.unwrap_or_default(),
})
}
fn recommend_by_custom_score(
request: RecommendRequestInternal,
reference_vectors_ids_to_exclude: Vec<PointIdType>,
all_vectors_records_map: &ReferencedVectors,
query_variant: impl Fn(NamedQuery<RecoQuery<VectorInternal>>) -> QueryEnum,
) -> CoreSearchRequest {
let lookup_vector_name = request.get_lookup_vector_name();
let RecommendRequestInternal {
positive,
negative,
strategy: _,
filter,
params,
limit,
offset,
with_payload,
with_vector,
score_threshold,
using,
lookup_from,
} = request;
let lookup_collection_name = lookup_from.as_ref().map(|x| &x.collection);
let positive = convert_to_vectors_owned(
positive,
all_vectors_records_map,
&lookup_vector_name,
lookup_collection_name,
);
let negative = convert_to_vectors_owned(
negative,
all_vectors_records_map,
&lookup_vector_name,
lookup_collection_name,
);
let query = query_variant(NamedQuery {
query: RecoQuery::new(positive, negative),
using: using.map(|x| match x {
UsingVector::Name(name) => name,
}),
});
CoreSearchRequest {
query,
filter: Some(Filter {
should: None,
min_should: None,
must: filter.map(|filter| vec![Condition::Filter(filter)]),
must_not: Some(vec![Condition::HasId(HasIdCondition {
has_id: reference_vectors_ids_to_exclude.into_iter().collect(),
})]),
}),
params,
limit,
offset: offset.unwrap_or_default(),
with_payload,
with_vector,
score_threshold,
}
}
#[cfg(test)]
mod tests {
use segment::data_types::vectors::{VectorInternal, VectorRef};
use sparse::common::sparse_vector::SparseVector;
use super::avg_vectors;
#[test]
fn test_avg_vectors() {
let vectors: Vec<VectorInternal> = vec![
vec![1.0, 2.0, 3.0].into(),
vec![1.0, 2.0, 3.0].into(),
vec![1.0, 2.0, 3.0].into(),
];
assert_eq!(
avg_vectors(vectors.iter().map(VectorRef::from)).unwrap(),
vec![1.0, 2.0, 3.0].into(),
);
let vectors: Vec<VectorInternal> = vec![
SparseVector::new(vec![0, 1, 2], vec![0.0, 0.1, 0.2])
.unwrap()
.into(),
SparseVector::new(vec![0, 1, 2], vec![0.0, 1.0, 2.0])
.unwrap()
.into(),
];
assert_eq!(
avg_vectors(vectors.iter().map(VectorRef::from)).unwrap(),
SparseVector::new(vec![0, 1, 2], vec![0.0, 0.55, 1.1])
.unwrap()
.into(),
);
let vectors: Vec<VectorInternal> = vec![
vec![1.0, 2.0, 3.0].into(),
SparseVector::new(vec![0, 1, 2], vec![0.0, 0.1, 0.2])
.unwrap()
.into(),
];
assert!(avg_vectors(vectors.iter().map(VectorRef::from)).is_err());
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/events.rs | lib/collection/src/events.rs | use std::collections::HashMap;
use segment::json_path::JsonPath;
use segment::types::{Filter, PayloadFieldSchema};
use crate::shards::CollectionId;
pub struct CollectionDeletedEvent {
pub collection_id: CollectionId,
}
pub struct SlowQueryEvent {
pub collection_id: CollectionId,
pub filters: Vec<Filter>,
pub schema: HashMap<JsonPath, PayloadFieldSchema>,
}
pub struct IndexCreatedEvent {
pub collection_id: CollectionId,
pub field_name: JsonPath,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/wal_delta.rs | lib/collection/src/wal_delta.rs | use std::fmt::Debug;
use std::sync::Arc;
use shard::wal::SerdeWal;
use thiserror::Error;
use tokio::sync::{Mutex, OwnedMutexGuard};
use crate::operations::{ClockTag, OperationWithClockTag};
use crate::shards::local_shard::clock_map::{ClockMap, RecoveryPoint};
pub(crate) type LockedWal = Arc<Mutex<SerdeWal<OperationWithClockTag>>>;
/// A WAL that is recoverable, with operations having clock tags and a corresponding clock map.
pub struct RecoverableWal {
pub(super) wal: LockedWal,
/// Map of all highest seen clocks for each peer and clock ID.
pub(super) newest_clocks: Arc<Mutex<ClockMap>>,
/// Map of all clocks and ticks that are cut off.
///
/// Clock ticks equal to those in this map are still recoverable, while clock ticks below those
/// in this map are not.
///
/// This means two things:
/// - this WAL has at least all these clock versions
/// - (so if we advance these clocks, we have to advance `newest_clocks` as well)
/// - this WAL cannot resolve any delta below any of these clocks
pub(super) oldest_clocks: Arc<Mutex<ClockMap>>,
}
impl RecoverableWal {
    /// Construct a recoverable WAL from a shared WAL and its newest/oldest clock maps.
    pub fn new(
        wal: LockedWal,
        newest_clocks: Arc<Mutex<ClockMap>>,
        oldest_clocks: Arc<Mutex<ClockMap>>,
    ) -> Self {
        Self {
            wal,
            newest_clocks,
            oldest_clocks,
        }
    }

    /// Write a record to the WAL, guarantee durability.
    ///
    /// On success, this returns the WAL record number of the written operation along with a WAL
    /// lock guard.
    ///
    /// Fails with `WalError::ClockRejected` if the clock map does not accept the operation's
    /// clock tag; the operation is not written in that case.
    #[must_use = "returned record number and WAL lock must be used carefully"]
    pub async fn lock_and_write(
        &self,
        operation: &mut OperationWithClockTag,
    ) -> shard::wal::Result<(u64, OwnedMutexGuard<SerdeWal<OperationWithClockTag>>)> {
        // Update last seen clock map and correct clock tag if necessary
        if let Some(clock_tag) = &mut operation.clock_tag {
            let operation_accepted = self
                .newest_clocks
                .lock()
                .await
                .advance_clock_and_correct_tag(clock_tag);
            if !operation_accepted {
                return Err(shard::wal::WalError::ClockRejected);
            }
        }
        // Write operation to WAL
        // Take an owned guard so it can be handed back to the caller with the record number
        let mut wal_lock = Mutex::lock_owned(self.wal.clone()).await;
        wal_lock.write(operation).map(|op_num| (op_num, wal_lock))
    }

    /// Take clocks snapshot because we deactivated our replica
    ///
    /// Does nothing if a snapshot already existed. Returns `true` if a snapshot was taken.
    ///
    /// When doing a WAL delta recovery transfer, the recovery point is sourced from the latest
    /// seen snapshot if it exists. This way we prevent skipping operations if the regular latest
    /// clock tags were bumped during a different transfer that was not finished.
    ///
    /// See: <https://github.com/qdrant/qdrant/pull/7787>
    pub async fn take_newest_clocks_snapshot(&self) -> bool {
        self.newest_clocks.lock().await.take_snapshot()
    }

    /// Clear any clocks snapshot because we activated our replica
    ///
    /// Returns `true` if a snapshot was cleared.
    ///
    /// When doing a WAL delta recovery transfer, the recovery point is sourced from the latest
    /// seen snapshot if it exists. This way we prevent skipping operations if the regular latest
    /// clock tags were bumped during a different transfer that was not finished.
    ///
    /// See: <https://github.com/qdrant/qdrant/pull/7787>
    pub async fn clear_newest_clocks_snapshot(&self) -> bool {
        self.newest_clocks.lock().await.clear_snapshot()
    }

    /// Update the cutoff clock map based on the given recovery point
    ///
    /// This can only increase clock ticks in the cutoff clock map. If there already are higher
    /// clock ticks, they're kept.
    ///
    /// It updates the highest seen clocks alongside with it.
    pub async fn update_cutoff(&self, cutoff: &RecoveryPoint) {
        // Lock highest and cutoff maps separately to avoid deadlocks
        {
            let mut newest_clocks = self.newest_clocks.lock().await;
            for clock_tag in cutoff.iter_as_clock_tags() {
                newest_clocks.advance_clock(clock_tag);
            }
        }
        {
            let mut oldest_clocks = self.oldest_clocks.lock().await;
            for clock_tag in cutoff.iter_as_clock_tags() {
                oldest_clocks.advance_clock(clock_tag);
            }
        }
    }

    /// Get a recovery point for this WAL
    ///
    /// Uses newest clocks snapshot if set, otherwise uses newest clocks.
    pub async fn recovery_point(&self) -> RecoveryPoint {
        self.newest_clocks.lock().await.to_recovery_point()
    }

    /// Resolve the WAL delta needed to recover a node at the given `recovery_point`.
    ///
    /// See [`resolve_wal_delta`] for the resolution semantics and error cases.
    pub async fn resolve_wal_delta(
        &self,
        recovery_point: RecoveryPoint,
    ) -> Result<Option<u64>, WalDeltaError> {
        let newest_clocks = self.recovery_point().await;
        let oldest_clocks = self.oldest_clocks.lock().await.to_recovery_point();
        resolve_wal_delta(
            self.wal
                .lock()
                .await
                .read_all(true)
                .map(|(op_num, op)| (op_num, op.clock_tag)),
            recovery_point,
            newest_clocks,
            oldest_clocks,
        )
    }

    /// Get this WAL's version: the record number of its last operation.
    ///
    /// Returns `Ok(None)` if the WAL is empty.
    pub async fn wal_version(&self) -> Result<Option<u64>, WalDeltaError> {
        let wal = self.wal.lock().await;
        if wal.is_empty() {
            Ok(None)
        } else {
            Ok(Some(wal.last_index()))
        }
    }

    /// Append records to this WAL from `other`, starting at operation `append_from` in `other`.
    #[cfg(test)]
    pub async fn append_from(&self, other: &Self, append_from: u64) -> shard::wal::Result<()> {
        // Collect eagerly so `other`'s WAL lock is released before we start writing to our own
        let mut operations = other
            .wal
            .lock()
            .await
            .read(append_from)
            .map(|(_, op)| op)
            .collect::<Vec<_>>();
        for update in operations.iter_mut() {
            let (_, _) = self.lock_and_write(update).await?;
        }
        Ok(())
    }
}
/// Resolve the WAL delta for the given `recovery_point`
///
/// A `local_wal`, `newest_clocks` and `oldest_clocks` are required to resolve the
/// delta. These should be from the node being the source of recovery, likely the current one. The
/// `local_wal` is used to resolve the diff. The `newest_clocks` is used to extend the given
/// recovery point with clocks the failed node does not know about. The `oldest_clocks` is
/// used as lower bound for WAL delta resolution.
///
/// The delta can be sent over to the node which the recovery point is from, to restore its
/// WAL making it consistent with the current shard.
///
/// On success, an option holding a WAL record number is returned.
/// If `Some` - the remote WAL can be recovered by sending the local WAL from that record number.
/// If `None` - the remote WAL is already equal, and we don't have to send any records.
/// If `Err` - no delta can be resolved.
fn resolve_wal_delta(
    operations: impl DoubleEndedIterator<Item = (u64, Option<ClockTag>)>,
    mut recovery_point: RecoveryPoint,
    mut newest_clocks: RecoveryPoint,
    mut oldest_clocks: RecoveryPoint,
) -> Result<Option<u64>, WalDeltaError> {
    // If recovery point is empty, we cannot do a diff transfer
    if recovery_point.is_empty() {
        return Err(WalDeltaError::Empty);
    }
    // If the recovery point has clocks our current node does not know about
    // we're missing essential records and cannot resolve a WAL delta
    if recovery_point.has_clocks_not_in(&newest_clocks) {
        return Err(WalDeltaError::UnknownClocks);
    }
    // If our current node has any lower clock than the recovery point specifies,
    // we're missing essential records and cannot resolve a WAL delta
    if recovery_point.has_any_newer_clocks_than(&newest_clocks) {
        return Err(WalDeltaError::HigherThanCurrent);
    }
    // From this point, increase all clocks by one
    // We must do that so we can specify clock tick 0 as needing everything from that clock
    recovery_point.increase_all_clocks_by(1);
    newest_clocks.increase_all_clocks_by(1);
    oldest_clocks.increase_all_clocks_by(1);
    // Extend clock map with missing clocks this node knows about
    // Ensure the recovering node gets records for a clock it might not have seen yet
    recovery_point.initialize_clocks_missing_from(&newest_clocks);
    // Remove clocks that are equal to this node, we don't have to transfer records for them
    // TODO: do we want to remove higher clocks too, as the recovery node already has all data?
    recovery_point.remove_clocks_equal_to(&newest_clocks);
    // Recovery point may not be below our cutoff point
    if recovery_point.has_any_older_clocks_than(&oldest_clocks) {
        return Err(WalDeltaError::Cutoff);
    }
    // If there are no clocks left, the WALs match up, so we do not need to recover anything
    if recovery_point.is_empty() {
        return Ok(None);
    }
    // Scroll back over the WAL and find a record that covered all clocks, allowing delta resolution
    // Drain satisfied clocks from the recovery point until we have nothing left
    log::trace!("Resolving WAL delta for: {recovery_point}");
    let mut last_op_num = None;
    for (op_num, clock_tag) in operations.rev() {
        // We cannot resolve a delta if we have untagged records
        let Some(clock_tag) = clock_tag else {
            return Err(WalDeltaError::UntaggedRecords);
        };
        // Keep scrolling until we have no clocks left
        let removed_equal = recovery_point.remove_clock_if_newer_or_equal_to_tag(clock_tag);
        if recovery_point.is_empty() {
            // If we only removed newer clocks, delta-ing from the previous record is good enough
            let recover_from = if removed_equal {
                Some(op_num)
            } else {
                last_op_num
            };
            return Ok(recover_from);
        }
        last_op_num.replace(op_num);
    }
    // Exhausted the whole WAL without satisfying every clock in the recovery point
    Err(WalDeltaError::NotFound)
}
#[derive(Error, Debug, Clone, PartialEq, Eq)]
#[error("cannot resolve WAL delta: {0}")]
pub enum WalDeltaError {
#[error("recovery point has no clocks to resolve delta for")]
Empty,
#[error("recovery point requests clocks this WAL does not know about")]
UnknownClocks,
#[error("recovery point requests higher clocks this WAL has")]
HigherThanCurrent,
#[error("some recovery point clocks are below the cutoff point in our WAL")]
Cutoff,
#[error("WAL delta cannot include records without clock tags")]
UntaggedRecords,
#[error("cannot find slice of WAL records that satisfies the recovery point")]
NotFound,
}
#[cfg(test)]
mod tests {
use std::collections::{HashMap, HashSet, VecDeque};
use std::num::NonZeroUsize;
use std::ops::Range;
use std::sync::Arc;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::seq::IndexedRandom;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::VectorStructInternal;
use shard::wal::SerdeWal;
use tempfile::{Builder, TempDir};
use wal::WalOptions;
use super::*;
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted,
};
use crate::operations::{ClockTag, CollectionUpdateOperations, OperationWithClockTag};
use crate::shards::local_shard::clock_map::{ClockMap, RecoveryPoint};
use crate::shards::replica_set::clock_set::ClockSet;
    /// Create an empty `RecoverableWal` backed by a fresh temporary directory.
    ///
    /// Returns the WAL together with its `TempDir` guard; callers must keep the guard alive
    /// for the WAL's lifetime, since dropping it removes the backing directory.
    fn fixture_empty_wal() -> (RecoverableWal, TempDir) {
        let dir = Builder::new().prefix("wal_test").tempdir().unwrap();
        let options = WalOptions {
            segment_capacity: 1024 * 1024,
            segment_queue_len: 0,
            retain_closed: NonZeroUsize::new(1).unwrap(),
        };
        let wal = SerdeWal::new(dir.path(), options).unwrap();
        (
            // Both clock maps start empty
            RecoverableWal::new(
                Arc::new(Mutex::new(wal)),
                Arc::new(Mutex::new(ClockMap::default())),
                Arc::new(Mutex::new(ClockMap::default())),
            ),
            dir,
        )
    }
fn mock_operation(id: u64) -> CollectionUpdateOperations {
CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(
PointInsertOperationsInternal::PointsList(vec![PointStructPersisted {
id: id.into(),
vector: VectorStructInternal::from(vec![1.0, 2.0, 3.0]).into(),
payload: None,
}]),
))
}
    /// Test WAL delta resolution with just one missed operation on node C.
    ///
    /// One operation is written to A, B and C; a second is written only to A and B. Resolving
    /// C's recovery point against A or B must yield a delta of exactly that one operation.
    ///
    /// See: <https://www.notion.so/qdrant/Testing-suite-4e28a978ec05476080ff26ed07757def?pvs=4>
    #[tokio::test]
    async fn test_resolve_wal_delta_one_operation() {
        // Create WALs for peer A, B and C
        let (a_wal, _a_wal_dir) = fixture_empty_wal();
        let (b_wal, _b_wal_dir) = fixture_empty_wal();
        let (c_wal, _c_wal_dir) = fixture_empty_wal();
        // Create clock set for peer A, start first clock from 1
        let mut a_clock_set = ClockSet::new();
        a_clock_set.get_clock().advance_to(0);
        // Create operation on peer A
        let mut a_clock_0 = a_clock_set.get_clock();
        let clock_tick = a_clock_0.tick_once();
        let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
        let bare_operation = mock_operation(1);
        let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
        // Write operation to peer A, B and C, and advance clocks
        let mut a_operation = operation.clone();
        let mut b_operation = operation.clone();
        let mut c_operation = operation.clone();
        let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
        let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
        let (_, _) = c_wal.lock_and_write(&mut c_operation).await.unwrap();
        a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
        a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
        a_clock_0.advance_to(c_operation.clock_tag.unwrap().clock_tick);
        drop(a_clock_0);
        // Create operation on peer A
        let mut a_clock_0 = a_clock_set.get_clock();
        let clock_tick = a_clock_0.tick_once();
        let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
        let bare_operation = mock_operation(2);
        let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
        // Write operation to peer A and B, not C, and advance clocks
        let mut a_operation = operation.clone();
        let mut b_operation = operation.clone();
        let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
        let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
        a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
        a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
        drop(a_clock_0);
        let c_recovery_point = c_wal.recovery_point().await;
        // Resolve delta on node A for node C, assert correctness
        let delta_from = a_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, 1);
        // Resolve delta on node B for node C, assert correctness
        let delta_from = b_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, 1);
        // Diff should have 1 operation, as C missed just one
        assert_eq!(b_wal.wal.lock().await.read(delta_from).count(), 1);
        // Recover WAL on node C by writing delta from node B to it
        c_wal.append_from(&b_wal, delta_from).await.unwrap();
        // WALs should match up perfectly now
        a_wal
            .wal
            .lock()
            .await
            .read(0)
            .zip(b_wal.wal.lock().await.read(0))
            .zip(c_wal.wal.lock().await.read(0))
            .for_each(|((a, b), c)| {
                assert_eq!(a, b);
                assert_eq!(b, c);
            });
        assert_wal_ordering_property(&a_wal, false).await;
        assert_wal_ordering_property(&b_wal, false).await;
        assert_wal_ordering_property(&c_wal, false).await;
    }
    /// Test WAL delta resolution when there are gaps in the WAL on all machines.
    ///
    /// We normally do not expect this situation. But it's good to support it if it happens
    /// unexpectedly.
    ///
    /// See: <https://www.notion.so/qdrant/Testing-suite-4e28a978ec05476080ff26ed07757def?pvs=4>
    #[rstest]
    #[tokio::test]
    async fn test_resolve_wal_delta_with_gaps(#[values(true, false)] with_gap: bool) {
        const N: usize = 5;
        const GAP_SIZE: usize = 10;
        // Create WALs for peer A, B and C
        let (a_wal, _a_wal_dir) = fixture_empty_wal();
        let (b_wal, _b_wal_dir) = fixture_empty_wal();
        let (c_wal, _c_wal_dir) = fixture_empty_wal();
        // Create clock set for peer A, start first clock from 1
        let mut a_clock_set = ClockSet::new();
        a_clock_set.get_clock().advance_to(0);
        // Create N operations on peer A
        for n in 0..N {
            let mut a_clock_0 = a_clock_set.get_clock();
            let clock_tick = a_clock_0.tick_once();
            let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
            let bare_operation = mock_operation((1 + n) as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operation to peer A, B and C and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let mut c_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            let (_, _) = c_wal.lock_and_write(&mut c_operation).await.unwrap();
            a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(c_operation.clock_tag.unwrap().clock_tick);
        }
        // Introduce a gap in the clocks on A
        // Ticks are advanced without writing any operation, leaving unused clock values
        if with_gap {
            for _ in 0..GAP_SIZE {
                let mut a_clock_0 = a_clock_set.get_clock();
                let clock_tick = a_clock_0.tick_once();
                a_clock_0.advance_to(clock_tick);
            }
        }
        // Create N operations on peer A, which are missed on node C
        for n in 0..N {
            let mut a_clock_0 = a_clock_set.get_clock();
            let clock_tick = a_clock_0.tick_once();
            let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
            let bare_operation = mock_operation((1 + N + n) as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operation to peer A and B and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
        }
        let c_recovery_point = c_wal.recovery_point().await;
        // Resolve delta on node A for node C, assert correctness
        let delta_from = a_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Resolve delta on node B for node C, assert correctness
        let delta_from = b_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Diff should have N operations, as C missed just N of them
        assert_eq!(b_wal.wal.lock().await.read(delta_from).count(), N);
        // Recover WAL on node C by writing delta from node B to it
        c_wal.append_from(&b_wal, delta_from).await.unwrap();
        // WALs should match up perfectly now
        a_wal
            .wal
            .lock()
            .await
            .read(0)
            .zip(b_wal.wal.lock().await.read(0))
            .zip(c_wal.wal.lock().await.read(0))
            .for_each(|((a, b), c)| {
                assert_eq!(a, b);
                assert_eq!(b, c);
            });
        assert_wal_ordering_property(&a_wal, true).await;
        assert_wal_ordering_property(&b_wal, true).await;
        assert_wal_ordering_property(&c_wal, true).await;
    }
    /// Test WAL delta resolution with many missed operations on node C.
    ///
    /// N operations reach A, B and C; M more reach only A and B. The resolved delta must
    /// start at record N and contain exactly M operations.
    ///
    /// See: <https://www.notion.so/qdrant/Testing-suite-4e28a978ec05476080ff26ed07757def?pvs=4>
    #[tokio::test]
    async fn test_resolve_wal_delta_many_operations() {
        const N: usize = 5;
        const M: usize = 25;
        // Create WALs for peer A, B and C
        let (a_wal, _a_wal_dir) = fixture_empty_wal();
        let (b_wal, _b_wal_dir) = fixture_empty_wal();
        let (c_wal, _c_wal_dir) = fixture_empty_wal();
        // Create clock set for peer A, start first clock from 1
        let mut a_clock_set = ClockSet::new();
        a_clock_set.get_clock().advance_to(0);
        // Create N operations on peer A
        for i in 0..N {
            let mut a_clock_0 = a_clock_set.get_clock();
            let clock_tick = a_clock_0.tick_once();
            let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
            let bare_operation = mock_operation(i as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operations to peer A, B and C, and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let mut c_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            let (_, _) = c_wal.lock_and_write(&mut c_operation).await.unwrap();
            a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(c_operation.clock_tag.unwrap().clock_tick);
        }
        // Create M operations on peer A, which are missed on node C
        for i in N..N + M {
            let mut a_clock_0 = a_clock_set.get_clock();
            let clock_tick = a_clock_0.tick_once();
            let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
            let bare_operation = mock_operation(i as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operations to peer A and B, not C, and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
        }
        let c_recovery_point = c_wal.recovery_point().await;
        // Resolve delta on node A for node C, assert correctness
        let delta_from = a_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Resolve delta on node B for node C, assert correctness
        let delta_from = b_wal
            .resolve_wal_delta(c_recovery_point)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Diff should have M operations, as node C missed M operations
        assert_eq!(b_wal.wal.lock().await.read(delta_from).count(), M);
        // Recover WAL on node C by writing delta from node B to it
        c_wal.append_from(&b_wal, delta_from).await.unwrap();
        // WALs should match up perfectly now
        a_wal
            .wal
            .lock()
            .await
            .read(0)
            .zip(b_wal.wal.lock().await.read(0))
            .zip(c_wal.wal.lock().await.read(0))
            .for_each(|((a, b), c)| {
                assert_eq!(a, b);
                assert_eq!(b, c);
            });
        assert_wal_ordering_property(&a_wal, false).await;
        assert_wal_ordering_property(&b_wal, false).await;
        assert_wal_ordering_property(&c_wal, false).await;
    }
    /// Test WAL delta resolution with many intermixed operations on node C. Intermixed as in,
    /// from multiple nodes.
    ///
    /// The missed operations alternate between peers A (peer ID 1) and B (peer ID 2), so the
    /// recovery point must be satisfied across two independent clock sets.
    ///
    /// See: <https://www.notion.so/qdrant/Testing-suite-4e28a978ec05476080ff26ed07757def?pvs=4>
    #[tokio::test]
    async fn test_resolve_wal_delta_many_intermixed_operations() {
        const N: usize = 3;
        const M: usize = 50;
        // Create WALs for peer A, B and C
        let (a_wal, _a_wal_dir) = fixture_empty_wal();
        let (b_wal, _b_wal_dir) = fixture_empty_wal();
        let (c_wal, _c_wal_dir) = fixture_empty_wal();
        // Create clock sets for peer A and B
        let mut a_clock_set = ClockSet::new();
        let mut b_clock_set = ClockSet::new();
        // Create N operations on peer A
        for i in 0..N {
            let mut a_clock_0 = a_clock_set.get_clock();
            a_clock_0.advance_to(0);
            let clock_tick = a_clock_0.tick_once();
            let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
            let bare_operation = mock_operation(i as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operations to peer A, B and C, and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let mut c_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            let (_, _) = c_wal.lock_and_write(&mut c_operation).await.unwrap();
            a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
            a_clock_0.advance_to(c_operation.clock_tag.unwrap().clock_tick);
        }
        // Create M operations on peers A and B, which are missed on node C
        for i in N..N + M {
            // Every third operation originates from node A, the rest from node B
            let is_node_a = i % 3 == 0;
            let peer_id = if is_node_a { 1 } else { 2 };
            let mut clock = if is_node_a {
                a_clock_set.get_clock()
            } else {
                b_clock_set.get_clock()
            };
            clock.advance_to(0);
            let clock_tick = clock.tick_once();
            let clock_tag = ClockTag::new(peer_id, clock.id(), clock_tick);
            let bare_operation = mock_operation(i as u64);
            let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
            // Write operations to peer A and B, not C, and advance clocks
            let mut a_operation = operation.clone();
            let mut b_operation = operation.clone();
            let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
            let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
            clock.advance_to(a_operation.clock_tag.unwrap().clock_tick);
            clock.advance_to(b_operation.clock_tag.unwrap().clock_tick);
        }
        let c_recovery_point = c_wal.recovery_point().await;
        // Resolve delta on node A for node C, assert correctness
        let delta_from = a_wal
            .resolve_wal_delta(c_recovery_point.clone())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Resolve delta on node B for node C, assert correctness
        let delta_from = b_wal
            .resolve_wal_delta(c_recovery_point)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(delta_from, N as u64);
        // Diff should have M operations, as node C missed M operations
        assert_eq!(b_wal.wal.lock().await.read(delta_from).count(), M);
        // Recover WAL on node C by writing delta from node B to it
        c_wal.append_from(&b_wal, delta_from).await.unwrap();
        // WALs should match up perfectly now
        a_wal
            .wal
            .lock()
            .await
            .read(0)
            .zip(b_wal.wal.lock().await.read(0))
            .zip(c_wal.wal.lock().await.read(0))
            .for_each(|((a, b), c)| {
                assert_eq!(a, b);
                assert_eq!(b, c);
            });
        assert_wal_ordering_property(&a_wal, false).await;
        assert_wal_ordering_property(&b_wal, false).await;
        assert_wal_ordering_property(&c_wal, false).await;
    }
/// Test WAL delta resolution with operations in a different order on node A and B.
///
/// See: <https://www.notion.so/qdrant/Testing-suite-4e28a978ec05476080ff26ed07757def?pvs=4>
#[tokio::test]
async fn test_resolve_wal_delta_unordered_operations() {
// Create WALs for peer A, B and C
let (a_wal, _a_wal_dir) = fixture_empty_wal();
let (b_wal, _b_wal_dir) = fixture_empty_wal();
let (c_wal, _c_wal_dir) = fixture_empty_wal();
// Create clock sets for peer A and B, start first clocks from 1
let mut a_clock_set = ClockSet::new();
let mut b_clock_set = ClockSet::new();
a_clock_set.get_clock().advance_to(0);
b_clock_set.get_clock().advance_to(0);
// Create operation on peer A
let mut a_clock_0 = a_clock_set.get_clock();
let clock_tick = a_clock_0.tick_once();
let clock_tag = ClockTag::new(1, a_clock_0.id(), clock_tick);
let bare_operation = mock_operation(1);
let operation = OperationWithClockTag::new(bare_operation, Some(clock_tag));
// Write operation to peer A, B and C, and advance clocks
let mut a_operation = operation.clone();
let mut b_operation = operation.clone();
let mut c_operation = operation.clone();
let (_, _) = a_wal.lock_and_write(&mut a_operation).await.unwrap();
let (_, _) = b_wal.lock_and_write(&mut b_operation).await.unwrap();
let (_, _) = c_wal.lock_and_write(&mut c_operation).await.unwrap();
a_clock_0.advance_to(a_operation.clock_tag.unwrap().clock_tick);
a_clock_0.advance_to(b_operation.clock_tag.unwrap().clock_tick);
a_clock_0.advance_to(c_operation.clock_tag.unwrap().clock_tick);
drop(a_clock_0);
// Create operations on nodes A and B
let mut a_clock_0 = a_clock_set.get_clock();
let mut b_clock_0 = b_clock_set.get_clock();
let a_clock_tick = a_clock_0.tick_once();
let b_clock_tick = b_clock_0.tick_once();
let a_clock_tag = ClockTag::new(1, a_clock_0.id(), a_clock_tick);
let b_clock_tag = ClockTag::new(2, a_clock_0.id(), b_clock_tick);
let bare_operation_1 = mock_operation(2);
let bare_operation_2 = mock_operation(3);
let operation_1 = OperationWithClockTag::new(bare_operation_1, Some(a_clock_tag));
let operation_2 = OperationWithClockTag::new(bare_operation_2, Some(b_clock_tag));
// Write operations to nodes A and B in different order, but not to node C
let mut a_operation_1 = operation_1.clone();
let mut a_operation_2 = operation_2.clone();
let mut b_operation_1 = operation_1.clone();
let mut b_operation_2 = operation_2.clone();
let (_, _) = a_wal.lock_and_write(&mut a_operation_1).await.unwrap();
let (_, _) = a_wal.lock_and_write(&mut a_operation_2).await.unwrap();
let (_, _) = b_wal.lock_and_write(&mut b_operation_2).await.unwrap();
let (_, _) = b_wal.lock_and_write(&mut b_operation_1).await.unwrap();
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_state.rs | lib/collection/src/collection_state.rs | use std::collections::{HashMap, HashSet};
use ahash::AHashMap;
use serde::{Deserialize, Serialize};
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::config::CollectionConfigInternal;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::resharding::ReshardState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_holder::shard_mapping::ShardKeyMapping;
use crate::shards::transfer::ShardTransfer;
/// Replication info for a single shard: the state of each replica, keyed by peer.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ShardInfo {
    /// Replica state per peer that holds a copy of this shard
    pub replicas: HashMap<PeerId, ReplicaState>,
}
/// Serializable snapshot of a collection's state.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct State {
    /// Collection configuration
    pub config: CollectionConfigInternal,
    /// Replica info for every shard of the collection
    pub shards: AHashMap<ShardId, ShardInfo>,
    /// In-progress resharding operation, if any
    pub resharding: Option<ReshardState>,
    /// Active shard transfers; empty when absent in serialized state
    #[serde(default)]
    pub transfers: HashSet<ShardTransfer>,
    /// Mapping of shard keys to shard IDs; defaulted when absent in serialized state
    #[serde(default)]
    pub shards_key_mapping: ShardKeyMapping,
    /// Payload index schema of the collection; defaulted when absent in serialized state
    #[serde(default)]
    pub payload_index_schema: PayloadIndexSchema,
}
impl State {
    /// Highest shard ID present in the shard key mapping, or `0` if the mapping is empty.
    // NOTE(review): only considers IDs from `shards_key_mapping`, not `self.shards` —
    // confirm the mapping covers all shard IDs for this to truly be the maximum.
    pub fn max_shard_id(&self) -> ShardId {
        self.shards_key_mapping.iter_shard_ids().max().unwrap_or(0)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/holders.rs | lib/collection/src/collection_manager/holders.rs | pub mod proxy_segment {
pub use shard::proxy_segment::*;
}
/// Re-export shard-level segment holder types under this crate's legacy module path.
pub mod segment_holder {
    pub use shard::locked_segment::*;
    pub use shard::segment_holder::*;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/probabilistic_search_sampling.rs | lib/collection/src/collection_manager/probabilistic_search_sampling.rs | /// Precomputed sampling table for the probabilistic segment search algorithm.
///
/// Associate a lambda factor from the [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) to a sampling size.
///
/// The table is precomputed for a range of segments and a range of `top` params.
///
/// TODO attach proper python code to generate the table
/// Python code to generate the table:
///
/// ```python
/// from scipy.stats import poisson
/// q = 0.999 # probability to cover full top in all segments
/// res = []
/// for s in range(2, 1000): # Number of segments
/// for n in range(100, 10000, 50): # top param
/// lmbda = n * (1/s)
/// k = poisson.ppf(q**(1/s), lmbda)
/// res.append((lmbda, int(k)))
/// res = sorted(res, key=lambda x: x[0])
/// ```
///
/// with additional code to remove duplicates and values within 5% of each other.
const POISSON_DISTRIBUTION_SEARCH_SAMPLING: [(f64, usize); 121] = [
(0.19342359767891684, 4),
(0.398406374501992, 5),
(0.6666666666666667, 6),
(0.9900990099009901, 7),
(1.3513513513513513, 8),
(1.7543859649122806, 9),
(2.2222222222222223, 10),
(2.7027027027027026, 11),
(3.125, 12),
(3.7037037037037033, 13),
(4.166666666666666, 14),
(4.761904761904762, 15),
(5.263157894736842, 16),
(5.88235294117647, 17),
(6.25, 18),
(7.142857142857142, 19),
(7.6923076923076925, 20),
(9.090909090909092, 22),
(10.0, 24),
(11.538461538461538, 26),
(12.5, 28),
(14.285714285714285, 30),
(15.384615384615385, 32),
(16.666666666666668, 34),
(18.181818181818183, 36),
(20.0, 38),
(21.428571428571427, 40),
(23.076923076923077, 43),
(25.0, 46),
(27.777777777777775, 49),
(30.0, 52),
(33.33333333333333, 55),
(33.333333333333336, 58),
(37.5, 61),
(39.285714285714285, 65),
(42.857142857142854, 69),
(45.83333333333333, 73),
(50.0, 77),
(50.0, 81),
(56.25, 86),
(60.0, 91),
(66.66666666666666, 96),
(70.0, 101),
(75.0, 107),
(78.57142857142857, 113),
(83.33333333333333, 119),
(90.0, 125),
(94.44444444444444, 132),
(100.0, 139),
(106.25, 146),
(111.53846153846155, 154),
(117.6470588235294, 162),
(125.0, 171),
(137.5, 180),
(143.75, 190),
(150.0, 200),
(162.5, 211),
(170.83333333333331, 222),
(183.33333333333331, 234),
(191.66666666666666, 246),
(200.0, 259),
(215.0, 272),
(230.0, 286),
(242.85714285714283, 301),
(257.1428571428571, 317),
(271.4285714285714, 333),
(285.0, 350),
(300.0, 368),
(318.1818181818182, 387),
(340.0, 407),
(360.0, 429),
(380.0, 451),
(400.0, 474),
(421.4285714285714, 498),
(443.75, 523),
(468.1818181818182, 551),
(494.4444444444444, 579),
(519.2307692307693, 608),
(550.0, 639),
(576.6666666666666, 671),
(614.2857142857142, 706),
(650.0, 742),
(678.5714285714286, 780),
(716.6666666666666, 820),
(759.0909090909091, 864),
(800.0, 908),
(844.4444444444443, 954),
(892.8571428571428, 1003),
(941.6666666666666, 1054),
(991.6666666666666, 1107),
(1043.75, 1164),
(1100.0, 1224),
(1175.0, 1289),
(1233.3333333333333, 1355),
(1291.6666666666665, 1423),
(1364.2857142857142, 1500),
(1440.0, 1576),
(1516.6666666666665, 1658),
(1600.0, 1741),
(1687.5, 1832),
(1780.0, 1931),
(1875.0, 2028),
(1975.0, 2132),
(2083.333333333333, 2240),
(2200.0, 2356),
(2316.6666666666665, 2482),
(2437.5, 2611),
(2575.0, 2744),
(2716.6666666666665, 2896),
(2866.6666666666665, 3051),
(3016.6666666666665, 3205),
(3183.333333333333, 3377),
(3375.0, 3568),
(3550.0, 3748),
(3750.0, 3953),
(3950.0, 4158),
(4175.0, 4389),
(4400.0, 4620),
(4650.0, 4876),
(4900.0, 5132),
(f64::MAX, usize::MAX),
];
/// Uses binary search to find the sampling size for a given lambda.
///
/// Per the table's generator, `n` is the `top` parameter and `p` the per-segment
/// probability (`1 / segments`); the target lambda is `p * n`. Returns the precomputed
/// sampling size for the smallest table lambda greater than or equal to the target.
pub fn find_search_sampling_over_point_distribution(n: f64, p: f64) -> usize {
    let target_lambda = p * n;
    // Guard degenerate inputs: `p * n` may be NaN (would panic in the
    // `partial_cmp().unwrap()` below) or +inf (would produce an out-of-bounds
    // insertion index past the `f64::MAX` sentinel). Fall back to the unbounded
    // sampling size in both cases.
    if target_lambda.is_nan() || target_lambda == f64::INFINITY {
        return usize::MAX;
    }
    let k = POISSON_DISTRIBUTION_SEARCH_SAMPLING
        .binary_search_by(|&(lambda, _sampling)| lambda.partial_cmp(&target_lambda).unwrap());
    match k {
        // Exact lambda match: use the entry's precomputed sampling size
        Ok(k) => POISSON_DISTRIBUTION_SEARCH_SAMPLING[k].1,
        // No exact match: use the next-larger lambda's (conservative) sampling size
        Err(insert) => POISSON_DISTRIBUTION_SEARCH_SAMPLING[insert].1,
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/fixtures.rs | lib/collection/src/collection_manager/fixtures.rs | use std::collections::HashSet;
use std::path::Path;
use std::time::Duration;
use common::counter::hardware_counter::HardwareCounterCell;
use parking_lot::RwLock;
use rand::Rng;
use rand::rngs::ThreadRng;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::only_default_vector;
use segment::entry::entry_point::SegmentEntry;
use segment::payload_json;
use segment::segment::Segment;
use segment::segment_constructor::simple_segment_constructor::{
VECTOR1_NAME, VECTOR2_NAME, build_multivec_segment, build_simple_segment,
};
use segment::types::{Distance, HnswGlobalConfig, Payload, PointIdType, SeqNumberType};
use crate::collection_manager::holders::segment_holder::SegmentHolder;
use crate::collection_manager::optimizers::indexing_optimizer::IndexingOptimizer;
use crate::collection_manager::optimizers::merge_optimizer::MergeOptimizer;
use crate::collection_manager::optimizers::segment_optimizer::OptimizerThresholds;
use crate::config::CollectionParams;
use crate::operations::types::VectorsConfig;
use crate::operations::vector_params_builder::VectorParamsBuilder;
/// Timeout used by fixture-based tests when waiting on blocking retrieval calls.
pub const TEST_TIMEOUT: Duration = Duration::from_secs(10);
/// Build an empty 4-dimensional dot-product segment at `path`.
pub fn empty_segment(path: &Path) -> Segment {
    let segment = build_simple_segment(path, 4, Distance::Dot);
    segment.unwrap()
}
/// A generator for random point IDs
#[derive(Default)]
pub(crate) struct PointIdGenerator {
    // Thread-local RNG used to draw candidate IDs.
    thread_rng: ThreadRng,
    // Numeric IDs handed out so far; consulted by `unique` to avoid repeats.
    used: HashSet<u64>,
}
impl PointIdGenerator {
    /// Draw a random numeric point ID in `1..u64::MAX`.
    #[inline]
    pub fn random(&mut self) -> PointIdType {
        PointIdType::from(self.thread_rng.random_range(1..u64::MAX))
    }

    /// Draw a random point ID that has not been returned by this generator before.
    ///
    /// Panics after 100_000 failed attempts (ID space effectively exhausted).
    #[inline]
    pub fn unique(&mut self) -> PointIdType {
        let mut attempts = 0;
        while attempts < 100_000 {
            let candidate = self.random();
            if let PointIdType::NumId(num) = candidate {
                // `insert` returns true only for IDs we have not seen yet.
                if self.used.insert(num) {
                    return candidate;
                }
            }
            attempts += 1;
        }
        panic!("failed to generate unique point ID after 100000 attempts");
    }
}
/// Build a segment with two named vector storages (`dim1` and `dim2` wide) and
/// fill it with `num_vectors` random points, each carrying a `number` and a
/// `keyword` payload field.
pub fn random_multi_vec_segment(
    path: &Path,
    opnum: SeqNumberType,
    num_vectors: u64,
    dim1: usize,
    dim2: usize,
) -> Segment {
    let mut segment = build_multivec_segment(path, dim1, dim2, Distance::Dot).unwrap();
    let mut id_gen = PointIdGenerator::default();
    let mut rng = rand::rng();
    let hw_counter = HardwareCounterCell::new();
    let payload_key = "number";
    let keyword_key = "keyword";
    for _ in 0..num_vectors {
        // Two independent random vectors, one per named vector storage.
        let vector1: Vec<_> = (0..dim1).map(|_| rng.random_range(0.0..1.0)).collect();
        let vector2: Vec<_> = (0..dim2).map(|_| rng.random_range(0.0..1.0)).collect();
        let mut named_vectors = NamedVectors::default();
        named_vectors.insert(VECTOR1_NAME.to_owned(), vector1.into());
        named_vectors.insert(VECTOR2_NAME.to_owned(), vector2.into());
        let point_id: PointIdType = id_gen.unique();
        let number_value = rng.random_range(1..1_000);
        let keyword_value = format!("keyword_{}", rng.random_range(1..10));
        let payload: Payload =
            payload_json! {payload_key: vec![number_value], keyword_key: keyword_value};
        segment
            .upsert_point(opnum, point_id, named_vectors, &hw_counter)
            .unwrap();
        segment
            .set_payload(opnum, point_id, &payload, &None, &hw_counter)
            .unwrap();
    }
    segment
}
/// Build a simple `dim`-dimensional segment and fill it with `num_vectors`
/// random points, each carrying a `number` payload field.
pub fn random_segment(path: &Path, opnum: SeqNumberType, num_vectors: u64, dim: usize) -> Segment {
    let mut segment = build_simple_segment(path, dim, Distance::Dot).unwrap();
    let mut id_gen = PointIdGenerator::default();
    let mut rng = rand::rng();
    let hw_counter = HardwareCounterCell::new();
    let payload_key = "number";
    for _ in 0..num_vectors {
        let vector: Vec<_> = (0..dim).map(|_| rng.random_range(0.0..1.0)).collect();
        let point_id: PointIdType = id_gen.unique();
        let number_value = rng.random_range(1..1_000);
        let payload: Payload = payload_json! {payload_key: vec![number_value]};
        segment
            .upsert_point(
                opnum,
                point_id,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
        segment
            .set_payload(opnum, point_id, &payload, &None, &hw_counter)
            .unwrap();
    }
    segment
}
/// Build the first standard test segment: points 1..=5 with fixed vectors and a
/// `color` payload (red / blue / both) set at operation number 6.
pub fn build_segment_1(path: &Path) -> Segment {
    let mut segment1 = empty_segment(path);
    let hw_counter = HardwareCounterCell::new();

    // (point id, vector); the upsert operation number equals the point id here.
    let points = [
        (1, vec![1.0, 0.0, 1.0, 1.0]),
        (2, vec![1.0, 0.0, 1.0, 0.0]),
        (3, vec![1.0, 1.0, 1.0, 1.0]),
        (4, vec![1.0, 1.0, 0.0, 1.0]),
        (5, vec![1.0, 0.0, 0.0, 0.0]),
    ];
    for (id, vector) in points {
        segment1
            .upsert_point(id, id.into(), only_default_vector(&vector), &hw_counter)
            .unwrap();
    }

    let payload_key = "color";
    let red = payload_json! {payload_key: vec!["red".to_owned()]};
    let red_blue = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
    let blue = payload_json! {payload_key: vec!["blue".to_owned()]};

    // All payloads are applied with operation number 6.
    let payloads = [(1, &red), (2, &red), (3, &blue), (4, &red_blue), (5, &red_blue)];
    for (id, payload) in payloads {
        segment1
            .set_payload(6, id.into(), payload, &None, &hw_counter)
            .unwrap();
    }
    segment1
}
/// Build the second standard test segment: re-upserts points 4 and 5 (at newer
/// operation numbers than `build_segment_1`) and adds points 11..=15.
pub fn build_segment_2(path: &Path) -> Segment {
    let mut segment2 = empty_segment(path);
    let hw_counter = HardwareCounterCell::new();

    // (operation number, point id, vector) triples.
    let points = [
        (7, 4, vec![1.0, 1.0, 0.0, 1.0]),
        (8, 5, vec![1.0, 0.0, 0.0, 0.0]),
        (11, 11, vec![1.0, 1.0, 1.0, 1.0]),
        (12, 12, vec![1.0, 1.0, 1.0, 0.0]),
        (13, 13, vec![1.0, 0.0, 1.0, 1.0]),
        (14, 14, vec![1.0, 0.0, 0.0, 1.0]),
        (15, 15, vec![1.0, 1.0, 0.0, 0.0]),
    ];
    for (op_num, id, vector) in points {
        segment2
            .upsert_point(op_num, id.into(), only_default_vector(&vector), &hw_counter)
            .unwrap();
    }
    segment2
}
/// Build a lockable segment holder containing the two standard test segments.
pub fn build_test_holder(path: &Path) -> RwLock<SegmentHolder> {
    let mut holder = SegmentHolder::default();
    let _ = holder.add_new(build_segment_1(path));
    let _ = holder.add_new(build_segment_2(path));
    RwLock::new(holder)
}
/// Build a `MergeOptimizer` for tests over single-vector collections of `dim`
/// dimensions; `optimizer_thresholds` defaults to generous limits when `None`.
pub(crate) fn get_merge_optimizer(
    segment_path: &Path,
    collection_temp_dir: &Path,
    dim: usize,
    optimizer_thresholds: Option<OptimizerThresholds>,
) -> MergeOptimizer {
    // Defaults are high enough that thresholds never interfere with merging.
    let thresholds = optimizer_thresholds.unwrap_or_else(|| OptimizerThresholds {
        max_segment_size_kb: 100_000,
        memmap_threshold_kb: 1_000_000,
        indexing_threshold_kb: 1_000_000,
    });
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Single(VectorParamsBuilder::new(dim as u64, Distance::Dot).build()),
        ..CollectionParams::empty()
    };
    MergeOptimizer::new(
        5,
        thresholds,
        segment_path.to_owned(),
        collection_temp_dir.to_owned(),
        collection_params,
        Default::default(),
        HnswGlobalConfig::default(),
        Default::default(),
    )
}
/// Build an `IndexingOptimizer` for tests over single-vector collections of
/// `dim` dimensions, with low thresholds so indexing triggers quickly.
pub(crate) fn get_indexing_optimizer(
    segment_path: &Path,
    collection_temp_dir: &Path,
    dim: usize,
) -> IndexingOptimizer {
    let thresholds = OptimizerThresholds {
        max_segment_size_kb: 100_000,
        memmap_threshold_kb: 100,
        indexing_threshold_kb: 100,
    };
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Single(VectorParamsBuilder::new(dim as u64, Distance::Dot).build()),
        ..CollectionParams::empty()
    };
    IndexingOptimizer::new(
        2,
        thresholds,
        segment_path.to_owned(),
        collection_temp_dir.to_owned(),
        collection_params,
        Default::default(),
        HnswGlobalConfig::default(),
        Default::default(),
    )
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/mod.rs | lib/collection/src/collection_manager/mod.rs | pub mod collection_updater;
// Public submodules, kept in alphabetical order.
pub mod holders;
pub mod optimizers;
pub mod probabilistic_search_sampling;
pub mod segments_searcher;
// Test-only modules.
#[cfg(test)]
pub(crate) mod fixtures;
#[cfg(test)]
mod tests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/collection_updater.rs | lib/collection/src/collection_manager/collection_updater.rs | use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use parking_lot::RwLock;
use segment::types::SeqNumberType;
use shard::update::*;
use crate::collection_manager::holders::segment_holder::SegmentHolder;
use crate::operations::CollectionUpdateOperations;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::update_tracker::UpdateTracker;
/// Implementation of the update operation
///
/// Stateless: all state lives in the `SegmentHolder` passed into `update`.
#[derive(Default)]
pub struct CollectionUpdater {}
impl CollectionUpdater {
    /// Record the outcome of operation `op_num` in the holder's `failed_operation` set.
    ///
    /// On success, drops `op_num` from the set (a previous attempt may have failed);
    /// on a transient error, records `op_num` for later retry. Non-transient
    /// errors are only logged as declined.
    fn handle_update_result(
        segments: &RwLock<SegmentHolder>,
        op_num: SeqNumberType,
        operation_result: &CollectionResult<usize>,
    ) {
        match operation_result {
            Ok(_) => {
                // Cheap read-lock check first; take the write lock only if there
                // is actually something to clean up.
                if !segments.read().failed_operation.is_empty() {
                    // If this operation failed before, remove it because it got fixed now
                    segments.write().failed_operation.remove(&op_num);
                }
            }
            Err(collection_error) => {
                if collection_error.is_transient() {
                    // Remember transient failures so the operation can be retried.
                    let mut write_segments = segments.write();
                    write_segments.failed_operation.insert(op_num);
                    log::error!("Update operation failed: {collection_error}")
                } else {
                    log::warn!("Update operation declined: {collection_error}")
                }
            }
        }
    }
    /// Apply one update operation to the segments and return the number of
    /// affected points.
    ///
    /// Updates are serialized via `update_operation_lock`; the outcome is also
    /// recorded through [`CollectionUpdater::handle_update_result`].
    pub fn update(
        segments: &RwLock<SegmentHolder>,
        op_num: SeqNumberType,
        operation: CollectionUpdateOperations,
        update_operation_lock: Arc<tokio::sync::RwLock<()>>,
        update_tracker: UpdateTracker,
        hw_counter: &HardwareCounterCell,
    ) -> CollectionResult<usize> {
        // Use block_in_place here to avoid blocking the current async executor
        let operation_result = tokio::task::block_in_place(|| {
            // Allow only one update at a time, ensure no data races between segments.
            // let _update_lock = self.update_lock.lock().unwrap();
            let _update_operation_lock = update_operation_lock.blocking_write();
            let _update_guard = update_tracker.update();
            // Dispatch to the matching shard-level processor.
            match operation {
                CollectionUpdateOperations::PointOperation(point_operation) => {
                    process_point_operation(segments, op_num, point_operation, hw_counter)
                }
                CollectionUpdateOperations::VectorOperation(vector_operation) => {
                    process_vector_operation(segments, op_num, vector_operation, hw_counter)
                }
                CollectionUpdateOperations::PayloadOperation(payload_operation) => {
                    process_payload_operation(segments, op_num, payload_operation, hw_counter)
                }
                CollectionUpdateOperations::FieldIndexOperation(index_operation) => {
                    process_field_index_operation(segments, op_num, &index_operation, hw_counter)
                }
            }
        });
        let operation_result = operation_result.map_err(CollectionError::from);
        CollectionUpdater::handle_update_result(segments, op_num, &operation_result);
        operation_result
    }
}
// Integration tests for point, payload and sync operations over the standard
// two-segment test fixture (`build_test_holder`).
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;
    use common::counter::hardware_accumulator::HwMeasurementAcc;
    use itertools::Itertools;
    use parking_lot::RwLockUpgradableReadGuard;
    use segment::data_types::vectors::{
        DEFAULT_VECTOR_NAME, VectorStructInternal, only_default_vector,
    };
    use segment::entry::entry_point::SegmentEntry;
    use segment::json_path::JsonPath;
    use segment::payload_json;
    use segment::types::PayloadSchemaType::Keyword;
    use segment::types::{Payload, PayloadContainer, PayloadFieldSchema, WithPayload};
    use serde_json::json;
    use shard::retrieve::retrieve_blocking::retrieve_blocking;
    use shard::update::upsert_points;
    use tempfile::Builder;
    use super::*;
    use crate::collection_manager::fixtures::{
        TEST_TIMEOUT, build_segment_1, build_segment_2, build_test_holder,
    };
    use crate::collection_manager::holders::segment_holder::LockedSegment::Original;
    use crate::operations::payload_ops::{DeletePayloadOp, PayloadOps, SetPayloadOp};
    use crate::operations::point_ops::{
        PointOperations, PointStructPersisted, VectorStructPersisted,
    };
    // `sync_points` with id range 10.. should delete, insert and update the
    // right subsets of points relative to the fixture's contents.
    #[test]
    fn test_sync_ops() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let segments = build_test_holder(dir.path());
        let vec11 = only_default_vector(&[1.0, 1.0, 1.0, 1.0]);
        let vec12 = only_default_vector(&[1.0, 1.0, 1.0, 0.0]);
        let vec13 = only_default_vector(&[1.0, 0.0, 1.0, 1.0]);
        let points = vec![
            PointStructPersisted {
                id: 11.into(),
                vector: VectorStructPersisted::from(VectorStructInternal::from(vec11)),
                payload: None,
            },
            PointStructPersisted {
                id: 12.into(),
                vector: VectorStructPersisted::from(VectorStructInternal::from(vec12)),
                payload: None,
            },
            PointStructPersisted {
                id: 13.into(),
                vector: VectorStructPersisted::from(VectorStructInternal::from(vec13)),
                payload: Some(payload_json! { "color": "red" }),
            },
            PointStructPersisted {
                id: 14.into(),
                vector: VectorStructPersisted::Single(vec![0., 0., 0., 0.]),
                payload: None,
            },
            PointStructPersisted {
                id: 500.into(),
                vector: VectorStructPersisted::Single(vec![2., 0., 2., 0.]),
                payload: None,
            },
        ];
        let hw_counter = HardwareCounterCell::new();
        let (num_deleted, num_new, num_updated) = sync_points(
            &segments.read(),
            100,
            Some(10.into()),
            None,
            &points,
            &hw_counter,
        )
        .unwrap();
        assert_eq!(num_deleted, 1); // delete point 15
        assert_eq!(num_new, 1); // insert point 500
        assert_eq!(num_updated, 2); // upsert point 13 and 14 as it has updated data
        // points 11 and 12 are not updated as they are same as before
    }
    // Upsert overwriting an existing point plus a new one, then delete by id;
    // retrieval must reflect both operations.
    #[test]
    fn test_point_ops() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let is_stopped = AtomicBool::new(false);
        let segments = build_test_holder(dir.path());
        let points = vec![
            PointStructPersisted {
                id: 1.into(),
                vector: VectorStructPersisted::Single(vec![2., 2., 2., 2.]),
                payload: None,
            },
            PointStructPersisted {
                id: 500.into(),
                vector: VectorStructPersisted::Single(vec![2., 0., 2., 0.]),
                payload: None,
            },
        ];
        let hw_counter = HardwareCounterCell::new();
        let res = upsert_points(&segments.read(), 100, &points, &hw_counter);
        // Only point 500 is new; point 1 already existed.
        assert!(matches!(res, Ok(1)));
        let segments = Arc::new(segments);
        let records = retrieve_blocking(
            segments.clone(),
            &[1.into(), 2.into(), 500.into()],
            &WithPayload::from(true),
            &true.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(records.len(), 3);
        for record in records {
            let v = record.vector.unwrap();
            let v1 = vec![2., 2., 2., 2.];
            if record.id == 1.into() {
                assert_eq!(v.get(DEFAULT_VECTOR_NAME), Some((&v1).into()))
            }
            let v2 = vec![2., 0., 2., 0.];
            if record.id == 500.into() {
                assert_eq!(v.get(DEFAULT_VECTOR_NAME), Some((&v2).into()))
            }
        }
        process_point_operation(
            &segments,
            101,
            PointOperations::DeletePoints {
                ids: vec![500.into()],
            },
            &hw_counter,
        )
        .unwrap();
        let records = retrieve_blocking(
            segments,
            &[1.into(), 2.into(), 500.into()],
            &WithPayload::from(true),
            &true.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        for record in records {
            assert!(record.vector.is_some());
            // Point 500 was deleted and must not come back.
            assert_ne!(record.id, 500.into());
        }
    }
    // Set / delete / clear payload round-trip, verified through retrieval.
    #[test]
    fn test_payload_ops() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let segments = build_test_holder(dir.path());
        let payload: Payload = serde_json::from_str(r#"{"color":"red"}"#).unwrap();
        let is_stopped = AtomicBool::new(false);
        let points = vec![1.into(), 2.into(), 3.into()];
        let hw_counter = HardwareCounterCell::new();
        process_payload_operation(
            &segments,
            100,
            PayloadOps::SetPayload(SetPayloadOp {
                payload,
                points: Some(points.clone()),
                filter: None,
                key: None,
            }),
            &hw_counter,
        )
        .unwrap();
        let segments = Arc::new(segments);
        let res = retrieve_blocking(
            segments.clone(),
            &points,
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 3);
        match res.first() {
            None => panic!(),
            Some(r) => match &r.payload {
                None => panic!("No payload assigned"),
                Some(payload) => {
                    assert!(payload.contains_key("color"))
                }
            },
        };
        // Test payload delete
        process_payload_operation(
            &segments,
            101,
            PayloadOps::DeletePayload(DeletePayloadOp {
                points: Some(vec![3.into()]),
                keys: vec!["color".parse().unwrap(), "empty".parse().unwrap()],
                filter: None,
            }),
            &hw_counter,
        )
        .unwrap();
        let res = retrieve_blocking(
            segments.clone(),
            &[3.into()],
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 1);
        assert!(!res[0].payload.as_ref().unwrap().contains_key("color"));
        // Test clear payload
        let res = retrieve_blocking(
            segments.clone(),
            &[2.into()],
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 1);
        assert!(res[0].payload.as_ref().unwrap().contains_key("color"));
        process_payload_operation(
            &segments,
            102,
            PayloadOps::ClearPayload {
                points: vec![2.into()],
            },
            &hw_counter,
        )
        .unwrap();
        let res = retrieve_blocking(
            segments,
            &[2.into()],
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 1);
        assert!(!res[0].payload.as_ref().unwrap().contains_key("color"));
    }
    // Setting a nested payload key must work both on appendable segments and,
    // via the copy-on-write path, on segments marked non-appendable, while a
    // field index exists on the nested key.
    #[test]
    fn test_nested_payload_update_with_index() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let path = dir.path();
        let meta_key_path = JsonPath::new("meta");
        let nested_key_path: JsonPath = JsonPath::new("meta.color");
        let hw_counter = HardwareCounterCell::new();
        let mut segment1 = build_segment_1(path);
        segment1
            .create_field_index(
                100,
                &nested_key_path,
                Some(&PayloadFieldSchema::FieldType(Keyword)),
                &hw_counter,
            )
            .unwrap();
        let mut segment2 = build_segment_2(path);
        segment2
            .create_field_index(
                101,
                &nested_key_path,
                Some(&PayloadFieldSchema::FieldType(Keyword)),
                &hw_counter,
            )
            .unwrap();
        let mut holder = SegmentHolder::default();
        let segment_ids = vec![holder.add_new(segment1), holder.add_new(segment2)];
        let segments_guard = RwLock::new(holder);
        let segments = Arc::new(segments_guard);
        // payload with nested structure
        let payload: Payload = serde_json::from_str(r#"{"color":"red"}"#).unwrap();
        let is_stopped = AtomicBool::new(false);
        // update points from segment 2
        let points = vec![11.into(), 12.into(), 13.into()];
        process_payload_operation(
            &segments,
            102,
            PayloadOps::SetPayload(SetPayloadOp {
                payload,
                points: Some(points.clone()),
                filter: None,
                key: Some(meta_key_path.clone()),
            }),
            &hw_counter,
        )
        .unwrap();
        let res = retrieve_blocking(
            segments.clone(),
            &points,
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 3);
        match res.first() {
            None => panic!(),
            Some(r) => match &r.payload {
                None => panic!("No payload assigned"),
                Some(actual_payload) => {
                    let expect_value = json!({"color":"red"});
                    assert_eq!(
                        actual_payload.get_value(&meta_key_path).first().unwrap(),
                        &&expect_value
                    )
                }
            },
        };
        // segment 2 is marked as not appendable to trigger COW mechanism
        let upgradable = segments.upgradable_read();
        let segments = RwLockUpgradableReadGuard::upgrade(upgradable).remove(&segment_ids);
        match segments.get(segment_ids[1]) {
            Some(Original(segment)) => {
                let mut guard = segment.write();
                guard.appendable_flag = false;
            }
            x => panic!("Unexpected segment type: {x:?}"),
        };
        // Re-build the holder from the extracted segments.
        let mut holder = SegmentHolder::default();
        for segment in segments {
            holder.add_new(segment);
        }
        let segments_guard = RwLock::new(holder);
        let segments = Arc::new(segments_guard);
        // update points nested values
        let payload: Payload = serde_json::from_str(r#"{ "color":"blue"}"#).unwrap();
        process_payload_operation(
            &segments,
            103,
            PayloadOps::SetPayload(SetPayloadOp {
                payload,
                points: Some(points.clone()),
                filter: None,
                key: Some(meta_key_path.clone()),
            }),
            &hw_counter,
        )
        .unwrap();
        let res = retrieve_blocking(
            segments,
            &points,
            &WithPayload::from(true),
            &false.into(),
            TEST_TIMEOUT,
            &is_stopped,
            HwMeasurementAcc::new(),
        )
        .unwrap()
        .into_values()
        .collect_vec();
        assert_eq!(res.len(), 3);
        match res.first() {
            None => panic!(),
            Some(r) => match &r.payload {
                None => panic!("No payload assigned"),
                Some(actual_payload) => {
                    let expect_value = json!({"color":"blue"});
                    assert_eq!(
                        actual_payload.get_value(&meta_key_path).first().unwrap(),
                        &&expect_value
                    )
                }
            },
        };
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/segments_searcher.rs | lib/collection/src/collection_manager/segments_searcher.rs | use std::collections::BTreeSet;
use std::sync::Arc;
use std::time::{Duration, Instant};
use ahash::AHashMap;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::types::ScoreType;
use futures::stream::FuturesUnordered;
use futures::{FutureExt, TryStreamExt};
use itertools::Itertools;
use ordered_float::Float;
use segment::common::operation_error::OperationError;
use segment::data_types::modifier::Modifier;
use segment::data_types::query_context::{FormulaContext, QueryContext, SegmentQueryContext};
use segment::data_types::vectors::QueryVector;
use segment::types::{
Filter, Indexes, PointIdType, ScoredPoint, SearchParams, SegmentConfig, VectorName,
WithPayload, WithPayloadInterface, WithVector,
};
use shard::common::stopping_guard::StoppingGuard;
use shard::query::query_context::{fill_query_context, init_query_context};
use shard::query::query_enum::QueryEnum;
use shard::retrieve::record_internal::RecordInternal;
use shard::retrieve::retrieve_blocking::retrieve_blocking;
use shard::search::CoreSearchRequestBatch;
use shard::search_result_aggregator::BatchResultAggregator;
use tokio::runtime::Handle;
use tokio_util::task::AbortOnDropHandle;
use super::holders::segment_holder::LockedSegmentHolder;
use crate::collection_manager::holders::segment_holder::LockedSegment;
use crate::collection_manager::probabilistic_search_sampling::find_search_sampling_over_point_distribution;
use crate::config::CollectionConfigInternal;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::optimizers_builder::DEFAULT_INDEXING_THRESHOLD_KB;
// Index of a request inside a search batch.
type BatchOffset = usize;
// Index of a segment inside the searched segment list.
type SegmentOffset = usize;
// batch -> point for one segment
type SegmentBatchSearchResult = Vec<Vec<ScoredPoint>>;
// Segment -> batch -> point
type BatchSearchResult = Vec<SegmentBatchSearchResult>;
// Result of batch search in one segment: per-batch points plus a per-batch flag
// telling whether the segment could return more results (sampling cut it off).
type SegmentSearchExecutedResult = CollectionResult<(SegmentBatchSearchResult, Vec<bool>)>;
/// Simple implementation of segment manager
/// - rebuild segment for memory optimization purposes
///
/// Unit struct: all methods are associated functions taking the segment holder
/// explicitly, so no per-instance state exists.
#[derive(Default)]
pub struct SegmentsSearcher;
impl SegmentsSearcher {
/// Execute searches in parallel and return results in the same order as the searches were provided
async fn execute_searches(
searches: Vec<AbortOnDropHandle<SegmentSearchExecutedResult>>,
) -> CollectionResult<(BatchSearchResult, Vec<Vec<bool>>)> {
let results_len = searches.len();
let mut search_results_per_segment_res = FuturesUnordered::new();
for (idx, search) in searches.into_iter().enumerate() {
// map the result to include the request index for later reordering
let result_with_request_index = search.map(move |res| res.map(|s| (idx, s)));
search_results_per_segment_res.push(result_with_request_index);
}
let mut search_results_per_segment = vec![Vec::new(); results_len];
let mut further_searches_per_segment = vec![Vec::new(); results_len];
// process results as they come in and store them in the correct order
while let Some((idx, search_result)) = search_results_per_segment_res.try_next().await? {
let (search_results, further_searches) = search_result?;
debug_assert!(search_results.len() == further_searches.len());
search_results_per_segment[idx] = search_results;
further_searches_per_segment[idx] = further_searches;
}
Ok((search_results_per_segment, further_searches_per_segment))
}
    /// Processes search result of `[segment_size x batch_size]`.
    ///
    /// # Arguments
    /// * `search_result` - `[segment_size x batch_size]`
    /// * `limits` - `[batch_size]` - how many results to return for each batched request
    /// * `further_searches` - `[segment_size x batch_size]` - whether we can search further in the segment
    ///
    /// Returns batch results aggregated by `[batch_size]` and list of queries, grouped by segment to re-run
    pub(crate) fn process_search_result_step1(
        search_result: BatchSearchResult,
        limits: Vec<usize>,
        further_results: &[Vec<bool>],
    ) -> (
        BatchResultAggregator,
        AHashMap<SegmentOffset, Vec<BatchOffset>>,
    ) {
        let number_segments = search_result.len();
        let batch_size = limits.len();
        // The lowest scored element must be larger or equal to the worst scored element in each segment.
        // Otherwise, the sampling is invalid and some points might be missing.
        // e.g. with 3 segments with the following sampled ranges:
        // s1 - [0.91 -> 0.87]
        // s2 - [0.92 -> 0.86]
        // s3 - [0.93 -> 0.85]
        // If the top merged scores result range is [0.93 -> 0.86] then we do not know if s1 could have contributed more points at the lower part between [0.87 -> 0.86]
        // In that case, we need to re-run the search without sampling on that segment.
        // Initialize result aggregators for each batched request
        let mut result_aggregator = BatchResultAggregator::new(limits.iter().copied());
        result_aggregator.update_point_versions(search_result.iter().flatten().flatten());
        // Therefore we need to track the lowest scored element per segment for each batch
        let mut lowest_scores_per_request: Vec<Vec<ScoreType>> = vec![
            vec![f32::max_value(); batch_size]; // initial max score value for each batch
            number_segments
        ];
        let mut retrieved_points_per_request: Vec<Vec<BatchOffset>> = vec![
            vec![0; batch_size]; // initial retrieved points count for each batch
            number_segments
        ];
        // Batch results merged from all segments
        for (segment_idx, segment_result) in search_result.into_iter().enumerate() {
            // merge results for each batch search request across segments
            for (batch_req_idx, query_res) in segment_result.into_iter().enumerate() {
                retrieved_points_per_request[segment_idx][batch_req_idx] = query_res.len();
                // Results are score-ordered, so the last element is the worst score.
                lowest_scores_per_request[segment_idx][batch_req_idx] = query_res
                    .last()
                    .map(|x| x.score)
                    .unwrap_or_else(f32::min_value);
                result_aggregator.update_batch_results(batch_req_idx, query_res.into_iter());
            }
        }
        // segment id -> list of batch ids
        let mut searches_to_rerun: AHashMap<SegmentOffset, Vec<BatchOffset>> = AHashMap::new();
        // Check if we want to re-run the search without sampling on some segments
        for (batch_id, required_limit) in limits.into_iter().enumerate() {
            let lowest_batch_score_opt = result_aggregator.batch_lowest_scores(batch_id);
            // If there are no results, we do not need to re-run the search
            if let Some(lowest_batch_score) = lowest_batch_score_opt {
                for segment_id in 0..number_segments {
                    let segment_lowest_score = lowest_scores_per_request[segment_id][batch_id];
                    let retrieved_points = retrieved_points_per_request[segment_id][batch_id];
                    let have_further_results = further_results[segment_id][batch_id];
                    if have_further_results
                        && retrieved_points < required_limit
                        && segment_lowest_score >= lowest_batch_score
                    {
                        log::debug!(
                            "Search to re-run without sampling on segment_id: {segment_id} segment_lowest_score: {segment_lowest_score}, lowest_batch_score: {lowest_batch_score}, retrieved_points: {retrieved_points}, required_limit: {required_limit}",
                        );
                        // It is possible, that current segment can have better results than
                        // the lowest score in the batch. In that case, we need to re-run the search
                        // without sampling on that segment.
                        searches_to_rerun
                            .entry(segment_id)
                            .or_default()
                            .push(batch_id);
                    }
                }
            }
        }
        (result_aggregator, searches_to_rerun)
    }
pub async fn prepare_query_context(
segments: LockedSegmentHolder,
batch_request: &CoreSearchRequestBatch,
collection_config: &CollectionConfigInternal,
timeout: Duration,
search_runtime_handle: &Handle,
is_stopped_guard: &StoppingGuard,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Option<QueryContext>> {
let indexing_threshold_kb = collection_config
.optimizer_config
.indexing_threshold
.unwrap_or(DEFAULT_INDEXING_THRESHOLD_KB);
let full_scan_threshold_kb = collection_config.hnsw_config.full_scan_threshold;
let search_optimized_threshold_kb = indexing_threshold_kb.max(full_scan_threshold_kb);
let query_context = init_query_context(
&batch_request.searches,
search_optimized_threshold_kb,
is_stopped_guard,
hw_measurement_acc,
|vector_name| {
collection_config
.params
.get_sparse_vector_params_opt(vector_name)
.map(|params| params.modifier == Some(Modifier::Idf))
.unwrap_or(false)
},
);
let is_stopped = is_stopped_guard.get_is_stopped().clone();
// Do blocking calls in a blocking task: `segment.get().read()` calls might block async runtime
let task = AbortOnDropHandle::new(search_runtime_handle.spawn_blocking(move || {
fill_query_context(query_context, segments, timeout, &is_stopped)
}))
.await??;
Ok(task)
}
    /// Run a batch of core searches over all segments concurrently and merge
    /// the per-segment results into one top-k list per batched request.
    ///
    /// When sampling is used, segments whose sampled results may have cut off
    /// competitive points are re-searched without sampling (step 2).
    pub async fn search(
        segments: LockedSegmentHolder,
        batch_request: Arc<CoreSearchRequestBatch>,
        runtime_handle: &Handle,
        sampling_enabled: bool,
        query_context: QueryContext,
        timeout: Duration,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        let start = Instant::now();
        let query_context_arc = Arc::new(query_context);
        // Using block to ensure `segments` variable is dropped in the end of it
        let (locked_segments, searches): (Vec<_>, Vec<_>) = {
            // Unfortunately, we have to do `segments.read()` twice, once in blocking task
            // and once here, due to `Send` bounds :/
            let Some(segments_lock) = segments.try_read_for(timeout) else {
                return Err(CollectionError::timeout(timeout, "search"));
            };
            let segments = segments_lock.non_appendable_then_appendable_segments();
            // Probabilistic sampling for the `limit` parameter avoids over-fetching points from segments.
            // e.g. 10 segments with limit 1000 would fetch 10000 points in total and discard 9000 points.
            // With probabilistic sampling we determine a smaller sampling limit for each segment.
            // Use probabilistic sampling if:
            // - sampling is enabled
            // - more than 1 segment
            // - segments are not empty
            let use_sampling = sampling_enabled
                && segments_lock.len() > 1
                && query_context_arc.available_point_count() > 0;
            segments
                .map(|segment| {
                    let query_context_arc_segment = query_context_arc.clone();
                    // update timeout
                    let timeout = timeout.saturating_sub(start.elapsed());
                    let search = runtime_handle.spawn_blocking({
                        let (segment, batch_request) = (segment.clone(), batch_request.clone());
                        move || {
                            let segment_query_context =
                                query_context_arc_segment.get_segment_query_context();
                            search_in_segment(
                                segment,
                                batch_request,
                                use_sampling,
                                &segment_query_context,
                                timeout,
                            )
                        }
                    });
                    // We MUST wrap the search handle in AbortOnDropHandle to ensure that we skip
                    // all searches for futures that are already dropped. Not using this allows
                    // users to create a humongous queue of search tasks, even though the searches
                    // are already invalidated.
                    // See: <https://github.com/qdrant/qdrant/pull/7530>
                    let search = AbortOnDropHandle::new(search);
                    (segment, search)
                })
                .unzip()
        };
        // perform search on all segments concurrently
        // the resulting Vec is in the same order as the segment searches were provided.
        let (all_search_results_per_segment, further_results) =
            Self::execute_searches(searches).await?;
        debug_assert!(all_search_results_per_segment.len() == locked_segments.len());
        let (mut result_aggregator, searches_to_rerun) = Self::process_search_result_step1(
            all_search_results_per_segment,
            batch_request
                .searches
                .iter()
                .map(|request| request.limit + request.offset)
                .collect(),
            &further_results,
        );
        // The second step of the search is to re-run the search without sampling on some segments
        // Expected that this stage will be executed rarely
        if !searches_to_rerun.is_empty() {
            // TODO notify telemetry of failing sampling
            // Ensure consistent order of segment ids
            let searches_to_rerun: Vec<(SegmentOffset, Vec<BatchOffset>)> =
                searches_to_rerun.into_iter().collect();
            let secondary_searches: Vec<_> = {
                let mut res = vec![];
                for (segment_id, batch_ids) in searches_to_rerun.iter() {
                    let query_context_arc_segment = query_context_arc.clone();
                    let segment = locked_segments[*segment_id].clone();
                    // Only the batch requests flagged for this segment are re-run.
                    let partial_batch_request = Arc::new(CoreSearchRequestBatch {
                        searches: batch_ids
                            .iter()
                            .map(|batch_id| batch_request.searches[*batch_id].clone())
                            .collect(),
                    });
                    // update timeout
                    let timeout = timeout.saturating_sub(start.elapsed());
                    let handle = runtime_handle.spawn_blocking(move || {
                        let segment_query_context =
                            query_context_arc_segment.get_segment_query_context();
                        search_in_segment(
                            segment,
                            partial_batch_request,
                            false,
                            &segment_query_context,
                            timeout,
                        )
                    });
                    // We MUST wrap the search handle in AbortOnDropHandle to ensure that we skip
                    // all searches for futures that are already dropped. Not using this allows
                    // users to create a humongous queue of search tasks, even though the searches
                    // are already invalidated.
                    // See: <https://github.com/qdrant/qdrant/pull/7530>
                    let handle = AbortOnDropHandle::new(handle);
                    res.push(handle);
                }
                res
            };
            let (secondary_search_results_per_segment, _) =
                Self::execute_searches(secondary_searches).await?;
            result_aggregator.update_point_versions(
                secondary_search_results_per_segment
                    .iter()
                    .flatten()
                    .flatten(),
            );
            for ((_segment_id, batch_ids), segments_result) in searches_to_rerun
                .into_iter()
                .zip(secondary_search_results_per_segment.into_iter())
            {
                for (batch_id, secondary_batch_result) in
                    batch_ids.into_iter().zip(segments_result.into_iter())
                {
                    result_aggregator
                        .update_batch_results(batch_id, secondary_batch_result.into_iter());
                }
            }
        }
        let top_scores: Vec<_> = result_aggregator.into_topk();
        Ok(top_scores)
    }
/// Retrieve records for the given points ids from the segments
/// - if payload is enabled, payload will be fetched
/// - if vector is enabled, vector will be fetched
///
/// The points ids can contain duplicates, the records will be fetched only once
///
/// If an id is not found in the segments, it won't be included in the output.
pub async fn retrieve(
segments: LockedSegmentHolder,
points: &[PointIdType],
with_payload: &WithPayload,
with_vector: &WithVector,
runtime_handle: &Handle,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<AHashMap<PointIdType, RecordInternal>> {
let stopping_guard = StoppingGuard::new();
let points = runtime_handle.spawn_blocking({
let segments = segments.clone();
let points = points.to_vec();
let with_payload = with_payload.clone();
let with_vector = with_vector.clone();
let is_stopped = stopping_guard.get_is_stopped();
// TODO create one Task per segment level retrieve
move || {
retrieve_blocking(
segments,
&points,
&with_payload,
&with_vector,
timeout,
&is_stopped,
hw_measurement_acc,
)
}
});
Ok(AbortOnDropHandle::new(points).await??)
}
pub async fn read_filtered(
segments: LockedSegmentHolder,
filter: Option<&Filter>,
runtime_handle: &Handle,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<BTreeSet<PointIdType>> {
let stopping_guard = StoppingGuard::new();
// cloning filter spawning task
let filter = filter.cloned();
let points = runtime_handle.spawn_blocking(move || {
let is_stopped = stopping_guard.get_is_stopped();
let segments = segments.read();
let hw_counter = hw_measurement_acc.get_counter_cell();
let all_points: BTreeSet<_> = segments
.non_appendable_then_appendable_segments()
.flat_map(|segment| {
segment.get().read().read_filtered(
None,
None,
filter.as_ref(),
&is_stopped,
&hw_counter,
)
})
.collect();
Ok(all_points)
});
AbortOnDropHandle::new(points).await?
}
    /// Rescore results with a formula that can reference payload values.
    ///
    /// Aggregates rescores from the segments.
    pub async fn rescore_with_formula(
        segments: LockedSegmentHolder,
        arc_ctx: Arc<FormulaContext>,
        runtime_handle: &Handle,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ScoredPoint>> {
        let limit = arc_ctx.limit;
        // Spawn one blocking rescore task per segment. The holder read lock is
        // scoped to this block, so it is released before any task is awaited.
        let mut futures = {
            let segments_guard = segments.read();
            segments_guard
                .non_appendable_then_appendable_segments()
                .map(|segment| {
                    let handle = runtime_handle.spawn_blocking({
                        let arc_ctx = arc_ctx.clone();
                        let hw_counter = hw_measurement_acc.get_counter_cell();
                        move || {
                            segment
                                .get()
                                .read()
                                .rescore_with_formula(arc_ctx, &hw_counter)
                        }
                    });
                    // Abort the blocking task if this future is dropped early.
                    AbortOnDropHandle::new(handle)
                })
                .collect::<FuturesUnordered<_>>()
        };
        // Drain tasks as they finish; completion order does not matter because
        // the aggregator below re-ranks all results.
        let mut segments_results = Vec::with_capacity(futures.len());
        while let Some(result) = futures.try_next().await? {
            segments_results.push(result?)
        }
        // use aggregator with only one "batch"
        let mut aggregator = BatchResultAggregator::new(std::iter::once(limit));
        aggregator.update_point_versions(segments_results.iter().flatten());
        aggregator.update_batch_results(0, segments_results.into_iter().flatten());
        let top =
            aggregator.into_topk().into_iter().next().ok_or_else(|| {
                OperationError::service_error("expected first result of aggregator")
            })?;
        Ok(top)
    }
}
/// Kind of scoring a search request uses.
///
/// Variants mirror `QueryEnum` one-to-one; see the `From<&QueryEnum>` impl.
#[derive(PartialEq, Default, Debug)]
pub enum SearchType {
    #[default]
    Nearest,
    RecommendBestScore,
    RecommendSumScores,
    Discover,
    Context,
    FeedbackNaive,
}
impl From<&QueryEnum> for SearchType {
    /// Map a query variant to its corresponding search type.
    fn from(query: &QueryEnum) -> Self {
        // Deliberately exhaustive: adding a `QueryEnum` variant must be
        // reflected here, and the compiler will enforce it.
        match query {
            QueryEnum::Nearest(_) => Self::Nearest,
            QueryEnum::RecommendBestScore(_) => Self::RecommendBestScore,
            QueryEnum::RecommendSumScores(_) => Self::RecommendSumScores,
            QueryEnum::Discover(_) => Self::Discover,
            QueryEnum::Context(_) => Self::Context,
            QueryEnum::FeedbackNaive(_) => Self::FeedbackNaive,
        }
    }
}
/// Subset of search-request parameters that decides whether two consecutive
/// requests can be executed as a single batched segment search.
///
/// `PartialEq` is derived so `search_in_segment` can cheaply compare the
/// params of consecutive requests.
#[derive(PartialEq, Default, Debug)]
struct BatchSearchParams<'a> {
    pub search_type: SearchType,
    pub vector_name: &'a VectorName,
    pub filter: Option<&'a Filter>,
    pub with_payload: WithPayload,
    pub with_vector: WithVector,
    /// Number of results to fetch: request `limit + offset`.
    pub top: usize,
    pub params: Option<&'a SearchParams>,
}
/// Returns suggested search sampling size for a given number of points and required limit.
fn sampling_limit(
    limit: usize,
    ef_limit: Option<usize>,
    segment_points: usize,
    total_points: usize,
) -> usize {
    // An empty segment can never contribute results.
    if segment_points == 0 {
        return 0;
    }

    let segment_probability = segment_points as f64 / total_points as f64;
    let poisson_sampling =
        find_search_sampling_over_point_distribution(limit as f64, segment_probability);

    // if no ef_limit was found, it is a plain index => sampling optimization is not needed.
    let effective = match ef_limit {
        Some(ef_limit) => effective_limit(limit, ef_limit, poisson_sampling),
        None => limit,
    };

    log::trace!(
        "sampling: {effective}, poisson: {poisson_sampling} segment_probability: {segment_probability}, segment_points: {segment_points}, total_points: {total_points}",
    );
    effective
}
/// Determines the effective ef limit value for the given parameters.
///
/// Takes the larger of `poisson_sampling` and `ef_limit`, but never exceeds
/// the requested `limit`.
fn effective_limit(limit: usize, ef_limit: usize, poisson_sampling: usize) -> usize {
    let preferred = if poisson_sampling > ef_limit {
        poisson_sampling
    } else {
        ef_limit
    };
    if preferred > limit { limit } else { preferred }
}
/// Process sequentially contiguous batches
///
/// Consecutive requests with identical [`BatchSearchParams`] are grouped and
/// executed as one segment-level batch search.
///
/// # Arguments
///
/// * `segment` - Locked segment to search in
/// * `request` - Batch of search requests
/// * `use_sampling` - If true, try to use probabilistic sampling
/// * `query_context` - Additional context for the search
///
/// # Returns
///
/// Collection Result of:
/// * Vector of ScoredPoints for each request in the batch
/// * Vector of boolean indicating if the segment have further points to search
fn search_in_segment(
    segment: LockedSegment,
    request: Arc<CoreSearchRequestBatch>,
    use_sampling: bool,
    segment_query_context: &SegmentQueryContext,
    timeout: Duration,
) -> CollectionResult<(Vec<Vec<ScoredPoint>>, Vec<bool>)> {
    // Bail out early if the caller already cancelled this search.
    if segment_query_context.is_stopped() {
        return Err(CollectionError::cancelled(
            "Search in segment was cancelled",
        ));
    }

    let batch_size = request.searches.len();

    let mut result: Vec<Vec<ScoredPoint>> = Vec::with_capacity(batch_size);
    let mut further_results: Vec<bool> = Vec::with_capacity(batch_size); // if segment have more points to return
    // Accumulates queries of the current run of identical params.
    let mut vectors_batch: Vec<QueryVector> = vec![];
    let mut prev_params = BatchSearchParams::default();

    for search_query in &request.searches {
        let with_payload_interface = search_query
            .with_payload
            .as_ref()
            .unwrap_or(&WithPayloadInterface::Bool(false));

        let params = BatchSearchParams {
            search_type: search_query.query.as_ref().into(),
            vector_name: search_query.query.get_vector_name(),
            filter: search_query.filter.as_ref(),
            with_payload: WithPayload::from(with_payload_interface),
            with_vector: search_query.with_vector.clone().unwrap_or_default(),
            top: search_query.limit + search_query.offset,
            params: search_query.params.as_ref(),
        };

        let query = search_query.query.clone().into();

        // same params enables batching (cmp expensive on large filters)
        if params == prev_params {
            vectors_batch.push(query);
        } else {
            // different params means different batches
            // execute what has been batched so far
            if !vectors_batch.is_empty() {
                let (mut res, mut further) = execute_batch_search(
                    &segment,
                    &vectors_batch,
                    &prev_params,
                    use_sampling,
                    segment_query_context,
                    timeout,
                )?;
                further_results.append(&mut further);
                result.append(&mut res);
                vectors_batch.clear()
            }
            // start new batch for current search query
            vectors_batch.push(query);
            prev_params = params;
        }
    }

    // run last batch if any
    if !vectors_batch.is_empty() {
        let (mut res, mut further) = execute_batch_search(
            &segment,
            &vectors_batch,
            &prev_params,
            use_sampling,
            segment_query_context,
            timeout,
        )?;
        further_results.append(&mut further);
        result.append(&mut res);
    }

    Ok((result, further_results))
}
/// Run one batch of identically-parameterized queries against a single segment.
///
/// Returns the per-query results plus, for each query, whether the segment may
/// hold further matches beyond the returned ones.
fn execute_batch_search(
    segment: &LockedSegment,
    vectors_batch: &[QueryVector],
    search_params: &BatchSearchParams,
    use_sampling: bool,
    segment_query_context: &SegmentQueryContext,
    timeout: Duration,
) -> CollectionResult<(Vec<Vec<ScoredPoint>>, Vec<bool>)> {
    let locked_segment = segment.get();

    // Acquire the segment read lock, giving up after `timeout`.
    let Some(read_segment) = locked_segment.try_read_for(timeout) else {
        return Err(CollectionError::timeout(timeout, "batch search"));
    };

    let segment_points = read_segment.available_point_count();
    let segment_config = read_segment.config();

    let top = match use_sampling {
        false => search_params.top,
        true => {
            // An explicit hnsw_ef from the request takes precedence over the
            // index ef_construct when estimating the sampling limit.
            let ef_limit = search_params
                .params
                .and_then(|p| p.hnsw_ef)
                .or_else(|| get_hnsw_ef_construct(segment_config, search_params.vector_name));
            sampling_limit(
                search_params.top,
                ef_limit,
                segment_points,
                segment_query_context.available_point_count(),
            )
        }
    };

    let query_refs = vectors_batch.iter().collect_vec();
    let search_results = read_segment.search_batch(
        search_params.vector_name,
        &query_refs,
        &search_params.with_payload,
        &search_params.with_vector,
        search_params.filter,
        top,
        search_params.params,
        segment_query_context,
    )?;
    drop(read_segment);

    // A query may have further matches if it returned exactly `top` hits.
    let further_results: Vec<bool> = search_results
        .iter()
        .map(|batch_result| batch_result.len() == top)
        .collect();

    Ok((search_results, further_results))
}
/// Find the HNSW ef_construct for a named vector
///
/// If the given named vector has no HNSW index, `None` is returned.
fn get_hnsw_ef_construct(config: &SegmentConfig, vector_name: &VectorName) -> Option<usize> {
    match &config.vector_data.get(vector_name)?.index {
        Indexes::Plain {} => None,
        Indexes::Hnsw(hnsw_config) => Some(hnsw_config.ef_construct),
    }
}
#[cfg(test)]
mod tests {
use std::sync::atomic::AtomicBool;
use ahash::AHashSet;
use api::rest::SearchRequestInternal;
use common::counter::hardware_counter::HardwareCounterCell;
use parking_lot::RwLock;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::fixtures::index_fixtures::random_vector;
use segment::index::VectorIndexEnum;
use segment::types::{Condition, HasIdCondition};
use tempfile::Builder;
use super::*;
use crate::collection_manager::fixtures::{TEST_TIMEOUT, build_test_holder, random_segment};
use crate::collection_manager::holders::segment_holder::SegmentHolder;
use crate::operations::types::CoreSearchRequest;
use crate::optimizers_builder::DEFAULT_INDEXING_THRESHOLD_KB;
#[test]
fn test_is_indexed_enough_condition() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = random_segment(dir.path(), 10, 200, 256);
let vector_index = segment1
.vector_data
.get(DEFAULT_VECTOR_NAME)
.unwrap()
.vector_index
.clone();
let vector_index_borrow = vector_index.borrow();
let hw_counter = HardwareCounterCell::new();
match &*vector_index_borrow {
VectorIndexEnum::Plain(plain_index) => {
let res_1 = plain_index.is_small_enough_for_unindexed_search(25, None, &hw_counter);
assert!(!res_1);
let res_2 =
plain_index.is_small_enough_for_unindexed_search(225, None, &hw_counter);
assert!(res_2);
let ids: AHashSet<_> = vec![1, 2].into_iter().map(PointIdType::from).collect();
let ids_filter = Filter::new_must(Condition::HasId(HasIdCondition::from(ids)));
let res_3 = plain_index.is_small_enough_for_unindexed_search(
25,
Some(&ids_filter),
&hw_counter,
);
assert!(res_3);
}
_ => panic!("Expected plain index"),
}
}
    /// Runs a single search over the fixture holder and verifies the result
    /// count and the expected top ids.
    #[tokio::test]
    async fn test_segments_search() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();

        let segment_holder = build_test_holder(dir.path());

        let query = vec![1.0, 1.0, 1.0, 1.0];

        let req = CoreSearchRequest {
            query: query.into(),
            with_payload: None,
            with_vector: None,
            filter: None,
            params: None,
            limit: 5,
            score_threshold: None,
            offset: 0,
        };

        let batch_request = CoreSearchRequestBatch {
            searches: vec![req],
        };

        let hw_acc = HwMeasurementAcc::new();
        let result = SegmentsSearcher::search(
            Arc::new(segment_holder),
            Arc::new(batch_request),
            &Handle::current(),
            true,
            QueryContext::new(DEFAULT_INDEXING_THRESHOLD_KB, hw_acc),
            TEST_TIMEOUT,
        )
        .await
        .unwrap()
        .into_iter()
        .next()
        .unwrap();

        // eprintln!("result = {:?}", &result);
        assert_eq!(result.len(), 5);

        // The two best-matching fixture points (ids 3 and 11) may come back in
        // either order.
        assert!(result[0].id == 3.into() || result[0].id == 11.into());
        assert!(result[1].id == 3.into() || result[1].id == 11.into());
    }
#[tokio::test]
async fn test_segments_search_sampling() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = random_segment(dir.path(), 10, 2000, 4);
let segment2 = random_segment(dir.path(), 10, 4000, 4);
let mut holder = SegmentHolder::default();
let _sid1 = holder.add_new(segment1);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/merge_optimizer.rs | lib/collection/src/collection_manager/optimizers/merge_optimizer.rs | use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use itertools::Itertools;
use parking_lot::Mutex;
use segment::common::operation_time_statistics::OperationDurationsAggregator;
use segment::types::{HnswConfig, HnswGlobalConfig, QuantizationConfig, SegmentType};
use crate::collection_manager::holders::segment_holder::{
LockedSegment, LockedSegmentHolder, SegmentId,
};
use crate::collection_manager::optimizers::segment_optimizer::{
OptimizerThresholds, SegmentOptimizer,
};
use crate::config::CollectionParams;
/// Bytes per kilobyte, used to convert `max_segment_size_kb` thresholds to bytes.
const BYTES_IN_KB: usize = 1024;
/// Optimizer that tries to reduce number of segments until it fits configured value.
///
/// It merges 3 smallest segments into a single large segment.
/// Merging 3 segments instead of 2 guarantees that after the optimization the number of segments
/// will be less than before.
pub struct MergeOptimizer {
    // Target number of segments the collection should converge to.
    default_segments_number: usize,
    // Size thresholds controlling which segments may be merged.
    thresholds_config: OptimizerThresholds,
    // Base path for segment storage (see `segments_path()`).
    segments_path: PathBuf,
    // Directory for temporary data during optimization (see `temp_path()`).
    collection_temp_dir: PathBuf,
    collection_params: CollectionParams,
    hnsw_config: HnswConfig,
    hnsw_global_config: HnswGlobalConfig,
    quantization_config: Option<QuantizationConfig>,
    // Aggregated optimization timings, exposed through telemetry.
    telemetry_durations_aggregator: Arc<Mutex<OperationDurationsAggregator>>,
}
impl MergeOptimizer {
    /// Create a new merge optimizer from collection-level settings.
    ///
    /// The telemetry aggregator is initialized internally; everything else is
    /// taken from the provided configuration.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        default_segments_number: usize,
        thresholds_config: OptimizerThresholds,
        segments_path: PathBuf,
        collection_temp_dir: PathBuf,
        collection_params: CollectionParams,
        hnsw_config: HnswConfig,
        hnsw_global_config: HnswGlobalConfig,
        quantization_config: Option<QuantizationConfig>,
    ) -> Self {
        MergeOptimizer {
            default_segments_number,
            thresholds_config,
            segments_path,
            collection_temp_dir,
            collection_params,
            hnsw_config,
            hnsw_global_config,
            quantization_config,
            telemetry_durations_aggregator: OperationDurationsAggregator::new(),
        }
    }
}
impl SegmentOptimizer for MergeOptimizer {
    fn name(&self) -> &str {
        "merge"
    }

    fn segments_path(&self) -> &Path {
        self.segments_path.as_path()
    }

    fn temp_path(&self) -> &Path {
        self.collection_temp_dir.as_path()
    }

    fn collection_params(&self) -> CollectionParams {
        self.collection_params.clone()
    }

    fn hnsw_config(&self) -> &HnswConfig {
        &self.hnsw_config
    }

    fn hnsw_global_config(&self) -> &HnswGlobalConfig {
        &self.hnsw_global_config
    }

    fn quantization_config(&self) -> Option<QuantizationConfig> {
        self.quantization_config.clone()
    }

    fn threshold_config(&self) -> &OptimizerThresholds {
        &self.thresholds_config
    }

    /// Select the smallest eligible segments whose merge would reduce the
    /// total segment count without exceeding the max segment size.
    fn check_condition(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        let read_segments = segments.read();

        // Only original (non-proxy) segments that are not already scheduled
        // for optimization are eligible.
        let raw_segments = read_segments
            .iter()
            .filter(|(sid, segment)| {
                matches!(segment, LockedSegment::Original(_)) && !excluded_ids.contains(sid)
            })
            .collect_vec();

        // Already at or below the configured segment count — nothing to merge.
        if raw_segments.len() <= self.default_segments_number {
            return vec![];
        }

        let max_candidates = raw_segments.len() - self.default_segments_number + 2;

        // Find at least top-3 smallest segments to join.
        // We need 3 segments because in this case we can guarantee that total segments number will be less
        let candidates: Vec<_> = raw_segments
            .iter()
            .cloned()
            .filter_map(|(idx, segment)| {
                let segment_entry = segment.get();
                let read_segment = segment_entry.read();
                // Special (already optimized) segments are never merged.
                (read_segment.segment_type() != SegmentType::Special).then_some((
                    *idx,
                    read_segment
                        .max_available_vectors_size_in_bytes()
                        .unwrap_or_default(),
                ))
            })
            .sorted_by_key(|(_, size)| *size)
            .scan(0, |size_sum, (sid, size)| {
                *size_sum += size; // produce a cumulative sum of segment sizes starting from smallest
                Some((sid, *size_sum))
            })
            // Stop once the merged result would exceed the max segment size.
            .take_while(|(_, size)| {
                *size
                    < self
                        .thresholds_config
                        .max_segment_size_kb
                        .saturating_mul(BYTES_IN_KB)
            })
            .take(max_candidates)
            .map(|x| x.0)
            .collect();

        // Merging fewer than 3 segments cannot reduce the segment count.
        if candidates.len() < 3 {
            return vec![];
        }

        candidates
    }

    fn get_telemetry_counter(&self) -> &Mutex<OperationDurationsAggregator> {
        &self.telemetry_durations_aggregator
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;

    use common::budget::ResourceBudget;
    use common::progress_tracker::ProgressTracker;
    use parking_lot::RwLock;
    use segment::index::hnsw_index::num_rayon_threads;
    use tempfile::Builder;

    use super::*;
    use crate::collection_manager::fixtures::{get_merge_optimizer, random_segment};
    use crate::collection_manager::holders::segment_holder::{LockedSegment, SegmentHolder};

    /// The `max_segment_size_kb` threshold must bound which segments get
    /// suggested for merging.
    #[test]
    fn test_max_merge_size() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();

        let mut holder = SegmentHolder::default();

        let dim = 256;

        let _segments_to_merge = [
            holder.add_new(random_segment(dir.path(), 100, 40, dim)),
            holder.add_new(random_segment(dir.path(), 100, 50, dim)),
            holder.add_new(random_segment(dir.path(), 100, 60, dim)),
        ];

        let mut merge_optimizer = get_merge_optimizer(dir.path(), temp_dir.path(), dim, None);

        let locked_holder = Arc::new(RwLock::new(holder));

        merge_optimizer.default_segments_number = 1;

        // With a tight size threshold, no merge candidates fit.
        merge_optimizer.thresholds_config.max_segment_size_kb = 100;

        let check_result_empty =
            merge_optimizer.check_condition(locked_holder.clone(), &Default::default());

        assert!(check_result_empty.is_empty());

        // A larger threshold allows all three segments to be merged.
        merge_optimizer.thresholds_config.max_segment_size_kb = 200;

        let check_result = merge_optimizer.check_condition(locked_holder, &Default::default());

        assert_eq!(check_result.len(), 3);
    }

    /// End-to-end merge: the smallest segments are selected and merged into a
    /// single segment, other segments stay untouched, and the merged segments'
    /// data is removed from disk.
    #[test]
    fn test_merge_optimizer() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();

        let mut holder = SegmentHolder::default();

        let dim = 256;

        // Four small segments expected to be picked for merging...
        let segments_to_merge = [
            holder.add_new(random_segment(dir.path(), 100, 3, dim)),
            holder.add_new(random_segment(dir.path(), 100, 3, dim)),
            holder.add_new(random_segment(dir.path(), 100, 3, dim)),
            holder.add_new(random_segment(dir.path(), 100, 10, dim)),
        ];

        // ...and three larger ones that should be left alone.
        let other_segment_ids = [
            holder.add_new(random_segment(dir.path(), 100, 20, dim)),
            holder.add_new(random_segment(dir.path(), 100, 20, dim)),
            holder.add_new(random_segment(dir.path(), 100, 20, dim)),
        ];

        let merge_optimizer = get_merge_optimizer(dir.path(), temp_dir.path(), dim, None);

        let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));

        let suggested_for_merge =
            merge_optimizer.check_condition(locked_holder.clone(), &Default::default());

        assert_eq!(suggested_for_merge.len(), 4);

        for segment_in in &suggested_for_merge {
            assert!(segments_to_merge.contains(segment_in));
        }

        // Remember on-disk paths so we can verify cleanup after the merge.
        let old_path = segments_to_merge
            .iter()
            .map(|sid| match locked_holder.read().get(*sid).unwrap() {
                LockedSegment::Original(x) => x.read().current_path.clone(),
                LockedSegment::Proxy(_) => panic!("Not expected"),
            })
            .collect_vec();

        let permit_cpu_count = num_rayon_threads(0);
        let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
        let permit = budget.try_acquire(0, permit_cpu_count).unwrap();

        merge_optimizer
            .optimize(
                locked_holder.clone(),
                suggested_for_merge,
                permit,
                budget,
                &AtomicBool::new(false),
                ProgressTracker::new_for_test(),
                Box::new(|| ()),
            )
            .unwrap();

        let after_optimization_segments =
            locked_holder.read().iter().map(|(x, _)| *x).collect_vec();

        // Check proper number of segments after optimization
        assert!(after_optimization_segments.len() <= 5);
        assert!(after_optimization_segments.len() > 3);

        // Check other segments are untouched
        for segment_id in &other_segment_ids {
            assert!(after_optimization_segments.contains(segment_id))
        }

        // Check new optimized segment have all vectors in it
        for segment_id in after_optimization_segments {
            if !other_segment_ids.contains(&segment_id) {
                let holder_guard = locked_holder.read();
                let new_segment = holder_guard.get(segment_id).unwrap();
                assert_eq!(new_segment.get().read().available_point_count(), 3 * 3 + 10);
            }
        }

        // Check if optimized segments removed from disk
        old_path.into_iter().for_each(|x| assert!(!x.exists()));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/config_mismatch_optimizer.rs | lib/collection/src/collection_manager/optimizers/config_mismatch_optimizer.rs | use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use parking_lot::Mutex;
use segment::common::operation_time_statistics::OperationDurationsAggregator;
use segment::index::sparse_index::sparse_index_config::SparseIndexType;
use segment::types::{
HnswConfig, HnswGlobalConfig, Indexes, QuantizationConfig, SegmentType, VectorName,
};
use crate::collection_manager::holders::segment_holder::{LockedSegmentHolder, SegmentId};
use crate::collection_manager::optimizers::segment_optimizer::{
OptimizerThresholds, SegmentOptimizer,
};
use crate::config::CollectionParams;
use crate::operations::config_diff::DiffConfig;
/// Looks for segments having a mismatch between configured and actual parameters
///
/// For example, a user may change the HNSW parameters for a collection. A segment that was already
/// indexed with different parameters now has a mismatch. This segment should be optimized (and
/// indexed) again in order to update the effective configuration.
pub struct ConfigMismatchOptimizer {
    // Size thresholds used by the shared optimizer machinery.
    thresholds_config: OptimizerThresholds,
    // Base path for segment storage (see `segments_path()`).
    segments_path: PathBuf,
    // Directory for temporary data during optimization (see `temp_path()`).
    collection_temp_dir: PathBuf,
    // Target collection configuration segments are compared against.
    collection_params: CollectionParams,
    hnsw_config: HnswConfig,
    hnsw_global_config: HnswGlobalConfig,
    quantization_config: Option<QuantizationConfig>,
    // Aggregated optimization timings, exposed through telemetry.
    telemetry_durations_aggregator: Arc<Mutex<OperationDurationsAggregator>>,
}
impl ConfigMismatchOptimizer {
    /// Create a new config mismatch optimizer from collection-level settings.
    pub fn new(
        thresholds_config: OptimizerThresholds,
        segments_path: PathBuf,
        collection_temp_dir: PathBuf,
        collection_params: CollectionParams,
        hnsw_config: HnswConfig,
        hnsw_global_config: HnswGlobalConfig,
        quantization_config: Option<QuantizationConfig>,
    ) -> Self {
        ConfigMismatchOptimizer {
            thresholds_config,
            segments_path,
            collection_temp_dir,
            collection_params,
            hnsw_config,
            hnsw_global_config,
            quantization_config,
            telemetry_durations_aggregator: OperationDurationsAggregator::new(),
        }
    }

    /// Check if current configuration requires vectors to be stored on disk
    ///
    /// `None` when the collection config does not specify it.
    fn check_if_vectors_on_disk(&self, vector_name: &VectorName) -> Option<bool> {
        self.collection_params
            .vectors
            .get_params(vector_name)
            .and_then(|vector_params| vector_params.on_disk)
    }

    /// Check if current configuration requires sparse vectors index to be stored on disk
    ///
    /// `None` when the collection config does not specify it.
    fn check_if_sparse_vectors_index_on_disk(&self, vector_name: &VectorName) -> Option<bool> {
        self.collection_params
            .sparse_vectors
            .as_ref()
            .and_then(|vector_params| vector_params.get(vector_name))
            .and_then(|params| params.index)
            .and_then(|index| index.on_disk)
    }

    /// Pick at most one mismatching segment: the one with the largest
    /// available vector size among all segments whose effective config
    /// differs from the target collection config.
    fn worst_segment(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        let segments_read_guard = segments.read();
        let candidates: Vec<_> = segments_read_guard
            .iter()
            // Excluded externally, might already be scheduled for optimization
            .filter(|(idx, _)| !excluded_ids.contains(idx))
            .filter_map(|(idx, segment)| {
                let segment_entry = segment.get();
                let read_segment = segment_entry.read();
                let vector_size = read_segment
                    .max_available_vectors_size_in_bytes()
                    .unwrap_or_default();
                let segment_config = read_segment.config();

                if read_segment.segment_type() == SegmentType::Special {
                    return None; // Never optimize already optimized segment
                }

                // Select segments whose payload storage type mismatches the config
                if self.collection_params.on_disk_payload
                    != segment_config.payload_storage_type.is_on_disk()
                {
                    return Some((*idx, vector_size));
                }

                // Determine whether dense data in segment has mismatch
                let dense_has_mismatch =
                    segment_config
                        .vector_data
                        .iter()
                        .any(|(vector_name, vector_data)| {
                            // Check HNSW mismatch
                            match &vector_data.index {
                                Indexes::Plain {} => {}
                                Indexes::Hnsw(effective_hnsw) => {
                                    // Select segment if we have an HNSW mismatch that requires rebuild
                                    let target_hnsw = self.hnsw_config.update_opt(
                                        self.collection_params
                                            .vectors
                                            .get_params(vector_name)
                                            .and_then(|vector_params| {
                                                vector_params.hnsw_config.as_ref()
                                            }),
                                    );
                                    if effective_hnsw.mismatch_requires_rebuild(&target_hnsw) {
                                        return true;
                                    }
                                }
                            }

                            // Check dense vector on-disk storage mismatch
                            if let Some(is_required_on_disk) =
                                self.check_if_vectors_on_disk(vector_name)
                                && is_required_on_disk != vector_data.storage_type.is_on_disk()
                            {
                                return true;
                            }

                            // Check quantization mismatch
                            let target_quantization_collection = self.quantization_config.as_ref();
                            let target_quantization_vector = self
                                .collection_params
                                .vectors
                                .get_params(vector_name)
                                .and_then(|vector_params| {
                                    vector_params.quantization_config.clone()
                                });
                            // Per-vector quantization settings take precedence
                            // over the collection-level ones.
                            let target_quantization = target_quantization_vector
                                .as_ref()
                                .or(target_quantization_collection);
                            vector_data
                                .quantization_config
                                .as_ref()
                                .zip(target_quantization)
                                // Rebuild if current parameters differ from target parameters
                                .map(|(current, target)| current.mismatch_requires_rebuild(target))
                                // Or rebuild if we now change the enabled state on an indexed segment
                                .unwrap_or_else(|| {
                                    let vector_data_quantization_appendable = vector_data
                                        .quantization_config
                                        .as_ref()
                                        .map(|q| q.supports_appendable())
                                        .unwrap_or(false);
                                    let target_quantization_appendable = target_quantization
                                        .map(|q| q.supports_appendable())
                                        .unwrap_or(false);
                                    // If segment is unindexed, only appendable quantization is applied.
                                    // So that we check if any config is appendable to avoid infinity loop here.
                                    let unindexed_changed = common::flags::feature_flags()
                                        .appendable_quantization
                                        && (vector_data_quantization_appendable
                                            || target_quantization_appendable);
                                    (vector_data.quantization_config.is_some()
                                        != target_quantization.is_some())
                                        && (vector_data.index.is_indexed() || unindexed_changed)
                                })
                        });

                // Determine whether sparse data in segment has mismatch
                let sparse_has_mismatch =
                    segment_config
                        .sparse_vector_data
                        .iter()
                        .any(|(vector_name, vector_data)| {
                            let Some(is_required_on_disk) =
                                self.check_if_sparse_vectors_index_on_disk(vector_name)
                            else {
                                return false; // Do nothing if not specified
                            };
                            match vector_data.index.index_type {
                                SparseIndexType::MutableRam => false, // Do nothing for mutable RAM
                                SparseIndexType::ImmutableRam => is_required_on_disk, // Rebuild if we require on disk
                                SparseIndexType::Mmap => !is_required_on_disk, // Rebuild if we require in RAM
                            }
                        });

                (sparse_has_mismatch || dense_has_mismatch).then_some((*idx, vector_size))
            })
            .collect();

        // Select segment with largest vector size
        candidates
            .into_iter()
            .max_by_key(|(_, vector_size)| *vector_size)
            .map(|(segment_id, _)| segment_id)
            .into_iter()
            .collect()
    }
}
impl SegmentOptimizer for ConfigMismatchOptimizer {
    fn name(&self) -> &str {
        "config mismatch"
    }

    fn segments_path(&self) -> &Path {
        self.segments_path.as_path()
    }

    fn temp_path(&self) -> &Path {
        self.collection_temp_dir.as_path()
    }

    fn collection_params(&self) -> CollectionParams {
        self.collection_params.clone()
    }

    fn hnsw_config(&self) -> &HnswConfig {
        &self.hnsw_config
    }

    fn hnsw_global_config(&self) -> &HnswGlobalConfig {
        &self.hnsw_global_config
    }

    fn quantization_config(&self) -> Option<QuantizationConfig> {
        self.quantization_config.clone()
    }

    fn threshold_config(&self) -> &OptimizerThresholds {
        &self.thresholds_config
    }

    /// Delegates to `worst_segment`: at most one mismatching segment is returned.
    fn check_condition(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        self.worst_segment(segments, excluded_ids)
    }

    fn get_telemetry_counter(&self) -> &Mutex<OperationDurationsAggregator> {
        &self.telemetry_durations_aggregator
    }
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use std::sync::Arc;
use common::budget::ResourceBudget;
use common::progress_tracker::ProgressTracker;
use parking_lot::RwLock;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::entry::entry_point::SegmentEntry;
use segment::index::hnsw_index::num_rayon_threads;
use segment::types::{
CompressionRatio, Distance, ProductQuantization, ProductQuantizationConfig,
ScalarQuantizationConfig, ScalarType,
};
use tempfile::Builder;
use super::*;
use crate::collection_manager::fixtures::{random_multi_vec_segment, random_segment};
use crate::collection_manager::holders::segment_holder::{LockedSegment, SegmentHolder};
use crate::collection_manager::optimizers::indexing_optimizer::IndexingOptimizer;
use crate::operations::config_diff::HnswConfigDiff;
use crate::operations::types::VectorsConfig;
use crate::operations::vector_params_builder::VectorParamsBuilder;
const VECTOR1_NAME: &VectorName = "vector1";
const VECTOR2_NAME: &VectorName = "vector2";
/// This tests the config mismatch optimizer for a changed HNSW config
///
/// It tests whether:
/// - the condition check for HNSW mismatches works
/// - optimized segments (and vector storages) use the updated configuration
///
/// In short, this is what happens in this test:
/// - create randomized segment as base
/// - use indexing optimizer to build index for our segment
/// - test config mismatch condition: should not trigger yet
/// - change collection HNSW config
/// - test config mismatch condition: should trigger due to HNSW change
/// - optimize segment with config mismatch optimizer
/// - assert segment uses changed configuration
#[test]
fn test_hnsw_config_mismatch() {
    // Collection configuration
    let (point_count, dim) = (1000, 10);
    // Tiny indexing threshold so the indexing optimizer always wants to build an
    // index; mmap/segment-size thresholds are maxed out so they never trigger.
    let thresholds_config = OptimizerThresholds {
        max_segment_size_kb: usize::MAX,
        memmap_threshold_kb: usize::MAX,
        indexing_threshold_kb: 10,
    };
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Single(
            VectorParamsBuilder::new(dim as u64, Distance::Dot).build(),
        ),
        ..CollectionParams::empty()
    };
    // Base segment
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let mut holder = SegmentHolder::default();
    let segment = random_segment(dir.path(), 100, point_count, dim as usize);
    let segment_id = holder.add_new(segment);
    let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
    let hnsw_config = HnswConfig {
        m: 16,
        ef_construct: 100,
        full_scan_threshold: 10,
        max_indexing_threads: 0,
        on_disk: None,
        payload_m: None,
        inline_storage: None,
    };
    // Optimizers used in test
    let index_optimizer = IndexingOptimizer::new(
        2,
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params.clone(),
        hnsw_config,
        HnswGlobalConfig::default(),
        Default::default(),
    );
    let mut config_mismatch_optimizer = ConfigMismatchOptimizer::new(
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params,
        hnsw_config,
        HnswGlobalConfig::default(),
        Default::default(),
    );
    let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
    let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    // Use indexing optimizer to build index for HNSW mismatch test
    let changed = index_optimizer
        .optimize(
            locked_holder.clone(),
            vec![segment_id],
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    assert!(
        locked_holder.read().get(segment_id).is_none(),
        "optimized segment should be gone",
    );
    assert_eq!(locked_holder.read().len(), 2, "index must be built");
    // Mismatch optimizer should not optimize yet, HNSW config is not changed yet
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 0);
    // Create changed HNSW config with other m/ef_construct value, update it in the optimizer
    let mut changed_hnsw_config = hnsw_config;
    changed_hnsw_config.m /= 2;
    changed_hnsw_config.ef_construct /= 5;
    config_mismatch_optimizer.hnsw_config = changed_hnsw_config;
    // Run mismatch optimizer again, make sure it optimizes now
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 1);
    let changed = config_mismatch_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    // Ensure new segment has changed HNSW config
    locked_holder
        .read()
        .iter()
        .map(|(_, segment)| match segment {
            LockedSegment::Original(s) => s.read(),
            // No proxies should remain once optimization has finished
            LockedSegment::Proxy(_) => unreachable!(),
        })
        // Skip the empty appendable segment created as part of optimization
        .filter(|segment| segment.total_point_count() > 0)
        .for_each(|segment| {
            assert_eq!(
                segment.config().vector_data[DEFAULT_VECTOR_NAME].index,
                Indexes::Hnsw(changed_hnsw_config),
                "segment must be optimized with changed HNSW config",
            );
        });
}
/// This tests the config mismatch optimizer for a changed vector specific HNSW config
///
/// Similar to `test_hnsw_config_mismatch` but for multi vector segment with a vector specific
/// change.
///
/// It tests whether:
/// - the condition check for HNSW mismatches works for a vector specific change
/// - optimized segments (and vector storages) use the updated configuration
///
/// In short, this is what happens in this test:
/// - create randomized multi segment as base
/// - use indexing optimizer to build index for our segment
/// - test config mismatch condition: should not trigger yet
/// - change HNSW config for vector2
/// - test config mismatch condition: should trigger due to HNSW change
/// - optimize segment with config mismatch optimizer
/// - assert segment uses changed configuration
#[test]
fn test_hnsw_config_mismatch_vector_specific() {
    // Collection configuration
    let (point_count, vector1_dim, vector2_dim) = (1000, 10, 20);
    // Tiny indexing threshold so the indexing optimizer always wants to build an
    // index; mmap/segment-size thresholds are maxed out so they never trigger.
    let thresholds_config = OptimizerThresholds {
        max_segment_size_kb: usize::MAX,
        memmap_threshold_kb: usize::MAX,
        indexing_threshold_kb: 10,
    };
    // Vector specific HNSW overrides for vector1; vector2 uses the collection config
    let hnsw_config_vector1 = HnswConfigDiff {
        m: Some(10),
        ef_construct: Some(40),
        on_disk: Some(true),
        ..Default::default()
    };
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Multi(BTreeMap::from([
            (
                VECTOR1_NAME.to_owned(),
                VectorParamsBuilder::new(vector1_dim as u64, Distance::Dot)
                    .with_hnsw_config(hnsw_config_vector1)
                    .build(),
            ),
            (
                VECTOR2_NAME.to_owned(),
                VectorParamsBuilder::new(vector2_dim as u64, Distance::Dot).build(),
            ),
        ])),
        ..CollectionParams::empty()
    };
    // Base segment
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let mut holder = SegmentHolder::default();
    let segment = random_multi_vec_segment(
        dir.path(),
        100,
        point_count,
        vector1_dim as usize,
        vector2_dim as usize,
    );
    let segment_id = holder.add_new(segment);
    let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
    let hnsw_config_collection = HnswConfig {
        m: 16,
        ef_construct: 100,
        full_scan_threshold: 10,
        max_indexing_threads: 0,
        on_disk: None,
        payload_m: None,
        inline_storage: None,
    };
    let permit_cpu_count = num_rayon_threads(hnsw_config_collection.max_indexing_threads);
    let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    // Optimizers used in test
    let index_optimizer = IndexingOptimizer::new(
        2,
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params.clone(),
        hnsw_config_collection,
        HnswGlobalConfig::default(),
        Default::default(),
    );
    let mut config_mismatch_optimizer = ConfigMismatchOptimizer::new(
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params,
        hnsw_config_collection,
        HnswGlobalConfig::default(),
        Default::default(),
    );
    // Use indexing optimizer to build index for HNSW mismatch test
    let changed = index_optimizer
        .optimize(
            locked_holder.clone(),
            vec![segment_id],
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    assert!(
        locked_holder.read().get(segment_id).is_none(),
        "optimized segment should be gone",
    );
    assert_eq!(locked_holder.read().len(), 2, "index must be built");
    // Mismatch optimizer should not optimize yet, HNSW config is not changed yet
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 0);
    // Create changed HNSW config for vector2, update it in the optimizer
    let mut hnsw_config_vector2 = hnsw_config_vector1;
    hnsw_config_vector2.m = hnsw_config_vector1.m.map(|m| m / 2);
    hnsw_config_vector2.ef_construct = None;
    match config_mismatch_optimizer.collection_params.vectors {
        VectorsConfig::Single(_) => unreachable!(),
        VectorsConfig::Multi(ref mut map) => {
            map.get_mut(VECTOR2_NAME)
                .unwrap()
                .hnsw_config
                .replace(hnsw_config_vector2);
        }
    }
    // Run mismatch optimizer again, make sure it optimizes now
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 1);
    let changed = config_mismatch_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    // Ensure new segment has changed HNSW config
    locked_holder
        .read()
        .iter()
        .map(|(_, segment)| match segment {
            LockedSegment::Original(s) => s.read(),
            // No proxies should remain once optimization has finished
            LockedSegment::Proxy(_) => unreachable!(),
        })
        // Skip the empty appendable segment created as part of optimization
        .filter(|segment| segment.total_point_count() > 0)
        .for_each(|segment| {
            // Per-vector config is the collection-level config patched with the diff
            assert_eq!(
                segment.config().vector_data[VECTOR1_NAME].index,
                Indexes::Hnsw(hnsw_config_collection.update(&hnsw_config_vector1)),
                "HNSW config of vector1 is not what we expect",
            );
            assert_eq!(
                segment.config().vector_data[VECTOR2_NAME].index,
                Indexes::Hnsw(hnsw_config_collection.update(&hnsw_config_vector2)),
                "HNSW config of vector2 is not what we expect",
            );
        });
}
/// This tests the config mismatch optimizer for a changed vector specific quantization config
///
/// Similar to `test_hnsw_config_mismatch` but for multi vector segment with a vector specific
/// quantization change.
///
/// It tests whether:
/// - the condition check for quantization mismatches works for a vector specific change
/// - optimized segments (and vector storages) use the updated configuration
///
/// In short, this is what happens in this test:
/// - create randomized multi segment as base
/// - use indexing optimizer to build index for our segment
/// - test config mismatch condition: should not trigger yet
/// - change quantization config for vector2
/// - test config mismatch condition: should trigger due to quantization change
/// - optimize segment with config mismatch optimizer
/// - assert segment uses changed configuration
#[test]
fn test_quantization_config_mismatch_vector_specific() {
    // Collection configuration
    let (point_count, vector1_dim, vector2_dim) = (1000, 10, 20);
    // Tiny indexing threshold so the indexing optimizer always wants to build an
    // index; mmap/segment-size thresholds are maxed out so they never trigger.
    let thresholds_config = OptimizerThresholds {
        max_segment_size_kb: usize::MAX,
        memmap_threshold_kb: usize::MAX,
        indexing_threshold_kb: 10,
    };
    // Vector specific quantization override for vector1; vector2 uses the collection config
    let quantization_config_vector1 =
        QuantizationConfig::Scalar(segment::types::ScalarQuantization {
            scalar: ScalarQuantizationConfig {
                r#type: ScalarType::Int8,
                quantile: Some(0.99),
                always_ram: Some(true),
            },
        });
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Multi(BTreeMap::from([
            (
                VECTOR1_NAME.to_owned(),
                VectorParamsBuilder::new(vector1_dim as u64, Distance::Dot)
                    .with_quantization_config(quantization_config_vector1.clone())
                    .build(),
            ),
            (
                VECTOR2_NAME.to_owned(),
                VectorParamsBuilder::new(vector2_dim as u64, Distance::Dot).build(),
            ),
        ])),
        ..CollectionParams::empty()
    };
    // Base segment
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let mut holder = SegmentHolder::default();
    let segment = random_multi_vec_segment(
        dir.path(),
        100,
        point_count,
        vector1_dim as usize,
        vector2_dim as usize,
    );
    let segment_id = holder.add_new(segment);
    let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
    let quantization_config_collection =
        QuantizationConfig::Scalar(segment::types::ScalarQuantization {
            scalar: ScalarQuantizationConfig {
                r#type: ScalarType::Int8,
                quantile: Some(0.91),
                always_ram: None,
            },
        });
    // Optimizers used in test
    let index_optimizer = IndexingOptimizer::new(
        2,
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params.clone(),
        Default::default(),
        HnswGlobalConfig::default(),
        Some(quantization_config_collection.clone()),
    );
    let mut config_mismatch_optimizer = ConfigMismatchOptimizer::new(
        thresholds_config,
        dir.path().to_owned(),
        temp_dir.path().to_owned(),
        collection_params,
        Default::default(),
        HnswGlobalConfig::default(),
        Some(quantization_config_collection),
    );
    let permit_cpu_count = num_rayon_threads(0);
    let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    // Use indexing optimizer to build index for quantization mismatch test
    let changed = index_optimizer
        .optimize(
            locked_holder.clone(),
            vec![segment_id],
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    assert!(
        locked_holder.read().get(segment_id).is_none(),
        "optimized segment should be gone",
    );
    assert_eq!(locked_holder.read().len(), 2, "index must be built");
    // Mismatch optimizer should not optimize yet, quantization config is not changed yet
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 0);
    // Create changed quantization config for vector2, update it in the optimizer
    let quantization_config_vector2 = QuantizationConfig::Product(ProductQuantization {
        product: ProductQuantizationConfig {
            compression: CompressionRatio::X32,
            always_ram: Some(true),
        },
    });
    match config_mismatch_optimizer.collection_params.vectors {
        VectorsConfig::Single(_) => unreachable!(),
        VectorsConfig::Multi(ref mut map) => {
            map.get_mut(VECTOR2_NAME)
                .unwrap()
                .quantization_config
                .replace(quantization_config_vector2.clone());
        }
    }
    // Run mismatch optimizer again, make sure it optimizes now
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    let suggested_to_optimize =
        config_mismatch_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert_eq!(suggested_to_optimize.len(), 1);
    let changed = config_mismatch_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &false.into(),
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    assert!(changed > 0, "optimizer should have rebuilt this segment");
    // Ensure new segment has changed quantization config
    locked_holder
        .read()
        .iter()
        .map(|(_, segment)| match segment {
            LockedSegment::Original(s) => s.read(),
            // No proxies should remain once optimization has finished
            LockedSegment::Proxy(_) => unreachable!(),
        })
        // Skip the empty appendable segment created as part of optimization
        .filter(|segment| segment.total_point_count() > 0)
        .for_each(|segment| {
            assert_eq!(
                segment.config().vector_data[VECTOR1_NAME].quantization_config,
                Some(quantization_config_vector1.clone()),
                "Quantization config of vector1 is not what we expect",
            );
            assert_eq!(
                segment.config().vector_data[VECTOR2_NAME].quantization_config,
                Some(quantization_config_vector2.clone()),
                "Quantization config of vector2 is not what we expect",
            );
        });
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/indexing_optimizer.rs | lib/collection/src/collection_manager/optimizers/indexing_optimizer.rs | use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use parking_lot::Mutex;
use segment::common::operation_time_statistics::OperationDurationsAggregator;
use segment::types::{HnswConfig, HnswGlobalConfig, QuantizationConfig, SegmentType};
use crate::collection_manager::holders::segment_holder::{
LockedSegmentHolder, SegmentHolder, SegmentId,
};
use crate::collection_manager::optimizers::segment_optimizer::{
OptimizerThresholds, SegmentOptimizer,
};
use crate::config::CollectionParams;
const BYTES_IN_KB: usize = 1024;
/// Looks for segments which require to be indexed.
///
/// If a segment is too large but still does not have indexes, it is time to create some.
/// The process of index creation is slow and CPU-bound, so it is convenient to perform
/// index building in the same way as segment re-creation.
pub struct IndexingOptimizer {
    // Desired number of segments the collection should keep after optimization
    default_segments_number: usize,
    // Size thresholds (in KB) controlling when indexing/mmap optimization triggers
    thresholds_config: OptimizerThresholds,
    // Directory where optimized segments are persisted
    segments_path: PathBuf,
    // Scratch directory used while building new segments
    collection_temp_dir: PathBuf,
    collection_params: CollectionParams,
    hnsw_config: HnswConfig,
    hnsw_global_config: HnswGlobalConfig,
    quantization_config: Option<QuantizationConfig>,
    // Aggregates optimization run durations for telemetry reporting
    telemetry_durations_aggregator: Arc<Mutex<OperationDurationsAggregator>>,
}
impl IndexingOptimizer {
    /// Construct an indexing optimizer from the collection's configuration.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        default_segments_number: usize,
        thresholds_config: OptimizerThresholds,
        segments_path: PathBuf,
        collection_temp_dir: PathBuf,
        collection_params: CollectionParams,
        hnsw_config: HnswConfig,
        hnsw_global_config: HnswGlobalConfig,
        quantization_config: Option<QuantizationConfig>,
    ) -> Self {
        IndexingOptimizer {
            default_segments_number,
            thresholds_config,
            segments_path,
            collection_temp_dir,
            collection_params,
            hnsw_config,
            hnsw_global_config,
            quantization_config,
            telemetry_durations_aggregator: OperationDurationsAggregator::new(),
        }
    }

    /// Find the smallest segment that is already indexed or stored on disk.
    ///
    /// Returns the segment id and its (maximum available) vector storage size in bytes,
    /// or `None` if no eligible segment exists.
    fn smallest_indexed_segment(
        segments: &SegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Option<(SegmentId, usize)> {
        segments
            .iter()
            // Excluded externally, might already be scheduled for optimization
            .filter(|(idx, _)| !excluded_ids.contains(idx))
            .filter_map(|(idx, segment)| {
                let segment_entry = segment.get();
                let read_segment = segment_entry.read();
                let vector_size = read_segment
                    .max_available_vectors_size_in_bytes()
                    .unwrap_or_default();
                if read_segment.segment_type() == SegmentType::Special {
                    return None; // Never optimize already optimized segment
                }
                let segment_config = read_segment.config();
                // Only consider segments that already have an index or on-disk storage
                let is_any_vector_indexed = segment_config.is_any_vector_indexed();
                let is_any_on_disk = segment_config.is_any_on_disk();
                if !(is_any_vector_indexed || is_any_on_disk) {
                    return None;
                }
                Some((idx, vector_size))
            })
            .min_by_key(|(_, vector_size_bytes)| *vector_size_bytes)
            .map(|(idx, size)| (*idx, size))
    }

    /// Select the segment(s) most in need of indexing/mmap optimization.
    ///
    /// Picks the largest segment that requires optimization, and when possible pairs it
    /// with a second small segment so the total number of segments does not grow.
    /// Returns an empty vector if nothing needs optimizing.
    fn worst_segment(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        let segments_read_guard = segments.read();
        // Collect all segments that require optimization, with their vector sizes
        let candidates: Vec<_> = segments_read_guard
            .iter()
            // Excluded externally, might already be scheduled for optimization
            .filter(|(idx, _)| !excluded_ids.contains(idx))
            .filter_map(|(idx, segment)| {
                let segment_entry = segment.get();
                let read_segment = segment_entry.read();
                let max_vector_size_bytes = read_segment
                    .max_available_vectors_size_in_bytes()
                    .unwrap_or_default();
                let segment_config = read_segment.config();
                if read_segment.segment_type() == SegmentType::Special {
                    return None; // Never optimize already optimized segment
                }
                let indexing_threshold_bytes = self
                    .thresholds_config
                    .indexing_threshold_kb
                    .saturating_mul(BYTES_IN_KB);
                let mmap_threshold_bytes = self
                    .thresholds_config
                    .memmap_threshold_kb
                    .saturating_mul(BYTES_IN_KB);
                let mut require_optimization = false;
                // Dense vectors: optimize when big enough and not yet indexed/on-disk
                for (vector_name, vector_config) in self.collection_params.vectors.params_iter() {
                    if let Some(vector_data) = segment_config.vector_data.get(vector_name) {
                        let is_indexed = vector_data.index.is_indexed();
                        let is_on_disk = vector_data.storage_type.is_on_disk();
                        let storage_size_bytes = read_segment
                            .available_vectors_size_in_bytes(vector_name)
                            .unwrap_or_default();
                        let is_big_for_index = storage_size_bytes >= indexing_threshold_bytes;
                        let is_big_for_mmap = storage_size_bytes >= mmap_threshold_bytes;
                        let optimize_for_index = is_big_for_index && !is_indexed;
                        // An explicit on_disk setting overrides the size-based heuristic
                        let optimize_for_mmap = if let Some(on_disk_config) = vector_config.on_disk
                        {
                            on_disk_config && !is_on_disk
                        } else {
                            is_big_for_mmap && !is_on_disk
                        };
                        if optimize_for_index || optimize_for_mmap {
                            require_optimization = true;
                            break;
                        }
                    }
                }
                // Sparse vectors: optimize big segments whose index is still mutable
                if !require_optimization
                    && let Some(sparse_vectors_params) =
                        self.collection_params.sparse_vectors.as_ref()
                {
                    for sparse_vector_name in sparse_vectors_params.keys() {
                        if let Some(sparse_vector_data) =
                            segment_config.sparse_vector_data.get(sparse_vector_name)
                        {
                            let is_index_immutable =
                                sparse_vector_data.index.index_type.is_immutable();
                            let storage_size = read_segment
                                .available_vectors_size_in_bytes(sparse_vector_name)
                                .unwrap_or_default();
                            let is_big_for_index = storage_size >= indexing_threshold_bytes;
                            let is_big_for_mmap = storage_size >= mmap_threshold_bytes;
                            let is_big = is_big_for_index || is_big_for_mmap;
                            if is_big && !is_index_immutable {
                                require_optimization = true;
                                break;
                            }
                        }
                    }
                }
                require_optimization.then_some((*idx, max_vector_size_bytes))
            })
            .collect();
        // Select the largest unindexed segment, return if none
        let selected_segment = candidates
            .iter()
            .max_by_key(|(_, vector_size_bytes)| *vector_size_bytes);
        if selected_segment.is_none() {
            return vec![];
        }
        let (selected_segment_id, selected_segment_size) = *selected_segment.unwrap();
        let number_of_segments = segments_read_guard.len();
        // If the number of segments is equal or bigger than the default_segments_number
        // We want to make sure that we at least do not increase number of segments after optimization, thus we take more than one segment to optimize
        if number_of_segments < self.default_segments_number {
            return vec![selected_segment_id];
        }
        // It is better for scheduling if indexing optimizer optimizes 2 segments.
        // Because result of the optimization is usually 2 segment - it should preserve
        // overall count of segments.
        // Find the smallest unindexed to check if we can index together
        let smallest_unindexed = candidates
            .iter()
            .min_by_key(|(_, vector_size_bytes)| *vector_size_bytes);
        if let Some((idx, size)) = smallest_unindexed
            && *idx != selected_segment_id
            && selected_segment_size + size
                < self
                    .thresholds_config
                    .max_segment_size_kb
                    .saturating_mul(BYTES_IN_KB)
        {
            return vec![selected_segment_id, *idx];
        }
        // Find smallest indexed to check if we can reindex together
        let smallest_indexed = Self::smallest_indexed_segment(&segments_read_guard, excluded_ids);
        if let Some((idx, size)) = smallest_indexed
            && idx != selected_segment_id
            && selected_segment_size + size
                < self
                    .thresholds_config
                    .max_segment_size_kb
                    .saturating_mul(BYTES_IN_KB)
        {
            return vec![selected_segment_id, idx];
        }
        vec![selected_segment_id]
    }
}
impl SegmentOptimizer for IndexingOptimizer {
    /// Human-readable optimizer name used in logs and telemetry.
    fn name(&self) -> &str {
        "indexing"
    }

    /// Directory where optimized segments are persisted.
    fn segments_path(&self) -> &Path {
        &self.segments_path
    }

    /// Scratch directory used while building new segments.
    fn temp_path(&self) -> &Path {
        &self.collection_temp_dir
    }

    /// Collection parameters the optimizer was configured with.
    fn collection_params(&self) -> CollectionParams {
        self.collection_params.clone()
    }

    /// Collection-level HNSW configuration.
    fn hnsw_config(&self) -> &HnswConfig {
        &self.hnsw_config
    }

    /// Global (node-level) HNSW configuration.
    fn hnsw_global_config(&self) -> &HnswGlobalConfig {
        &self.hnsw_global_config
    }

    /// Collection-level quantization configuration, if any.
    fn quantization_config(&self) -> Option<QuantizationConfig> {
        self.quantization_config.as_ref().cloned()
    }

    /// Size thresholds controlling when this optimizer triggers.
    fn threshold_config(&self) -> &OptimizerThresholds {
        &self.thresholds_config
    }

    /// Suggest segments to optimize; delegates to the worst-segment selection.
    fn check_condition(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        self.worst_segment(segments, excluded_ids)
    }

    /// Telemetry aggregator collecting optimization run durations.
    fn get_telemetry_counter(&self) -> &Mutex<OperationDurationsAggregator> {
        &self.telemetry_durations_aggregator
    }
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use std::ops::Deref;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourceBudget;
use common::counter::hardware_counter::HardwareCounterCell;
use common::progress_tracker::ProgressTracker;
use fs_err as fs;
use itertools::Itertools;
use parking_lot::lock_api::RwLock;
use rand::rng;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::index_fixtures::random_vector;
use segment::index::hnsw_index::num_rayon_threads;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::simple_segment_constructor::{VECTOR1_NAME, VECTOR2_NAME};
use segment::types::{Distance, PayloadSchemaType, VectorNameBuf};
use shard::update::{process_field_index_operation, process_point_operation};
use tempfile::Builder;
use super::*;
use crate::collection_manager::fixtures::{random_multi_vec_segment, random_segment};
use crate::collection_manager::holders::segment_holder::{LockedSegment, SegmentHolder};
use crate::collection_manager::optimizers::config_mismatch_optimizer::ConfigMismatchOptimizer;
use crate::operations::point_ops::{
BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations,
};
use crate::operations::types::{VectorParams, VectorsConfig};
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::operations::{CreateIndex, FieldIndexOperations};
/// Initialize the test logger; `try_init` makes repeated calls across tests harmless.
fn init() {
    let _ = env_logger::builder().is_test(true).try_init();
}
/// Check that the indexing optimizer correctly rebuilds a multi-vector segment:
/// the optimized segments must keep both named vectors with their original dimensions
/// and preserve all points and vectors.
#[test]
fn test_multi_vector_optimization() {
    init();
    let mut holder = SegmentHolder::default();
    let stopped = AtomicBool::new(false);
    let dim1 = 128;
    let dim2 = 256;
    let segments_dir = Builder::new().prefix("segments_dir").tempdir().unwrap();
    let segments_temp_dir = Builder::new()
        .prefix("segments_temp_dir")
        .tempdir()
        .unwrap();
    let mut opnum = 101..1000000;
    let large_segment =
        random_multi_vec_segment(segments_dir.path(), opnum.next().unwrap(), 200, dim1, dim2);
    let segment_config = large_segment.segment_config.clone();
    let large_segment_id = holder.add_new(large_segment);
    // Derive collection vector params from the generated segment's config
    let vectors_config: BTreeMap<VectorNameBuf, VectorParams> = segment_config
        .vector_data
        .iter()
        .map(|(name, params)| {
            (
                name.to_owned(),
                VectorParamsBuilder::new(params.size as u64, params.distance).build(),
            )
        })
        .collect();
    let mut index_optimizer = IndexingOptimizer::new(
        2,
        OptimizerThresholds {
            max_segment_size_kb: 300,
            memmap_threshold_kb: 1000,
            indexing_threshold_kb: 1000,
        },
        segments_dir.path().to_owned(),
        segments_temp_dir.path().to_owned(),
        CollectionParams {
            vectors: VectorsConfig::Multi(vectors_config),
            ..CollectionParams::empty()
        },
        Default::default(),
        HnswGlobalConfig::default(),
        Default::default(),
    );
    let locked_holder: Arc<RwLock<_, _>> = Arc::new(RwLock::new(holder));
    let excluded_ids = Default::default();
    // With high thresholds nothing should be suggested yet
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.is_empty());
    // Lowering the indexing threshold makes the large segment eligible
    index_optimizer.thresholds_config.memmap_threshold_kb = 1000;
    index_optimizer.thresholds_config.indexing_threshold_kb = 50;
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.contains(&large_segment_id));
    let permit_cpu_count = num_rayon_threads(0);
    let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    index_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &stopped,
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    let infos = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().info())
        .collect_vec();
    let configs = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().config().clone())
        .collect_vec();
    assert_eq!(infos.len(), 2);
    assert_eq!(configs.len(), 2);
    // All 200 points with both vectors (200 * 2 = 400) must survive optimization
    let total_points: usize = infos.iter().map(|info| info.num_points).sum();
    let total_vectors: usize = infos.iter().map(|info| info.num_vectors).sum();
    assert_eq!(total_points, 200);
    assert_eq!(total_vectors, 400);
    for config in configs {
        assert_eq!(config.vector_data.len(), 2);
        assert_eq!(config.vector_data.get(VECTOR1_NAME).unwrap().size, dim1);
        assert_eq!(config.vector_data.get(VECTOR2_NAME).unwrap().size, dim2);
    }
}
/// End-to-end exercise of the indexing optimizer's condition checks and optimization:
/// threshold-driven suggestions, payload index preservation, mmap conversion,
/// appendable-segment routing of new points, and creation of a fresh appendable
/// segment when none is left.
#[test]
fn test_indexing_optimizer() {
    init();
    let mut rng = rng();
    let mut holder = SegmentHolder::default();
    let payload_field: JsonPath = "number".parse().unwrap();
    let stopped = AtomicBool::new(false);
    let dim = 256;
    let segments_dir = Builder::new().prefix("segments_dir").tempdir().unwrap();
    let segments_temp_dir = Builder::new()
        .prefix("segments_temp_dir")
        .tempdir()
        .unwrap();
    let mut opnum = 101..1000000;
    // Four segments of increasing size so different thresholds select different ones
    let small_segment = random_segment(segments_dir.path(), opnum.next().unwrap(), 25, dim);
    let middle_low_segment =
        random_segment(segments_dir.path(), opnum.next().unwrap(), 90, dim);
    let middle_segment = random_segment(segments_dir.path(), opnum.next().unwrap(), 100, dim);
    let large_segment = random_segment(segments_dir.path(), opnum.next().unwrap(), 200, dim);
    let segment_config = small_segment.segment_config.clone();
    let small_segment_id = holder.add_new(small_segment);
    let middle_low_segment_id = holder.add_new(middle_low_segment);
    let middle_segment_id = holder.add_new(middle_segment);
    let large_segment_id = holder.add_new(large_segment);
    let mut index_optimizer = IndexingOptimizer::new(
        2,
        OptimizerThresholds {
            max_segment_size_kb: 300,
            memmap_threshold_kb: 1000,
            indexing_threshold_kb: 1000,
        },
        segments_dir.path().to_owned(),
        segments_temp_dir.path().to_owned(),
        CollectionParams {
            vectors: VectorsConfig::Single(
                VectorParamsBuilder::new(
                    segment_config.vector_data[DEFAULT_VECTOR_NAME].size as u64,
                    segment_config.vector_data[DEFAULT_VECTOR_NAME].distance,
                )
                .build(),
            ),
            ..CollectionParams::empty()
        },
        Default::default(),
        HnswGlobalConfig::default(),
        Default::default(),
    );
    let locked_holder: Arc<RwLock<_, _>> = Arc::new(RwLock::new(holder));
    let excluded_ids = Default::default();
    // ---- check condition for MMap optimization
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.is_empty());
    // Low indexing threshold: largest candidate plus the smallest one paired with it
    index_optimizer.thresholds_config.memmap_threshold_kb = 1000;
    index_optimizer.thresholds_config.indexing_threshold_kb = 50;
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.contains(&large_segment_id));
    assert!(suggested_to_optimize.contains(&middle_low_segment_id));
    // Both thresholds high again: nothing to do
    index_optimizer.thresholds_config.memmap_threshold_kb = 1000;
    index_optimizer.thresholds_config.indexing_threshold_kb = 1000;
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.is_empty());
    // Low mmap threshold: mmap conversion suggested for the large segment
    index_optimizer.thresholds_config.memmap_threshold_kb = 50;
    index_optimizer.thresholds_config.indexing_threshold_kb = 1000;
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.contains(&large_segment_id));
    index_optimizer.thresholds_config.memmap_threshold_kb = 150;
    index_optimizer.thresholds_config.indexing_threshold_kb = 50;
    // ----- CREATE AN INDEXED FIELD ------
    let hw_counter = HardwareCounterCell::new();
    process_field_index_operation(
        locked_holder.deref(),
        opnum.next().unwrap(),
        &FieldIndexOperations::CreateIndex(CreateIndex {
            field_name: payload_field.clone(),
            field_schema: Some(PayloadSchemaType::Integer.into()),
        }),
        &hw_counter,
    )
    .unwrap();
    let permit_cpu_count = num_rayon_threads(0);
    let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    // ------ Plain -> Mmap & Indexed payload
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.contains(&large_segment_id));
    eprintln!("suggested_to_optimize = {suggested_to_optimize:#?}");
    index_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &stopped,
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    eprintln!("Done");
    // ------ Plain -> Indexed payload
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.contains(&middle_segment_id));
    index_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &stopped,
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    // ------- Keep smallest segment without changes
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &excluded_ids);
    assert!(suggested_to_optimize.is_empty());
    assert_eq!(
        locked_holder.read().len(),
        3,
        "Testing no new segments were created"
    );
    let infos = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().info())
        .collect_vec();
    let configs = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().config().clone())
        .collect_vec();
    let indexed_count = infos
        .iter()
        .filter(|info| info.segment_type == SegmentType::Indexed)
        .count();
    assert_eq!(
        indexed_count, 2,
        "Testing that 2 segments are actually indexed"
    );
    let on_disk_count = configs
        .iter()
        .filter(|config| config.is_any_on_disk())
        .count();
    assert_eq!(
        on_disk_count, 1,
        "Testing that only largest segment is not Mmap"
    );
    let segment_dirs = fs::read_dir(segments_dir.path()).unwrap().collect_vec();
    assert_eq!(
        segment_dirs.len(),
        locked_holder.read().len(),
        "Testing that new segments are persisted and old data is removed"
    );
    for info in &infos {
        assert!(
            info.index_schema.contains_key(&payload_field),
            "Testing that payload is not lost"
        );
        assert_eq!(
            info.index_schema[&payload_field].data_type,
            PayloadSchemaType::Integer,
            "Testing that payload type is not lost"
        );
    }
    // Insert new points; they must land in the (smallest) appendable segment only
    let point_payload = payload_json! {"number": 10000i64};
    let batch = BatchPersisted {
        ids: vec![501.into(), 502.into(), 503.into()],
        vectors: BatchVectorStructPersisted::Single(vec![
            random_vector(&mut rng, dim),
            random_vector(&mut rng, dim),
            random_vector(&mut rng, dim),
        ]),
        payloads: Some(vec![
            Some(point_payload.clone()),
            Some(point_payload.clone()),
            Some(point_payload),
        ]),
    };
    let insert_point_ops =
        PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch));
    let smallest_size = infos
        .iter()
        .min_by_key(|info| info.num_vectors)
        .unwrap()
        .num_vectors;
    let hw_counter = HardwareCounterCell::new();
    process_point_operation(
        locked_holder.deref(),
        opnum.next().unwrap(),
        insert_point_ops,
        &hw_counter,
    )
    .unwrap();
    let new_infos = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().info())
        .collect_vec();
    let new_smallest_size = new_infos
        .iter()
        .min_by_key(|info| info.num_vectors)
        .unwrap()
        .num_vectors;
    assert_eq!(
        new_smallest_size,
        smallest_size + 3,
        "Testing that new data is added to an appendable segment only"
    );
    // ---- New appendable segment should be created if none left
    // Index even the smallest segment
    let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
    index_optimizer.thresholds_config.indexing_threshold_kb = 20;
    let suggested_to_optimize =
        index_optimizer.check_condition(locked_holder.clone(), &Default::default());
    assert!(suggested_to_optimize.contains(&small_segment_id));
    index_optimizer
        .optimize(
            locked_holder.clone(),
            suggested_to_optimize,
            permit,
            budget.clone(),
            &stopped,
            ProgressTracker::new_for_test(),
            Box::new(|| ()),
        )
        .unwrap();
    let new_infos2 = locked_holder
        .read()
        .iter()
        .map(|(_sid, segment)| segment.get().read().info())
        .collect_vec();
    let mut has_empty = false;
    for info in new_infos2 {
        has_empty |= info.num_vectors == 0;
    }
    assert!(
        has_empty,
        "Testing that new segment is created if none left"
    );
    // Inserting again must succeed, landing in the freshly created appendable segment
    let batch = BatchPersisted {
        ids: vec![601.into(), 602.into(), 603.into()],
        vectors: BatchVectorStructPersisted::Single(vec![
            random_vector(&mut rng, dim),
            random_vector(&mut rng, dim),
            random_vector(&mut rng, dim),
        ]),
        payloads: None,
    };
    let insert_point_ops =
        PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch));
    process_point_operation(
        locked_holder.deref(),
        opnum.next().unwrap(),
        insert_point_ops,
        &hw_counter,
    )
    .unwrap();
}
/// Test that indexing optimizer maintain expected number of during the optimization duty
#[test]
fn test_indexing_optimizer_with_number_of_segments() {
init();
let mut holder = SegmentHolder::default();
let stopped = AtomicBool::new(false);
let dim = 256;
let segments_dir = Builder::new().prefix("segments_dir").tempdir().unwrap();
let segments_temp_dir = Builder::new()
.prefix("segments_temp_dir")
.tempdir()
.unwrap();
let mut opnum = 101..1000000;
let segments = vec![
random_segment(segments_dir.path(), opnum.next().unwrap(), 100, dim),
random_segment(segments_dir.path(), opnum.next().unwrap(), 100, dim),
random_segment(segments_dir.path(), opnum.next().unwrap(), 100, dim),
random_segment(segments_dir.path(), opnum.next().unwrap(), 100, dim),
];
let number_of_segments = segments.len();
let segment_config = segments[0].segment_config.clone();
let _segment_ids: Vec<SegmentId> = segments
.into_iter()
.map(|segment| holder.add_new(segment))
.collect();
let locked_holder: Arc<RwLock<_, _>> = Arc::new(RwLock::new(holder));
let index_optimizer = IndexingOptimizer::new(
number_of_segments, // Keep the same number of segments
OptimizerThresholds {
max_segment_size_kb: 1000,
memmap_threshold_kb: 1000,
indexing_threshold_kb: 10, // Always optimize
},
segments_dir.path().to_owned(),
segments_temp_dir.path().to_owned(),
CollectionParams {
vectors: VectorsConfig::Single(
VectorParamsBuilder::new(
segment_config.vector_data[DEFAULT_VECTOR_NAME].size as u64,
segment_config.vector_data[DEFAULT_VECTOR_NAME].distance,
)
.build(),
),
..CollectionParams::empty()
},
Default::default(),
HnswGlobalConfig::default(),
Default::default(),
);
let permit_cpu_count = num_rayon_threads(0);
let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
// Index until all segments are indexed
let mut numer_of_optimizations = 0;
loop {
let suggested_to_optimize =
index_optimizer.check_condition(locked_holder.clone(), &Default::default());
if suggested_to_optimize.is_empty() {
break;
}
log::debug!("suggested_to_optimize = {suggested_to_optimize:#?}");
let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
index_optimizer
.optimize(
locked_holder.clone(),
suggested_to_optimize,
permit,
budget.clone(),
&stopped,
ProgressTracker::new_for_test(),
Box::new(|| ()),
)
.unwrap();
numer_of_optimizations += 1;
assert!(numer_of_optimizations <= number_of_segments);
let number_of_segments = locked_holder.read().len();
log::debug!(
"numer_of_optimizations = {numer_of_optimizations}, number_of_segments = {number_of_segments}"
);
}
// Ensure that the total number of segments did not change
assert_eq!(locked_holder.read().len(), number_of_segments);
}
/// This tests things are as we expect when we define both `on_disk: false` and `memmap_threshold`
///
/// Before this PR (<https://github.com/qdrant/qdrant/pull/3167>) such configuration would create an infinite optimization loop.
///
/// It tests whether:
/// - the on_disk flag is preferred over memmap_threshold
/// - the index optimizer and config mismatch optimizer don't conflict with this preference
/// - there is no infinite optiization loop with the above configuration
///
/// In short, this is what happens in this test:
/// - create randomized segment as base with `on_disk: false` and `memmap_threshold`
/// - test that indexing optimizer and config mismatch optimizer dont trigger
/// - test that current storage is in memory
/// - change `on_disk: None`
/// - test that indexing optimizer now wants to optimize for `memmap_threshold`
/// - optimize with indexing optimizer to put storage on disk
/// - test that config mismatch optimizer doesn't try to revert on disk storage
#[test]
fn test_on_disk_memmap_threshold_conflict() {
// Collection configuration
let (point_count, dim) = (1000, 10);
let thresholds_config = OptimizerThresholds {
max_segment_size_kb: usize::MAX,
memmap_threshold_kb: 10,
indexing_threshold_kb: usize::MAX,
};
let mut collection_params = CollectionParams {
vectors: VectorsConfig::Single(
VectorParamsBuilder::new(dim as u64, Distance::Dot)
.with_on_disk(false)
.build(),
),
..CollectionParams::empty()
};
// Base segment
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/segment_optimizer.rs | lib/collection/src/collection_manager/optimizers/segment_optimizer.rs | use std::collections::{HashMap, HashSet};
use std::ops::Deref;
use std::path::Path;
use std::sync::atomic::AtomicBool;
use common::budget::{ResourceBudget, ResourcePermit};
use common::bytes::bytes_to_human;
use common::counter::hardware_counter::HardwareCounterCell;
use common::disk::dir_disk_size;
use common::progress_tracker::ProgressTracker;
use fs_err as fs;
use io::storage_version::StorageVersion;
use itertools::Itertools;
use parking_lot::lock_api::RwLockWriteGuard;
use parking_lot::{Mutex, RwLockUpgradableReadGuard};
use segment::common::operation_error::{OperationResult, check_process_stopped};
use segment::common::operation_time_statistics::{
OperationDurationsAggregator, ScopeDurationMeasurer,
};
use segment::entry::entry_point::SegmentEntry;
use segment::index::sparse_index::sparse_index_config::SparseIndexType;
use segment::segment::{Segment, SegmentVersion};
use segment::segment_constructor::build_segment;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::types::{
HnswConfig, HnswGlobalConfig, Indexes, QuantizationConfig, SegmentConfig, VectorStorageType,
};
use shard::proxy_segment::{DeletedPoints, ProxyIndexChanges};
use crate::collection_manager::holders::proxy_segment::{ProxyIndexChange, ProxySegment};
use crate::collection_manager::holders::segment_holder::{
LockedSegment, LockedSegmentHolder, SegmentHolder, SegmentId,
};
use crate::config::CollectionParams;
use crate::operations::config_diff::DiffConfig;
use crate::operations::types::{CollectionError, CollectionResult};
const BYTES_IN_KB: usize = 1024;
#[derive(Debug, Clone, Copy)]
pub struct OptimizerThresholds {
pub max_segment_size_kb: usize,
pub memmap_threshold_kb: usize,
pub indexing_threshold_kb: usize,
}
/// SegmentOptimizer - trait implementing common functionality of the optimizers
///
/// It provides functions which allow to re-build specified segments into a new, better one.
/// Process allows read and write (with some tricks) access to the optimized segments.
///
/// Process of the optimization is same for all optimizers.
/// The selection of the candidates for optimization and the configuration
/// of resulting segment are up to concrete implementations.
pub trait SegmentOptimizer {
/// Get name describing this optimizer
fn name(&self) -> &str;
/// Get the path of the segments directory
fn segments_path(&self) -> &Path;
/// Get temp path, where optimized segments could be temporary stored
fn temp_path(&self) -> &Path;
/// Get basic segment config
fn collection_params(&self) -> CollectionParams;
/// Get HNSW config
fn hnsw_config(&self) -> &HnswConfig;
/// Get HNSW global config
fn hnsw_global_config(&self) -> &HnswGlobalConfig;
/// Get quantization config
fn quantization_config(&self) -> Option<QuantizationConfig>;
/// Get thresholds configuration for the current optimizer
fn threshold_config(&self) -> &OptimizerThresholds;
/// Checks if segment optimization is required
fn check_condition(
&self,
segments: LockedSegmentHolder,
excluded_ids: &HashSet<SegmentId>,
) -> Vec<SegmentId>;
fn get_telemetry_counter(&self) -> &Mutex<OperationDurationsAggregator>;
/// Build temp segment
fn temp_segment(&self, save_version: bool) -> CollectionResult<LockedSegment> {
let collection_params = self.collection_params();
let quantization_config = self.quantization_config();
let config = SegmentConfig {
vector_data: collection_params.to_base_vector_data(quantization_config.as_ref())?,
sparse_vector_data: collection_params.to_sparse_vector_data()?,
payload_storage_type: collection_params.payload_storage_type(),
};
Ok(LockedSegment::new(build_segment(
self.segments_path(),
&config,
save_version,
)?))
}
/// Returns error if segment size is larger than available disk space
fn check_segments_size(&self, optimizing_segments: &[LockedSegment]) -> CollectionResult<()> {
// Counting up how much space do the segments being optimized actually take on the fs.
// If there was at least one error while reading the size, this will be `None`.
let mut space_occupied = Some(0u64);
for segment in optimizing_segments {
let segment = match segment {
LockedSegment::Original(segment) => segment,
LockedSegment::Proxy(_) => {
return Err(CollectionError::service_error(
"Proxy segment is not expected here".to_string(),
));
}
};
let locked_segment = segment.read();
space_occupied =
space_occupied.and_then(|acc| match dir_disk_size(locked_segment.data_path()) {
Ok(size) => Some(size + acc),
Err(err) => {
log::debug!(
"Could not estimate size of segment `{}`: {}",
locked_segment.data_path().display(),
err
);
None
}
});
}
let space_needed = space_occupied.map(|x| 2 * x);
// Ensure temp_path exists
if !self.temp_path().exists() {
fs::create_dir_all(self.temp_path()).map_err(|err| {
CollectionError::service_error(format!(
"Could not create temp directory `{}`: {}",
self.temp_path().display(),
err
))
})?;
}
let space_available = match fs4::available_space(self.temp_path()) {
Ok(available) => Some(available),
Err(err) => {
log::debug!(
"Could not estimate available storage space in `{}`: {}",
self.temp_path().display(),
err
);
None
}
};
match (space_available, space_needed) {
(Some(space_available), Some(space_needed)) => {
if space_needed > 0 {
log::debug!(
"Available space: {}, needed for optimization: {}",
bytes_to_human(space_available as usize),
bytes_to_human(space_needed as usize),
);
}
if space_available < space_needed {
return Err(CollectionError::service_error(format!(
"Not enough space available for optimization, needed: {}, available: {}",
bytes_to_human(space_needed as usize),
bytes_to_human(space_available as usize),
)));
}
}
_ => {
log::warn!(
"Could not estimate available storage space in `{}`; will try optimizing anyway",
self.name(),
);
}
}
Ok(())
}
/// Build optimized segment
fn optimized_segment_builder(
&self,
optimizing_segments: &[LockedSegment],
) -> CollectionResult<SegmentBuilder> {
// Example:
//
// S1: {
// text_vectors: 10000,
// image_vectors: 100
// }
// S2: {
// text_vectors: 200,
// image_vectors: 10000
// }
// Example: bytes_count_by_vector_name = {
// text_vectors: 10200 * dim * VECTOR_ELEMENT_SIZE
// image_vectors: 10100 * dim * VECTOR_ELEMENT_SIZE
// }
let mut bytes_count_by_vector_name = HashMap::new();
for segment in optimizing_segments {
let segment = match segment {
LockedSegment::Original(segment) => segment,
LockedSegment::Proxy(_) => {
return Err(CollectionError::service_error(
"Proxy segment is not expected here".to_string(),
));
}
};
let locked_segment = segment.read();
for vector_name in locked_segment.vector_names() {
let vector_size = locked_segment.available_vectors_size_in_bytes(&vector_name)?;
let size = bytes_count_by_vector_name.entry(vector_name).or_insert(0);
*size += vector_size;
}
}
// Example: maximal_vector_store_size_bytes = 10200 * dim * VECTOR_ELEMENT_SIZE
let maximal_vector_store_size_bytes = bytes_count_by_vector_name
.values()
.max()
.copied()
.unwrap_or(0);
let thresholds = self.threshold_config();
let collection_params = self.collection_params();
let threshold_is_indexed = maximal_vector_store_size_bytes
>= thresholds.indexing_threshold_kb.saturating_mul(BYTES_IN_KB);
let threshold_is_on_disk = maximal_vector_store_size_bytes
>= thresholds.memmap_threshold_kb.saturating_mul(BYTES_IN_KB);
let collection_quantization = self.quantization_config();
let mut vector_data =
collection_params.to_base_vector_data(collection_quantization.as_ref())?;
let mut sparse_vector_data = collection_params.to_sparse_vector_data()?;
// If indexing, change to HNSW index and quantization
if threshold_is_indexed {
let collection_hnsw = self.hnsw_config();
vector_data.iter_mut().for_each(|(vector_name, config)| {
// Assign HNSW index
let param_hnsw = collection_params
.vectors
.get_params(vector_name)
.and_then(|params| params.hnsw_config);
let vector_hnsw = collection_hnsw.update_opt(param_hnsw.as_ref());
config.index = Indexes::Hnsw(vector_hnsw);
// Assign quantization config
let param_quantization = collection_params
.vectors
.get_params(vector_name)
.and_then(|params| params.quantization_config.as_ref());
let vector_quantization = param_quantization
.or(collection_quantization.as_ref())
.cloned();
config.quantization_config = vector_quantization;
});
}
// We want to use single-file mmap in the following cases:
// - It is explicitly configured by `mmap_threshold` -> threshold_is_on_disk=true
// - The segment is indexed and configured on disk -> threshold_is_indexed=true && config_on_disk=Some(true)
if threshold_is_on_disk || threshold_is_indexed {
vector_data.iter_mut().for_each(|(vector_name, config)| {
// Check whether on_disk is explicitly configured, if not, set it to true
let config_on_disk = collection_params
.vectors
.get_params(vector_name)
.and_then(|config| config.on_disk);
match config_on_disk {
Some(true) => config.storage_type = VectorStorageType::Mmap, // Both agree, but prefer mmap storage type
Some(false) => {} // on_disk=false wins, do nothing
None => if threshold_is_on_disk { config.storage_type = VectorStorageType::Mmap }, // Mmap threshold wins
}
// If we explicitly configure on_disk, but the segment storage type uses something
// that doesn't match, warn about it
if let Some(config_on_disk) = config_on_disk
&& config_on_disk != config.storage_type.is_on_disk() {
log::warn!("Collection config for vector {vector_name} has on_disk={config_on_disk:?} configured, but storage type for segment doesn't match it");
}
});
}
sparse_vector_data
.iter_mut()
.for_each(|(vector_name, config)| {
// Assign sparse index on disk
if let Some(sparse_config) = &collection_params.sparse_vectors
&& let Some(params) = sparse_config.get(vector_name)
{
let config_on_disk = params
.index
.and_then(|index_params| index_params.on_disk)
.unwrap_or(threshold_is_on_disk);
// If mmap OR index is exceeded
let is_big = threshold_is_on_disk || threshold_is_indexed;
let index_type = match (is_big, config_on_disk) {
(true, true) => SparseIndexType::Mmap, // Big and configured on disk
(true, false) => SparseIndexType::ImmutableRam, // Big and not on disk nor reached threshold
(false, _) => SparseIndexType::MutableRam, // Small
};
config.index.index_type = index_type;
}
});
let optimized_config = SegmentConfig {
vector_data,
sparse_vector_data,
payload_storage_type: collection_params.payload_storage_type(),
};
Ok(SegmentBuilder::new(
self.segments_path(),
self.temp_path(),
&optimized_config,
self.hnsw_global_config(),
)?)
}
/// Restores original segments from proxies
///
/// # Arguments
///
/// * `segments` - segment holder
/// * `proxy_ids` - ids of poxy-wrapped segment to restore
///
/// # Result
///
/// Original segments are pushed into `segments`, proxies removed.
/// Returns IDs on restored segments
///
fn unwrap_proxy(
&self,
segments: &LockedSegmentHolder,
proxy_ids: &[SegmentId],
) -> Vec<SegmentId> {
let mut segments_lock = segments.write();
let mut restored_segment_ids = vec![];
for &proxy_id in proxy_ids {
if let Some(proxy_segment_ref) = segments_lock.get(proxy_id) {
let locked_proxy_segment = proxy_segment_ref.clone();
match locked_proxy_segment {
LockedSegment::Original(_) => {
/* Already unwrapped. It should not actually be here */
log::warn!("Attempt to unwrap raw segment! Should not happen.")
}
LockedSegment::Proxy(proxy_segment) => {
let wrapped_segment = proxy_segment.read().wrapped_segment.clone();
let (restored_id, _proxies) =
segments_lock.swap_new(wrapped_segment, &[proxy_id]);
restored_segment_ids.push(restored_id);
}
}
}
}
restored_segment_ids
}
/// Unwraps proxy, puts wrapped segment back into local shard
///
/// # Arguments
///
/// * `segments` - all registered segments of the collection
/// * `proxy_ids` - currently used proxies
///
/// # Result
///
/// Drops any optimized state, and rolls back the segments to before optimizing. All new
/// changes since optimizing remain available as they were written to other appendable
/// segments.
fn handle_cancellation(
&self,
segments: &LockedSegmentHolder,
proxy_ids: &[SegmentId],
) -> OperationResult<()> {
self.unwrap_proxy(segments, proxy_ids);
Ok(())
}
/// Function to wrap slow part of optimization. Performs proxy rollback in case of cancellation.
/// Warn: this function might be _VERY_ CPU intensive,
/// so it is necessary to avoid any locks inside this part of the code
///
/// # Arguments
///
/// * `optimizing_segments` - Segments to optimize
/// * `proxy_deleted_points` - Holds a set of points, deleted while optimization was running
/// * `proxy_changed_indexes` - Holds a set of indexes changes, created or deleted while optimization was running
/// * `stopped` - flag to check if optimization was cancelled by external thread
///
/// # Result
///
/// Constructs optimized segment
#[allow(clippy::too_many_arguments)]
fn build_new_segment(
&self,
optimizing_segments: &[LockedSegment],
proxies: &[LockedSegment],
permit: ResourcePermit, // IO resources for copying data
resource_budget: ResourceBudget,
stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
progress: ProgressTracker,
) -> CollectionResult<Segment> {
let mut segment_builder = self.optimized_segment_builder(optimizing_segments)?;
check_process_stopped(stopped)?;
let progress_copy_data = progress.subtask("copy_data");
let progress_populate_storages = progress.subtask("populate_vector_storages");
let progress_wait_permit = progress.subtask("wait_cpu_permit");
let segments: Vec<_> = optimizing_segments
.iter()
.map(|i| match i {
LockedSegment::Original(o) => o.clone(),
LockedSegment::Proxy(_) => {
panic!("Trying to optimize a segment that is already being optimized!")
}
})
.collect();
let mut defragmentation_keys = HashSet::new();
for segment in &segments {
let payload_index = &segment.read().payload_index;
let payload_index = payload_index.borrow();
let keys = payload_index
.config()
.indices
.iter()
.filter(|(_, schema)| schema.schema.is_tenant())
.map(|(key, _)| key.clone());
defragmentation_keys.extend(keys);
}
if !defragmentation_keys.is_empty() {
segment_builder.set_defragment_keys(defragmentation_keys.into_iter().collect());
}
{
progress_copy_data.start();
let segment_guards = segments.iter().map(|segment| segment.read()).collect_vec();
segment_builder.update(
&segment_guards.iter().map(Deref::deref).collect_vec(),
stopped,
)?;
drop(progress_copy_data);
}
let proxy_index_changes = self.proxy_index_changes(proxies);
// Apply index changes to segment builder
// Indexes are only used for defragmentation in segment builder, so versions are ignored
for (field_name, change) in proxy_index_changes.iter_unordered() {
match change {
ProxyIndexChange::Create(schema, _) => {
segment_builder.add_indexed_field(field_name.to_owned(), schema.to_owned());
}
ProxyIndexChange::Delete(_) => {
segment_builder.remove_indexed_field(field_name);
}
ProxyIndexChange::DeleteIfIncompatible(_, schema) => {
segment_builder.remove_index_field_if_incompatible(field_name, schema);
}
}
}
// Before switching from IO to CPU, make sure that vectors cache is heated up,
// so indexing process won't need to wait for IO.
progress_populate_storages.start();
segment_builder.populate_vector_storages()?;
drop(progress_populate_storages);
// 000 - acquired
// +++ - blocked on waiting
//
// Case: 1 indexation job at a time, long indexing
//
// IO limit = 1
// CPU limit = 2 Next optimization
// │ loop
// │
// ▼
// IO 0 00000000000000 000000000
// CPU 1 00000000000000000
// 2 00000000000000000
//
//
// IO 0 ++++++++++++++00000000000000000
// CPU 1 ++++++++0000000000
// 2 ++++++++0000000000
//
//
// Case: 1 indexing job at a time, short indexation
//
//
// IO limit = 1
// CPU limit = 2
//
//
// IO 0 000000000000 ++++++++0000000000
// CPU 1 00000
// 2 00000
//
// IO 0 ++++++++++++00000000000 +++++++
// CPU 1 00000
// 2 00000
// At this stage workload shifts from IO to CPU, so we can release IO permit
// Use same number of threads for indexing as for IO.
// This ensures that IO is equally distributed between optimization jobs.
progress_wait_permit.start();
let desired_cpus = permit.num_io as usize;
let indexing_permit = resource_budget
.replace_with(permit, desired_cpus, 0, stopped)
.map_err(|_| {
CollectionError::cancelled("optimization cancelled while waiting for budget")
})?;
drop(progress_wait_permit);
let mut rng = rand::rng();
let mut optimized_segment: Segment =
segment_builder.build(indexing_permit, stopped, &mut rng, hw_counter, progress)?;
// Delete points
let deleted_points_snapshot = self.proxy_deleted_points(proxies);
let proxy_index_changes = self.proxy_index_changes(proxies);
// Apply index changes before point deletions
// Point deletions bump the segment version, can cause index changes to be ignored
let old_optimized_segment_version = optimized_segment.version();
for (field_name, change) in proxy_index_changes.iter_ordered() {
debug_assert!(
change.version() >= old_optimized_segment_version,
"proxied index change should have newer version than segment",
);
match change {
ProxyIndexChange::Create(schema, version) => {
optimized_segment.create_field_index(
*version,
field_name,
Some(schema),
hw_counter,
)?;
}
ProxyIndexChange::Delete(version) => {
optimized_segment.delete_field_index(*version, field_name)?;
}
ProxyIndexChange::DeleteIfIncompatible(version, schema) => {
optimized_segment
.delete_field_index_if_incompatible(*version, field_name, schema)?;
}
}
check_process_stopped(stopped)?;
}
for (point_id, versions) in deleted_points_snapshot {
optimized_segment
.delete_point(versions.operation_version, point_id, hw_counter)
.unwrap();
}
Ok(optimized_segment)
}
/// Performs optimization of collections's segments
///
/// Including:
/// * Segment rebuilding
/// * Segment joining
///
/// # Arguments
///
/// * `segments` - segments holder
/// * `ids` - list of segment ids to perform optimization on. All segments will be merged into single one
/// * `stopped` - flag for early stopping of the optimization. If appears to be `true` - optimization process should be cancelled, all segments unwrapped.
///
/// # Result
///
/// New optimized segment should be added into `segments`.
/// If there were any record changes during the optimization - an additional plain segment will be created.
///
/// Returns id of the created optimized segment. If no optimization was done - returns None
#[expect(clippy::too_many_arguments)]
fn optimize(
&self,
segments: LockedSegmentHolder,
ids: Vec<SegmentId>,
permit: ResourcePermit,
resource_budget: ResourceBudget,
stopped: &AtomicBool,
progress: ProgressTracker,
on_successful_start: Box<dyn FnOnce()>,
) -> CollectionResult<usize> {
check_process_stopped(stopped)?;
let mut timer = ScopeDurationMeasurer::new(self.get_telemetry_counter());
timer.set_success(false);
// On the one hand - we want to check consistently if all provided segments are
// available for optimization (not already under one) and we want to do it before creating a temp segment
// which is an expensive operation. So we can't not unlock `segments` after the check and before the insert.
//
// On the other hand - we do not want to hold write lock during the segment creation.
// Solution in the middle - is a upgradable lock. It ensures consistency after the check and allows to perform read operation.
let segments_lock = segments.upgradable_read();
// Find appendable segments other than optimized ones
//
// If there are such segments - we can avoid creating a temp segment
// If there are none, we need to create a new empty segment to allow writes during optimization
let appendable_segments_ids = segments_lock.appendable_segments_ids();
let has_appendable_segments_except_optimized =
appendable_segments_ids.iter().any(|id| !ids.contains(id));
let need_extra_cow_segment = !has_appendable_segments_except_optimized;
let optimizing_segments: Vec<_> = ids
.iter()
.cloned()
.map(|id| segments_lock.get(id))
.filter_map(|x| x.cloned())
.collect();
// Check that we have enough disk space for optimization
self.check_segments_size(&optimizing_segments)?;
// Check if all segments are not under other optimization or some ids are missing
let all_segments_ok = optimizing_segments.len() == ids.len()
&& optimizing_segments
.iter()
.all(|s| matches!(s, LockedSegment::Original(_)));
if !all_segments_ok {
// Cancel the optimization
return Ok(0);
}
check_process_stopped(stopped)?;
on_successful_start();
let hw_counter = HardwareCounterCell::disposable(); // Internal operation, no measurement needed!
let extra_cow_segment_opt = need_extra_cow_segment
.then(|| self.temp_segment(false))
.transpose()?;
let mut proxies = Vec::new();
for sg in optimizing_segments.iter() {
let proxy = ProxySegment::new(sg.clone());
// Wrapped segment is fresh, so it has no operations
// Operation with number 0 will be applied
if let Some(extra_cow_segment) = &extra_cow_segment_opt {
proxy.replicate_field_indexes(0, &hw_counter, extra_cow_segment)?;
}
proxies.push(proxy);
}
// Save segment version once all payload indices have been converted
// If this ends up not being saved due to a crash, the segment will not be used
match &extra_cow_segment_opt {
Some(LockedSegment::Original(segment)) => {
let segment_path = &segment.read().current_path;
SegmentVersion::save(segment_path)?;
}
Some(LockedSegment::Proxy(_)) => unreachable!(),
None => {}
}
let mut locked_proxies: Vec<LockedSegment> = Vec::with_capacity(proxies.len());
let (proxy_ids, cow_segment_id_opt): (Vec<_>, _) = {
// Exclusive lock for the segments operations.
let mut write_segments = RwLockUpgradableReadGuard::upgrade(segments_lock);
let mut proxy_ids = Vec::new();
for (proxy, idx) in proxies.into_iter().zip(ids.iter().cloned()) {
// During optimization, we expect that logical point data in the wrapped segment is
// not changed at all. But this would be possible if we wrap another proxy segment,
// because it can share state through it's write segment. To prevent this we assert
// here that we only wrap non-proxy segments.
// Also helps to ensure the delete propagation behavior in
// `optimize_segment_propagate_changes` remains sound.
// See: <https://github.com/qdrant/qdrant/pull/7208>
debug_assert!(
matches!(proxy.wrapped_segment, LockedSegment::Original(_)),
"during optimization, wrapped segment in a proxy segment must not be another proxy segment",
);
// replicate_field_indexes for the second time,
// because optimized segments could have been changed.
// The probability is small, though,
// so we can afford this operation under the full collection write lock
if let Some(extra_cow_segment) = &extra_cow_segment_opt {
proxy.replicate_field_indexes(0, &hw_counter, extra_cow_segment)?; // Slow only in case the index is change in the gap between two calls
}
let locked_proxy = LockedSegment::from(proxy);
proxy_ids.push(
write_segments
.swap_new_locked(locked_proxy.clone(), &[idx])
.0,
);
locked_proxies.push(locked_proxy);
}
let cow_segment_id_opt = extra_cow_segment_opt
.map(|extra_cow_segment| write_segments.add_new_locked(extra_cow_segment));
(proxy_ids, cow_segment_id_opt)
};
// SLOW PART: create single optimized segment and propagate all new changes to it
let result = self.optimize_segment_propagate_changes(
&segments,
&optimizing_segments,
&locked_proxies,
permit,
resource_budget,
stopped,
&hw_counter,
progress,
);
let (optimized_segment, mut write_segments_guard) = match result {
Ok(segment) => segment,
Err(err) => {
// Properly cancel optimization on all error kinds
// Unwrap proxies and add temp segment to holder
self.handle_cancellation(&segments, &proxy_ids)?;
return Err(err);
}
};
// Replace proxy segments with new optimized segment
let point_count = optimized_segment.available_point_count();
let (_, proxies) = write_segments_guard.swap_new(optimized_segment, &proxy_ids);
debug_assert_eq!(
proxies.len(),
proxy_ids.len(),
"swapped different number of proxies on unwrap, missing or incorrect segment IDs?",
);
if let Some(cow_segment_id) = cow_segment_id_opt {
// Temp segment might be taken into another parallel optimization
// so it is not necessary exist by this time
write_segments_guard.remove_segment_if_not_needed(cow_segment_id)?;
}
// Release reference counter for each optimized segment
drop(optimizing_segments);
// Unlock collection for search and updates
// After the collection is unlocked - we can remove data as slow as we want
drop(write_segments_guard);
// Drop all pointers to proxies, so we can de-arc them
drop(locked_proxies);
// Only remove data after we ensure the consistency of the collection.
// If remove fails - we will still have operational collection with reported error.
for proxy in proxies {
proxy.drop_data()?;
}
timer.set_success(true);
Ok(point_count)
}
/// Accumulates approximate set of points deleted in a given set of proxies
///
/// This list is not synchronized (if not externally enforced),
/// but guarantees that it contains at least all points deleted in the proxies
/// before the call to this function.
fn proxy_deleted_points(&self, proxies: &[LockedSegment]) -> DeletedPoints {
let mut deleted_points = DeletedPoints::new();
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/mod.rs | lib/collection/src/collection_manager/optimizers/mod.rs | use std::collections::VecDeque;
use std::sync::Arc;
use chrono::{DateTime, Utc};
use common::progress_tracker::{ProgressTracker, ProgressView, new_progress_tracker};
use parking_lot::Mutex;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::{Deserialize, Serialize};
use super::holders::segment_holder::SegmentId;
pub mod config_mismatch_optimizer;
pub mod indexing_optimizer;
pub mod merge_optimizer;
pub mod segment_optimizer;
pub mod vacuum_optimizer;
/// Number of last trackers to keep in tracker log
///
/// Used by `TrackerLog::truncate` as the minimum retained history size.
/// Will never remove older trackers for failed or still ongoing optimizations.
const KEEP_LAST_TRACKERS: usize = 16;
/// A log of optimizer trackers holding their status
#[derive(Default, Clone, Debug)]
pub struct TrackerLog {
    /// Trackers in registration order: oldest at the front, newest at the back.
    descriptions: VecDeque<Tracker>,
}
/// Progress views of tracked optimizations, split by completion state.
#[derive(Clone, Debug, Default)]
pub struct IndexingProgressViews {
    /// Views for optimizations that are still running.
    pub ongoing: Vec<ProgressView>,
    /// Views for optimizations that finished: done, cancelled, or failed.
    pub completed: Vec<ProgressView>,
}
impl TrackerLog {
    /// Register a new optimizer tracker
    pub fn register(&mut self, description: Tracker) {
        self.descriptions.push_back(description);
        // Bound the log size right away so it cannot grow without limit
        self.truncate();
    }

    /// Truncate and forget old trackers for successful/cancelled optimizations
    ///
    /// Will never remove older trackers with failed or still ongoing optimizations.
    ///
    /// Always keeps the last `KEEP_LAST_TRACKERS` trackers.
    fn truncate(&mut self) {
        // Number of excess (oldest) entries that are eligible for removal
        let truncate_range = self.descriptions.len().saturating_sub(KEEP_LAST_TRACKERS);
        // Find items to truncate, start removing from the back
        let truncate = self
            .descriptions
            .iter()
            .enumerate()
            .take(truncate_range)
            .filter(|(_, tracker)| match tracker.state.lock().status {
                // Keep ongoing and failed optimizations regardless of age
                TrackerStatus::Optimizing | TrackerStatus::Error(_) => false,
                TrackerStatus::Done | TrackerStatus::Cancelled(_) => true,
            })
            .map(|(index, _)| index)
            .collect::<Vec<_>>();
        // Remove highest indices first so the remaining indices stay valid
        truncate.into_iter().rev().for_each(|index| {
            self.descriptions.remove(index);
        });
    }

    /// Convert log into list of objects usable in telemetry
    pub fn to_telemetry(&self) -> Vec<TrackerTelemetry> {
        self.descriptions
            .iter()
            // Show latest items first
            .rev()
            .map(Tracker::to_telemetry)
            .collect()
    }

    /// Split progress views of all registered trackers into ongoing and
    /// completed groups, latest first.
    pub fn progress_views(&self) -> IndexingProgressViews {
        let mut ongoing = Vec::new();
        let mut completed = Vec::new();
        for tracker in self.descriptions.iter().rev() {
            let state = tracker.state.lock();
            match state.status {
                TrackerStatus::Optimizing => ongoing.push(tracker.progress_view.clone()),
                TrackerStatus::Done | TrackerStatus::Cancelled(_) | TrackerStatus::Error(_) => {
                    completed.push(tracker.progress_view.clone());
                }
            }
        }
        IndexingProgressViews { ongoing, completed }
    }
}
/// Tracks the state of an optimizer
#[derive(Clone, Debug)]
pub struct Tracker {
    /// Name of the optimizer
    pub name: String,
    /// Segment IDs being optimized
    pub segment_ids: Vec<SegmentId>,
    /// Shared mutable state of this optimization: status and end time
    pub state: Arc<Mutex<TrackerState>>,
    /// A read-only view to progress tracker
    pub progress_view: ProgressView,
}
impl Tracker {
/// Start a new optimizer tracker.
///
/// Returns self (read-write) and a progress tracker (write-only).
pub fn start(
name: impl Into<String>,
segment_ids: Vec<SegmentId>,
) -> (Tracker, ProgressTracker) {
let (progress_view, progress_tracker) = new_progress_tracker();
let tracker = Self {
name: name.into(),
segment_ids,
state: Default::default(),
progress_view,
};
(tracker, progress_tracker)
}
/// Get handle to this tracker, allows updating state
pub fn handle(&self) -> TrackerHandle {
self.state.clone().into()
}
/// Convert into object used in telemetry
pub fn to_telemetry(&self) -> TrackerTelemetry {
let state = self.state.lock();
TrackerTelemetry {
name: self.name.clone(),
segment_ids: self.segment_ids.clone(),
status: state.status.clone(),
start_at: self.progress_view.started_at(),
end_at: state.end_at,
}
}
}
/// Tracker object used in telemetry
///
/// Produced from a [`Tracker`] via [`Tracker::to_telemetry`].
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct TrackerTelemetry {
    /// Name of the optimizer
    #[anonymize(false)]
    pub name: String,
    /// Segment IDs being optimized
    pub segment_ids: Vec<SegmentId>,
    /// Latest status of the optimizer
    pub status: TrackerStatus,
    /// Start time of the optimizer
    pub start_at: DateTime<Utc>,
    /// End time of the optimizer; `None` if still running
    pub end_at: Option<DateTime<Utc>>,
}
/// Handle to an optimizer tracker, allows updating its state
#[derive(Clone)]
pub struct TrackerHandle {
    /// Shared state, also held by the corresponding `Tracker`
    handle: Arc<Mutex<TrackerState>>,
}
impl TrackerHandle {
    /// Apply `status` to the tracked state behind the shared lock.
    pub fn update(&self, status: TrackerStatus) {
        let mut guard = self.handle.lock();
        guard.update(status);
    }
}
impl From<Arc<Mutex<TrackerState>>> for TrackerHandle {
fn from(state: Arc<Mutex<TrackerState>>) -> Self {
Self { handle: state }
}
}
/// Mutable state of an optimizer tracker
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct TrackerState {
    /// Current status; defaults to `Optimizing`
    pub status: TrackerStatus,
    /// Time the optimization finished; `None` while still optimizing
    pub end_at: Option<DateTime<Utc>>,
}
impl TrackerState {
    /// Update the tracker state to the given `status`.
    ///
    /// Terminal statuses (done/cancelled/error) stamp the end time with "now";
    /// switching back to `Optimizing` clears it.
    pub fn update(&mut self, status: TrackerStatus) {
        let finished = !matches!(status, TrackerStatus::Optimizing);
        self.end_at = finished.then(Utc::now);
        self.status = status;
    }
}
/// Represents the current state of the optimizer being tracked
#[derive(
    Serialize, Deserialize, Clone, Debug, JsonSchema, Anonymize, Default, Eq, PartialEq, Hash,
)]
#[serde(rename_all = "lowercase")]
pub enum TrackerStatus {
    /// Optimization is still running (initial state)
    #[default]
    Optimizing,
    /// Optimization finished successfully
    Done,
    /// Optimization was cancelled, with an accompanying message
    #[anonymize(false)]
    Cancelled(String),
    /// Optimization failed, with an accompanying message
    #[anonymize(false)]
    Error(String),
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/optimizers/vacuum_optimizer.rs | lib/collection/src/collection_manager/optimizers/vacuum_optimizer.rs | use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use ordered_float::OrderedFloat;
use parking_lot::Mutex;
use segment::common::operation_time_statistics::OperationDurationsAggregator;
use segment::entry::entry_point::SegmentEntry;
use segment::index::VectorIndex;
use segment::types::{HnswConfig, HnswGlobalConfig, QuantizationConfig, SegmentType};
use segment::vector_storage::VectorStorage;
use crate::collection_manager::holders::segment_holder::{
LockedSegment, LockedSegmentHolder, SegmentId,
};
use crate::collection_manager::optimizers::segment_optimizer::{
OptimizerThresholds, SegmentOptimizer,
};
use crate::config::CollectionParams;
/// Optimizer which looks for segments with high amount of soft-deleted points or vectors
///
/// Since the creation of a segment, a lot of points or vectors may have been soft-deleted. This
/// results in the index slowly breaking apart, and unnecessary storage usage.
///
/// This optimizer will look for the worst segment to rebuild the index and minimize storage usage.
pub struct VacuumOptimizer {
    /// Deleted ratio a segment must exceed to become a vacuum candidate
    deleted_threshold: f64,
    /// Minimum size gate: compared against the total point count (point level)
    /// or against the deleted vector count (vector-index level)
    min_vectors_number: usize,
    thresholds_config: OptimizerThresholds,
    /// Base path where collection segments live
    segments_path: PathBuf,
    /// Directory for temporary data produced during optimization
    collection_temp_dir: PathBuf,
    collection_params: CollectionParams,
    hnsw_config: HnswConfig,
    hnsw_global_config: HnswGlobalConfig,
    quantization_config: Option<QuantizationConfig>,
    /// Aggregates optimization durations for telemetry reporting
    telemetry_durations_aggregator: Arc<Mutex<OperationDurationsAggregator>>,
}
impl VacuumOptimizer {
    /// Construct a new vacuum optimizer from collection-level configuration.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        deleted_threshold: f64,
        min_vectors_number: usize,
        thresholds_config: OptimizerThresholds,
        segments_path: PathBuf,
        collection_temp_dir: PathBuf,
        collection_params: CollectionParams,
        hnsw_config: HnswConfig,
        hnsw_global_config: HnswGlobalConfig,
        quantization_config: Option<QuantizationConfig>,
    ) -> Self {
        VacuumOptimizer {
            deleted_threshold,
            min_vectors_number,
            thresholds_config,
            segments_path,
            collection_temp_dir,
            collection_params,
            hnsw_config,
            quantization_config,
            hnsw_global_config,
            telemetry_durations_aggregator: OperationDurationsAggregator::new(),
        }
    }

    /// Find the single segment with the highest littered (soft-deleted) ratio,
    /// considering both the point-level and the per-vector-index ratios.
    ///
    /// Returns `None` if no segment reaches the vacuum thresholds.
    fn worst_segment(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Option<SegmentId> {
        let segments_read_guard = segments.read();
        segments_read_guard
            .iter()
            // Excluded externally, might already be scheduled for optimization
            .filter(|(idx, _segment)| !excluded_ids.contains(idx))
            .flat_map(|(idx, segment)| {
                // Calculate littered ratio for segment and named vectors
                let littered_ratio_segment = self.littered_ratio_segment(segment);
                let littered_ratio_vectors = self.littered_vectors_index_ratio(segment);
                [littered_ratio_segment, littered_ratio_vectors]
                    .into_iter()
                    .flatten()
                    .map(|ratio| (*idx, ratio))
            })
            .max_by_key(|(_, ratio)| OrderedFloat(*ratio))
            .map(|(idx, _)| idx)
    }

    /// Calculate littered ratio for segment on point level
    ///
    /// Returns `None` if littered ratio did not reach vacuum thresholds.
    fn littered_ratio_segment(&self, segment: &LockedSegment) -> Option<f64> {
        // Only plain segments are vacuumed here; proxies are skipped
        let segment_entry = match segment {
            LockedSegment::Original(segment) => segment,
            LockedSegment::Proxy(_) => return None,
        };
        let read_segment = segment_entry.read();
        let littered_ratio =
            read_segment.deleted_point_count() as f64 / read_segment.total_point_count() as f64;
        // Both the size and ratio thresholds must be reached
        let is_big = read_segment.total_point_count() >= self.min_vectors_number;
        let is_littered = littered_ratio > self.deleted_threshold;
        (is_big && is_littered).then_some(littered_ratio)
    }

    /// Calculate littered ratio for segment on vector index level
    ///
    /// If a segment has multiple named vectors, it checks each one.
    /// We are only interested in indexed vectors, as they are the ones affected by soft-deletes.
    ///
    /// This finds the maximum deletion ratio for a named vector. The ratio is based on the number
    /// of deleted vectors versus the number of indexed vectors.
    ///
    /// Returns `None` if no named vector reached the vacuum thresholds.
    fn littered_vectors_index_ratio(&self, segment: &LockedSegment) -> Option<f64> {
        {
            let segment_entry = segment.get();
            let read_segment = segment_entry.read();
            // Never optimize special segments
            if read_segment.segment_type() == SegmentType::Special {
                return None;
            }
            // Segment must have any index
            let segment_config = read_segment.config();
            if !segment_config.is_any_vector_indexed() {
                return None;
            }
        }
        // We can only work with original segments
        let real_segment = match segment {
            LockedSegment::Original(segment) => segment.read(),
            LockedSegment::Proxy(_) => return None,
        };
        // In this segment, check the index of each named vector for a high deletion ratio.
        // Return the worst ratio.
        real_segment
            .vector_data
            .values()
            .filter(|vector_data| vector_data.vector_index.borrow().is_index())
            .filter_map(|vector_data| {
                // We use the number of now available vectors against the number of indexed vectors
                // to determine how many are soft-deleted from the index.
                let vector_index = vector_data.vector_index.borrow();
                let vector_storage = vector_data.vector_storage.borrow();
                let indexed_vector_count = vector_index.indexed_vector_count();
                let deleted_from_index =
                    indexed_vector_count.saturating_sub(vector_storage.available_vector_count());
                // Guard against division by zero for empty indexes
                let deleted_ratio = if indexed_vector_count != 0 {
                    deleted_from_index as f64 / indexed_vector_count as f64
                } else {
                    0.0
                };
                let reached_minimum = deleted_from_index >= self.min_vectors_number;
                let reached_ratio = deleted_ratio > self.deleted_threshold;
                (reached_minimum && reached_ratio).then_some(deleted_ratio)
            })
            .max_by_key(|ratio| OrderedFloat(*ratio))
    }
}
impl SegmentOptimizer for VacuumOptimizer {
    fn name(&self) -> &str {
        "vacuum"
    }

    fn segments_path(&self) -> &Path {
        self.segments_path.as_path()
    }

    fn temp_path(&self) -> &Path {
        self.collection_temp_dir.as_path()
    }

    fn collection_params(&self) -> CollectionParams {
        self.collection_params.clone()
    }

    fn hnsw_config(&self) -> &HnswConfig {
        &self.hnsw_config
    }

    fn hnsw_global_config(&self) -> &HnswGlobalConfig {
        &self.hnsw_global_config
    }

    fn quantization_config(&self) -> Option<QuantizationConfig> {
        self.quantization_config.clone()
    }

    fn threshold_config(&self) -> &OptimizerThresholds {
        &self.thresholds_config
    }

    /// Suggest at most one segment for optimization: the worst vacuum
    /// candidate, if any segment reaches the configured thresholds.
    fn check_condition(
        &self,
        segments: LockedSegmentHolder,
        excluded_ids: &HashSet<SegmentId>,
    ) -> Vec<SegmentId> {
        self.worst_segment(segments, excluded_ids)
            .into_iter()
            .collect()
    }

    fn get_telemetry_counter(&self) -> &Mutex<OperationDurationsAggregator> {
        &self.telemetry_durations_aggregator
    }
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourceBudget;
use common::counter::hardware_counter::HardwareCounterCell;
use common::progress_tracker::ProgressTracker;
use itertools::Itertools;
use parking_lot::RwLock;
use segment::entry::entry_point::SegmentEntry;
use segment::index::hnsw_index::num_rayon_threads;
use segment::payload_json;
use segment::types::{Distance, PayloadContainer, PayloadSchemaType, VectorName};
use serde_json::Value;
use tempfile::Builder;
use super::*;
use crate::collection_manager::fixtures::{random_multi_vec_segment, random_segment};
use crate::collection_manager::holders::segment_holder::SegmentHolder;
use crate::collection_manager::optimizers::indexing_optimizer::IndexingOptimizer;
use crate::operations::types::VectorsConfig;
use crate::operations::vector_params_builder::VectorParamsBuilder;
    // Named vectors used by the multi-vector test fixtures in this module
    const VECTOR1_NAME: &VectorName = "vector1";
    const VECTOR2_NAME: &VectorName = "vector2";
#[test]
fn test_vacuum_conditions() {
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut holder = SegmentHolder::default();
let segment_id = holder.add_new(random_segment(dir.path(), 100, 200, 4));
let segment = holder.get(segment_id).unwrap();
let hw_counter = HardwareCounterCell::new();
let original_segment_path = match segment {
LockedSegment::Original(s) => s.read().current_path.clone(),
LockedSegment::Proxy(_) => panic!("Not expected"),
};
let segment_points_to_delete = segment
.get()
.read()
.iter_points()
.enumerate()
.filter_map(|(i, point_id)| (i % 2 == 0).then_some(point_id))
.collect_vec();
for &point_id in &segment_points_to_delete {
segment
.get()
.write()
.delete_point(101, point_id, &hw_counter)
.unwrap();
}
let segment_points_to_assign1 = segment
.get()
.read()
.iter_points()
.enumerate()
.filter_map(|(i, point_id)| (i % 20 == 0).then_some(point_id))
.collect_vec();
let segment_points_to_assign2 = segment
.get()
.read()
.iter_points()
.enumerate()
.filter_map(|(i, point_id)| (i % 20 == 0).then_some(point_id))
.collect_vec();
for &point_id in &segment_points_to_assign1 {
segment
.get()
.write()
.set_payload(
102,
point_id,
&payload_json! {"color": "red"},
&None,
&hw_counter,
)
.unwrap();
}
for &point_id in &segment_points_to_assign2 {
segment
.get()
.write()
.set_payload(
102,
point_id,
&payload_json! {"size": 0.42},
&None,
&hw_counter,
)
.unwrap();
}
let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
let vacuum_optimizer = VacuumOptimizer::new(
0.2,
50,
OptimizerThresholds {
max_segment_size_kb: 1000000,
memmap_threshold_kb: 1000000,
indexing_threshold_kb: 1000000,
},
dir.path().to_owned(),
temp_dir.path().to_owned(),
CollectionParams {
vectors: VectorsConfig::Single(VectorParamsBuilder::new(4, Distance::Dot).build()),
..CollectionParams::empty()
},
Default::default(),
HnswGlobalConfig::default(),
Default::default(),
);
let suggested_to_optimize =
vacuum_optimizer.check_condition(locked_holder.clone(), &Default::default());
// Check that only one segment is selected for optimization
assert_eq!(suggested_to_optimize.len(), 1);
let permit_cpu_count = num_rayon_threads(0);
let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
vacuum_optimizer
.optimize(
locked_holder.clone(),
suggested_to_optimize,
permit,
budget.clone(),
&AtomicBool::new(false),
ProgressTracker::new_for_test(),
Box::new(|| ()),
)
.unwrap();
let after_optimization_segments =
locked_holder.read().iter().map(|(x, _)| *x).collect_vec();
// Check only one new segment
assert_eq!(after_optimization_segments.len(), 1);
let optimized_segment_id = *after_optimization_segments.first().unwrap();
let holder_guard = locked_holder.read();
let optimized_segment = holder_guard.get(optimized_segment_id).unwrap();
let segment_arc = optimized_segment.get();
let segment_guard = segment_arc.read();
// Check new segment have proper amount of points
assert_eq!(
segment_guard.available_point_count(),
200 - segment_points_to_delete.len(),
);
// Check payload is preserved in optimized segment
for &point_id in &segment_points_to_assign1 {
assert!(segment_guard.has_point(point_id));
let payload = segment_guard.payload(point_id, &hw_counter).unwrap();
let payload_color = payload
.get_value(&"color".parse().unwrap())
.into_iter()
.next()
.unwrap();
match payload_color {
Value::String(x) => assert_eq!(x, "red"),
_ => panic!(),
}
}
// Check old segment data is removed from disk
assert!(!original_segment_path.exists());
}
/// This tests the vacuum optimizer when many vectors get deleted.
///
/// It tests whether:
/// - the condition check for deleted vectors work
/// - optimized segments (and vector storages) are properly rebuilt
///
/// In short, this is what happens in this test:
/// - create randomized multi vector segment as base
/// - use indexing optimizer to build index for our segment
/// - test vacuum deleted vectors condition: should not trigger yet
/// - delete many points and vectors
/// - assert deletions are stored properly
/// - test vacuum deleted vectors condition: should trigger due to deletions
/// - optimize segment with vacuum optimizer
/// - assert segment is properly optimized
#[test]
fn test_vacuum_deleted_vectors() {
// Collection configuration
let (point_count, vector1_dim, vector2_dim) = (1000, 10, 20);
let thresholds_config = OptimizerThresholds {
max_segment_size_kb: usize::MAX,
memmap_threshold_kb: usize::MAX,
indexing_threshold_kb: 10,
};
let collection_params = CollectionParams {
vectors: VectorsConfig::Multi(BTreeMap::from([
(
VECTOR1_NAME.to_owned(),
VectorParamsBuilder::new(vector1_dim, Distance::Dot).build(),
),
(
VECTOR2_NAME.to_owned(),
VectorParamsBuilder::new(vector2_dim, Distance::Dot).build(),
),
])),
..CollectionParams::empty()
};
// Base segment
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut holder = SegmentHolder::default();
let mut segment = random_multi_vec_segment(
dir.path(),
100,
point_count,
vector1_dim as usize,
vector2_dim as usize,
);
let hw_counter = HardwareCounterCell::new();
segment
.create_field_index(
101,
&"keyword".parse().unwrap(),
Some(&PayloadSchemaType::Keyword.into()),
&hw_counter,
)
.unwrap();
let mut segment_id = holder.add_new(segment);
let locked_holder: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
let hnsw_config = HnswConfig {
m: 16,
ef_construct: 100,
full_scan_threshold: 10, // Force to build HNSW links for payload
max_indexing_threads: 0,
on_disk: None,
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
let budget = ResourceBudget::new(permit_cpu_count, permit_cpu_count);
let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
// Optimizers used in test
let index_optimizer = IndexingOptimizer::new(
2,
thresholds_config,
dir.path().to_owned(),
temp_dir.path().to_owned(),
collection_params.clone(),
hnsw_config,
HnswGlobalConfig::default(),
Default::default(),
);
let vacuum_optimizer = VacuumOptimizer::new(
0.2,
5,
thresholds_config,
dir.path().to_owned(),
temp_dir.path().to_owned(),
collection_params,
hnsw_config,
HnswGlobalConfig::default(),
Default::default(),
);
// Use indexing optimizer to build index for vacuum index test
let changed = index_optimizer
.optimize(
locked_holder.clone(),
vec![segment_id],
permit,
budget.clone(),
&false.into(),
ProgressTracker::new_for_test(),
Box::new(|| ()),
)
.unwrap();
assert!(changed > 0, "optimizer should have rebuilt this segment");
assert!(
locked_holder.read().get(segment_id).is_none(),
"optimized segment should be gone",
);
assert_eq!(locked_holder.read().len(), 2, "index must be built");
// Update working segment ID
segment_id = *locked_holder
.read()
.iter()
.find(|(_, segment)| {
let segment = match segment {
LockedSegment::Original(s) => s.read(),
LockedSegment::Proxy(_) => unreachable!(),
};
segment.total_point_count() > 0
})
.unwrap()
.0;
// Vacuum optimizer should not optimize yet, no points/vectors have been deleted
let suggested_to_optimize =
vacuum_optimizer.check_condition(locked_holder.clone(), &Default::default());
assert_eq!(suggested_to_optimize.len(), 0);
// Delete some points and vectors
{
let holder = locked_holder.write();
let segment = holder.get(segment_id).unwrap();
let mut segment = match segment {
LockedSegment::Original(s) => s.write(),
LockedSegment::Proxy(_) => unreachable!(),
};
// Delete 10% of points
let segment_points_to_delete = segment
.iter_points()
.enumerate()
.filter_map(|(i, point_id)| (i % 10 == 3).then_some(point_id))
.collect_vec();
for &point_id in &segment_points_to_delete {
segment.delete_point(201, point_id, &hw_counter).unwrap();
}
// Delete 25% of vectors named vector1
{
let id_tracker = segment.id_tracker.clone();
let vector1_data = segment.vector_data.get_mut(VECTOR1_NAME).unwrap();
let mut vector1_storage = vector1_data.vector_storage.borrow_mut();
let vector1_vecs_to_delete = id_tracker
.borrow()
.iter_external()
.enumerate()
.filter_map(|(i, point_id)| (i % 4 == 0).then_some(point_id))
.collect_vec();
for &point_id in &vector1_vecs_to_delete {
let id = id_tracker.borrow().internal_id(point_id).unwrap();
vector1_storage.delete_vector(id).unwrap();
}
}
// Delete 10% of vectors named vector2
{
let id_tracker = segment.id_tracker.clone();
let vector2_data = segment.vector_data.get_mut(VECTOR2_NAME).unwrap();
let mut vector2_storage = vector2_data.vector_storage.borrow_mut();
let vector2_vecs_to_delete = id_tracker
.borrow()
.iter_external()
.enumerate()
.filter_map(|(i, point_id)| (i % 10 == 7).then_some(point_id))
.collect_vec();
for &point_id in &vector2_vecs_to_delete {
let id = id_tracker.borrow().internal_id(point_id).unwrap();
vector2_storage.delete_vector(id).unwrap();
}
}
}
// Ensure deleted points and vectors are stored properly before optimizing
locked_holder
.read()
.iter()
.map(|(_, segment)| match segment {
LockedSegment::Original(s) => s.read(),
LockedSegment::Proxy(_) => unreachable!(),
})
.filter(|segment| segment.total_point_count() > 0)
.for_each(|segment| {
// We should still have all points
assert_eq!(segment.total_point_count(), point_count as usize);
// Named vector storages should have deletions, but not at creation
segment.vector_data.values().for_each(|vector_data| {
let vector_storage = vector_data.vector_storage.borrow();
assert!(vector_storage.deleted_vector_count() > 0);
});
});
// Run vacuum index optimizer, make sure it optimizes properly
let permit = budget.try_acquire(0, permit_cpu_count).unwrap();
let suggested_to_optimize =
vacuum_optimizer.check_condition(locked_holder.clone(), &Default::default());
assert_eq!(suggested_to_optimize.len(), 1);
let changed = vacuum_optimizer
.optimize(
locked_holder.clone(),
suggested_to_optimize,
permit,
budget.clone(),
&false.into(),
ProgressTracker::new_for_test(),
Box::new(|| ()),
)
.unwrap();
assert!(changed > 0, "optimizer should have rebuilt this segment");
// Ensure deleted points and vectors are optimized
locked_holder
.read()
.iter()
.map(|(_, segment)| match segment {
LockedSegment::Original(s) => s.read(),
LockedSegment::Proxy(_) => unreachable!(),
})
.filter(|segment| segment.total_point_count() > 0)
.for_each(|segment| {
// We should have deleted some points
assert!(segment.total_point_count() < point_count as usize);
// Named vector storages should have:
// - deleted vectors
// - indexed vectors (more than 0)
// - indexed less vectors than the total available
segment.vector_data.values().for_each(|vector_data| {
let vector_index = vector_data.vector_index.borrow();
let vector_storage = vector_data.vector_storage.borrow();
assert!(vector_storage.available_vector_count() > 0);
assert!(vector_storage.deleted_vector_count() > 0);
assert_eq!(
vector_index.indexed_vector_count(),
vector_storage.available_vector_count(),
);
});
});
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/tests/test_search_aggregation.rs | lib/collection/src/collection_manager/tests/test_search_aggregation.rs | use common::types::ScoreType;
use segment::types::{PointIdType, ScoredPoint, SeqNumberType};
use crate::collection_manager::segments_searcher::SegmentsSearcher;
/// Build a `ScoredPoint` with a numeric id and no payload/vector data.
fn score_point(id: usize, score: ScoreType, version: SeqNumberType) -> ScoredPoint {
    let numeric_id = PointIdType::NumId(id as u64);
    ScoredPoint {
        id: numeric_id,
        score,
        version,
        payload: None,
        vector: None,
        order_value: None,
        shard_key: None,
    }
}
/// Fixture: batch 0, segment 0 search result.
fn search_result_b0_s0() -> Vec<ScoredPoint> {
    // (id, score, version)
    [
        (11, 0.91, 11),
        (12, 0.81, 23),
        (13, 0.71, 12),
        (14, 0.61, 12),
        (15, 0.51, 12),
        (16, 0.41, 31), // Have points, but all bad
    ]
    .into_iter()
    .map(|(id, score, version)| score_point(id, score, version))
    .collect()
}
/// Fixture: batch 0, segment 1 search result.
fn search_result_b0_s1() -> Vec<ScoredPoint> {
    // (id, score, version)
    [
        (21, 0.92, 42),
        (22, 0.82, 12),
        (23, 0.74, 32),
        (24, 0.73, 21),
        (25, 0.72, 55),
        (26, 0.71, 10), // may contain more interesting points
    ]
    .into_iter()
    .map(|(id, score, version)| score_point(id, score, version))
    .collect()
}
/// Fixture: batch 0, segment 2 search result.
fn search_result_b0_s2() -> Vec<ScoredPoint> {
    // (id, score, version)
    [
        (31, 0.93, 52),
        (32, 0.83, 22),
        (33, 0.73, 21), // less point than expected
    ]
    .into_iter()
    .map(|(id, score, version)| score_point(id, score, version))
    .collect()
}
/// Fixture: batch 1, segment 0 search result.
fn search_result_b1_s0() -> Vec<ScoredPoint> {
    // (id, score, version)
    [(111, 0.92, 11), (112, 0.81, 23), (113, 0.71, 12)]
        .into_iter()
        .map(|(id, score, version)| score_point(id, score, version))
        .collect()
}
/// Fixture: batch 1, segment 1 search result (same ids, newer 111).
fn search_result_b1_s1() -> Vec<ScoredPoint> {
    // (id, score, version)
    [(111, 0.91, 14), (112, 0.81, 23), (113, 0.71, 12)]
        .into_iter()
        .map(|(id, score, version)| score_point(id, score, version))
        .collect()
}
/// Fixture: batch 1, segment 2 search result.
fn search_result_b1_s2() -> Vec<ScoredPoint> {
    // (id, score, version)
    [(111, 0.91, 11), (112, 0.81, 23), (113, 0.71, 12)]
        .into_iter()
        .map(|(id, score, version)| score_point(id, score, version))
        .collect()
}
/// Aggregation must deduplicate points by highest version, produce per-batch
/// top-k lists, and mark segments whose results may be incomplete for
/// re-request.
#[test]
fn test_aggregation_of_batch_search_results() {
    // Outer vec: one entry per segment; inner vec: one result list per batch
    let search_results = vec![
        vec![search_result_b0_s0(), search_result_b1_s0()],
        vec![search_result_b0_s1(), search_result_b1_s1()],
        vec![search_result_b0_s2(), search_result_b1_s2()],
    ];
    let result_limits = vec![12, 5];
    let further_results = vec![vec![true, true], vec![true, true], vec![false, true]];
    let (aggregator, re_request) = SegmentsSearcher::process_search_result_step1(
        search_results,
        result_limits,
        &further_results,
    );
    // `re_request` maps segment id -> batch ids that must be re-requested
    assert!(re_request[&1].contains(&0));
    assert!(re_request[&0].contains(&1)); // re-request all segments
    assert!(re_request[&1].contains(&1));
    assert!(re_request[&2].contains(&1));
    let top_results = aggregator.into_topk();
    eprintln!("top_results = {top_results:#?}");
    assert_eq!(top_results.len(), 2);
    assert_eq!(top_results[0].len(), 12);
    assert_eq!(top_results[1].len(), 3);
    // Batch 0: ordered by score across all segments
    assert_eq!(top_results[0][0].id, PointIdType::NumId(31));
    assert_eq!(top_results[0][1].id, PointIdType::NumId(21));
    assert_eq!(top_results[0][2].id, PointIdType::NumId(11));
    // Batch 1: duplicates resolved to the entry with the highest version
    assert_eq!(top_results[1][0].id, PointIdType::NumId(111));
    assert_eq!(top_results[1][0].version, 14);
    assert_eq!(top_results[1][0].score, 0.91);
    assert_eq!(top_results[1][1].id, PointIdType::NumId(112));
    assert_eq!(top_results[1][1].version, 23);
    assert_eq!(top_results[1][1].score, 0.81);
    assert_eq!(top_results[1][2].id, PointIdType::NumId(113));
    assert_eq!(top_results[1][2].version, 12);
    assert_eq!(top_results[1][2].score, 0.71);
}
// Ensure we don't panic if we search with a huge limit
// See: <https://github.com/qdrant/qdrant/issues/5483>
#[test]
fn test_batch_search_aggregation_high_limit() {
    let search_results = vec![
        vec![search_result_b0_s0(), search_result_b1_s0()],
        vec![search_result_b0_s1(), search_result_b1_s1()],
        vec![search_result_b0_s2(), search_result_b1_s2()],
    ];
    // Second batch uses the maximum possible limit
    let result_limits = vec![12, usize::MAX];
    let further_results = vec![vec![true, true], vec![true, true], vec![false, true]];
    // Must complete without panicking; results are intentionally not inspected
    let (_aggregator, _re_request) = SegmentsSearcher::process_search_result_step1(
        search_results,
        result_limits,
        &further_results,
    );
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection_manager/tests/mod.rs | lib/collection/src/collection_manager/tests/mod.rs | use std::collections::HashSet;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use ahash::AHashMap;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use itertools::Itertools;
use parking_lot::RwLock;
use segment::data_types::vectors::{VectorStructInternal, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::types::{ExtendedPointId, PayloadContainer, PointIdType, WithPayload, WithVector};
use shard::retrieve::record_internal::RecordInternal;
use shard::retrieve::retrieve_blocking::retrieve_blocking;
use shard::update::{delete_points, set_payload, upsert_points};
use tempfile::Builder;
use crate::collection_manager::fixtures::{
TEST_TIMEOUT, build_segment_1, build_segment_2, empty_segment,
};
use crate::collection_manager::holders::proxy_segment::ProxySegment;
use crate::collection_manager::holders::segment_holder::{
LockedSegment, LockedSegmentHolder, SegmentHolder, SegmentId,
};
use crate::operations::point_ops::{PointStructPersisted, VectorStructPersisted};
mod test_search_aggregation;
/// Replace segment `sid` in the holder with a proxy wrapping it, as an
/// optimizer would; returns the id of the new proxy segment.
fn wrap_proxy(segments: LockedSegmentHolder, sid: SegmentId) -> SegmentId {
    let mut guard = segments.write();
    let wrapped_segment = guard.get(sid).unwrap().clone();
    let (proxy_id, _replaced_segments) =
        guard.swap_new(ProxySegment::new(wrapped_segment), &[sid]);
    proxy_id
}
/// Upserts routed through a holder containing a proxy segment must succeed
/// and remain visible when reading across all segments.
#[test]
fn test_update_proxy_segments() {
    let is_stopped = AtomicBool::new(false);
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let segment1 = build_segment_1(dir.path());
    let segment2 = build_segment_2(dir.path());
    let mut holder = SegmentHolder::default();
    let sid1 = holder.add_new(segment1);
    let _sid2 = holder.add_new(segment2);
    let segments = Arc::new(RwLock::new(holder));
    // Wrap the first segment into a proxy, as an optimizer would
    let _proxy_id = wrap_proxy(segments.clone(), sid1);
    let vectors = [
        only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
        only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
    ];
    let hw_counter = HardwareCounterCell::new();
    // Upsert two points per iteration with increasing operation versions
    for i in 1..10 {
        let points = vec![
            PointStructPersisted {
                id: (100 * i + 1).into(),
                vector: VectorStructPersisted::from(VectorStructInternal::from(vectors[0].clone())),
                payload: None,
            },
            PointStructPersisted {
                id: (100 * i + 2).into(),
                vector: VectorStructPersisted::from(VectorStructInternal::from(vectors[1].clone())),
                payload: None,
            },
        ];
        upsert_points(&segments.read(), 1000 + i, &points, &hw_counter).unwrap();
    }
    // Collect all point ids visible across every segment in the holder
    let all_ids = segments
        .read()
        .iter()
        .flat_map(|(_id, segment)| {
            segment
                .get()
                .read()
                .read_filtered(None, Some(100), None, &is_stopped, &hw_counter)
        })
        .sorted()
        .collect_vec();
    for i in 1..10 {
        let idx = 100 * i + 1;
        assert!(all_ids.contains(&idx.into()), "Not found {idx}")
    }
}
/// Updating points that live in a proxied segment must mark them deleted in
/// the proxy and move the fresh copies into an appendable segment
/// (copy-on-write behavior).
#[test]
fn test_move_points_to_copy_on_write() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let segment1 = build_segment_1(dir.path());
    let segment2 = build_segment_2(dir.path());
    let mut holder = SegmentHolder::default();
    let sid1 = holder.add_new(segment1);
    let sid2 = holder.add_new(segment2);
    let segments = Arc::new(RwLock::new(holder));
    let proxy_id = wrap_proxy(segments.clone(), sid1);
    let hw_counter = HardwareCounterCell::new();
    let points = vec![
        PointStructPersisted {
            id: 1.into(),
            vector: VectorStructPersisted::from(vec![0.0, 0.0, 0.0, 0.0]),
            payload: None,
        },
        PointStructPersisted {
            id: 2.into(),
            vector: VectorStructPersisted::from(vec![0.0, 0.0, 0.0, 0.0]),
            payload: None,
        },
    ];
    // Points should be marked as deleted in proxy segment
    // and moved to another appendable segment (segment2)
    upsert_points(&segments.read(), 1001, &points, &hw_counter).unwrap();
    // Second upsert: point 2 is updated again, point 3 is new
    let points = vec![
        PointStructPersisted {
            id: 2.into(),
            vector: VectorStructPersisted::from(vec![0.0, 0.0, 0.0, 0.0]),
            payload: None,
        },
        PointStructPersisted {
            id: 3.into(),
            vector: VectorStructPersisted::from(vec![0.0, 0.0, 0.0, 0.0]),
            payload: None,
        },
    ];
    upsert_points(&segments.read(), 1002, &points, &hw_counter).unwrap();
    let segments_write = segments.write();
    let locked_proxy = match segments_write.get(proxy_id).unwrap() {
        LockedSegment::Original(_) => panic!("wrong type"),
        LockedSegment::Proxy(locked_proxy) => locked_proxy,
    };
    let read_proxy = locked_proxy.read();
    let num_deleted_points_in_proxy = read_proxy.deleted_point_count();
    assert_eq!(
        num_deleted_points_in_proxy, 3,
        "3 points should be deleted in proxy"
    );
    // Copy-on-write segment should contain all 3 points
    let cow_segment = segments_write.get(sid2).unwrap();
    let cow_segment_read = cow_segment.get().read();
    let cow_points: HashSet<_> = cow_segment_read.iter_points().collect();
    assert!(
        cow_points.contains(&1.into()),
        "Point 1 should be in copy-on-write segment"
    );
    assert!(
        cow_points.contains(&2.into()),
        "Point 2 should be in copy-on-write segment"
    );
    assert!(
        cow_points.contains(&3.into()),
        "Point 3 should be in copy-on-write segment"
    );
}
#[test]
fn test_upsert_points_in_smallest_segment() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = build_segment_1(dir.path());
let mut segment2 = build_segment_2(dir.path());
let segment3 = empty_segment(dir.path());
let hw_counter = HardwareCounterCell::new();
// Fill segment 1 and 2 to the capacity
for point_id in 0..100 {
segment1
.upsert_point(
20,
point_id.into(),
only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
&hw_counter,
)
.unwrap();
segment2
.upsert_point(
20,
(100 + point_id).into(),
only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
&hw_counter,
)
.unwrap();
}
let mut holder = SegmentHolder::default();
let _sid1 = holder.add_new(segment1);
let _sid2 = holder.add_new(segment2);
let sid3 = holder.add_new(segment3);
let segments = Arc::new(RwLock::new(holder));
let points: Vec<_> = (1000..1010)
.map(|id| PointStructPersisted {
id: id.into(),
vector: VectorStructPersisted::from(VectorStructInternal::from(vec![
0.0, 0.0, 0.0, 0.0,
])),
payload: None,
})
.collect();
upsert_points(&segments.read(), 1000, &points, &hw_counter).unwrap();
// Segment 1 and 2 are over capacity, we expect to have the new points in segment 3
{
let segment3 = segments.read();
let segment3_read = segment3.get(sid3).unwrap().get().read();
for point_id in 1000..1010 {
assert!(segment3_read.has_point(point_id.into()));
}
for point_id in 0..10 {
assert!(!segment3_read.has_point(point_id.into()));
}
}
}
/// Test that a delete operation deletes all point versions.
///
/// See: <https://github.com/qdrant/qdrant/pull/5956>
#[test]
fn test_delete_all_point_versions() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let point_id = ExtendedPointId::from(123);
let old_vector = vec![0.0, 1.0, 2.0, 3.0];
let new_vector = vec![3.0, 2.0, 1.0, 0.0];
let mut segment1 = empty_segment(dir.path());
let mut segment2 = empty_segment(dir.path());
// Insert point 123 in both segments, having version 100 and 101
segment1
.upsert_point(
100,
point_id,
segment::data_types::vectors::only_default_vector(&old_vector),
&hw_counter,
)
.unwrap();
segment2
.upsert_point(
101,
point_id,
segment::data_types::vectors::only_default_vector(&new_vector),
&hw_counter,
)
.unwrap();
// Set up locked segment holder
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
let segments = Arc::new(RwLock::new(holder));
// We should be able to retrieve point 123
let retrieved = retrieve_blocking(
segments.clone(),
&[point_id],
&WithPayload::from(false),
&WithVector::from(true),
TEST_TIMEOUT,
&AtomicBool::new(false),
HwMeasurementAcc::new(),
)
.unwrap();
assert_eq!(
retrieved,
AHashMap::from([(
point_id,
RecordInternal {
id: point_id,
vector: Some(VectorStructInternal::Single(new_vector)),
payload: None,
shard_key: None,
order_value: None,
}
)])
);
{
// Assert that point 123 is in both segments
let holder = segments.read();
assert!(holder.get(sid1).unwrap().get().read().has_point(point_id));
assert!(holder.get(sid2).unwrap().get().read().has_point(point_id));
// Delete point 123
delete_points(&holder, 102, &[123.into()], &hw_counter).unwrap();
// Assert that point 123 is deleted from both segments
// Note: before the bug fix the point was only deleted from segment 2
assert!(!holder.get(sid1).unwrap().get().read().has_point(point_id));
assert!(!holder.get(sid2).unwrap().get().read().has_point(point_id));
}
// Drop the last segment, only keep the first
// Pretend the segment was picked up by the optimizer, and was totally optimized away
let removed_segments = segments.write().remove(&[sid2]);
assert_eq!(removed_segments.len(), 1);
// We must not be able to retrieve point 123
// Note: before the bug fix we could retrieve the point again from segment 1
let retrieved = retrieve_blocking(
segments.clone(),
&[point_id],
&WithPayload::from(false),
&WithVector::from(false),
TEST_TIMEOUT,
&AtomicBool::new(false),
HwMeasurementAcc::new(),
)
.unwrap();
assert!(retrieved.is_empty());
}
#[test]
fn test_proxy_shared_updates() {
// Testing that multiple proxies that share point with the same id but different versions
// are able to successfully apply and resolve update operation.
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = empty_segment(dir.path());
let mut segment2 = empty_segment(dir.path());
let old_vec = vec![1.0, 0.0, 0.0, 1.0];
let new_vec = vec![1.0, 0.0, 1.0, 0.0];
let old_payload = payload_json! {"size": vec!["small"]};
let new_payload = payload_json! {"size": vec!["big"]};
// Appendable segment that should serve as a copy-on-write segment for both proxies
let write_segment = LockedSegment::new(empty_segment(dir.path()));
let idx1 = PointIdType::from(1);
let idx2 = PointIdType::from(2);
let hw_counter = HardwareCounterCell::new();
segment1
.upsert_point(10, idx1, only_default_vector(&old_vec), &hw_counter)
.unwrap();
segment1
.set_payload(10, idx1, &old_payload, &None, &hw_counter)
.unwrap();
segment1
.upsert_point(20, idx2, only_default_vector(&new_vec), &hw_counter)
.unwrap();
segment1
.set_payload(20, idx2, &new_payload, &None, &hw_counter)
.unwrap();
segment2
.upsert_point(20, idx1, only_default_vector(&new_vec), &hw_counter)
.unwrap();
segment2
.set_payload(20, idx1, &new_payload, &None, &hw_counter)
.unwrap();
segment2
.upsert_point(10, idx2, only_default_vector(&old_vec), &hw_counter)
.unwrap();
segment2
.set_payload(10, idx2, &old_payload, &None, &hw_counter)
.unwrap();
let locked_segment_1 = LockedSegment::new(segment1);
let locked_segment_2 = LockedSegment::new(segment2);
let proxy_segment_1 = ProxySegment::new(locked_segment_1);
let proxy_segment_2 = ProxySegment::new(locked_segment_2);
let mut holder = SegmentHolder::default();
let proxy_1_id = holder.add_new(proxy_segment_1);
let proxy_2_id = holder.add_new(proxy_segment_2);
let write_segment_id = holder.add_new(write_segment);
let payload = payload_json! {"color": vec!["yellow"]};
let ids = vec![idx1, idx2];
set_payload(&holder, 30, &payload, &ids, &None, &hw_counter).unwrap();
// Points should still be accessible in both proxies through write segment
for &point_id in &ids {
assert!(
!holder
.get(proxy_1_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
assert!(
!holder
.get(proxy_2_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
assert!(
holder
.get(write_segment_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
}
let locked_holder = Arc::new(RwLock::new(holder));
let is_stopped = AtomicBool::new(false);
let with_payload = WithPayload::from(true);
let with_vector = WithVector::from(true);
let result = retrieve_blocking(
locked_holder.clone(),
&ids,
&with_payload,
&with_vector,
TEST_TIMEOUT,
&is_stopped,
HwMeasurementAcc::new(),
)
.unwrap();
assert_eq!(
result.keys().copied().collect::<HashSet<_>>(),
HashSet::from_iter(ids),
"must retrieve all point IDs",
);
let expected_payload = payload_json! {"size": vec!["big"], "color": vec!["yellow"]};
for (point_id, record) in result {
let payload = record
.payload
.unwrap_or_else(|| panic!("No payload for point_id = {point_id}"));
assert_eq!(payload, expected_payload);
}
}
#[test]
fn test_proxy_shared_updates_same_version() {
// Testing that multiple proxies that share point with the same id but the same versions
// are able to successfully apply and resolve update operation.
//
// It is undefined which instance of the point is picked if they have the exact same version.
// What we can check is that at least one instance of the point is selected, and that we don't
// accidentally lose points.
// This is especially important with merging <https://github.com/qdrant/qdrant/pull/5962>.
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = empty_segment(dir.path());
let mut segment2 = empty_segment(dir.path());
let old_vec = vec![1.0, 0.0, 0.0, 1.0];
let new_vec = vec![1.0, 0.0, 1.0, 0.0];
let old_payload = payload_json! {"size": "small"};
let new_payload = payload_json! {"size": "big"};
let write_segment = LockedSegment::new(empty_segment(dir.path()));
let idx1 = PointIdType::from(1);
let idx2 = PointIdType::from(2);
let hw_counter = HardwareCounterCell::new();
segment1
.upsert_point(10, idx1, only_default_vector(&old_vec), &hw_counter)
.unwrap();
segment1
.set_payload(10, idx1, &old_payload, &None, &hw_counter)
.unwrap();
segment1
.upsert_point(10, idx2, only_default_vector(&new_vec), &hw_counter)
.unwrap();
segment1
.set_payload(10, idx2, &new_payload, &None, &hw_counter)
.unwrap();
segment2
.upsert_point(10, idx1, only_default_vector(&new_vec), &hw_counter)
.unwrap();
segment2
.set_payload(10, idx1, &new_payload, &None, &hw_counter)
.unwrap();
segment2
.upsert_point(10, idx2, only_default_vector(&old_vec), &hw_counter)
.unwrap();
segment2
.set_payload(10, idx2, &old_payload, &None, &hw_counter)
.unwrap();
let locked_segment_1 = LockedSegment::new(segment1);
let locked_segment_2 = LockedSegment::new(segment2);
let proxy_segment_1 = ProxySegment::new(locked_segment_1);
let proxy_segment_2 = ProxySegment::new(locked_segment_2);
let mut holder = SegmentHolder::default();
let proxy_1_id = holder.add_new(proxy_segment_1);
let proxy_2_id = holder.add_new(proxy_segment_2);
let write_segment_id = holder.add_new(write_segment);
let payload = payload_json! {"color": "yellow"};
let ids = vec![idx1, idx2];
set_payload(&holder, 20, &payload, &ids, &None, &hw_counter).unwrap();
// Points should still be accessible in both proxies through write segment
for &point_id in &ids {
assert!(
!holder
.get(proxy_1_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
assert!(
!holder
.get(proxy_2_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
assert!(
holder
.get(write_segment_id)
.unwrap()
.get()
.read()
.has_point(point_id),
);
}
let locked_holder = Arc::new(RwLock::new(holder));
let is_stopped = AtomicBool::new(false);
let with_payload = WithPayload::from(true);
let with_vector = WithVector::from(true);
let result = retrieve_blocking(
locked_holder.clone(),
&ids,
&with_payload,
&with_vector,
TEST_TIMEOUT,
&is_stopped,
HwMeasurementAcc::new(),
)
.unwrap();
assert_eq!(
result.keys().copied().collect::<HashSet<_>>(),
HashSet::from_iter(ids),
"must retrieve all point IDs",
);
for (point_id, record) in result {
let payload = record
.payload
.unwrap_or_else(|| panic!("No payload for point_id = {point_id}"));
dbg!(&payload);
let color = payload.get_value(&JsonPath::new("color"));
assert_eq!(color.len(), 1);
let color = color[0];
assert_eq!(color.as_str(), Some("yellow"));
let size = payload.get_value(&JsonPath::new("size"));
assert_eq!(size.len(), 1);
let size = size[0];
assert!(["small", "big"].contains(&size.as_str().unwrap()));
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/hw_metrics.rs | lib/collection/src/tests/hw_metrics.rs | use std::sync::Arc;
use std::time::Duration;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::{HwMeasurementAcc, HwSharedDrain};
use common::save_on_disk::SaveOnDisk;
use rand::rngs::ThreadRng;
use rand::{RngCore, rng};
use segment::data_types::vectors::{NamedQuery, VectorInternal, VectorStructInternal};
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequestBatch;
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::operations::CollectionUpdateOperations;
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted,
};
use crate::operations::types::{CollectionError, CoreSearchRequest};
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::tests::fixtures::create_collection_config_with_dim;
#[tokio::test(flavor = "multi_thread")]
async fn test_hw_metrics_cancellation() {
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let mut config = create_collection_config_with_dim(512);
config.optimizer_config.indexing_threshold = None;
let collection_name = "test".to_string();
let current_runtime: Handle = Handle::current();
let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
let payload_index_schema =
Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
let shard = LocalShard::build(
0,
collection_name.clone(),
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
Arc::new(Default::default()),
payload_index_schema.clone(),
current_runtime.clone(),
current_runtime.clone(),
ResourceBudget::default(),
config.optimizer_config.clone(),
)
.await
.unwrap();
let upsert_ops = make_random_points_upsert_op(10_000);
shard
.update(upsert_ops.into(), true, HwMeasurementAcc::new())
.await
.unwrap();
let mut rand = rng();
let req = CoreSearchRequestBatch {
searches: vec![CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery {
using: None,
query: VectorInternal::from(rand_vector(512, &mut rand)),
}),
filter: None,
params: None,
limit: 1010,
offset: 0,
with_payload: None,
with_vector: None,
score_threshold: None,
}],
};
let outer_hw = HwSharedDrain::default();
{
let hw_counter = HwMeasurementAcc::new_with_metrics_drain(outer_hw.clone());
let search_res = shard
.do_search(
Arc::new(req),
¤t_runtime,
Duration::from_millis(10), // Very short duration to hit timeout before the search finishes
hw_counter,
)
.await;
// Ensure we triggered a timeout and the search didn't exit too early.
assert!(matches!(
search_res.unwrap_err(),
CollectionError::Timeout { description: _ }
));
// Wait until the cancellation is processed is finished
std::thread::sleep(Duration::from_millis(50));
}
assert!(outer_hw.get_cpu() > 0);
}
fn make_random_points_upsert_op(len: usize) -> CollectionUpdateOperations {
let mut points = vec![];
let mut rand = rng();
for i in 0..len as u64 {
let rand_vector = rand_vector(512, &mut rand);
points.push(PointStructPersisted {
id: segment::types::ExtendedPointId::NumId(i),
vector: VectorStructInternal::from(rand_vector).into(),
payload: None,
});
}
let op = PointInsertOperationsInternal::from(points);
CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(op))
}
fn rand_vector(size: usize, rand: &mut ThreadRng) -> Vec<f32> {
(0..size).map(|_| rand.next_u32() as f32).collect()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/fix_payload_indices.rs | lib/collection/src/tests/fix_payload_indices.rs | use std::sync::Arc;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use segment::types::{PayloadFieldSchema, PayloadSchemaType};
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::tests::fixtures::*;
#[tokio::test(flavor = "multi_thread")]
async fn test_fix_payload_indices() {
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let config = create_collection_config();
let collection_name = "test".to_string();
let current_runtime: Handle = Handle::current();
let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
let payload_index_schema =
Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
let shard = LocalShard::build(
0,
collection_name.clone(),
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
Arc::new(Default::default()),
payload_index_schema.clone(),
current_runtime.clone(),
current_runtime.clone(),
ResourceBudget::default(),
config.optimizer_config.clone(),
)
.await
.unwrap();
let hw_acc = HwMeasurementAcc::new();
let upsert_ops = upsert_operation();
shard
.update(upsert_ops.into(), true, hw_acc.clone())
.await
.unwrap();
// Create payload index in shard locally, not in global collection configuration
let index_op = create_payload_index_operation();
shard
.update(index_op.into(), true, hw_acc.clone())
.await
.unwrap();
let delete_point_op = delete_point_operation(4);
shard
.update(delete_point_op.into(), true, hw_acc.clone())
.await
.unwrap();
std::thread::sleep(std::time::Duration::from_secs(1));
shard.stop_gracefully().await;
payload_index_schema
.write(|schema| {
schema.schema.insert(
"a".parse().unwrap(),
PayloadFieldSchema::FieldType(PayloadSchemaType::Integer),
);
schema.schema.insert(
"b".parse().unwrap(),
PayloadFieldSchema::FieldType(PayloadSchemaType::Keyword),
);
})
.unwrap();
let shard = LocalShard::load(
0,
collection_name,
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
config.optimizer_config.clone(),
Arc::new(Default::default()),
payload_index_schema,
true,
current_runtime.clone(),
current_runtime,
ResourceBudget::default(),
)
.await
.unwrap();
let info = shard.info().await.unwrap();
// Deleting existing payload index is not supported
// assert!(!info
// .payload_schema
// .contains_key(&"location".parse().unwrap()));
assert_eq!(
info.payload_schema
.get(&"a".parse().unwrap())
.unwrap()
.data_type,
PayloadSchemaType::Integer
);
assert_eq!(
info.payload_schema
.get(&"b".parse().unwrap())
.unwrap()
.data_type,
PayloadSchemaType::Keyword
);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/shard_telemetry.rs | lib/collection/src/tests/shard_telemetry.rs | use std::sync::Arc;
use std::time::Duration;
use common::budget::ResourceBudget;
use common::save_on_disk::SaveOnDisk;
use common::types::{DetailsLevel, TelemetryDetail};
use strum::IntoEnumIterator;
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::operations::types::CollectionError;
use crate::shards::local_shard::LocalShard;
use crate::tests::fixtures::*;
#[tokio::test(flavor = "multi_thread")]
#[allow(clippy::await_holding_lock)] // required for creating the synthetic lock test situation
async fn test_shard_telemetry() {
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let config = create_collection_config();
let collection_name = "test".to_string();
let current_runtime: Handle = Handle::current();
let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
let payload_index_schema =
Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
let shard = LocalShard::build(
0,
collection_name.clone(),
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
Arc::new(Default::default()),
payload_index_schema.clone(),
current_runtime.clone(),
current_runtime.clone(),
ResourceBudget::default(),
config.optimizer_config.clone(),
)
.await
.unwrap();
// Validate for all details levels because the implementations are different
for detail_level in DetailsLevel::iter() {
let details = TelemetryDetail::new(detail_level, false);
let telemetry = shard
.get_telemetry_data(details, Duration::from_millis(10))
.await
.unwrap();
assert_eq!(telemetry.num_points, Some(0));
// test that it timeouts if the segment_holder lock can't be acquired
let write_segment_holder_guard = shard.segments().write();
let telemetry = shard
.get_telemetry_data(details, Duration::from_millis(10))
.await;
assert!(matches!(telemetry, Err(CollectionError::Timeout { .. })));
drop(write_segment_holder_guard);
let telemetry = shard
.get_telemetry_data(details, Duration::from_millis(10))
.await
.unwrap();
assert_eq!(telemetry.num_points, Some(0));
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/sparse_vectors_validation_tests.rs | lib/collection/src/tests/sparse_vectors_validation_tests.rs | use std::collections::HashMap;
use api::rest::{
BaseGroupRequest, Batch, BatchVectorStruct, PointStruct, PointVectors, PointsList,
SearchGroupsRequestInternal, SearchRequestInternal, Vector, VectorStruct,
};
use segment::types::VectorNameBuf;
use sparse::common::sparse_vector::SparseVector;
use validator::Validate;
use crate::operations::types::{
ContextExamplePair, DiscoverRequestInternal, RecommendExample, RecommendRequestInternal,
};
fn wrong_sparse_vector() -> SparseVector {
SparseVector {
indices: vec![1, 2],
values: vec![0.0, 1.0, 2.0],
}
}
fn wrong_named_vector_struct() -> api::rest::NamedVectorStruct {
api::rest::NamedVectorStruct::Sparse(segment::data_types::vectors::NamedSparseVector {
name: "sparse".into(),
vector: wrong_sparse_vector(),
})
}
fn wrong_point_struct() -> PointStruct {
let vector_data: HashMap<VectorNameBuf, _> =
HashMap::from([("sparse".into(), Vector::Sparse(wrong_sparse_vector()))]);
PointStruct {
id: 0.into(),
vector: VectorStruct::Named(vector_data),
payload: None,
}
}
fn wrong_recommend_example() -> RecommendExample {
RecommendExample::Sparse(wrong_sparse_vector())
}
fn check_validation_error<T: Validate>(v: T) {
match v.validate() {
Ok(_) => panic!("Expected validation error"),
// check if there is an error message about the length of the sparse vector
Err(e) => assert!(e.to_string().contains("must be the same length as indices")),
}
}
#[test]
fn validate_error_sparse_vector_point_struct() {
check_validation_error(wrong_point_struct());
}
#[test]
fn validate_error_sparse_vector_points_batch() {
let vector_data: HashMap<VectorNameBuf, Vec<_>> =
HashMap::from([("sparse".into(), vec![Vector::Sparse(wrong_sparse_vector())])]);
check_validation_error(Batch {
ids: vec![1.into()],
vectors: BatchVectorStruct::Named(vector_data),
payloads: None,
});
}
#[test]
fn validate_error_sparse_vector_points_list() {
check_validation_error(PointsList {
points: vec![wrong_point_struct()],
shard_key: None,
update_filter: None,
});
}
#[test]
fn validate_error_sparse_vector_search_request_internal() {
check_validation_error(SearchRequestInternal {
vector: wrong_named_vector_struct(),
filter: None,
params: None,
limit: 5,
offset: None,
with_payload: None,
with_vector: None,
score_threshold: None,
});
}
#[test]
fn validate_error_sparse_vector_search_groups_request_internal() {
check_validation_error(SearchGroupsRequestInternal {
vector: wrong_named_vector_struct(),
filter: None,
params: None,
with_payload: None,
with_vector: None,
score_threshold: None,
group_request: BaseGroupRequest {
group_by: "sparse".parse().unwrap(),
group_size: 5,
limit: 5,
with_lookup: None,
},
});
}
#[test]
fn validate_error_sparse_vector_recommend_example() {
check_validation_error(RecommendExample::Sparse(wrong_sparse_vector()));
}
#[test]
fn validate_error_sparse_vector_recommend_request_internal() {
check_validation_error(RecommendRequestInternal {
positive: vec![wrong_recommend_example()],
negative: vec![wrong_recommend_example()],
strategy: None,
filter: None,
params: None,
limit: 5,
offset: None,
with_payload: None,
with_vector: None,
score_threshold: None,
using: None,
lookup_from: None,
});
}
#[test]
fn validate_error_sparse_vector_context_example_pair() {
check_validation_error(ContextExamplePair {
positive: wrong_recommend_example(),
negative: wrong_recommend_example(),
});
}
#[test]
fn validate_error_sparse_vector_discover_request_internal() {
check_validation_error(DiscoverRequestInternal {
target: Some(wrong_recommend_example()),
context: Some(vec![ContextExamplePair {
positive: wrong_recommend_example(),
negative: wrong_recommend_example(),
}]),
filter: None,
params: None,
limit: 5,
offset: None,
with_payload: None,
with_vector: None,
using: None,
lookup_from: None,
});
}
#[test]
fn validate_error_sparse_vector_point_vectors() {
let vector_data: HashMap<VectorNameBuf, _> =
HashMap::from([("sparse".into(), Vector::Sparse(wrong_sparse_vector()))]);
let vector_struct = VectorStruct::Named(vector_data);
check_validation_error(PointVectors {
id: 1.into(),
vector: vector_struct,
});
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/snapshot_test.rs | lib/collection/src/tests/snapshot_test.rs | use std::collections::HashSet;
use std::num::NonZeroU32;
use std::sync::Arc;
use ahash::AHashMap;
use common::budget::ResourceBudget;
use segment::types::Distance;
use tempfile::Builder;
use crate::collection::{Collection, RequestShardTransfer};
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::{NodeType, VectorsConfig};
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::shards::channel_service::ChannelService;
use crate::shards::collection_shard_distribution::CollectionShardDistribution;
use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState};
use crate::tests::fixtures::TEST_OPTIMIZERS_CONFIG;
pub fn dummy_on_replica_failure() -> ChangePeerFromState {
Arc::new(move |_peer_id, _shard_id, _from_state| {})
}
pub fn dummy_request_shard_transfer() -> RequestShardTransfer {
Arc::new(move |_transfer| {})
}
pub fn dummy_abort_shard_transfer() -> AbortShardTransfer {
Arc::new(|_transfer, _reason| {})
}
fn init_logger() {
let _ = env_logger::builder().is_test(true).try_init();
}
async fn _test_snapshot_collection(node_type: NodeType) {
let wal_config = WalConfig {
wal_capacity_mb: 1,
wal_segments_ahead: 0,
wal_retain_closed: 1,
};
let collection_params = CollectionParams {
vectors: VectorsConfig::Single(VectorParamsBuilder::new(4, Distance::Dot).build()),
shard_number: NonZeroU32::new(4).unwrap(),
replication_factor: NonZeroU32::new(3).unwrap(),
write_consistency_factor: NonZeroU32::new(2).unwrap(),
..CollectionParams::empty()
};
let config = CollectionConfigInternal {
params: collection_params,
optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(),
wal_config,
hnsw_config: Default::default(),
quantization_config: Default::default(),
strict_mode_config: Default::default(),
uuid: None,
metadata: None,
};
let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let collection_name = "test".to_string();
let collection_name_rec = "test_rec".to_string();
let mut shards = AHashMap::new();
shards.insert(0, HashSet::from([1]));
shards.insert(1, HashSet::from([1]));
shards.insert(2, HashSet::from([10_000])); // remote shard
shards.insert(3, HashSet::from([1, 20_000, 30_000]));
let storage_config: SharedStorageConfig = SharedStorageConfig {
node_type,
..Default::default()
};
let collection = Collection::new(
collection_name,
1,
collection_dir.path(),
snapshots_path.path(),
&config,
Arc::new(storage_config),
CollectionShardDistribution { shards },
None,
ChannelService::default(),
dummy_on_replica_failure(),
dummy_request_shard_transfer(),
dummy_abort_shard_transfer(),
None,
None,
ResourceBudget::default(),
None,
)
.await
.unwrap();
let snapshots_temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap();
let snapshot_description = collection
.create_snapshot(snapshots_temp_dir.path(), 0)
.await
.unwrap();
assert_eq!(snapshot_description.checksum.unwrap().len(), 64);
{
let recover_dir = Builder::new()
.prefix("test_collection_rec")
.tempdir()
.unwrap();
// Do not recover in local mode if some shards are remote
assert!(
Collection::restore_snapshot(
&snapshots_path.path().join(&snapshot_description.name),
recover_dir.path(),
0,
false,
)
.is_err(),
);
}
let recover_dir = Builder::new()
.prefix("test_collection_rec")
.tempdir()
.unwrap();
if let Err(err) = Collection::restore_snapshot(
&snapshots_path.path().join(snapshot_description.name),
recover_dir.path(),
0,
true,
) {
panic!("Failed to restore snapshot: {err}")
}
let recovered_collection = Collection::load(
collection_name_rec,
1,
recover_dir.path(),
snapshots_path.path(),
Default::default(),
ChannelService::default(),
dummy_on_replica_failure(),
dummy_request_shard_transfer(),
dummy_abort_shard_transfer(),
None,
None,
ResourceBudget::default(),
None,
)
.await;
{
let shards_holder = &recovered_collection.shards_holder.read().await;
let replica_ser_0 = shards_holder.get_shard(0).unwrap();
assert!(replica_ser_0.is_local().await);
let replica_ser_1 = shards_holder.get_shard(1).unwrap();
assert!(replica_ser_1.is_local().await);
let replica_ser_2 = shards_holder.get_shard(2).unwrap();
assert!(!replica_ser_2.is_local().await);
assert_eq!(replica_ser_2.peers().len(), 1);
let replica_ser_3 = shards_holder.get_shard(3).unwrap();
assert!(replica_ser_3.is_local().await);
assert_eq!(replica_ser_3.peers().len(), 3); // 2 remotes + 1 local
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_snapshot_collection_normal() {
init_logger();
_test_snapshot_collection(NodeType::Normal).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_snapshot_collection_listener() {
init_logger();
_test_snapshot_collection(NodeType::Listener).await;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/fixtures.rs | lib/collection/src/tests/fixtures.rs | use ahash::AHashSet;
use segment::data_types::vectors::VectorStructInternal;
use segment::types::{
Condition, Distance, Filter, PayloadFieldSchema, PayloadSchemaType, PointIdType,
};
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted,
};
use crate::operations::types::VectorsConfig;
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::operations::{CollectionUpdateOperations, CreateIndex, FieldIndexOperations};
use crate::optimizers_builder::OptimizersConfig;
pub const TEST_OPTIMIZERS_CONFIG: OptimizersConfig = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 2,
max_segment_size: None,
#[expect(deprecated)]
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: Some(2),
};
pub fn create_collection_config_with_dim(dim: usize) -> CollectionConfigInternal {
let wal_config = WalConfig {
wal_capacity_mb: 1,
wal_segments_ahead: 0,
wal_retain_closed: 1,
};
let collection_params = CollectionParams {
vectors: VectorsConfig::Single(VectorParamsBuilder::new(dim as u64, Distance::Dot).build()),
..CollectionParams::empty()
};
let mut optimizer_config = TEST_OPTIMIZERS_CONFIG.clone();
optimizer_config.default_segment_number = 1;
optimizer_config.flush_interval_sec = 0;
CollectionConfigInternal {
params: collection_params,
optimizer_config,
wal_config,
hnsw_config: Default::default(),
quantization_config: Default::default(),
strict_mode_config: Default::default(),
uuid: None,
metadata: None,
}
}
pub fn create_collection_config() -> CollectionConfigInternal {
create_collection_config_with_dim(4)
}
pub fn upsert_operation() -> CollectionUpdateOperations {
let points = vec![
PointStructPersisted {
id: 1.into(),
vector: VectorStructInternal::from(vec![1.0, 2.0, 3.0, 4.0]).into(),
payload: Some(
serde_json::from_str(r#"{ "location": { "lat": 10.12, "lon": 32.12 } }"#).unwrap(),
),
},
PointStructPersisted {
id: 2.into(),
vector: VectorStructInternal::from(vec![2.0, 1.0, 3.0, 4.0]).into(),
payload: Some(
serde_json::from_str(r#"{ "location": { "lat": 11.12, "lon": 34.82 } }"#).unwrap(),
),
},
PointStructPersisted {
id: 3.into(),
vector: VectorStructInternal::from(vec![3.0, 2.0, 1.0, 4.0]).into(),
payload: Some(
serde_json::from_str(r#"{ "location": [ { "lat": 12.12, "lon": 34.82 }, { "lat": 12.2, "lon": 12.82 }] }"#).unwrap(),
),
},
PointStructPersisted {
id: 4.into(),
vector: VectorStructInternal::from(vec![4.0, 2.0, 3.0, 1.0]).into(),
payload: Some(
serde_json::from_str(r#"{ "location": { "lat": 13.12, "lon": 34.82 } }"#).unwrap(),
),
},
PointStructPersisted {
id: 5.into(),
vector: VectorStructInternal::from(vec![5.0, 2.0, 3.0, 4.0]).into(),
payload: Some(
serde_json::from_str(r#"{ "location": { "lat": 14.12, "lon": 32.12 } }"#).unwrap(),
),
},
];
let op = PointInsertOperationsInternal::from(points);
CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(op))
}
pub fn create_payload_index_operation() -> CollectionUpdateOperations {
CollectionUpdateOperations::FieldIndexOperation(FieldIndexOperations::CreateIndex(
CreateIndex {
field_name: "location".parse().unwrap(),
field_schema: Some(PayloadFieldSchema::FieldType(PayloadSchemaType::Geo)),
},
))
}
pub fn delete_point_operation(idx: u64) -> CollectionUpdateOperations {
CollectionUpdateOperations::PointOperation(PointOperations::DeletePoints {
ids: vec![idx.into()],
})
}
pub fn filter_single_id(id: impl Into<PointIdType>) -> Filter {
Filter::new_must(Condition::HasId(AHashSet::from([id.into()]).into()))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/mod.rs | lib/collection/src/tests/mod.rs | mod fix_payload_indices;
pub mod fixtures;
mod hw_metrics;
mod payload;
mod points_dedup;
mod query_prefetch_offset_limit;
mod sha_256_test;
mod shard_query;
mod shard_telemetry;
mod snapshot_test;
mod sparse_vectors_validation_tests;
mod wal_recovery_test;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use common::budget::ResourceBudget;
use common::counter::hardware_counter::HardwareCounterCell;
use common::save_on_disk::SaveOnDisk;
use futures::future::join_all;
use itertools::Itertools;
use parking_lot::{Mutex, RwLock};
use rand::Rng;
use segment::data_types::vectors::only_default_vector;
use segment::index::hnsw_index::num_rayon_threads;
use segment::types::{Distance, PointIdType};
use tempfile::Builder;
use tokio::time::{Instant, sleep};
use crate::collection::Collection;
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::collection_manager::fixtures::{
PointIdGenerator, get_indexing_optimizer, get_merge_optimizer, random_segment,
};
use crate::collection_manager::holders::segment_holder::{LockedSegment, SegmentHolder, SegmentId};
use crate::collection_manager::optimizers::TrackerStatus;
use crate::collection_manager::optimizers::segment_optimizer::OptimizerThresholds;
use crate::config::CollectionParams;
use crate::operations::types::VectorsConfig;
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::update_handler::Optimizer;
use crate::update_workers::UpdateWorkers;
#[tokio::test]
async fn test_optimization_process() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let dim = 256;
let mut holder = SegmentHolder::default();
let segments_to_merge = vec![
holder.add_new(random_segment(dir.path(), 100, 3, dim)),
holder.add_new(random_segment(dir.path(), 100, 3, dim)),
holder.add_new(random_segment(dir.path(), 100, 3, dim)),
];
let segment_to_index = holder.add_new(random_segment(dir.path(), 100, 110, dim));
let _other_segment_ids: Vec<SegmentId> = vec![
holder.add_new(random_segment(dir.path(), 100, 20, dim)),
holder.add_new(random_segment(dir.path(), 100, 20, dim)),
];
let merge_optimizer: Arc<Optimizer> =
Arc::new(get_merge_optimizer(dir.path(), temp_dir.path(), dim, None));
let indexing_optimizer: Arc<Optimizer> =
Arc::new(get_indexing_optimizer(dir.path(), temp_dir.path(), dim));
let optimizers = Arc::new(vec![merge_optimizer, indexing_optimizer]);
let optimizers_log = Arc::new(Mutex::new(Default::default()));
let total_optimized_points = Arc::new(AtomicUsize::new(0));
let segments: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
let handles = UpdateWorkers::launch_optimization(
optimizers.clone(),
optimizers_log.clone(),
total_optimized_points.clone(),
&ResourceBudget::default(),
segments.clone(),
|| {},
None,
);
// We expect a total of 2 optimizations for the above segments
let mut total_optimizations = 2;
// The optimizers try to saturate the CPU, as number of optimizations tasks we should therefore
// expect the amount that would fit within our CPU budget
// We skip optimizations that use less than half of the preferred CPU budget
let expected_optimization_count = {
let cpus = common::cpu::get_cpu_budget(0);
let hnsw_threads = num_rayon_threads(0);
(cpus / hnsw_threads + usize::from((cpus % hnsw_threads) >= hnsw_threads.div_ceil(2)))
.clamp(1, total_optimizations)
};
assert_eq!(handles.len(), expected_optimization_count);
total_optimizations -= expected_optimization_count;
let join_res = join_all(handles.into_iter().map(|x| x.join_handle).collect_vec()).await;
// Assert optimizer statuses are tracked properly
{
let log = optimizers_log.lock().to_telemetry();
assert_eq!(log.len(), expected_optimization_count);
log.iter().for_each(|entry| {
assert!(["indexing", "merge"].contains(&entry.name.as_str()));
assert_eq!(entry.status, TrackerStatus::Done);
});
}
for res in join_res {
assert!(res.is_ok());
assert_eq!(res.unwrap(), Some(true));
}
let handles = UpdateWorkers::launch_optimization(
optimizers.clone(),
optimizers_log.clone(),
total_optimized_points.clone(),
&ResourceBudget::default(),
segments.clone(),
|| {},
None,
);
// Because we may not have completed all optimizations due to limited CPU budget, we may expect
// another round of optimizations here
assert_eq!(
handles.len(),
expected_optimization_count.min(total_optimizations),
);
let join_res = join_all(handles.into_iter().map(|x| x.join_handle).collect_vec()).await;
for res in join_res {
assert!(res.is_ok());
assert_eq!(res.unwrap(), Some(true));
}
assert_eq!(segments.read().len(), 4);
assert!(segments.read().get(segment_to_index).is_none());
for sid in segments_to_merge {
assert!(segments.read().get(sid).is_none());
}
assert_eq!(total_optimized_points.load(Ordering::Relaxed), 119);
}
#[tokio::test]
async fn test_cancel_optimization() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let mut holder = SegmentHolder::default();
let dim = 256;
for _ in 0..5 {
holder.add_new(random_segment(dir.path(), 100, 1000, dim));
}
let indexing_optimizer: Arc<Optimizer> =
Arc::new(get_indexing_optimizer(dir.path(), temp_dir.path(), dim));
let optimizers = Arc::new(vec![indexing_optimizer]);
let now = Instant::now();
let optimizers_log = Arc::new(Mutex::new(Default::default()));
let total_optimized_points = Arc::new(AtomicUsize::new(0));
let segments: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
let handles = UpdateWorkers::launch_optimization(
optimizers.clone(),
optimizers_log.clone(),
total_optimized_points.clone(),
&ResourceBudget::default(),
segments.clone(),
|| {},
None,
);
sleep(Duration::from_millis(100)).await;
let join_handles = handles.into_iter().filter_map(|h| h.stop()).collect_vec();
let optimization_res = join_all(join_handles).await;
let actual_optimization_duration = now.elapsed().as_millis();
eprintln!("actual_optimization_duration = {actual_optimization_duration:#?} ms");
for res in optimization_res {
let was_finished = res.expect("Should be no errors during optimization");
assert_ne!(was_finished, Some(true));
}
// Assert optimizer statuses are tracked properly
// The optimizers try to saturate the CPU, as number of optimizations tasks we should therefore
// expect the amount that would fit within our CPU budget
{
// We skip optimizations that use less than half of the preferred CPU budget
let expected_optimization_count = {
let cpus = common::cpu::get_cpu_budget(0);
let hnsw_threads = num_rayon_threads(0);
(cpus / hnsw_threads + usize::from((cpus % hnsw_threads) >= hnsw_threads.div_ceil(2)))
.clamp(1, 3)
};
let log = optimizers_log.lock().to_telemetry();
assert_eq!(log.len(), expected_optimization_count);
for status in log {
assert_eq!(status.name, "indexing");
assert!(matches!(status.status, TrackerStatus::Cancelled(_)));
}
}
for (_idx, segment) in segments.read().iter() {
match segment {
LockedSegment::Original(_) => {}
LockedSegment::Proxy(_) => panic!("segment is not restored"),
}
}
assert_eq!(total_optimized_points.load(Ordering::Relaxed), 0);
}
#[tokio::test]
async fn test_new_segment_when_all_over_capacity() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let dim = 256;
let collection_params = CollectionParams {
vectors: VectorsConfig::Single(VectorParamsBuilder::new(dim as u64, Distance::Dot).build()),
..CollectionParams::empty()
};
let optimizer_thresholds = OptimizerThresholds {
max_segment_size_kb: 1,
memmap_threshold_kb: 1_000_000,
indexing_threshold_kb: 1_000_000,
};
let payload_schema_file = dir.path().join("payload.schema");
let payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>> =
Arc::new(SaveOnDisk::load_or_init_default(payload_schema_file).unwrap());
let mut holder = SegmentHolder::default();
holder.add_new(random_segment(dir.path(), 100, 3, dim));
holder.add_new(random_segment(dir.path(), 100, 3, dim));
holder.add_new(random_segment(dir.path(), 100, 3, dim));
holder.add_new(random_segment(dir.path(), 100, 3, dim));
holder.add_new(random_segment(dir.path(), 100, 3, dim));
let segments: Arc<RwLock<_>> = Arc::new(RwLock::new(holder));
// Expect our 5 created segments now
assert_eq!(segments.read().len(), 5);
// On optimization we expect one new segment to be created, all are over capacity
UpdateWorkers::ensure_appendable_segment_with_capacity(
&segments,
dir.path(),
&collection_params,
None,
&optimizer_thresholds,
payload_index_schema.clone(),
)
.unwrap();
assert_eq!(segments.read().len(), 6);
// On reoptimization we don't expect another segment, we have one segment with capacity
UpdateWorkers::ensure_appendable_segment_with_capacity(
&segments,
dir.path(),
&collection_params,
None,
&optimizer_thresholds,
payload_index_schema.clone(),
)
.unwrap();
assert_eq!(segments.read().len(), 6);
let hw_counter = HardwareCounterCell::new();
// Insert some points in the smallest segment to fill capacity
{
let segments_read = segments.read();
let (_, segment) = segments_read
.iter()
.min_by_key(|(_, segment)| {
segment
.get()
.read()
.max_available_vectors_size_in_bytes()
.unwrap()
})
.unwrap();
let mut rnd = rand::rng();
for _ in 0..10 {
let point_id: PointIdType = PointIdGenerator::default().unique();
let random_vector: Vec<_> = (0..dim).map(|_| rnd.random()).collect();
segment
.get()
.write()
.upsert_point(
101,
point_id,
only_default_vector(&random_vector),
&hw_counter,
)
.unwrap();
}
}
// On reoptimization we expect one more segment to be created, all are over capacity
UpdateWorkers::ensure_appendable_segment_with_capacity(
&segments,
dir.path(),
&collection_params,
None,
&optimizer_thresholds,
payload_index_schema,
)
.unwrap();
assert_eq!(segments.read().len(), 7);
}
#[test]
fn check_version_upgrade() {
assert!(!Collection::can_upgrade_storage(
&"0.3.1".parse().unwrap(),
&"0.4.0".parse().unwrap()
));
assert!(!Collection::can_upgrade_storage(
&"0.4.0".parse().unwrap(),
&"0.5.0".parse().unwrap()
));
assert!(!Collection::can_upgrade_storage(
&"0.4.0".parse().unwrap(),
&"0.4.2".parse().unwrap()
));
assert!(Collection::can_upgrade_storage(
&"0.4.0".parse().unwrap(),
&"0.4.1".parse().unwrap()
));
assert!(Collection::can_upgrade_storage(
&"0.4.1".parse().unwrap(),
&"0.4.2".parse().unwrap()
));
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/payload.rs | lib/collection/src/tests/payload.rs | use std::str::FromStr;
use std::sync::Arc;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use ordered_float::OrderedFloat;
use segment::json_path::JsonPath;
use segment::types::{
Condition, FieldCondition, Filter, GeoPoint, GeoRadius, PayloadFieldSchema, PayloadSchemaType,
Range,
};
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::collection::payload_index_schema::{self, PayloadIndexSchema};
use crate::operations::{CollectionUpdateOperations, CreateIndex, FieldIndexOperations};
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::tests::fixtures::{create_collection_config, upsert_operation};
#[tokio::test(flavor = "multi_thread")]
async fn test_payload_missing_index_check() {
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let config = create_collection_config();
let collection_name = "test".to_string();
let current_runtime: Handle = Handle::current();
let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
let payload_index_schema =
Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file.clone()).unwrap());
let shard = LocalShard::build(
0,
collection_name.clone(),
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
Arc::new(Default::default()),
payload_index_schema.clone(),
current_runtime.clone(),
current_runtime.clone(),
ResourceBudget::default(),
config.optimizer_config.clone(),
)
.await
.unwrap();
let upsert_ops = upsert_operation();
shard
.update(upsert_ops.into(), true, HwMeasurementAcc::new())
.await
.unwrap();
let geo_filter = Filter::new_must(Condition::Field(FieldCondition::new_geo_radius(
JsonPath::from_str("location").unwrap(),
GeoRadius {
center: GeoPoint::new(12.0, 34.0).ok().unwrap(),
radius: OrderedFloat(50.0),
},
)));
// No index yet => Filter has unindexed field
assert_eq!(
payload_index_schema::one_unindexed_filter_key(
&shard.payload_index_schema.read(),
&geo_filter
)
.map(|(x, _)| x),
Some(JsonPath::from_str("location").unwrap())
);
// Create unnested index
create_index(
&shard,
&payload_index_schema,
"location",
PayloadSchemaType::Geo,
)
.await;
// Index created => Filter shouldn't have any unindexed field anymore
assert_eq!(
payload_index_schema::one_unindexed_filter_key(
&shard.payload_index_schema.read(),
&geo_filter
),
None
);
// Create nested filter
let condition = Condition::new_nested(
JsonPath::new("location"),
Filter::new_must(Condition::Field(FieldCondition::new_range(
JsonPath::new("lat"),
Range {
gt: Some(12.into()),
..Default::default()
},
))),
);
let num_filter = Filter::new_must(condition);
// Index only exists for 'location' but not 'location.lat'
// so we expect it to be detected as unindexed
assert_eq!(
payload_index_schema::one_unindexed_filter_key(
&shard.payload_index_schema.read(),
&num_filter
)
.map(|(x, _)| x),
Some("location[].lat".parse().unwrap())
);
// Create index for nested field
create_index(
&shard,
&payload_index_schema,
"location[].lat",
PayloadSchemaType::Float,
)
.await;
// Nested field also gets detected as indexed and unindexed fields in the query are empty.
assert_eq!(
payload_index_schema::one_unindexed_filter_key(
&shard.payload_index_schema.read(),
&num_filter
),
None,
);
// Filters combined also completely indexed!
let combined_filter = geo_filter.merge(&num_filter);
assert_eq!(
payload_index_schema::one_unindexed_filter_key(
&shard.payload_index_schema.read(),
&combined_filter
),
None,
);
}
async fn create_index(
shard: &LocalShard,
payload_index_schema: &Arc<SaveOnDisk<PayloadIndexSchema>>,
name: &str,
field_type: PayloadSchemaType,
) {
payload_index_schema
.write(|schema| {
schema.schema.insert(
name.parse().unwrap(),
PayloadFieldSchema::FieldType(field_type),
);
})
.unwrap();
let create_index = CollectionUpdateOperations::FieldIndexOperation(
FieldIndexOperations::CreateIndex(CreateIndex {
field_name: name.parse().unwrap(),
field_schema: Some(PayloadFieldSchema::FieldType(field_type)),
}),
);
shard
.update(create_index.into(), true, HwMeasurementAcc::new())
.await
.unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/points_dedup.rs | lib/collection/src/tests/points_dedup.rs | use std::collections::HashSet;
use std::num::NonZeroU32;
use std::sync::Arc;
use ahash::AHashMap;
use api::rest::OrderByInterface;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use rand::{Rng, rng};
use segment::data_types::vectors::NamedQuery;
use segment::types::{
Distance, ExtendedPointId, Payload, PayloadFieldSchema, PayloadSchemaType, SearchParams,
};
use serde_json::{Map, Value};
use shard::query::query_enum::QueryEnum;
use tempfile::Builder;
use crate::collection::{Collection, RequestShardTransfer};
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
};
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::{
CoreSearchRequest, PointRequestInternal, ScrollRequestInternal, VectorsConfig,
};
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::operations::{CollectionUpdateOperations, OperationWithClockTag};
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::channel_service::ChannelService;
use crate::shards::collection_shard_distribution::CollectionShardDistribution;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState};
use crate::shards::shard::{PeerId, ShardId};
const DIM: u64 = 4;
const PEER_ID: u64 = 1;
const SHARD_COUNT: u32 = 4;
const DUPLICATE_POINT_ID: ExtendedPointId = ExtendedPointId::NumId(100);
/// Create the collection used for deduplication tests.
async fn fixture() -> Collection {
let wal_config = WalConfig {
wal_capacity_mb: 1,
wal_segments_ahead: 0,
wal_retain_closed: 1,
};
let collection_params = CollectionParams {
vectors: VectorsConfig::Single(VectorParamsBuilder::new(DIM, Distance::Dot).build()),
shard_number: NonZeroU32::new(SHARD_COUNT).unwrap(),
replication_factor: NonZeroU32::new(1).unwrap(),
write_consistency_factor: NonZeroU32::new(1).unwrap(),
..CollectionParams::empty()
};
let config = CollectionConfigInternal {
params: collection_params,
optimizer_config: OptimizersConfig::fixture(),
wal_config,
hnsw_config: Default::default(),
quantization_config: Default::default(),
strict_mode_config: Default::default(),
uuid: None,
metadata: None,
};
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
let collection_name = "test".to_string();
let shards: AHashMap<ShardId, HashSet<PeerId>> = (0..SHARD_COUNT)
.map(|i| (i, HashSet::from([PEER_ID])))
.collect();
let storage_config: SharedStorageConfig = SharedStorageConfig::default();
let storage_config = Arc::new(storage_config);
let collection = Collection::new(
collection_name.clone(),
PEER_ID,
collection_dir.path(),
snapshots_path.path(),
&config,
storage_config.clone(),
CollectionShardDistribution { shards },
None,
ChannelService::default(),
dummy_on_replica_failure(),
dummy_request_shard_transfer(),
dummy_abort_shard_transfer(),
None,
None,
ResourceBudget::default(),
None,
)
.await
.unwrap();
// Create payload index to allow order by
collection
.create_payload_index(
"num".parse().unwrap(),
PayloadFieldSchema::FieldType(PayloadSchemaType::Integer),
HwMeasurementAcc::new(),
)
.await
.expect("failed to create payload index");
// Insert two points into all shards directly, a point matching the shard ID, and point 100
// We insert into all shards directly to prevent spreading points by the hashring
// We insert the same point into multiple shards on purpose
let mut rng = rng();
for (shard_id, shard) in collection.shards_holder().write().await.get_shards() {
let op = OperationWithClockTag::from(CollectionUpdateOperations::PointOperation(
PointOperations::UpsertPoints(PointInsertOperationsInternal::PointsList(vec![
PointStructPersisted {
id: u64::from(shard_id).into(),
vector: VectorStructPersisted::Single(
(0..DIM).map(|_| rng.random_range(0.0..1.0)).collect(),
),
payload: Some(Payload(Map::from_iter([(
"num".to_string(),
Value::from(-(shard_id as i32)),
)]))),
},
PointStructPersisted {
id: DUPLICATE_POINT_ID,
vector: VectorStructPersisted::Single(
(0..DIM).map(|_| rng.random_range(0.0..1.0)).collect(),
),
payload: Some(Payload(Map::from_iter([(
"num".to_string(),
Value::from(100 - shard_id as i32),
)]))),
},
])),
));
shard
.update_local(op, true, HwMeasurementAcc::new(), false)
.await
.expect("failed to insert points");
}
// Activate all shards
for shard_id in 0..SHARD_COUNT {
collection
.set_shard_replica_state(shard_id, PEER_ID, ReplicaState::Active, None)
.await
.expect("failed to active shard");
}
collection
}
#[tokio::test(flavor = "multi_thread")]
async fn test_scroll_dedup() {
let collection = fixture().await;
// Scroll all points without ordering
let result = collection
.scroll_by(
ScrollRequestInternal {
offset: None,
limit: Some(usize::MAX),
filter: None,
with_payload: Some(false.into()),
with_vector: false.into(),
order_by: None,
},
None,
&ShardSelectorInternal::All,
None,
HwMeasurementAcc::new(),
)
.await
.expect("failed to search");
assert!(!result.points.is_empty(), "expected some points");
let mut seen = HashSet::new();
for point_id in result.points.iter().map(|point| point.id) {
assert!(
seen.insert(point_id),
"got point id {point_id} more than once, they should be deduplicated",
);
}
// Scroll all points with ordering
let result = collection
.scroll_by(
ScrollRequestInternal {
offset: None,
limit: Some(usize::MAX),
filter: None,
with_payload: Some(false.into()),
with_vector: false.into(),
order_by: Some(OrderByInterface::Key("num".parse().unwrap())),
},
None,
&ShardSelectorInternal::All,
None,
HwMeasurementAcc::new(),
)
.await
.expect("failed to search");
assert!(!result.points.is_empty(), "expected some points");
let mut seen = HashSet::new();
for record in result.points.iter() {
assert!(
seen.insert((record.id, record.order_value)),
"got point id {:?} with order value {:?} more than once, they should be deduplicated",
record.id,
record.order_value,
);
assert!(record.order_value.is_some());
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_retrieve_dedup() {
let collection = fixture().await;
let records = collection
.retrieve(
PointRequestInternal {
ids: (0..u64::from(SHARD_COUNT))
.map(ExtendedPointId::from)
.chain([DUPLICATE_POINT_ID])
.collect(),
with_payload: Some(false.into()),
with_vector: false.into(),
},
None,
&ShardSelectorInternal::All,
None,
HwMeasurementAcc::new(),
)
.await
.expect("failed to search");
assert!(!records.is_empty(), "expected some records");
let mut seen = HashSet::new();
for point_id in records.iter().map(|record| record.id) {
assert!(
seen.insert(point_id),
"got point id {point_id} more than once, they should be deduplicated",
);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_search_dedup() {
let collection = fixture().await;
let hw_acc = HwMeasurementAcc::new();
let points = collection
.search(
CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4])),
filter: None,
params: Some(SearchParams {
exact: true,
..Default::default()
}),
limit: 100,
offset: 0,
with_payload: None,
with_vector: None,
score_threshold: None,
},
None,
&ShardSelectorInternal::All,
None,
hw_acc,
)
.await
.expect("failed to search");
assert!(!points.is_empty(), "expected some points");
let mut seen = HashSet::new();
for point_id in points.iter().map(|point| point.id) {
assert!(
seen.insert(point_id),
"got point id {point_id} more than once, they should be deduplicated",
);
}
}
pub fn dummy_on_replica_failure() -> ChangePeerFromState {
Arc::new(move |_peer_id, _shard_id, _from_state| {})
}
pub fn dummy_request_shard_transfer() -> RequestShardTransfer {
Arc::new(move |_transfer| {})
}
pub fn dummy_abort_shard_transfer() -> AbortShardTransfer {
Arc::new(|_transfer, _reason| {})
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/shard_query.rs | lib/collection/src/tests/shard_query.rs | use std::sync::Arc;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use segment::common::reciprocal_rank_fusion::DEFAULT_RRF_K;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, NamedQuery, VectorInternal};
use segment::types::{PointIdType, WithPayloadInterface, WithVector};
use shard::query::query_enum::QueryEnum;
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::operations::types::CollectionError;
use crate::operations::universal_query::shard_query::{
FusionInternal, ScoringQuery, ShardPrefetch, ShardQueryRequest,
};
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::tests::fixtures::*;
#[tokio::test(flavor = "multi_thread")]
async fn test_shard_query_rrf_rescoring() {
let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
let config = create_collection_config();
let collection_name = "test".to_string();
let current_runtime: Handle = Handle::current();
let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
let payload_index_schema =
Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
let shard = LocalShard::build(
0,
collection_name.clone(),
collection_dir.path(),
Arc::new(RwLock::new(config.clone())),
Arc::new(Default::default()),
payload_index_schema,
current_runtime.clone(),
current_runtime.clone(),
ResourceBudget::default(),
config.optimizer_config.clone(),
)
.await
.unwrap();
let upsert_ops = upsert_operation();
shard
.update(upsert_ops.into(), true, HwMeasurementAcc::new())
.await
.unwrap();
// RRF query without prefetches
let query = ShardQueryRequest {
prefetches: vec![],
query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
filter: None,
score_threshold: None,
limit: 0,
offset: 0,
params: None,
with_vector: WithVector::Bool(false),
with_payload: WithPayloadInterface::Bool(false),
};
let hw_acc = HwMeasurementAcc::new();
let sources_scores = shard
.query_batch(Arc::new(vec![query]), ¤t_runtime, None, hw_acc)
.await;
let expected_error = CollectionError::bad_input(
"Validation failed: cannot apply Fusion without prefetches".to_string(),
);
assert!(matches!(sources_scores, Err(err) if err == expected_error));
// RRF query with single prefetch
let nearest_query = QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(vec![1.0, 2.0, 3.0, 4.0]),
DEFAULT_VECTOR_NAME,
));
let inner_limit = 3;
let nearest_query_prefetch = ShardPrefetch {
prefetches: vec![], // no recursion here
query: Some(ScoringQuery::Vector(nearest_query.clone())),
limit: inner_limit,
params: None,
filter: None,
score_threshold: None,
};
let outer_limit = 2;
let query = ShardQueryRequest {
prefetches: vec![nearest_query_prefetch.clone()],
query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
filter: None,
score_threshold: None,
limit: outer_limit,
offset: 0,
params: None,
with_vector: WithVector::Bool(false),
with_payload: WithPayloadInterface::Bool(false),
};
let hw_acc = HwMeasurementAcc::new();
let sources_scores = shard
.query_batch(Arc::new(vec![query]), ¤t_runtime, None, hw_acc)
.await
.unwrap()
.pop()
.unwrap();
// one score per prefetch
assert_eq!(sources_scores.len(), 1);
// in case of top level RFF we need to propagate intermediate results
// so the number of results is not limited by the outer limit at the shard level
// the first source returned all its inner results
assert_eq!(sources_scores[0].len(), inner_limit);
// no payload/vector were requested
sources_scores[0].iter().for_each(|scored_point| {
assert_eq!(scored_point.vector, None);
assert_eq!(scored_point.payload, None);
});
// RRF query with two prefetches
let inner_limit = 3;
let nearest_query_prefetch = ShardPrefetch {
prefetches: vec![], // no recursion here
query: Some(ScoringQuery::Vector(nearest_query.clone())),
limit: inner_limit,
params: None,
filter: None,
score_threshold: None,
};
let outer_limit = 2;
let query = ShardQueryRequest {
prefetches: vec![
nearest_query_prefetch.clone(),
nearest_query_prefetch.clone(),
],
query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
filter: None,
score_threshold: None,
limit: outer_limit,
offset: 0,
params: None,
with_vector: WithVector::Bool(false),
with_payload: WithPayloadInterface::Bool(false),
};
let hw_acc = HwMeasurementAcc::new();
let sources_scores = shard
.query_batch(Arc::new(vec![query]), ¤t_runtime, None, hw_acc)
.await
.unwrap()
.pop()
.unwrap();
// one score per prefetch
assert_eq!(sources_scores.len(), 2);
// in case of top level RFF we need to propagate intermediate results
// so the number of results is not limited by the outer limit at the shard level
// the sources returned all inner results
for source in sources_scores.iter() {
assert_eq!(source.len(), inner_limit);
}
////// Test that the order of prefetches is preserved in the response //////
let query = ShardQueryRequest {
prefetches: vec![
ShardPrefetch {
filter: Some(filter_single_id(1)),
..nearest_query_prefetch.clone()
},
ShardPrefetch {
filter: Some(filter_single_id(2)),
..nearest_query_prefetch.clone()
},
ShardPrefetch {
filter: Some(filter_single_id(3)),
..nearest_query_prefetch.clone()
},
],
query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
filter: None,
score_threshold: None,
limit: outer_limit,
offset: 0,
params: None,
with_vector: WithVector::Bool(false),
with_payload: WithPayloadInterface::Bool(false),
};
let hw_acc = HwMeasurementAcc::new();
let sources_scores = shard
.query_batch(Arc::new(vec![query]), ¤t_runtime, None, hw_acc)
.await
.unwrap()
.pop()
.unwrap();
// one score per prefetch
assert_eq!(sources_scores.len(), 3);
assert!(sources_scores.iter().all(|source| source.len() == 1));
assert_eq!(sources_scores[0][0].id, PointIdType::NumId(1));
assert_eq!(sources_scores[1][0].id, PointIdType::NumId(2));
assert_eq!(sources_scores[2][0].id, PointIdType::NumId(3));
}
/// Check shard-level `limit` semantics when a vector (non-fusion) root query
/// rescores prefetch results.
///
/// Covers three shapes — no prefetch, one prefetch, two prefetches — and
/// asserts that a vector root query always yields a single merged result list
/// truncated to the outer `limit` (unlike fusion queries, which propagate one
/// list per prefetch).
#[tokio::test(flavor = "multi_thread")]
async fn test_shard_query_vector_rescoring() {
    // Build a fresh local shard in a temp directory.
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let config = create_collection_config();
    let collection_name = "test".to_string();
    let current_runtime: Handle = Handle::current();
    let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
    let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
    let payload_index_schema =
        Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
    let shard = LocalShard::build(
        0,
        collection_name.clone(),
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        Arc::new(Default::default()),
        payload_index_schema,
        current_runtime.clone(),
        current_runtime.clone(),
        ResourceBudget::default(),
        config.optimizer_config.clone(),
    )
    .await
    .unwrap();
    // Insert fixture points so the queries have candidates to score.
    let upsert_ops = upsert_operation();
    shard
        .update(upsert_ops.into(), true, HwMeasurementAcc::new())
        .await
        .unwrap();
    let nearest_query = QueryEnum::Nearest(NamedQuery::new(
        VectorInternal::Dense(vec![1.0, 2.0, 3.0, 4.0]),
        DEFAULT_VECTOR_NAME,
    ));
    let inner_limit = 3;
    let nearest_query_prefetch = ShardPrefetch {
        prefetches: vec![], // no recursion here
        query: Some(ScoringQuery::Vector(nearest_query.clone())),
        limit: inner_limit,
        params: None,
        filter: None,
        score_threshold: None,
    };
    // rescoring against a vector without prefetches
    let outer_limit = 2;
    let query = ShardQueryRequest {
        prefetches: vec![],
        query: Some(ScoringQuery::Vector(nearest_query.clone())),
        filter: None,
        score_threshold: None,
        limit: outer_limit,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(false),
        with_payload: WithPayloadInterface::Bool(false),
    };
    let hw_acc = HwMeasurementAcc::new();
    let sources_scores = shard
        .query_batch(Arc::new(vec![query]), &current_runtime, None, hw_acc)
        .await
        .unwrap()
        .pop()
        .unwrap();
    // only one inner result in absence of prefetches
    assert_eq!(sources_scores.len(), 1);
    // number of results is limited by the outer limit for rescoring
    assert_eq!(sources_scores[0].len(), outer_limit);
    // rescoring against a vector with single prefetch
    let outer_limit = 2;
    let query = ShardQueryRequest {
        prefetches: vec![nearest_query_prefetch.clone()],
        query: Some(ScoringQuery::Vector(nearest_query.clone())),
        filter: None,
        score_threshold: None,
        limit: outer_limit,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(false),
        with_payload: WithPayloadInterface::Bool(false),
    };
    let hw_acc = HwMeasurementAcc::new();
    let sources_scores = shard
        .query_batch(Arc::new(vec![query]), &current_runtime, None, hw_acc)
        .await
        .unwrap()
        .pop()
        .unwrap();
    // only one inner result in absence of prefetches
    assert_eq!(sources_scores.len(), 1);
    // number of results is limited by the outer limit for vector rescoring
    assert_eq!(sources_scores[0].len(), outer_limit);
    // rescoring against a vector with two fetches
    let outer_limit = 2;
    let query = ShardQueryRequest {
        prefetches: vec![
            nearest_query_prefetch.clone(),
            nearest_query_prefetch.clone(),
        ],
        query: Some(ScoringQuery::Vector(nearest_query)),
        filter: None,
        score_threshold: None,
        limit: outer_limit,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(false),
        with_payload: WithPayloadInterface::Bool(false),
    };
    let hw_acc = HwMeasurementAcc::new();
    let sources_scores = shard
        .query_batch(Arc::new(vec![query]), &current_runtime, None, hw_acc)
        .await
        .unwrap()
        .pop()
        .unwrap();
    // only one inner result in absence of fusion
    assert_eq!(sources_scores.len(), 1);
    // merging taking place
    // number of results is limited by the outer limit for vector rescoring
    assert_eq!(sources_scores[0].len(), outer_limit);
}
/// Verify that a shard query with `with_vector`/`with_payload` enabled
/// actually returns both the vector and the payload on every scored point.
#[tokio::test(flavor = "multi_thread")]
async fn test_shard_query_payload_vector() {
    // Build a fresh local shard in a temp directory.
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let config = create_collection_config();
    let collection_name = "test".to_string();
    let current_runtime: Handle = Handle::current();
    let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
    let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
    let payload_index_schema =
        Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
    let shard = LocalShard::build(
        0,
        collection_name.clone(),
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        Arc::new(Default::default()),
        payload_index_schema,
        current_runtime.clone(),
        current_runtime.clone(),
        ResourceBudget::default(),
        config.optimizer_config.clone(),
    )
    .await
    .unwrap();
    // Insert fixture points (with payloads) to query against.
    let upsert_ops = upsert_operation();
    shard
        .update(upsert_ops.into(), true, HwMeasurementAcc::new())
        .await
        .unwrap();
    let nearest_query = QueryEnum::Nearest(NamedQuery::new(
        VectorInternal::Dense(vec![1.0, 2.0, 3.0, 4.0]),
        DEFAULT_VECTOR_NAME,
    ));
    // rescoring against a vector without prefetches
    let outer_limit = 2;
    let query = ShardQueryRequest {
        prefetches: vec![],
        query: Some(ScoringQuery::Vector(nearest_query)),
        filter: None,
        score_threshold: None,
        limit: outer_limit,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(true), // requesting vector
        with_payload: WithPayloadInterface::Bool(true), // requesting payload
    };
    let hw_acc = HwMeasurementAcc::new();
    let sources_scores = shard
        .query_batch(Arc::new(vec![query]), &current_runtime, None, hw_acc)
        .await
        .unwrap()
        .pop()
        .unwrap();
    // only one inner result in absence of prefetches
    assert_eq!(sources_scores.len(), 1);
    // number of results is limited by the outer limit for rescoring
    assert_eq!(sources_scores[0].len(), outer_limit);
    // payload/vector were requested
    sources_scores[0].iter().for_each(|scored_point| {
        assert!(scored_point.vector.is_some());
        assert!(scored_point.payload.is_some());
    });
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/query_prefetch_offset_limit.rs | lib/collection/src/tests/query_prefetch_offset_limit.rs | use std::collections::HashSet;
use std::num::NonZeroU32;
use std::sync::Arc;
use ahash::AHashMap;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use rand::{Rng, rng};
use segment::data_types::vectors::NamedQuery;
use segment::types::{Distance, ExtendedPointId, WithPayloadInterface, WithVector};
use shard::query::query_enum::QueryEnum;
use tempfile::Builder;
use crate::collection::{Collection, RequestShardTransfer};
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::CollectionUpdateOperations;
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
WriteOrdering,
};
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::VectorsConfig;
use crate::operations::universal_query::shard_query::{
ScoringQuery, ShardPrefetch, ShardQueryRequest,
};
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::channel_service::ChannelService;
use crate::shards::collection_shard_distribution::CollectionShardDistribution;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState};
use crate::shards::shard::{PeerId, ShardId};
/// Dimensionality of the fixture vectors.
const DIM: u64 = 4;
/// Peer id of the single node in the fixture cluster.
const PEER_ID: u64 = 1;
/// Number of shards in the fixture collection.
const SHARD_COUNT: u32 = 1;
/// Number of points upserted by the fixture.
const POINT_COUNT: usize = 1_000;
/// Create a collection used for limit+offset tests
///
/// Builds a single-shard, single-replica collection in temp directories,
/// activates all shards, and upserts [`POINT_COUNT`] points with random
/// [`DIM`]-dimensional dense vectors (no payloads).
async fn fixture() -> Collection {
    let wal_config = WalConfig {
        wal_capacity_mb: 1,
        wal_segments_ahead: 0,
        wal_retain_closed: 1,
    };
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Single(VectorParamsBuilder::new(DIM, Distance::Dot).build()),
        shard_number: NonZeroU32::new(SHARD_COUNT).unwrap(),
        replication_factor: NonZeroU32::new(1).unwrap(),
        write_consistency_factor: NonZeroU32::new(1).unwrap(),
        ..CollectionParams::empty()
    };
    let config = CollectionConfigInternal {
        params: collection_params,
        optimizer_config: OptimizersConfig::fixture(),
        wal_config,
        hnsw_config: Default::default(),
        quantization_config: Default::default(),
        strict_mode_config: Default::default(),
        uuid: None,
        metadata: None,
    };
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
    let collection_name = "test".to_string();
    // All shards live on the single fixture peer.
    let shards: AHashMap<ShardId, HashSet<PeerId>> = (0..SHARD_COUNT)
        .map(|i| (i, HashSet::from([PEER_ID])))
        .collect();
    let storage_config: SharedStorageConfig = SharedStorageConfig::default();
    let storage_config = Arc::new(storage_config);
    let collection = Collection::new(
        collection_name.clone(),
        PEER_ID,
        collection_dir.path(),
        snapshots_path.path(),
        &config,
        storage_config.clone(),
        CollectionShardDistribution { shards },
        None,
        ChannelService::default(),
        dummy_on_replica_failure(),
        dummy_request_shard_transfer(),
        dummy_abort_shard_transfer(),
        None,
        None,
        ResourceBudget::default(),
        None,
    )
    .await
    .unwrap();
    // Activate all shards
    for shard_id in 0..SHARD_COUNT {
        collection
            .set_shard_replica_state(shard_id, PEER_ID, ReplicaState::Active, None)
            .await
            .expect("failed to activate shard");
    }
    // Upsert points
    let points = (0..POINT_COUNT)
        .map(|i| PointStructPersisted {
            id: ExtendedPointId::from(i as u64),
            vector: VectorStructPersisted::Single(
                (0..DIM).map(|_| rng().random_range(0.0..1.0)).collect(),
            ),
            payload: None,
        })
        .collect();
    let operation = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(
        PointInsertOperationsInternal::PointsList(points),
    ));
    collection
        .update_from_client(
            operation,
            true,
            WriteOrdering::Weak,
            None,
            HwMeasurementAcc::disposable(),
        )
        .await
        .expect("failed to insert points");
    collection
}
/// Test that limit and offset works properly with prefetches.
///
/// Bug: <https://github.com/qdrant/qdrant/pull/6412>
#[tokio::test(flavor = "multi_thread")]
async fn test_limit_offset_with_prefetch() {
    let collection = fixture().await;
    // Helper: query with a single flat prefetch capped at 100 candidates.
    let do_query = async |offset, limit| {
        collection
            .query(
                ShardQueryRequest {
                    query: Some(ScoringQuery::Vector(QueryEnum::Nearest(
                        NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]),
                    ))),
                    prefetches: vec![ShardPrefetch {
                        prefetches: vec![],
                        query: Some(ScoringQuery::Vector(QueryEnum::Nearest(
                            NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]),
                        ))),
                        limit: 100,
                        params: None,
                        filter: None,
                        score_threshold: None,
                    }],
                    filter: None,
                    params: None,
                    offset,
                    limit,
                    with_payload: WithPayloadInterface::Bool(false),
                    with_vector: WithVector::Bool(false),
                    score_threshold: None,
                },
                None,
                ShardSelectorInternal::All,
                None,
                HwMeasurementAcc::disposable(),
            )
            .await
            .expect("failed to query")
    };
    // With an offset of 5 and a limit of 15, we should still get 15 results
    // This was 10 before <https://github.com/qdrant/qdrant/pull/6412>
    let points = do_query(5, 15).await;
    assert_eq!(points.len(), 15, "expected 15 points, got {}", points.len());
    let points = do_query(10, 10).await;
    assert_eq!(points.len(), 10, "expected 10 points, got {}", points.len());
    // Prefetch limited to 100, with offset of 95 we have just 5 results left
    // This was zero before <https://github.com/qdrant/qdrant/pull/6412>
    let points = do_query(95, 10).await;
    assert_eq!(points.len(), 5, "expected 5 points, got {}", points.len());
    // Use a nested prefetch limiting to 50 results
    let do_query = async |offset, limit| {
        collection
            .query(
                ShardQueryRequest {
                    query: Some(ScoringQuery::Vector(QueryEnum::Nearest(
                        NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]),
                    ))),
                    prefetches: vec![ShardPrefetch {
                        // Innermost prefetch caps candidates at 50 before the
                        // outer (limit 100) rescoring pass.
                        prefetches: vec![ShardPrefetch {
                            prefetches: vec![],
                            query: Some(ScoringQuery::Vector(QueryEnum::Nearest(
                                NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]),
                            ))),
                            limit: 50,
                            params: None,
                            filter: None,
                            score_threshold: None,
                        }],
                        query: Some(ScoringQuery::Vector(QueryEnum::Nearest(
                            NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]),
                        ))),
                        limit: 100,
                        params: None,
                        filter: None,
                        score_threshold: None,
                    }],
                    filter: None,
                    params: None,
                    offset,
                    limit,
                    with_payload: WithPayloadInterface::Bool(false),
                    with_vector: WithVector::Bool(false),
                    score_threshold: None,
                },
                None,
                ShardSelectorInternal::All,
                None,
                HwMeasurementAcc::disposable(),
            )
            .await
            .expect("failed to query")
    };
    // With an offset of 5 and a limit of 15, we should still get 15 results
    // This was 10 before <https://github.com/qdrant/qdrant/pull/6412>
    let points = do_query(5, 15).await;
    assert_eq!(points.len(), 15, "expected 15 points, got {}", points.len());
    let points = do_query(10, 10).await;
    assert_eq!(points.len(), 10, "expected 10 points, got {}", points.len());
    // Nested prefetch limited to 50, with offset of 45 we have just 5 results left
    // This was zero before <https://github.com/qdrant/qdrant/pull/6412>
    let points = do_query(45, 10).await;
    assert_eq!(points.len(), 5, "expected 5 points, got {}", points.len());
}
/// No-op replica-failure callback for tests that never change peer state.
fn dummy_on_replica_failure() -> ChangePeerFromState {
    Arc::new(|_peer, _shard, _state| {})
}
/// No-op shard-transfer request callback for tests.
fn dummy_request_shard_transfer() -> RequestShardTransfer {
    Arc::new(|_requested_transfer| {})
}
/// No-op shard-transfer abort callback for tests.
fn dummy_abort_shard_transfer() -> AbortShardTransfer {
    Arc::new(move |_aborted_transfer, _abort_reason| {})
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/sha_256_test.rs | lib/collection/src/tests/sha_256_test.rs | use std::io::Write;
use tempfile::NamedTempFile;
use crate::common::sha_256::hash_file;
/// Hash a temp file with a known content and compare against the
/// precomputed SHA-256 digest of that exact string.
#[tokio::test]
async fn test_sha_256_digest() -> std::io::Result<()> {
    let mut file = NamedTempFile::new()?;
    write!(file, "This tests if the hashing a file works correctly.")?;
    let result_hash = hash_file(file.path()).await?;
    // Expected SHA-256 of the string written above (lowercase hex).
    assert_eq!(
        result_hash,
        "735e3ec1b05d901d07e84b1504518442aba2395fe3f945a1c962e81a8e152b2d",
    );
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/tests/wal_recovery_test.rs | lib/collection/src/tests/wal_recovery_test.rs | use std::sync::Arc;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use segment::types::{PayloadFieldSchema, PayloadSchemaType};
use tempfile::Builder;
use tokio::runtime::Handle;
use tokio::sync::RwLock;
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::tests::fixtures::*;
/// WAL-recovery test: deletions applied before and after a shard reload must
/// be reflected in the payload-index point counts.
///
/// Flow: upsert -> create geo index on "location" -> delete point 4 ->
/// graceful stop -> reload -> delete point 5 -> graceful stop -> reload,
/// asserting the indexed point count drops from 4 to 3.
#[tokio::test(flavor = "multi_thread")]
async fn test_delete_from_indexed_payload() {
    // Init the logger
    let _ = env_logger::builder().is_test(true).try_init();
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let config = create_collection_config();
    let collection_name = "test".to_string();
    let current_runtime: Handle = Handle::current();
    let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
    let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
    let payload_index_schema =
        Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
    let shard = LocalShard::build(
        0,
        collection_name.clone(),
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        Arc::new(Default::default()),
        payload_index_schema.clone(),
        current_runtime.clone(),
        current_runtime.clone(),
        ResourceBudget::default(),
        config.optimizer_config.clone(),
    )
    .await
    .unwrap();
    let upsert_ops = upsert_operation();
    let hw_acc = HwMeasurementAcc::new();
    shard
        .update(upsert_ops.into(), true, hw_acc.clone())
        .await
        .unwrap();
    // Register the "location" geo index in the schema, then apply the index
    // operation to the shard itself.
    let index_op = create_payload_index_operation();
    payload_index_schema
        .write(|schema| {
            schema.schema.insert(
                "location".parse().unwrap(),
                PayloadFieldSchema::FieldType(PayloadSchemaType::Geo),
            );
        })
        .unwrap();
    shard
        .update(index_op.into(), true, hw_acc.clone())
        .await
        .unwrap();
    let delete_point_op = delete_point_operation(4);
    shard
        .update(delete_point_op.into(), true, hw_acc.clone())
        .await
        .unwrap();
    let info = shard.info().await.unwrap();
    eprintln!("info = {:#?}", info.payload_schema);
    // Indexed point count after the first delete, before any reload.
    let number_of_indexed_points = info
        .payload_schema
        .get(&"location".parse().unwrap())
        .unwrap()
        .points;
    shard.stop_gracefully().await;
    // First reload: recover shard state (including WAL) from disk.
    let shard = LocalShard::load(
        0,
        collection_name.clone(),
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        config.optimizer_config.clone(),
        Arc::new(Default::default()),
        payload_index_schema.clone(),
        true,
        current_runtime.clone(),
        current_runtime.clone(),
        ResourceBudget::default(),
    )
    .await
    .unwrap();
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    eprintln!("dropping point 5");
    let delete_point_op = delete_point_operation(5);
    shard
        .update(delete_point_op.into(), true, hw_acc.clone())
        .await
        .unwrap();
    shard.stop_gracefully().await;
    // Second reload: the delete of point 5 must survive recovery too.
    let shard = LocalShard::load(
        0,
        collection_name,
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        config.optimizer_config.clone(),
        Arc::new(Default::default()),
        payload_index_schema,
        true,
        current_runtime.clone(),
        current_runtime,
        ResourceBudget::default(),
    )
    .await
    .unwrap();
    let info = shard.info().await.unwrap();
    eprintln!("info = {:#?}", info.payload_schema);
    let number_of_indexed_points_after_load = info
        .payload_schema
        .get(&"location".parse().unwrap())
        .unwrap()
        .points;
    assert_eq!(number_of_indexed_points, 4);
    assert_eq!(number_of_indexed_points_after_load, 3);
}
/// WAL-recovery test for a *partial* flush: a delete whose storage change was
/// only partially flushed must still be recovered from the WAL on reload.
#[tokio::test(flavor = "multi_thread")]
async fn test_partial_flush_recovery() {
    // Init the logger
    let _ = env_logger::builder().is_test(true).try_init();
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let config = create_collection_config();
    let collection_name = "test".to_string();
    let current_runtime: Handle = Handle::current();
    let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap();
    let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json");
    let payload_index_schema =
        Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap());
    let shard = LocalShard::build(
        0,
        collection_name.clone(),
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        Arc::new(Default::default()),
        payload_index_schema.clone(),
        current_runtime.clone(),
        current_runtime.clone(),
        ResourceBudget::default(),
        config.optimizer_config.clone(),
    )
    .await
    .unwrap();
    let upsert_ops = upsert_operation();
    let hw_acc = HwMeasurementAcc::new();
    shard
        .update(upsert_ops.into(), true, hw_acc.clone())
        .await
        .unwrap();
    // Register the "location" geo index in the schema and apply it.
    let index_op = create_payload_index_operation();
    payload_index_schema
        .write(|schema| {
            schema.schema.insert(
                "location".parse().unwrap(),
                PayloadFieldSchema::FieldType(PayloadSchemaType::Geo),
            );
        })
        .unwrap();
    shard
        .update(index_op.into(), true, hw_acc.clone())
        .await
        .unwrap();
    // Take control of flushing: stop the background worker, flush everything
    // once, then perform a delete followed by only a partial flush.
    shard.stop_flush_worker().await;
    shard.full_flush();
    let delete_point_op = delete_point_operation(4);
    shard
        .update(delete_point_op.into(), true, hw_acc.clone())
        .await
        .unwrap();
    // This only flushed id-tracker-mapping, but not the storage change
    shard.partial_flush();
    shard.stop_gracefully().await;
    let shard = LocalShard::load(
        0,
        collection_name,
        collection_dir.path(),
        Arc::new(RwLock::new(config.clone())),
        config.optimizer_config.clone(),
        Arc::new(Default::default()),
        payload_index_schema,
        true,
        current_runtime.clone(),
        current_runtime,
        ResourceBudget::default(),
    )
    .await
    .unwrap();
    let info = shard.info().await.unwrap();
    eprintln!("info = {:#?}", info.payload_schema);
    let number_of_indexed_points_after_load = info
        .payload_schema
        .get(&"location".parse().unwrap())
        .unwrap()
        .points;
    // The delete of point 4 must have been replayed from the WAL on load.
    assert_eq!(number_of_indexed_points_after_load, 4);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/lookup/types.rs | lib/collection/src/lookup/types.rs | use std::fmt::Display;
use segment::data_types::groups::GroupId;
use segment::types::PointIdType;
use uuid::Uuid;
use super::WithLookup;
/// User-facing lookup specification: either just a collection name (with
/// default payload/vector options applied) or a fully specified `WithLookup`.
#[derive(Debug, Clone, PartialEq)]
pub enum WithLookupInterface {
    /// Lookup in the named collection using default options
    Collection(String),
    /// Fully specified lookup options
    WithLookup(WithLookup),
}
/// Convert the REST-layer lookup interface into the internal representation,
/// variant by variant.
impl From<api::rest::WithLookupInterface> for WithLookupInterface {
    fn from(rest: api::rest::WithLookupInterface) -> Self {
        use api::rest::WithLookupInterface as Rest;
        match rest {
            Rest::Collection(name) => Self::Collection(name),
            Rest::WithLookup(options) => Self::WithLookup(WithLookup::from(options)),
        }
    }
}
impl From<api::rest::WithLookupInterface> for WithLookup {
fn from(with_lookup: api::rest::WithLookupInterface) -> Self {
match with_lookup {
api::rest::WithLookupInterface::Collection(collection_name) => Self {
collection_name,
with_payload: Some(true.into()),
with_vectors: Some(false.into()),
},
api::rest::WithLookupInterface::WithLookup(with_lookup) => {
WithLookup::from(with_lookup)
}
}
}
}
/// Field-for-field conversion from the REST `WithLookup` options.
impl From<api::rest::WithLookup> for WithLookup {
    fn from(rest: api::rest::WithLookup) -> Self {
        Self {
            collection_name: rest.collection_name,
            with_payload: rest.with_payload,
            with_vectors: rest.with_vectors,
        }
    }
}
/// A value that can be used as a temporary ID
///
/// Bridges group ids, point ids and primitive values under one hashable type.
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
pub enum PseudoId {
    /// Textual id (also used for the string form of UUIDs)
    String(String),
    /// Unsigned numeric id
    NumberU64(u64),
    /// Signed numeric id
    NumberI64(i64),
}
impl Display for PseudoId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PseudoId::String(s) => write!(f, "{s}"),
PseudoId::NumberU64(n) => write!(f, "{n}"),
PseudoId::NumberI64(n) => write!(f, "{n}"),
}
}
}
/// Lossless, variant-for-variant conversion from a group id.
impl From<GroupId> for PseudoId {
    fn from(id: GroupId) -> Self {
        match id {
            GroupId::String(s) => Self::String(s),
            GroupId::NumberU64(n) => Self::NumberU64(n),
            GroupId::NumberI64(n) => Self::NumberI64(n),
        }
    }
}
/// Lossless, variant-for-variant conversion back into a group id.
impl From<PseudoId> for GroupId {
    fn from(id: PseudoId) -> Self {
        match id {
            PseudoId::String(s) => Self::String(s),
            PseudoId::NumberU64(n) => Self::NumberU64(n),
            PseudoId::NumberI64(n) => Self::NumberI64(n),
        }
    }
}
/// Error converting a [`PseudoId`] into a [`PointIdType`].
#[derive(Debug)]
pub enum ConversionError {
    /// A negative `i64` cannot become a `u64` point id
    IntError(core::num::TryFromIntError),
    /// The string is not a valid UUID
    ParseError(uuid::Error),
}
/// Fallible conversion into a point id: strings must parse as UUIDs and
/// signed numbers must be non-negative.
impl TryFrom<PseudoId> for PointIdType {
    type Error = ConversionError;
    fn try_from(value: PseudoId) -> Result<Self, Self::Error> {
        match value {
            PseudoId::String(s) => Ok(PointIdType::Uuid(
                Uuid::try_parse(&s).map_err(ConversionError::ParseError)?,
            )),
            PseudoId::NumberU64(n) => Ok(PointIdType::NumId(n)),
            PseudoId::NumberI64(n) => Ok(PointIdType::NumId(
                u64::try_from(n).map_err(ConversionError::IntError)?,
            )),
        }
    }
}
/// Infallible conversion from a point id; UUIDs are stringified.
impl From<PointIdType> for PseudoId {
    fn from(point_id: PointIdType) -> Self {
        match point_id {
            PointIdType::Uuid(uuid) => Self::String(uuid.to_string()),
            PointIdType::NumId(num) => Self::NumberU64(num),
        }
    }
}
// Infallible conversions from primitive id representations into `PseudoId`.
impl From<u64> for PseudoId {
    fn from(id: u64) -> Self {
        PseudoId::NumberU64(id)
    }
}
impl From<i64> for PseudoId {
    fn from(id: i64) -> Self {
        PseudoId::NumberI64(id)
    }
}
impl From<String> for PseudoId {
    fn from(id: String) -> Self {
        PseudoId::String(id)
    }
}
// Allocates an owned copy of the borrowed string.
impl From<&str> for PseudoId {
    fn from(id: &str) -> Self {
        PseudoId::String(id.to_string())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/lookup/mod.rs | lib/collection/src/lookup/mod.rs | pub mod types;
use std::collections::HashMap;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::Future;
use itertools::Itertools;
use segment::types::{PointIdType, WithPayloadInterface, WithVector};
use serde::Serialize;
use shard::retrieve::record_internal::RecordInternal;
use tokio::sync::RwLockReadGuard;
use types::PseudoId;
use crate::collection::Collection;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{CollectionError, CollectionResult, PointRequestInternal};
/// Fully resolved lookup options: which collection to read records from and
/// what to include on each returned record.
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct WithLookup {
    /// Name of the collection to use for points lookup
    pub collection_name: String,
    /// Options for specifying which payload to include (or not)
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include (or not)
    pub with_vectors: Option<WithVector>,
}
/// Resolve `values` to full point records from the lookup collection.
///
/// Values that cannot be converted to a [`PointIdType`] (e.g. strings that are
/// not valid UUIDs) are silently skipped. Returns a map from the pseudo id
/// reconstructed out of each retrieved point's id to the record itself.
///
/// # Errors
/// Fails if the lookup collection does not exist or the retrieval fails.
pub async fn lookup_ids<'a, F, Fut>(
    request: WithLookup,
    values: Vec<PseudoId>,
    collection_by_name: F,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<HashMap<PseudoId, RecordInternal>>
where
    F: FnOnce(String) -> Fut,
    Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
    let collection = collection_by_name(request.collection_name.clone())
        .await
        .ok_or_else(|| CollectionError::NotFound {
            what: format!("Collection {}", request.collection_name),
        })?;
    // Unconvertible pseudo ids are dropped here rather than reported.
    let ids = values
        .into_iter()
        .filter_map(|v| PointIdType::try_from(v).ok())
        .collect_vec();
    if ids.is_empty() {
        return Ok(HashMap::new());
    }
    let point_request = PointRequestInternal {
        ids,
        with_payload: request.with_payload,
        with_vector: request.with_vectors.unwrap_or_default(),
    };
    let result = collection
        .retrieve(
            point_request,
            read_consistency,
            shard_selection,
            timeout,
            hw_measurement_acc,
        )
        .await?
        .into_iter()
        .map(|point| (PseudoId::from(point.id), point))
        .collect();
    Ok(result)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/state_management.rs | lib/collection/src/collection/state_management.rs | use std::collections::HashSet;
use ahash::AHashMap;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use crate::collection::Collection;
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::collection_state::{ShardInfo, State};
use crate::config::CollectionConfigInternal;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::replica_set::ShardReplicaSet;
use crate::shards::resharding::ReshardState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_holder::ShardTransferChange;
use crate::shards::shard_holder::shard_mapping::ShardKeyMapping;
use crate::shards::transfer::ShardTransfer;
impl Collection {
pub async fn check_config_compatible(
&self,
config: &CollectionConfigInternal,
) -> CollectionResult<()> {
self.collection_config
.read()
.await
.params
.check_compatible(&config.params)
}
pub async fn apply_state(
&self,
state: State,
this_peer_id: PeerId,
abort_transfer: impl FnMut(ShardTransfer),
) -> CollectionResult<()> {
let State {
config,
shards,
resharding,
transfers,
shards_key_mapping,
payload_index_schema,
} = state;
self.apply_config(config).await?;
self.apply_shard_transfers(transfers, this_peer_id, abort_transfer)
.await?;
self.apply_reshard_state(resharding).await?;
self.apply_shard_info(shards, shards_key_mapping).await?;
self.apply_payload_index_schema(payload_index_schema)
.await?;
Ok(())
}
async fn apply_shard_transfers(
&self,
shard_transfers: HashSet<ShardTransfer>,
this_peer_id: PeerId,
mut abort_transfer: impl FnMut(ShardTransfer),
) -> CollectionResult<()> {
let old_transfers = self
.shards_holder
.read()
.await
.shard_transfers
.read()
.clone();
for transfer in shard_transfers.intersection(&old_transfers) {
log::debug!("Aborting shard transfer: {transfer:?}");
}
for transfer in old_transfers.difference(&shard_transfers) {
log::debug!("Aborting shard transfer: {transfer:?}");
}
for transfer in shard_transfers.difference(&old_transfers) {
if transfer.from == this_peer_id {
// Abort transfer as sender should not learn about the transfer from snapshot
// If this happens it mean the sender is probably outdated and it is safer to abort
abort_transfer(transfer.clone());
// Since we remove the transfer from our list below, we don't invoke regular abort logic on this node
// Do it here explicitly so we don't miss a silent abort change
let _ = self
.shards_holder
.read()
.await
.shard_transfer_changes
.send(ShardTransferChange::Abort(transfer.key()));
}
}
self.shards_holder
.write()
.await
.shard_transfers
.write(|transfers| *transfers = shard_transfers)?;
Ok(())
}
async fn apply_reshard_state(&self, resharding: Option<ReshardState>) -> CollectionResult<()> {
// We don't have to explicitly abort resharding or bump shard replica states, because:
// - peers are not driving resharding themselves
// - ongoing (resharding) shard transfers are explicitly updated
// - shard replica set states are explicitly updated
self.shards_holder
.write()
.await
.resharding_state
.write(|state| *state = resharding)?;
Ok(())
}
async fn apply_config(&self, new_config: CollectionConfigInternal) -> CollectionResult<()> {
let recreate_optimizers;
{
let mut config = self.collection_config.write().await;
if config.uuid != new_config.uuid {
return Err(CollectionError::service_error(format!(
"collection {} UUID mismatch: \
UUID of existing collection is different from UUID of collection in Raft snapshot: \
existing collection UUID: {:?}, Raft snapshot collection UUID: {:?}",
self.id, config.uuid, new_config.uuid,
)));
}
if let Err(err) = config.params.check_compatible(&new_config.params) {
// Stop consensus with a service error, if new config is incompatible with current one.
//
// We expect that `apply_config` is only called when configs are compatible, otherwise
// collection have to be *recreated*.
return Err(CollectionError::service_error(err.to_string()));
}
// Destructure `new_config`, to ensure we compare all config fields. Compiler would
// complain, if new field is added to `CollectionConfig` struct, but not destructured
// explicitly. We have to explicitly compare config fields, because we want to compare
// `wal_config` and `strict_mode_config` independently of other fields.
let CollectionConfigInternal {
params,
hnsw_config,
optimizer_config,
wal_config,
quantization_config,
strict_mode_config,
uuid: _,
metadata,
} = &new_config;
let is_core_config_updated = params != &config.params
|| hnsw_config != &config.hnsw_config
|| optimizer_config != &config.optimizer_config
|| quantization_config != &config.quantization_config;
let is_metadata_updated = metadata != &config.metadata;
let is_wal_config_updated = wal_config != &config.wal_config;
let is_strict_mode_config_updated = strict_mode_config != &config.strict_mode_config;
let is_config_updated = is_core_config_updated
|| is_wal_config_updated
|| is_strict_mode_config_updated
|| is_metadata_updated;
if !is_config_updated {
return Ok(());
}
if is_wal_config_updated {
log::warn!(
"WAL config of collection {} updated when applying Raft snapshot, \
but updated WAL config will only be applied on Qdrant restart",
self.id,
);
}
*config = new_config;
// We need to recreate optimizers, if "core" config was updated
recreate_optimizers = is_core_config_updated;
}
self.collection_config.read().await.save(&self.path)?;
self.print_warnings().await;
if recreate_optimizers {
self.recreate_optimizers_blocking().await?;
}
Ok(())
}
/// Apply externally provided per-shard replica state and shard-key mapping to
/// this collection (used when recovering/synchronizing collection state).
///
/// Shards already present locally get their replica state updated in place;
/// shards missing locally are created first, initialized with the given state,
/// and then registered in the shard holder in a second step.
async fn apply_shard_info(
    &self,
    shards: AHashMap<ShardId, ShardInfo>,
    shards_key_mapping: ShardKeyMapping,
) -> CollectionResult<()> {
    // Replica sets created below because they were missing locally; they are
    // registered in the shard holder during the second step.
    let mut extra_shards: AHashMap<ShardId, ShardReplicaSet> = AHashMap::new();
    let shard_ids = shards.keys().copied().collect::<HashSet<_>>();
    // There are two components, where shard-related info is stored:
    // Shard objects themselves and shard_holder, that maps shard_keys to shards.
    // On the first state of the update, we update state of shards themselves
    // and create new shards if needed
    let mut shards_holder = self.shards_holder.write().await;
    for (shard_id, shard_info) in shards {
        let shard_key = shards_key_mapping.shard_key(shard_id);
        match shards_holder.get_shard_mut(shard_id) {
            Some(replica_set) => {
                // Shard already exists locally: just sync its replica state.
                replica_set
                    .apply_state(shard_info.replicas, shard_key)
                    .await?;
            }
            None => {
                // Shard is unknown locally: build a replica set for it and
                // apply the target state before registering it below.
                let shard_replicas: Vec<_> = shard_info.replicas.keys().copied().collect();
                let mut replica_set = self
                    .create_replica_set(shard_id, shard_key.clone(), &shard_replicas, None)
                    .await?;
                replica_set
                    .apply_state(shard_info.replicas, shard_key)
                    .await?;
                extra_shards.insert(shard_id, replica_set);
            }
        }
    }
    // On the second step, we register missing shards and remove extra shards
    shards_holder
        .apply_shards_state(shard_ids, shards_key_mapping, extra_shards)
        .await
}
/// Bring the collection's payload indexes in line with the given target schema:
/// drop indexes that are absent from the target, then (re-)create every index
/// the target declares.
async fn apply_payload_index_schema(
    &self,
    payload_index_schema: PayloadIndexSchema,
) -> CollectionResult<()> {
    let current_state = self.state().await;
    // Collect fields indexed now but absent from the target schema, then drop them.
    let obsolete_fields: Vec<_> = current_state
        .payload_index_schema
        .schema
        .keys()
        .filter(|field| !payload_index_schema.schema.contains_key(*field))
        .cloned()
        .collect();
    for field in obsolete_fields {
        self.drop_payload_index(field).await?;
    }
    // (Re-)create every index from the target schema.
    // This function is only used in collection state recovery and thus an unmeasured internal operation.
    for (field, schema) in payload_index_schema.schema {
        self.create_payload_index(field, schema, HwMeasurementAcc::disposable())
            .await?;
    }
    Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/distance_matrix.rs | lib/collection/src/collection/distance_matrix.rs | use std::time::Duration;
use ahash::AHashSet;
use api::rest::{
SearchMatrixOffsetsResponse, SearchMatrixPair, SearchMatrixPairsResponse,
SearchMatrixRequestInternal,
};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::types::{
Condition, Filter, HasIdCondition, HasVectorCondition, PointIdType, ScoredPoint, VectorNameBuf,
WithPayloadInterface, WithVector,
};
use crate::collection::Collection;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::CollectionResult;
use crate::operations::universal_query::collection_query::{
CollectionQueryRequest, Query, VectorInputInternal, VectorQuery,
};
use crate::operations::universal_query::shard_query::{
SampleInternal, ScoringQuery, ShardQueryRequest,
};
/// Result of a distance-matrix computation over a random sample of points.
///
/// `nearests[i]` holds the nearest neighbors (within the sample) of the point
/// `sample_ids[i]`; both vectors are index-aligned.
#[derive(Debug, Default)]
pub struct CollectionSearchMatrixResponse {
    pub sample_ids: Vec<PointIdType>,    // sampled point ids
    pub nearests: Vec<Vec<ScoredPoint>>, // nearest points for each sampled point
}
/// Internal representation of the distance matrix request, used to convert from REST and gRPC.
pub struct CollectionSearchMatrixRequest {
    /// Number of random points to sample from the collection.
    pub sample_size: usize,
    /// Number of nearest neighbors to report per sampled point.
    pub limit_per_sample: usize,
    /// Optional filter restricting which points may be sampled.
    pub filter: Option<Filter>,
    /// Name of the vector to use for sampling and distance computation.
    pub using: VectorNameBuf,
}
impl CollectionSearchMatrixRequest {
    /// Default number of neighbors per sample when the request omits `limit`.
    pub const DEFAULT_LIMIT_PER_SAMPLE: usize = 3;
    /// Default sample size when the request omits `sample`.
    pub const DEFAULT_SAMPLE: usize = 10;
}
impl From<SearchMatrixRequestInternal> for CollectionSearchMatrixRequest {
    /// Convert the REST request into the internal form, filling in defaults
    /// for any omitted field.
    fn from(request: SearchMatrixRequestInternal) -> Self {
        let SearchMatrixRequestInternal {
            sample,
            limit,
            filter,
            using,
        } = request;
        let sample_size = sample.unwrap_or(Self::DEFAULT_SAMPLE);
        let limit_per_sample = limit.unwrap_or(Self::DEFAULT_LIMIT_PER_SAMPLE);
        // Fall back to the unnamed/default vector when none is specified.
        let using = using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned());
        Self {
            sample_size,
            limit_per_sample,
            filter,
            using,
        }
    }
}
impl From<CollectionSearchMatrixResponse> for SearchMatrixOffsetsResponse {
    /// Flatten the per-sample neighbor lists into parallel row/column offset
    /// arrays plus a flat score array (COO-like sparse matrix layout).
    fn from(response: CollectionSearchMatrixResponse) -> Self {
        let CollectionSearchMatrixResponse {
            sample_ids,
            nearests,
        } = response;
        // Map each sampled point id to its positional offset in `sample_ids`.
        let offset_by_id: std::collections::HashMap<_, _> = sample_ids
            .iter()
            .enumerate()
            .map(|(offset, id)| (id, offset))
            .collect();
        let total_pairs: usize = nearests.iter().map(|row| row.len()).sum();
        let mut offsets_row = Vec::with_capacity(total_pairs);
        let mut offsets_col = Vec::with_capacity(total_pairs);
        for (row, points) in nearests.iter().enumerate() {
            for point in points {
                offsets_row.push(row as u64);
                // Neighbors are sampled points by construction, so the lookup holds.
                offsets_col.push(offset_by_id[&point.id] as u64);
            }
        }
        let scores = nearests
            .into_iter()
            .flatten()
            .map(|point| point.score)
            .collect();
        Self {
            offsets_row,
            offsets_col,
            scores,
            ids: sample_ids,
        }
    }
}
impl From<CollectionSearchMatrixResponse> for SearchMatrixPairsResponse {
    /// Flatten the matrix into explicit `(a, b, score)` pairs, pairing every
    /// sampled point with each of its neighbors.
    fn from(response: CollectionSearchMatrixResponse) -> Self {
        let CollectionSearchMatrixResponse {
            sample_ids,
            nearests,
        } = response;
        let pairs = sample_ids
            .into_iter()
            .zip(nearests)
            .flat_map(|(a, points)| {
                points.into_iter().map(move |point| SearchMatrixPair {
                    a,
                    b: point.id,
                    score: point.score,
                })
            })
            .collect();
        Self { pairs }
    }
}
impl From<CollectionSearchMatrixResponse> for api::grpc::qdrant::SearchMatrixPairs {
fn from(response: CollectionSearchMatrixResponse) -> Self {
let rest_result = SearchMatrixPairsResponse::from(response);
let pairs = rest_result.pairs.into_iter().map(From::from).collect();
Self { pairs }
}
}
impl From<CollectionSearchMatrixResponse> for api::grpc::qdrant::SearchMatrixOffsets {
fn from(response: CollectionSearchMatrixResponse) -> Self {
let rest_result = SearchMatrixOffsetsResponse::from(response);
Self {
offsets_row: rest_result.offsets_row,
offsets_col: rest_result.offsets_col,
scores: rest_result.scores,
ids: rest_result.ids.into_iter().map(From::from).collect(),
}
}
}
impl Collection {
    /// Compute a sparse distance matrix over a random sample of points.
    ///
    /// Steps:
    /// 1. Randomly sample up to `sample_size` points that actually have the
    ///    `using` vector (merged with the user's filter).
    /// 2. For each sampled point, run a nearest-neighbor query restricted to
    ///    the sampled ids themselves (one batched query for all samples).
    /// 3. Strip each point from its own neighbor list, so each row ends up
    ///    with at most `limit_per_sample` entries.
    ///
    /// Returns an empty response when the sample or limit is zero, or when
    /// fewer than 2 points could be sampled (no matrix can be built).
    pub async fn search_points_matrix(
        &self,
        request: CollectionSearchMatrixRequest,
        shard_selection: ShardSelectorInternal,
        read_consistency: Option<ReadConsistency>,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<CollectionSearchMatrixResponse> {
        let start = std::time::Instant::now();
        let CollectionSearchMatrixRequest {
            sample_size,
            limit_per_sample,
            filter,
            using,
        } = request;
        if limit_per_sample == 0 || sample_size == 0 {
            return Ok(Default::default());
        }
        // make sure the vector is present in the point
        let has_vector = Filter::new_must(Condition::HasVector(HasVectorCondition::from(
            using.clone(),
        )));
        // merge user's filter with the has_vector filter
        let filter = Some(
            filter
                .map(|filter| filter.merge(&has_vector))
                .unwrap_or(has_vector),
        );
        // sample random points
        let sampling_query = ShardQueryRequest {
            prefetches: vec![],
            query: Some(ScoringQuery::Sample(SampleInternal::Random)),
            filter,
            score_threshold: None,
            limit: sample_size,
            offset: 0,
            params: None,
            with_vector: WithVector::Selector(vec![using.clone()]), // retrieve the vector
            with_payload: Default::default(),
        };
        let mut sampled_points = self
            .query(
                sampling_query,
                read_consistency,
                shard_selection.clone(),
                timeout,
                hw_measurement_acc.clone(),
            )
            .await?;
        // if we have less than 2 points, we can't build a matrix
        if sampled_points.len() < 2 {
            return Ok(CollectionSearchMatrixResponse::default());
        }
        // Multiple shards may each return up to `sample_size` points; cap the total.
        sampled_points.truncate(sample_size);
        // sort by id for a deterministic order
        sampled_points.sort_unstable_by_key(|p| p.id);
        // collect the sampled point ids in the same order
        let sampled_point_ids: Vec<_> = sampled_points.iter().map(|p| p.id).collect();
        // filter to only include the sampled points in the search
        // use the same filter for all requests to leverage batch search
        let filter = Filter::new_must(Condition::HasId(HasIdCondition::from(
            sampled_point_ids.iter().copied().collect::<AHashSet<_>>(),
        )));
        // Perform nearest neighbor search for each sampled point
        let mut queries = Vec::with_capacity(sampled_points.len());
        for point in sampled_points {
            // The sampling query requested this vector, so it must be present.
            let vector = point
                .vector
                .as_ref()
                .and_then(|v| v.get(&using))
                .map(|v| v.to_owned())
                .expect("Vector not found in the point");
            // nearest query on the sample vector
            let query = Query::Vector(VectorQuery::Nearest(VectorInputInternal::Vector(vector)));
            let query_request = CollectionQueryRequest {
                prefetch: vec![],
                query: Some(query),
                using: using.clone(),
                filter: Some(filter.clone()),
                score_threshold: None,
                limit: limit_per_sample + 1, // +1 to exclude the point itself afterward
                offset: 0,
                params: None,
                with_vector: WithVector::Bool(false),
                with_payload: WithPayloadInterface::Bool(false),
                lookup_from: None,
            };
            queries.push((query_request, shard_selection.clone()));
        }
        // update timeout: subtract the time already spent on sampling
        let timeout = timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
        // We know by construction that lookup_from is not used in the queries
        // so can use placeholder closure here
        let collection_by_name = |_name: String| async move { None };
        // run batch search request
        let mut nearest = self
            .query_batch(
                queries,
                collection_by_name,
                read_consistency,
                timeout,
                hw_measurement_acc,
            )
            .await?;
        // postprocess the results to account for overlapping samples
        for (scores, sample_id) in nearest.iter_mut().zip(sampled_point_ids.iter()) {
            // need to remove the sample_id from the results
            if let Some(sample_pos) = scores.iter().position(|p| p.id == *sample_id) {
                scores.remove(sample_pos);
            } else {
                // if not found pop lowest score
                if scores.len() == limit_per_sample + 1 {
                    // if we have enough results, remove the last one
                    scores.pop();
                }
            }
        }
        Ok(CollectionSearchMatrixResponse {
            sample_ids: sampled_point_ids,
            nearests: nearest,
        })
    }
}
#[cfg(test)]
mod tests {
    use segment::types::ScoredPoint;

    use super::*;

    /// Build a minimal `ScoredPoint` carrying only an id and a score.
    fn scored(id: u64, score: f32) -> ScoredPoint {
        ScoredPoint {
            id: id.into(),
            version: 0,
            score,
            payload: None,
            vector: None,
            shard_key: None,
            order_value: None,
        }
    }

    /// Fixture response with 3 samples and 2 neighbors per sample.
    fn sample_response() -> CollectionSearchMatrixResponse {
        let nearests = vec![
            vec![scored(1, 0.2), scored(2, 0.1)],
            vec![scored(2, 0.4), scored(3, 0.3)],
            vec![scored(1, 0.6), scored(3, 0.5)],
        ];
        CollectionSearchMatrixResponse {
            sample_ids: vec![1.into(), 2.into(), 3.into()],
            nearests,
        }
    }

    #[test]
    fn test_matrix_pairs_response_conversion() {
        let actual = SearchMatrixPairsResponse::from(sample_response());
        let expected = SearchMatrixPairsResponse {
            pairs: vec![
                SearchMatrixPair::new(1, 1, 0.2),
                SearchMatrixPair::new(1, 2, 0.1),
                SearchMatrixPair::new(2, 2, 0.4),
                SearchMatrixPair::new(2, 3, 0.3),
                SearchMatrixPair::new(3, 1, 0.6),
                SearchMatrixPair::new(3, 3, 0.5),
            ],
        };
        assert_eq!(actual, expected);
    }

    #[test]
    fn test_matrix_offsets_response_conversion() {
        let actual = SearchMatrixOffsetsResponse::from(sample_response());
        let expected = SearchMatrixOffsetsResponse {
            offsets_row: vec![0, 0, 1, 1, 2, 2],
            offsets_col: vec![0, 1, 1, 2, 0, 2],
            scores: vec![0.2, 0.1, 0.4, 0.3, 0.6, 0.5],
            ids: vec![1.into(), 2.into(), 3.into()],
        };
        assert_eq!(actual, expected);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/mmr.rs | lib/collection/src/collection/mmr.rs | use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::ScoredPoint;
use shard::query::MmrInternal;
use shard::query::mmr::mmr_from_points_with_vector as mmr_from_points_with_vector_impl;
use tokio::runtime::Handle;
use tokio_util::task::AbortOnDropHandle;
use crate::config::CollectionParams;
use crate::operations::types::{CollectionError, CollectionResult};
/// Re-rank the given scored points with Maximal Marginal Relevance (MMR).
///
/// The MMR computation itself (`mmr_from_points_with_vector_impl`) runs on the
/// search runtime's blocking thread pool and is bounded by `timeout`. The
/// distance metric and multivector configuration for the vector named by
/// `mmr.using` are resolved from `collection_params`; an unknown vector name
/// yields an error.
pub async fn mmr_from_points_with_vector(
    collection_params: &CollectionParams,
    points_with_vector: impl IntoIterator<Item = ScoredPoint> + Send + 'static,
    mmr: MmrInternal,
    limit: usize,
    search_runtime_handle: &Handle,
    timeout: Duration,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
    let distance = collection_params.get_distance(&mmr.using)?;
    let multivector_config = collection_params
        .vectors
        .get_params(&mmr.using)
        .and_then(|vector_params| vector_params.multivector_config);
    // CPU-bound work: offload to the blocking pool of the search runtime.
    let handle = search_runtime_handle.spawn_blocking(move || {
        mmr_from_points_with_vector_impl(
            points_with_vector,
            mmr,
            distance,
            multivector_config,
            limit,
            hw_measurement_acc,
        )
    });
    // Abort the task handle if this future is dropped (e.g. request
    // cancellation); for a blocking task this prevents it from starting if it
    // has not been scheduled yet.
    let task = AbortOnDropHandle::new(handle);
    // Three error layers to unwrap with `???`: timeout elapsed, task join
    // failure, and the MMR computation's own result.
    let result = tokio::time::timeout(timeout, task)
        .await
        .map_err(|_| CollectionError::timeout(timeout, "mmr"))???;
    Ok(result)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/telemetry.rs | lib/collection/src/collection/telemetry.rs | use std::time::Duration;
use common::types::{DetailsLevel, TelemetryDetail};
use crate::collection::Collection;
use crate::operations::types::CollectionResult;
use crate::telemetry::{CollectionConfigTelemetry, CollectionTelemetry};
impl Collection {
    /// Assemble telemetry for this collection.
    ///
    /// Per-shard telemetry, transfer info, and resharding info are gathered
    /// only at detail level 3 or above; below that they are reported as `None`.
    pub async fn get_telemetry_data(
        &self,
        detail: TelemetryDetail,
        timeout: Duration,
    ) -> CollectionResult<CollectionTelemetry> {
        let mut shards_telemetry = None;
        let mut transfers = None;
        let mut resharding = None;
        if detail.level >= DetailsLevel::Level3 {
            let shards_holder = self.shards_holder.read().await;
            let mut per_shard = Vec::new();
            for shard in shards_holder.all_shards() {
                per_shard.push(shard.get_telemetry_data(detail, timeout).await?)
            }
            shards_telemetry = Some(per_shard);
            transfers = Some(
                shards_holder.get_shard_transfer_info(&*self.transfer_tasks.lock().await),
            );
            resharding = Some(
                shards_holder
                    .get_resharding_operations_info()
                    .unwrap_or_default(),
            );
        }
        let shard_clean_tasks = self.clean_local_shards_statuses();
        Ok(CollectionTelemetry {
            id: self.name().to_string(),
            init_time_ms: self.init_time.as_millis() as u64,
            config: CollectionConfigTelemetry::from(self.collection_config.read().await.clone()),
            shards: shards_telemetry,
            transfers,
            resharding,
            // Only report clean tasks when there are any.
            shard_clean_tasks: (!shard_clean_tasks.is_empty()).then_some(shard_clean_tasks),
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/search.rs | lib/collection/src/collection/search.rs | use std::mem;
use std::sync::Arc;
use std::time::Duration;
use ahash::{AHashMap, AHashSet};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::{TryFutureExt, future};
use itertools::{Either, Itertools};
use segment::types::{
ExtendedPointId, Filter, Order, ScoredPoint, WithPayloadInterface, WithVector,
};
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::time::Instant;
use super::Collection;
use crate::events::SlowQueryEvent;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::*;
impl Collection {
#[cfg(feature = "testing")]
/// Run a single core search request; thin wrapper over the batch path.
pub async fn search(
    &self,
    request: CoreSearchRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
    // A zero limit can never yield results; skip the shard round-trip.
    if request.limit == 0 {
        return Ok(Vec::new());
    }
    // Delegate to the batch path with a singleton batch.
    let batch = CoreSearchRequestBatch {
        searches: vec![request],
    };
    let mut results = self
        .do_core_search_batch(
            batch,
            read_consistency,
            shard_selection,
            timeout,
            hw_measurement_acc,
        )
        .await?;
    // Exactly one entry exists, matching the single request.
    Ok(results.pop().unwrap())
}
/// Execute a batch of core search requests across the collection's shards.
///
/// When every request in the batch needs payload and/or vectors, and the
/// estimated volume of records transferred from shards would greatly exceed
/// what is actually returned (many shards and/or large offsets), the batch is
/// executed in two passes: first search without metadata, then fetch
/// payload/vectors only for the winning points. Otherwise a single direct
/// pass is used.
pub async fn core_search_batch(
    &self,
    request: CoreSearchRequestBatch,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
    let start = Instant::now();
    // shortcuts batch if all requests with limit=0
    if request.searches.iter().all(|s| s.limit == 0) {
        return Ok(vec![]);
    }
    // Note `all`: the two-pass optimization below only triggers when *every*
    // request in the batch requires payload (resp. vectors).
    let is_payload_required = request
        .searches
        .iter()
        .all(|s| s.with_payload.as_ref().is_some_and(|p| p.is_required()));
    let with_vectors = request
        .searches
        .iter()
        .all(|s| s.with_vector.as_ref().is_some_and(|wv| wv.is_enabled()));
    let metadata_required = is_payload_required || with_vectors;
    let sum_limits: usize = request.searches.iter().map(|s| s.limit).sum();
    let sum_offsets: usize = request.searches.iter().map(|s| s.offset).sum();
    // Number of records we need to retrieve to fill the search result.
    let require_transfers = self.shards_holder.read().await.len() * (sum_limits + sum_offsets);
    // Actually used number of records.
    let used_transfers = sum_limits;
    // Heuristic: two-pass search only pays off once the over-fetch factor
    // exceeds PAYLOAD_TRANSFERS_FACTOR_THRESHOLD.
    let is_required_transfer_large_enough = require_transfers
        > used_transfers.saturating_mul(super::query::PAYLOAD_TRANSFERS_FACTOR_THRESHOLD);
    if metadata_required && is_required_transfer_large_enough {
        // If there is a significant offset, we need to retrieve the whole result
        // set without payload first and then retrieve the payload.
        // It is required to do this because the payload might be too large to send over the
        // network.
        let mut without_payload_requests = Vec::with_capacity(request.searches.len());
        for search in &request.searches {
            // Clone each request with payload/vector stripped so shards return
            // ids and scores only.
            let mut without_payload_request = search.clone();
            without_payload_request
                .with_payload
                .replace(WithPayloadInterface::Bool(false));
            without_payload_request
                .with_vector
                .replace(WithVector::Bool(false));
            without_payload_requests.push(without_payload_request);
        }
        let without_payload_batch = CoreSearchRequestBatch {
            searches: without_payload_requests,
        };
        let without_payload_results = self
            .do_core_search_batch(
                without_payload_batch,
                read_consistency,
                &shard_selection,
                timeout,
                hw_measurement_acc.clone(),
            )
            .await?;
        // update timeout: account for time spent in the first pass
        let timeout = timeout.map(|t| t.saturating_sub(start.elapsed()));
        // Second pass: fetch payload/vectors only for the selected points.
        let filled_results = without_payload_results
            .into_iter()
            .zip(request.searches.into_iter())
            .map(|(without_payload_result, req)| {
                self.fill_search_result_with_payload(
                    without_payload_result,
                    req.with_payload.clone(),
                    req.with_vector.unwrap_or_default(),
                    read_consistency,
                    &shard_selection,
                    timeout,
                    hw_measurement_acc.clone(),
                )
            });
        future::try_join_all(filled_results).await
    } else {
        let result = self
            .do_core_search_batch(
                request,
                read_consistency,
                &shard_selection,
                timeout,
                hw_measurement_acc,
            )
            .await?;
        Ok(result)
    }
}
/// Fan a search batch out to all selected shards concurrently, tag returned
/// points with their shard key, and merge the per-shard results into one
/// deduplicated, score-ordered result per request. Slow requests are reported
/// afterwards for issue tracking.
async fn do_core_search_batch(
    &self,
    request: CoreSearchRequestBatch,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
    // Shared between per-shard futures without copying the batch.
    let request = Arc::new(request);
    let instant = Instant::now();
    // query all shards concurrently
    let all_searches_res = {
        let shard_holder = self.shards_holder.read().await;
        let target_shards = shard_holder.select_shards(shard_selection)?;
        let all_searches = target_shards.into_iter().map(|(shard, shard_key)| {
            let shard_key = shard_key.cloned();
            shard
                .core_search(
                    request.clone(),
                    read_consistency,
                    shard_selection.is_shard_id(),
                    timeout,
                    hw_measurement_acc.clone(),
                )
                .and_then(move |mut records| async move {
                    // Tag every returned point with the shard key it came
                    // from, if this shard has one.
                    if shard_key.is_none() {
                        return Ok(records);
                    }
                    for batch in &mut records {
                        for point in batch {
                            point.shard_key.clone_from(&shard_key);
                        }
                    }
                    Ok(records)
                })
        });
        future::try_join_all(all_searches).await?
    };
    let result = self
        .merge_from_shards(
            all_searches_res,
            request.clone(),
            !shard_selection.is_shard_id(),
        )
        .await;
    // Report the filters of slow requests so missing payload indexes can be flagged.
    let filters_refs = request.searches.iter().map(|req| req.filter.as_ref());
    self.post_process_if_slow_request(instant.elapsed(), filters_refs);
    result
}
#[allow(clippy::too_many_arguments)]
/// Enrich already-scored points with payload and/or vector data by retrieving
/// the corresponding records. Points deleted between search and retrieve are
/// silently dropped from the result.
pub(crate) async fn fill_search_result_with_payload(
    &self,
    search_result: Vec<ScoredPoint>,
    with_payload: Option<WithPayloadInterface>,
    with_vector: WithVector,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
    // Nothing requested: strip any payload/vector data and return immediately.
    let skip_retrieve = matches!(
        (&with_payload, &with_vector),
        (Some(WithPayloadInterface::Bool(false)), WithVector::Bool(false))
    );
    if skip_retrieve {
        let stripped = search_result
            .into_iter()
            .map(|point| ScoredPoint {
                payload: None,
                vector: None,
                ..point
            })
            .collect();
        return Ok(stripped);
    }
    let retrieve_request = PointRequestInternal {
        ids: search_result.iter().map(|point| point.id).collect(),
        with_payload,
        with_vector,
    };
    let retrieved_records = self
        .retrieve(
            retrieve_request,
            read_consistency,
            shard_selection,
            timeout,
            hw_measurement_acc,
        )
        .await?;
    let mut records_by_id: AHashMap<ExtendedPointId, RecordInternal> = retrieved_records
        .into_iter()
        .map(|record| (record.id, record))
        .collect();
    // Splice retrieved payload/vector back into the scored points, preserving
    // the original ranking order. Missing records mean concurrent deletes —
    // such points are dropped rather than returned half-empty.
    let enriched = search_result
        .into_iter()
        .filter_map(|mut point| {
            records_by_id.remove(&point.id).map(|record| {
                point.payload = record.payload;
                point.vector = record.vector;
                point
            })
        })
        .collect();
    Ok(enriched)
}
/// Merge per-shard batched search results into one result list per request.
///
/// Each shard returns its results already sorted by score, so a k-way merge
/// (`kmerge_by`) keeps the global order; duplicates (the same point returned
/// by several shards/replicas) are removed by id, keeping the first —
/// best-ranked — occurrence. `offset` is applied here only for client-facing
/// requests (`is_client_request`) to avoid applying it twice in distributed
/// mode; shard-internal requests keep `offset + limit` points instead.
async fn merge_from_shards(
    &self,
    mut all_searches_res: Vec<Vec<Vec<ScoredPoint>>>,
    request: Arc<CoreSearchRequestBatch>,
    is_client_request: bool,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
    let batch_size = request.searches.len();
    let collection_params = self.collection_config.read().await.params.clone();
    // Merge results from shards in order and deduplicate based on point ID
    let mut top_results: Vec<Vec<ScoredPoint>> = Vec::with_capacity(batch_size);
    let mut seen_ids = AHashSet::new();
    for (batch_index, request) in request.searches.iter().enumerate() {
        let order = if request.query.is_distance_scored() {
            collection_params
                .get_distance(request.query.get_vector_name())?
                .distance_order()
        } else {
            // Score comes from special handling of the distances in a way that it doesn't
            // directly represent distance anymore, so the order is always `LargeBetter`
            Order::LargeBetter
        };
        // Take this request's result column out of every shard's batch.
        let results_from_shards = all_searches_res
            .iter_mut()
            .map(|res| res.get_mut(batch_index).map_or(Vec::new(), mem::take));
        let merged_iter = match order {
            Order::LargeBetter => Either::Left(results_from_shards.kmerge_by(|a, b| a > b)),
            Order::SmallBetter => Either::Right(results_from_shards.kmerge_by(|a, b| a < b)),
        }
        .filter(|point| seen_ids.insert(point.id));
        // Skip `offset` only for client requests
        // to avoid applying `offset` twice in distributed mode.
        let top_res = if is_client_request && request.offset > 0 {
            merged_iter
                .skip(request.offset)
                .take(request.limit)
                .collect()
        } else {
            merged_iter.take(request.offset + request.limit).collect()
        };
        top_results.push(top_res);
        // Reset dedup state for the next request in the batch.
        seen_ids.clear();
    }
    Ok(top_results)
}
/// If `duration` exceeds the slow-query threshold, publish a `SlowQueryEvent`
/// carrying the request filters and the current payload index schema, so the
/// issue subsystem can suggest missing payload indexes.
pub fn post_process_if_slow_request<'a>(
    &self,
    duration: Duration,
    filters: impl IntoIterator<Item = Option<&'a Filter>>,
) {
    // Fast requests need no follow-up.
    if duration <= crate::problems::UnindexedField::slow_query_threshold() {
        return;
    }
    let filters: Vec<Filter> = filters.into_iter().flatten().cloned().collect();
    let schema = self.payload_index_schema.read().schema.clone();
    issues::publish(SlowQueryEvent {
        collection_id: self.id.clone(),
        filters,
        schema,
    });
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/facet.rs | lib/collection/src/collection/facet.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::TryStreamExt;
use futures::stream::FuturesUnordered;
use itertools::Itertools;
use segment::data_types::facets::{FacetParams, FacetResponse, FacetValueHit};
use super::Collection;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::CollectionResult;
impl Collection {
pub async fn facet(
&self,
request: FacetParams,
shard_selection: ShardSelectorInternal,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<FacetResponse> {
if request.limit == 0 {
return Ok(FacetResponse { hits: vec![] });
}
let request = Arc::new(request);
let shard_holder = self.shards_holder.read().await;
let target_shards = shard_holder.select_shards(&shard_selection)?;
let mut shards_reads_f = target_shards
.iter()
.map(|(shard, _shard_key)| {
shard.facet(
request.clone(),
read_consistency,
shard_selection.is_shard_id(),
timeout,
hw_measurement_acc.clone(),
)
})
.collect::<FuturesUnordered<_>>();
let mut aggregated_results = HashMap::new();
while let Some(response) = shards_reads_f.try_next().await? {
for hit in response.hits {
*aggregated_results.entry(hit.value).or_insert(0) += hit.count;
}
}
let hits = aggregated_results
.into_iter()
.map(|(value, count)| FacetValueHit { value, count })
.k_largest(request.limit)
.collect();
Ok(FacetResponse { hits })
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/mod.rs | lib/collection/src/collection/mod.rs | mod clean;
mod collection_ops;
pub mod distance_matrix;
mod facet;
pub mod mmr;
pub mod payload_index_schema;
mod point_ops;
pub mod query;
mod resharding;
mod search;
mod shard_transfer;
mod sharding_keys;
mod snapshots;
mod state_management;
mod telemetry;
use std::collections::HashMap;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use clean::ShardCleanTasks;
use common::budget::ResourceBudget;
use common::save_on_disk::SaveOnDisk;
use io::storage_version::StorageVersion;
use segment::types::ShardKey;
use semver::Version;
use tokio::runtime::Handle;
use tokio::sync::{Mutex, RwLock, RwLockWriteGuard};
use crate::collection::collection_ops::ABORT_TRANSFERS_ON_SHARD_DROP_FIX_FROM_VERSION;
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::collection_state::{ShardInfo, State};
use crate::common::collection_size_stats::{
CollectionSizeAtomicStats, CollectionSizeStats, CollectionSizeStatsCache,
};
use crate::common::is_ready::IsReady;
use crate::config::CollectionConfigInternal;
use crate::operations::config_diff::{DiffConfig, OptimizersConfigDiff};
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::{CollectionError, CollectionResult, NodeType, OptimizersStatus};
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::channel_service::ChannelService;
use crate::shards::collection_shard_distribution::CollectionShardDistribution;
use crate::shards::local_shard::clock_map::RecoveryPoint;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::replica_set::replica_set_state::ReplicaState::{
Active, Dead, Initializing, Listener,
};
use crate::shards::replica_set::{ChangePeerFromState, ChangePeerState, ShardReplicaSet};
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_holder::shard_mapping::ShardKeyMapping;
use crate::shards::shard_holder::{LockedShardHolder, ShardHolder, shard_not_found_error};
use crate::shards::transfer::helpers::check_transfer_conflicts_strict;
use crate::shards::transfer::transfer_tasks_pool::{TaskResult, TransferTasksPool};
use crate::shards::transfer::{ShardTransfer, ShardTransferMethod};
use crate::shards::{CollectionId, replica_set};
use crate::telemetry::CollectionsAggregatedTelemetry;
/// Collection's data is split into several shards.
pub struct Collection {
    /// Name of the collection.
    pub(crate) id: CollectionId,
    /// All shard replica sets plus the shard-key mapping, behind a lock.
    pub(crate) shards_holder: Arc<LockedShardHolder>,
    /// Collection-level configuration; persisted to the collection directory.
    pub(crate) collection_config: Arc<RwLock<CollectionConfigInternal>>,
    /// Storage configuration shared across collections.
    pub(crate) shared_storage_config: Arc<SharedStorageConfig>,
    /// Payload index schema, persisted on disk via `SaveOnDisk`.
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    /// Optional overrides applied on top of the collection's optimizer config
    /// when building shard optimizers.
    optimizers_overwrite: Option<OptimizersConfigDiff>,
    /// Peer id of this node.
    this_peer_id: PeerId,
    /// Directory holding the collection's data.
    path: PathBuf,
    /// Directory where this collection's snapshots are stored.
    snapshots_path: PathBuf,
    /// Access to communication channels with other peers.
    channel_service: ChannelService,
    /// Pool of currently running shard transfer tasks.
    transfer_tasks: Mutex<TransferTasksPool>,
    /// Callback to request a shard transfer.
    request_shard_transfer_cb: RequestShardTransfer,
    /// Callback to report a replica/peer failure.
    notify_peer_failure_cb: ChangePeerFromState,
    /// Callback to abort an ongoing shard transfer.
    abort_shard_transfer_cb: replica_set::AbortShardTransfer,
    /// Time it took to create or load the collection.
    init_time: Duration,
    // One-way boolean flag that is set to true when the collection is fully initialized
    // i.e. all shards are activated for the first time.
    is_initialized: Arc<IsReady>,
    // Lock to temporary block collection update operations while the collection is being migrated.
    // Lock is acquired for read on update operation and can be acquired for write externally,
    // which will block all update operations until the lock is released.
    updates_lock: Arc<RwLock<()>>,
    // Update runtime handle.
    update_runtime: Handle,
    // Search runtime handle.
    search_runtime: Handle,
    /// Budget limiting resources available to optimizer tasks.
    optimizer_resource_budget: ResourceBudget,
    // Cached statistics of collection size, may be outdated.
    collection_stats_cache: CollectionSizeStatsCache,
    // Background tasks to clean shards
    shard_clean_tasks: ShardCleanTasks,
}
/// Callback used to initiate a shard transfer.
pub type RequestShardTransfer = Arc<dyn Fn(ShardTransfer) + Send + Sync>;

/// Callback invoked when a shard transfer fails; the `&str` argument is a
/// diagnostic string (presumably the failure reason — see callers for the
/// exact contract).
pub type OnTransferFailure = Arc<dyn Fn(ShardTransfer, CollectionId, &str) + Send + Sync>;

/// Callback invoked when a shard transfer finishes successfully.
pub type OnTransferSuccess = Arc<dyn Fn(ShardTransfer, CollectionId) + Send + Sync>;
impl Collection {
/// Create a brand-new collection on this node.
///
/// Builds a `ShardReplicaSet` for every shard assigned by
/// `shard_distribution`, registers them in the shard holder, and finally
/// persists the storage version and the collection config. Persisting the
/// config is the "commit point": only then is the collection considered
/// successfully created.
#[allow(clippy::too_many_arguments)]
pub async fn new(
    name: CollectionId,
    this_peer_id: PeerId,
    path: &Path,
    snapshots_path: &Path,
    collection_config: &CollectionConfigInternal,
    shared_storage_config: Arc<SharedStorageConfig>,
    shard_distribution: CollectionShardDistribution,
    shard_key_mapping: Option<ShardKeyMapping>,
    channel_service: ChannelService,
    on_replica_failure: ChangePeerFromState,
    request_shard_transfer: RequestShardTransfer,
    abort_shard_transfer: replica_set::AbortShardTransfer,
    search_runtime: Option<Handle>,
    update_runtime: Option<Handle>,
    optimizer_resource_budget: ResourceBudget,
    optimizers_overwrite: Option<OptimizersConfigDiff>,
) -> CollectionResult<Self> {
    let start_time = std::time::Instant::now();
    let sharding_method = collection_config.params.sharding_method.unwrap_or_default();
    let mut shard_holder = ShardHolder::new(path, sharding_method)?;
    shard_holder.set_shard_key_mappings(shard_key_mapping.clone().unwrap_or_default())?;
    let payload_index_schema = Arc::new(Self::load_payload_index_schema(path)?);
    let shared_collection_config = Arc::new(RwLock::new(collection_config.clone()));
    for (shard_id, mut peers) in shard_distribution.shards {
        // This peer hosts a local replica iff it appears in the shard's peer
        // set; the remaining peers are remote replicas.
        let is_local = peers.remove(&this_peer_id);
        // Apply node-level optimizer overrides on top of the collection's
        // optimizer config.
        let mut effective_optimizers_config = collection_config.optimizer_config.clone();
        if let Some(optimizers_overwrite) = optimizers_overwrite.clone() {
            effective_optimizers_config =
                effective_optimizers_config.update(&optimizers_overwrite);
        }
        let shard_key = shard_key_mapping
            .as_ref()
            .and_then(|mapping| mapping.shard_key(shard_id));
        let replica_set = ShardReplicaSet::build(
            shard_id,
            shard_key.clone(),
            name.clone(),
            this_peer_id,
            is_local,
            peers,
            on_replica_failure.clone(),
            abort_shard_transfer.clone(),
            path,
            shared_collection_config.clone(),
            effective_optimizers_config,
            shared_storage_config.clone(),
            payload_index_schema.clone(),
            channel_service.clone(),
            // Fall back to the current tokio runtime when no dedicated
            // update/search runtime is provided.
            update_runtime.clone().unwrap_or_else(Handle::current),
            search_runtime.clone().unwrap_or_else(Handle::current),
            optimizer_resource_budget.clone(),
            None,
        )
        .await?;
        shard_holder
            .add_shard(shard_id, replica_set, shard_key)
            .await?;
    }
    let locked_shard_holder = Arc::new(LockedShardHolder::new(shard_holder));
    let collection_stats_cache = CollectionSizeStatsCache::new_with_values(
        Self::estimate_collection_size_stats(&locked_shard_holder).await,
    );
    // Once the config is persisted - the collection is considered to be successfully created.
    CollectionVersion::save(path)?;
    collection_config.save(path)?;
    Ok(Self {
        id: name.clone(),
        shards_holder: locked_shard_holder,
        collection_config: shared_collection_config,
        optimizers_overwrite,
        payload_index_schema,
        shared_storage_config,
        this_peer_id,
        path: path.to_owned(),
        snapshots_path: snapshots_path.to_owned(),
        channel_service,
        transfer_tasks: Mutex::new(TransferTasksPool::new(name.clone())),
        request_shard_transfer_cb: request_shard_transfer.clone(),
        notify_peer_failure_cb: on_replica_failure.clone(),
        abort_shard_transfer_cb: abort_shard_transfer,
        init_time: start_time.elapsed(),
        is_initialized: Default::default(),
        updates_lock: Default::default(),
        update_runtime: update_runtime.unwrap_or_else(Handle::current),
        search_runtime: search_runtime.unwrap_or_else(Handle::current),
        optimizer_resource_budget,
        collection_stats_cache,
        shard_clean_tasks: Default::default(),
    })
}
/// Load an existing collection from disk at `path`.
///
/// Panics (rather than returning an error) when the collection is
/// unrecoverable at this level: the stored version is missing, newer than the
/// running application, too old to migrate (see [`Self::can_upgrade_storage`]),
/// or the collection config cannot be read.
///
/// Runtime handles (`search_runtime`/`update_runtime`) fall back to the
/// current Tokio runtime when not provided.
#[allow(clippy::too_many_arguments)]
pub async fn load(
    collection_id: CollectionId,
    this_peer_id: PeerId,
    path: &Path,
    snapshots_path: &Path,
    shared_storage_config: Arc<SharedStorageConfig>,
    channel_service: ChannelService,
    on_replica_failure: replica_set::ChangePeerFromState,
    request_shard_transfer: RequestShardTransfer,
    abort_shard_transfer: replica_set::AbortShardTransfer,
    search_runtime: Option<Handle>,
    update_runtime: Option<Handle>,
    optimizer_resource_budget: ResourceBudget,
    optimizers_overwrite: Option<OptimizersConfigDiff>,
) -> Self {
    let start_time = std::time::Instant::now();
    // Refuse storage written by a newer Qdrant; migrate (bump the persisted
    // version) only when the gap is small enough to be considered safe.
    let stored_version = CollectionVersion::load(path)
        .expect("Can't read collection version")
        .expect("Collection version is not found");
    let app_version = CollectionVersion::current();
    if stored_version > app_version {
        panic!("Collection version is greater than application version");
    }
    if stored_version != app_version {
        if Self::can_upgrade_storage(&stored_version, &app_version) {
            log::info!("Migrating collection {stored_version} -> {app_version}");
            CollectionVersion::save(path)
                .unwrap_or_else(|err| panic!("Can't save collection version {err}"));
        } else {
            log::error!("Cannot upgrade version {stored_version} to {app_version}.");
            panic!(
                "Cannot upgrade version {stored_version} to {app_version}. Try to use older version of Qdrant first.",
            );
        }
    }
    let collection_config = CollectionConfigInternal::load(path).unwrap_or_else(|err| {
        panic!(
            "Can't read collection config due to {}\nat {}",
            err,
            path.to_str().unwrap(),
        )
    });
    collection_config.validate_and_warn();
    let sharding_method = collection_config.params.sharding_method.unwrap_or_default();
    let mut shard_holder =
        ShardHolder::new(path, sharding_method).expect("Can not create shard holder");
    // Node-local optimizer overrides take precedence over the persisted config.
    let mut effective_optimizers_config = collection_config.optimizer_config.clone();
    if let Some(optimizers_overwrite) = optimizers_overwrite.clone() {
        effective_optimizers_config = effective_optimizers_config.update(&optimizers_overwrite);
    }
    let shared_collection_config = Arc::new(RwLock::new(collection_config.clone()));
    let payload_index_schema = Arc::new(
        Self::load_payload_index_schema(path)
            .expect("Can't load or initialize payload index schema"),
    );
    shard_holder
        .load_shards(
            path,
            &collection_id,
            shared_collection_config.clone(),
            effective_optimizers_config,
            shared_storage_config.clone(),
            payload_index_schema.clone(),
            channel_service.clone(),
            on_replica_failure.clone(),
            abort_shard_transfer.clone(),
            this_peer_id,
            update_runtime.clone().unwrap_or_else(Handle::current),
            search_runtime.clone().unwrap_or_else(Handle::current),
            optimizer_resource_budget.clone(),
        )
        .await;
    let locked_shard_holder = Arc::new(LockedShardHolder::new(shard_holder));
    let collection_stats_cache = CollectionSizeStatsCache::new_with_values(
        Self::estimate_collection_size_stats(&locked_shard_holder).await,
    );
    Self {
        // `collection_id` is cloned here because it is moved into the
        // transfer-tasks pool below (struct fields are evaluated in order).
        id: collection_id.clone(),
        shards_holder: locked_shard_holder,
        collection_config: shared_collection_config,
        optimizers_overwrite,
        payload_index_schema,
        shared_storage_config,
        this_peer_id,
        path: path.to_owned(),
        snapshots_path: snapshots_path.to_owned(),
        channel_service,
        // Last uses of `collection_id` and `request_shard_transfer`:
        // move them instead of cloning (redundant_clone).
        transfer_tasks: Mutex::new(TransferTasksPool::new(collection_id)),
        request_shard_transfer_cb: request_shard_transfer,
        notify_peer_failure_cb: on_replica_failure,
        abort_shard_transfer_cb: abort_shard_transfer,
        init_time: start_time.elapsed(),
        is_initialized: Default::default(),
        updates_lock: Default::default(),
        update_runtime: update_runtime.unwrap_or_else(Handle::current),
        search_runtime: search_runtime.unwrap_or_else(Handle::current),
        optimizer_resource_budget,
        collection_stats_cache,
        shard_clean_tasks: Default::default(),
    }
}
/// Consume the collection and gracefully shut down everything it holds,
/// delegating to the shard holder under an exclusive lock.
pub async fn stop_gracefully(self) {
    self.shards_holder.write().await.stop_gracefully().await;
}
/// Decide whether on-disk storage written by `stored` can be migrated to the
/// running application version `app`.
///
/// Major and minor versions must match exactly; the patch version may only be
/// behind by at most one step.
///
/// Example:
/// 0.4.0 -> 0.4.1 = true
/// 0.4.0 -> 0.4.2 = false
/// 0.4.0 -> 0.5.0 = false
/// 0.4.0 -> 0.5.1 = false
pub fn can_upgrade_storage(stored: &Version, app: &Version) -> bool {
    let same_major = stored.major == app.major;
    let same_minor = stored.minor == app.minor;
    // At most a single patch-level step between consecutive versions.
    let patch_within_one_step = stored.patch + 1 >= app.patch;
    same_major && same_minor && patch_within_one_step
}
/// The collection's name (its identifier).
pub fn name(&self) -> &str {
    self.id.as_str()
}
/// The UUID recorded in the collection config, if any.
pub async fn uuid(&self) -> Option<uuid::Uuid> {
    let config = self.collection_config.read().await;
    config.uuid
}
/// All shard keys currently known to this collection.
pub async fn get_shard_keys(&self) -> Vec<ShardKey> {
    let holder = self.shards_holder.read().await;
    let key_mapping = holder.get_shard_key_to_ids_mapping();
    key_mapping.keys().cloned().collect()
}
/// Return a list of local shards, present on this peer
pub async fn get_local_shards(&self) -> Vec<ShardId> {
    let holder = self.shards_holder.read().await;
    holder.get_local_shards().await
}
/// Whether a shard with the given id exists in this collection.
pub async fn contains_shard(&self, shard_id: ShardId) -> bool {
    let holder = self.shards_holder.read().await;
    holder.contains_shard(shard_id)
}
/// Block until the local replica of `shard_id` reaches `state`, or `timeout`
/// elapses. Errors with `NotFound` if the shard does not exist here.
pub async fn wait_local_shard_replica_state(
    &self,
    shard_id: ShardId,
    state: ReplicaState,
    timeout: Duration,
) -> CollectionResult<()> {
    let holder = self.shards_holder.read().await;
    match holder.get_shard(shard_id) {
        Some(replica_set) => replica_set.wait_for_local_state(state, timeout).await,
        None => Err(CollectionError::NotFound {
            what: format!("Shard {shard_id}"),
        }),
    }
}
/// Apply a consensus-driven replica state change for `peer_id` on `shard_id`.
///
/// If `from_state` is provided, the change is only applied when the replica's
/// current state matches it (compare-and-set semantics).
///
/// Marking a replica `Dead` additionally aborts related shard transfers, may
/// abort an in-progress resharding, and the collection's `is_initialized`
/// flag is re-evaluated at the end of every call.
pub async fn set_shard_replica_state(
    &self,
    shard_id: ShardId,
    peer_id: PeerId,
    new_state: ReplicaState,
    from_state: Option<ReplicaState>,
) -> CollectionResult<()> {
    let shard_holder = self.shards_holder.read().await;
    let replica_set = shard_holder
        .get_shard(shard_id)
        .ok_or_else(|| shard_not_found_error(shard_id))?;
    log::debug!(
        "Changing shard {}:{shard_id} replica state from {:?} to {new_state:?}",
        self.id,
        replica_set.peer_state(peer_id),
    );
    let current_state = replica_set.peer_state(peer_id);
    // Validation:
    //
    // 1. Check that peer exists in the cluster (peer might *not* exist, if it was removed from
    //    the cluster right before `SetShardReplicaSet` was proposed)
    let peer_exists = self
        .channel_service
        .id_to_address
        .read()
        .contains_key(&peer_id);
    let replica_exists = replica_set.peer_state(peer_id).is_some();
    if !peer_exists && !replica_exists {
        return Err(CollectionError::bad_input(format!(
            "Can't set replica {peer_id}:{shard_id} state to {new_state:?}, \
             because replica {peer_id}:{shard_id} does not exist \
             and peer {peer_id} is not part of the cluster"
        )));
    }
    // 2. Check that `from_state` matches current state
    if from_state.is_some() && current_state != from_state {
        return Err(CollectionError::bad_input(format!(
            "Replica {peer_id} of shard {shard_id} has state {current_state:?}, but expected {from_state:?}"
        )));
    }
    // 3. Do not deactivate the last active replica
    //
    // `is_last_active_replica` counts both `Active` and `ReshardingScaleDown` replicas!
    if replica_set.is_last_source_of_truth_replica(peer_id) && !new_state.is_active() {
        return Err(CollectionError::bad_input(format!(
            "Cannot deactivate the last active replica {peer_id} of shard {shard_id}"
        )));
    }
    // Update replica status
    replica_set
        .ensure_replica_with_state(peer_id, new_state)
        .await?;
    if new_state == ReplicaState::Dead {
        // Snapshot resharding state and the set of transfers to cancel
        // *before* releasing the shard holder lock below.
        let resharding_state = shard_holder.resharding_state.read().clone();
        let all_nodes_fixed_cancellation = self
            .channel_service
            .all_peers_at_version(&ABORT_TRANSFERS_ON_SHARD_DROP_FIX_FROM_VERSION);
        let related_transfers = if all_nodes_fixed_cancellation {
            shard_holder.get_related_transfers(peer_id, shard_id)
        } else {
            // This is the old buggy logic, but we have to keep it
            // for maintaining consistency in a cluster with mixed versions.
            shard_holder.get_transfers(|transfer| {
                transfer.shard_id == shard_id
                    && (transfer.from == peer_id || transfer.to == peer_id)
            })
        };
        // Functions below lock `shard_holder`!
        drop(shard_holder);
        let mut abort_resharding_result = CollectionResult::Ok(());
        // Abort resharding, if resharding shard is marked as `Dead`.
        //
        // This branch should only be triggered, if resharding is currently at `MigratingPoints`
        // stage, because target shard should be marked as `Active`, when all resharding transfers
        // are successfully completed, and so the check *right above* this one would be triggered.
        //
        // So, if resharding reached `ReadHashRingCommitted`, this branch *won't* be triggered,
        // and resharding *won't* be cancelled. The update request should *fail* with "failed to
        // update all replicas of a shard" error.
        //
        // If resharding reached `ReadHashRingCommitted`, and this branch is triggered *somehow*,
        // then `Collection::abort_resharding` call should return an error, so no special handling
        // is needed.
        let is_resharding = current_state
            .as_ref()
            .is_some_and(ReplicaState::is_resharding);
        if is_resharding && let Some(state) = resharding_state {
            abort_resharding_result = self.abort_resharding(state.key(), false).await;
        }
        // Terminate transfer if source or target replicas are now dead
        for transfer in related_transfers {
            self.abort_shard_transfer_and_resharding(transfer.key(), None)
                .await?;
        }
        // Propagate resharding errors now
        abort_resharding_result?;
    }
    // If not initialized yet, we need to check if it was initialized by this call
    if !self.is_initialized.check_ready() {
        let state = self.state().await;
        let mut is_ready = true;
        for (_shard_id, shard_info) in state.shards {
            let all_replicas_active = shard_info.replicas.into_iter().all(|(_, state)| {
                matches!(
                    state,
                    ReplicaState::Active | ReplicaState::ReshardingScaleDown
                )
            });
            if !all_replicas_active {
                is_ready = false;
                break;
            }
        }
        if is_ready {
            self.is_initialized.make_ready();
        }
    }
    Ok(())
}
/// Fetch the recovery point of the given shard's local replica.
/// Errors with `NotFound` if the shard does not exist on this peer.
pub async fn shard_recovery_point(&self, shard_id: ShardId) -> CollectionResult<RecoveryPoint> {
    let holder = self.shards_holder.read().await;
    match holder.get_shard(shard_id) {
        Some(replica_set) => replica_set.shard_recovery_point().await,
        None => Err(CollectionError::NotFound {
            what: format!("Shard {shard_id}"),
        }),
    }
}
/// Update the WAL cutoff point of the given shard's local replica.
/// Errors with `NotFound` if the shard does not exist on this peer.
pub async fn update_shard_cutoff_point(
    &self,
    shard_id: ShardId,
    cutoff: &RecoveryPoint,
) -> CollectionResult<()> {
    let holder = self.shards_holder.read().await;
    match holder.get_shard(shard_id) {
        Some(replica_set) => replica_set.update_shard_cutoff_point(cutoff).await,
        None => Err(CollectionError::NotFound {
            what: format!("Shard {shard_id}"),
        }),
    }
}
/// Build a point-in-time `State` snapshot of this collection: config, shard
/// replica sets, active transfers, resharding state, shard-key mapping and
/// payload index schema.
///
/// NOTE(review): the individual reads are not taken under a single lock, so
/// the parts of the returned state may be mutually slightly stale.
pub async fn state(&self) -> State {
    let shards_holder = self.shards_holder.read().await;
    let transfers = shards_holder.shard_transfers.read().clone();
    let resharding = shards_holder.resharding_state.read().clone();
    State {
        config: self.collection_config.read().await.clone(),
        shards: shards_holder
            .get_shards()
            .map(|(shard_id, replicas)| {
                let shard_info = ShardInfo {
                    replicas: replicas.peers(),
                };
                (shard_id, shard_info)
            })
            .collect(),
        resharding,
        transfers,
        shards_key_mapping: shards_holder.get_shard_key_to_ids_mapping(),
        payload_index_schema: self.payload_index_schema.read().clone(),
    }
}
/// Remove all shards of this collection that live on the given peer,
/// first aborting any resharding driven by that peer and any shard
/// transfers it participates in.
pub async fn remove_shards_at_peer(&self, peer_id: PeerId) -> CollectionResult<()> {
    // Abort resharding, if shards are removed from peer driving resharding
    // (which *usually* means the *peer* is being removed from consensus)
    let resharding_state = self
        .resharding_state()
        .await
        .filter(|state| state.peer_id == peer_id);
    if let Some(state) = resharding_state
        && let Err(err) = self.abort_resharding(state.key(), true).await
    {
        // Best effort: log and continue with shard removal anyway.
        log::error!(
            "Failed to abort resharding {} while removing peer {peer_id}: {err}",
            state.key(),
        );
    }
    // Cancel every transfer in which the departing peer is involved.
    for transfer in self.get_related_transfers(peer_id).await {
        self.abort_shard_transfer_and_resharding(transfer.key(), None)
            .await?;
    }
    self.shards_holder
        .read()
        .await
        .remove_shards_at_peer(peer_id)
        .await
}
/// Reconcile this peer's local shard state against consensus and report any
/// divergence through the provided callbacks: unreported finished/failed
/// transfers, shards stuck in `Initializing`, listener/active conversions,
/// and automatic recovery transfers for `Dead` local replicas.
pub async fn sync_local_state(
    &self,
    on_transfer_failure: OnTransferFailure,
    on_transfer_success: OnTransferSuccess,
    on_finish_init: ChangePeerState,
    on_convert_to_listener: ChangePeerState,
    on_convert_from_listener: ChangePeerState,
) -> CollectionResult<()> {
    // Check for disabled replicas
    let shard_holder = self.shards_holder.read().await;
    let get_shard_transfers = |shard_id, from| {
        shard_holder.get_transfers(|transfer| transfer.is_source(from, shard_id))
    };
    for replica_set in shard_holder.all_shards() {
        replica_set.sync_local_state(get_shard_transfers)?;
    }
    // Check for un-reported finished transfers
    let outgoing_transfers = shard_holder.get_outgoing_transfers(self.this_peer_id);
    let tasks_lock = self.transfer_tasks.lock().await;
    for transfer in outgoing_transfers {
        match tasks_lock
            .get_task_status(&transfer.key())
            .map(|s| s.result)
        {
            None => {
                log::debug!(
                    "Transfer {:?} does not exist, but not reported as cancelled. Reporting now.",
                    transfer.key(),
                );
                on_transfer_failure(
                    transfer,
                    self.name().to_string(),
                    "transfer task does not exist",
                );
            }
            Some(TaskResult::Running) => (),
            Some(TaskResult::Finished) => {
                log::debug!(
                    "Transfer {:?} is finished successfully, but not reported. Reporting now.",
                    transfer.key(),
                );
                on_transfer_success(transfer, self.name().to_string());
            }
            Some(TaskResult::Failed) => {
                log::debug!(
                    "Transfer {:?} is failed, but not reported as failed. Reporting now.",
                    transfer.key(),
                );
                on_transfer_failure(transfer, self.name().to_string(), "transfer failed");
            }
        }
    }
    // Count how many transfers we are now proposing
    // We must track this here so we can reference it when checking for transfer limits,
    // because transfers we propose now will not be in the consensus state within the lifetime
    // of this function
    let mut proposed = HashMap::<PeerId, usize>::new();
    // Check for proper replica states
    for replica_set in shard_holder.all_shards() {
        let this_peer_id = replica_set.this_peer_id();
        let shard_id = replica_set.shard_id;
        let peers = replica_set.peers();
        let this_peer_state = peers.get(&this_peer_id).copied();
        if this_peer_state == Some(Initializing) {
            // It is possible, that collection creation didn't report
            // Try to activate shard, as the collection clearly exists
            on_finish_init(this_peer_id, shard_id);
            continue;
        }
        if self.shared_storage_config.node_type == NodeType::Listener {
            // We probably should not switch node type during resharding, so we only check for `Active`,
            // but not `ReshardingScaleDown` replica state here...
            let is_last_active = peers.values().filter(|&&state| state == Active).count() == 1;
            if this_peer_state == Some(Active) && !is_last_active {
                // Convert active node from active to listener
                on_convert_to_listener(this_peer_id, shard_id);
                continue;
            }
        } else if this_peer_state == Some(Listener) {
            // Convert listener node to active
            on_convert_from_listener(this_peer_id, shard_id);
            continue;
        }
        // Don't automatically recover replicas if started in recovery mode
        if self.shared_storage_config.recovery_mode.is_some() {
            continue;
        }
        // Don't recover replicas if not dead
        let is_dead = this_peer_state == Some(Dead);
        if !is_dead {
            continue;
        }
        // Try to find dead replicas with no active transfers
        let transfers = shard_holder.get_transfers(|_| true);
        // Respect shard transfer limit, consider already proposed transfers in our counts
        let (mut incoming, outgoing) = shard_holder.count_shard_transfer_io(this_peer_id);
        incoming += proposed.get(&this_peer_id).copied().unwrap_or(0);
        if self.check_auto_shard_transfer_limit(incoming, outgoing) {
            log::trace!(
                "Postponing automatic shard {shard_id} transfer to stay below limit on this node (incoming: {incoming}, outgoing: {outgoing})",
            );
            continue;
        }
        // Select shard transfer method, prefer user configured method or choose one now
        // If all peers are 1.8+, we try WAL delta transfer, otherwise we use the default method
        let shard_transfer_method = self
            .shared_storage_config
            .default_shard_transfer_method
            .unwrap_or_else(|| {
                let all_support_wal_delta = self
                    .channel_service
                    .all_peers_at_version(&Version::new(1, 8, 0));
                if all_support_wal_delta {
                    ShardTransferMethod::WalDelta
                } else {
                    ShardTransferMethod::default()
                }
            });
        // Try to find a replica to transfer from
        //
        // `active_shards` includes `Active` and `ReshardingScaleDown` replicas!
        for replica_id in replica_set.active_shards(true) {
            let transfer = ShardTransfer {
                from: replica_id,
                to: this_peer_id,
                shard_id,
                to_shard_id: None,
                sync: true,
                // For automatic shard transfers, always select some default method from this point on
                method: Some(shard_transfer_method),
                filter: None,
            };
            if check_transfer_conflicts_strict(&transfer, transfers.iter()).is_some() {
                continue; // this transfer won't work
            }
            // Respect shard transfer limit, consider already proposed transfers in our counts
            let (incoming, mut outgoing) = shard_holder.count_shard_transfer_io(replica_id);
            outgoing += proposed.get(&replica_id).copied().unwrap_or(0);
            if self.check_auto_shard_transfer_limit(incoming, outgoing) {
                log::trace!(
                    "Postponing automatic shard {shard_id} transfer to stay below limit on peer {replica_id} (incoming: {incoming}, outgoing: {outgoing})",
                );
                continue;
            }
            // TODO: Should we, maybe, throttle/backoff this requests a bit?
            if let Err(err) = replica_set.health_check(replica_id).await {
                // TODO: This is rather verbose, not sure if we want to log this at all... :/
                log::trace!(
                    "Replica {replica_id}/{}:{} is not available \
                     to request shard transfer from: \
                     {err}",
                    self.id,
                    replica_set.shard_id,
                );
                continue;
            }
            log::debug!(
                "Recovering shard {}:{shard_id} on peer {this_peer_id} by requesting it from {replica_id}",
                self.name(),
            );
            // Update our counters for proposed transfers, then request (propose) shard transfer
            *proposed.entry(transfer.from).or_default() += 1;
            *proposed.entry(transfer.to).or_default() += 1;
            self.request_shard_transfer(transfer);
            break;
        }
    }
    Ok(())
}
pub async fn get_aggregated_telemetry_data(
&self,
timeout: Duration,
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/snapshots.rs | lib/collection/src/collection/snapshots.rs | use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;
use common::tar_ext::BuilderExt;
use common::tempfile_ext::MaybeTempPath;
use fs_err::File;
use io::file_operations::read_json;
use io::storage_version::StorageVersion as _;
use segment::common::validate_snapshot_archive::open_snapshot_archive_with_validation;
use segment::data_types::manifest::SnapshotManifest;
use segment::types::SnapshotFormat;
use tokio::sync::OwnedRwLockReadGuard;
use super::Collection;
use crate::collection::CollectionVersion;
use crate::collection::payload_index_schema::PAYLOAD_INDEX_CONFIG_FILE;
use crate::common::snapshot_stream::SnapshotStream;
use crate::common::snapshots_manager::SnapshotStorageManager;
use crate::config::{COLLECTION_CONFIG_FILE, CollectionConfigInternal, ShardingMethod};
use crate::operations::snapshot_ops::SnapshotDescription;
use crate::operations::types::{CollectionError, CollectionResult, NodeType};
use crate::shards::local_shard::LocalShard;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::replica_set::ShardReplicaSet;
use crate::shards::replica_set::snapshots::RecoveryType;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_config::{self, ShardConfig};
use crate::shards::shard_holder::shard_mapping::ShardKeyMapping;
use crate::shards::shard_holder::{SHARD_KEY_MAPPING_FILE, ShardHolder, shard_not_found_error};
use crate::shards::shard_path;
// Snapshot-related operations of `Collection`: creating/listing/restoring
// collection- and shard-level snapshots.
impl Collection {
    /// Build a snapshot storage manager from the node-wide snapshots
    /// configuration.
    pub fn get_snapshots_storage_manager(&self) -> CollectionResult<SnapshotStorageManager> {
        SnapshotStorageManager::new(&self.shared_storage_config.snapshots_config)
    }

    /// List descriptions of all snapshots stored under this collection's
    /// snapshots path.
    pub async fn list_snapshots(&self) -> CollectionResult<Vec<SnapshotDescription>> {
        let snapshot_manager = self.get_snapshots_storage_manager()?;
        snapshot_manager.list_snapshots(&self.snapshots_path).await
    }

    /// Creates a snapshot of the collection.
    ///
    /// The snapshot is created in three steps:
    /// 1. Create a temporary directory and create a snapshot of each shard in it.
    /// 2. Archive the temporary directory into a single file.
    /// 3. Move the archive to the final location.
    ///
    /// # Arguments
    ///
    /// * `global_temp_dir`: directory used to host snapshots while they are being created
    /// * `this_peer_id`: current peer id
    ///
    /// returns: Result<SnapshotDescription, CollectionError>
    pub async fn create_snapshot(
        &self,
        global_temp_dir: &Path,
        this_peer_id: PeerId,
    ) -> CollectionResult<SnapshotDescription> {
        // Snapshot name encodes collection, peer and creation timestamp.
        let snapshot_name = format!(
            "{}-{this_peer_id}-{}.snapshot",
            self.name(),
            chrono::Utc::now().format("%Y-%m-%d-%H-%M-%S"),
        );
        // Final location of snapshot
        let snapshot_path = self.snapshots_path.join(&snapshot_name);
        log::info!("Creating collection snapshot {snapshot_name} into {snapshot_path:?}");
        // Dedicated temporary file for archiving this snapshot (deleted on drop)
        let snapshot_temp_arc_file = tempfile::Builder::new()
            .prefix(&format!("{snapshot_name}-arc-"))
            .tempfile_in(global_temp_dir)
            .map_err(|err| {
                CollectionError::service_error(format!(
                    "failed to create temporary snapshot directory {}/{snapshot_name}-arc-XXXX: \
                     {err}",
                    global_temp_dir.display(),
                ))
            })?;
        let tar = BuilderExt::new_seekable_owned(File::create(snapshot_temp_arc_file.path())?);
        // Create snapshot of each shard
        {
            // Scratch directory for per-shard snapshotting; dropped (and
            // removed) once all shards are archived.
            let snapshot_temp_temp_dir = tempfile::Builder::new()
                .prefix(&format!("{snapshot_name}-temp-"))
                .tempdir_in(global_temp_dir)
                .map_err(|err| {
                    CollectionError::service_error(format!(
                        "failed to create temporary snapshot directory {}/{snapshot_name}-temp-XXXX: \
                         {err}",
                        global_temp_dir.display(),
                    ))
                })?;
            let shards_holder = self.shards_holder.read().await;
            // Create snapshot of each shard
            for (shard_id, replica_set) in shards_holder.get_shards() {
                // Relative path of the shard inside the archive.
                let shard_snapshot_path = shard_path(Path::new(""), shard_id);
                // If node is listener, we can save whatever currently is in the storage
                let save_wal = self.shared_storage_config.node_type != NodeType::Listener;
                replica_set
                    .create_snapshot(
                        snapshot_temp_temp_dir.path(),
                        &tar.descend(&shard_snapshot_path)?,
                        SnapshotFormat::Regular,
                        None,
                        save_wal,
                    )
                    .await
                    .map_err(|err| {
                        CollectionError::service_error(format!("failed to create snapshot: {err}"))
                    })?;
            }
        }
        // Save collection config and version
        tar.append_data(
            CollectionVersion::current_raw().as_bytes().to_vec(),
            Path::new(io::storage_version::VERSION_FILE),
        )
        .await?;
        tar.append_data(
            self.collection_config.read().await.to_bytes()?,
            Path::new(COLLECTION_CONFIG_FILE),
        )
        .await?;
        self.shards_holder
            .read()
            .await
            .save_key_mapping_to_tar(&tar)
            .await?;
        self.payload_index_schema
            .save_to_tar(&tar, Path::new(PAYLOAD_INDEX_CONFIG_FILE))
            .await?;
        tar.finish().await.map_err(|err| {
            CollectionError::service_error(format!("failed to create snapshot archive: {err}"))
        })?;
        // Move the finished archive to its final (possibly remote) location.
        let snapshot_manager = self.get_snapshots_storage_manager()?;
        snapshot_manager
            .store_file(snapshot_temp_arc_file.path(), snapshot_path.as_path())
            .await
            .map_err(|err| {
                CollectionError::service_error(format!(
                    "failed to store snapshot archive to {}: {err}",
                    snapshot_temp_arc_file.path().display()
                ))
            })
    }

    /// Restore collection from snapshot
    ///
    /// This method performs blocking IO.
    pub fn restore_snapshot(
        snapshot_path: &Path,
        target_dir: &Path,
        this_peer_id: PeerId,
        is_distributed: bool,
    ) -> CollectionResult<()> {
        // decompress archive
        let mut ar = open_snapshot_archive_with_validation(snapshot_path)?;
        ar.unpack(target_dir)?;
        let config = CollectionConfigInternal::load(target_dir)?;
        config.validate_and_warn();
        let configured_shards = config.params.shard_number.get();
        // Determine the set of shard ids to restore: sequential for auto
        // sharding, or taken from the persisted key mapping for custom.
        let shard_ids_list: Vec<_> = match config.params.sharding_method.unwrap_or_default() {
            ShardingMethod::Auto => (0..configured_shards).collect(),
            ShardingMethod::Custom => {
                // Load shard mapping from disk
                let mapping_path = target_dir.join(SHARD_KEY_MAPPING_FILE);
                debug_assert!(
                    mapping_path.exists(),
                    "Shard mapping file must exist once custom sharding is used"
                );
                if !mapping_path.exists() {
                    Vec::new()
                } else {
                    let shard_key_mapping: ShardKeyMapping = read_json(&mapping_path)?;
                    shard_key_mapping.shard_ids()
                }
            }
        };
        // Check that all shard ids are unique
        debug_assert_eq!(
            shard_ids_list.len(),
            shard_ids_list.iter().collect::<HashSet<_>>().len(),
            "Shard mapping must contain all shards",
        );
        // Restore each shard according to its persisted shard type.
        for shard_id in shard_ids_list {
            let shard_path = shard_path(target_dir, shard_id);
            let shard_config_opt = ShardConfig::load(&shard_path)?;
            if let Some(shard_config) = shard_config_opt {
                match shard_config.r#type {
                    shard_config::ShardType::Local => LocalShard::restore_snapshot(&shard_path)?,
                    shard_config::ShardType::Remote { .. } => {
                        RemoteShard::restore_snapshot(&shard_path)
                    }
                    shard_config::ShardType::Temporary => {}
                    shard_config::ShardType::ReplicaSet => ShardReplicaSet::restore_snapshot(
                        &shard_path,
                        this_peer_id,
                        is_distributed,
                    )?,
                }
            } else {
                return Err(CollectionError::service_error(format!(
                    "Can't read shard config at {}",
                    shard_path.display()
                )));
            }
        }
        Ok(())
    }

    /// Recover a single local shard from an unpacked shard snapshot directory.
    ///
    /// # Cancel safety
    ///
    /// This method is *not* cancel safe.
    pub async fn recover_local_shard_from(
        &self,
        snapshot_shard_path: &Path,
        recovery_type: RecoveryType,
        shard_id: ShardId,
        cancel: cancel::CancellationToken,
    ) -> CollectionResult<bool> {
        // TODO:
        //   Check that shard snapshot is compatible with the collection
        //   (see `VectorsConfig::check_compatible_with_segment_config`)
        // `ShardHolder::recover_local_shard_from` is *not* cancel safe
        // (see `ShardReplicaSet::restore_local_replica_from`)
        let res = self
            .shards_holder
            .read()
            .await
            .recover_local_shard_from(
                snapshot_shard_path,
                recovery_type,
                &self.path,
                shard_id,
                cancel,
            )
            .await?;
        Ok(res)
    }

    /// List descriptions of all snapshots of a single shard.
    pub async fn list_shard_snapshots(
        &self,
        shard_id: ShardId,
    ) -> CollectionResult<Vec<SnapshotDescription>> {
        self.shards_holder
            .read()
            .await
            .list_shard_snapshots(&self.snapshots_path, shard_id)
            .await
    }

    /// Create a snapshot of a single shard, staging it in `temp_dir`.
    pub async fn create_shard_snapshot(
        &self,
        shard_id: ShardId,
        temp_dir: &Path,
    ) -> CollectionResult<SnapshotDescription> {
        self.shards_holder
            .read()
            .await
            .create_shard_snapshot(&self.snapshots_path, self.name(), shard_id, temp_dir)
            .await
    }

    /// Stream a (possibly partial, manifest-driven) snapshot of a single
    /// shard, holding a read guard on the shard for the stream's lifetime.
    pub async fn stream_shard_snapshot(
        &self,
        shard_id: ShardId,
        manifest: Option<SnapshotManifest>,
        temp_dir: &Path,
    ) -> CollectionResult<SnapshotStream> {
        let shard = OwnedRwLockReadGuard::try_map(
            Arc::clone(&self.shards_holder).read_owned().await,
            |x| x.get_shard(shard_id),
        )
        .map_err(|_| shard_not_found_error(shard_id))?;
        ShardHolder::stream_shard_snapshot(shard, self.name(), shard_id, manifest, temp_dir).await
    }

    /// Validate a downloaded shard snapshot, then return a future that
    /// performs the actual (non-cancellable) restore on the update runtime.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    #[expect(clippy::too_many_arguments)]
    pub async fn restore_shard_snapshot(
        &self,
        shard_id: ShardId,
        snapshot_path: MaybeTempPath,
        recovery_type: RecoveryType,
        this_peer_id: PeerId,
        is_distributed: bool,
        temp_dir: &Path,
        cancel: cancel::CancellationToken,
    ) -> CollectionResult<impl Future<Output = CollectionResult<()>> + 'static> {
        // `ShardHolder::validate_shard_snapshot` is cancel safe, so we explicitly cancel it
        // when token is triggered
        let shard_holder = cancel::future::cancel_on_token(cancel.clone(), async {
            let shard_holder = self.shards_holder.clone().read_owned().await;
            shard_holder.validate_shard_snapshot(&snapshot_path).await?;
            CollectionResult::Ok(shard_holder)
        })
        .await??;
        let collection_path = self.path.clone();
        let collection_name = self.name().to_string();
        let temp_dir = temp_dir.to_path_buf();
        // `ShardHolder::restore_shard_snapshot` is *not* cancel safe, so we spawn it onto runtime,
        // so that it won't be cancelled if current future is dropped
        let restore = self.update_runtime.spawn(async move {
            shard_holder
                .restore_shard_snapshot(
                    &snapshot_path,
                    recovery_type,
                    &collection_path,
                    &collection_name,
                    shard_id,
                    this_peer_id,
                    is_distributed,
                    &temp_dir,
                    cancel,
                )
                .await?;
            // Best effort cleanup of the downloaded archive.
            if let Err(err) = snapshot_path.close() {
                log::error!("Failed to remove downloaded snapshot archive after recovery: {err}");
            }
            CollectionResult::Ok(())
        });
        // Flatten nested `Result<Result<()>>` into `Result<()>`
        let restore = async move {
            restore.await.map_err(CollectionError::from)??;
            Ok(())
        };
        Ok(restore)
    }

    /// Error if the given shard does not exist in this collection.
    pub async fn assert_shard_exists(&self, shard_id: ShardId) -> CollectionResult<()> {
        self.shards_holder
            .read()
            .await
            .assert_shard_exists(shard_id)
    }

    /// Try to acquire the per-shard partial snapshot recovery lock without
    /// blocking; `Ok(None)` when it is currently held.
    pub async fn try_take_partial_snapshot_recovery_lock(
        &self,
        shard_id: ShardId,
        recovery_type: RecoveryType,
    ) -> CollectionResult<Option<tokio::sync::OwnedRwLockWriteGuard<()>>> {
        self.shards_holder
            .read()
            .await
            .try_take_partial_snapshot_recovery_lock(shard_id, recovery_type)
    }

    /// Fetch the partial snapshot manifest of a single shard.
    pub async fn get_partial_snapshot_manifest(
        &self,
        shard_id: ShardId,
    ) -> CollectionResult<SnapshotManifest> {
        self.shards_holder
            .read()
            .await
            .get_shard(shard_id)
            .ok_or_else(|| shard_not_found_error(shard_id))?
            .get_partial_snapshot_manifest()
            .await
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/sharding_keys.rs | lib/collection/src/collection/sharding_keys.rs | use std::collections::HashSet;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::ShardKey;
use crate::collection::Collection;
use crate::config::ShardingMethod;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::operations::{
CollectionUpdateOperations, CreateIndex, FieldIndexOperations, OperationWithClockTag,
};
use crate::shards::replica_set::ShardReplicaSet;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId, ShardsPlacement};
impl Collection {
/// Build a new `ShardReplicaSet` for `shard_id`, local to this peer if it is
/// listed in `replicas`; remaining replicas are registered as remote peers.
///
/// `init_state` defaults to `Active` when not provided.
pub async fn create_replica_set(
    &self,
    shard_id: ShardId,
    shard_key: Option<ShardKey>,
    replicas: &[PeerId],
    init_state: Option<ReplicaState>,
) -> CollectionResult<ShardReplicaSet> {
    let is_local = replicas.contains(&self.this_peer_id);
    // Remote peers only — this peer is represented by `is_local`.
    let peers = replicas
        .iter()
        .copied()
        .filter(|peer_id| *peer_id != self.this_peer_id)
        .collect();
    let effective_optimizers_config = self.effective_optimizers_config().await?;
    ShardReplicaSet::build(
        shard_id,
        shard_key,
        self.name().to_string(),
        self.this_peer_id,
        is_local,
        peers,
        self.notify_peer_failure_cb.clone(),
        self.abort_shard_transfer_cb.clone(),
        &self.path,
        self.collection_config.clone(),
        effective_optimizers_config,
        self.shared_storage_config.clone(),
        self.payload_index_schema.clone(),
        self.channel_service.clone(),
        self.update_runtime.clone(),
        self.search_runtime.clone(),
        self.optimizer_resource_budget.clone(),
        Some(init_state.unwrap_or(ReplicaState::Active)),
    )
    .await
}
/// Create a new custom shard key with the given per-shard replica placement,
/// building the replica sets and replaying the existing payload index schema
/// onto each new local shard.
///
/// Fails with a bad-request error when the collection uses auto sharding, the
/// key already exists, or the placement references unknown peers.
///
/// # Cancel safety
///
/// This method is *not* cancel safe.
pub async fn create_shard_key(
    &self,
    shard_key: ShardKey,
    placement: ShardsPlacement,
    init_state: ReplicaState,
) -> CollectionResult<()> {
    let hw_counter = HwMeasurementAcc::disposable(); // Internal operation. No measurement needed.
    let state = self.state().await;
    match state.config.params.sharding_method.unwrap_or_default() {
        ShardingMethod::Auto => {
            return Err(CollectionError::bad_request(format!(
                "Shard Key {shard_key} cannot be created with Auto sharding method"
            )));
        }
        ShardingMethod::Custom => {}
    }
    if state.shards_key_mapping.contains_key(&shard_key) {
        return Err(CollectionError::bad_request(format!(
            "Shard key {shard_key} already exists"
        )));
    }
    // Reject placements that reference peers unknown to the cluster.
    let all_peers: HashSet<_> = self
        .channel_service
        .id_to_address
        .read()
        .keys()
        .cloned()
        .collect();
    let unknown_peers: Vec<_> = placement
        .iter()
        .flatten()
        .filter(|peer_id| !all_peers.contains(peer_id))
        .collect();
    if !unknown_peers.is_empty() {
        return Err(CollectionError::bad_request(format!(
            "Shard Key {shard_key} placement contains unknown peers: {unknown_peers:?}"
        )));
    }
    // New shard ids continue after the current maximum.
    let max_shard_id = state.max_shard_id();
    let payload_schema = self.payload_index_schema.read().schema.clone();
    for (idx, shard_replicas_placement) in placement.iter().enumerate() {
        let shard_id = max_shard_id + idx as ShardId + 1;
        let replica_set = self
            .create_replica_set(
                shard_id,
                Some(shard_key.clone()),
                shard_replicas_placement,
                Some(init_state),
            )
            .await?;
        // Replay existing payload index definitions onto the new shard.
        for (field_name, field_schema) in payload_schema.iter() {
            let create_index_op = CollectionUpdateOperations::FieldIndexOperation(
                FieldIndexOperations::CreateIndex(CreateIndex {
                    field_name: field_name.clone(),
                    field_schema: Some(field_schema.clone()),
                }),
            );
            replica_set
                .update_local(
                    OperationWithClockTag::from(create_index_op),
                    true,
                    hw_counter.clone(),
                    false,
                ) // TODO: Assign clock tag!? 🤔
                .await?;
        }
        self.shards_holder
            .write()
            .await
            .add_shard(shard_id, replica_set, Some(shard_key.clone()))
            .await?;
    }
    Ok(())
}
/// Remove a custom shard key and all shards associated with it.
///
/// Only valid under the `Custom` sharding method. Any in-flight resharding that
/// targets this shard key is aborted best-effort first, and pending local shard
/// cleaning tasks for the affected shards are invalidated before removal.
pub async fn drop_shard_key(&self, shard_key: ShardKey) -> CollectionResult<()> {
let state = self.state().await;
// Shard keys only exist with custom sharding; reject otherwise.
match state.config.params.sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
return Err(CollectionError::bad_request(format!(
"Shard Key {shard_key} cannot be removed with Auto sharding method"
)));
}
ShardingMethod::Custom => {}
}
// Only consider a resharding operation that targets this exact shard key.
let resharding_state = self
.resharding_state()
.await
.filter(|state| state.shard_key.as_ref() == Some(&shard_key));
// Best effort: abort the resharding but proceed with the drop even if aborting fails.
if let Some(state) = resharding_state
&& let Err(err) = self.abort_resharding(state.key(), true).await
{
log::error!(
"failed to abort resharding {} while deleting shard key {shard_key}: {err}",
state.key(),
);
}
// Invalidate local shard cleaning tasks
// (read guard is dropped before the write lock below is taken).
match self
.shards_holder
.read()
.await
.get_shard_ids_by_key(&shard_key)
{
Ok(shard_ids) => self.invalidate_clean_local_shards(shard_ids).await,
Err(err) => {
log::warn!("Failed to invalidate local shard cleaning task, ignoring: {err}");
}
}
self.shards_holder
.write()
.await
.remove_shard_key(&shard_key)
.await
}
/// Resolve the shard ids registered for `shard_key`.
///
/// Returns a bad-input error when the key is unknown to this collection.
pub async fn get_shard_ids(&self, shard_key: &ShardKey) -> CollectionResult<Vec<ShardId>> {
    // Snapshot the key -> shard-ids mapping under a short-lived read lock.
    let mapping = self
        .shards_holder
        .read()
        .await
        .get_shard_key_to_ids_mapping();

    match mapping.get(shard_key) {
        Some(ids) => Ok(ids.iter().cloned().collect()),
        None => Err(CollectionError::bad_input(format!(
            "Shard key {shard_key} does not exist for collection {}",
            self.name()
        ))),
    }
}
/// List every `(shard_id, peer_id)` replica pair belonging to `shard_key`.
///
/// Fails with a bad-input error if the shard key does not exist; shards that
/// disappear between lookup and enumeration are silently skipped.
pub async fn get_replicas(
    &self,
    shard_key: &ShardKey,
) -> CollectionResult<Vec<(ShardId, PeerId)>> {
    // Resolving the ids first also validates that the key exists.
    let shard_ids = self.get_shard_ids(shard_key).await?;

    let holder = self.shards_holder.read().await;
    let mut pairs = Vec::new();
    for id in shard_ids {
        let Some(replica_set) = holder.get_shard(id) else {
            // Shard vanished since the id lookup — skip it.
            continue;
        };
        for (peer, _state) in replica_set.peers() {
            pairs.push((id, peer));
        }
    }
    Ok(pairs)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.