repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/lib.rs
lib/posting_list/src/lib.rs
mod builder; mod iterator; mod posting_list; #[cfg(test)] mod tests; mod value_handler; mod view; mod visitor; use bitpacking::BitPacker; type BitPackerImpl = bitpacking::BitPacker4x; /// How many elements are packed in a single chunk. const CHUNK_LEN: usize = 128; const _: () = assert!(128 == BitPackerImpl::BLOCK_LEN); pub trait SizedValue: Sized + Copy + std::fmt::Debug {} impl SizedValue for () {} impl SizedValue for u32 {} impl SizedValue for u64 {} pub trait UnsizedValue { /// Returns the length of the serialized value in bytes. fn write_len(&self) -> usize; /// Writes the value to the given buffer. fn write_to(&self, dst: &mut [u8]); /// Creates a value from the given bytes. fn from_bytes(data: &[u8]) -> Self; } /// Sized value attached to each id in the posting list. /// /// Concrete values are usually just `()` or `u32`. pub type SizedTypeFor<V> = <<V as PostingValue>::Handler as ValueHandler>::Sized; /// Posting list of ids, where ids are compressed. pub type IdsPostingList = PostingList<()>; /// Non-owning posting list of ids, where ids are compressed. pub type IdsPostingListView<'a> = PostingListView<'a, ()>; pub use builder::PostingBuilder; pub use iterator::PostingIterator; pub use posting_list::{PostingChunk, PostingElement, PostingList, RemainderPosting}; pub use value_handler::{PostingValue, SizedHandler, UnsizedHandler, ValueHandler}; pub use view::{PostingListComponents, PostingListView}; pub use visitor::PostingVisitor;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/tests.rs
lib/posting_list/src/tests.rs
use std::collections::HashMap; use common::types::PointOffsetType; use rand::distr::{Alphanumeric, SampleString}; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use crate::{CHUNK_LEN, PostingBuilder, PostingList, PostingValue, UnsizedHandler, UnsizedValue}; // Simple struct that implements VarSizedValue for testing #[derive(Debug, Clone, PartialEq)] struct TestString(String); impl PostingValue for TestString { type Handler = UnsizedHandler<TestString>; } impl UnsizedValue for TestString { fn write_len(&self) -> usize { self.0.len() } fn write_to(&self, dst: &mut [u8]) { dst.copy_from_slice(self.0.as_bytes()); } fn from_bytes(data: &[u8]) -> Self { let s = String::from_utf8(data.to_vec()).expect("Failed to convert bytes to string"); TestString(s) } } #[test] fn test_just_ids_against_vec() { check_various_lengths(|len| { let posting_list = check_against_sorted_vec(|_rng, _id| (), len); // validate that chunks' sized values are empty, so we only have initial_id and offset if let Some(chunk) = posting_list.chunks.first() { assert_eq!(size_of_val(chunk), size_of::<u32>() * 2); } // similarly, validate that the remainder is equivalent to just one id if let Some(remainder) = posting_list.remainders.first() { assert_eq!(size_of_val(remainder), size_of::<u32>()); } }); } #[test] fn test_var_sized_against_vec() { let alphanumeric = Alphanumeric; check_various_lengths(|len| { check_against_sorted_vec( |rng, id| { let len = rng.random_range(1..=20); let s = alphanumeric.sample_string(rng, len); TestString(format!("item_{id} {s}")) }, len, ); }) } #[test] fn test_fixed_sized_against_vec() { check_various_lengths(|len| { check_against_sorted_vec(|_rng, id| u64::from(id) * 100, len); }); } fn generate_data<T, R: Rng>( amount: u32, rng: &mut R, gen_value: impl Fn(&mut R, u32) -> T, ) -> Vec<(u32, T)> { let gen_id = |rng: &mut R| rng.random_range(0..amount); (0..amount) .map(|_| { let id = gen_id(rng); (id, gen_value(rng, id)) }) .collect() } fn check_various_lengths(check: 
impl Fn(u32)) { let lengths = [ 0, 1, 2, 9, 10, CHUNK_LEN - 1, CHUNK_LEN, CHUNK_LEN + 1, CHUNK_LEN + 2, 2 * CHUNK_LEN + 10, 100 * CHUNK_LEN, 500 * CHUNK_LEN + 1, 500 * CHUNK_LEN - 1, 500 * CHUNK_LEN + CHUNK_LEN / 2, ]; for len in lengths { check(len as u32); } } fn check_against_sorted_vec<G, V>(gen_value: G, postings_count: u32) -> PostingList<V> where G: Fn(&mut StdRng, PointOffsetType) -> V, V: PostingValue + PartialEq + std::fmt::Debug, { let rng = &mut StdRng::seed_from_u64(42); let test_data = generate_data(postings_count, rng, gen_value); // Build our reference model let mut model = test_data.clone(); model.sort_unstable_by_key(|(id, _)| *id); // Create the posting list builder and add elements let mut builder = PostingBuilder::new(); for (id, value) in test_data { builder.add(id, value); } // Build the actual posting list let posting_list = builder.build(); // Access the posting list let mut visitor = posting_list.visitor(); let mut intersection_iter = posting_list.iter(); // Validate len() assert_eq!(visitor.len(), model.len()); // Iterate through the elements in reference_model and check they can be found for (offset, (expected_id, expected_value)) in model.iter().enumerate() { let Some(elem) = visitor.get_by_offset(offset) else { panic!("Element not found at offset {offset}"); }; assert_eq!(elem.id, *expected_id); assert_eq!(elem.value, *expected_value); // also check that contains function works assert!(visitor.contains(*expected_id)); // also check that the intersection is full let intersection = intersection_iter .advance_until_greater_or_equal(*expected_id) .unwrap(); assert_eq!(intersection.id, *expected_id); } // Bounds check assert!(visitor.get_by_offset(postings_count as usize).is_none()); let out_of_range = (postings_count.next_multiple_of(CHUNK_LEN as u32)) as usize; assert!(visitor.get_by_offset(out_of_range).is_none()); // There is no such id assert!(!visitor.contains(postings_count)); // intersect against all sequential ids in the posting 
range, model is a hashmap in this case let model = model.into_iter().collect::<HashMap<_, _>>(); let mut intersection_iter = posting_list.iter(); for seq_id in 0..postings_count { let model_contains = model.contains_key(&seq_id); let iter_contains = intersection_iter .advance_until_greater_or_equal(seq_id) .is_some_and(|elem| elem.id == seq_id); assert_eq!(model_contains, iter_contains, "Mismatch at seq_id {seq_id}"); } posting_list }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/view.rs
lib/posting_list/src/view.rs
use std::marker::PhantomData; use std::ops::RangeInclusive; use bitpacking::BitPacker; use common::types::PointOffsetType; use zerocopy::little_endian::U32; use crate::iterator::PostingIterator; use crate::posting_list::RemainderPosting; use crate::value_handler::PostingValue; use crate::visitor::PostingVisitor; use crate::{BitPackerImpl, CHUNK_LEN, PostingChunk, PostingList, SizedTypeFor}; /// A non-owning view of [`PostingList`]. pub struct PostingListView<'a, V: PostingValue> { pub(crate) id_data: &'a [u8], chunks: &'a [PostingChunk<SizedTypeFor<V>>], pub(crate) var_size_data: &'a [u8], remainders: &'a [RemainderPosting<SizedTypeFor<V>>], pub(crate) last_id: Option<PointOffsetType>, pub(crate) _phantom: PhantomData<V>, } pub struct PostingListComponents<'a, S> { pub id_data: &'a [u8], pub chunks: &'a [PostingChunk<S>], pub var_size_data: &'a [u8], pub remainders: &'a [RemainderPosting<S>], pub last_id: Option<U32>, } impl<'a, V: PostingValue> IntoIterator for PostingListView<'a, V> { type Item = <PostingIterator<'a, V> as Iterator>::Item; type IntoIter = PostingIterator<'a, V>; fn into_iter(self) -> Self::IntoIter { self.visitor().into_iter() } } impl<'a, V: PostingValue> PostingListView<'a, V> { pub fn visitor(self) -> PostingVisitor<'a, V> { PostingVisitor::new(self) } // not implemented as ToOwned trait because it requires PostingList's Borrow to return // a &PostingListView, which is not possible because it's a non-owning view pub fn to_owned(self) -> PostingList<V> { PostingList { id_data: self.id_data.to_vec(), chunks: self.chunks.to_vec(), var_size_data: self.var_size_data.to_owned(), remainders: self.remainders.to_vec(), last_id: self.last_id, _phantom: PhantomData, } } pub fn components(&self) -> PostingListComponents<'_, SizedTypeFor<V>> { let Self { id_data, chunks, var_size_data, remainders, last_id, _phantom, } = self; PostingListComponents { id_data, chunks, var_size_data, remainders, last_id: last_id.map(U32::from), } } pub fn from_components( 
id_data: &'a [u8], chunks: &'a [PostingChunk<SizedTypeFor<V>>], var_size_data: &'a [u8], remainders: &'a [RemainderPosting<SizedTypeFor<V>>], last_id: Option<PointOffsetType>, ) -> Self { Self { id_data, chunks, var_size_data, remainders, last_id, _phantom: PhantomData, } } pub(crate) fn decompress_chunk( &self, chunk_index: usize, decompressed_chunk: &mut [PointOffsetType; CHUNK_LEN], ) { let chunk = &self.chunks[chunk_index]; let compressed_size = PostingChunk::get_compressed_size(self.chunks, self.id_data, chunk_index); let chunk_bits = compressed_size * u8::BITS as usize / CHUNK_LEN; let start_offset = chunk.offset.get() as usize; let end_offset = start_offset + compressed_size; BitPackerImpl::new().decompress_sorted( chunk.initial_id.get(), &self.id_data[start_offset..end_offset], decompressed_chunk, chunk_bits as u8, ); } pub(crate) fn get_chunk_unchecked(&self, chunk_idx: usize) -> &PostingChunk<SizedTypeFor<V>> { &self.chunks[chunk_idx] } pub(crate) fn get_chunk(&self, chunk_idx: usize) -> Option<&PostingChunk<SizedTypeFor<V>>> { self.chunks.get(chunk_idx) } pub(crate) fn chunks_len(&self) -> usize { self.chunks.len() } pub(crate) fn remainders_len(&self) -> usize { self.remainders.len() } pub(crate) fn get_remainder(&self, idx: usize) -> Option<&RemainderPosting<SizedTypeFor<V>>> { self.remainders.get(idx) } pub(crate) fn ids_range(&self, start_chunk: usize) -> Option<RangeInclusive<u32>> { // if there is no last id, it means the posting list is empty let last_id = self.last_id?; let initial_id = self .chunks .get(start_chunk) .map(|chunk| chunk.initial_id.get()) .or_else(|| self.get_remainder(0).map(|elem| elem.id.get()))?; Some(initial_id..=last_id) } /// Find the chunk that may contain the id. /// It doesn't guarantee that the chunk contains the id, but if it is in the posting list, then it must be in the chunk. /// /// Assumes the id is in the posting list range. 
pub fn find_chunk(&self, id: PointOffsetType, start_chunk: Option<usize>) -> Option<usize> { let remainders = self.remainders; let chunks = self.chunks; // check if id might be in the remainders list if remainders.first().is_some_and(|elem| id >= elem.id.get()) { return None; } let start_chunk = start_chunk.unwrap_or(0); let chunks_slice = chunks.get(start_chunk..)?; if chunks_slice.is_empty() { return None; } // No need to check if id is under range of posting list, // this function assumes it is within the range debug_assert!(id >= chunks_slice[0].initial_id.get()); debug_assert!(self.last_id.is_some_and(|last_id| id <= last_id)); // Fast-path: check if `id` falls into the first chunk's range let first = &chunks_slice[0]; if let Some(second) = chunks_slice.get(1) { let id0 = first.initial_id.get(); let id1 = second.initial_id.get(); if id0 <= id && id < id1 { return Some(start_chunk); } } match chunks_slice.binary_search_by(|chunk| chunk.initial_id.get().cmp(&id)) { // id is the initial value of the chunk with index idx Ok(idx) => Some(start_chunk + idx), // id is not the initial_id of any chunk Err(insert_idx) if insert_idx > 0 => { // this is the index of the chunk that could contain id let idx = insert_idx - 1; // id could be within this chunk Some(start_chunk + idx) } Err(_) => None, } } pub(crate) fn search_in_remainders(&self, id: PointOffsetType) -> Result<usize, usize> { self.remainders .binary_search_by(|elem| elem.id.get().cmp(&id)) } /// The total number of elements in the posting list. pub fn len(&self) -> usize { self.chunks.len() * CHUNK_LEN + self.remainders.len() } /// Checks if there are no elements in the posting list. pub fn is_empty(&self) -> bool { self.len() == 0 } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/posting_list.rs
lib/posting_list/src/posting_list.rs
use std::borrow::Borrow; use std::marker::PhantomData; use common::types::PointOffsetType; use zerocopy::little_endian::U32; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use crate::iterator::PostingIterator; use crate::value_handler::PostingValue; use crate::view::PostingListView; use crate::visitor::PostingVisitor; use crate::{CHUNK_LEN, PostingBuilder, SizedTypeFor}; /// Generic compressed posting list. /// /// - `PostingList<()>` when there are no values (unit type `()`), there are just compressed ids + remainders /// - `PostingList<V>` when there are values associated to each id. The value must implement `PostingValue`, which chooses the appropriate value handler. /// There are two available handlers: /// - [`SizedHandler`][1]: needs the value to implement [`SizedValue`][3]. Stores the value within the chunk. /// - [`UnsizedHandler`][2]: needs the value to implement [`UnsizedValue`][4]. Stores the value in the var_size_data. /// /// [1]: crate::value_handler::SizedHandler /// [2]: crate::value_handler::UnsizedHandler /// [3]: crate::SizedValue /// [4]: crate::UnsizedValue #[derive(Debug, Clone)] pub struct PostingList<V: PostingValue> { pub(crate) id_data: Vec<u8>, pub(crate) chunks: Vec<PostingChunk<SizedTypeFor<V>>>, pub(crate) remainders: Vec<RemainderPosting<SizedTypeFor<V>>>, pub(crate) var_size_data: Vec<u8>, pub(crate) last_id: Option<PointOffsetType>, pub(crate) _phantom: PhantomData<V>, } /// A single element in the posting list, which contains an id and a value. /// /// Stores a remainder of the posting list. The difference with [`PostingElement`] is /// so that this is zerocopy friendly #[derive(Clone, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] // Required for IntoBytes to work correctly pub struct RemainderPosting<S: Sized> { /// U32 is required for pinning endianness of the id pub id: U32, pub value: S, } /// A single element in the posting list, which contains an id and a value. 
/// /// Output-facing structure. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct PostingElement<V> { pub id: PointOffsetType, pub value: V, } #[derive(Debug, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] pub struct PostingChunk<S: Sized> { /// Initial data point id. Used for decompression. pub initial_id: U32, /// An offset within id_data pub offset: U32, /// Sized values for the chunk. pub sized_values: [S; CHUNK_LEN], } impl<S: Sized> PostingChunk<S> { /// Get byte size of the compressed ids chunk. pub(crate) fn get_compressed_size( chunks: &[PostingChunk<S>], ids_data: &[u8], chunk_index: usize, ) -> usize { if chunk_index + 1 < chunks.len() { chunks[chunk_index + 1].offset.get() as usize - chunks[chunk_index].offset.get() as usize } else { // Last chunk ids_data.len() - chunks[chunk_index].offset.get() as usize } } } impl<V: PostingValue> PostingList<V> { pub fn view(&self) -> PostingListView<'_, V> { let PostingList { id_data, chunks, remainders, var_size_data, last_id, _phantom, } = self; PostingListView::from_components( id_data, chunks, var_size_data.borrow(), remainders, *last_id, ) } pub fn visitor(&self) -> PostingVisitor<'_, V> { let view = self.view(); PostingVisitor::new(view) } pub fn iter(&self) -> PostingIterator<'_, V> { self.visitor().into_iter() } pub fn len(&self) -> usize { self.chunks.len() * CHUNK_LEN + self.remainders.len() } pub fn is_empty(&self) -> bool { self.len() == 0 } } impl<V: PostingValue> FromIterator<(PointOffsetType, V)> for PostingList<V> { fn from_iter<T: IntoIterator<Item = (PointOffsetType, V)>>(iter: T) -> Self { let mut builder = PostingBuilder::new(); for (id, value) in iter { builder.add(id, value); } builder.build() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/iterator.rs
lib/posting_list/src/iterator.rs
use std::iter::FusedIterator; use common::types::PointOffsetType; use crate::PostingElement; use crate::value_handler::PostingValue; use crate::visitor::PostingVisitor; pub struct PostingIterator<'a, V: PostingValue> { visitor: PostingVisitor<'a, V>, current_elem: Option<PostingElement<V>>, offset: usize, } impl<'a, V: PostingValue> PostingIterator<'a, V> { pub fn new(visitor: PostingVisitor<'a, V>) -> Self { Self { visitor, current_elem: None, offset: 0, } } /// Advances the iterator until the current element id is greater than or equal to the given id. /// /// Returns `Some(PostingElement)` on the first element that is greater than or equal to the given id. It can be possible that this id is /// the head of the iterator, so it does not need to be advanced. /// /// `None` means the iterator is exhausted. pub fn advance_until_greater_or_equal( &mut self, target_id: PointOffsetType, ) -> Option<PostingElement<V>> { if let Some(current) = &self.current_elem && current.id >= target_id { return Some(current.clone()); } if self.offset >= self.visitor.len() { return None; } let Some(offset) = self .visitor .search_greater_or_equal(target_id, Some(self.offset)) else { self.current_elem = None; self.offset = self.visitor.len(); return None; }; debug_assert!(offset >= self.offset); let greater_or_equal = self.visitor.get_by_offset(offset); self.current_elem = greater_or_equal.clone(); self.offset = offset; greater_or_equal } } impl<V: PostingValue> Iterator for PostingIterator<'_, V> { type Item = PostingElement<V>; fn next(&mut self) -> Option<Self::Item> { let next_opt = self.visitor.get_by_offset(self.offset).inspect(|_| { self.offset += 1; }); self.current_elem = next_opt.clone(); next_opt } fn size_hint(&self) -> (usize, Option<usize>) { let remaining_len = self.len(); (remaining_len, Some(remaining_len)) } fn count(self) -> usize { self.size_hint().0 } } impl<V: PostingValue> ExactSizeIterator for PostingIterator<'_, V> { fn len(&self) -> usize { 
self.visitor.list.len().saturating_sub(self.offset) } } impl<V: PostingValue> FusedIterator for PostingIterator<'_, V> {}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/value_handler.rs
lib/posting_list/src/value_handler.rs
use std::fmt::Debug; use std::marker::PhantomData; use zerocopy::little_endian::U32; use crate::{SizedValue, UnsizedValue}; /// Trait for types that can be used as posting list values /// /// This trait associates a value type with its appropriate handler. pub trait PostingValue: Clone { type Handler: ValueHandler<Value = Self>; } // Implementations for built-in sized types impl PostingValue for () { type Handler = SizedHandler<()>; } impl PostingValue for u32 { type Handler = SizedHandler<u32>; } impl PostingValue for u64 { type Handler = SizedHandler<u64>; } /// Trait to abstract the handling of values in PostingList /// /// This trait handles the differences between fixed-size and variable-size value /// implementations, allowing us to have a unified implementation of `from_builder`. /// /// - For fixed-size values, the associated type [`ValueHandler::Sized`] is the same as the generic type V /// - For variable-size values, [`ValueHandler::Sized`] is an offset into the var_sized_data pub trait ValueHandler { /// The type of value in each PostingElement. type Value; /// The value to store within each chunk, or alongside each id. type Sized: std::marker::Sized + Copy + Debug; /// Process values before storage and return the necessary var_sized_data /// /// - For fixed-size values, this returns the values themselves and an empty var_sized_data. /// - For variable-size values, this returns offsets and the flattened serialized data. fn process_values(values: Vec<Self::Value>) -> (Vec<Self::Sized>, Vec<u8>); /// Retrieve a value. /// /// - For sized values it returns the first argument. /// - For variable-size values it returns the value between the two sized values in var_data. 
fn get_value<N>(sized_value: Self::Sized, next_sized_value: N, var_data: &[u8]) -> Self::Value where N: Fn() -> Option<Self::Sized>; } /// Fixed-size value handler #[derive(Default, Debug, Clone, Copy)] pub struct SizedHandler<V: SizedValue>(PhantomData<V>); impl<V: SizedValue> ValueHandler for SizedHandler<V> { type Value = V; type Sized = V; fn process_values(values: Vec<V>) -> (Vec<V>, Vec<u8>) { (values, Vec::new()) } fn get_value<N>(sized_value: V, _next_sized_value: N, _var_data: &[u8]) -> V where N: Fn() -> Option<Self::Sized>, { sized_value } } /// Var-size value handler #[derive(Default, Debug, Clone, Copy)] pub struct UnsizedHandler<V: UnsizedValue>(PhantomData<V>); impl<V: UnsizedValue> ValueHandler for UnsizedHandler<V> { type Value = V; type Sized = U32; fn process_values(values: Vec<Self::Value>) -> (Vec<Self::Sized>, Vec<u8>) { let mut offsets = Vec::with_capacity(values.len()); let mut current_offset = 0u32; for value in &values { offsets.push(U32::from(current_offset)); let value_len = u32::try_from(value.write_len()) .expect("Value larger than 4GB, use u64 offsets instead"); // prepare next starting offset current_offset = current_offset .checked_add(value_len) .expect("Size of all values exceeds 4GB"); } let last_offset = offsets.last(); let ranges = offsets .windows(2) .map(|w| w[0].get() as usize..w[1].get() as usize) // the last one is not included in windows, but goes until the end .chain(last_offset.map(|&last| last.get() as usize..current_offset as usize)); let mut var_sized_data = vec![0; current_offset as usize]; for (value, range) in values.iter().zip(ranges) { value.write_to(&mut var_sized_data[range]); } (offsets, var_sized_data) } fn get_value<N>(sized_value: Self::Sized, next_sized_value: N, var_data: &[u8]) -> Self::Value where N: Fn() -> Option<Self::Sized>, { let range = match next_sized_value() { Some(next_value) => sized_value.get() as usize..next_value.get() as usize, None => sized_value.get() as usize..var_data.len(), }; 
V::from_bytes(&var_data[range]) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/build.rs
lib/segment/build.rs
use std::env; fn main() { let target_arch = env::var("CARGO_CFG_TARGET_ARCH") .expect("CARGO_CFG_TARGET_ARCH env-var is not defined or is not UTF-8"); // TODO: Is `CARGO_CFG_TARGET_FEATURE` *always* defined? // // Cargo docs says that "boolean configurations are present if they are set, // and not present otherwise", so, what about "target features"? // // https://doc.rust-lang.org/cargo/reference/environment-variables.html (Ctrl-F CARGO_CFG_<cfg>) let target_feature = env::var("CARGO_CFG_TARGET_FEATURE") .expect("CARGO_CFG_TARGET_FEATURE env-var is not defined or is not UTF-8"); if target_arch == "aarch64" && target_feature.split(',').any(|feat| feat == "neon") { let mut builder = cc::Build::new(); builder.file("src/spaces/metric_f16/cpp/neon.c"); builder.flag("-O3"); builder.flag("-march=armv8.2-a+fp16"); builder.compile("simd_utils"); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/lib.rs
lib/segment/src/lib.rs
pub mod common; pub mod entry; #[cfg(feature = "testing")] pub mod fixtures; pub mod id_tracker; pub mod index; pub mod payload_storage; #[cfg(feature = "rocksdb")] pub mod rocksdb_backup; pub mod segment; pub mod segment_constructor; pub mod spaces; pub mod telemetry; mod compat; pub mod data_types; pub mod json_path; pub mod types; pub mod utils; pub mod vector_storage; #[macro_use] extern crate num_derive; extern crate core;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/telemetry.rs
lib/segment/src/telemetry.rs
use schemars::JsonSchema; use serde::Serialize; use crate::common::anonymize::Anonymize; use crate::common::operation_time_statistics::OperationDurationStatistics; use crate::types::{SegmentConfig, SegmentInfo, VectorNameBuf}; #[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)] pub struct SegmentTelemetry { pub info: SegmentInfo, pub config: SegmentConfig, pub vector_index_searches: Vec<VectorIndexSearchesTelemetry>, pub payload_field_indices: Vec<PayloadIndexTelemetry>, } #[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)] pub struct PayloadIndexTelemetry { #[anonymize(value = None)] pub field_name: Option<String>, #[anonymize(false)] pub index_type: &'static str, /// The amount of values indexed for all points. pub points_values_count: usize, /// The amount of points that have at least one value indexed. pub points_count: usize, #[serde(skip_serializing_if = "Option::is_none")] #[anonymize(false)] pub histogram_bucket_size: Option<usize>, } impl PayloadIndexTelemetry { pub fn set_name(mut self, name: String) -> Self { self.field_name = Some(name); self } } #[derive(Serialize, Clone, Debug, JsonSchema, Anonymize, Default)] pub struct VectorIndexSearchesTelemetry { #[serde(skip_serializing_if = "Option::is_none")] #[anonymize(value = None)] pub index_name: Option<VectorNameBuf>, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub unfiltered_plain: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub unfiltered_hnsw: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub unfiltered_sparse: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub filtered_plain: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub filtered_small_cardinality: OperationDurationStatistics, #[serde(skip_serializing_if = 
"OperationDurationStatistics::is_empty")] pub filtered_large_cardinality: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub filtered_exact: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub filtered_sparse: OperationDurationStatistics, #[serde(skip_serializing_if = "OperationDurationStatistics::is_empty")] pub unfiltered_exact: OperationDurationStatistics, }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/types.rs
lib/segment/src/types.rs
use std::borrow::Cow; use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::{self, Display, Formatter}; use std::hash::{self, Hash, Hasher}; use std::mem; use std::ops::Deref; use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use ahash::AHashSet; use common::stable_hash::StableHash; use common::types::ScoreType; use ecow::EcoString; use fnv::FnvBuildHasher; use geo::{Contains, Coord, Distance as GeoDistance, Haversine, LineString, Point, Polygon}; use indexmap::IndexSet; use itertools::Itertools; use ordered_float::OrderedFloat; use schemars::JsonSchema; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::{Map, Value}; use strum::{EnumIter, EnumString}; use uuid::Uuid; use validator::{Validate, ValidationError, ValidationErrors}; use zerocopy::native_endian::U64; use crate::common::anonymize::Anonymize; use crate::common::operation_error::{OperationError, OperationResult}; use crate::common::utils::{self, MaybeOneOrMany, MultiValue}; use crate::data_types::index::{ BoolIndexParams, DatetimeIndexParams, FloatIndexParams, GeoIndexParams, IntegerIndexParams, KeywordIndexParams, TextIndexParams, UuidIndexParams, }; use crate::data_types::modifier::Modifier; use crate::data_types::order_by::OrderValue; use crate::data_types::primitive::PrimitiveVectorElement; use crate::data_types::tiny_map::TinyMap; use crate::data_types::vectors::{DenseVector, VectorStructInternal}; use crate::index::field_index::CardinalityEstimation; use crate::index::sparse_index::sparse_index_config::SparseIndexConfig; use crate::json_path::JsonPath; use crate::spaces::metric::{Metric, MetricPostProcessing}; use crate::spaces::simple::{CosineMetric, DotProductMetric, EuclidMetric, ManhattanMetric}; use crate::types::utils::unordered_hash_unique; use crate::utils::maybe_arc::MaybeArc; pub type PayloadKeyType = JsonPath; pub type PayloadKeyTypeRef<'a> = &'a JsonPath; /// Sequential number of modification, applied to segment pub type 
SeqNumberType = u64; /// Type of float point payload pub type FloatPayloadType = f64; /// Type of integer point payload pub type IntPayloadType = i64; /// Type of datetime point payload pub type DateTimePayloadType = DateTimeWrapper; /// Type of Uuid point payload pub type UuidPayloadType = Uuid; /// Type of Uuid point payload key pub type UuidIntType = u128; /// Name of a vector pub type VectorName = str; /// Name of a vector (owned variant) pub type VectorNameBuf = String; /// Wraps `DateTime<Utc>` to allow more flexible deserialization #[derive(Clone, Copy, Serialize, JsonSchema, Debug, PartialEq, Eq, PartialOrd, Hash)] #[serde(transparent)] pub struct DateTimeWrapper(pub chrono::DateTime<chrono::Utc>); impl DateTimeWrapper { /// Qdrant's representation of datetime as timestamp is an i64 of microseconds pub fn timestamp(&self) -> i64 { self.0.timestamp_micros() } pub fn from_timestamp(ts: i64) -> Option<Self> { Some(Self(chrono::DateTime::from_timestamp_micros(ts)?)) } } impl<'de> Deserialize<'de> for DateTimePayloadType { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let str_datetime = <&str>::deserialize(deserializer)?; let parse_result = DateTimePayloadType::from_str(str_datetime).ok(); match parse_result { Some(datetime) => Ok(datetime), None => Err(serde::de::Error::custom(format!( "'{str_datetime}' is not in a supported date/time format, please use RFC 3339" ))), } } } impl FromStr for DateTimePayloadType { type Err = chrono::ParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { // Attempt to parse the input string in RFC 3339 format if let Ok(datetime) = chrono::DateTime::parse_from_rfc3339(s) // Attempt to parse default to-string format .or_else(|_| chrono::DateTime::from_str(s)) // Attempt to parse the input string in the specified formats: // - YYYY-MM-DD'T'HH:MM:SS-HHMM (timezone without colon) // - YYYY-MM-DD HH:MM:SS-HHMM (timezone without colon) .or_else(|_| chrono::DateTime::parse_from_str(s, 
"%Y-%m-%dT%H:%M:%S%.f%#z")) .or_else(|_| chrono::DateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f%#z")) .map(|dt| chrono::DateTime::<chrono::Utc>::from(dt).into()) { return Ok(datetime); } // Attempt to parse the input string in the specified formats: // - YYYY-MM-DD'T'HH:MM:SS (without timezone or Z) // - YYYY-MM-DD HH:MM:SS (without timezone or Z) // - YYYY-MM-DD HH:MM // - YYYY-MM-DD // See: <https://github.com/qdrant/qdrant/issues/3529> let datetime = chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S%.f") .or_else(|_| chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f")) .or_else(|_| chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M")) .or_else(|_| chrono::NaiveDate::parse_from_str(s, "%Y-%m-%d").map(Into::into))?; // Convert the parsed NaiveDateTime to a DateTime<Utc> let datetime_utc = datetime.and_utc().into(); Ok(datetime_utc) } } impl Display for DateTimePayloadType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } impl From<chrono::DateTime<chrono::Utc>> for DateTimePayloadType { fn from(dt: chrono::DateTime<chrono::Utc>) -> Self { DateTimeWrapper(dt) } } fn id_num_example() -> u64 { 42 } fn id_uuid_example() -> String { "550e8400-e29b-41d4-a716-446655440000".to_string() } /// Type, used for specifying point ID in user interface #[derive(Debug, Serialize, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, JsonSchema)] #[serde(untagged)] pub enum ExtendedPointId { #[schemars(example = "id_num_example")] NumId(u64), #[schemars(example = "id_uuid_example")] Uuid(Uuid), } impl StableHash for ExtendedPointId { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { match self { ExtendedPointId::NumId(num) => { 0u64.stable_hash(write); // discriminant for NumId num.stable_hash(write); } ExtendedPointId::Uuid(uuid) => { 1u64.stable_hash(write); // discriminant for Uuid uuid.as_bytes().len().stable_hash(write); // compatibility with uuid <= v1.16.0 write(uuid.as_bytes()); } } } } impl 
ExtendedPointId { pub fn is_num_id(&self) -> bool { matches!(self, ExtendedPointId::NumId(..)) } pub fn is_uuid(&self) -> bool { matches!(self, ExtendedPointId::Uuid(..)) } } impl std::fmt::Display for ExtendedPointId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { ExtendedPointId::NumId(idx) => write!(f, "{idx}"), ExtendedPointId::Uuid(uuid) => write!(f, "{uuid}"), } } } impl From<u64> for ExtendedPointId { fn from(idx: u64) -> Self { ExtendedPointId::NumId(idx) } } impl FromStr for ExtendedPointId { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { let try_num: Result<u64, _> = s.parse(); if let Ok(num) = try_num { return Ok(Self::NumId(num)); } let try_uuid = Uuid::from_str(s); if let Ok(uuid) = try_uuid { return Ok(Self::Uuid(uuid)); } Err(()) } } impl<'de> serde::Deserialize<'de> for ExtendedPointId { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let value = serde_value::Value::deserialize(deserializer)?; if let Ok(num) = value.clone().deserialize_into() { return Ok(ExtendedPointId::NumId(num)); } if let Ok(uuid) = value.clone().deserialize_into() { return Ok(ExtendedPointId::Uuid(uuid)); } Err(serde::de::Error::custom(format!( "value {} is not a valid point ID, \ valid values are either an unsigned integer or a UUID", crate::utils::fmt::SerdeValue(&value), ))) } } /// Type of point index across all segments pub type PointIdType = ExtendedPointId; /// Compact representation of [`ExtendedPointId`]. /// Unlike [`ExtendedPointId`], this type is 17 bytes long vs 24 bytes. 
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum CompactExtendedPointId { NumId(U64), Uuid(Uuid), } impl From<ExtendedPointId> for CompactExtendedPointId { fn from(id: ExtendedPointId) -> Self { match id { ExtendedPointId::NumId(num) => CompactExtendedPointId::NumId(U64::new(num)), ExtendedPointId::Uuid(uuid) => CompactExtendedPointId::Uuid(uuid), } } } impl From<CompactExtendedPointId> for ExtendedPointId { fn from(id: CompactExtendedPointId) -> Self { match id { CompactExtendedPointId::NumId(num) => ExtendedPointId::NumId(num.get()), CompactExtendedPointId::Uuid(uuid) => ExtendedPointId::Uuid(uuid), } } } /// Type of internal tags, build from payload #[derive( Debug, Deserialize, Serialize, JsonSchema, Anonymize, Clone, Copy, FromPrimitive, PartialEq, Eq, Hash, EnumString, EnumIter, )] /// Distance function types used to compare vectors pub enum Distance { // <https://en.wikipedia.org/wiki/Cosine_similarity> Cosine, // <https://en.wikipedia.org/wiki/Euclidean_distance> Euclid, // <https://en.wikipedia.org/wiki/Dot_product> Dot, // <https://simple.wikipedia.org/wiki/Manhattan_distance> Manhattan, } impl Distance { pub fn postprocess_score(&self, score: ScoreType) -> ScoreType { match self { Distance::Cosine => CosineMetric::postprocess(score), Distance::Euclid => EuclidMetric::postprocess(score), Distance::Dot => DotProductMetric::postprocess(score), Distance::Manhattan => ManhattanMetric::postprocess(score), } } pub fn preprocess_vector<T: PrimitiveVectorElement>(&self, vector: DenseVector) -> DenseVector where CosineMetric: Metric<T>, EuclidMetric: Metric<T>, DotProductMetric: Metric<T>, ManhattanMetric: Metric<T>, { match self { Distance::Cosine => CosineMetric::preprocess(vector), Distance::Euclid => EuclidMetric::preprocess(vector), Distance::Dot => DotProductMetric::preprocess(vector), Distance::Manhattan => ManhattanMetric::preprocess(vector), } } pub fn distance_order(&self) -> Order { match self { Distance::Cosine | 
Distance::Dot => Order::LargeBetter, Distance::Euclid | Distance::Manhattan => Order::SmallBetter, } } pub fn is_ordered(&self, left: ScoreType, right: ScoreType) -> bool { match self.distance_order() { Order::LargeBetter => left >= right, Order::SmallBetter => left <= right, } } /// Checks if score satisfies threshold condition pub fn check_threshold(&self, score: ScoreType, threshold: ScoreType) -> bool { match self.distance_order() { Order::LargeBetter => score > threshold, Order::SmallBetter => score < threshold, } } } #[derive(Debug, PartialEq, Clone, Copy)] pub enum Order { LargeBetter, SmallBetter, } /// Search result #[derive(Clone, Debug)] pub struct ScoredPoint { /// Point id pub id: PointIdType, /// Point version pub version: SeqNumberType, /// Points vector distance to the query vector pub score: ScoreType, /// Payload - values assigned to the point pub payload: Option<Payload>, /// Vector of the point pub vector: Option<VectorStructInternal>, /// Shard Key pub shard_key: Option<ShardKey>, /// Order-by value pub order_value: Option<OrderValue>, } impl Eq for ScoredPoint {} impl Ord for ScoredPoint { /// Compare two scored points by score, unless they have `order_value`, in that case compare by `order_value`. 
fn cmp(&self, other: &Self) -> Ordering { match (&self.order_value, &other.order_value) { (None, None) => OrderedFloat(self.score).cmp(&OrderedFloat(other.score)), (Some(_), None) => Ordering::Greater, (None, Some(_)) => Ordering::Less, (Some(self_order), Some(other_order)) => self_order.cmp(other_order), } } } impl PartialOrd for ScoredPoint { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for ScoredPoint { fn eq(&self, other: &Self) -> bool { (self.id, &self.score) == (other.id, &other.score) } } /// Type of segment #[derive(Debug, Serialize, JsonSchema, Anonymize, Clone, Copy, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum SegmentType { // There are no index built for the segment, all operations are available Plain, // Segment with some sort of index built. Optimized for search, appending new points will require reindexing Indexed, // Some index which you better don't touch Special, } /// Display payload field type & index information #[derive(Debug, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub struct PayloadIndexInfo { pub data_type: PayloadSchemaType, #[serde(skip_serializing_if = "Option::is_none")] pub params: Option<PayloadSchemaParams>, /// Number of points indexed with this index pub points: usize, } impl PayloadIndexInfo { pub fn new(field_type: PayloadFieldSchema, points_count: usize) -> Self { match field_type { PayloadFieldSchema::FieldType(data_type) => PayloadIndexInfo { data_type, params: None, points: points_count, }, PayloadFieldSchema::FieldParams(schema_params) => PayloadIndexInfo { data_type: schema_params.kind(), params: Some(schema_params), points: points_count, }, } } } #[derive(Debug, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub struct VectorDataInfo { pub num_vectors: usize, pub num_indexed_vectors: usize, pub num_deleted_vectors: usize, } /// Aggregated information about 
segment #[derive(Debug, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub struct SegmentInfo { pub segment_type: SegmentType, pub num_vectors: usize, pub num_points: usize, pub num_indexed_vectors: usize, pub num_deleted_vectors: usize, /// An ESTIMATION of effective amount of bytes used for vectors /// Do NOT rely on this number unless you know what you are doing pub vectors_size_bytes: usize, /// An estimation of the effective amount of bytes used for payloads pub payloads_size_bytes: usize, pub ram_usage_bytes: usize, pub disk_usage_bytes: usize, pub is_appendable: bool, pub index_schema: HashMap<PayloadKeyType, PayloadIndexInfo>, pub vector_data: HashMap<String, VectorDataInfo>, } #[derive(Debug, Default)] pub struct SizeStats { pub num_vectors: usize, pub num_vectors_by_name: TinyMap<VectorNameBuf, usize>, pub vectors_size_bytes: usize, pub payloads_size_bytes: usize, pub num_points: usize, } /// Additional parameters of the search #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Copy, PartialEq, Default)] #[serde(rename_all = "snake_case")] pub struct QuantizationSearchParams { /// If true, quantized vectors are ignored. Default is false. #[serde(default = "default_quantization_ignore_value")] pub ignore: bool, /// If true, use original vectors to re-score top-k results. /// Might require more time in case if original vectors are stored on disk. /// If not set, qdrant decides automatically apply rescoring or not. #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub rescore: Option<bool>, /// Oversampling factor for quantization. Default is 1.0. /// /// Defines how many extra vectors should be pre-selected using quantized index, /// and then re-scored using original vectors. /// /// For example, if `oversampling` is 2.4 and `limit` is 100, then 240 vectors will be pre-selected using quantized index, /// and then top-100 will be returned after re-scoring. 
#[serde(default = "default_quantization_oversampling_value")] #[validate(range(min = 1.0))] #[serde(skip_serializing_if = "Option::is_none")] pub oversampling: Option<f64>, } impl Hash for QuantizationSearchParams { fn hash<H: Hasher>(&self, state: &mut H) { let Self { ignore, rescore, oversampling, } = self; ignore.hash(state); rescore.hash(state); oversampling.map(OrderedFloat).hash(state); } } pub const fn default_quantization_ignore_value() -> bool { false } pub const fn default_quantization_oversampling_value() -> Option<f64> { None } /// Default value for [`AcornSearchParams::max_selectivity`]. /// /// After change, update docs for GRPC and REST API. pub const ACORN_MAX_SELECTIVITY_DEFAULT: f64 = 0.4; /// ACORN-related search parameters #[derive( Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Copy, PartialEq, Default, Hash, )] #[serde(rename_all = "snake_case")] pub struct AcornSearchParams { /// If true, then ACORN may be used for the HNSW search based on filters /// selectivity. /// Improves search recall for searches with multiple low-selectivity /// payload filters, at cost of performance. #[serde(default)] pub enable: bool, /// Maximum selectivity of filters to enable ACORN. /// /// If estimated filters selectivity is higher than this value, /// ACORN will not be used. Selectivity is estimated as: /// `estimated number of points satisfying the filters / total number of points`. /// /// 0.0 for never, 1.0 for always. Default is 0.4. #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] #[validate(range(min = 0.0, max = 1.0))] pub max_selectivity: Option<OrderedFloat<f64>>, } /// Additional parameters of the search #[derive( Debug, Deserialize, Serialize, JsonSchema, Validate, Copy, Clone, PartialEq, Default, Hash, )] #[serde(rename_all = "snake_case")] pub struct SearchParams { /// Params relevant to HNSW index /// Size of the beam in a beam-search. Larger the value - more accurate the result, more time required for search. 
#[serde(skip_serializing_if = "Option::is_none")] pub hnsw_ef: Option<usize>, /// Search without approximation. If set to true, search may run long but with exact results. #[serde(default)] pub exact: bool, /// Quantization params #[serde(default)] #[validate(nested)] #[serde(skip_serializing_if = "Option::is_none")] pub quantization: Option<QuantizationSearchParams>, /// If enabled, the engine will only perform search among indexed or small segments. /// Using this option prevents slow searches in case of delayed index, but does not /// guarantee that all uploaded vectors will be included in search results #[serde(default)] pub indexed_only: bool, /// ACORN search params #[serde(default)] #[validate(nested)] #[serde(skip_serializing_if = "Option::is_none")] pub acorn: Option<AcornSearchParams>, } /// Configuration for vectors. #[derive(Debug, Deserialize, Validate, Clone, PartialEq, Eq)] pub struct VectorsConfigDefaults { #[serde(default)] pub on_disk: Option<bool>, } /// Vector index configuration #[derive(Debug, Deserialize, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] #[serde(tag = "type", content = "options")] pub enum Indexes { /// Do not use any index, scan whole vector collection during search. /// Guarantee 100% precision, but may be time consuming on large collections. Plain {}, /// Use filterable HNSW index for approximate search. Is very fast even on a very huge collections, /// but require additional space to store index and additional time to build it. 
Hnsw(HnswConfig), } impl Indexes { pub fn is_indexed(&self) -> bool { match self { Indexes::Plain {} => false, Indexes::Hnsw(_) => true, } } pub fn is_on_disk(&self) -> bool { match self { Indexes::Plain {} => false, Indexes::Hnsw(config) => config.on_disk.unwrap_or_default(), } } } /// Config of HNSW index #[derive( Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema, Validate, Anonymize, )] #[serde(rename_all = "snake_case")] #[anonymize(false)] pub struct HnswConfig { /// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. pub m: usize, /// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build index. #[validate(range(min = 4))] pub ef_construct: usize, /// Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. /// This measures the total size of vectors being queried against. /// When the maximum estimated amount of points that a condition satisfies is smaller than /// `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index /// traversal for better performance. /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "full_scan_threshold_kb")] pub full_scan_threshold: usize, /// Number of parallel threads used for background index building. /// If 0 - automatically select from 8 to 16. /// Best to keep between 8 and 16 to prevent likelihood of slow building or broken/inefficient HNSW graphs. /// On small CPUs, less threads are used. #[serde(default = "default_max_indexing_threads")] pub max_indexing_threads: usize, /// Store HNSW index on disk. If set to false, index will be stored in RAM. Default: false #[serde(default, skip_serializing_if = "Option::is_none")] // Better backward compatibility pub on_disk: Option<bool>, /// Custom M param for hnsw graph built for payload index. If not set, default M will be used. 
#[serde(default, skip_serializing_if = "Option::is_none")] // Better backward compatibility pub payload_m: Option<usize>, /// Store copies of original and quantized vectors within the HNSW index file. Default: false. /// Enabling this option will trade the search speed for disk usage by reducing amount of /// random seeks during the search. /// Requires quantized vectors to be enabled. Multi-vectors are not supported. #[serde(default, skip_serializing_if = "Option::is_none")] pub inline_storage: Option<bool>, } impl HnswConfig { /// Detect configuration mismatch against `other` that requires rebuilding /// /// Returns true only if both conditions are met: /// - this configuration does not match `other` /// - to effectively change the configuration, a HNSW rebuild is required /// /// For example, a change in `max_indexing_threads` will not require rebuilding because it /// doesn't affect the final index, and thus this would return false. pub fn mismatch_requires_rebuild(&self, other: &Self) -> bool { let HnswConfig { m, ef_construct, full_scan_threshold, max_indexing_threads: _, payload_m, on_disk, inline_storage, } = *self; m != other.m || ef_construct != other.ef_construct || full_scan_threshold != other.full_scan_threshold || payload_m != other.payload_m // Data on disk is the same, we have a unit test for that. We can eventually optimize // this to just reload the collection rather than optimizing it again as a whole just // to flip this flag || on_disk != other.on_disk || inline_storage != other.inline_storage } } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Anonymize, Clone)] #[serde(rename_all = "snake_case", default)] #[anonymize(false)] pub struct HnswGlobalConfig { /// Enable HNSW healing if the ratio of missing points is no more than this value. /// To disable healing completely, set this value to `0.0`. 
#[validate(range(min = 0.0, max = 1.0))] pub healing_threshold: f64, } impl Default for HnswGlobalConfig { fn default() -> Self { Self { healing_threshold: 0.3, } } } const fn default_max_indexing_threads() -> usize { 0 } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Eq, Hash)] #[serde(rename_all = "lowercase")] pub enum CompressionRatio { X4, X8, X16, X32, X64, } #[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema)] #[serde(rename_all = "lowercase")] pub enum ScalarType { #[default] Int8, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct ScalarQuantizationConfig { /// Type of quantization to use /// If `int8` - 8 bit quantization will be used pub r#type: ScalarType, /// Quantile for quantization. Expected value range in [0.5, 1.0]. If not set - use the whole range of values #[serde(skip_serializing_if = "Option::is_none")] #[validate(range(min = 0.5, max = 1.0))] pub quantile: Option<f32>, /// If true - quantized vectors always will be stored in RAM, ignoring the config of main storage #[serde(skip_serializing_if = "Option::is_none")] pub always_ram: Option<bool>, } impl ScalarQuantizationConfig { /// Detect configuration mismatch against `other` that requires rebuilding /// /// Returns true only if both conditions are met: /// - this configuration does not match `other` /// - to effectively change the configuration, a quantization rebuild is required pub fn mismatch_requires_rebuild(&self, other: &Self) -> bool { self != other } } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Validate)] pub struct ScalarQuantization { #[validate(nested)] pub scalar: ScalarQuantizationConfig, } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct ProductQuantizationConfig { pub compression: CompressionRatio, 
#[serde(skip_serializing_if = "Option::is_none")] pub always_ram: Option<bool>, } impl ProductQuantizationConfig { /// Detect configuration mismatch against `other` that requires rebuilding /// /// Returns true only if both conditions are met: /// - this configuration does not match `other` /// - to effectively change the configuration, a quantization rebuild is required pub fn mismatch_requires_rebuild(&self, other: &Self) -> bool { self != other } } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Validate)] pub struct ProductQuantization { #[validate(nested)] pub product: ProductQuantizationConfig, } impl Hash for ScalarQuantizationConfig { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.always_ram.hash(state); self.r#type.hash(state); } } impl Eq for ScalarQuantizationConfig {} #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Eq, Hash, Default)] #[serde(rename_all = "snake_case")] pub enum BinaryQuantizationEncoding { #[default] OneBit, TwoBits, OneAndHalfBits, } impl BinaryQuantizationEncoding { pub fn is_one_bit(&self) -> bool { matches!(self, BinaryQuantizationEncoding::OneBit) } } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct BinaryQuantizationConfig { #[serde(skip_serializing_if = "Option::is_none")] pub always_ram: Option<bool>, #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub encoding: Option<BinaryQuantizationEncoding>, /// Asymmetric quantization configuration allows a query to have different quantization than stored vectors. /// It can increase the accuracy of search at the cost of performance. 
#[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub query_encoding: Option<BinaryQuantizationQueryEncoding>, } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Validate)] pub struct BinaryQuantization { #[validate(nested)] pub binary: BinaryQuantizationConfig, } #[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema, Anonymize)] #[serde(untagged, rename_all = "snake_case")] #[anonymize(false)] pub enum QuantizationConfig { Scalar(ScalarQuantization), Product(ProductQuantization), Binary(BinaryQuantization), } impl QuantizationConfig { /// Detect configuration mismatch against `other` that requires rebuilding /// /// Returns true only if both conditions are met: /// - this configuration does not match `other` /// - to effectively change the configuration, a quantization rebuild is required pub fn mismatch_requires_rebuild(&self, other: &Self) -> bool { self != other } pub fn supports_appendable(&self) -> bool { matches!(self, QuantizationConfig::Binary(_)) } } impl Validate for QuantizationConfig { fn validate(&self) -> Result<(), ValidationErrors> { match self { QuantizationConfig::Scalar(scalar) => scalar.validate(), QuantizationConfig::Product(product) => product.validate(), QuantizationConfig::Binary(binary) => binary.validate(), } } } #[derive( Default, Debug, Deserialize, Serialize, JsonSchema, Anonymize, Clone, Copy, PartialEq, Eq, Hash, )] #[serde(rename_all = "lowercase")] #[anonymize(false)] pub enum BinaryQuantizationQueryEncoding { #[default] Default, Binary, Scalar4Bits, Scalar8Bits, } impl From<ScalarQuantizationConfig> for QuantizationConfig { fn from(config: ScalarQuantizationConfig) -> Self { QuantizationConfig::Scalar(ScalarQuantization { scalar: config }) } } impl From<ProductQuantizationConfig> for QuantizationConfig { fn from(config: ProductQuantizationConfig) -> Self { QuantizationConfig::Product(ProductQuantization { product: config }) } } impl 
From<BinaryQuantizationConfig> for QuantizationConfig { fn from(config: BinaryQuantizationConfig) -> Self { QuantizationConfig::Binary(BinaryQuantization { binary: config }) } } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq, Default, Hash)] pub struct StrictModeSparse { /// Max length of sparse vector #[serde(skip_serializing_if = "Option::is_none")] #[validate(range(min = 1))] pub max_length: Option<usize>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq, Default, Hash)] #[schemars(deny_unknown_fields)] pub struct StrictModeSparseConfig { #[validate(nested)] #[serde(flatten)] pub config: BTreeMap<VectorNameBuf, StrictModeSparse>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Default)] #[schemars(deny_unknown_fields)] pub struct StrictModeSparseConfigOutput { #[serde(flatten)] pub config: BTreeMap<VectorNameBuf, StrictModeSparseOutput>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Anonymize, Clone, PartialEq, Default)] pub struct StrictModeSparseOutput { /// Max length of sparse vector #[serde(skip_serializing_if = "Option::is_none")] #[anonymize(false)] pub max_length: Option<usize>, } impl From<StrictModeSparseConfig> for StrictModeSparseConfigOutput { fn from(config: StrictModeSparseConfig) -> Self { let StrictModeSparseConfig { config } = config;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/rocksdb_backup.rs
lib/segment/src/rocksdb_backup.rs
use std::path::Path; use fs_err as fs; use crate::common::operation_error::{OperationError, OperationResult}; pub fn create(db: &rocksdb::DB, backup_path: &Path) -> OperationResult<()> { if !backup_path.exists() { create_dir_all(backup_path)?; } else if !backup_path.is_dir() { return Err(not_a_directory_error(backup_path)); } else if fs::read_dir(backup_path).unwrap().next().is_some() { return Err(directory_not_empty_error(backup_path)); } backup_engine(backup_path)? .create_new_backup(db) .map_err(|err| { OperationError::service_error(format!("failed to create RocksDB backup: {err}")) }) } pub fn restore(backup_path: &Path, restore_path: &Path) -> OperationResult<()> { backup_engine(backup_path)? .restore_from_latest_backup(restore_path, restore_path, &Default::default()) .map_err(|err| { OperationError::service_error(format!("failed to restore RocksDB backup: {err}")) }) } fn backup_engine(path: &Path) -> OperationResult<rocksdb::backup::BackupEngine> { let options = rocksdb::backup::BackupEngineOptions::new(path).map_err(|err| { OperationError::service_error(format!( "failed to create RocksDB backup engine options: {err}" )) })?; let env = rocksdb::Env::new().map_err(|err| { OperationError::service_error(format!( "failed to create RocksDB backup engine environment: {err}" )) })?; rocksdb::backup::BackupEngine::open(&options, &env).map_err(|err| { OperationError::service_error(format!( "failed to open RocksDB backup engine {path:?}: {err}" )) }) } fn create_dir_all(path: &Path) -> OperationResult<()> { fs::create_dir_all(path).map_err(|err| { OperationError::service_error(format!( "failed to create RocksDB backup directory {path:?}: {err}" )) }) } fn not_a_directory_error(path: &Path) -> OperationError { OperationError::service_error(format!("RocksDB backup path {path:?} is not a directory")) } fn directory_not_empty_error(path: &Path) -> OperationError { OperationError::service_error(format!( "RockDB backup directory {path:?} already exists and is not empty" )) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/compat.rs
lib/segment/src/compat.rs
#![allow(deprecated)] use std::collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use crate::types::{ Distance, HnswConfig, Indexes, PayloadStorageType, QuantizationConfig, SegmentConfig, SegmentState, SeqNumberType, VectorDataConfig, VectorNameBuf, VectorStorageType, }; #[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone)] #[serde(rename_all = "snake_case")] #[deprecated = "use SegmentConfig instead"] pub struct SegmentConfigV5 { pub vector_data: HashMap<VectorNameBuf, VectorDataConfigV5>, /// Type of index used for search pub index: Indexes, /// Type of vector storage pub storage_type: StorageTypeV5, /// Defines payload storage type #[serde(default)] pub payload_storage_type: PayloadStorageType, /// Quantization parameters. If none - quantization is disabled. #[serde(default)] pub quantization_config: Option<QuantizationConfig>, } impl From<SegmentConfigV5> for SegmentConfig { fn from(old_segment: SegmentConfigV5) -> Self { let vector_data = old_segment .vector_data .into_iter() .map(|(vector_name, old_data)| { let new_data = VectorDataConfig { size: old_data.size, distance: old_data.distance, // Use HNSW index if vector specific one is set, or fall back to segment index index: match old_data.hnsw_config { Some(hnsw_config) => Indexes::Hnsw(hnsw_config), None => old_segment.index.clone(), }, // Remove vector specific quantization config if no segment one is set // This is required because in some cases this was incorrectly set on the vector // level quantization_config: old_segment .quantization_config .as_ref() .and(old_data.quantization_config), // Mmap if explicitly on disk, otherwise convert old storage type storage_type: if old_data.on_disk == Some(true) { VectorStorageType::Mmap } else { old_segment.storage_type.into() }, multivector_config: None, datatype: None, }; (vector_name, new_data) }) .collect(); SegmentConfig { vector_data, sparse_vector_data: Default::default(), payload_storage_type: 
old_segment.payload_storage_type, } } } /// Type of vector storage #[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Copy, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] #[serde(tag = "type", content = "options")] #[deprecated] pub enum StorageTypeV5 { // Store vectors in memory and use persistence storage only if vectors are changed #[default] InMemory, // Use memmap to store vectors, a little slower than `InMemory`, but requires little RAM Mmap, } impl From<StorageTypeV5> for VectorStorageType { fn from(old: StorageTypeV5) -> Self { match old { StorageTypeV5::InMemory => Self::Memory, StorageTypeV5::Mmap => Self::Mmap, } } } /// Config of single vector data storage #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)] #[serde(rename_all = "snake_case")] #[deprecated = "use VectorDataConfig instead"] pub struct VectorDataConfigV5 { /// Size of a vectors used pub size: usize, /// Type of distance function used for measuring distance between vectors pub distance: Distance, /// Vector specific HNSW config that overrides collection config #[serde(default)] pub hnsw_config: Option<HnswConfig>, /// Vector specific quantization config that overrides collection config #[serde(default)] pub quantization_config: Option<QuantizationConfig>, /// If true - vectors will not be stored in memory. /// Instead, it will store vectors on mmap-files. /// If enabled, search performance will defined by disk speed /// and fraction of vectors that fit in RAM. 
#[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub on_disk: Option<bool>, } #[derive(Debug, Deserialize, Clone)] #[serde(rename_all = "snake_case")] #[deprecated = "use SegmentState instead"] pub struct SegmentStateV5 { pub version: Option<SeqNumberType>, pub config: SegmentConfigV5, } impl From<SegmentStateV5> for SegmentState { fn from(old: SegmentStateV5) -> Self { let SegmentStateV5 { version, config } = old; Self { initial_version: None, version, config: config.into(), } } } #[cfg(test)] mod tests { use super::*; use crate::types::{ScalarQuantization, ScalarQuantizationConfig}; #[test] fn convert_from_v5_to_newest() { let old_segment = SegmentConfigV5 { vector_data: vec![ ( "vec1".into(), VectorDataConfigV5 { size: 10, distance: Distance::Dot, hnsw_config: Some(HnswConfig { m: 20, ef_construct: 100, full_scan_threshold: 10000, max_indexing_threads: 0, on_disk: None, payload_m: Some(10), inline_storage: None, }), quantization_config: None, on_disk: None, }, ), ( "vec2".into(), VectorDataConfigV5 { size: 10, distance: Distance::Dot, hnsw_config: None, quantization_config: Some(QuantizationConfig::Scalar(ScalarQuantization { scalar: ScalarQuantizationConfig { r#type: Default::default(), quantile: Some(0.99), always_ram: Some(true), }, })), on_disk: None, }, ), ] .into_iter() .collect(), index: Indexes::Hnsw(HnswConfig { m: 25, ef_construct: 120, full_scan_threshold: 10000, max_indexing_threads: 0, on_disk: None, payload_m: None, inline_storage: None, }), storage_type: StorageTypeV5::InMemory, payload_storage_type: PayloadStorageType::default(), quantization_config: None, }; let new_segment: SegmentConfig = old_segment.into(); eprintln!("new = {new_segment:#?}"); match &new_segment.vector_data.get("vec1").unwrap().index { Indexes::Plain { .. } => panic!("expected HNSW index"), Indexes::Hnsw(hnsw) => { assert_eq!(hnsw.m, 20); } } match &new_segment.vector_data.get("vec2").unwrap().index { Indexes::Plain { .. 
} => panic!("expected HNSW index"), Indexes::Hnsw(hnsw) => { assert_eq!(hnsw.m, 25); } } if new_segment .vector_data .get("vec1") .unwrap() .quantization_config .is_some() { panic!("expected no quantization"); } } #[test] fn convert_from_v5_to_newest_2() { let old_segment = SegmentConfigV5 { vector_data: vec![ ( "vec1".into(), VectorDataConfigV5 { size: 10, distance: Distance::Dot, hnsw_config: None, quantization_config: None, on_disk: None, }, ), ( "vec2".into(), VectorDataConfigV5 { size: 10, distance: Distance::Dot, hnsw_config: None, quantization_config: Some(QuantizationConfig::Scalar(ScalarQuantization { scalar: ScalarQuantizationConfig { r#type: Default::default(), quantile: Some(0.99), always_ram: Some(true), }, })), on_disk: None, }, ), ] .into_iter() .collect(), index: Indexes::Hnsw(HnswConfig { m: 25, ef_construct: 120, full_scan_threshold: 10000, max_indexing_threads: 0, on_disk: None, payload_m: None, inline_storage: None, }), storage_type: StorageTypeV5::InMemory, payload_storage_type: PayloadStorageType::default(), quantization_config: Some(QuantizationConfig::Scalar(ScalarQuantization { scalar: ScalarQuantizationConfig { r#type: Default::default(), quantile: Some(0.95), always_ram: Some(true), }, })), }; let new_segment: SegmentConfig = old_segment.into(); eprintln!("new = {new_segment:#?}"); if new_segment .vector_data .get("vec1") .unwrap() .quantization_config .is_some() { panic!("expected no quantization"); } match &new_segment .vector_data .get("vec2") .unwrap() .quantization_config { Some(q) => match q { QuantizationConfig::Scalar(scalar) => { assert_eq!(scalar.scalar.quantile, Some(0.99)); } QuantizationConfig::Product(_) => { panic!("expected scalar quantization") } QuantizationConfig::Binary(_) => { panic!("expected scalar quantization") } }, _ => { panic!("expected quantization") } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/point_mappings.rs
lib/segment/src/id_tracker/point_mappings.rs
use std::collections::BTreeMap; #[cfg(test)] use std::collections::btree_map::Entry; use std::iter; use bitvec::prelude::{BitSlice, BitVec}; use byteorder::LittleEndian; #[cfg(test)] use common::bitpacking::make_bitmask; use common::types::PointOffsetType; use itertools::Itertools; #[cfg(test)] use rand::Rng as _; use rand::distr::Distribution; #[cfg(test)] use rand::rngs::StdRng; #[cfg(test)] use rand::seq::SliceRandom as _; use uuid::Uuid; use crate::types::PointIdType; /// Used endianness for storing PointMapping-files. pub type FileEndianess = LittleEndian; #[derive(Clone, PartialEq, Default, Debug)] pub struct PointMappings { /// `deleted` specifies which points of internal_to_external was deleted. /// It is possible that `deleted` can be longer or shorter than `internal_to_external`. /// - if `deleted` is longer, then extra bits should be set to `false` and ignored. /// - if `deleted` is shorter, then extra indices are as if the bits were set to `true`. deleted: BitVec, internal_to_external: Vec<PointIdType>, // Having two separate maps allows us iterating only over one type at a time without having to filter. external_to_internal_num: BTreeMap<u64, PointOffsetType>, external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType>, } impl PointMappings { pub fn new( deleted: BitVec, internal_to_external: Vec<PointIdType>, external_to_internal_num: BTreeMap<u64, PointOffsetType>, external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType>, ) -> Self { Self { deleted, internal_to_external, external_to_internal_num, external_to_internal_uuid, } } /// ToDo: this function is temporary and should be removed before PR is merged pub fn deconstruct( self, ) -> ( BitVec, Vec<PointIdType>, BTreeMap<u64, PointOffsetType>, BTreeMap<Uuid, PointOffsetType>, ) { ( self.deleted, self.internal_to_external, self.external_to_internal_num, self.external_to_internal_uuid, ) } /// Number of points, excluding deleted ones. 
pub(crate) fn available_point_count(&self) -> usize { self.external_to_internal_num.len() + self.external_to_internal_uuid.len() } pub(crate) fn deleted(&self) -> &BitSlice { &self.deleted } pub(crate) fn internal_id(&self, external_id: &PointIdType) -> Option<PointOffsetType> { match external_id { PointIdType::NumId(num) => self.external_to_internal_num.get(num).copied(), PointIdType::Uuid(uuid) => self.external_to_internal_uuid.get(uuid).copied(), } } pub(crate) fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { if *self.deleted.get(internal_id as usize)? { return None; } self.internal_to_external .get(internal_id as usize) .map(|i| i.into()) } pub(crate) fn drop(&mut self, external_id: PointIdType) -> Option<PointOffsetType> { let internal_id = match external_id { // We "temporarily" remove existing points from the BTreeMaps without writing them to disk // because we remove deleted points of a previous load directly when loading. PointIdType::NumId(num) => self.external_to_internal_num.remove(&num), PointIdType::Uuid(uuid) => self.external_to_internal_uuid.remove(&uuid), }; // Also reset inverse mapping if let Some(internal_id) = internal_id { self.internal_to_external[internal_id as usize] = PointIdType::NumId(u64::MAX); } if let Some(internal_id) = &internal_id { self.deleted.set(*internal_id as usize, true); } internal_id } pub(crate) fn iter_random( &self, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { let rng = rand::rng(); let max_internal = self.internal_to_external.len(); if max_internal == 0 { return Box::new(iter::empty()); } let uniform = rand::distr::Uniform::new(0, max_internal) .expect("above check guarantees max_internal > 0"); let iter = Distribution::sample_iter(uniform, rng) // TODO: this is not efficient if `max_internal` is large and we iterate over most of them, // but it's good enough for low limits. 
// // We could improve it by using a variable-period PRNG to adjust depending on the number of available points. .unique() .take(max_internal) .filter_map(move |i| { if self.deleted[i] { None } else { Some((self.internal_to_external[i], i as PointOffsetType)) } }); Box::new(iter) } pub(crate) fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { let full_num_iter = || { self.external_to_internal_num .iter() .map(|(k, v)| (PointIdType::NumId(*k), *v)) }; let offset_num_iter = |offset: u64| { self.external_to_internal_num .range(offset..) .map(|(k, v)| (PointIdType::NumId(*k), *v)) }; let full_uuid_iter = || { self.external_to_internal_uuid .iter() .map(|(k, v)| (PointIdType::Uuid(*k), *v)) }; let offset_uuid_iter = |offset: Uuid| { self.external_to_internal_uuid .range(offset..) .map(|(k, v)| (PointIdType::Uuid(*k), *v)) }; match external_id { None => { let iter_num = full_num_iter(); let iter_uuid = full_uuid_iter(); // order is important here, we want to iterate over the u64 ids first Box::new(iter_num.chain(iter_uuid)) } Some(offset) => match offset { PointIdType::NumId(idx) => { // Because u64 keys are less that uuid key, we can just use the full iterator for uuid let iter_num = offset_num_iter(idx); let iter_uuid = full_uuid_iter(); // order is important here, we want to iterate over the u64 ids first Box::new(iter_num.chain(iter_uuid)) } PointIdType::Uuid(uuid) => { // if offset is a uuid, we can only iterate over uuids Box::new(offset_uuid_iter(uuid)) } }, } } pub(crate) fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { let iter_num = self .external_to_internal_num .keys() .map(|i| PointIdType::NumId(*i)); let iter_uuid = self .external_to_internal_uuid .keys() .map(|i| PointIdType::Uuid(*i)); // order is important here, we want to iterate over the u64 ids first Box::new(iter_num.chain(iter_uuid)) } pub(crate) fn iter_internal(&self) -> Box<dyn Iterator<Item = 
PointOffsetType> + '_> { Box::new( (0..self.internal_to_external.len() as PointOffsetType) .filter(move |i| !self.deleted[*i as usize]), ) } #[cfg(test)] pub(crate) fn iter_internal_raw( &self, ) -> impl Iterator<Item = (PointOffsetType, PointIdType)> + '_ { self.internal_to_external .iter() .enumerate() .map(|(offset, point_id)| (offset as _, *point_id)) } pub(crate) fn is_deleted_point(&self, key: PointOffsetType) -> bool { let key = key as usize; if key >= self.deleted.len() { return true; } self.deleted[key] } /// Sets the link between an external and internal id. /// Returns the previous internal id if it existed. pub(crate) fn set_link( &mut self, external_id: PointIdType, internal_id: PointOffsetType, ) -> Option<PointOffsetType> { let old_internal_id = match external_id { PointIdType::NumId(idx) => self.external_to_internal_num.insert(idx, internal_id), PointIdType::Uuid(uuid) => self.external_to_internal_uuid.insert(uuid, internal_id), }; let internal_id = internal_id as usize; if internal_id >= self.internal_to_external.len() { self.internal_to_external .resize(internal_id + 1, PointIdType::NumId(u64::MAX)); } if internal_id >= self.deleted.len() { self.deleted.resize(internal_id + 1, true); } if let Some(old_internal_id) = &old_internal_id { let old_internal_id = *old_internal_id as usize; if old_internal_id != internal_id { self.deleted.set(old_internal_id, true); } } self.internal_to_external[internal_id] = external_id; self.deleted.set(internal_id, false); old_internal_id } pub(crate) fn total_point_count(&self) -> usize { self.internal_to_external.len() } /// Generate a random [`PointMappings`]. #[cfg(test)] pub fn random(rand: &mut StdRng, total_size: u32) -> Self { Self::random_with_params(rand, total_size, total_size, 128) } /// Generate a random [`PointMappings`] using the following parameters: /// /// - `total_size`: total number of points, including deleted ones. /// - `preserved_size`: number of points that are not deleted. 
/// - `bits_in_id`: number of bits in generated ids. /// Decrease this value to restrict the amount of unique ids across all /// multiple invocations of this function. /// E.g. if `bits_in_id` is 8, then only 512 unique ids will be generated. /// (256 uuids + 256 u64s) #[cfg(test)] pub fn random_with_params( rand: &mut StdRng, total_size: u32, preserved_size: u32, bits_in_id: u8, ) -> Self { let mask: u128 = make_bitmask(bits_in_id); let mask_u64: u64 = mask as u64; const UUID_LIKELYNESS: f64 = 0.5; let mut external_to_internal_num = BTreeMap::new(); let mut external_to_internal_uuid = BTreeMap::new(); let mut internal_ids = (0..total_size).collect_vec(); internal_ids.shuffle(rand); internal_ids.truncate(preserved_size as usize); let mut deleted = BitVec::repeat(true, total_size as usize); for id in &internal_ids { deleted.set(*id as usize, false); } let internal_to_external = (0..total_size) .map(|pos| { loop { if rand.random_bool(UUID_LIKELYNESS) { let uuid = Uuid::from_u128(rand.random_range(0..=mask)); if let Entry::Vacant(e) = external_to_internal_uuid.entry(uuid) { e.insert(pos); return PointIdType::Uuid(uuid); } } else { let num = rand.random_range(0..=mask_u64); if let Entry::Vacant(e) = external_to_internal_num.entry(num) { e.insert(pos); return PointIdType::NumId(num); } } } }) .collect(); Self { deleted, internal_to_external, external_to_internal_num, external_to_internal_uuid, } } #[cfg(debug_assertions)] pub fn assert_mappings(&self) { for (external_id, internal_id) in self.external_to_internal_num.iter() { debug_assert!( self.internal_to_external[*internal_id as usize] == PointIdType::NumId(*external_id), "Internal id {internal_id} is mapped to external id {}, but should be {}", self.internal_to_external[*internal_id as usize], PointIdType::NumId(*external_id), ); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/in_memory_id_tracker.rs
lib/segment/src/id_tracker/in_memory_id_tracker.rs
use std::path::PathBuf; use bitvec::prelude::BitSlice; use common::types::PointOffsetType; #[cfg(test)] use rand::Rng as _; #[cfg(test)] use rand::rngs::StdRng; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::id_tracker::point_mappings::PointMappings; use crate::id_tracker::{DELETED_POINT_VERSION, IdTracker}; use crate::types::{PointIdType, SeqNumberType}; /// A non-persistent ID tracker for faster and more efficient building of `ImmutableIdTracker`. #[derive(Debug, Default)] pub struct InMemoryIdTracker { internal_to_version: Vec<SeqNumberType>, mappings: PointMappings, } impl InMemoryIdTracker { pub fn new() -> Self { Self::default() } pub fn into_internal(self) -> (Vec<SeqNumberType>, PointMappings) { (self.internal_to_version, self.mappings) } /// Generate a random [`InMemoryIdTracker`]. #[cfg(test)] pub fn random(rand: &mut StdRng, size: u32, preserved_size: u32, bits_in_id: u8) -> Self { Self { internal_to_version: vec![rand.random(); size as usize], mappings: PointMappings::random_with_params(rand, size, preserved_size, bits_in_id), } } } impl IdTracker for InMemoryIdTracker { fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType> { self.internal_to_version.get(internal_id as usize).copied() } fn set_internal_version( &mut self, internal_id: PointOffsetType, version: SeqNumberType, ) -> OperationResult<()> { if self.external_id(internal_id).is_some() { if let Some(old_version) = self.internal_to_version.get_mut(internal_id as usize) { *old_version = version; } else { self.internal_to_version.resize(internal_id as usize + 1, 0); self.internal_to_version[internal_id as usize] = version; } } Ok(()) } fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> { self.mappings.internal_id(&external_id) } fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { self.mappings.external_id(internal_id) } fn set_link( &mut self, external_id: PointIdType, 
internal_id: PointOffsetType, ) -> OperationResult<()> { let _replaced_internal_id = self.mappings.set_link(external_id, internal_id); Ok(()) } fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> { // Unset version first because it still requires the mapping to exist if let Some(internal_id) = self.internal_id(external_id) { self.set_internal_version(internal_id, DELETED_POINT_VERSION)?; } self.mappings.drop(external_id); Ok(()) } fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> { // Unset version first because it still requires the mapping to exist self.set_internal_version(internal_id, DELETED_POINT_VERSION)?; if let Some(external_id) = self.mappings.external_id(internal_id) { self.mappings.drop(external_id); } Ok(()) } fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { self.mappings.iter_external() } fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { self.mappings.iter_internal() } fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_from(external_id) } fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_random() } /// Creates a flusher function, that writes the deleted points bitvec to disk. fn mapping_flusher(&self) -> Flusher { debug_assert!(false, "InMemoryIdTracker should not be flushed"); Box::new(|| Ok(())) } /// Creates a flusher function, that writes the points versions to disk. 
fn versions_flusher(&self) -> Flusher { debug_assert!(false, "InMemoryIdTracker should not be flushed"); Box::new(|| Ok(())) } fn total_point_count(&self) -> usize { self.mappings.total_point_count() } fn available_point_count(&self) -> usize { self.mappings.available_point_count() } fn deleted_point_count(&self) -> usize { self.total_point_count() - self.available_point_count() } fn deleted_point_bitslice(&self) -> &BitSlice { self.mappings.deleted() } fn is_deleted_point(&self, key: PointOffsetType) -> bool { self.mappings.is_deleted_point(key) } fn name(&self) -> &'static str { "in memory id tracker" } fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> { Box::new( self.internal_to_version .iter() .enumerate() .map(|(i, version)| (i as PointOffsetType, *version)), ) } fn files(&self) -> Vec<PathBuf> { debug_assert!(false, "InMemoryIdTracker should not be persisted"); vec![] } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/mod.rs
lib/segment/src/id_tracker/mod.rs
pub mod compressed; pub mod id_tracker_base; pub mod immutable_id_tracker; pub mod in_memory_id_tracker; pub mod mutable_id_tracker; pub mod point_mappings; #[cfg(feature = "rocksdb")] pub mod simple_id_tracker; use common::types::PointOffsetType; pub use id_tracker_base::*; use itertools::Itertools as _; use crate::types::{ExtendedPointId, PointIdType}; /// Calling [`for_each_unique_point`] will yield this struct for each unique /// point. #[derive(Debug, Clone, Copy)] pub struct MergedPointId { /// Unique external ID. If the same external ID is present in multiple /// trackers, the item with the highest version takes precedence. pub external_id: ExtendedPointId, /// An index within `id_trackers` iterator that points to the [`IdTracker`] /// that contains this point. pub tracker_index: usize, /// The internal ID of the point within the [`IdTracker`] that contains it. pub internal_id: PointOffsetType, /// The version of the point within the [`IdTracker`] that contains it. pub version: u64, } /// Calls a closure for each unique point from multiple ID trackers. /// /// Discard points that have no version. 
pub fn for_each_unique_point<'a>( id_trackers: impl Iterator<Item = &'a (impl IdTracker + ?Sized + 'a)>, mut f: impl FnMut(MergedPointId), ) { let mut iter = id_trackers .enumerate() .map(|(segment_index, id_tracker)| { id_tracker .iter_from(None) .filter_map(move |(external_id, internal_id)| { let version = id_tracker.internal_version(internal_id); // a point without a version had an interrupted flush sequence and should be discarded version.map(|version| MergedPointId { external_id, tracker_index: segment_index, internal_id, version, }) }) }) .kmerge_by(|a, b| a.external_id < b.external_id); let Some(mut best_item) = iter.next() else { return; }; for item in iter { if best_item.external_id == item.external_id { if best_item.version < item.version { best_item = item; } } else { f(best_item); best_item = item; } } f(best_item); } impl From<&ExtendedPointId> for PointIdType { fn from(point_id: &ExtendedPointId) -> Self { match point_id { ExtendedPointId::NumId(idx) => PointIdType::NumId(*idx), ExtendedPointId::Uuid(uuid) => PointIdType::Uuid(*uuid), } } } #[cfg(test)] mod tests { use std::collections::{HashMap, hash_map}; use in_memory_id_tracker::InMemoryIdTracker; use rand::SeedableRng as _; use rand::rngs::StdRng; use rstest::rstest; use super::*; #[rstest] fn test_for_each_unique_point(#[values(0, 1, 5)] tracker_count: usize) { let mut rand = StdRng::seed_from_u64(42); let id_trackers = (0..tracker_count) .map(|_| InMemoryIdTracker::random(&mut rand, 1000, 500, 10)) .collect_vec(); let mut collisions = 0; // Naive HashMap-based implementation of for_each_unique_point. 
let mut expected = HashMap::<ExtendedPointId, MergedPointId>::new(); for (tracker_index, id_tracker) in id_trackers.iter().enumerate() { for (external_id, internal_id) in id_tracker.iter_from(None) { let version = id_tracker.internal_version(internal_id).unwrap(); let merged_point_id = MergedPointId { external_id, tracker_index, internal_id, version, }; match expected.entry(external_id) { hash_map::Entry::Occupied(mut entry) => { collisions += 1; if entry.get().version < version { entry.insert(merged_point_id); } } hash_map::Entry::Vacant(entry) => { entry.insert(merged_point_id); } } } } if tracker_count > 1 { // Ensure generated id_trackers have a lot of common points, so we // are testing the merge logic. assert!(collisions > 500); } else { // No collisions expected for a single or no id_trackers. assert_eq!(collisions, 0); } if tracker_count == 0 { assert!(expected.is_empty()); } for_each_unique_point(id_trackers.iter(), |merged_point_id| { let v = expected.remove(&merged_point_id.external_id).unwrap(); assert_eq!(merged_point_id.tracker_index, v.tracker_index); assert_eq!(merged_point_id.internal_id, v.internal_id); assert_eq!(merged_point_id.version, v.version); }); assert!(expected.is_empty()); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/simple_id_tracker.rs
lib/segment/src/id_tracker/simple_id_tracker.rs
use std::collections::BTreeMap; use std::path::PathBuf; use std::sync::Arc; use bincode; use bitvec::prelude::{BitSlice, BitVec}; use common::types::PointOffsetType; use parking_lot::RwLock; use rocksdb::DB; use serde::{Deserialize, Serialize}; use uuid::Uuid; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::common::rocksdb_buffered_update_wrapper::DatabaseColumnScheduledUpdateWrapper; use crate::common::rocksdb_wrapper::{DB_MAPPING_CF, DB_VERSIONS_CF, DatabaseColumnWrapper}; use crate::id_tracker::IdTracker; use crate::id_tracker::point_mappings::PointMappings; use crate::types::{ExtendedPointId, PointIdType, SeqNumberType}; /// Point Id type used for storing ids internally /// Should be serializable by `bincode`, therefore is not untagged. #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] enum StoredPointId { NumId(u64), Uuid(Uuid), String(String), } #[derive(Debug)] pub struct SimpleIdTracker { internal_to_version: Vec<SeqNumberType>, mapping_db_wrapper: DatabaseColumnScheduledUpdateWrapper, versions_db_wrapper: DatabaseColumnScheduledUpdateWrapper, mappings: PointMappings, } impl SimpleIdTracker { pub fn open(store: Arc<RwLock<DB>>) -> OperationResult<Self> { let mut deleted = BitVec::new(); let mut internal_to_external: Vec<PointIdType> = Default::default(); let mut external_to_internal_num: BTreeMap<u64, PointOffsetType> = Default::default(); let mut external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType> = Default::default(); let mapping_db_wrapper = DatabaseColumnScheduledUpdateWrapper::new( DatabaseColumnWrapper::new(store.clone(), DB_MAPPING_CF), ); for (key, val) in mapping_db_wrapper.lock_db().iter()? 
{ let external_id = Self::restore_key(&key); let internal_id: PointOffsetType = bincode::deserialize::<PointOffsetType>(&val).unwrap(); if internal_id as usize >= internal_to_external.len() { internal_to_external.resize(internal_id as usize + 1, PointIdType::NumId(u64::MAX)); } if internal_id as usize >= deleted.len() { deleted.resize(internal_id as usize + 1, true); } let replaced_id = internal_to_external[internal_id as usize]; internal_to_external[internal_id as usize] = external_id; if !deleted[internal_id as usize] { // Fixing corrupted mapping - this id should be recovered from WAL // This should not happen in normal operation, but it can happen if // the database is corrupted. log::warn!( "removing duplicated external id {external_id} in internal id {replaced_id}", ); match replaced_id { PointIdType::NumId(idx) => { external_to_internal_num.remove(&idx); } PointIdType::Uuid(uuid) => { external_to_internal_uuid.remove(&uuid); } } } deleted.set(internal_id as usize, false); match external_id { PointIdType::NumId(idx) => { external_to_internal_num.insert(idx, internal_id); } PointIdType::Uuid(uuid) => { external_to_internal_uuid.insert(uuid, internal_id); } } } let mut internal_to_version: Vec<SeqNumberType> = Vec::with_capacity(internal_to_external.len()); let versions_db_wrapper = DatabaseColumnScheduledUpdateWrapper::new( DatabaseColumnWrapper::new(store, DB_VERSIONS_CF), ); for (key, val) in versions_db_wrapper.lock_db().iter()? 
{ let external_id = Self::restore_key(&key); let version: SeqNumberType = bincode::deserialize(&val).unwrap(); let internal_id = match external_id { PointIdType::NumId(idx) => external_to_internal_num.get(&idx).copied(), PointIdType::Uuid(uuid) => external_to_internal_uuid.get(&uuid).copied(), }; if let Some(internal_id) = internal_id { if internal_id as usize >= internal_to_version.len() { internal_to_version.resize(internal_id as usize + 1, 0); } internal_to_version[internal_id as usize] = version; } else { log::debug!( "Found version: {version} without internal id, external id: {external_id}" ); } } let mappings = PointMappings::new( deleted, internal_to_external, external_to_internal_num, external_to_internal_uuid, ); #[cfg(debug_assertions)] mappings.assert_mappings(); Ok(SimpleIdTracker { internal_to_version, mapping_db_wrapper, versions_db_wrapper, mappings, }) } fn store_key(external_id: &PointIdType) -> Vec<u8> { bincode::serialize(&StoredPointId::from(external_id)).unwrap() } fn restore_key(data: &[u8]) -> PointIdType { let stored_external_id: StoredPointId = bincode::deserialize(data).unwrap(); PointIdType::from(stored_external_id) } fn delete_key(&self, external_id: &PointIdType) -> OperationResult<()> { self.mapping_db_wrapper .remove(Self::store_key(external_id))?; self.versions_db_wrapper .remove(Self::store_key(external_id))?; Ok(()) } fn persist_key( &self, external_id: &PointIdType, internal_id: PointOffsetType, ) -> OperationResult<()> { self.mapping_db_wrapper.put( Self::store_key(external_id), bincode::serialize(&internal_id).unwrap(), ) } /// Destroy this simple ID tracker, remove persisted data from RocksDB pub fn destroy(self) -> OperationResult<()> { self.mapping_db_wrapper.remove_column_family()?; self.versions_db_wrapper.remove_column_family()?; Ok(()) } /// Iterate over all point versions /// /// Includes deleted points. 
pub(crate) fn iter_versions(&self) -> impl Iterator<Item = (PointOffsetType, SeqNumberType)> { self.internal_to_version .iter() .enumerate() .map(|(internal_id, &version)| (internal_id as PointOffsetType, version)) } } impl IdTracker for SimpleIdTracker { fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType> { self.internal_to_version.get(internal_id as usize).copied() } fn set_internal_version( &mut self, internal_id: PointOffsetType, version: SeqNumberType, ) -> OperationResult<()> { if let Some(external_id) = self.external_id(internal_id) { if internal_id as usize >= self.internal_to_version.len() { #[cfg(debug_assertions)] { if internal_id as usize > self.internal_to_version.len() + 1 { log::info!( "Resizing versions is initializing larger range {} -> {}", self.internal_to_version.len(), internal_id + 1, ); } } self.internal_to_version.resize(internal_id as usize + 1, 0); } self.internal_to_version[internal_id as usize] = version; self.versions_db_wrapper.put( Self::store_key(&external_id), bincode::serialize(&version).unwrap(), )?; } Ok(()) } fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> { self.mappings.internal_id(&external_id) } fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { self.mappings.external_id(internal_id) } fn set_link( &mut self, external_id: PointIdType, internal_id: PointOffsetType, ) -> OperationResult<()> { self.mappings.set_link(external_id, internal_id); self.persist_key(&external_id, internal_id as _)?; Ok(()) } fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> { self.mappings.drop(external_id); self.delete_key(&external_id)?; Ok(()) } fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> { if let Some(external_id) = self.mappings.external_id(internal_id) { self.mappings.drop(external_id); self.delete_key(&external_id)?; } Ok(()) } fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { 
self.mappings.iter_external() } fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { self.mappings.iter_internal() } fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_from(external_id) } fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_random() } fn total_point_count(&self) -> usize { self.mappings.total_point_count() } fn available_point_count(&self) -> usize { self.mappings.available_point_count() } fn deleted_point_count(&self) -> usize { self.total_point_count() - self.available_point_count() } /// Creates a flusher function, that persists the removed points in the mapping database /// and flushes the mapping to disk. /// This function should be called _before_ flushing the version database. fn mapping_flusher(&self) -> Flusher { self.mapping_db_wrapper.flusher() } /// Creates a flusher function, that persists the removed points in the version database /// and flushes the version database to disk. /// This function should be called _after_ flushing the mapping database. 
fn versions_flusher(&self) -> Flusher { self.versions_db_wrapper.flusher() } fn is_deleted_point(&self, key: PointOffsetType) -> bool { self.mappings.is_deleted_point(key) } fn deleted_point_bitslice(&self) -> &BitSlice { self.mappings.deleted() } fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> { Box::new( self.internal_to_version .iter() .enumerate() .map(|(i, version)| (i as PointOffsetType, *version)), ) } fn name(&self) -> &'static str { "simple id tracker" } fn files(&self) -> Vec<PathBuf> { vec![] } } impl From<&ExtendedPointId> for StoredPointId { fn from(point_id: &ExtendedPointId) -> Self { match point_id { ExtendedPointId::NumId(idx) => StoredPointId::NumId(*idx), ExtendedPointId::Uuid(uuid) => StoredPointId::Uuid(*uuid), } } } impl From<ExtendedPointId> for StoredPointId { fn from(point_id: ExtendedPointId) -> Self { Self::from(&point_id) } } impl From<&StoredPointId> for ExtendedPointId { fn from(point_id: &StoredPointId) -> Self { match point_id { StoredPointId::NumId(idx) => ExtendedPointId::NumId(*idx), StoredPointId::Uuid(uuid) => ExtendedPointId::Uuid(*uuid), StoredPointId::String(str) => { unimplemented!("cannot convert internal string id '{str}' to external id") } } } } impl From<StoredPointId> for ExtendedPointId { fn from(point_id: StoredPointId) -> Self { match point_id { StoredPointId::NumId(idx) => ExtendedPointId::NumId(idx), StoredPointId::Uuid(uuid) => ExtendedPointId::Uuid(uuid), StoredPointId::String(str) => { unimplemented!("cannot convert internal string id '{str}' to external id") } } } } #[cfg(test)] mod tests { use std::collections::{HashMap, HashSet}; use itertools::Itertools; use rand::rngs::StdRng; use rand::{RngCore, SeedableRng}; use serde::de::DeserializeOwned; use tempfile::Builder; use super::*; use crate::common::rocksdb_wrapper::open_db; use crate::segment_constructor::migrate_rocksdb_id_tracker_to_mutable; const RAND_SEED: u64 = 42; const DEFAULT_VERSION: 
SeqNumberType = 42; pub const TEST_POINTS: &[PointIdType] = &[ PointIdType::NumId(100), PointIdType::Uuid(Uuid::from_u128(123_u128)), PointIdType::Uuid(Uuid::from_u128(156_u128)), PointIdType::NumId(150), PointIdType::NumId(120), PointIdType::Uuid(Uuid::from_u128(12_u128)), PointIdType::NumId(180), PointIdType::NumId(110), PointIdType::NumId(115), PointIdType::Uuid(Uuid::from_u128(673_u128)), PointIdType::NumId(190), PointIdType::NumId(177), PointIdType::Uuid(Uuid::from_u128(971_u128)), ]; fn check_bincode_serialization< T: Serialize + DeserializeOwned + PartialEq + std::fmt::Debug, >( record: T, ) { let binary_entity = bincode::serialize(&record).expect("serialization ok"); let de_record: T = bincode::deserialize(&binary_entity).expect("deserialization ok"); assert_eq!(record, de_record); } #[test] fn test_serialization() { check_bincode_serialization(StoredPointId::NumId(123)); check_bincode_serialization(StoredPointId::Uuid(Uuid::from_u128(123_u128))); check_bincode_serialization(StoredPointId::String("hello".to_string())); } #[test] fn test_iterator() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let db = open_db(dir.path(), &[DB_MAPPING_CF, DB_VERSIONS_CF]).unwrap(); let mut id_tracker = SimpleIdTracker::open(db).unwrap(); id_tracker.set_link(200.into(), 0).unwrap(); id_tracker.set_link(100.into(), 1).unwrap(); id_tracker.set_link(150.into(), 2).unwrap(); id_tracker.set_link(120.into(), 3).unwrap(); id_tracker.set_link(180.into(), 4).unwrap(); id_tracker.set_link(110.into(), 5).unwrap(); id_tracker.set_link(115.into(), 6).unwrap(); id_tracker.set_link(190.into(), 7).unwrap(); id_tracker.set_link(177.into(), 8).unwrap(); id_tracker.set_link(118.into(), 9).unwrap(); let first_four = id_tracker.iter_from(None).take(4).collect_vec(); assert_eq!(first_four.len(), 4); assert_eq!(first_four[0].0, 100.into()); let last = id_tracker.iter_from(Some(first_four[3].0)).collect_vec(); assert_eq!(last.len(), 7); } #[test] fn test_mixed_types_iterator() 
{ let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let db = open_db(dir.path(), &[DB_MAPPING_CF, DB_VERSIONS_CF]).unwrap(); let mut id_tracker = SimpleIdTracker::open(db).unwrap(); let mut values: Vec<PointIdType> = vec![ 100.into(), PointIdType::Uuid(Uuid::from_u128(123_u128)), PointIdType::Uuid(Uuid::from_u128(156_u128)), 150.into(), 120.into(), PointIdType::Uuid(Uuid::from_u128(12_u128)), 180.into(), 110.into(), 115.into(), PointIdType::Uuid(Uuid::from_u128(673_u128)), 190.into(), 177.into(), PointIdType::Uuid(Uuid::from_u128(971_u128)), ]; for (id, value) in values.iter().enumerate() { id_tracker.set_link(*value, id as PointOffsetType).unwrap(); } let sorted_from_tracker = id_tracker.iter_from(None).map(|(k, _)| k).collect_vec(); values.sort(); assert_eq!(sorted_from_tracker, values); } /// Create RocksDB based ID tracker with mappings and various mutations. /// Migrate it to the mutable ID tracker and assert that the mappings are correct. /// /// Test based upton [`super::mutable_id_tracker::tests::test_store_load_mutated`] #[test] fn test_migrate_simple_to_mutable() { let mut rng = StdRng::seed_from_u64(RAND_SEED); let db_dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let db = open_db(db_dir.path(), &[DB_MAPPING_CF, DB_VERSIONS_CF]).unwrap(); // Create RocksDB ID tracker and insert test points let mut id_tracker = SimpleIdTracker::open(db).unwrap(); for value in TEST_POINTS.iter() { let internal_id = id_tracker.total_point_count() as PointOffsetType; id_tracker.set_link(*value, internal_id).unwrap(); id_tracker .set_internal_version(internal_id, DEFAULT_VERSION) .unwrap() } // Mutate mappings let mut dropped_points = HashSet::new(); let mut custom_version = HashMap::new(); for (index, point) in TEST_POINTS.iter().enumerate() { if index % 2 == 0 { continue; } if index % 3 == 0 { id_tracker.drop(*point).unwrap(); dropped_points.insert(*point); continue; } if index % 5 == 0 { let new_version = rng.next_u64(); id_tracker 
.set_internal_version(index as PointOffsetType, new_version) .unwrap(); custom_version.insert(index as PointOffsetType, new_version); } } let available_point_count = id_tracker.available_point_count(); let total_point_count = id_tracker.total_point_count(); // Migrate from RocksDB to mutable ID tracker let segment_dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let id_tracker = migrate_rocksdb_id_tracker_to_mutable(id_tracker, segment_dir.path()) .expect("failed to migrate from RocksDB to mutable"); // We can drop RocksDB storage now db_dir.close().expect("failed to drop RocksDB storage"); // Assert point counts // The mutable ID tracker recognizes deletions because there are gaps in mappings // The available and total point counts remains the same, because the last point mapping was not deleted assert_eq!(id_tracker.available_point_count(), available_point_count); assert_eq!(id_tracker.total_point_count(), total_point_count); // Assert mapping correctness for (index, point) in TEST_POINTS.iter().enumerate() { let internal_id = index as PointOffsetType; if dropped_points.contains(point) { assert!(id_tracker.is_deleted_point(internal_id)); assert_eq!(id_tracker.external_id(internal_id), None); assert!(id_tracker.mappings.internal_id(point).is_none()); continue; } // Check version let expect_version = custom_version .get(&internal_id) .copied() .unwrap_or(DEFAULT_VERSION); assert_eq!( id_tracker.internal_version(internal_id), Some(expect_version), ); // Check that unmodified points still haven't changed. assert_eq!( id_tracker.external_id(index as PointOffsetType), Some(*point), ); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/id_tracker_base.rs
lib/segment/src/id_tracker/id_tracker_base.rs
use std::fmt; use std::path::PathBuf; use bitvec::prelude::BitSlice; use common::ext::BitSliceExt as _; use common::types::PointOffsetType; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use super::in_memory_id_tracker::InMemoryIdTracker; use super::mutable_id_tracker::MutableIdTracker; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::id_tracker::immutable_id_tracker::ImmutableIdTracker; #[cfg(feature = "rocksdb")] use crate::id_tracker::simple_id_tracker::SimpleIdTracker; use crate::types::{PointIdType, SeqNumberType}; /// Sampling randomness seed /// /// Using seeded randomness so search results don't show randomness or 'inconsistencies' which /// would otherwise be introduced by HNSW/ID tracker point sampling. const SEED: u64 = 0b1011000011011110001110010101001010001011001101001010010001111010; /// This version should be assigned to a point when it is deleted. /// It does not mean a point with this version is always deleted. pub const DELETED_POINT_VERSION: SeqNumberType = 0; /// Trait for point ids tracker. /// /// This tracker is used to convert external (i.e. user-facing) point id into internal point id /// as well as for keeping track on point version /// Internal ids are useful for contiguous-ness pub trait IdTracker: fmt::Debug { fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType>; fn set_internal_version( &mut self, internal_id: PointOffsetType, version: SeqNumberType, ) -> OperationResult<()>; /// Returns internal ID of the point, which is used inside this segment /// /// Excludes soft deleted points. fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType>; /// Return external ID for internal point, defined by user /// /// Excludes soft deleted points. 
fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType>; /// Set mapping fn set_link( &mut self, external_id: PointIdType, internal_id: PointOffsetType, ) -> OperationResult<()>; /// Drop mapping fn drop(&mut self, external_id: PointIdType) -> OperationResult<()>; /// Same as `drop`, but by internal ID /// If mapping doesn't exist, still removes( unsets ) version. fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()>; /// Iterate over all external IDs /// /// Count should match `available_point_count`, excludes soft deleted points. fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_>; /// Iterate over internal IDs (offsets) /// /// Count should match `total_point_count`, excludes soft deleted points. fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_>; /// Iterate over internal IDs (offsets) /// /// - excludes soft deleted points /// - excludes flagged items from `exclude_bitslice` fn iter_internal_excluding<'a>( &'a self, exclude_bitslice: &'a BitSlice, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { Box::new( self.iter_internal() .filter(|point| !exclude_bitslice.get_bit(*point as usize).unwrap_or(false)), ) } /// Iterate starting from a given ID /// /// Excludes soft deleted points. fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_>; /// Iterate over internal IDs in a random order /// /// Excludes soft deleted points. 
fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_>; /// Flush id mapping to disk fn mapping_flusher(&self) -> Flusher; /// Flush points versions to disk fn versions_flusher(&self) -> Flusher; /// Number of total points /// /// - includes soft deleted points fn total_point_count(&self) -> usize; /// Number of available points /// /// - excludes soft deleted points fn available_point_count(&self) -> usize { self.total_point_count() - self.deleted_point_count() } /// Number of deleted points fn deleted_point_count(&self) -> usize; /// Get [`BitSlice`] representation for deleted points with deletion flags /// /// The size of this slice is not guaranteed. It may be smaller/larger than the number of /// vectors in this segment. fn deleted_point_bitslice(&self) -> &BitSlice; /// Check whether the given point is soft deleted fn is_deleted_point(&self, internal_id: PointOffsetType) -> bool; fn name(&self) -> &'static str; /// Iterator over `n` random IDs which are not deleted /// /// A [`BitSlice`] of deleted vectors may optionally be given to also consider deleted named /// vectors. 
fn sample_ids<'a>( &'a self, deleted_vector_bitslice: Option<&'a BitSlice>, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { // Use seeded randomness, prevents 'inconsistencies' in search results with sampling let mut rng = StdRng::seed_from_u64(SEED); let total = self.total_point_count() as PointOffsetType; Box::new( (0..total) .map(move |_| rng.random_range(0..total)) .filter(move |&x| { // Check for deleted vector first, as that is more likely !deleted_vector_bitslice .and_then(|d| d.get_bit(x as usize)) .unwrap_or(false) // Also check point deletion for integrity && !self.is_deleted_point(x) }), ) } /// Iterate over all stored internal versions, even if they were deleted /// Required for cleanup on segment open fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_>; /// Finds inconsistencies between id mapping and versions storage. /// It might happen that point doesn't have version due to un-flushed WAL. /// This method makes those points usable again. /// /// Returns a list of internal ids, that have non-zero versions, but are missing in the id mapping. /// Those points should be removed from all other parts of the segment. fn fix_inconsistencies(&mut self) -> OperationResult<Vec<PointOffsetType>> { // Points with mapping, but no version. // Can happen if insertion didn't complete. // We need to remove mapping and assume the point is going to be re-inserted by WAL. let mut to_remove = Vec::new(); // Points with version, but no mapping. // Can happen if point was deleted, but version (and likely the storage) wasn't cleaned up. // We return those points to the caller to clean up the storage. 
let mut to_return = Vec::new(); for (internal_id, version) in self.iter_internal_versions() { if version != DELETED_POINT_VERSION && self.external_id(internal_id).is_none() { to_return.push(internal_id); } } for internal_id in self.iter_internal() { if self.internal_version(internal_id).is_none() { if let Some(external_id) = self.external_id(internal_id) { to_remove.push(external_id); to_return.push(internal_id); } else { debug_assert!(false, "internal id {internal_id} has no external id"); } } } for external_id in to_remove { self.drop(external_id)?; #[cfg(debug_assertions)] log::debug!("dropped mapping for point {external_id} without version"); } Ok(to_return) } fn files(&self) -> Vec<PathBuf>; fn immutable_files(&self) -> Vec<PathBuf> { Vec::new() } } pub type IdTrackerSS = dyn IdTracker + Sync + Send; #[derive(Debug)] #[allow(clippy::large_enum_variant)] pub enum IdTrackerEnum { MutableIdTracker(MutableIdTracker), ImmutableIdTracker(ImmutableIdTracker), InMemoryIdTracker(InMemoryIdTracker), // Deprecated since Qdrant 1.14 #[cfg(feature = "rocksdb")] RocksDbIdTracker(SimpleIdTracker), } impl IdTracker for IdTrackerEnum { fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.internal_version(internal_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => { id_tracker.internal_version(internal_id) } IdTrackerEnum::InMemoryIdTracker(id_tracker) => { id_tracker.internal_version(internal_id) } #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.internal_version(internal_id), } } fn set_internal_version( &mut self, internal_id: PointOffsetType, version: SeqNumberType, ) -> OperationResult<()> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => { id_tracker.set_internal_version(internal_id, version) } IdTrackerEnum::ImmutableIdTracker(id_tracker) => { id_tracker.set_internal_version(internal_id, version) } 
IdTrackerEnum::InMemoryIdTracker(id_tracker) => { id_tracker.set_internal_version(internal_id, version) } #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => { id_tracker.set_internal_version(internal_id, version) } } } fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.internal_id(external_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.internal_id(external_id), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.internal_id(external_id), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.internal_id(external_id), } } fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.external_id(internal_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.external_id(internal_id), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.external_id(internal_id), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.external_id(internal_id), } } fn set_link( &mut self, external_id: PointIdType, internal_id: PointOffsetType, ) -> OperationResult<()> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => { id_tracker.set_link(external_id, internal_id) } IdTrackerEnum::ImmutableIdTracker(id_tracker) => { id_tracker.set_link(external_id, internal_id) } IdTrackerEnum::InMemoryIdTracker(id_tracker) => { id_tracker.set_link(external_id, internal_id) } #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => { id_tracker.set_link(external_id, internal_id) } } } fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.drop(external_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.drop(external_id), IdTrackerEnum::InMemoryIdTracker(id_tracker) => 
id_tracker.drop(external_id), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.drop(external_id), } } fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.drop_internal(internal_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.drop_internal(internal_id), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.drop_internal(internal_id), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.drop_internal(internal_id), } } fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.iter_external(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.iter_external(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.iter_external(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.iter_external(), } } fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.iter_internal(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.iter_internal(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.iter_internal(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.iter_internal(), } } fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.iter_from(external_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.iter_from(external_id), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.iter_from(external_id), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.iter_from(external_id), } } fn iter_random(&self) -> Box<dyn 
Iterator<Item = (PointIdType, PointOffsetType)> + '_> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.iter_random(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.iter_random(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.iter_random(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.iter_random(), } } fn mapping_flusher(&self) -> Flusher { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.mapping_flusher(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.mapping_flusher(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.mapping_flusher(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.mapping_flusher(), } } fn versions_flusher(&self) -> Flusher { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.versions_flusher(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.versions_flusher(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.versions_flusher(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.versions_flusher(), } } fn total_point_count(&self) -> usize { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.total_point_count(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.total_point_count(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.total_point_count(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.total_point_count(), } } fn deleted_point_count(&self) -> usize { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.deleted_point_count(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.deleted_point_count(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.deleted_point_count(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => 
id_tracker.deleted_point_count(), } } fn deleted_point_bitslice(&self) -> &BitSlice { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.deleted_point_bitslice(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.deleted_point_bitslice(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.deleted_point_bitslice(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.deleted_point_bitslice(), } } fn is_deleted_point(&self, internal_id: PointOffsetType) -> bool { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.is_deleted_point(internal_id), IdTrackerEnum::ImmutableIdTracker(id_tracker) => { id_tracker.is_deleted_point(internal_id) } IdTrackerEnum::InMemoryIdTracker(id_tracker) => { id_tracker.is_deleted_point(internal_id) } #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.is_deleted_point(internal_id), } } fn name(&self) -> &'static str { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.name(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.name(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.name(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.name(), } } fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.iter_internal_versions(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.iter_internal_versions(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.iter_internal_versions(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.iter_internal_versions(), } } fn fix_inconsistencies(&mut self) -> OperationResult<Vec<PointOffsetType>> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.fix_inconsistencies(), IdTrackerEnum::ImmutableIdTracker(id_tracker) 
=> id_tracker.fix_inconsistencies(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.fix_inconsistencies(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.fix_inconsistencies(), } } fn files(&self) -> Vec<PathBuf> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.files(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.files(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.files(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.files(), } } fn immutable_files(&self) -> Vec<PathBuf> { match self { IdTrackerEnum::MutableIdTracker(id_tracker) => id_tracker.immutable_files(), IdTrackerEnum::ImmutableIdTracker(id_tracker) => id_tracker.immutable_files(), IdTrackerEnum::InMemoryIdTracker(id_tracker) => id_tracker.immutable_files(), #[cfg(feature = "rocksdb")] IdTrackerEnum::RocksDbIdTracker(id_tracker) => id_tracker.immutable_files(), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/immutable_id_tracker.rs
lib/segment/src/id_tracker/immutable_id_tracker.rs
use std::io::{BufRead, BufReader, BufWriter, Read, Write}; use std::mem::{size_of, size_of_val}; use std::path::{Path, PathBuf}; use bitvec::prelude::BitSlice; use bitvec::vec::BitVec; use byteorder::{ReadBytesExt, WriteBytesExt}; use common::ext::BitSliceExt as _; use common::types::PointOffsetType; use fs_err::File; use memory::madvise::AdviceSetting; use memory::mmap_ops::{create_and_ensure_length, open_write_mmap}; use memory::mmap_type::{MmapBitSlice, MmapSlice}; use uuid::Uuid; use crate::common::Flusher; use crate::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper; use crate::common::mmap_slice_buffered_update_wrapper::MmapSliceBufferedUpdateWrapper; use crate::common::operation_error::{OperationError, OperationResult}; use crate::id_tracker::compressed::compressed_point_mappings::CompressedPointMappings; use crate::id_tracker::compressed::external_to_internal::CompressedExternalToInternal; use crate::id_tracker::compressed::internal_to_external::CompressedInternalToExternal; use crate::id_tracker::compressed::versions_store::CompressedVersions; use crate::id_tracker::in_memory_id_tracker::InMemoryIdTracker; use crate::id_tracker::point_mappings::FileEndianess; use crate::id_tracker::{DELETED_POINT_VERSION, IdTracker}; use crate::types::{ExtendedPointId, PointIdType, SeqNumberType}; pub const DELETED_FILE_NAME: &str = "id_tracker.deleted"; pub const MAPPINGS_FILE_NAME: &str = "id_tracker.mappings"; pub const VERSION_MAPPING_FILE_NAME: &str = "id_tracker.versions"; #[derive(Copy, Clone)] #[repr(u8)] enum ExternalIdType { Number = 0, Uuid = 1, } impl ExternalIdType { fn from_byte(byte: u8) -> Option<Self> { match byte { x if x == Self::Number as u8 => Some(Self::Number), x if x == Self::Uuid as u8 => Some(Self::Uuid), _ => None, } } fn from_point_id(point_id: &PointIdType) -> Self { match point_id { PointIdType::NumId(_) => Self::Number, PointIdType::Uuid(_) => Self::Uuid, } } } #[derive(Debug)] pub struct ImmutableIdTracker { 
path: PathBuf, deleted_wrapper: MmapBitSliceBufferedUpdateWrapper, internal_to_version: CompressedVersions, internal_to_version_wrapper: MmapSliceBufferedUpdateWrapper<SeqNumberType>, mappings: CompressedPointMappings, } impl ImmutableIdTracker { pub fn from_in_memory_tracker( in_memory_tracker: InMemoryIdTracker, path: &Path, ) -> OperationResult<Self> { let (internal_to_version, mappings) = in_memory_tracker.into_internal(); let compressed_mappings = CompressedPointMappings::from_mappings(mappings); let id_tracker = Self::new(path, &internal_to_version, compressed_mappings)?; Ok(id_tracker) } /// Loads a `CompressedPointMappings` from the given reader. Applies an optional filter of deleted items /// to prevent allocating unneeded data. fn load_mapping<R: BufRead>( mut reader: R, deleted: Option<BitVec>, ) -> OperationResult<CompressedPointMappings> { // Deserialize the header let len = reader.read_u64::<FileEndianess>()? as usize; let mut deleted = deleted.unwrap_or_else(|| BitVec::repeat(false, len)); deleted.truncate(len); let mut internal_to_external = CompressedInternalToExternal::with_capacity(len); let mut external_to_internal_num: Vec<(u64, PointOffsetType)> = Vec::new(); let mut external_to_internal_uuid: Vec<(Uuid, PointOffsetType)> = Vec::new(); // Deserialize the list entries for i in 0..len { let (internal_id, external_id) = Self::read_entry(&mut reader) .map_err(|err| { OperationError::inconsistent_storage(format!("Immutable ID tracker failed to read next mapping, reading {} out of {len}, assuming malformed storage: {err}", i + 1)) })?; // Need to push this regardless of point deletion as the vecs index represents the internal id // which would become wrong if we leave out entries. 
if internal_to_external.len() <= internal_id as usize { internal_to_external.resize(internal_id as usize + 1, PointIdType::NumId(0)); } internal_to_external.set(internal_id, external_id); let point_deleted = deleted.get_bit(i).unwrap_or(false); if point_deleted { continue; } match external_id { ExtendedPointId::NumId(num) => { external_to_internal_num.push((num, internal_id)); } ExtendedPointId::Uuid(uuid) => { external_to_internal_uuid.push((uuid, internal_id)); } } } // Check that the file has been fully read. #[cfg(debug_assertions)] // Only for dev builds { debug_assert_eq!(reader.bytes().map(Result::unwrap).count(), 0,); } let external_to_internal = CompressedExternalToInternal::from_vectors( external_to_internal_num, external_to_internal_uuid, ); Ok(CompressedPointMappings::new( deleted, internal_to_external, external_to_internal, )) } /// Loads a single entry from a reader. Expects the reader to be aligned so, that the next read /// byte is the first byte of a new entry. /// This function reads exact one entry which means after calling this function, the reader /// will be at the start of the next entry. 
pub(crate) fn read_entry<R: Read>( mut reader: R, ) -> OperationResult<(PointOffsetType, ExtendedPointId)> { let point_id_type = reader.read_u8().map_err(|err| { OperationError::inconsistent_storage(format!( "failed to read point ID type from file: {err}" )) })?; let external_id = match ExternalIdType::from_byte(point_id_type) { None => { return Err(OperationError::inconsistent_storage( "invalid byte for point ID type", )); } Some(ExternalIdType::Number) => { let num = reader.read_u64::<FileEndianess>().map_err(|err| { OperationError::inconsistent_storage(format!( "failed to read numeric point ID from file: {err}" )) })?; PointIdType::NumId(num) } Some(ExternalIdType::Uuid) => { let uuid_u128 = reader.read_u128::<FileEndianess>().map_err(|err| { OperationError::inconsistent_storage(format!( "failed to read UUID point ID from file: {err}" )) })?; PointIdType::Uuid(Uuid::from_u128_le(uuid_u128)) } }; let internal_id = reader.read_u32::<FileEndianess>().map_err(|err| { OperationError::inconsistent_storage(format!( "failed to read internal point ID from file: {err}" )) })? as PointOffsetType; Ok((internal_id, external_id)) } /// Serializes the `PointMappings` into the given writer using the file format specified below. /// /// ## File format /// In general the format looks like this: /// +---------------------------+-----------------+ /// | Header (list length: u64) | List of entries | /// +---------------------------+-----------------+ /// /// A single list entry: /// +-----------------+-----------------------+------------------+ /// | PointIdType: u8 | Number/UUID: u64/u128 | Internal ID: u32 | /// +-----------------+-----------------------+------------------+ /// A single entry is thus either 1+8+4=13 or 1+16+4=21 bytes in size depending /// on the PointIdType. fn store_mapping<W: Write>( mappings: &CompressedPointMappings, mut writer: W, ) -> OperationResult<()> { let number_of_entries = mappings.total_point_count(); // Serialize the header (=length). 
writer.write_u64::<FileEndianess>(number_of_entries as u64)?; // Serialize all entries for (internal_id, external_id) in mappings.iter_internal_raw() { Self::write_entry(&mut writer, internal_id, external_id)?; } Ok(()) } fn write_entry<W: Write>( mut writer: W, internal_id: PointOffsetType, external_id: PointIdType, ) -> OperationResult<()> { // Byte to distinguish between Number and UUID writer.write_u8(ExternalIdType::from_point_id(&external_id) as u8)?; // Serializing External ID match external_id { PointIdType::NumId(num) => { // The PointID's number writer.write_u64::<FileEndianess>(num)?; } PointIdType::Uuid(uuid) => { // The PointID's UUID writer.write_u128::<FileEndianess>(uuid.to_u128_le())?; } } // Serializing Internal ID writer.write_u32::<FileEndianess>(internal_id)?; Ok(()) } pub fn open(segment_path: &Path) -> OperationResult<Self> { let deleted_raw = open_write_mmap( &Self::deleted_file_path(segment_path), AdviceSetting::Global, true, )?; let deleted_mmap = MmapBitSlice::try_from(deleted_raw, 0)?; let deleted_bitvec = deleted_mmap.to_bitvec(); let deleted_wrapper = MmapBitSliceBufferedUpdateWrapper::new(deleted_mmap); let internal_to_version_map = open_write_mmap( &Self::version_mapping_file_path(segment_path), AdviceSetting::Global, true, )?; let internal_to_version_mapslice: MmapSlice<SeqNumberType> = unsafe { MmapSlice::try_from(internal_to_version_map)? 
}; let internal_to_version = CompressedVersions::from_slice(&internal_to_version_mapslice); let internal_to_version_wrapper = MmapSliceBufferedUpdateWrapper::new(internal_to_version_mapslice); let reader = BufReader::new(File::open(Self::mappings_file_path(segment_path))?); let mappings = Self::load_mapping(reader, Some(deleted_bitvec))?; Ok(Self { path: segment_path.to_path_buf(), deleted_wrapper, internal_to_version_wrapper, internal_to_version, mappings, }) } pub fn new( path: &Path, internal_to_version: &[SeqNumberType], mappings: CompressedPointMappings, ) -> OperationResult<Self> { // Create mmap file for deleted bitvec let deleted_filepath = Self::deleted_file_path(path); { let deleted_size = bitmap_mmap_size(mappings.total_point_count()); create_and_ensure_length(&deleted_filepath, deleted_size)?; } debug_assert!(mappings.deleted().len() <= mappings.total_point_count()); let deleted_mmap = open_write_mmap(&deleted_filepath, AdviceSetting::Global, false)?; let mut deleted_new = MmapBitSlice::try_from(deleted_mmap, 0)?; deleted_new[..mappings.deleted().len()].copy_from_bitslice(mappings.deleted()); for i in mappings.deleted().len()..mappings.total_point_count() { deleted_new.set(i, true); } let deleted_wrapper = MmapBitSliceBufferedUpdateWrapper::new(deleted_new); // Create mmap file for internal-to-version list let version_filepath = Self::version_mapping_file_path(path); // Amount of points without version let missing_version_count = mappings .total_point_count() .saturating_sub(internal_to_version.len()); let missing_versions_size = missing_version_count * size_of::<SeqNumberType>(); let internal_to_version_size = size_of_val(internal_to_version); let min_size = internal_to_version_size + missing_versions_size; { let version_size = mmap_size::<SeqNumberType>(min_size); create_and_ensure_length(&version_filepath, version_size)?; } let mut internal_to_version_wrapper = unsafe { MmapSlice::try_from(open_write_mmap( &version_filepath, AdviceSetting::Global, 
false, )?)? }; internal_to_version_wrapper[..internal_to_version.len()] .copy_from_slice(internal_to_version); let internal_to_version = CompressedVersions::from_slice(&internal_to_version_wrapper); debug_assert_eq!(internal_to_version.len(), mappings.total_point_count()); let internal_to_version_wrapper = MmapSliceBufferedUpdateWrapper::new(internal_to_version_wrapper); // Write mappings to disk. let file = File::create(Self::mappings_file_path(path))?; let mut writer = BufWriter::new(file); Self::store_mapping(&mappings, &mut writer)?; // Explicitly fsync file contents to ensure durability writer.flush()?; let file = writer.into_inner().unwrap(); file.sync_all()?; deleted_wrapper.flusher()()?; internal_to_version_wrapper.flusher()()?; Ok(Self { path: path.to_path_buf(), deleted_wrapper, internal_to_version_wrapper, internal_to_version, mappings, }) } fn deleted_file_path(base: &Path) -> PathBuf { base.join(DELETED_FILE_NAME) } fn version_mapping_file_path(base: &Path) -> PathBuf { base.join(VERSION_MAPPING_FILE_NAME) } pub(crate) fn mappings_file_path(base: &Path) -> PathBuf { base.join(MAPPINGS_FILE_NAME) } } /// Returns the required mmap filesize for a given length of a slice of type `T`. fn mmap_size<T>(len: usize) -> usize { let item_width = size_of::<T>(); len.div_ceil(item_width) * item_width // Make it a multiple of usize-width. } /// Returns the required mmap filesize for a `BitSlice`. 
fn bitmap_mmap_size(number_of_elements: usize) -> usize {
    // One bit per element, rounded up to whole bytes, then up to a
    // multiple of `usize` width by `mmap_size`.
    mmap_size::<usize>(number_of_elements.div_ceil(u8::BITS as usize))
}

impl IdTracker for ImmutableIdTracker {
    fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType> {
        self.internal_to_version.get(internal_id)
    }

    /// Updates the version of an existing point.
    ///
    /// The version list is fixed-size in this immutable tracker: versions of IDs
    /// that were never assigned one cannot be set here. Such calls are ignored
    /// (debug builds assert instead).
    fn set_internal_version(
        &mut self,
        internal_id: PointOffsetType,
        version: SeqNumberType,
    ) -> OperationResult<()> {
        let has_version = self.internal_to_version.has(internal_id);
        debug_assert!(
            has_version,
            "Can't extend version list in immutable tracker",
        );
        if has_version {
            // Update both the in-memory copy and the buffered mmap wrapper so the
            // change is visible immediately and persisted on the next flush.
            self.internal_to_version.set(internal_id, version);
            self.internal_to_version_wrapper.set(internal_id, version);
        }
        Ok(())
    }

    fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> {
        self.mappings.internal_id(&external_id)
    }

    fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> {
        self.mappings.external_id(internal_id)
    }

    /// Unsupported: mappings are immutable in this tracker.
    fn set_link(
        &mut self,
        _external_id: PointIdType,
        _internal_id: PointOffsetType,
    ) -> OperationResult<()> {
        panic!("Trying to call a mutating function (`set_link`) of an immutable id tracker");
    }

    /// Soft-deletes a point by external ID: marks it in the deleted bitmap and
    /// resets its version. The mapping entry itself is not rewritten on disk.
    fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> {
        let internal_id = self.mappings.drop(external_id);

        if let Some(internal_id) = internal_id {
            self.deleted_wrapper.set(internal_id as usize, true);
            self.set_internal_version(internal_id, DELETED_POINT_VERSION)?;
        }

        Ok(())
    }

    /// Soft-deletes a point by internal ID; see [`Self::drop`].
    fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> {
        if let Some(external_id) = self.mappings.external_id(internal_id) {
            self.mappings.drop(external_id);
        }

        // Mark deleted even if no external mapping was found for this offset.
        self.deleted_wrapper.set(internal_id as usize, true);
        self.set_internal_version(internal_id, DELETED_POINT_VERSION)?;

        Ok(())
    }

    fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> {
        self.mappings.iter_external()
    }

    fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> {
        self.mappings.iter_internal()
    }

    fn iter_from(
        &self,
        external_id: Option<PointIdType>,
) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_from(external_id) } fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_random() } /// Creates a flusher function, that writes the deleted points bitvec to disk. fn mapping_flusher(&self) -> Flusher { // Only flush deletions because mappings are immutable self.deleted_wrapper.flusher() } /// Creates a flusher function, that writes the points versions to disk. fn versions_flusher(&self) -> Flusher { self.internal_to_version_wrapper.flusher() } fn total_point_count(&self) -> usize { self.mappings.total_point_count() } fn available_point_count(&self) -> usize { self.mappings.available_point_count() } fn deleted_point_count(&self) -> usize { self.total_point_count() - self.available_point_count() } fn deleted_point_bitslice(&self) -> &BitSlice { self.mappings.deleted() } fn is_deleted_point(&self, key: PointOffsetType) -> bool { self.mappings.is_deleted_point(key) } fn name(&self) -> &'static str { "immutable id tracker" } fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> { Box::new(self.internal_to_version.iter()) } fn files(&self) -> Vec<PathBuf> { vec![ Self::deleted_file_path(&self.path), Self::mappings_file_path(&self.path), Self::version_mapping_file_path(&self.path), ] } fn immutable_files(&self) -> Vec<PathBuf> { vec![Self::mappings_file_path(&self.path)] } } #[cfg(test)] pub(super) mod test { use std::collections::{HashMap, HashSet}; use itertools::Itertools; #[cfg(feature = "rocksdb")] use rand::Rng; use rand::prelude::*; use tempfile::Builder; use uuid::Uuid; use super::*; #[cfg(feature = "rocksdb")] use crate::id_tracker::simple_id_tracker::SimpleIdTracker; const RAND_SEED: u64 = 42; #[test] fn test_iterator() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let mut id_tracker = InMemoryIdTracker::new(); id_tracker.set_link(200.into(), 
0).unwrap(); id_tracker.set_link(100.into(), 1).unwrap(); id_tracker.set_link(150.into(), 2).unwrap(); id_tracker.set_link(120.into(), 3).unwrap(); id_tracker.set_link(180.into(), 4).unwrap(); id_tracker.set_link(110.into(), 5).unwrap(); id_tracker.set_link(115.into(), 6).unwrap(); id_tracker.set_link(190.into(), 7).unwrap(); id_tracker.set_link(177.into(), 8).unwrap(); id_tracker.set_link(118.into(), 9).unwrap(); let id_tracker = ImmutableIdTracker::from_in_memory_tracker(id_tracker, dir.path()).unwrap(); let first_four = id_tracker.iter_from(None).take(4).collect_vec(); assert_eq!(first_four.len(), 4); assert_eq!(first_four[0].0, 100.into()); let last = id_tracker.iter_from(Some(first_four[3].0)).collect_vec(); assert_eq!(last.len(), 7); } pub const TEST_POINTS: &[PointIdType] = &[ PointIdType::NumId(100), PointIdType::Uuid(Uuid::from_u128(123_u128)), PointIdType::Uuid(Uuid::from_u128(156_u128)), PointIdType::NumId(150), PointIdType::NumId(120), PointIdType::Uuid(Uuid::from_u128(12_u128)), PointIdType::NumId(180), PointIdType::NumId(110), PointIdType::NumId(115), PointIdType::Uuid(Uuid::from_u128(673_u128)), PointIdType::NumId(190), PointIdType::NumId(177), PointIdType::Uuid(Uuid::from_u128(971_u128)), ]; #[test] fn test_mixed_types_iterator() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let id_tracker = make_immutable_tracker(dir.path()); let sorted_from_tracker = id_tracker.iter_from(None).map(|(k, _)| k).collect_vec(); let mut values = TEST_POINTS.to_vec(); values.sort(); assert_eq!(sorted_from_tracker, values); } #[test] fn test_load_store() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let (old_mappings, old_versions) = { let id_tracker = make_immutable_tracker(dir.path()); (id_tracker.mappings, id_tracker.internal_to_version) }; let mut loaded_id_tracker = ImmutableIdTracker::open(dir.path()).unwrap(); // We may extend the length of deleted bitvec as memory maps need to be aligned to // a multiple of 
`usize-width`. assert_eq!( old_versions.len(), loaded_id_tracker.internal_to_version.len() ); for i in 0..old_versions.len() as u32 { assert_eq!( old_versions.get(i), loaded_id_tracker.internal_to_version.get(i), "Version mismatch at index {i}", ); } assert_eq!(old_mappings, loaded_id_tracker.mappings); loaded_id_tracker.drop(PointIdType::NumId(180)).unwrap(); } /// Mutates an ID tracker and stores it to disk. Tests whether loading results in the exact same /// ID tracker. #[test] fn test_store_load_mutated() { let mut rng = StdRng::seed_from_u64(RAND_SEED); let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let (dropped_points, custom_version) = { let mut id_tracker = make_immutable_tracker(dir.path()); let mut dropped_points = HashSet::new(); let mut custom_version = HashMap::new(); for (index, point) in TEST_POINTS.iter().enumerate() { if index % 2 == 0 { continue; } if index % 3 == 0 { id_tracker.drop(*point).unwrap(); dropped_points.insert(*point); continue; } if index % 5 == 0 { let new_version = rng.next_u64(); id_tracker .set_internal_version(index as PointOffsetType, new_version) .unwrap(); custom_version.insert(index as PointOffsetType, new_version); } } id_tracker.mapping_flusher()().unwrap(); id_tracker.versions_flusher()().unwrap(); (dropped_points, custom_version) }; let id_tracker = ImmutableIdTracker::open(dir.path()).unwrap(); for (index, point) in TEST_POINTS.iter().enumerate() { let internal_id = index as PointOffsetType; if dropped_points.contains(point) { assert!(id_tracker.is_deleted_point(internal_id)); assert_eq!(id_tracker.external_id(internal_id), None); assert!(id_tracker.mappings.internal_id(point).is_none()); continue; } // Check version let expect_version = custom_version .get(&internal_id) .copied() .unwrap_or(DEFAULT_VERSION); assert_eq!( id_tracker.internal_to_version.get(internal_id), Some(expect_version) ); // Check that unmodified points still haven't changed. 
assert_eq!( id_tracker.external_id(index as PointOffsetType), Some(*point) ); } } #[test] fn test_all_points_have_version() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let id_tracker = make_immutable_tracker(dir.path()); for i in id_tracker.iter_internal() { assert!(id_tracker.internal_version(i).is_some()); } } #[test] fn test_point_deletion_correctness() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let mut id_tracker = make_immutable_tracker(dir.path()); let deleted_points = id_tracker.total_point_count() - id_tracker.available_point_count(); let point_to_delete = PointIdType::NumId(100); assert!(id_tracker.iter_external().contains(&point_to_delete)); assert_eq!(id_tracker.internal_id(point_to_delete), Some(0)); id_tracker.drop(point_to_delete).unwrap(); let point_exists = id_tracker.internal_id(point_to_delete).is_some() && id_tracker.iter_external().contains(&point_to_delete) && id_tracker.iter_from(None).any(|i| i.0 == point_to_delete); assert!(!point_exists); let new_deleted_points = id_tracker.total_point_count() - id_tracker.available_point_count(); assert_eq!(new_deleted_points, deleted_points + 1); } #[test] fn test_point_deletion_persists_reload() { let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let point_to_delete = PointIdType::NumId(100); let old_mappings = { let mut id_tracker = make_immutable_tracker(dir.path()); let intetrnal_id = id_tracker .internal_id(point_to_delete) .expect("Point to delete exists."); assert!(!id_tracker.is_deleted_point(intetrnal_id)); id_tracker.drop(point_to_delete).unwrap(); id_tracker.mapping_flusher()().unwrap(); id_tracker.versions_flusher()().unwrap(); id_tracker.mappings }; // Point should still be gone let id_tracker = ImmutableIdTracker::open(dir.path()).unwrap(); assert_eq!(id_tracker.internal_id(point_to_delete), None); old_mappings .iter_internal_raw() .zip(id_tracker.mappings.iter_internal_raw()) .for_each( |((old_internal, old_external), 
(new_internal, new_external))| { assert_eq!(old_internal, new_internal); assert_eq!(old_external, new_external); }, ); } /// Tests de/serializing of whole `PointMappings`. #[test] fn test_point_mappings_de_serialization() { let mut rng = StdRng::seed_from_u64(RAND_SEED); let mut buf = vec![]; // Test different sized PointMappings, growing exponentially to also test large ones. // This way we test up to 2^16 entries. for size_exp in (0..16u32).step_by(3) { buf.clear(); let size = 2usize.pow(size_exp); let mappings = CompressedPointMappings::random(&mut rng, size as u32); ImmutableIdTracker::store_mapping(&mappings, &mut buf).unwrap(); // 16 is the min byte size of an entry. The exact number is not that important // we just want to ensure that the written bytes correlate to the amount of entries. assert!(buf.len() >= size * 16); let new_mappings = ImmutableIdTracker::load_mapping(&*buf, None).unwrap(); assert_eq!(new_mappings.total_point_count(), size); assert_eq!(mappings, new_mappings); } } /// Verifies that de/serializing works properly for empty `PointMappings`. #[test] fn test_point_mappings_de_serialization_empty() { let mut rng = StdRng::seed_from_u64(RAND_SEED); let mappings = CompressedPointMappings::random(&mut rng, 0); let mut buf = vec![]; ImmutableIdTracker::store_mapping(&mappings, &mut buf).unwrap(); // We still have a header! assert!(!buf.is_empty()); let new_mappings = ImmutableIdTracker::load_mapping(&*buf, None).unwrap(); assert_eq!(new_mappings.total_point_count(), 0); assert_eq!(mappings, new_mappings); } /// Tests de/serializing of only single ID mappings. 
#[test] fn test_point_mappings_de_serialization_single() { let mut rng = StdRng::seed_from_u64(RAND_SEED); const SIZE: usize = 400_000; let mappings = CompressedPointMappings::random(&mut rng, SIZE as u32); for i in 0..SIZE { let mut buf = vec![]; let internal_id = i as PointOffsetType; let expected_external = mappings.external_id(internal_id).unwrap(); ImmutableIdTracker::write_entry(&mut buf, internal_id, expected_external).unwrap(); let (got_internal, got_external) = ImmutableIdTracker::read_entry(&*buf).unwrap(); assert_eq!(i as PointOffsetType, got_internal); assert_eq!(expected_external, got_external); } } const DEFAULT_VERSION: SeqNumberType = 42; fn make_in_memory_tracker_from_memory() -> InMemoryIdTracker { let mut id_tracker = InMemoryIdTracker::new(); for value in TEST_POINTS.iter() { let internal_id = id_tracker.total_point_count() as PointOffsetType; id_tracker.set_link(*value, internal_id).unwrap(); id_tracker .set_internal_version(internal_id, DEFAULT_VERSION) .unwrap() } id_tracker } fn make_immutable_tracker(path: &Path) -> ImmutableIdTracker { let id_tracker = make_in_memory_tracker_from_memory(); ImmutableIdTracker::from_in_memory_tracker(id_tracker, path).unwrap() } #[test] fn test_id_tracker_equal() { let in_memory_id_tracker = make_in_memory_tracker_from_memory(); let immutable_id_tracker_dir = Builder::new() .prefix("storage_dir_immutable") .tempdir() .unwrap(); let immutable_id_tracker = make_immutable_tracker(immutable_id_tracker_dir.path()); assert_eq!( in_memory_id_tracker.available_point_count(), immutable_id_tracker.available_point_count() ); assert_eq!( in_memory_id_tracker.total_point_count(), immutable_id_tracker.total_point_count() ); for (internal, external) in TEST_POINTS.iter().enumerate() { let internal = internal as PointOffsetType; assert_eq!( in_memory_id_tracker.internal_id(*external), immutable_id_tracker.internal_id(*external) ); assert_eq!( in_memory_id_tracker .internal_version(internal) .unwrap_or_default(), 
immutable_id_tracker .internal_version(internal) .unwrap_or_default() ); assert_eq!( in_memory_id_tracker.external_id(internal), immutable_id_tracker.external_id(internal) ); } } #[test] #[cfg(feature = "rocksdb")] fn simple_id_tracker_vs_immutable_tracker_congruence() { use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db}; let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap(); let mut id_tracker = InMemoryIdTracker::new(); let mut simple_id_tracker = SimpleIdTracker::open(db).unwrap(); // Insert 100 random points into id_tracker let num_points = 200; let mut rng = StdRng::seed_from_u64(RAND_SEED); for _ in 0..num_points { // Generate num id in range from 0 to 100 let point_id = PointIdType::NumId(rng.random_range(0..num_points as u64)); let version = rng.random_range(0..1000); let internal_id_mmap = id_tracker.total_point_count() as PointOffsetType; let internal_id_simple = simple_id_tracker.total_point_count() as PointOffsetType; assert_eq!(internal_id_mmap, internal_id_simple); if id_tracker.internal_id(point_id).is_some() {
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/mutable_id_tracker.rs
lib/segment/src/id_tracker/mutable_id_tracker.rs
use std::collections::BTreeMap; use std::io::{self, BufReader, BufWriter, Read, Seek, Write}; use std::path::{Path, PathBuf}; use std::sync::Arc; use bitvec::prelude::{BitSlice, BitVec}; use byteorder::{ReadBytesExt, WriteBytesExt}; use common::is_alive_lock::IsAliveLock; use common::types::PointOffsetType; use fs_err::File; use itertools::Itertools; use memory::fadvise::OneshotFile; use parking_lot::Mutex; use uuid::Uuid; use super::point_mappings::FileEndianess; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; use crate::id_tracker::point_mappings::PointMappings; use crate::id_tracker::{DELETED_POINT_VERSION, IdTracker}; use crate::types::{PointIdType, SeqNumberType}; const FILE_MAPPINGS: &str = "mutable_id_tracker.mappings"; const FILE_VERSIONS: &str = "mutable_id_tracker.versions"; const VERSION_ELEMENT_SIZE: u64 = size_of::<SeqNumberType>() as u64; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] enum MappingChange { Insert(PointIdType, PointOffsetType), Delete(PointIdType), } impl MappingChange { fn change_type(&self) -> MappingChangeType { match self { Self::Insert(PointIdType::NumId(_), _) => MappingChangeType::InsertNum, Self::Insert(PointIdType::Uuid(_), _) => MappingChangeType::InsertUuid, Self::Delete(PointIdType::NumId(_)) => MappingChangeType::DeleteNum, Self::Delete(PointIdType::Uuid(_)) => MappingChangeType::DeleteUuid, } } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u8)] enum MappingChangeType { InsertNum = 1, InsertUuid = 2, DeleteNum = 3, DeleteUuid = 4, } impl MappingChangeType { const fn from_byte(byte: u8) -> Option<Self> { match byte { x if x == Self::InsertNum as u8 => Some(Self::InsertNum), x if x == Self::InsertUuid as u8 => Some(Self::InsertUuid), x if x == Self::DeleteNum as u8 => Some(Self::DeleteNum), x if x == Self::DeleteUuid as u8 => Some(Self::DeleteUuid), _ => None, } } /// Get size of the persisted operation in bytes /// /// 
+-----------------------+-----------------------+------------------+ /// | MappingChangeType: u8 | Number/UUID: u64/u128 | Internal ID: u32 | /// +-----------------------+-----------------------+------------------+ /// /// Deletion changes are serialized as follows: /// /// +-----------------------+-----------------------+ /// | MappingChangeType: u8 | Number/UUID: u64/u128 | /// +-----------------------+-----------------------+ const fn operation_size(self) -> usize { match self { Self::InsertNum => size_of::<u8>() + size_of::<u64>() + size_of::<u32>(), Self::InsertUuid => size_of::<u8>() + size_of::<u128>() + size_of::<u32>(), Self::DeleteNum => size_of::<u8>() + size_of::<u64>(), Self::DeleteUuid => size_of::<u8>() + size_of::<u128>(), } } } /// Mutable in-memory ID tracker with simple file based backing storage /// /// This ID tracker simply persists all recorded point mapping and versions changes to disk by /// appending these changes to a file. When loading, all mappings and versions are deduplicated in /// memory so that only the latest mappings for a point are kept. /// /// This structure may grow forever by collecting changes. It therefore relies on the optimization /// processes in Qdrant to eventually vacuum the segment this ID tracker belongs to. Reoptimization /// will clear all collected changes and start from scratch. /// /// This ID tracker primarily replaces [`SimpleIdTracker`], so that we can eliminate the use of /// RocksDB. 
#[derive(Debug)]
pub struct MutableIdTracker {
    // Directory of the segment this tracker belongs to; backing files live here.
    segment_path: PathBuf,
    // In-memory point versions, indexed by internal point offset.
    internal_to_version: Vec<SeqNumberType>,
    // In-memory external<->internal point mappings.
    pub(super) mappings: PointMappings,

    /// List of point versions pending to be persisted, will be persisted on flush
    pending_versions: Arc<Mutex<BTreeMap<PointOffsetType, SeqNumberType>>>,
    /// List of point mappings pending to be persisted, will be persisted on flush
    pending_mappings: Arc<Mutex<Vec<MappingChange>>>,

    // Guards flushers against running after this tracker has been dropped.
    is_alive_lock: IsAliveLock,
}

impl MutableIdTracker {
    /// Opens (or initializes an empty) mutable ID tracker in the given segment directory.
    ///
    /// Missing backing files are tolerated: a missing versions file (or a mappings/versions
    /// count mismatch) is assumed to be recoverable by replaying the WAL.
    pub fn open(segment_path: impl Into<PathBuf>) -> OperationResult<Self> {
        let segment_path = segment_path.into();

        let (mappings_path, versions_path) =
            (mappings_path(&segment_path), versions_path(&segment_path));
        let (has_mappings, has_versions) = (mappings_path.is_file(), versions_path.is_file());

        // Warn or error about unlikely or problematic scenarios
        if !has_mappings && has_versions {
            // Versions without mappings should be impossible: mappings are flushed
            // before versions (see `mapping_flusher`/`versions_flusher` ordering).
            debug_assert!(
                false,
                "Missing mappings file for ID tracker while versions file exists, storage may be corrupted!",
            );
            log::error!(
                "Missing mappings file for ID tracker while versions file exists, storage may be corrupted!",
            );
        }
        if has_mappings && !has_versions {
            log::warn!(
                "Missing versions file for ID tracker, assuming automatic point mappings and version recovery by WAL",
            );
        }

        let mappings = if has_mappings {
            load_mappings(&mappings_path).map_err(|err| {
                OperationError::service_error(format!("Failed to load ID tracker mappings: {err}"))
            })?
        } else {
            PointMappings::default()
        };

        let internal_to_version = if has_versions {
            load_versions(&versions_path).map_err(|err| {
                OperationError::service_error(format!("Failed to load ID tracker versions: {err}"))
            })?
} else { vec![] }; // Compare internal point mappings and versions count, report warning if we don't debug_assert!( mappings.total_point_count() >= internal_to_version.len(), "can never have more versions than internal point mappings", ); if mappings.total_point_count() != internal_to_version.len() { log::warn!( "Mutable ID tracker mappings and versions count mismatch, could have been partially flushed, assuming automatic recovery by WAL ({} mappings, {} versions)", mappings.total_point_count(), internal_to_version.len(), ); } #[cfg(debug_assertions)] mappings.assert_mappings(); Ok(Self { segment_path, internal_to_version, mappings, pending_versions: Default::default(), pending_mappings: Default::default(), is_alive_lock: IsAliveLock::new(), }) } pub fn segment_files(segment_path: &Path) -> Vec<PathBuf> { [mappings_path(segment_path), versions_path(segment_path)] .into_iter() .filter(|path| path.is_file()) .collect() } } impl IdTracker for MutableIdTracker { fn internal_version(&self, internal_id: PointOffsetType) -> Option<SeqNumberType> { self.internal_to_version.get(internal_id as usize).copied() } fn set_internal_version( &mut self, internal_id: PointOffsetType, version: SeqNumberType, ) -> OperationResult<()> { if internal_id as usize >= self.internal_to_version.len() { #[cfg(debug_assertions)] { if internal_id as usize > self.internal_to_version.len() + 1 { log::info!( "Resizing versions is initializing larger range {} -> {}", self.internal_to_version.len(), internal_id + 1, ); } } self.internal_to_version.resize(internal_id as usize + 1, 0); } self.internal_to_version[internal_id as usize] = version; self.pending_versions.lock().insert(internal_id, version); Ok(()) } fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> { self.mappings.internal_id(&external_id) } fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { self.mappings.external_id(internal_id) } fn set_link( &mut self, external_id: PointIdType, 
internal_id: PointOffsetType, ) -> OperationResult<()> { self.mappings.set_link(external_id, internal_id); self.pending_mappings .lock() .push(MappingChange::Insert(external_id, internal_id)); Ok(()) } fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> { let internal_id = self.mappings.drop(external_id); self.pending_mappings .lock() .push(MappingChange::Delete(external_id)); if let Some(internal_id) = internal_id { self.set_internal_version(internal_id, DELETED_POINT_VERSION)?; } Ok(()) } fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> { if let Some(external_id) = self.mappings.external_id(internal_id) { self.mappings.drop(external_id); self.pending_mappings .lock() .push(MappingChange::Delete(external_id)); } self.set_internal_version(internal_id, DELETED_POINT_VERSION)?; Ok(()) } fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { self.mappings.iter_external() } fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { self.mappings.iter_internal() } fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_from(external_id) } fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { self.mappings.iter_random() } fn total_point_count(&self) -> usize { self.mappings.total_point_count() } fn available_point_count(&self) -> usize { self.mappings.available_point_count() } fn deleted_point_count(&self) -> usize { self.total_point_count() - self.available_point_count() } /// Creates a flusher function, that persists the removed points in the mapping database /// and flushes the mapping to disk. /// This function should be called _before_ flushing the version database. 
    fn mapping_flusher(&self) -> Flusher {
        let mappings_path = mappings_path(&self.segment_path);

        // Snapshot the pending changes now; the returned closure persists exactly
        // this snapshot, so changes recorded later are untouched by it.
        let changes = {
            let changes_guard = self.pending_mappings.lock();
            if changes_guard.is_empty() {
                // Nothing pending: return a no-op flusher.
                return Box::new(|| Ok(()));
            }
            changes_guard.clone()
        };

        let is_alive_handle = self.is_alive_lock.handle();
        // Weak reference so an outstanding flusher does not keep the pending list
        // (and thus the tracker's state) alive after the tracker is dropped.
        let pending_mappings_weak = Arc::downgrade(&self.pending_mappings);

        Box::new(move || {
            // Bail out silently if the tracker has been dropped in the meantime.
            let (Some(is_alive_guard), Some(pending_mappings_arc)) = (
                is_alive_handle.lock_if_alive(),
                pending_mappings_weak.upgrade(),
            ) else {
                return Ok(());
            };

            store_mapping_changes(&mappings_path, &changes)?;
            // Remove the now-persisted changes from the pending list.
            reconcile_persisted_mapping_changes(&pending_mappings_arc, &changes);

            drop(is_alive_guard);

            Ok(())
        })
    }

    /// Creates a flusher function, that persists the removed points in the version database
    /// and flushes the version database to disk.
    /// This function should be called _after_ flushing the mapping database.
    fn versions_flusher(&self) -> Flusher {
        // Snapshot pending version updates; see `mapping_flusher` for the rationale
        // of the snapshot/weak-reference/is-alive pattern.
        let changes = {
            let changes_guard = self.pending_versions.lock();
            if changes_guard.is_empty() {
                return Box::new(|| Ok(()));
            }
            changes_guard.clone()
        };

        let versions_path = versions_path(&self.segment_path);
        let pending_versions_weak = Arc::downgrade(&self.pending_versions);
        let is_alive_handle = self.is_alive_lock.handle();

        Box::new(move || {
            let (Some(is_alive_guard), Some(pending_versions_arc)) = (
                is_alive_handle.lock_if_alive(),
                pending_versions_weak.upgrade(),
            ) else {
                return Ok(());
            };

            store_version_changes(&versions_path, &changes)?;
            reconcile_persisted_version_changes(&pending_versions_arc, changes);

            drop(is_alive_guard);

            Ok(())
        })
    }

    fn is_deleted_point(&self, key: PointOffsetType) -> bool {
        self.mappings.is_deleted_point(key)
    }

    fn deleted_point_bitslice(&self) -> &BitSlice {
        self.mappings.deleted()
    }

    fn iter_internal_versions(
        &self,
    ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> {
        Box::new(
            self.internal_to_version
                .iter()
                .enumerate()
                .map(|(i, version)| (i as PointOffsetType, *version)),
        )
    }

    fn name(&self) ->
&'static str { "mutable id tracker" } #[inline] fn files(&self) -> Vec<PathBuf> { Self::segment_files(&self.segment_path) } } pub(crate) fn mappings_path(segment_path: &Path) -> PathBuf { segment_path.join(FILE_MAPPINGS) } fn versions_path(segment_path: &Path) -> PathBuf { segment_path.join(FILE_VERSIONS) } /// Store new mapping changes, appending them to the given file fn store_mapping_changes( mappings_path: &Path, changes: &Vec<MappingChange>, ) -> OperationResult<()> { // Create or open file in append mode to write new changes to the end let file = File::options() .create(true) .append(true) .open(mappings_path)?; let mut writer = BufWriter::new(file); write_mapping_changes(&mut writer, changes).map_err(|err| { OperationError::service_error(format!( "Failed to persist ID tracker point mappings ({}): {err}", mappings_path.display(), )) })?; // Explicitly fsync file contents to ensure durability writer.flush()?; let file = writer.into_inner().unwrap(); file.sync_all().map_err(|err| { OperationError::service_error(format!("Failed to fsync ID tracker point mappings: {err}")) })?; Ok(()) } /// Serializes pending point mapping changes into the given writer /// /// ## File format /// /// All entries have a variable size and are simply concatenated. Each entry has a 1-byte header /// which specifies the change type and implies the length of the entry. /// /// See [`read_entry`] and [`write_entry`] for more details. fn write_mapping_changes<W: Write>( mut writer: W, changes: &Vec<MappingChange>, ) -> OperationResult<()> { for &change in changes { write_entry(&mut writer, change)?; } // Explicitly flush writer to catch IO errors writer.flush()?; Ok(()) } /// Load point mappings from the given file /// /// If the file ends with an incomplete entry, it is truncated from the file. 
fn load_mappings(mappings_path: &Path) -> OperationResult<PointMappings> { let file = OneshotFile::open(mappings_path)?; let file_len = file.metadata()?.len(); let mut reader = BufReader::new(file); let mappings = read_mappings(&mut reader)?; let read_to = reader.stream_position()?; reader.into_inner().drop_cache()?; // If reader is not fully exhausted, there's an incomplete entry at the end, truncate the file // It can happen on crash while flushing. We must truncate the file here to not corrupt new // entries we append to the file debug_assert!(read_to <= file_len, "cannot read past the end of the file"); if read_to < file_len { log::warn!( "Mutable ID tracker mappings file ends with incomplete entry, removing last {} bytes and assuming automatic recovery by WAL", file_len - read_to, ); let file = File::options() .write(true) .truncate(false) .open(mappings_path)?; file.set_len(read_to)?; file.sync_all()?; } Ok(mappings) } /// Iterate over mapping changes from the given reader /// /// Each non-errorous item is a tuple of the mapping change and the number of bytes read so far. /// /// The iterator ends when the end of the file is reached, or when an error occurred. /// /// ## Error /// /// An error item is returned if reading a mapping change fails due to malformed data. Then the /// iterator will not produce any more items. 
fn read_mappings_iter<R>(mut reader: R) -> impl Iterator<Item = OperationResult<MappingChange>> where R: Read + Seek, { let mut position = reader.stream_position().unwrap_or(0); // Keep reading until end of file or error std::iter::from_fn(move || match read_entry(&mut reader) { Ok((entry, read_bytes)) => { position += read_bytes; Some(Ok(entry)) } // Done reading if end of file is reached // Explicitly seek to after last read entry, allows to detect if full file was read Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { match reader.seek(io::SeekFrom::Start(position)) { Ok(_) => None, Err(err) => Some(Err(err.into())), } } // Propagate deserialization error Err(err) => Some(Err(err.into())), }) // Can't read any more data reliably after first error .take_while_inclusive(|item| item.is_ok()) } /// Read point mappings from the given reader /// /// Returns loaded point mappings. fn read_mappings<R>(reader: R) -> OperationResult<PointMappings> where R: Read + Seek, { let mut deleted = BitVec::new(); let mut internal_to_external: Vec<PointIdType> = Default::default(); let mut external_to_internal_num: BTreeMap<u64, PointOffsetType> = Default::default(); let mut external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType> = Default::default(); for change in read_mappings_iter(reader) { match change? { MappingChange::Insert(external_id, internal_id) => { // Update internal to external mapping if internal_id as usize >= internal_to_external.len() { internal_to_external .resize(internal_id as usize + 1, PointIdType::NumId(u64::MAX)); } let replaced_external_id = internal_to_external[internal_id as usize]; internal_to_external[internal_id as usize] = external_id; // If point already exists, drop existing mapping if deleted .get(internal_id as usize) .is_some_and(|deleted| !deleted) { // Fixing corrupted mapping - this id should be recovered from WAL // This should not happen in normal operation, but it can happen if // the database is corrupted. 
log::warn!( "removing duplicated external id {external_id} in internal id {replaced_external_id}", ); debug_assert!(false, "should never have to remove"); match replaced_external_id { PointIdType::NumId(num) => { external_to_internal_num.remove(&num); } PointIdType::Uuid(uuid) => { external_to_internal_uuid.remove(&uuid); } } } // Mark point entry as not deleted if internal_id as usize >= deleted.len() { deleted.resize(internal_id as usize + 1, true); } deleted.set(internal_id as usize, false); // Set external to internal mapping match external_id { PointIdType::NumId(num) => { external_to_internal_num.insert(num, internal_id); } PointIdType::Uuid(uuid) => { external_to_internal_uuid.insert(uuid, internal_id); } } } MappingChange::Delete(external_id) => { // Remove external to internal mapping let internal_id = match external_id { PointIdType::NumId(idx) => external_to_internal_num.remove(&idx), PointIdType::Uuid(uuid) => external_to_internal_uuid.remove(&uuid), }; let Some(internal_id) = internal_id else { continue; }; // Set internal to external mapping back to max int if (internal_id as usize) < internal_to_external.len() { internal_to_external[internal_id as usize] = PointIdType::NumId(u64::MAX); } // Mark internal point as deleted if internal_id as usize >= deleted.len() { deleted.resize(internal_id as usize + 1, true); } deleted.set(internal_id as usize, true); } } } let mappings = PointMappings::new( deleted, internal_to_external, external_to_internal_num, external_to_internal_uuid, ); Ok(mappings) } /// Deserialize a single mapping change entry from the given reader /// /// This function reads exact one entry which means after calling this function, the reader /// will be at the start of the next entry. /// /// The number of bytes read is returned on successful read. 
fn read_entry<R: Read>(reader: &mut R) -> io::Result<(MappingChange, u64)> { let change_type = reader.read_u8()?; let change_type = MappingChangeType::from_byte(change_type).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!("Corrupted ID tracker mapping storage, got malformed mapping change byte {change_type:#04X}"), ) })?; // Size of persisted operation in bytes let operation_size = change_type.operation_size() as u64; match change_type { MappingChangeType::InsertNum => { let external_id = PointIdType::NumId(reader.read_u64::<FileEndianess>()?); let internal_id = reader.read_u32::<FileEndianess>()? as PointOffsetType; Ok(( MappingChange::Insert(external_id, internal_id), operation_size, )) } MappingChangeType::InsertUuid => { let external_id = PointIdType::Uuid(Uuid::from_u128_le(reader.read_u128::<FileEndianess>()?)); let internal_id = reader.read_u32::<FileEndianess>()? as PointOffsetType; Ok(( MappingChange::Insert(external_id, internal_id), operation_size, )) } MappingChangeType::DeleteNum => { let external_id = PointIdType::NumId(reader.read_u64::<FileEndianess>()?); Ok((MappingChange::Delete(external_id), operation_size)) } MappingChangeType::DeleteUuid => { let external_id = PointIdType::Uuid(Uuid::from_u128_le(reader.read_u128::<FileEndianess>()?)); Ok((MappingChange::Delete(external_id), operation_size)) } } } /// Serialize a single mapping change and write it into the given writer /// /// # File format /// /// Each change entry has a variable size. We first write a 1-byte header to define the change /// type. The change type implies how long the entry is. 
/// /// Insertion changes are serialized as follows: /// /// +-----------------------+-----------------------+------------------+ /// | MappingChangeType: u8 | Number/UUID: u64/u128 | Internal ID: u32 | /// +-----------------------+-----------------------+------------------+ /// /// Deletion changes are serialized as follows: /// /// +-----------------------+-----------------------+ /// | MappingChangeType: u8 | Number/UUID: u64/u128 | /// +-----------------------+-----------------------+ fn write_entry<W: Write>(mut writer: W, change: MappingChange) -> OperationResult<()> { // Byte to identity type of change writer.write_u8(change.change_type() as u8)?; // Serialize mapping change match change { MappingChange::Insert(PointIdType::NumId(external_id), internal_id) => { writer.write_u64::<FileEndianess>(external_id)?; writer.write_u32::<FileEndianess>(internal_id)?; } MappingChange::Insert(PointIdType::Uuid(external_id), internal_id) => { writer.write_u128::<FileEndianess>(external_id.to_u128_le())?; writer.write_u32::<FileEndianess>(internal_id)?; } MappingChange::Delete(PointIdType::NumId(external_id)) => { writer.write_u64::<FileEndianess>(external_id)?; } MappingChange::Delete(PointIdType::Uuid(external_id)) => { writer.write_u128::<FileEndianess>(external_id.to_u128_le())?; } } Ok(()) } fn load_versions(versions_path: &Path) -> OperationResult<Vec<SeqNumberType>> { let file = File::open(versions_path)?; let file_len = file.metadata()?.len(); if file_len % VERSION_ELEMENT_SIZE != 0 { log::warn!( "Corrupted ID tracker versions storage, file size not a multiple of a version, assuming automatic recovery by WAL" ); } let version_count = file_len / VERSION_ELEMENT_SIZE; let mut reader = BufReader::new(file); Ok((0..version_count) .map(|_| reader.read_u64::<FileEndianess>()) .collect::<Result<_, _>>()?) 
} /// Store new version changes, appending them to the given file fn store_version_changes( versions_path: &Path, changes: &BTreeMap<PointOffsetType, SeqNumberType>, ) -> OperationResult<()> { if changes.is_empty() { return Ok(()); } // Create or open file let file = File::options() .create(true) .write(true) .truncate(false) .open(versions_path)?; // Grow file if necessary in one shot // Prevents potentially reallocating the file multiple times when progressively writing changes match file.metadata() { Ok(metadata) => { let (&max_internal_id, _) = changes.last_key_value().unwrap(); let required_size = u64::from(max_internal_id + 1) * VERSION_ELEMENT_SIZE; if metadata.len() < required_size { file.set_len(required_size)?; } } Err(err) => { log::warn!( "Failed to get file length of mutable ID tracker versions file, ignoring: {err}" ); } } let mut writer = BufWriter::new(file); write_version_changes(&mut writer, changes).map_err(|err| { OperationError::service_error(format!( "Failed to persist ID tracker point versions ({}): {err}", versions_path.display(), )) })?; // Explicitly fsync file contents to ensure durability writer.flush().map_err(|err| { OperationError::service_error(format!( "Failed to flush ID tracker point versions write buffer: {err}", )) })?; let file = writer.into_inner().map_err(|err| err.into_error())?; file.sync_all().map_err(|err| { OperationError::service_error(format!("Failed to fsync ID tracker point versions: {err}")) })?; Ok(()) } /// Serializes pending point version changes into the given writer fn write_version_changes<W>( mut writer: W, changes: &BTreeMap<PointOffsetType, SeqNumberType>, ) -> OperationResult<()> where W: Write + Seek, { let mut position = writer.stream_position()?; // Write all changes, must be ordered by internal ID, see optimization note below for (&internal_id, &version) in changes { let offset = u64::from(internal_id) * VERSION_ELEMENT_SIZE; // Seek to correct position if not already at it // // This assumes we're 
using a BufWriter. We only explicitly seek if not at the correct // position already, because seeking is expensive. When we seek it automatically flushes // our buffered writes to durable storage even if our position didn't change. This // optimization significantly improves performance when writing a large batch of versions // by reducing the number of flushes and syscalls. // See: <https://doc.rust-lang.org/std/io/trait.Seek.html#tymethod.seek> // // We track the position ourselves because using `stream_position()` as getter also invokes // a seek, causing an explicit flush. // // Now we only flush if: // - we seek to a new position because there's a gap in versions to update // - our write buffer is full // - after writing all versions if offset != position { position = writer.seek(io::SeekFrom::Start(offset))?; } // Write version and update position writer.write_u64::<FileEndianess>(version)?; position += VERSION_ELEMENT_SIZE; } // Explicitly flush writer to catch IO errors writer.flush()?; Ok(()) } fn reconcile_persisted_version_changes( pending: &Mutex<BTreeMap<PointOffsetType, SeqNumberType>>, changes: BTreeMap<PointOffsetType, SeqNumberType>, ) { pending.lock().retain(|point_offset, pending_version| { changes .get(point_offset) .is_none_or(|persisted_version| pending_version != persisted_version) }); } fn reconcile_persisted_mapping_changes( pending: &Mutex<Vec<MappingChange>>, changes: &Vec<MappingChange>, ) { let mut pending = pending.lock(); // Count how many entries are equal in both lists // With concurrent flushers it is possible that the beginning of the lists doesn't match. Since // each event is idempotent it is not a problem, and we can store everything again in the next // iteration. 
let count = pending .iter() .zip(changes) .take_while(|(pending, persisted)| pending == persisted) .count(); pending.drain(0..count); } #[cfg(test)] pub(super) mod tests { use std::collections::{HashMap, HashSet}; use std::io::Cursor; use fs_err as fs; use itertools::Itertools; #[cfg(feature = "rocksdb")] use rand::Rng; use rand::prelude::*; use tempfile::Builder; use uuid::Uuid; use super::*; use crate::id_tracker::compressed::compressed_point_mappings::CompressedPointMappings; use crate::id_tracker::in_memory_id_tracker::InMemoryIdTracker; #[cfg(feature = "rocksdb")] use crate::id_tracker::simple_id_tracker::SimpleIdTracker; const RAND_SEED: u64 = 42; const DEFAULT_VERSION: SeqNumberType = 42; #[test] fn test_iterator() { let segment_dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let mut id_tracker = MutableIdTracker::open(segment_dir.path()).unwrap(); id_tracker.set_link(200.into(), 0).unwrap(); id_tracker.set_link(100.into(), 1).unwrap(); id_tracker.set_link(150.into(), 2).unwrap(); id_tracker.set_link(120.into(), 3).unwrap(); id_tracker.set_link(180.into(), 4).unwrap(); id_tracker.set_link(110.into(), 5).unwrap(); id_tracker.set_link(115.into(), 6).unwrap(); id_tracker.set_link(190.into(), 7).unwrap(); id_tracker.set_link(177.into(), 8).unwrap(); id_tracker.set_link(118.into(), 9).unwrap(); let first_four = id_tracker.iter_from(None).take(4).collect_vec(); assert_eq!(first_four.len(), 4); assert_eq!(first_four[0].0, 100.into()); let last = id_tracker.iter_from(Some(first_four[3].0)).collect_vec(); assert_eq!(last.len(), 7); } pub const TEST_POINTS: &[PointIdType] = &[ PointIdType::NumId(100), PointIdType::Uuid(Uuid::from_u128(123_u128)), PointIdType::Uuid(Uuid::from_u128(156_u128)), PointIdType::NumId(150), PointIdType::NumId(120), PointIdType::Uuid(Uuid::from_u128(12_u128)), PointIdType::NumId(180), PointIdType::NumId(110), PointIdType::NumId(115), PointIdType::Uuid(Uuid::from_u128(673_u128)),
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/compressed/compressed_point_mappings.rs
lib/segment/src/id_tracker/compressed/compressed_point_mappings.rs
#[cfg(test)] use std::collections::BTreeMap; #[cfg(test)] use std::collections::btree_map::Entry; use std::iter; use bitvec::prelude::{BitSlice, BitVec}; use byteorder::LittleEndian; #[cfg(test)] use common::bitpacking::make_bitmask; use common::types::PointOffsetType; use itertools::Itertools; #[cfg(test)] use rand::Rng as _; use rand::distr::Distribution; #[cfg(test)] use rand::rngs::StdRng; #[cfg(test)] use rand::seq::SliceRandom as _; #[cfg(test)] use uuid::Uuid; use crate::id_tracker::compressed::external_to_internal::CompressedExternalToInternal; use crate::id_tracker::compressed::internal_to_external::CompressedInternalToExternal; use crate::id_tracker::point_mappings::PointMappings; use crate::types::PointIdType; /// Used endianness for storing PointMapping-files. pub type FileEndianess = LittleEndian; #[derive(Clone, PartialEq, Default, Debug)] pub struct CompressedPointMappings { /// `deleted` specifies which points of internal_to_external was deleted. /// Its size is exactly the same as `internal_to_external`. deleted: BitVec, internal_to_external: CompressedInternalToExternal, // Having two separate maps allows us iterating only over one type at a time without having to filter. 
external_to_internal: CompressedExternalToInternal, } impl CompressedPointMappings { pub fn new( mut deleted: BitVec, internal_to_external: CompressedInternalToExternal, external_to_internal: CompressedExternalToInternal, ) -> Self { // Resize deleted to have the same number of elements as internal_to_external // Not all structures we may source this from enforce the same size deleted.resize(internal_to_external.len(), false); Self { deleted, internal_to_external, external_to_internal, } } pub fn from_mappings(mapping: PointMappings) -> Self { let (deleted, internal_to_external, external_to_internal_num, external_to_internal_uuid) = mapping.deconstruct(); let compressed_internal_to_external = CompressedInternalToExternal::from_slice(&internal_to_external); let compressed_external_to_internal = CompressedExternalToInternal::from_maps( external_to_internal_num, external_to_internal_uuid, ); Self { deleted, internal_to_external: compressed_internal_to_external, external_to_internal: compressed_external_to_internal, } } /// Number of points, excluding deleted ones. pub(crate) fn available_point_count(&self) -> usize { self.external_to_internal.len() } pub(crate) fn deleted(&self) -> &BitSlice { &self.deleted } pub(crate) fn internal_id(&self, external_id: &PointIdType) -> Option<PointOffsetType> { self.external_to_internal.get(external_id) } pub(crate) fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { if *self.deleted.get(internal_id as usize)? 
{ return None; } self.internal_to_external.get(internal_id) } pub(crate) fn drop(&mut self, external_id: PointIdType) -> Option<PointOffsetType> { let internal_id = self.external_to_internal.remove(&external_id); if let Some(internal_id) = &internal_id { self.deleted.set(*internal_id as usize, true); } internal_id } pub(crate) fn iter_random( &self, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { let rng = rand::rng(); let max_internal = self.internal_to_external.len(); if max_internal == 0 { return Box::new(iter::empty()); } let uniform = rand::distr::Uniform::new(0, max_internal) .expect("above check guarantees max_internal > 0"); let iter = Distribution::sample_iter(uniform, rng) // TODO: this is not efficient if `max_internal` is large and we iterate over most of them, // but it's good enough for low limits. // // We could improve it by using a variable-period PRNG to adjust depending on the number of available points. .unique() .take(max_internal) .filter_map(move |i| { if self.deleted[i] { None } else { let point_offset = i as PointOffsetType; Some(( self.internal_to_external.get(point_offset).unwrap(), point_offset, )) } }); Box::new(iter) } pub(crate) fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { match external_id { None => Box::new(self.external_to_internal.iter()), Some(point_id) => Box::new(self.external_to_internal.iter_from(point_id)), } } pub(crate) fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { Box::new( self.external_to_internal .iter() .map(|(point_id, _)| point_id), ) } pub(crate) fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { Box::new( (0..self.internal_to_external.len() as PointOffsetType) .filter(move |i| !self.deleted[*i as usize]), ) } pub(crate) fn iter_internal_raw( &self, ) -> impl Iterator<Item = (PointOffsetType, PointIdType)> + '_ { self.internal_to_external .iter() .enumerate() 
.map(|(offset, point_id)| (offset as _, point_id)) } pub(crate) fn is_deleted_point(&self, key: PointOffsetType) -> bool { let key = key as usize; if key >= self.deleted.len() { return true; } self.deleted[key] } pub(crate) fn total_point_count(&self) -> usize { self.internal_to_external.len() } /// Generate a random [`PointMappings`]. #[cfg(test)] pub fn random(rand: &mut StdRng, total_size: u32) -> Self { Self::random_with_params(rand, total_size, total_size, 128) } /// Generate a random [`PointMappings`] using the following parameters: /// /// - `total_size`: total number of points, including deleted ones. /// - `preserved_size`: number of points that are not deleted. /// - `bits_in_id`: number of bits in generated ids. /// Decrease this value to restrict the amount of unique ids across all /// multiple invocations of this function. /// E.g. if `bits_in_id` is 8, then only 512 unique ids will be generated. /// (256 uuids + 256 u64s) #[cfg(test)] pub fn random_with_params( rand: &mut StdRng, total_size: u32, preserved_size: u32, bits_in_id: u8, ) -> Self { let mask: u128 = make_bitmask(bits_in_id); let mask_u64: u64 = mask as u64; const UUID_LIKELYNESS: f64 = 0.5; let mut external_to_internal_num = BTreeMap::new(); let mut external_to_internal_uuid = BTreeMap::new(); let mut internal_ids = (0..total_size).collect_vec(); internal_ids.shuffle(rand); internal_ids.truncate(preserved_size as usize); let mut deleted = BitVec::repeat(true, total_size as usize); for id in &internal_ids { deleted.set(*id as usize, false); } let internal_to_external: Vec<_> = (0..total_size) .map(|pos| { loop { if rand.random_bool(UUID_LIKELYNESS) { let uuid = Uuid::from_u128(rand.random_range(0..=mask)); if let Entry::Vacant(e) = external_to_internal_uuid.entry(uuid) { e.insert(pos); return PointIdType::Uuid(uuid); } } else { let num = rand.random_range(0..=mask_u64); if let Entry::Vacant(e) = external_to_internal_num.entry(num) { e.insert(pos); return PointIdType::NumId(num); } } } }) 
.collect(); let compressed_internal_to_external = CompressedInternalToExternal::from_slice(&internal_to_external); let external_to_internal = CompressedExternalToInternal::from_maps( external_to_internal_num, external_to_internal_uuid, ); Self { deleted, internal_to_external: compressed_internal_to_external, external_to_internal, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/compressed/mod.rs
lib/segment/src/id_tracker/compressed/mod.rs
pub mod compressed_point_mappings; pub mod external_to_internal; pub mod internal_to_external; pub mod versions_store;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/compressed/versions_store.rs
lib/segment/src/id_tracker/compressed/versions_store.rs
use ahash::AHashMap; use common::types::PointOffsetType; use crate::types::SeqNumberType; /// Compressed representation of Vec<SeqNumberType> /// Which takes advantage of the fact that the sequence numbers are likely to be < 2**32 /// /// Implements a required subset of the Vec API: /// /// * get by index /// * set by index /// * [] operator /// * len /// * push #[derive(Debug)] pub struct CompressedVersions { lower_bytes: Vec<u32>, upper_bytes: AHashMap<u32, u32>, } impl CompressedVersions { fn version_from_parts(lower: u32, upper: u32) -> SeqNumberType { (u64::from(upper) << u32::BITS) | u64::from(lower) } fn version_to_parts(value: SeqNumberType) -> (u32, u32) { let lower = value as u32; let upper = (value >> u32::BITS) as u32; (lower, upper) } pub fn has(&self, index: u32) -> bool { index < self.len() as u32 } pub fn get(&self, index: u32) -> Option<SeqNumberType> { self.lower_bytes.get(index as usize).map(|&lower| { let upper = *self.upper_bytes.get(&index).unwrap_or(&0); Self::version_from_parts(lower, upper) }) } /// Set the point version at the given internal index /// /// # Panics /// /// Panics if `index` is out of bounds. The internal structure will not grow. 
pub fn set(&mut self, index: u32, value: SeqNumberType) { let (lower, upper) = Self::version_to_parts(value); self.lower_bytes[index as usize] = lower; if upper > 0 { self.upper_bytes.insert(index, upper); } else { self.upper_bytes.remove(&index); } } pub fn len(&self) -> usize { self.lower_bytes.len() } pub fn is_empty(&self) -> bool { self.lower_bytes.is_empty() } /// Create version store from the given slice of versions /// /// # Panics /// /// Panics if the slice is larger than `u32::MAX` elements pub fn from_slice(slice: &[SeqNumberType]) -> Self { assert!( slice.len() <= u32::MAX as usize, "version slice cannot be larger than u32::MAX", ); let mut lower_bytes = Vec::with_capacity(slice.len()); let mut upper_bytes = AHashMap::new(); for (index, &value) in slice.iter().enumerate() { let (lower, upper) = Self::version_to_parts(value); lower_bytes.push(lower); if upper > 0 { upper_bytes.insert(index as u32, upper); } } Self { lower_bytes, upper_bytes, } } pub fn iter(&self) -> impl Iterator<Item = (PointOffsetType, SeqNumberType)> + '_ { self.lower_bytes.iter().enumerate().map(|(index, &lower)| { let upper = *self.upper_bytes.get(&(index as u32)).unwrap_or(&0); ( index as PointOffsetType, Self::version_from_parts(lower, upper), ) }) } } #[cfg(test)] mod tests { use std::ops::RangeInclusive; use proptest::prelude::*; use rand::Rng; use super::*; use crate::types::SeqNumberType; const fn model_test_range() -> RangeInclusive<SeqNumberType> { 0..=SeqNumberType::MAX } proptest! 
{ #[test] fn compare_with_vec_model( mut model in prop::collection::vec(model_test_range(), 0..1000) ) { let mut compressed = CompressedVersions::from_slice(&model); // Check get() for (i, model_value) in model.iter().enumerate() { assert_eq!(*model_value, compressed.get(i as u32).unwrap()); } // Check set() let mut rng = rand::rng(); #[expect(clippy::needless_range_loop)] for i in 0..model.len() { let new_value = rng.random_range(model_test_range()); model[i] = new_value; compressed.set(i as u32, new_value); assert_eq!(model[i], compressed.get(i as u32).unwrap()); } // Check len() assert_eq!(model.len(), compressed.len()); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/compressed/external_to_internal.rs
lib/segment/src/id_tracker/compressed/external_to_internal.rs
use std::collections::BTreeMap; use bitvec::prelude::BitVec; use common::types::PointOffsetType; use itertools::Either; use uuid::Uuid; use crate::types::PointIdType; /// A compressed representation of /// /// - `external_to_internal_num: BTreeMap<u64, PointOffsetType>` /// - `external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType>` /// /// The main idea is to use sorted vector instead of BTreeMap. /// This structure doesn't require random insertions, so we can sort it once and then use binary search. /// /// There is, however, a requirement to remove elements, so we will use a BitVec to mark removed elements. #[derive(Clone, PartialEq, Default, Debug)] pub struct CompressedExternalToInternal { num_ids: Vec<(u64, PointOffsetType)>, num_ids_removed: BitVec, uuids: Vec<(Uuid, PointOffsetType)>, uuids_removed: BitVec, count_removed: usize, } impl CompressedExternalToInternal { pub fn from_vectors( external_to_internal_num: Vec<(u64, PointOffsetType)>, external_to_internal_uuid: Vec<(Uuid, PointOffsetType)>, ) -> Self { let mut num_ids = external_to_internal_num; let mut uuids = external_to_internal_uuid; num_ids.shrink_to_fit(); uuids.shrink_to_fit(); num_ids.sort_unstable(); uuids.sort_unstable(); let num_ids_removed = BitVec::repeat(false, num_ids.len()); let uuids_removed = BitVec::repeat(false, uuids.len()); let num_removed = 0; Self { num_ids, num_ids_removed, uuids, uuids_removed, count_removed: num_removed, } } pub fn from_maps( external_to_internal_num: BTreeMap<u64, PointOffsetType>, external_to_internal_uuid: BTreeMap<Uuid, PointOffsetType>, ) -> Self { let mut num_ids: Vec<_> = external_to_internal_num.into_iter().collect(); let mut uuids: Vec<_> = external_to_internal_uuid.into_iter().collect(); num_ids.sort_unstable(); uuids.sort_unstable(); let num_ids_removed = BitVec::repeat(false, num_ids.len()); let uuids_removed = BitVec::repeat(false, uuids.len()); let num_removed = 0; Self { num_ids, num_ids_removed, uuids, uuids_removed, count_removed: 
num_removed, } } pub fn len(&self) -> usize { self.num_ids.len() + self.uuids.len() - self.count_removed } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn get(&self, external_id: &PointIdType) -> Option<PointOffsetType> { match external_id { PointIdType::NumId(num) => { let idx = self .num_ids .binary_search_by_key(num, |(num, _)| *num) .ok()?; if self.num_ids_removed[idx] { None } else { Some(self.num_ids[idx].1) } } PointIdType::Uuid(uuid) => { let idx = self .uuids .binary_search_by_key(uuid, |(uuid, _)| *uuid) .ok()?; if self.uuids_removed[idx] { None } else { Some(self.uuids[idx].1) } } } } pub fn remove(&mut self, external_id: &PointIdType) -> Option<PointOffsetType> { match external_id { PointIdType::NumId(num) => { let idx = self .num_ids .binary_search_by_key(num, |(num, _)| *num) .ok()?; if self.num_ids_removed[idx] { None } else { self.num_ids_removed.set(idx, true); self.count_removed += 1; Some(self.num_ids[idx].1) } } PointIdType::Uuid(uuid) => { let idx = self .uuids .binary_search_by_key(uuid, |(uuid, _)| *uuid) .ok()?; if self.uuids_removed[idx] { None } else { self.uuids_removed.set(idx, true); self.count_removed += 1; Some(self.uuids[idx].1) } } } } fn num_iter(&self) -> impl Iterator<Item = (PointIdType, PointOffsetType)> + '_ { self.num_ids .iter() .enumerate() .filter_map(move |(idx, (num, internal_id))| { if self.num_ids_removed[idx] { None } else { Some((PointIdType::NumId(*num), *internal_id)) } }) } fn uuid_iter(&self) -> impl Iterator<Item = (PointIdType, PointOffsetType)> + '_ { self.uuids .iter() .enumerate() .filter_map(move |(idx, (uuid, internal_id))| { if self.uuids_removed[idx] { None } else { Some((PointIdType::Uuid(*uuid), *internal_id)) } }) } pub fn iter(&self) -> impl Iterator<Item = (PointIdType, PointOffsetType)> + '_ { let num_iter = self.num_iter(); let uuid_iter = self.uuid_iter(); num_iter.chain(uuid_iter) } pub fn iter_from( &self, point_id: PointIdType, ) -> impl Iterator<Item = (PointIdType, 
PointOffsetType)> + '_ { match point_id { PointIdType::NumId(num_id) => { // Iterator over range of num ids and then over all UUID, as we assume that // any UUID is bigger than any num id. let num_id_iterator_from = self .num_ids .binary_search_by_key(&num_id, |(num, _)| *num) .unwrap_or_else(|x| x); let num_id_iter = (num_id_iterator_from..self.num_ids.len()).filter_map(|idx| { let (point_id, point_offset) = self.num_ids[idx]; let is_removed = self.num_ids_removed[idx]; if is_removed { None } else { Some((PointIdType::NumId(point_id), point_offset)) } }); let uuid_iter = self.uuid_iter(); Either::Right(num_id_iter.chain(uuid_iter)) } PointIdType::Uuid(uuid) => { // Just iterate over range of uuids let uuid_iterator_from = self .uuids .binary_search_by_key(&uuid, |(uuid, _)| *uuid) .unwrap_or_else(|x| x); let uuid_iter = (uuid_iterator_from..self.uuids.len()).filter_map(|idx| { let (point_id, point_offset) = self.uuids[idx]; let is_removed = self.uuids_removed[idx]; if is_removed { None } else { Some((PointIdType::Uuid(point_id), point_offset)) } }); Either::Left(uuid_iter) } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/id_tracker/compressed/internal_to_external.rs
lib/segment/src/id_tracker/compressed/internal_to_external.rs
use bitvec::prelude::BitVec; use common::types::PointOffsetType; use uuid::Uuid; use crate::types::PointIdType; /// A compressed representation of `internal_to_external: Vec<PointIdType>`. /// /// The main idea is instead of `PointIdType` enum (which is 24 bytes) we use one /// Vec<u128> and bitmask which defines if the id is u64 or UUID (which is ~16 bytes). #[derive(Clone, PartialEq, Default, Debug)] pub struct CompressedInternalToExternal { data: Vec<u128>, is_uuid: BitVec, } impl CompressedInternalToExternal { pub fn with_capacity(capacity: usize) -> Self { Self { data: Vec::with_capacity(capacity), is_uuid: BitVec::with_capacity(capacity), } } pub fn len(&self) -> usize { self.data.len() } pub fn is_empty(&self) -> bool { self.data.is_empty() } pub fn resize(&mut self, new_len: usize, value: PointIdType) { let stored_value = match value { PointIdType::NumId(num_id) => u128::from(num_id), PointIdType::Uuid(uuid) => uuid.as_u128(), }; let is_uuid = matches!(value, PointIdType::Uuid(_)); self.data.resize(new_len, stored_value); self.is_uuid.resize(new_len, is_uuid); } pub fn set(&mut self, internal_id: PointOffsetType, value: PointIdType) { let index = internal_id as usize; let stored_value = match value { PointIdType::NumId(num_id) => u128::from(num_id), PointIdType::Uuid(uuid) => uuid.as_u128(), }; let is_uuid = matches!(value, PointIdType::Uuid(_)); self.data[index] = stored_value; self.is_uuid.set(index, is_uuid); } pub fn from_slice(slice: &[PointIdType]) -> Self { let mut data = Vec::with_capacity(slice.len()); let mut is_uuid = BitVec::with_capacity(slice.len()); for id in slice { match id { PointIdType::NumId(num_id) => { data.push(u128::from(*num_id)); is_uuid.push(false); } PointIdType::Uuid(uuid) => { data.push(uuid.as_u128()); is_uuid.push(true); } } } Self { data, is_uuid } } pub fn get(&self, internal_id: PointOffsetType) -> Option<PointIdType> { let index = internal_id as usize; let data = self.data.get(index)?; let is_uuid = 
*self.is_uuid.get(index)?; if is_uuid { Some(PointIdType::Uuid(Uuid::from_u128(*data))) } else { debug_assert!( *data <= u128::from(u64::MAX), "type mismatch, external ID does not fit u64", ); Some(PointIdType::NumId(*data as u64)) } } pub fn iter(&self) -> impl Iterator<Item = PointIdType> + '_ { self.data .iter() .zip(self.is_uuid.iter()) .map(|(data, is_uuid)| { if *is_uuid { PointIdType::Uuid(Uuid::from_u128(*data)) } else { debug_assert!( *data <= u128::from(u64::MAX), "type mismatch, external ID does not fit u64", ); PointIdType::NumId(*data as u64) } }) } } #[cfg(test)] mod tests { use uuid::Uuid; use super::*; fn create_uuid() -> Uuid { Uuid::new_v4() } #[test] fn with_capacity_creates_empty_struct() { let compressed = CompressedInternalToExternal::with_capacity(10); assert_eq!(compressed.len(), 0); assert!(compressed.is_empty()); } #[test] fn resize_changes_length_and_fills_with_value() { let mut compressed = CompressedInternalToExternal::with_capacity(0); let uuid = create_uuid(); compressed.resize(5, PointIdType::Uuid(uuid)); assert_eq!(compressed.len(), 5); for i in 0..5 { assert_eq!( compressed.get(i as PointOffsetType), Some(PointIdType::Uuid(uuid)) ); } } #[test] fn set_updates_value_at_index() { let mut compressed = CompressedInternalToExternal::with_capacity(1); let uuid = create_uuid(); compressed.resize(1, PointIdType::NumId(42)); compressed.set(0, PointIdType::Uuid(uuid)); assert_eq!(compressed.get(0), Some(PointIdType::Uuid(uuid))); } #[test] fn from_slice_creates_struct_from_slice() { let uuid = create_uuid(); let slice = vec![PointIdType::NumId(42), PointIdType::Uuid(uuid)]; let compressed = CompressedInternalToExternal::from_slice(&slice); assert_eq!(compressed.len(), 2); assert_eq!(compressed.get(0), Some(PointIdType::NumId(42))); assert_eq!(compressed.get(1), Some(PointIdType::Uuid(uuid))); } #[test] fn get_returns_none_for_out_of_bounds() { let compressed = CompressedInternalToExternal::with_capacity(0); assert_eq!(compressed.get(0), 
None); } #[test] fn iter_returns_all_elements() { let uuid = create_uuid(); let slice = vec![PointIdType::NumId(42), PointIdType::Uuid(uuid)]; let compressed = CompressedInternalToExternal::from_slice(&slice); let collected: Vec<PointIdType> = compressed.iter().collect(); assert_eq!(collected, slice); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/entry/mod.rs
lib/segment/src/entry/mod.rs
pub mod entry_point; pub mod snapshot_entry; pub use entry_point::SegmentEntry; pub use snapshot_entry::SnapshotEntry;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/entry/snapshot_entry.rs
lib/segment/src/entry/snapshot_entry.rs
use std::path::Path; use common::tar_ext; use crate::common::operation_error::OperationResult; use crate::data_types::manifest::SnapshotManifest; use crate::types::SnapshotFormat; pub trait SnapshotEntry { /// Take a snapshot of the segment. /// /// Creates a tar archive of the segment directory into `snapshot_dir_path`. /// Uses `temp_path` to prepare files to archive. fn take_snapshot( &self, temp_path: &Path, tar: &tar_ext::BuilderExt, format: SnapshotFormat, manifest: Option<&SnapshotManifest>, ) -> OperationResult<()>; fn collect_snapshot_manifest(&self, manifest: &mut SnapshotManifest) -> OperationResult<()>; }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/entry/entry_point.rs
lib/segment/src/entry/entry_point.rs
use std::collections::{BTreeSet, HashMap, HashSet}; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::types::TelemetryDetail; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult, SegmentFailedState}; use crate::data_types::build_index_result::BuildFieldIndexResult; use crate::data_types::facets::{FacetParams, FacetValue}; use crate::data_types::named_vectors::NamedVectors; use crate::data_types::order_by::{OrderBy, OrderValue}; use crate::data_types::query_context::{FormulaContext, QueryContext, SegmentQueryContext}; use crate::data_types::vectors::{QueryVector, VectorInternal}; use crate::entry::snapshot_entry::SnapshotEntry; use crate::index::field_index::{CardinalityEstimation, FieldIndex}; use crate::json_path::JsonPath; use crate::telemetry::SegmentTelemetry; use crate::types::{ Filter, Payload, PayloadFieldSchema, PayloadKeyType, PayloadKeyTypeRef, PointIdType, ScoredPoint, SearchParams, SegmentConfig, SegmentInfo, SegmentType, SeqNumberType, VectorName, VectorNameBuf, WithPayload, WithVector, }; /// Define all operations which can be performed with Segment or Segment-like entity. /// /// Assume all operations are idempotent - which means that no matter how many times an operation /// is executed - the storage state will be the same. pub trait SegmentEntry: SnapshotEntry { /// Get current update version of the segment fn version(&self) -> SeqNumberType; /// Get current persistent version of the segment fn persistent_version(&self) -> SeqNumberType; fn is_proxy(&self) -> bool; /// Get version of specified point /// /// Returns `None` if point does not exist or is soft-deleted. 
fn point_version(&self, point_id: PointIdType) -> Option<SeqNumberType>; #[allow(clippy::too_many_arguments)] fn search_batch( &self, vector_name: &VectorName, query_vectors: &[&QueryVector], with_payload: &WithPayload, with_vector: &WithVector, filter: Option<&Filter>, top: usize, params: Option<&SearchParams>, query_context: &SegmentQueryContext, ) -> OperationResult<Vec<Vec<ScoredPoint>>>; /// Rescore results with a formula that can reference payload values. /// /// A deleted bitslice is passed to exclude points from a wrapped segment. fn rescore_with_formula( &self, formula_ctx: Arc<FormulaContext>, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<ScoredPoint>>; fn upsert_point( &mut self, op_num: SeqNumberType, point_id: PointIdType, vectors: NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn delete_point( &mut self, op_num: SeqNumberType, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn update_vectors( &mut self, op_num: SeqNumberType, point_id: PointIdType, vectors: NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn delete_vector( &mut self, op_num: SeqNumberType, point_id: PointIdType, vector_name: &VectorName, ) -> OperationResult<bool>; fn set_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, payload: &Payload, key: &Option<JsonPath>, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn set_full_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, full_payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn delete_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, key: PayloadKeyTypeRef, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn clear_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool>; fn vector( &self, vector_name: &VectorName, point_id: PointIdType, hw_counter: 
&HardwareCounterCell, ) -> OperationResult<Option<VectorInternal>>; fn all_vectors( &self, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<NamedVectors<'_>>; /// Retrieve payload for the point /// If not found, return empty payload fn payload( &self, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload>; /// Iterator over all points in segment in ascending order. fn iter_points(&self) -> Box<dyn Iterator<Item = PointIdType> + '_>; /// Paginate over points which satisfies filtering condition starting with `offset` id including. /// /// Cancelled by `is_stopped` flag. fn read_filtered( &self, offset: Option<PointIdType>, limit: Option<usize>, filter: Option<&Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType>; /// Return points which satisfies filtering condition ordered by the `order_by.key` field, /// starting with `order_by.start_from` value including. /// /// Will fail if there is no index for the order_by key. /// Cancelled by `is_stopped` flag. fn read_ordered_filtered<'a>( &'a self, limit: Option<usize>, filter: Option<&'a Filter>, order_by: &'a OrderBy, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<(OrderValue, PointIdType)>>; /// Return random points which satisfies filtering condition. /// /// Cancelled by `is_stopped` flag. fn read_random_filtered( &self, limit: usize, filter: Option<&Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType>; /// Read points in [from; to) range fn read_range(&self, from: Option<PointIdType>, to: Option<PointIdType>) -> Vec<PointIdType>; /// Return all unique values for the given key. fn unique_values( &self, key: &JsonPath, filter: Option<&Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<BTreeSet<FacetValue>>; /// Return the largest counts for the given facet request. 
fn facet( &self, request: &FacetParams, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<HashMap<FacetValue, usize>>; /// Check if there is point with `point_id` in this segment. /// /// Soft deleted points are excluded. fn has_point(&self, point_id: PointIdType) -> bool; /// Estimate available point count in this segment for given filter. fn estimate_point_count<'a>( &'a self, filter: Option<&'a Filter>, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation; fn vector_names(&self) -> HashSet<VectorNameBuf>; /// Whether this segment is completely empty in terms of points /// /// The segment is considered to not be empty if it contains any points, even if deleted. /// Deleted points still have a version which may be important at time of recovery. Deciding /// this by just the reported point count is not reliable in case a proxy segment is used. /// /// Payload indices or type of storage are not considered here. fn is_empty(&self) -> bool; /// Number of available points /// /// - excludes soft deleted points fn available_point_count(&self) -> usize; /// Number of deleted points fn deleted_point_count(&self) -> usize; /// Size of all available vectors in storage fn available_vectors_size_in_bytes(&self, vector_name: &VectorName) -> OperationResult<usize>; /// Max value from all `available_vectors_size_in_bytes` fn max_available_vectors_size_in_bytes(&self) -> OperationResult<usize> { self.vector_names() .into_iter() .map(|vector_name| self.available_vectors_size_in_bytes(&vector_name)) .collect::<OperationResult<Vec<_>>>() .map(|sizes| sizes.into_iter().max().unwrap_or_default()) } /// Get segment type fn segment_type(&self) -> SegmentType; /// Get current stats of the segment fn info(&self) -> SegmentInfo; /// Get size related stats of the segment. /// This returns `SegmentInfo` with some non size-related data (like `schema`) unset to improve performance. 
fn size_info(&self) -> SegmentInfo; /// Get segment configuration fn config(&self) -> &SegmentConfig; /// Whether this segment is appendable /// /// Returns appendable state of outer most segment. If this is a proxy segment, this shadows /// the appendable state of the wrapped segment. fn is_appendable(&self) -> bool; /// Get flush ordering affinity /// When multiple segments are flushed together, it must follow this ordering to guarantee data /// consistency. fn flush_ordering(&self) -> SegmentFlushOrdering; /// Returns a function, which when called, will flush all pending changes to disk. /// If there are currently no changes to flush, returns None. /// If `force` is true, will return a flusher even if there are no changes to flush. fn flusher(&self, force: bool) -> Option<Flusher>; /// Immediately flush all changes to disk and return persisted version. /// Blocks the current thread. fn flush(&self, force: bool) -> OperationResult<SeqNumberType> { if let Some(flusher) = self.flusher(force) { flusher()?; } Ok(self.persistent_version()) } /// Removes all persisted data and forces to destroy segment fn drop_data(self) -> OperationResult<()>; /// Path to data, owned by segment fn data_path(&self) -> PathBuf; /// Delete field index, if exists fn delete_field_index( &mut self, op_num: SeqNumberType, key: PayloadKeyTypeRef, ) -> OperationResult<bool>; /// Delete field index, if exists and doesn't match the schema fn delete_field_index_if_incompatible( &mut self, op_num: SeqNumberType, key: PayloadKeyTypeRef, field_schema: &PayloadFieldSchema, ) -> OperationResult<bool>; /// Build the field index for the key and schema, if not built before. fn build_field_index( &self, op_num: SeqNumberType, key: PayloadKeyTypeRef, field_type: &PayloadFieldSchema, hw_counter: &HardwareCounterCell, ) -> OperationResult<BuildFieldIndexResult>; /// Apply a built index. Returns whether it was actually applied or not. 
fn apply_field_index( &mut self, op_num: SeqNumberType, key: PayloadKeyType, field_schema: PayloadFieldSchema, field_index: Vec<FieldIndex>, ) -> OperationResult<bool>; /// Create index for a payload field, if not exists fn create_field_index( &mut self, op_num: SeqNumberType, key: PayloadKeyTypeRef, field_schema: Option<&PayloadFieldSchema>, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let Some(field_schema) = field_schema else { // Legacy case, where we tried to automatically detect the schema for the field. // We don't do this anymore, as it is not reliable. return Err(OperationError::TypeInferenceError { field_name: key.clone(), }); }; self.delete_field_index_if_incompatible(op_num, key, field_schema)?; let (schema, indexes) = match self.build_field_index(op_num, key, field_schema, hw_counter)? { BuildFieldIndexResult::SkippedByVersion => { return Ok(false); } BuildFieldIndexResult::AlreadyExists => { return Ok(false); } BuildFieldIndexResult::IncompatibleSchema => { // This is a service error, as we should have just removed the old index // So it should not be possible to get this error return Err(OperationError::service_error(format!( "Incompatible schema for field index on field {key}", ))); } BuildFieldIndexResult::Built { schema, indexes } => (schema, indexes), }; self.apply_field_index(op_num, key.to_owned(), schema, indexes) } /// Get indexed fields fn get_indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema>; /// Checks if segment errored during last operations fn check_error(&self) -> Option<SegmentFailedState>; // Get collected telemetry data of segment fn get_telemetry_data(&self, detail: TelemetryDetail) -> SegmentTelemetry; fn fill_query_context(&self, query_context: &mut QueryContext); } /// Defines in what order multiple segments must be flushed. /// /// To achieve data consistency with our point copy on write mechanism, we must flush segments in a /// strict order. 
Appendable segments must be flushed first, non-appendable segments last. Proxy /// segments fall in between. /// /// When flush the segment holder, we effectively flush in four stages defined by the enum variants /// below. #[derive(PartialEq, Eq, Debug, Clone, Copy, Ord, PartialOrd)] pub enum SegmentFlushOrdering { // Must always be flushed first // - Point-CoW moves points into this segment Appendable, // - Point-CoW may have moved points into this segment before proxying, might be pending flush // - Point-CoW may have moved out and deleted points from this segment, these are not persisted ProxyWithAppendable, // - Point-CoW may have moved out and deleted points from here, these are not persisted ProxyWithNonAppendable, // Must always be flushed last // - Point-CoW moves out and deletes points from this segment NonAppendable, } impl SegmentFlushOrdering { pub fn proxy(self) -> Self { match self { SegmentFlushOrdering::Appendable => SegmentFlushOrdering::ProxyWithAppendable, SegmentFlushOrdering::NonAppendable => SegmentFlushOrdering::ProxyWithNonAppendable, proxy @ SegmentFlushOrdering::ProxyWithAppendable => proxy, proxy @ SegmentFlushOrdering::ProxyWithNonAppendable => proxy, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/key_encoding.rs
lib/segment/src/index/key_encoding.rs
const FLOAT_NAN: u8 = 0x00; const FLOAT_NEG: u8 = 0x01; const FLOAT_ZERO: u8 = 0x02; const FLOAT_POS: u8 = 0x03; const F64_KEY_LEN: usize = 13; const I64_KEY_LEN: usize = 12; const U128_KEY_LEN: usize = 20; /// Encode a f64 into `buf` /// /// The encoded format for a f64 is : /// /// **for positives:** the f64 bits ( in IEEE 754 format ) are re-interpreted as an int64 and /// encoded using big-endian order. /// /// **for negative** f64 : invert all the bits and encode it using bit-endian order. /// /// A single-byte prefix tag is appended to the front of the encoding slice to ensures that /// NaNs are always sorted first. /// /// This approach was inspired by <https://github.com/cockroachdb/cockroach/blob/master/pkg/util/encoding/float.go> /// /// /// #f64 encoding format /// ///```text /// 0 1 9 /// ┌───────────────────┬─────────────────┐ /// │ Float Type │ NEG: !key_val │ /// │ NAN/NEG/ZERO/POS | POS: key_val │ /// │ (big-endian) │ (big-endian) │ /// └───────────────────┴─────────────────┘ /// ``` /// pub fn encode_f64_ascending(val: f64, buf: &mut Vec<u8>) { if val.is_nan() { buf.push(FLOAT_NAN); buf.extend([0_u8; std::mem::size_of::<f64>()]); return; } if val == 0f64 { buf.push(FLOAT_ZERO); buf.extend([0_u8; std::mem::size_of::<f64>()]); return; } let f_as_u64 = val.to_bits(); if f_as_u64 & (1 << 63) != 0 { let f = !f_as_u64; buf.push(FLOAT_NEG); buf.extend(f.to_be_bytes()); } else { buf.push(FLOAT_POS); buf.extend(f_as_u64.to_be_bytes()); } } /// Decode a f64 from a slice. pub fn decode_f64_ascending(buf: &[u8]) -> f64 { match buf[0] { FLOAT_NAN => f64::NAN, FLOAT_NEG => { let u = u64::from_be_bytes(buf[1..9].try_into().expect("cannot decode f64")); let f = !u; f64::from_bits(f) } FLOAT_ZERO => 0f64, FLOAT_POS => { let u = u64::from_be_bytes(buf[1..9].try_into().expect("cannot decode f64")); f64::from_bits(u) } _ => panic!("invalid f64 prefix"), } } /// Encode a i64 into `buf` so that is sorts ascending. 
pub fn encode_i64_ascending(val: i64, buf: &mut Vec<u8>) { let i = val ^ i64::MIN; buf.extend(i.to_be_bytes()); } /// Decode a i64 from a slice pub fn decode_i64_ascending(buf: &[u8]) -> i64 { let i = i64::from_be_bytes(buf[0..8].try_into().expect("cannot decode i64")); i ^ i64::MIN } /// Encodes a f64 key so that it sort in ascending order. /// /// The key is compound by the numeric value of the key plus a u32 representing /// the payload offset within the payload store. /// /// # float key encoding format /// ///```text /// /// 0 1 9 13 /// ┌───────────────────┬─────────────────┬──────────────┐ /// │ Float Type │ NEG: !key_val │ │ /// │ NAN/NEG/ZERO/POS | POS: key_val │ point_offset │ /// │ (big-endian) │ (big-endian) │ │ /// └───────────────────┴─────────────────┴──────────────┘ /// ``` /// pub fn encode_f64_key_ascending(key_val: f64, point_offset: u32) -> Vec<u8> { let mut buf = Vec::with_capacity(F64_KEY_LEN); encode_f64_ascending(key_val, &mut buf); buf.extend(point_offset.to_be_bytes()); buf } pub fn decode_f64_key_ascending(buf: &[u8]) -> (u32, f64) { ( u32::from_be_bytes( (&buf[F64_KEY_LEN - std::mem::size_of::<u32>()..]) .try_into() .unwrap(), ), decode_f64_ascending(buf), ) } /// Encodes a i64 key so that it sort in ascending order. /// /// The key is compound by the numeric value of the key plus a u32 representing /// the payload offset within the payload store. 
/// /// # int key encoding format /// ///```text /// /// 0 8 12 /// ┌────────────────────┬──────────────┐ /// │ key_val ^ i64::MIN │ point_offset │ /// │ (big-endian) │ (big-endian) │ /// └────────────────────┴──────────────┘ ///``` pub fn encode_i64_key_ascending(key_val: i64, point_offset: u32) -> Vec<u8> { let mut buf = Vec::with_capacity(I64_KEY_LEN); encode_i64_ascending(key_val, &mut buf); buf.extend(point_offset.to_be_bytes()); buf } pub fn decode_i64_key_ascending(buf: &[u8]) -> (u32, i64) { ( u32::from_be_bytes( (&buf[I64_KEY_LEN - std::mem::size_of::<u32>()..]) .try_into() .unwrap(), ), decode_i64_ascending(buf), ) } /// Encodes a u128 key so that it sort in ascending order. /// /// The key is compound by the numeric value of the key plus a u32 representing /// the payload offset within the payload store. /// /// # int key encoding format /// ///```text /// /// 0 16 20 /// ┌─────────────────────┬──────────────┐ /// │ key_val │ point_offset │ /// │ (big-endian) │ (big-endian) │ /// └─────────────────────┴──────────────┘ ///``` pub fn encode_u128_key_ascending(key_val: u128, point_offset: u32) -> Vec<u8> { let mut buf = Vec::with_capacity(U128_KEY_LEN); buf.extend(key_val.to_be_bytes()); buf.extend(point_offset.to_be_bytes()); buf } pub fn decode_u128_key_ascending(buf: &[u8]) -> (u32, u128) { ( u32::from_be_bytes( (&buf[U128_KEY_LEN - std::mem::size_of::<u32>()..]) .try_into() .unwrap(), ), u128::from_be_bytes(buf[0..16].try_into().expect("cannot decode u128")), ) } #[cfg(test)] mod tests { use std::cmp::Ordering; use crate::index::key_encoding::{ decode_f64_ascending, decode_i64_ascending, encode_f64_ascending, encode_i64_ascending, }; #[test] fn test_encode_f64() { test_f64_encoding_roundtrip(0.42342); test_f64_encoding_roundtrip(0f64); test_f64_encoding_roundtrip(f64::NAN); test_f64_encoding_roundtrip(-0.423423983); } #[test] fn test_encode_i64() { test_i64_encoding_roundtrip(i64::MIN); test_i64_encoding_roundtrip(i64::MAX); 
test_i64_encoding_roundtrip(0); test_i64_encoding_roundtrip(41262); test_i64_encoding_roundtrip(-98793); } #[test] fn test_f64_lex_order() { let mut nan_buf = Vec::new(); let mut zero_buf = Vec::new(); let mut pos_buf = Vec::new(); let mut neg_buf = Vec::new(); encode_f64_ascending(f64::NAN, &mut nan_buf); encode_f64_ascending(0f64, &mut zero_buf); encode_f64_ascending(0.2435224412, &mut pos_buf); encode_f64_ascending(-0.82976347, &mut neg_buf); assert_eq!(nan_buf.cmp(&neg_buf), Ordering::Less); assert_eq!(neg_buf.cmp(&zero_buf), Ordering::Less); assert_eq!(zero_buf.cmp(&pos_buf), Ordering::Less); } #[test] fn test_i64_lex_order() { let mut zero_buf = Vec::new(); let mut pos_buf = Vec::new(); let mut neg_buf = Vec::new(); encode_i64_ascending(0, &mut zero_buf); encode_i64_ascending(123, &mut pos_buf); encode_i64_ascending(-4324, &mut neg_buf); assert_eq!(neg_buf.cmp(&zero_buf), Ordering::Less); assert_eq!(zero_buf.cmp(&pos_buf), Ordering::Less); } fn test_f64_encoding_roundtrip(val: f64) { let mut buf = Vec::new(); encode_f64_ascending(val, &mut buf); let dec_val = decode_f64_ascending(buf.as_slice()); if val.is_nan() { assert!(dec_val.is_nan()); return; } assert_eq!(val, dec_val); } fn test_i64_encoding_roundtrip(val: i64) { let mut buf = Vec::new(); encode_i64_ascending(val, &mut buf); let res = decode_i64_ascending(buf.as_slice()); assert_eq!(val, res); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/payload_config.rs
lib/segment/src/index/payload_config.rs
use std::collections::HashMap; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use io::file_operations::{atomic_save_json, read_json}; use serde::{Deserialize, Serialize}; use crate::common::operation_error::OperationResult; use crate::types::{PayloadFieldSchema, PayloadKeyType}; pub const PAYLOAD_INDEX_CONFIG_FILE: &str = "config.json"; /// Keeps information of which field should be index #[derive(Debug, Default, Deserialize, Serialize, Clone)] pub struct PayloadConfig { /// Mapping of payload index schemas and types #[serde(flatten)] pub indices: PayloadIndices, /// If true, don't create/initialize RocksDB for payload index /// This is required for migrating away from RocksDB in favor of the /// custom storage engine #[cfg(feature = "rocksdb")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_rocksdb: Option<bool>, } impl PayloadConfig { pub fn get_config_path(path: &Path) -> PathBuf { path.join(PAYLOAD_INDEX_CONFIG_FILE) } pub fn load(path: &Path) -> OperationResult<Self> { Ok(read_json(path)?) } pub fn save(&self, path: &Path) -> OperationResult<()> { Ok(atomic_save_json(path, self)?) } } /// Map of indexed fields with their schema and type /// /// Virtual structure, serialized and deserialized through `PayloadIndicesStorage`. #[derive(Debug, Default, Deserialize, Serialize, Clone)] #[serde(from = "PayloadIndicesStorage", into = "PayloadIndicesStorage")] pub struct PayloadIndices { fields: HashMap<PayloadKeyType, PayloadFieldSchemaWithIndexType>, } impl PayloadIndices { /// Check if any payload field has no explicit types configured /// /// Returns false if empty. pub fn any_has_no_type(&self) -> bool { self.fields.values().any(|index| index.types.is_empty()) } /// Check if any payload field used RocksDB /// /// Returns false if empty. 
#[cfg(feature = "rocksdb")] pub fn any_is_rocksdb(&self) -> bool { self.fields.values().any(|index| { index .types .iter() .any(|t| t.storage_type == StorageType::RocksDb) }) } pub fn to_schemas(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema> { self.fields .iter() .map(|(field, index)| (field.clone(), index.schema.clone())) .collect() } } impl Deref for PayloadIndices { type Target = HashMap<PayloadKeyType, PayloadFieldSchemaWithIndexType>; fn deref(&self) -> &Self::Target { &self.fields } } impl DerefMut for PayloadIndices { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.fields } } /// Storage helper for `PayloadIndices` /// /// This type is used for serialization and deserialization of the payload indices. It is /// compatible with the old format of payload indices, which only stored the indexed fields. #[derive(Deserialize, Serialize)] pub struct PayloadIndicesStorage { /// Map of indexed fields and their schema pub indexed_fields: HashMap<PayloadKeyType, PayloadFieldSchema>, /// Map of indexed fields and their explicit index types /// /// If empty for a field, no explicit payload index type mappings have been stored yet. /// Then use `schemas` to determine the index types with a best effort approach. 
/// /// Added since Qdrant 1.15 #[serde(default, skip_serializing_if = "HashMap::is_empty")] pub indexed_types: HashMap<PayloadKeyType, Vec<FullPayloadIndexType>>, } impl From<PayloadIndicesStorage> for PayloadIndices { fn from(mut storage: PayloadIndicesStorage) -> Self { let fields = storage .indexed_fields .into_iter() .map(|(field, schema)| { let index_types = storage.indexed_types.remove(&field).unwrap_or_default(); ( field, PayloadFieldSchemaWithIndexType::new(schema, index_types), ) }) .collect::<HashMap<_, _>>(); Self { fields } } } impl From<PayloadIndices> for PayloadIndicesStorage { fn from(storage: PayloadIndices) -> Self { let (indexed_fields, indexed_types) = storage.fields.into_iter().fold( (HashMap::new(), HashMap::new()), |(mut fields, mut types), (field, schema)| { fields.insert(field.clone(), schema.schema); if !schema.types.is_empty() { types.insert(field, schema.types); } (fields, types) }, ); Self { indexed_fields, indexed_types, } } } #[derive(Debug, Clone, PartialEq)] pub struct PayloadFieldSchemaWithIndexType { pub schema: PayloadFieldSchema, pub types: Vec<FullPayloadIndexType>, } impl PayloadFieldSchemaWithIndexType { pub fn new(schema: PayloadFieldSchema, types: Vec<FullPayloadIndexType>) -> Self { Self { schema, types } } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] #[serde(rename_all = "snake_case")] pub enum PayloadIndexType { IntIndex, DatetimeIndex, IntMapIndex, KeywordIndex, FloatIndex, GeoIndex, FullTextIndex, BoolIndex, UuidIndex, UuidMapIndex, NullIndex, } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct FullPayloadIndexType { pub index_type: PayloadIndexType, pub mutability: IndexMutability, pub storage_type: StorageType, } impl FullPayloadIndexType { pub fn mutability(&self) -> &IndexMutability { &self.mutability } } #[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] #[serde(rename_all = "snake_case")] pub enum IndexMutability { /// Supports insertions, updates, and deletions 
Mutable, /// Only supports deletions Immutable, } #[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] #[serde(rename_all = "snake_case")] pub enum StorageType { Gridstore, RocksDb, Mmap { is_on_disk: bool }, } #[cfg(test)] mod test { use std::str::FromStr; use serde_json::Value; use super::*; use crate::json_path::JsonPath; #[test] fn test_storage_compatibility() { // Check that old format can be parsed with new format. let old = r#"{"indexed_fields":{"c":{"type":"integer","lookup":true,"range":false,"is_principal":false,"on_disk":false}}}"#; let payload_config: PayloadConfig = serde_json::from_str(old).unwrap(); let old_value: Value = serde_json::from_str(old).unwrap(); let old_schema = old_value .as_object() .unwrap() .get("indexed_fields") .unwrap() .get("c") .unwrap() .clone(); let old_config: PayloadFieldSchema = serde_json::from_value(old_schema).unwrap(); assert_eq!( payload_config .indices .get(&JsonPath::from_str("c").unwrap()) .unwrap() .schema, old_config ); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_estimator.rs
lib/segment/src/index/query_estimator.rs
//! Contains functions for estimating of how many points should be processed for a given filter query //! //! Filter query is used e.g. for determining how would be faster to process the query: //! - use vector index or payload index first use std::cmp::{max, min}; use itertools::Itertools; use crate::index::field_index::{CardinalityEstimation, PrimaryCondition}; use crate::types::{Condition, Filter, MinShould}; /// Re-estimate cardinality based on number of available vectors /// Assuming that deleted vectors are not correlated with the filter /// /// # Arguments /// /// * `estimation` - cardinality estimations of number of points selected by payload filter /// * `available_vectors` - number of available vectors for the named vector storage /// * `total_vectors` - total number of points in the segment /// /// # Result /// /// * `CardinalityEstimation` - new cardinality estimation pub fn adjust_to_available_vectors( estimation: CardinalityEstimation, available_vectors: usize, available_points: usize, ) -> CardinalityEstimation { if available_points == 0 || available_vectors == 0 { return CardinalityEstimation { primary_clauses: estimation.primary_clauses, min: 0, exp: 0, max: 0, }; } let number_of_deleted_vectors = available_points.saturating_sub(available_vectors); // It is possible, all deleted vectors are selected in worst case let min = estimation.min.saturating_sub(number_of_deleted_vectors); // Another extreme case - all deleted vectors are not selected let max = estimation.max.min(available_vectors).min(available_points); let availability_prob = (available_vectors as f64 / available_points as f64).min(1.0); let exp = (estimation.exp as f64 * availability_prob).round() as usize; debug_assert!( min <= exp, "estimation: {estimation:?}, available_vectors: {available_vectors}, available_points: {available_points}, min: {min}, exp: {exp}" ); debug_assert!( exp <= max, "estimation: {estimation:?}, available_vectors: {available_vectors}, available_points: 
{available_points}, exp: {exp}, max: {max}" ); CardinalityEstimation { primary_clauses: estimation.primary_clauses, min, exp, max, } } /// Combine cardinality of multiple estimations in an OR fashion by using the complement rule. /// Assumes that the estimations are independent. /// /// Formula is `(1 - ∏(1-pᵢ)) * total`: /// * For each condition, it calculates the probability that an item does not match it: `1 - (x / total)`. /// * It multiplies these probabilities to get the probability that an item matches none of the conditions. /// * Subtracts this from 1 to get the probability that an item matches at least one condition. /// * Multiplies this probability by the total number of items and rounds to get the expected count. pub fn expected_should_estimation(estimations: impl Iterator<Item = usize>, total: usize) -> usize { if total == 0 { return 0; } let element_not_hit_prob: f64 = estimations .map(|x| 1.0 - (x as f64 / total as f64)) .product(); let element_hit_prob = 1.0 - element_not_hit_prob; (element_hit_prob * (total as f64)).round() as usize } pub fn combine_should_estimations( estimations: &[CardinalityEstimation], total: usize, ) -> CardinalityEstimation { let mut clauses: Vec<PrimaryCondition> = vec![]; for estimation in estimations { if estimation.primary_clauses.is_empty() { // If some branch is un-indexed - we can't make // any assumptions about the whole `should` clause clauses = vec![]; break; } clauses.append(&mut estimation.primary_clauses.clone()); } let expected_count = expected_should_estimation(estimations.iter().map(|x| x.exp), total); CardinalityEstimation { primary_clauses: clauses, min: estimations.iter().map(|x| x.min).max().unwrap_or(0), exp: expected_count, max: min(estimations.iter().map(|x| x.max).sum(), total), } } pub fn combine_min_should_estimations( estimations: &[CardinalityEstimation], min_count: usize, total: usize, ) -> CardinalityEstimation { /* | First estimate cardinality of intersections and then combine the estimations 
| ex) min_count : 2, # of estimations : 4 | |(A ⋂ B) ∪ (A ⋂ C) ∪ (A ⋂ D) ∪ (B ⋂ C) ∪ (B ⋂ D) ∪ (C ⋂ D)| */ let intersection_estimations = estimations .iter() .combinations(min_count) .map(|intersection| { combine_must_estimations(&intersection.into_iter().cloned().collect_vec(), total) }) .collect_vec(); combine_should_estimations(&intersection_estimations, total) } pub fn combine_must_estimations( estimations: &[CardinalityEstimation], total: usize, ) -> CardinalityEstimation { let min_estimation = estimations .iter() .map(|x| x.min) .fold(total as i64, |acc, x| { max(0, acc + (x as i64) - (total as i64)) }) as usize; let max_estimation = estimations.iter().map(|x| x.max).min().unwrap_or(total); let exp_estimation_prob: f64 = estimations .iter() .map(|x| (x.exp as f64) / (total as f64)) .product(); let exp_estimation = (exp_estimation_prob * (total as f64)).round() as usize; let clauses = estimations .iter() .filter(|x| !x.primary_clauses.is_empty()) .min_by_key(|x| x.exp) .map(|x| x.primary_clauses.clone()) .unwrap_or_default(); CardinalityEstimation { primary_clauses: clauses, min: min_estimation, exp: exp_estimation, max: max_estimation, } } fn estimate_condition<F>( estimator: &F, condition: &Condition, total: usize, ) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { match condition { Condition::Filter(filter) => estimate_filter(estimator, filter, total), _ => estimator(condition), } } pub fn estimate_filter<F>(estimator: &F, filter: &Filter, total: usize) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { let mut filter_estimations: Vec<CardinalityEstimation> = vec![]; match &filter.must { None => {} Some(conditions) => { if !conditions.is_empty() { filter_estimations.push(estimate_must(estimator, conditions, total)); } } } match &filter.should { None => {} Some(conditions) => { if !conditions.is_empty() { filter_estimations.push(estimate_should(estimator, conditions, total)); } } } match 
&filter.min_should { None => {} Some(MinShould { conditions, min_count, }) => filter_estimations.push(estimate_min_should( estimator, conditions, *min_count, total, )), } match &filter.must_not { None => {} Some(conditions) => { if !conditions.is_empty() { filter_estimations.push(estimate_must_not(estimator, conditions, total)) } } } combine_must_estimations(&filter_estimations, total) } fn estimate_should<F>( estimator: &F, conditions: &[Condition], total: usize, ) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { let estimate = |x| estimate_condition(estimator, x, total); let should_estimations = conditions.iter().map(estimate).collect_vec(); combine_should_estimations(&should_estimations, total) } fn estimate_min_should<F>( estimator: &F, conditions: &[Condition], min_count: usize, total: usize, ) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { let estimate = |x| estimate_condition(estimator, x, total); let min_should_estimations = conditions.iter().map(estimate).collect_vec(); combine_min_should_estimations(&min_should_estimations, min_count, total) } fn estimate_must<F>(estimator: &F, conditions: &[Condition], total: usize) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { let estimate = |x| estimate_condition(estimator, x, total); let must_estimations = conditions.iter().map(estimate).collect_vec(); combine_must_estimations(&must_estimations, total) } pub fn invert_estimation( estimation: &CardinalityEstimation, total: usize, ) -> CardinalityEstimation { CardinalityEstimation { primary_clauses: vec![], min: total.saturating_sub(estimation.max), exp: total.saturating_sub(estimation.exp), max: total.saturating_sub(estimation.min), } } fn estimate_must_not<F>( estimator: &F, conditions: &[Condition], total: usize, ) -> CardinalityEstimation where F: Fn(&Condition) -> CardinalityEstimation, { let estimate = |x| invert_estimation(&estimate_condition(estimator, x, total), total); let 
must_not_estimations = conditions.iter().map(estimate).collect_vec(); combine_must_estimations(&must_not_estimations, total) } #[cfg(test)] mod tests { use super::*; use crate::index::field_index::ResolvedHasId; use crate::json_path::JsonPath; use crate::types::{FieldCondition, HasIdCondition}; const TOTAL: usize = 1000; fn test_condition(key: &str) -> Condition { Condition::Field(FieldCondition { key: JsonPath::new(key), r#match: None, range: None, geo_bounding_box: None, geo_radius: None, values_count: None, is_empty: None, geo_polygon: None, is_null: None, }) } fn test_estimator(condition: &Condition) -> CardinalityEstimation { match condition { Condition::Filter(_) => panic!("unexpected Filter"), Condition::Nested(_) => panic!("unexpected Nested"), Condition::CustomIdChecker(_) => panic!("unexpected CustomIdChecker"), Condition::Field(field) => match field.key.to_string().as_str() { "color" => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(field.clone()))], min: 100, exp: 200, max: 300, }, "size" => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(field.clone()))], min: 100, exp: 100, max: 100, }, "price" => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(field.clone()))], min: 10, exp: 15, max: 20, }, _ => CardinalityEstimation::unknown(TOTAL), }, Condition::HasId(has_id) => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Ids(ResolvedHasId { point_ids: has_id.has_id.clone(), resolved_point_offsets: has_id .has_id .iter() .map(|id| id.to_string().parse().unwrap()) .collect(), })], min: has_id.has_id.len(), exp: has_id.has_id.len(), max: has_id.has_id.len(), }, Condition::IsEmpty(condition) => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new( FieldCondition::new_is_empty(condition.is_empty.key.clone(), true), ))], min: 0, exp: TOTAL / 2, max: TOTAL, }, Condition::IsNull(condition) => CardinalityEstimation { 
primary_clauses: vec![PrimaryCondition::Condition(Box::new( FieldCondition::new_is_null(condition.is_null.key.clone(), true), ))], min: 0, exp: TOTAL / 2, max: TOTAL, }, Condition::HasVector(condition) => CardinalityEstimation { primary_clauses: vec![PrimaryCondition::HasVector(condition.has_vector.clone())], min: 0, exp: TOTAL / 2, max: TOTAL, }, } } #[test] fn simple_query_estimation_test() { let query = Filter::new_must(test_condition("color")); let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.exp, 200); assert!(!estimation.primary_clauses.is_empty()); } #[test] fn must_estimation_query_test() { let query = Filter { should: None, min_should: None, must: Some(vec![ test_condition("color"), test_condition("size"), test_condition("un-indexed"), ]), must_not: None, }; let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 1); match &estimation.primary_clauses[0] { PrimaryCondition::Condition(field) => assert_eq!(&field.key.to_string(), "size"), _ => panic!(), } assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn should_estimation_query_test() { let query = Filter { should: Some(vec![test_condition("color"), test_condition("size")]), min_should: None, must: None, must_not: None, }; let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 2); assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn another_should_estimation_query_test() { let query = Filter { should: Some(vec![ test_condition("color"), test_condition("size"), test_condition("un-indexed"), ]), min_should: None, must: None, must_not: None, }; let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 0); eprintln!("estimation = {estimation:#?}"); 
assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn min_should_estimation_query_test() { let query = Filter::new_min_should(MinShould { conditions: vec![test_condition("color"), test_condition("size")], min_count: 1, }); let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 2); assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn another_min_should_estimation_query_test() { let query = Filter::new_min_should(MinShould { conditions: vec![ test_condition("color"), test_condition("size"), test_condition("price"), ], min_count: 2, }); let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 3); assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn min_should_with_min_count_same_as_condition_count_is_equivalent_to_must() { let conditions = vec![ test_condition("color"), test_condition("size"), test_condition("price"), ]; let min_should_query = Filter::new_min_should(MinShould { conditions: conditions.clone(), min_count: 3, }); let estimation = estimate_filter(&test_estimator, &min_should_query, TOTAL); let must_query = Filter { should: None, min_should: None, must: Some(conditions), must_not: None, }; let expected_estimation = estimate_filter(&test_estimator, &must_query, TOTAL); assert_eq!( estimation.primary_clauses, expected_estimation.primary_clauses ); assert_eq!(estimation.max, expected_estimation.max); assert_eq!(estimation.exp, expected_estimation.exp); assert_eq!(estimation.min, expected_estimation.min); } #[test] fn complex_estimation_query_test() { let query = Filter { should: Some(vec![ Condition::Filter(Filter { should: None, min_should: None, must: Some(vec![test_condition("color"), test_condition("size")]), 
must_not: None, }), Condition::Filter(Filter { should: None, min_should: None, must: Some(vec![test_condition("price"), test_condition("size")]), must_not: None, }), ]), min_should: None, must: None, must_not: Some(vec![Condition::HasId(HasIdCondition { has_id: [1, 2, 3, 4, 5].into_iter().map(|x| x.into()).collect(), })]), }; let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 2); assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn another_complex_estimation_query_test() { let query = Filter { should: None, min_should: None, must: Some(vec![ Condition::Filter(Filter { must: None, should: Some(vec![test_condition("color"), test_condition("size")]), min_should: None, must_not: None, }), Condition::Filter(Filter { must: None, should: Some(vec![test_condition("price"), test_condition("size")]), min_should: None, must_not: None, }), ]), must_not: Some(vec![Condition::HasId(HasIdCondition { has_id: [1, 2, 3, 4, 5].into_iter().map(|x| x.into()).collect(), })]), }; let estimation = estimate_filter(&test_estimator, &query, TOTAL); assert_eq!(estimation.primary_clauses.len(), 2); estimation.primary_clauses.iter().for_each(|x| match x { PrimaryCondition::Condition(field) => { assert!(["price", "size"].contains(&field.key.to_string().as_str())) } _ => panic!("Should not go here"), }); assert!(estimation.max <= TOTAL); assert!(estimation.exp <= estimation.max); assert!(estimation.min <= estimation.exp); } #[test] fn test_combine_must_estimations() { let estimations = vec![CardinalityEstimation { primary_clauses: vec![], min: 12, exp: 12, max: 12, }]; let res = combine_must_estimations(&estimations, 10_000); eprintln!("res = {res:#?}"); } #[test] fn test_adjust_to_available_vectors() { let estimation = CardinalityEstimation { primary_clauses: vec![], min: 0, exp: 64, max: 100, }; let new_estimation = adjust_to_available_vectors(estimation, 
50, 200); assert_eq!(new_estimation.min, 0); assert_eq!(new_estimation.exp, 16); assert_eq!(new_estimation.max, 50); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/struct_payload_index.rs
lib/segment/src/index/struct_payload_index.rs
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::atomic::AtomicBool; use atomic_refcell::AtomicRefCell; use common::counter::hardware_counter::HardwareCounterCell; use common::counter::iterator_hw_measurement::HwMeasurementIteratorExt; use common::either_variant::EitherVariant; use common::iterator_ext::IteratorExt; use common::types::PointOffsetType; use fs_err as fs; use schemars::_serde_json::Value; use super::field_index::facet_index::FacetIndexEnum; #[cfg(feature = "rocksdb")] use super::field_index::index_selector::IndexSelectorRocksDb; use super::field_index::index_selector::{ IndexSelector, IndexSelectorGridstore, IndexSelectorMmap, }; use super::field_index::{FieldIndexBuilderTrait as _, ResolvedHasId}; use super::payload_config::{FullPayloadIndexType, PayloadFieldSchemaWithIndexType}; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; use crate::common::utils::IndexesMap; use crate::id_tracker::IdTrackerSS; use crate::index::field_index::{ CardinalityEstimation, FieldIndex, PayloadBlockCondition, PrimaryCondition, }; use crate::index::payload_config::{self, PayloadConfig}; use crate::index::query_estimator::estimate_filter; use crate::index::query_optimization::payload_provider::PayloadProvider; use crate::index::struct_filter_context::StructFilterContext; use crate::index::visited_pool::VisitedPool; use crate::index::{BuildIndexResult, PayloadIndex}; use crate::json_path::JsonPath; use crate::payload_storage::payload_storage_enum::PayloadStorageEnum; use crate::payload_storage::{FilterContext, PayloadStorage}; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{ Condition, FieldCondition, Filter, IsEmptyCondition, IsNullCondition, Payload, PayloadContainer, PayloadFieldSchema, PayloadKeyType, PayloadKeyTypeRef, VectorNameBuf, }; use crate::vector_storage::{VectorStorage, VectorStorageEnum}; #[derive(Debug)] #[allow(clippy::enum_variant_names)] 
enum StorageType { #[cfg(feature = "rocksdb")] RocksDbAppendable(std::sync::Arc<parking_lot::RwLock<rocksdb::DB>>), GridstoreAppendable, #[cfg(feature = "rocksdb")] RocksDbNonAppendable(Arc<parking_lot::RwLock<rocksdb::DB>>), GridstoreNonAppendable, } impl StorageType { #[cfg(feature = "rocksdb")] pub fn is_appendable(&self) -> bool { match self { StorageType::RocksDbAppendable(_) => true, StorageType::GridstoreAppendable => true, StorageType::RocksDbNonAppendable(_) => false, StorageType::GridstoreNonAppendable => false, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self { StorageType::RocksDbAppendable(_) => true, StorageType::RocksDbNonAppendable(_) => true, StorageType::GridstoreAppendable => false, StorageType::GridstoreNonAppendable => false, } } } /// `PayloadIndex` implementation, which actually uses index structures for providing faster search #[derive(Debug)] pub struct StructPayloadIndex { /// Payload storage pub(super) payload: Arc<AtomicRefCell<PayloadStorageEnum>>, /// Used for `has_id` condition and estimating cardinality pub(super) id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, /// Vector storages for each field, used for `has_vector` condition pub(super) vector_storages: HashMap<VectorNameBuf, Arc<AtomicRefCell<VectorStorageEnum>>>, /// Indexes, associated with fields pub field_indexes: IndexesMap, config: PayloadConfig, /// Root of index persistence dir path: PathBuf, /// Used to select unique point ids visited_pool: VisitedPool, /// Desired storage type for payload indices, used in builder to pick correct type storage_type: StorageType, /// RocksDB instance, if any index is using it #[cfg(feature = "rocksdb")] db: Option<Arc<parking_lot::RwLock<rocksdb::DB>>>, } impl StructPayloadIndex { pub fn estimate_field_condition( &self, condition: &FieldCondition, nested_path: Option<&JsonPath>, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { let full_path = JsonPath::extend_or_new(nested_path, 
&condition.key); self.field_indexes.get(&full_path).and_then(|indexes| { // rewrite condition with fullpath to enable cardinality estimation let full_path_condition = FieldCondition { key: full_path, ..condition.clone() }; indexes .iter() .find_map(|index| index.estimate_cardinality(&full_path_condition, hw_counter)) }) } fn query_field<'a>( &'a self, condition: &'a PrimaryCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match condition { PrimaryCondition::Condition(field_condition) => { let field_key = &field_condition.key; let field_indexes = self.field_indexes.get(field_key)?; field_indexes .iter() .find_map(|field_index| field_index.filter(field_condition, hw_counter)) } PrimaryCondition::Ids(ids) => { Some(Box::new(ids.resolved_point_offsets.iter().copied())) } PrimaryCondition::HasVector(_) => None, } } fn config_path(&self) -> PathBuf { PayloadConfig::get_config_path(&self.path) } fn save_config(&self) -> OperationResult<()> { let config_path = self.config_path(); self.config.save(&config_path) } fn load_all_fields(&mut self, create_if_missing: bool) -> OperationResult<()> { let mut field_indexes: IndexesMap = Default::default(); let mut indices = std::mem::take(&mut self.config.indices); let mut is_dirty = false; for (field, payload_schema) in indices.iter_mut() { let (field_index, dirty) = self.load_from_db(field, payload_schema, create_if_missing)?; field_indexes.insert(field.clone(), field_index); is_dirty |= dirty; } // Put updated payload schemas back into the config self.config.indices = indices; if is_dirty { self.save_config()?; } self.field_indexes = field_indexes; Ok(()) } #[cfg_attr(not(feature = "rocksdb"), allow(clippy::needless_pass_by_ref_mut))] fn load_from_db( &mut self, field: PayloadKeyTypeRef, // TODO: refactor this and remove the &mut reference. 
payload_schema: &mut PayloadFieldSchemaWithIndexType, create_if_missing: bool, ) -> OperationResult<(Vec<FieldIndex>, bool)> { let total_point_count = self.id_tracker.borrow().total_point_count(); let mut rebuild = false; let mut is_dirty = false; let mut indexes = if payload_schema.types.is_empty() { let indexes = self.selector(&payload_schema.schema).new_index( field, &payload_schema.schema, create_if_missing, )?; if let Some(mut indexes) = indexes { debug_assert!( !indexes .iter() .any(|index| matches!(index, FieldIndex::NullIndex(_))), "index selector is not expected to provide null index", ); // Special null index complements every index. if let Some(null_index) = IndexSelector::new_null_index( &self.path, field, total_point_count, create_if_missing, )? { indexes.push(null_index); } // Persist exact payload index types is_dirty = true; payload_schema.types = indexes.iter().map(|i| i.get_full_index_type()).collect(); indexes } else { rebuild = true; vec![] } } else { payload_schema .types .iter() // Load each index .map(|index| { self.selector_with_type(index).and_then(|selector| { selector.new_index_with_type( field, &payload_schema.schema, index, &self.path, total_point_count, create_if_missing, ) }) }) // Interrupt loading indices if one fails to load // Set rebuild flag if any index fails to load .take_while(|index| { let is_loaded = index.as_ref().is_ok_and(|index| index.is_some()); rebuild |= !is_loaded; is_loaded }) .filter_map(|index| index.transpose()) .collect::<OperationResult<Vec<_>>>()? 
}; // Actively migrate away from RocksDB indices // Naively implemented by just rebuilding the indices from scratch #[cfg(feature = "rocksdb")] if common::flags::feature_flags().migrate_rocksdb_payload_indices && indexes.iter().any(|index| index.is_rocksdb()) { log::info!("Migrating away from RocksDB indices for field `{field}`"); rebuild = true; is_dirty = true; // Change storage type, set skip RocksDB flag and persist // Needed to not use RocksDB when rebuilding indices below match self.storage_type { StorageType::RocksDbAppendable(_) => { self.storage_type = StorageType::GridstoreAppendable; } StorageType::GridstoreAppendable => {} StorageType::RocksDbNonAppendable(_) => { self.storage_type = StorageType::GridstoreNonAppendable; } StorageType::GridstoreNonAppendable => {} } self.config.skip_rocksdb.replace(true); // Wipe all existing indices for index in indexes.drain(..) { index.wipe().map_err(|err| { OperationError::service_error(format!( "Failed to delete existing payload index for field `{field}` before rebuild: {err}" )) })?; } } // If index is not properly loaded or when migrating, rebuild indices if rebuild { log::debug!("Rebuilding payload index for field `{field}`..."); indexes = self.build_field_indexes( field, &payload_schema.schema, &HardwareCounterCell::disposable(), // Internal operation )?; // Persist exact payload index types of newly built indices is_dirty = true; payload_schema.types = indexes.iter().map(|i| i.get_full_index_type()).collect(); } Ok((indexes, is_dirty)) } pub fn open( payload: Arc<AtomicRefCell<PayloadStorageEnum>>, id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, vector_storages: HashMap<VectorNameBuf, Arc<AtomicRefCell<VectorStorageEnum>>>, path: &Path, is_appendable: bool, create: bool, ) -> OperationResult<Self> { fs::create_dir_all(path)?; let config_path = PayloadConfig::get_config_path(path); let config = if config_path.exists() { PayloadConfig::load(&config_path)? 
} else { #[cfg(feature = "rocksdb")] { let mut new_config = PayloadConfig::default(); let skip_rocksdb = if is_appendable { common::flags::feature_flags().payload_index_skip_mutable_rocksdb } else { common::flags::feature_flags().payload_index_skip_rocksdb }; if skip_rocksdb { new_config.skip_rocksdb = Some(true); } new_config } #[cfg(not(feature = "rocksdb"))] { PayloadConfig::default() } }; #[cfg(feature = "rocksdb")] let mut db = None; let storage_type = if is_appendable { #[cfg(feature = "rocksdb")] { let skip_rocksdb = config.skip_rocksdb.unwrap_or(false); if !skip_rocksdb { let rocksdb = crate::common::rocksdb_wrapper::open_db_with_existing_cf(path) .map_err(|err| { OperationError::service_error(format!("RocksDB open error: {err}")) })?; db.replace(rocksdb.clone()); StorageType::RocksDbAppendable(rocksdb) } else { StorageType::GridstoreAppendable } } #[cfg(not(feature = "rocksdb"))] { StorageType::GridstoreAppendable } } else { #[cfg(feature = "rocksdb")] { let skip_rocksdb = config.skip_rocksdb.unwrap_or(false); if !skip_rocksdb { let rocksdb = crate::common::rocksdb_wrapper::open_db_with_existing_cf(path) .map_err(|err| { OperationError::service_error(format!("RocksDB open error: {err}")) })?; db.replace(rocksdb.clone()); StorageType::RocksDbNonAppendable(rocksdb) } else { StorageType::GridstoreNonAppendable } } #[cfg(not(feature = "rocksdb"))] { StorageType::GridstoreNonAppendable } }; // Also prematurely open RocksDB if any index is still using it #[cfg(feature = "rocksdb")] if db.is_none() && config.indices.any_is_rocksdb() { log::debug!("Opening RocksDB to load old payload index"); let rocksdb = crate::common::rocksdb_wrapper::open_db_with_existing_cf(path).map_err(|err| { OperationError::service_error(format!("RocksDB open error: {err}")) })?; db.replace(rocksdb); } let mut index = StructPayloadIndex { payload, id_tracker, vector_storages, field_indexes: Default::default(), config, path: path.to_owned(), visited_pool: Default::default(), storage_type, 
#[cfg(feature = "rocksdb")] db, }; if !index.config_path().exists() { // Save default config index.save_config()?; } index.load_all_fields(create)?; // If we have a RocksDB instance, but no index using it, completely delete it here #[cfg(feature = "rocksdb")] if !index.storage_type.is_rocksdb() && !index.config.indices.any_is_rocksdb() && let Some(db) = index.db.take() { match Arc::try_unwrap(db) { Ok(db) => { log::trace!( "Deleting RocksDB for payload indices, no payload index uses it anymore" ); // Close RocksDB instance let db = db.into_inner(); drop(db); // Destroy all RocksDB files let options = crate::common::rocksdb_wrapper::make_db_options(); match rocksdb::DB::destroy(&options, &index.path) { Ok(_) => log::debug!("Deleted RocksDB for payload indices"), Err(err) => { log::warn!("Failed to delete RocksDB for payload indices: {err}") } } } // Here we don't have exclusive ownership of RocksDB, which prevents us from // controlling and closing the instance. Because of it, we cannot destroy the // RocksDB files, and leave them behind. We don't consider this a problem, because // a future optimization run will get rid of these files. Err(db) => { log::warn!( "RocksDB for payload indices could not be deleted, does not have exclusive ownership" ); index.db.replace(db); } } } Ok(index) } pub fn build_field_indexes( &self, field: PayloadKeyTypeRef, payload_schema: &PayloadFieldSchema, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<FieldIndex>> { let payload_storage = self.payload.borrow(); let mut builders = self .selector(payload_schema) .index_builder(field, payload_schema)?; // Special null index complements every index. 
let null_index = IndexSelector::null_builder(&self.path, field)?; builders.push(null_index); for index in &mut builders { index.init()?; } payload_storage.iter( |point_id, point_payload| { let field_value = &point_payload.get_value(field); for builder in builders.iter_mut() { builder.add_point(point_id, field_value, hw_counter)?; } Ok(true) }, hw_counter, )?; builders .into_iter() .map(|builder| builder.finalize()) .collect() } /// Number of available points /// /// - excludes soft deleted points pub fn available_point_count(&self) -> usize { self.id_tracker.borrow().available_point_count() } pub fn struct_filtered_context<'a>( &'a self, filter: &'a Filter, hw_counter: &HardwareCounterCell, ) -> StructFilterContext<'a> { let payload_provider = PayloadProvider::new(self.payload.clone()); let (optimized_filter, _) = self.optimize_filter( filter, payload_provider, self.available_point_count(), hw_counter, ); StructFilterContext::new(optimized_filter) } pub(super) fn condition_cardinality( &self, condition: &Condition, nested_path: Option<&JsonPath>, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { match condition { Condition::Filter(_) => panic!("Unexpected branching"), Condition::Nested(nested) => { // propagate complete nested path in case of multiple nested layers let full_path = JsonPath::extend_or_new(nested_path, &nested.array_key()); self.estimate_nested_cardinality(nested.filter(), &full_path, hw_counter) } Condition::IsEmpty(IsEmptyCondition { is_empty: field }) => { let available_points = self.available_point_count(); let condition = FieldCondition::new_is_empty(field.key.clone(), true); self.estimate_field_condition(&condition, nested_path, hw_counter) .unwrap_or_else(|| CardinalityEstimation::unknown(available_points)) } Condition::IsNull(IsNullCondition { is_null: field }) => { let available_points = self.available_point_count(); let condition = FieldCondition::new_is_null(field.key.clone(), true); self.estimate_field_condition(&condition, 
nested_path, hw_counter) .unwrap_or_else(|| CardinalityEstimation::unknown(available_points)) } Condition::HasId(has_id) => { let point_ids = has_id.has_id.clone(); let id_tracker = self.id_tracker.borrow(); let resolved_point_offsets: Vec<PointOffsetType> = point_ids .iter() .filter_map(|external_id| id_tracker.internal_id(*external_id)) .collect(); let num_ids = resolved_point_offsets.len(); CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Ids(ResolvedHasId { point_ids, resolved_point_offsets, })], min: num_ids, exp: num_ids, max: num_ids, } } Condition::HasVector(has_vectors) => { if let Some(vector_storage) = self.vector_storages.get(&has_vectors.has_vector) { let vector_storage = vector_storage.borrow(); let vectors = vector_storage.available_vector_count(); CardinalityEstimation::exact(vectors).with_primary_clause( PrimaryCondition::HasVector(has_vectors.has_vector.clone()), ) } else { CardinalityEstimation::exact(0) } } Condition::Field(field_condition) => self .estimate_field_condition(field_condition, nested_path, hw_counter) .unwrap_or_else(|| CardinalityEstimation::unknown(self.available_point_count())), Condition::CustomIdChecker(cond) => cond .0 .estimate_cardinality(self.id_tracker.borrow().available_point_count()), } } pub fn get_telemetry_data(&self) -> Vec<PayloadIndexTelemetry> { self.field_indexes .iter() .flat_map(|(name, field)| -> Vec<PayloadIndexTelemetry> { field .iter() .map(|field| field.get_telemetry_data().set_name(name.to_string())) .collect() }) .collect() } #[cfg(feature = "rocksdb")] pub fn restore_database_snapshot( snapshot_path: &Path, segment_path: &Path, ) -> OperationResult<()> { crate::rocksdb_backup::restore(snapshot_path, &segment_path.join("payload_index")) } fn clear_index_for_point(&mut self, point_id: PointOffsetType) -> OperationResult<()> { for (_, field_indexes) in self.field_indexes.iter_mut() { for index in field_indexes { index.remove_point(point_id)?; } } Ok(()) } pub fn config(&self) -> 
&PayloadConfig { &self.config } pub fn is_tenant(&self, field: &PayloadKeyType) -> bool { self.config .indices .get(field) .map(|indexed_field| indexed_field.schema.is_tenant()) .unwrap_or(false) } pub fn iter_filtered_points<'a>( &'a self, filter: &'a Filter, id_tracker: &'a IdTrackerSS, query_cardinality: &'a CardinalityEstimation, hw_counter: &'a HardwareCounterCell, is_stopped: &'a AtomicBool, ) -> impl Iterator<Item = PointOffsetType> + 'a { if query_cardinality.primary_clauses.is_empty() { let full_scan_iterator = id_tracker.iter_internal(); let struct_filtered_context = self.struct_filtered_context(filter, hw_counter); // Worst case: query expected to return few matches, but index can't be used let matched_points = full_scan_iterator .stop_if(is_stopped) .filter(move |i| struct_filtered_context.check(*i)); EitherVariant::A(matched_points) } else { // CPU-optimized strategy here: points are made unique before applying other filters. let mut visited_list = self.visited_pool.get(id_tracker.total_point_count()); // If even one iterator is None, we should replace the whole thing with // an iterator over all ids. 
let primary_clause_iterators: Option<Vec<_>> = query_cardinality .primary_clauses .iter() .map(move |clause| self.query_field(clause, hw_counter)) .collect(); if let Some(primary_iterators) = primary_clause_iterators { let all_conditions_are_primary = filter .iter_conditions() .all(|condition| query_cardinality.is_primary(condition)); let joined_primary_iterator = primary_iterators.into_iter().flatten().stop_if(is_stopped); return if all_conditions_are_primary { // All conditions are primary clauses, // We can avoid post-filtering let iter = joined_primary_iterator .filter(move |&id| !visited_list.check_and_update_visited(id)); EitherVariant::B(iter) } else { // Some conditions are primary clauses, some are not let struct_filtered_context = self.struct_filtered_context(filter, hw_counter); let iter = joined_primary_iterator.filter(move |&id| { !visited_list.check_and_update_visited(id) && struct_filtered_context.check(id) }); EitherVariant::C(iter) }; } // We can't use primary conditions, so we fall back to iterating over all ids // and applying full filter. 
let struct_filtered_context = self.struct_filtered_context(filter, hw_counter); let id_tracker_iterator = id_tracker.iter_internal(); let iter = id_tracker_iterator .stop_if(is_stopped) .measure_hw_with_cell(hw_counter, size_of::<PointOffsetType>(), |i| { i.cpu_counter() }) .filter(move |&id| { !visited_list.check_and_update_visited(id) && struct_filtered_context.check(id) }); EitherVariant::D(iter) } } /// Select which type of PayloadIndex to use for the field fn selector(&self, payload_schema: &PayloadFieldSchema) -> IndexSelector<'_> { let is_on_disk = payload_schema.is_on_disk(); match &self.storage_type { #[cfg(feature = "rocksdb")] StorageType::RocksDbAppendable(db) => IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: true, }), StorageType::GridstoreAppendable => { IndexSelector::Gridstore(IndexSelectorGridstore { dir: &self.path }) } #[cfg(feature = "rocksdb")] StorageType::RocksDbNonAppendable(db) => { // legacy logic: we keep rocksdb, but load mmap indexes if !is_on_disk { return IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: false, }); } IndexSelector::Mmap(IndexSelectorMmap { dir: &self.path, is_on_disk, }) } StorageType::GridstoreNonAppendable => IndexSelector::Mmap(IndexSelectorMmap { dir: &self.path, is_on_disk, }), } } fn selector_with_type( &self, index_type: &FullPayloadIndexType, ) -> OperationResult<IndexSelector<'_>> { let selector = match index_type.storage_type { payload_config::StorageType::Gridstore => { IndexSelector::Gridstore(IndexSelectorGridstore { dir: &self.path }) } payload_config::StorageType::RocksDb => { #[cfg(feature = "rocksdb")] { let db = match (&self.storage_type, &self.db) { ( StorageType::RocksDbAppendable(db) | StorageType::RocksDbNonAppendable(db), _, ) => db, ( StorageType::GridstoreAppendable | StorageType::GridstoreNonAppendable, Some(db), ) => db, ( StorageType::GridstoreAppendable | StorageType::GridstoreNonAppendable, None, ) => { return Err(OperationError::service_error( "Loading 
payload index failed: Configured storage type and payload schema mismatch!", )); } }; return Ok(IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: self.storage_type.is_appendable(), })); } #[cfg(not(feature = "rocksdb"))] return Err(OperationError::service_error( "Loading payload index failed: Index is RocksDB but RocksDB feature is disabled.", )); } payload_config::StorageType::Mmap { is_on_disk } => { IndexSelector::Mmap(IndexSelectorMmap { dir: &self.path, is_on_disk, }) } }; Ok(selector) } pub fn get_facet_index(&self, key: &JsonPath) -> OperationResult<FacetIndexEnum<'_>> { self.field_indexes .get(key) .and_then(|index| index.iter().find_map(|index| index.as_facet_index())) .ok_or_else(|| OperationError::MissingMapIndexForFacet { key: key.to_string(), }) } pub fn populate(&self) -> OperationResult<()> { for (_, field_indexes) in self.field_indexes.iter() { for index in field_indexes { index.populate()?; } } Ok(()) } pub fn clear_cache(&self) -> OperationResult<()> { for (_, field_indexes) in self.field_indexes.iter() { for index in field_indexes { index.clear_cache()?; } } Ok(()) } pub fn clear_cache_if_on_disk(&self) -> OperationResult<()> { for (_, field_indexes) in self.field_indexes.iter() { for index in field_indexes { if index.is_on_disk() { index.clear_cache()?; } } } Ok(()) } } impl PayloadIndex for StructPayloadIndex { fn indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema> { self.config.indices.to_schemas() } fn build_index( &self, field: PayloadKeyTypeRef, payload_schema: &PayloadFieldSchema, hw_counter: &HardwareCounterCell, ) -> OperationResult<BuildIndexResult> { if let Some(prev_schema) = self.config.indices.get(field) { // the field is already indexed with the same schema // no need to rebuild index and to save the config return if prev_schema.schema == *payload_schema { Ok(BuildIndexResult::AlreadyBuilt) } else { Ok(BuildIndexResult::IncompatibleSchema) }; } let indexes = self.build_field_indexes(field, 
payload_schema, hw_counter)?; Ok(BuildIndexResult::Built(indexes)) } fn apply_index( &mut self, field: PayloadKeyType, payload_schema: PayloadFieldSchema, field_index: Vec<FieldIndex>, ) -> OperationResult<()> { let index_types: Vec<_> = field_index .iter() .map(|i| i.get_full_index_type()) .collect(); self.field_indexes.insert(field.clone(), field_index); self.config.indices.insert( field,
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/plain_payload_index.rs
lib/segment/src/index/plain_payload_index.rs
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::atomic::AtomicBool; use atomic_refcell::AtomicRefCell; use common::counter::hardware_counter::HardwareCounterCell; use common::iterator_ext::IteratorExt; use common::types::PointOffsetType; use fs_err as fs; use schemars::_serde_json::Value; use super::field_index::FieldIndex; use super::payload_config::PayloadFieldSchemaWithIndexType; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::id_tracker::IdTrackerSS; use crate::index::field_index::{CardinalityEstimation, PayloadBlockCondition}; use crate::index::payload_config::PayloadConfig; use crate::index::{BuildIndexResult, PayloadIndex}; use crate::json_path::JsonPath; use crate::payload_storage::{ConditionCheckerSS, FilterContext}; use crate::types::{Filter, Payload, PayloadFieldSchema, PayloadKeyType, PayloadKeyTypeRef}; /// Implementation of `PayloadIndex` which does not really indexes anything. /// /// Used for small segments, which are easier to keep simple for faster updates, /// rather than spend time for index re-building pub struct PlainPayloadIndex { condition_checker: Arc<ConditionCheckerSS>, id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, config: PayloadConfig, path: PathBuf, } impl PlainPayloadIndex { fn config_path(&self) -> PathBuf { PayloadConfig::get_config_path(&self.path) } fn save_config(&self) -> OperationResult<()> { let config_path = self.config_path(); self.config.save(&config_path) } pub fn open( condition_checker: Arc<ConditionCheckerSS>, id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, path: &Path, ) -> OperationResult<Self> { fs::create_dir_all(path)?; let config_path = PayloadConfig::get_config_path(path); let config = if config_path.exists() { PayloadConfig::load(&config_path)? 
} else { PayloadConfig::default() }; let index = PlainPayloadIndex { condition_checker, id_tracker, config, path: path.to_owned(), }; if !index.config_path().exists() { index.save_config()?; } Ok(index) } } impl PayloadIndex for PlainPayloadIndex { fn indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema> { self.config.indices.to_schemas() } fn build_index( &self, _field: PayloadKeyTypeRef, _payload_schema: &PayloadFieldSchema, _hw_counter: &HardwareCounterCell, ) -> OperationResult<BuildIndexResult> { Ok(BuildIndexResult::AlreadyBuilt) // No index to build } fn apply_index( &mut self, field: PayloadKeyType, payload_schema: PayloadFieldSchema, field_index: Vec<FieldIndex>, ) -> OperationResult<()> { let new_schema = PayloadFieldSchemaWithIndexType::new( payload_schema, field_index .iter() .map(|i| i.get_full_index_type()) .collect(), ); let prev_schema = self.config.indices.insert(field, new_schema.clone()); if let Some(prev_schema) = prev_schema { // the field is already present with the same schema, no need to save the config if prev_schema == new_schema { return Ok(()); } } self.save_config()?; Ok(()) } fn set_indexed( &mut self, field: PayloadKeyTypeRef, payload_schema: impl Into<PayloadFieldSchema>, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { // No need to build index, just set the field as indexed self.apply_index(field.clone(), payload_schema.into(), vec![]) } fn drop_index(&mut self, field: PayloadKeyTypeRef) -> OperationResult<bool> { let is_removed = self.config.indices.remove(field).is_some(); self.save_config()?; Ok(is_removed) } fn drop_index_if_incompatible( &mut self, field: PayloadKeyTypeRef, _new_payload_schema: &PayloadFieldSchema, ) -> OperationResult<bool> { // Just always drop the index, as we don't have any indexes self.drop_index(field) } fn estimate_cardinality( &self, _query: &Filter, _hw_counter: &HardwareCounterCell, // No measurements needed here. 
) -> CardinalityEstimation { let available_points = self.id_tracker.borrow().available_point_count(); CardinalityEstimation { primary_clauses: vec![], min: 0, exp: available_points / 2, max: available_points, } } /// Forward to non nested implementation. fn estimate_nested_cardinality( &self, query: &Filter, _nested_path: &JsonPath, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { self.estimate_cardinality(query, hw_counter) } fn query_points( &self, query: &Filter, hw_counter: &HardwareCounterCell, is_stopped: &AtomicBool, ) -> Vec<PointOffsetType> { let filter_context = self.filter_context(query, hw_counter); let id_tracker = self.id_tracker.borrow(); let all_points_iter = id_tracker.iter_internal(); all_points_iter .stop_if(is_stopped) .filter(|id| filter_context.check(*id)) .collect() } fn indexed_points(&self, _field: PayloadKeyTypeRef) -> usize { 0 // No points are indexed in the plain index } fn filter_context<'a>( &'a self, filter: &'a Filter, _: &HardwareCounterCell, ) -> Box<dyn FilterContext + 'a> { Box::new(PlainFilterContext { filter, condition_checker: self.condition_checker.clone(), }) } fn payload_blocks( &self, _field: PayloadKeyTypeRef, _threshold: usize, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { // No blocks for un-indexed payload Box::new(std::iter::empty()) } fn overwrite_payload( &mut self, _point_id: PointOffsetType, _payload: &Payload, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { unreachable!() } fn set_payload( &mut self, _point_id: PointOffsetType, _payload: &Payload, _key: &Option<JsonPath>, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { unreachable!() } fn get_payload( &self, _point_id: PointOffsetType, _hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload> { unreachable!() } fn get_payload_sequential( &self, _point_id: PointOffsetType, _hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload> { unreachable!() } fn delete_payload( &mut self, _point_id: 
PointOffsetType, _key: PayloadKeyTypeRef, _hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<Value>> { unreachable!() } fn clear_payload( &mut self, _point_id: PointOffsetType, _hw_counter: &HardwareCounterCell, ) -> OperationResult<Option<Payload>> { unreachable!() } fn flusher(&self) -> Flusher { unreachable!() } #[cfg(feature = "rocksdb")] fn take_database_snapshot(&self, _: &Path) -> OperationResult<()> { unreachable!() } fn files(&self) -> Vec<PathBuf> { vec![self.config_path()] } } pub struct PlainFilterContext<'a> { condition_checker: Arc<ConditionCheckerSS>, filter: &'a Filter, } impl FilterContext for PlainFilterContext<'_> { fn check(&self, point_id: PointOffsetType) -> bool { self.condition_checker.check(point_id, self.filter) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/payload_index_base.rs
lib/segment/src/index/payload_index_base.rs
use std::collections::HashMap; use std::path::PathBuf; use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use serde_json::Value; use super::field_index::FieldIndex; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::index::field_index::{CardinalityEstimation, PayloadBlockCondition}; use crate::json_path::JsonPath; use crate::payload_storage::FilterContext; use crate::types::{Filter, Payload, PayloadFieldSchema, PayloadKeyType, PayloadKeyTypeRef}; pub enum BuildIndexResult { /// Index was built Built(Vec<FieldIndex>), /// Index was already built AlreadyBuilt, /// Field Index already exists, but incompatible schema /// Requires extra actions to remove the old index. IncompatibleSchema, } pub trait PayloadIndex { /// Get indexed fields fn indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema>; /// Build the index, if not built before, taking the caller by reference only fn build_index( &self, field: PayloadKeyTypeRef, payload_schema: &PayloadFieldSchema, hw_counter: &HardwareCounterCell, ) -> OperationResult<BuildIndexResult>; /// Apply already built indexes fn apply_index( &mut self, field: PayloadKeyType, payload_schema: PayloadFieldSchema, field_index: Vec<FieldIndex>, ) -> OperationResult<()>; /// Mark field as one which should be indexed fn set_indexed( &mut self, field: PayloadKeyTypeRef, payload_schema: impl Into<PayloadFieldSchema>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Remove index fn drop_index(&mut self, field: PayloadKeyTypeRef) -> OperationResult<bool>; /// Remove index if incompatible with new payload schema fn drop_index_if_incompatible( &mut self, field: PayloadKeyTypeRef, new_payload_schema: &PayloadFieldSchema, ) -> OperationResult<bool>; /// Estimate amount of points (min, max) which satisfies filtering condition. /// /// A best estimation of the number of available points should be given. 
fn estimate_cardinality( &self, query: &Filter, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation; /// Estimate amount of points (min, max) which satisfies filtering of a nested condition. fn estimate_nested_cardinality( &self, query: &Filter, nested_path: &JsonPath, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation; /// Return list of all point ids, which satisfy filtering criteria /// /// A best estimation of the number of available points should be given. /// /// If `is_stopped` is set to true during execution, the function should return early with no results. fn query_points( &self, query: &Filter, hw_counter: &HardwareCounterCell, is_stopped: &AtomicBool, ) -> Vec<PointOffsetType>; /// Return number of points, indexed by this field fn indexed_points(&self, field: PayloadKeyTypeRef) -> usize; fn filter_context<'a>( &'a self, filter: &'a Filter, hw_counter: &HardwareCounterCell, ) -> Box<dyn FilterContext + 'a>; /// Iterate conditions for payload blocks with minimum size of `threshold` /// Required for building HNSW index fn payload_blocks( &self, field: PayloadKeyTypeRef, threshold: usize, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_>; /// Overwrite payload for point_id. If payload already exists, replace it. fn overwrite_payload( &mut self, point_id: PointOffsetType, payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Assign payload to a concrete point with a concrete payload value fn set_payload( &mut self, point_id: PointOffsetType, payload: &Payload, key: &Option<JsonPath>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Get payload for point fn get_payload( &self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload>; /// Get payload for point with potential optimization for sequential access. 
fn get_payload_sequential( &self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload>; /// Delete payload by key fn delete_payload( &mut self, point_id: PointOffsetType, key: PayloadKeyTypeRef, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<Value>>; /// Drop all payload of the point fn clear_payload( &mut self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Option<Payload>>; /// Return function that forces persistence of current storage state. fn flusher(&self) -> Flusher; #[cfg(feature = "rocksdb")] fn take_database_snapshot(&self, path: &std::path::Path) -> OperationResult<()>; fn files(&self) -> Vec<PathBuf>; fn immutable_files(&self) -> Vec<(PayloadKeyType, PathBuf)> { Vec::new() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/vector_index_base.rs
lib/segment/src/index/vector_index_base.rs
use std::collections::HashMap; use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::{PointOffsetType, ScoredPointOffset, TelemetryDetail}; use half::f16; use sparse::common::types::{DimId, QuantizedU8}; use sparse::index::inverted_index::InvertedIndex; use sparse::index::inverted_index::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam; use sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap; use sparse::index::inverted_index::inverted_index_immutable_ram::InvertedIndexImmutableRam; use sparse::index::inverted_index::inverted_index_mmap::InvertedIndexMmap; use sparse::index::inverted_index::inverted_index_ram::InvertedIndexRam; use super::hnsw_index::hnsw::HNSWIndex; use super::plain_vector_index::PlainVectorIndex; use super::sparse_index::sparse_vector_index::SparseVectorIndex; use crate::common::operation_error::OperationResult; use crate::data_types::query_context::VectorQueryContext; use crate::data_types::vectors::{QueryVector, VectorRef}; use crate::telemetry::VectorIndexSearchesTelemetry; use crate::types::{Filter, SearchParams}; /// Trait for vector searching pub trait VectorIndex { /// Return list of Ids with fitting fn search( &self, vectors: &[&QueryVector], filter: Option<&Filter>, top: usize, params: Option<&SearchParams>, query_context: &VectorQueryContext, ) -> OperationResult<Vec<Vec<ScoredPointOffset>>>; fn get_telemetry_data(&self, detail: TelemetryDetail) -> VectorIndexSearchesTelemetry; fn files(&self) -> Vec<PathBuf>; fn immutable_files(&self) -> Vec<PathBuf> { Vec::new() } /// The number of indexed vectors, currently accessible fn indexed_vector_count(&self) -> usize; /// Total size of all searchable vectors in bytes. 
fn size_of_searchable_vectors_in_bytes(&self) -> usize; /// Update index for a single vector /// /// # Arguments /// - `id` - sequential vector id, offset in the vector storage /// - `vector` - new vector value, /// if None - vector will be removed from the index marked as deleted in storage. /// Note: inserting None vector is not equal to removing vector from the storage. /// Unlike removing, it will always result in storage growth. /// Proper removing should be performed by the optimizer. fn update_vector( &mut self, id: PointOffsetType, vector: Option<VectorRef>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; } #[derive(Debug)] pub enum VectorIndexEnum { Plain(PlainVectorIndex), Hnsw(HNSWIndex), SparseRam(SparseVectorIndex<InvertedIndexRam>), SparseImmutableRam(SparseVectorIndex<InvertedIndexImmutableRam>), SparseMmap(SparseVectorIndex<InvertedIndexMmap>), SparseCompressedImmutableRamF32(SparseVectorIndex<InvertedIndexCompressedImmutableRam<f32>>), SparseCompressedImmutableRamF16(SparseVectorIndex<InvertedIndexCompressedImmutableRam<f16>>), SparseCompressedImmutableRamU8( SparseVectorIndex<InvertedIndexCompressedImmutableRam<QuantizedU8>>, ), SparseCompressedMmapF32(SparseVectorIndex<InvertedIndexCompressedMmap<f32>>), SparseCompressedMmapF16(SparseVectorIndex<InvertedIndexCompressedMmap<f16>>), SparseCompressedMmapU8(SparseVectorIndex<InvertedIndexCompressedMmap<QuantizedU8>>), } impl VectorIndexEnum { pub fn is_index(&self) -> bool { match self { Self::Plain(_) => false, Self::Hnsw(_) => true, Self::SparseRam(_) => true, Self::SparseImmutableRam(_) => true, Self::SparseMmap(_) => true, Self::SparseCompressedImmutableRamF32(_) => true, Self::SparseCompressedImmutableRamF16(_) => true, Self::SparseCompressedImmutableRamU8(_) => true, Self::SparseCompressedMmapF32(_) => true, Self::SparseCompressedMmapF16(_) => true, Self::SparseCompressedMmapU8(_) => true, } } /// Returns true if underlying storage is configured to be stored on disk without /// 
actively holding data in RAM pub fn is_on_disk(&self) -> bool { match self { Self::Plain(_) => false, Self::Hnsw(index) => index.is_on_disk(), Self::SparseRam(index) => index.inverted_index().is_on_disk(), Self::SparseImmutableRam(index) => index.inverted_index().is_on_disk(), Self::SparseMmap(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedImmutableRamF32(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedImmutableRamF16(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedImmutableRamU8(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedMmapF32(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedMmapF16(index) => index.inverted_index().is_on_disk(), Self::SparseCompressedMmapU8(index) => index.inverted_index().is_on_disk(), } } pub fn populate(&self) -> OperationResult<()> { match self { Self::Plain(_) => {} Self::Hnsw(index) => index.populate()?, Self::SparseRam(_) => {} Self::SparseImmutableRam(_) => {} Self::SparseMmap(index) => index.inverted_index().populate()?, Self::SparseCompressedImmutableRamF32(_) => {} Self::SparseCompressedImmutableRamF16(_) => {} Self::SparseCompressedImmutableRamU8(_) => {} Self::SparseCompressedMmapF32(index) => index.inverted_index().populate()?, Self::SparseCompressedMmapF16(index) => index.inverted_index().populate()?, Self::SparseCompressedMmapU8(index) => index.inverted_index().populate()?, }; Ok(()) } pub fn clear_cache(&self) -> OperationResult<()> { match self { Self::Plain(_) => {} Self::Hnsw(index) => index.clear_cache()?, Self::SparseRam(_) => {} Self::SparseImmutableRam(_) => {} Self::SparseMmap(index) => index.inverted_index().clear_cache()?, Self::SparseCompressedImmutableRamF32(_) => {} Self::SparseCompressedImmutableRamF16(_) => {} Self::SparseCompressedImmutableRamU8(_) => {} Self::SparseCompressedMmapF32(index) => index.inverted_index().clear_cache()?, Self::SparseCompressedMmapF16(index) => 
index.inverted_index().clear_cache()?, Self::SparseCompressedMmapU8(index) => index.inverted_index().clear_cache()?, }; Ok(()) } pub fn fill_idf_statistics( &self, idf: &mut HashMap<DimId, usize>, hw_counter: &HardwareCounterCell, ) { match self { Self::Plain(_) | Self::Hnsw(_) => (), Self::SparseRam(index) => index.fill_idf_statistics(idf, hw_counter), Self::SparseImmutableRam(index) => index.fill_idf_statistics(idf, hw_counter), Self::SparseMmap(index) => index.fill_idf_statistics(idf, hw_counter), Self::SparseCompressedImmutableRamF32(index) => { index.fill_idf_statistics(idf, hw_counter) } Self::SparseCompressedImmutableRamF16(index) => { index.fill_idf_statistics(idf, hw_counter) } Self::SparseCompressedImmutableRamU8(index) => { index.fill_idf_statistics(idf, hw_counter) } Self::SparseCompressedMmapF32(index) => index.fill_idf_statistics(idf, hw_counter), Self::SparseCompressedMmapF16(index) => index.fill_idf_statistics(idf, hw_counter), Self::SparseCompressedMmapU8(index) => index.fill_idf_statistics(idf, hw_counter), } } pub fn indexed_vectors(&self) -> usize { match self { Self::Plain(index) => index.indexed_vector_count(), Self::Hnsw(index) => index.indexed_vector_count(), Self::SparseRam(index) => index.inverted_index().vector_count(), Self::SparseImmutableRam(index) => index.inverted_index().vector_count(), Self::SparseMmap(index) => index.inverted_index().vector_count(), Self::SparseCompressedImmutableRamF32(index) => index.inverted_index().vector_count(), Self::SparseCompressedImmutableRamF16(index) => index.inverted_index().vector_count(), Self::SparseCompressedImmutableRamU8(index) => index.inverted_index().vector_count(), Self::SparseCompressedMmapF32(index) => index.inverted_index().vector_count(), Self::SparseCompressedMmapF16(index) => index.inverted_index().vector_count(), Self::SparseCompressedMmapU8(index) => index.inverted_index().vector_count(), } } } impl VectorIndex for VectorIndexEnum { fn search( &self, vectors: &[&QueryVector], filter: 
Option<&Filter>, top: usize, params: Option<&SearchParams>, query_context: &VectorQueryContext, ) -> OperationResult<Vec<Vec<ScoredPointOffset>>> { match self { VectorIndexEnum::Plain(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::Hnsw(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseRam(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseImmutableRam(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseMmap(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedImmutableRamF32(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedImmutableRamF16(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedImmutableRamU8(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedMmapF32(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedMmapF16(index) => { index.search(vectors, filter, top, params, query_context) } VectorIndexEnum::SparseCompressedMmapU8(index) => { index.search(vectors, filter, top, params, query_context) } } } fn get_telemetry_data(&self, detail: TelemetryDetail) -> VectorIndexSearchesTelemetry { match self { VectorIndexEnum::Plain(index) => index.get_telemetry_data(detail), VectorIndexEnum::Hnsw(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseRam(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseImmutableRam(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseMmap(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseCompressedImmutableRamF32(index) => { index.get_telemetry_data(detail) } VectorIndexEnum::SparseCompressedImmutableRamF16(index) => { 
index.get_telemetry_data(detail) } VectorIndexEnum::SparseCompressedImmutableRamU8(index) => { index.get_telemetry_data(detail) } VectorIndexEnum::SparseCompressedMmapF32(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseCompressedMmapF16(index) => index.get_telemetry_data(detail), VectorIndexEnum::SparseCompressedMmapU8(index) => index.get_telemetry_data(detail), } } fn files(&self) -> Vec<PathBuf> { match self { VectorIndexEnum::Plain(index) => index.files(), VectorIndexEnum::Hnsw(index) => index.files(), VectorIndexEnum::SparseRam(index) => index.files(), VectorIndexEnum::SparseImmutableRam(index) => index.files(), VectorIndexEnum::SparseMmap(index) => index.files(), VectorIndexEnum::SparseCompressedImmutableRamF32(index) => index.files(), VectorIndexEnum::SparseCompressedImmutableRamF16(index) => index.files(), VectorIndexEnum::SparseCompressedImmutableRamU8(index) => index.files(), VectorIndexEnum::SparseCompressedMmapF32(index) => index.files(), VectorIndexEnum::SparseCompressedMmapF16(index) => index.files(), VectorIndexEnum::SparseCompressedMmapU8(index) => index.files(), } } fn immutable_files(&self) -> Vec<PathBuf> { match self { VectorIndexEnum::Plain(index) => index.immutable_files(), VectorIndexEnum::Hnsw(index) => index.immutable_files(), VectorIndexEnum::SparseRam(index) => index.immutable_files(), VectorIndexEnum::SparseImmutableRam(index) => index.immutable_files(), VectorIndexEnum::SparseMmap(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedImmutableRamF32(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedImmutableRamF16(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedImmutableRamU8(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedMmapF32(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedMmapF16(index) => index.immutable_files(), VectorIndexEnum::SparseCompressedMmapU8(index) => index.immutable_files(), } } fn indexed_vector_count(&self) 
-> usize { match self { Self::Plain(index) => index.indexed_vector_count(), Self::Hnsw(index) => index.indexed_vector_count(), Self::SparseRam(index) => index.indexed_vector_count(), Self::SparseImmutableRam(index) => index.indexed_vector_count(), Self::SparseMmap(index) => index.indexed_vector_count(), Self::SparseCompressedImmutableRamF32(index) => index.indexed_vector_count(), Self::SparseCompressedImmutableRamF16(index) => index.indexed_vector_count(), Self::SparseCompressedImmutableRamU8(index) => index.indexed_vector_count(), Self::SparseCompressedMmapF32(index) => index.indexed_vector_count(), Self::SparseCompressedMmapF16(index) => index.indexed_vector_count(), Self::SparseCompressedMmapU8(index) => index.indexed_vector_count(), } } fn size_of_searchable_vectors_in_bytes(&self) -> usize { match self { Self::Plain(index) => index.size_of_searchable_vectors_in_bytes(), Self::Hnsw(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseRam(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseImmutableRam(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseMmap(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseCompressedImmutableRamF32(index) => { index.size_of_searchable_vectors_in_bytes() } Self::SparseCompressedImmutableRamF16(index) => { index.size_of_searchable_vectors_in_bytes() } Self::SparseCompressedImmutableRamU8(index) => { index.size_of_searchable_vectors_in_bytes() } Self::SparseCompressedMmapF32(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseCompressedMmapF16(index) => index.size_of_searchable_vectors_in_bytes(), Self::SparseCompressedMmapU8(index) => index.size_of_searchable_vectors_in_bytes(), } } fn update_vector( &mut self, id: PointOffsetType, vector: Option<VectorRef>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { Self::Plain(index) => index.update_vector(id, vector, hw_counter), Self::Hnsw(index) => index.update_vector(id, vector, 
hw_counter), Self::SparseRam(index) => index.update_vector(id, vector, hw_counter), Self::SparseImmutableRam(index) => index.update_vector(id, vector, hw_counter), Self::SparseMmap(index) => index.update_vector(id, vector, hw_counter), Self::SparseCompressedImmutableRamF32(index) => { index.update_vector(id, vector, hw_counter) } Self::SparseCompressedImmutableRamF16(index) => { index.update_vector(id, vector, hw_counter) } Self::SparseCompressedImmutableRamU8(index) => { index.update_vector(id, vector, hw_counter) } Self::SparseCompressedMmapF32(index) => index.update_vector(id, vector, hw_counter), Self::SparseCompressedMmapF16(index) => index.update_vector(id, vector, hw_counter), Self::SparseCompressedMmapU8(index) => index.update_vector(id, vector, hw_counter), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/mod.rs
lib/segment/src/index/mod.rs
pub mod field_index; pub mod hnsw_index; mod key_encoding; pub mod payload_config; mod payload_index_base; pub mod plain_payload_index; pub mod plain_vector_index; pub mod query_estimator; pub mod query_optimization; mod sample_estimation; pub mod sparse_index; mod struct_filter_context; pub mod struct_payload_index; pub mod vector_index_base; mod vector_index_search_common; mod visited_pool; pub use payload_index_base::*; pub use vector_index_base::*;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/plain_vector_index.rs
lib/segment/src/index/plain_vector_index.rs
use std::path::PathBuf; use std::sync::Arc; use atomic_refcell::AtomicRefCell; use common::counter::hardware_counter::HardwareCounterCell; use common::types::{PointOffsetType, ScoredPointOffset, TelemetryDetail}; use parking_lot::Mutex; use super::hnsw_index::point_scorer::BatchFilteredSearcher; use crate::common::BYTES_IN_KB; use crate::common::operation_error::OperationResult; use crate::common::operation_time_statistics::{ OperationDurationStatistics, OperationDurationsAggregator, ScopeDurationMeasurer, }; use crate::data_types::query_context::VectorQueryContext; use crate::data_types::vectors::{QueryVector, VectorRef}; use crate::id_tracker::IdTrackerSS; use crate::index::struct_payload_index::StructPayloadIndex; use crate::index::vector_index_search_common::{ get_oversampled_top, is_quantized_search, postprocess_search_result, }; use crate::index::{PayloadIndex, VectorIndex}; use crate::telemetry::VectorIndexSearchesTelemetry; use crate::types::{Filter, SearchParams}; use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors; use crate::vector_storage::{VectorStorage, VectorStorageEnum}; #[derive(Debug)] pub struct PlainVectorIndex { id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>, payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, filtered_searches_telemetry: Arc<Mutex<OperationDurationsAggregator>>, unfiltered_searches_telemetry: Arc<Mutex<OperationDurationsAggregator>>, } impl PlainVectorIndex { pub fn new( id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>, payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, ) -> PlainVectorIndex { PlainVectorIndex { id_tracker, vector_storage, quantized_vectors, payload_index, filtered_searches_telemetry: OperationDurationsAggregator::new(), unfiltered_searches_telemetry: 
OperationDurationsAggregator::new(), } } pub fn is_small_enough_for_unindexed_search( &self, search_optimized_threshold_kb: usize, filter: Option<&Filter>, hw_counter: &HardwareCounterCell, ) -> bool { let vector_storage = self.vector_storage.borrow(); let available_vector_count = vector_storage.available_vector_count(); if available_vector_count > 0 { let vector_size_bytes = vector_storage.size_of_available_vectors_in_bytes() / available_vector_count; let indexing_threshold_bytes = search_optimized_threshold_kb * BYTES_IN_KB; if let Some(payload_filter) = filter { let payload_index = self.payload_index.borrow(); let cardinality = payload_index.estimate_cardinality(payload_filter, hw_counter); let scan_size = vector_size_bytes.saturating_mul(cardinality.max); scan_size <= indexing_threshold_bytes } else { let vector_storage_size = vector_size_bytes.saturating_mul(available_vector_count); vector_storage_size <= indexing_threshold_bytes } } else { true } } } impl VectorIndex for PlainVectorIndex { fn search( &self, query_vectors: &[&QueryVector], filter: Option<&Filter>, top: usize, params: Option<&SearchParams>, query_context: &VectorQueryContext, ) -> OperationResult<Vec<Vec<ScoredPointOffset>>> { let is_indexed_only = params.map(|p| p.indexed_only).unwrap_or(false); if is_indexed_only && !self.is_small_enough_for_unindexed_search( query_context.search_optimized_threshold_kb(), filter, &query_context.hardware_counter(), ) { return Ok(vec![vec![]; query_vectors.len()]); } if top == 0 { return Ok(vec![vec![]; query_vectors.len()]); } let is_stopped = query_context.is_stopped(); let hw_counter = query_context.hardware_counter(); let _timer = ScopeDurationMeasurer::new(if filter.is_some() { &self.filtered_searches_telemetry } else { &self.unfiltered_searches_telemetry }); let vector_storage = self.vector_storage.borrow(); let quantized_storage = self.quantized_vectors.borrow(); let id_tracker = self.id_tracker.borrow(); let deleted_points = query_context 
.deleted_points() .unwrap_or_else(|| id_tracker.deleted_point_bitslice()); let quantization_enabled = is_quantized_search(quantized_storage.as_ref(), params); let quantized_vectors = quantization_enabled .then_some(quantized_storage.as_ref()) .flatten(); let oversampled_top = get_oversampled_top(quantized_storage.as_ref(), params, top); let batch_searcher = BatchFilteredSearcher::new( query_vectors, &vector_storage, quantized_vectors, None, oversampled_top, deleted_points, query_context.hardware_counter(), )?; let mut search_results = match filter { Some(filter) => { let payload_index = self.payload_index.borrow(); let filtered_ids_vec = payload_index.query_points(filter, &hw_counter, &is_stopped); batch_searcher.peek_top_iter(&mut filtered_ids_vec.iter().copied(), &is_stopped)? } None => batch_searcher.peek_top_all(&is_stopped)?, }; for (search_result, query_vector) in search_results.iter_mut().zip(query_vectors) { *search_result = postprocess_search_result( std::mem::take(search_result), deleted_points, &vector_storage, quantized_storage.as_ref(), query_vector, params, top, query_context.hardware_counter(), )?; } Ok(search_results) } fn get_telemetry_data(&self, detail: TelemetryDetail) -> VectorIndexSearchesTelemetry { VectorIndexSearchesTelemetry { index_name: None, unfiltered_plain: self .unfiltered_searches_telemetry .lock() .get_statistics(detail), filtered_plain: self .filtered_searches_telemetry .lock() .get_statistics(detail), unfiltered_hnsw: OperationDurationStatistics::default(), filtered_small_cardinality: OperationDurationStatistics::default(), filtered_large_cardinality: OperationDurationStatistics::default(), filtered_exact: OperationDurationStatistics::default(), filtered_sparse: Default::default(), unfiltered_exact: OperationDurationStatistics::default(), unfiltered_sparse: OperationDurationStatistics::default(), } } fn files(&self) -> Vec<PathBuf> { vec![] } fn indexed_vector_count(&self) -> usize { 0 } fn 
size_of_searchable_vectors_in_bytes(&self) -> usize { self.vector_storage .borrow() .size_of_available_vectors_in_bytes() } fn update_vector( &mut self, id: PointOffsetType, vector: Option<VectorRef>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let mut vector_storage = self.vector_storage.borrow_mut(); if let Some(vector) = vector { vector_storage.insert_vector(id, vector, hw_counter)?; let mut quantized_vectors = self.quantized_vectors.borrow_mut(); if let Some(quantized_vectors) = quantized_vectors.as_mut() { quantized_vectors.upsert_vector(id, vector, hw_counter)?; } } else { if id as usize >= vector_storage.total_vector_count() { debug_assert!(id as usize == vector_storage.total_vector_count()); // Vector doesn't exist in the storage // Insert default vector to keep the sequence let default_vector = vector_storage.default_vector(); vector_storage.insert_vector(id, VectorRef::from(&default_vector), hw_counter)?; } vector_storage.delete_vector(id)?; } Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/vector_index_search_common.rs
lib/segment/src/index/vector_index_search_common.rs
use bitvec::slice::BitSlice; use common::counter::hardware_counter::HardwareCounterCell; use common::types::ScoredPointOffset; use itertools::Itertools; use crate::common::operation_error::OperationResult; use crate::data_types::vectors::QueryVector; use crate::index::hnsw_index::point_scorer::FilteredScorer; use crate::types::{ SearchParams, default_quantization_ignore_value, default_quantization_oversampling_value, }; use crate::vector_storage::VectorStorageEnum; use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors; pub fn is_quantized_search( quantized_storage: Option<&QuantizedVectors>, params: Option<&SearchParams>, ) -> bool { let ignore_quantization = params .and_then(|p| p.quantization) .map(|q| q.ignore) .unwrap_or(default_quantization_ignore_value()); let exact = params.map(|p| p.exact).unwrap_or(false); quantized_storage.is_some() && !ignore_quantization && !exact } pub fn get_oversampled_top( quantized_storage: Option<&QuantizedVectors>, params: Option<&SearchParams>, top: usize, ) -> usize { let quantization_enabled = is_quantized_search(quantized_storage, params); let oversampling_value = params .and_then(|p| p.quantization) .map(|q| q.oversampling) .unwrap_or(default_quantization_oversampling_value()); match oversampling_value { Some(oversampling) if quantization_enabled && oversampling > 1.0 => { (oversampling * top as f64) as usize } _ => top, } } #[allow(clippy::too_many_arguments)] pub fn postprocess_search_result( mut search_result: Vec<ScoredPointOffset>, point_deleted: &BitSlice, vector_storage: &VectorStorageEnum, quantized_vectors: Option<&QuantizedVectors>, vector: &QueryVector, params: Option<&SearchParams>, top: usize, hardware_counter: HardwareCounterCell, ) -> OperationResult<Vec<ScoredPointOffset>> { let quantization_enabled = is_quantized_search(quantized_vectors, params); let default_rescoring = quantized_vectors .as_ref() .map(|q| q.default_rescoring()) .unwrap_or(false); let rescore = quantization_enabled && 
params .and_then(|p| p.quantization) .and_then(|q| q.rescore) .unwrap_or(default_rescoring); if rescore { let mut scorer = FilteredScorer::new( vector.to_owned(), vector_storage, None, None, point_deleted, hardware_counter, )?; search_result = scorer .score_points(&mut search_result.iter().map(|x| x.idx).collect_vec(), 0) .collect(); search_result.sort_unstable(); search_result.reverse(); } search_result.truncate(top); Ok(search_result) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/struct_filter_context.rs
lib/segment/src/index/struct_filter_context.rs
use common::types::PointOffsetType; use crate::index::query_optimization::optimized_filter::{OptimizedFilter, check_optimized_filter}; use crate::payload_storage::FilterContext; pub struct StructFilterContext<'a> { optimized_filter: OptimizedFilter<'a>, } impl<'a> StructFilterContext<'a> { pub fn new(optimized_filter: OptimizedFilter<'a>) -> Self { Self { optimized_filter } } } impl FilterContext for StructFilterContext<'_> { fn check(&self, point_id: PointOffsetType) -> bool { check_optimized_filter(&self.optimized_filter, point_id) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sample_estimation.rs
lib/segment/src/index/sample_estimation.rs
use std::cmp::{max, min}; use common::types::PointOffsetType; const MAX_ESTIMATED_POINTS: usize = 1000; /// How many points do we need to check in order to estimate expected query cardinality. /// Based on <https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval> #[allow(dead_code)] fn estimate_required_sample_size(total: usize, confidence_interval: usize) -> usize { let confidence_interval = min(confidence_interval, total); let z = 1.96; // percentile 0.95 of normal distribution let index_fraction = confidence_interval as f64 / total as f64 / 2.0; let h = 0.5; // success rate which requires most number of estimations let estimated_size = h * (1. - h) / (index_fraction / z).powi(2); max(estimated_size as usize, 10) } /// Returns (expected cardinality ± confidence interval at 0.99) /// Based on <https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Agresti%E2%80%93Coull_interval> fn confidence_agresti_coull_interval(trials: usize, positive: usize, total: usize) -> (i64, i64) { let z = 2.; // heuristics let n_hat = trials as f64 + z * z; let phat = (positive as f64 + z * z / 2.) / n_hat; let interval = z * ((phat / n_hat) * (1. 
- phat)).sqrt(); let expected = (phat * total as f64) as i64; let delta = (interval * total as f64) as i64; (expected, delta) } /// Tests if given `query` have cardinality higher than the `threshold` /// Iteratively samples points until the decision could be made with confidence pub fn sample_check_cardinality( sample_points: impl Iterator<Item = PointOffsetType>, checker: impl Fn(PointOffsetType) -> bool, threshold: usize, total_points: usize, ) -> bool { let mut matched_points = 0; let mut total_checked = 0; let mut exp = 0; let mut interval; for idx in sample_points.take(MAX_ESTIMATED_POINTS) { matched_points += usize::from(checker(idx)); total_checked += 1; let estimation = confidence_agresti_coull_interval(total_checked, matched_points, total_points); exp = estimation.0; interval = estimation.1; if exp - interval > threshold as i64 { return true; } if exp + interval < threshold as i64 { return false; } } exp > threshold as i64 } #[cfg(test)] mod tests { use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use super::*; #[test] fn test_confidence_interval() { let mut rng = StdRng::seed_from_u64(42); let total = 100_000; let true_p = 0.25; let mut delta = 100_000; let mut positive = 0; for i in 1..=101 { positive += usize::from(rng.random_bool(true_p)); if i % 20 == 1 { let interval = confidence_agresti_coull_interval(i, positive, total); assert!(interval.1 < delta); delta = interval.1; eprintln!( "confidence_agresti_coull_interval({i}, {positive}, {total}) = {interval:#?}" ); } } } #[test] fn test_sample_check_cardinality() { let res = sample_check_cardinality( vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12].into_iter(), |idx| idx % 2 == 0, 10_000, 100_000, ); assert!(res) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/visited_pool.rs
lib/segment/src/index/visited_pool.rs
//! Structures for fast and tread-safe way to check if some points were visited or not use common::defaults::POOL_KEEP_LIMIT; use common::types::PointOffsetType; use parking_lot::RwLock; /// Visited list handle is an owner of the `VisitedList`, which is returned by `VisitedPool` and returned back to it #[derive(Debug)] pub struct VisitedListHandle<'a> { pool: &'a VisitedPool, visited_list: VisitedList, } /// Visited list reuses same memory to keep track of visited points ids among multiple consequent queries /// /// It stores the sequence number of last processed operation next to the point ID, which allows to avoid memory allocation /// and reuse same counter for multiple queries. #[derive(Debug)] struct VisitedList { current_iter: u8, visit_counters: Vec<u8>, } impl Default for VisitedList { fn default() -> Self { VisitedList { current_iter: 1, visit_counters: vec![], } } } impl VisitedList { fn new(num_points: usize) -> Self { VisitedList { current_iter: 1, visit_counters: vec![0; num_points], } } } impl Drop for VisitedListHandle<'_> { fn drop(&mut self) { self.pool .return_back(std::mem::take(&mut self.visited_list)); } } impl<'a> VisitedListHandle<'a> { fn new(pool: &'a VisitedPool, data: VisitedList) -> Self { VisitedListHandle { pool, visited_list: data, } } /// Return `true` if visited pub fn check(&self, point_id: PointOffsetType) -> bool { self.visited_list .visit_counters .get(point_id as usize) .is_some_and(|x| *x == self.visited_list.current_iter) } /// Updates visited list /// return `true` if point was visited before pub fn check_and_update_visited(&mut self, point_id: PointOffsetType) -> bool { let idx = point_id as usize; if idx >= self.visited_list.visit_counters.len() { self.visited_list.visit_counters.resize(idx + 1, 0); } std::mem::replace( &mut self.visited_list.visit_counters[idx], self.visited_list.current_iter, ) == self.visited_list.current_iter } pub fn next_iteration(&mut self) { self.visited_list.current_iter = 
self.visited_list.current_iter.wrapping_add(1); if self.visited_list.current_iter == 0 { self.visited_list.current_iter = 1; self.visited_list.visit_counters.fill(0); } } fn resize(&mut self, num_points: usize) { // `self.current_iter` is never 0, so it's safe to use 0 as a default // value. self.visited_list.visit_counters.resize(num_points, 0); } } /// Keeps a list of `VisitedList` which could be requested and released from multiple threads /// /// If there are more requests than lists - creates a new list, but only keeps max defined amount. #[derive(Debug)] pub struct VisitedPool { pool: RwLock<Vec<VisitedList>>, } impl VisitedPool { pub fn new() -> Self { VisitedPool { pool: RwLock::new(Vec::with_capacity(*POOL_KEEP_LIMIT)), } } pub fn get(&self, num_points: usize) -> VisitedListHandle<'_> { // If there are more concurrent requests, a new temporary list is created dynamically. // This limit is implemented to prevent memory leakage. match self.pool.write().pop() { None => VisitedListHandle::new(self, VisitedList::new(num_points)), Some(data) => { let mut visited_list = VisitedListHandle::new(self, data); visited_list.resize(num_points); visited_list.next_iteration(); visited_list } } } fn return_back(&self, data: VisitedList) { let mut pool = self.pool.write(); if pool.len() < *POOL_KEEP_LIMIT { pool.push(data); } } } impl Default for VisitedPool { fn default() -> Self { VisitedPool::new() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_visited_list() { let pool = VisitedPool::new(); let mut visited_list = pool.get(10); for _ in 0..2 { assert!(!visited_list.check(0)); assert!(!visited_list.check_and_update_visited(0)); assert!(visited_list.check(0)); assert!(visited_list.check_and_update_visited(0)); assert!(visited_list.check(0)); for _ in 0..(u8::MAX as usize * 2 + 10) { visited_list.next_iteration(); assert!(!visited_list.check(0)); } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/entry_points.rs
lib/segment/src/index/hnsw_index/entry_points.rs
use std::cmp::Ordering; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::PointOffsetType; use rand::Rng; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] pub struct EntryPoint { pub point_id: PointOffsetType, pub level: usize, } impl Eq for EntryPoint {} impl PartialOrd for EntryPoint { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for EntryPoint { fn cmp(&self, other: &Self) -> Ordering { self.level.cmp(&other.level) } } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct EntryPoints { entry_points: Vec<EntryPoint>, extra_entry_points: FixedLengthPriorityQueue<EntryPoint>, } impl EntryPoints { pub fn new(extra_entry_points: usize) -> Self { EntryPoints { entry_points: vec![], extra_entry_points: FixedLengthPriorityQueue::new(extra_entry_points), } } pub fn merge_from_other(&mut self, mut other: EntryPoints) { self.entry_points.append(&mut other.entry_points); // Do not merge `extra_entry_points` to prevent duplications } pub fn new_point<F>( &mut self, new_point: PointOffsetType, level: usize, checker: F, ) -> Option<EntryPoint> where F: Fn(PointOffsetType) -> bool, { // there are 3 cases: // - There is proper entry point for a new point higher or same level - return the point // - The new point is higher than any alternative - return the next best thing // - There is no point and alternatives - return None for i in 0..self.entry_points.len() { let candidate = &self.entry_points[i]; if !checker(candidate.point_id) { continue; // Checkpoint does not fulfil filtering conditions. Hence, does not "exists" } // Found checkpoint candidate return if candidate.level >= level { // The good checkpoint exists. 
// Return it, and also try to save given if required self.extra_entry_points.push(EntryPoint { point_id: new_point, level, }); Some(candidate.clone()) } else { // The current point is better than existing let entry = self.entry_points[i].clone(); self.entry_points[i] = EntryPoint { point_id: new_point, level, }; self.extra_entry_points.push(entry.clone()); Some(entry) }; } // No entry points found. Create a new one and return self let new_entry = EntryPoint { point_id: new_point, level, }; self.entry_points.push(new_entry); None } /// Find the highest `EntryPoint` which satisfies filtering condition of `checker` pub fn get_entry_point<F>(&self, checker: F) -> Option<EntryPoint> where F: Fn(PointOffsetType) -> bool, { self.entry_points .iter() .find(|entry| checker(entry.point_id)) .cloned() .or_else(|| { // Searching for at least some entry point self.extra_entry_points .iter_unsorted() .filter(|entry| checker(entry.point_id)) .cloned() .max_by_key(|ep| ep.level) }) } pub fn get_random_entry_point<F, R: Rng + ?Sized>( &self, rnd: &mut R, checker: F, ) -> Option<EntryPoint> where F: Fn(PointOffsetType) -> bool, { let filtered_entry_points: Vec<_> = self .entry_points .iter() .filter(|entry| checker(entry.point_id)) .cloned() .collect(); if !filtered_entry_points.is_empty() { let random_index = rnd.random_range(0..filtered_entry_points.len()); return Some(filtered_entry_points[random_index].clone()); } let filtered_extra_entry_points: Vec<_> = self .extra_entry_points .iter_unsorted() .filter(|entry| checker(entry.point_id)) .cloned() .collect(); if !filtered_extra_entry_points.is_empty() { let random_index = rnd.random_range(0..filtered_extra_entry_points.len()); return Some(filtered_extra_entry_points[random_index].clone()); } None } } #[cfg(test)] mod tests { use rand::Rng; use super::*; #[test] fn test_entry_points() { let mut points = EntryPoints::new(10); let mut rnd = rand::rng(); for i in 0..1000 { let level = rnd.random_range(0..10000); points.new_point(i, 
level, |_x| true); } assert_eq!(points.entry_points.len(), 1); assert_eq!(points.extra_entry_points.len(), 10); assert!(points.entry_points[0].level > 1); for i in 1000..2000 { let level = rnd.random_range(0..10000); points.new_point(i, level, |x| x % 5 == i % 5); } assert_eq!(points.entry_points.len(), 5); assert_eq!(points.extra_entry_points.len(), 10); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/config.rs
lib/segment/src/index/hnsw_index/config.rs
use std::path::{Path, PathBuf}; use io::file_operations::{atomic_save_json, read_json}; use serde::{Deserialize, Serialize}; use crate::common::operation_error::OperationResult; pub const HNSW_INDEX_CONFIG_FILE: &str = "hnsw_config.json"; #[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] pub struct HnswGraphConfig { pub m: usize, /// Requested M pub m0: usize, /// Actual M on level 0 pub ef_construct: usize, /// Number of neighbours to search on construction pub ef: usize, /// We prefer a full scan search upto (excluding) this number of vectors. /// /// Note: this is number of vectors, not KiloBytes. #[serde(alias = "indexing_threshold")] pub full_scan_threshold: usize, #[serde(default)] pub max_indexing_threads: usize, #[serde(default)] pub payload_m: Option<usize>, #[serde(default)] pub payload_m0: Option<usize>, #[serde(default)] pub indexed_vector_count: Option<usize>, } impl HnswGraphConfig { pub fn new( m: usize, ef_construct: usize, full_scan_threshold: usize, max_indexing_threads: usize, payload_m: Option<usize>, indexed_vector_count: usize, ) -> Self { HnswGraphConfig { m, m0: m * 2, ef_construct, ef: ef_construct, full_scan_threshold, max_indexing_threads, payload_m, payload_m0: payload_m.map(|v| v * 2), indexed_vector_count: Some(indexed_vector_count), } } pub fn get_config_path(path: &Path) -> PathBuf { path.join(HNSW_INDEX_CONFIG_FILE) } pub fn load(path: &Path) -> OperationResult<Self> { Ok(read_json(path)?) } pub fn save(&self, path: &Path) -> OperationResult<()> { Ok(atomic_save_json(path, self)?) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/build_condition_checker.rs
lib/segment/src/index/hnsw_index/build_condition_checker.rs
use common::types::PointOffsetType; use crate::index::visited_pool::VisitedListHandle; use crate::payload_storage::FilterContext; pub struct BuildConditionChecker<'a> { pub filter_list: &'a VisitedListHandle<'a>, pub current_point: PointOffsetType, } impl FilterContext for BuildConditionChecker<'_> { fn check(&self, point_id: PointOffsetType) -> bool { if point_id == self.current_point { return false; // Do not match current point while inserting it (second time) } self.filter_list.check(point_id) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_links.rs
lib/segment/src/index/hnsw_index/graph_links.rs
use std::alloc::Layout; use std::io::Cursor; use std::path::Path; use std::sync::Arc; use common::types::PointOffsetType; use memmap2::Mmap; use memory::madvise::{Advice, AdviceSetting, Madviseable}; use memory::mmap_ops::open_read_mmap; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::hnsw_index::HnswM; use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors; use crate::vector_storage::{Sequential, VectorStorageEnum}; mod header; mod serializer; mod view; pub use serializer::serialize_graph_links; pub use view::LinksIterator; use view::{CompressionInfo, GraphLinksView, LinksWithVectorsIterator}; /* Links data for whole graph layers. sorted points: points: points to lvl 012345 142350 0 -> 0 1 -> 4 lvl4: 7 lvl4: 7 2 -> 2 lvl3: Z Y lvl3: ZY 3 -> 2 lvl2: abcd lvl2: adbc 4 -> 3 lvl1: ABCDE lvl1: ADBCE 5 -> 1 lvl0: 123456 lvl0: 123456 <- lvl 0 is not sorted lvl offset: 6 11 15 17 │ │ │ │ │ │ │ │ ▼ ▼ ▼ ▼ indexes: 012345 6789A BCDE FG H flatten: 123456 ADBCE adbc ZY 7 ▲ ▲ ▲ ▲ ▲ ▲ ▲ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ reindex: 142350 142350 142350 142350 (same for each level) for lvl > 0: links offset = level_offsets[level] + offsets[reindex[point_id]] */ #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum GraphLinksFormat { Plain, Compressed, CompressedWithVectors, } /// Similar to [`GraphLinksFormat`], won't let you use `CompressedWithVectors` /// without providing the vectors. #[derive(Clone, Copy)] pub enum GraphLinksFormatParam<'a> { Plain, Compressed, CompressedWithVectors(&'a dyn GraphLinksVectors), } /// This trait lets the [`serialize_graph_links`] to access vector values. pub trait GraphLinksVectors { /// Base vectors will be included once per point on level 0. /// The layout of each vector must correspond to [`VectorLayout::base`]. fn get_base_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]>; /// Link vectors will be included for each link per point. 
/// The layout of each vector must correspond to [`VectorLayout::link`]. fn get_link_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]>; /// Get the layout of base and link vectors. fn vectors_layout(&self) -> GraphLinksVectorsLayout; } /// Layout of base and link vectors, returned by [`GraphLinksVectors::vectors_layout`]. #[derive(Copy, Clone)] pub struct GraphLinksVectorsLayout { pub base: Layout, pub link: Layout, } /// A [`GraphLinksVectors`] implementation that uses real storage. pub struct StorageGraphLinksVectors<'a> { vector_storage: &'a VectorStorageEnum, // base vectors quantized_vectors: &'a QuantizedVectors, // link vectors vectors_layout: GraphLinksVectorsLayout, } impl<'a> StorageGraphLinksVectors<'a> { pub fn try_new( vector_storage: &'a VectorStorageEnum, quantized_vectors: Option<&'a QuantizedVectors>, ) -> Option<Self> { let quantized_vectors = quantized_vectors?; Some(Self { vector_storage, quantized_vectors, vectors_layout: GraphLinksVectorsLayout { base: vector_storage.get_vector_layout().ok()?, link: quantized_vectors.get_quantized_vector_layout().ok()?, }, }) } } impl<'a> GraphLinksVectors for StorageGraphLinksVectors<'a> { /// Note: uses [`Sequential`] because [`serializer::serialize_graph_links`] /// traverses base vectors in a sequential order. fn get_base_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]> { self.vector_storage .get_vector_bytes_opt::<Sequential>(point_id) .ok_or_else(|| { OperationError::service_error(format!( "Point {point_id} not found in vector storage" )) }) } /// Note: unlike base vectors, link vectors are written in a random order. fn get_link_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]> { Ok(self.quantized_vectors.get_quantized_vector(point_id)) } fn vectors_layout(&self) -> GraphLinksVectorsLayout { self.vectors_layout } } impl GraphLinksFormat { /// Create the corresponding [`GraphLinksFormatParam`]. 
/// /// # Panics /// /// Panics if `CompressedWithVectors` is selected, but `vectors` is `None`. #[cfg(test)] pub fn with_param_for_tests<'a, Q: GraphLinksVectors>( &self, vectors: Option<&'a Q>, ) -> GraphLinksFormatParam<'a> { match self { GraphLinksFormat::Plain => GraphLinksFormatParam::Plain, GraphLinksFormat::Compressed => GraphLinksFormatParam::Compressed, GraphLinksFormat::CompressedWithVectors => match vectors { Some(v) => GraphLinksFormatParam::CompressedWithVectors(v), None => panic!(), }, } } /// Create the corresponding [`GraphLinksFormatParam`]. /// /// When vectors are not available, `CompressedWithVectors` is downgraded to /// `Compressed`. pub fn with_param<'a, V: GraphLinksVectors>( &self, vectors: Option<&'a V>, ) -> GraphLinksFormatParam<'a> { match self { GraphLinksFormat::Plain => GraphLinksFormatParam::Plain, GraphLinksFormat::Compressed => GraphLinksFormatParam::Compressed, GraphLinksFormat::CompressedWithVectors => match vectors { Some(v) => GraphLinksFormatParam::CompressedWithVectors(v), None => GraphLinksFormatParam::Compressed, }, } } pub fn is_with_vectors(&self) -> bool { match self { GraphLinksFormat::Plain | GraphLinksFormat::Compressed => false, GraphLinksFormat::CompressedWithVectors => true, } } } impl<'a> GraphLinksFormatParam<'a> { pub fn as_format(&self) -> GraphLinksFormat { match self { GraphLinksFormatParam::Plain => GraphLinksFormat::Plain, GraphLinksFormatParam::Compressed => GraphLinksFormat::Compressed, GraphLinksFormatParam::CompressedWithVectors(_) => { GraphLinksFormat::CompressedWithVectors } } } } self_cell::self_cell! 
{
    // Self-referential pair: `owner` holds the raw serialized bytes, while
    // `dependent` is a parsed view borrowing from those same bytes.
    pub struct GraphLinks {
        owner: GraphLinksEnum,

        #[covariant]
        dependent: GraphLinksView,
    }

    impl {Debug}
}

// Where the serialized links bytes live: heap memory or a memory map.
#[derive(Debug)]
enum GraphLinksEnum {
    Ram(Vec<u8>),
    Mmap(Arc<Mmap>),
}

impl GraphLinksEnum {
    // Raw serialized bytes, regardless of the backing storage.
    fn as_bytes(&self) -> &[u8] {
        match self {
            GraphLinksEnum::Ram(data) => data.as_slice(),
            GraphLinksEnum::Mmap(mmap) => &mmap[..],
        }
    }
}

impl GraphLinks {
    /// Memory-map the links file at `path` and parse it as `format`.
    ///
    /// When `on_disk` is `false`, the mapping is populated (pre-read) upfront.
    pub fn load_from_file(
        path: &Path,
        on_disk: bool,
        format: GraphLinksFormat,
    ) -> OperationResult<Self> {
        let populate = !on_disk;
        let mmap = open_read_mmap(path, AdviceSetting::Advice(Advice::Random), populate)?;
        Self::try_new(GraphLinksEnum::Mmap(Arc::new(mmap)), |x| {
            GraphLinksView::load(x.as_bytes(), format)
        })
    }

    /// Serialize `edges` into an in-memory buffer, then parse that buffer.
    pub fn new_from_edges(
        edges: Vec<Vec<Vec<PointOffsetType>>>,
        format_param: GraphLinksFormatParam<'_>,
        hnsw_m: HnswM,
    ) -> OperationResult<Self> {
        let mut cursor = Cursor::new(Vec::<u8>::new());
        serialize_graph_links(edges, format_param, hnsw_m, &mut cursor)?;
        let mut bytes = cursor.into_inner();
        // Don't keep excess capacity around for the lifetime of the index.
        bytes.shrink_to_fit();
        Self::try_new(GraphLinksEnum::Ram(bytes), |x| {
            GraphLinksView::load(x.as_bytes(), format_param.as_format())
        })
    }

    fn view(&self) -> &GraphLinksView<'_> {
        self.borrow_dependent()
    }

    /// Raw serialized bytes backing this instance.
    pub fn as_bytes(&self) -> &[u8] {
        self.borrow_owner().as_bytes()
    }

    /// The format this instance was parsed with, derived from the view's
    /// compression info.
    pub fn format(&self) -> GraphLinksFormat {
        match self.view().compression {
            CompressionInfo::Uncompressed { .. } => GraphLinksFormat::Plain,
            CompressionInfo::Compressed { .. } => GraphLinksFormat::Compressed,
            CompressionInfo::CompressedWithVectors { .. } => {
                GraphLinksFormat::CompressedWithVectors
            }
        }
    }

    pub fn num_points(&self) -> usize {
        self.view().reindex.len()
    }

    /// Apply `f` to every link of `point_id` at `level`.
    pub fn for_each_link(
        &self,
        point_id: PointOffsetType,
        level: usize,
        f: impl FnMut(PointOffsetType),
    ) {
        self.links(point_id, level).for_each(f);
    }

    /// Iterator over the links of `point_id` at `level`.
    #[inline]
    pub fn links(&self, point_id: PointOffsetType, level: usize) -> LinksIterator<'_> {
        self.view().links(point_id, level)
    }

    #[inline]
    pub fn links_empty(&self, point_id: PointOffsetType, level: usize) -> bool {
        self.view().links_empty(point_id, level)
    }

    /// Base vector bytes of `point_id` plus its links at `level`, each zipped
    /// with the stored vector bytes of the linked point.
    #[inline]
    pub fn links_with_vectors(
        &self,
        point_id: PointOffsetType,
        level: usize,
    ) -> (&[u8], LinksWithVectorsIterator<'_>) {
        let (base_vector, links, vectors) = self.view().links_with_vectors(point_id, level);
        (base_vector, links.zip(vectors))
    }

    pub fn point_level(&self, point_id: PointOffsetType) -> usize {
        self.view().point_level(point_id)
    }

    /// Convert the graph links to a vector of edges, suitable for passing into
    /// [`serialize_graph_links`] or using in tests.
    pub fn to_edges(&self) -> Vec<Vec<Vec<PointOffsetType>>> {
        self.to_edges_impl(|point_id, level| self.links(point_id, level).collect())
    }

    /// Convert the graph links to a vector of edges, generic over the container type.
    pub fn to_edges_impl<Container>(
        &self,
        mut f: impl FnMut(PointOffsetType, usize) -> Container,
    ) -> Vec<Vec<Container>> {
        let mut edges = Vec::with_capacity(self.num_points());
        for point_id in 0..self.num_points() {
            // One container per level, from 0 up to the point's own level.
            let num_levels = self.point_level(point_id as PointOffsetType) + 1;
            let mut levels = Vec::with_capacity(num_levels);
            levels.extend((0..num_levels).map(|level| f(point_id as PointOffsetType, level)));
            edges.push(levels);
        }
        edges
    }

    /// Populate the disk cache with data, if applicable.
    /// This is a blocking operation.
    pub fn populate(&self) -> OperationResult<()> {
        match self.borrow_owner() {
            GraphLinksEnum::Mmap(mmap) => mmap.populate(),
            GraphLinksEnum::Ram(_) => {} // Already in memory; nothing to do.
        };
        Ok(())
    }
}

/// Sort the first `m` values in `links` and return them.
/// Used to compare stored
/// links where the order of the first `m` links is not preserved.
#[cfg(test)]
pub(super) fn normalize_links(m: usize, mut links: Vec<PointOffsetType>) -> Vec<PointOffsetType> {
    // Only the first `m` links get a canonical (sorted) order; the tail keeps
    // its stored order.
    let first = links.len().min(m);
    links[..first].sort_unstable();
    links
}

#[cfg(test)]
mod tests {
    use io::file_operations::atomic_save;
    use rand::Rng;
    use rstest::rstest;
    use tempfile::Builder;

    use super::*;
    use crate::index::hnsw_index::HnswM;

    // In-memory `GraphLinksVectors` implementation filled with random bytes.
    struct TestGraphLinksVectors {
        base_vectors: Vec<Vec<u8>>,
        link_vectors: Vec<Vec<u8>>,
        vectors_layout: GraphLinksVectorsLayout,
    }

    impl TestGraphLinksVectors {
        fn new(count: usize, base_align: usize, link_align: usize) -> Self {
            let mut rng = rand::rng();
            // Arbitrary multiples of the alignments, so sizes satisfy
            // `Layout::from_size_align`.
            let base_len = base_align * 7;
            let link_len = link_align * 5;
            Self {
                base_vectors: (0..count)
                    .map(|_| (0..base_len).map(|_| rng.random()).collect())
                    .collect(),
                link_vectors: (0..count)
                    .map(|_| (0..link_len).map(|_| rng.random()).collect())
                    .collect(),
                vectors_layout: GraphLinksVectorsLayout {
                    base: Layout::from_size_align(base_len, base_align).unwrap(),
                    link: Layout::from_size_align(link_len, link_align).unwrap(),
                },
            }
        }
    }

    impl GraphLinksVectors for TestGraphLinksVectors {
        fn get_base_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]> {
            Ok(&self.base_vectors[point_id as usize])
        }

        fn get_link_vector(&self, point_id: PointOffsetType) -> OperationResult<&[u8]> {
            Ok(&self.link_vectors[point_id as usize])
        }

        fn vectors_layout(&self) -> GraphLinksVectorsLayout {
            self.vectors_layout
        }
    }

    // Generate a random per-point, per-level edge list.
    fn random_links(
        points_count: usize,
        max_levels_count: usize,
        hnsw_m: &HnswM,
    ) -> Vec<Vec<Vec<PointOffsetType>>> {
        let mut rng = rand::rng();
        (0..points_count)
            .map(|_| {
                let levels_count = rng.random_range(1..max_levels_count);
                (0..levels_count)
                    .map(|level| {
                        let mut max_links_count = hnsw_m.level_m(level);
                        max_links_count *= 2; // Simulate additional payload links.
                        let links_count = rng.random_range(0..max_links_count);
                        (0..links_count)
                            .map(|_| rng.random_range(0..points_count) as PointOffsetType)
                            .collect()
                    })
                    .collect()
            })
            .collect()
    }

    // Assert that `right` stores exactly the edges in `left`, and — when
    // `vectors` is provided — the expected base/link vector bytes.
    fn check_links(
        mut left: Vec<Vec<Vec<PointOffsetType>>>,
        right: &GraphLinks,
        vectors: &Option<TestGraphLinksVectors>,
    ) {
        let mut right_links = right.to_edges_impl(|point_id, level| {
            let links: Vec<_> = if let Some(vectors) = vectors {
                let (base_vector, iter) = right.links_with_vectors(point_id, level);
                // The base vector is stored once per point, on level 0 only.
                if level == 0 {
                    assert_eq!(base_vector, vectors.get_base_vector(point_id).unwrap());
                } else {
                    assert!(base_vector.is_empty());
                }
                iter.map(|(link, bytes)| {
                    assert_eq!(bytes, vectors.get_link_vector(link).unwrap());
                    link
                })
                .collect()
            } else {
                right.links(point_id, level).collect()
            };
            assert_eq!(links.is_empty(), right.links_empty(point_id, level));
            links
        });

        // The order of the first `sorted_count` links per level is not
        // preserved by the storage, so normalize both sides before comparing.
        for links in [&mut left, &mut right_links].iter_mut() {
            links.iter_mut().for_each(|levels| {
                levels
                    .iter_mut()
                    .enumerate()
                    .for_each(|(level_idx, links)| {
                        *links = normalize_links(
                            right.view().sorted_count(level_idx),
                            std::mem::take(links),
                        );
                    })
            });
        }

        assert_eq!(left, right_links);
    }

    /// Test that random links can be saved by [`serialize_graph_links`] and
    /// loaded correctly by a [`GraphLinks`] impl.
    #[rstest]
    #[case::plain(GraphLinksFormat::Plain, 8, 8)]
    #[case::compressed(GraphLinksFormat::Compressed, 8, 8)]
    #[case::comp_vec_1_16(GraphLinksFormat::CompressedWithVectors, 1, 16)]
    #[case::comp_vec_4_1(GraphLinksFormat::CompressedWithVectors, 4, 1)]
    #[case::comp_vec_4_16(GraphLinksFormat::CompressedWithVectors, 4, 16)]
    fn test_save_load(
        #[case] format: GraphLinksFormat,
        #[case] base_align: usize,
        #[case] link_align: usize,
    ) {
        let points_count = 1000;
        let max_levels_count = 10;
        let hnsw_m = HnswM::new2(8);

        let path = Builder::new().prefix("graph_dir").tempdir().unwrap();
        let links_file = path.path().join("links.bin");
        let links = random_links(points_count, max_levels_count, &hnsw_m);
        // Vectors are only generated (and checked) for the with-vectors format.
        let vectors = format
            .is_with_vectors()
            .then(|| TestGraphLinksVectors::new(points_count, base_align, link_align));
        let format_param = format.with_param_for_tests(vectors.as_ref());
        atomic_save(&links_file, |writer| {
            serialize_graph_links(links.clone(), format_param, hnsw_m, writer)
        })
        .unwrap();
        let cmp_links = GraphLinks::load_from_file(&links_file, true, format).unwrap();
        check_links(links, &cmp_links, &vectors);
    }

    // Round-trip hand-picked edge cases through `GraphLinks::new_from_edges`.
    #[rstest]
    #[case::uncompressed(GraphLinksFormat::Plain)]
    #[case::compressed(GraphLinksFormat::Compressed)]
    #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)]
    fn test_graph_links_construction(#[case] format: GraphLinksFormat) {
        let hnsw_m = HnswM::new2(8);

        let vectors = format
            .is_with_vectors()
            .then(|| TestGraphLinksVectors::new(100, 8, 8));

        let check = |links: Vec<Vec<Vec<PointOffsetType>>>| {
            let format_param = format.with_param_for_tests(vectors.as_ref());
            let cmp_links =
                GraphLinks::new_from_edges(links.clone(), format_param, hnsw_m).unwrap();
            check_links(links, &cmp_links, &vectors);
        };

        // no points
        check(vec![]);

        // 2 points without any links
        check(vec![vec![vec![]], vec![vec![]]]);

        // one link at level 0
        check(vec![vec![vec![1]], vec![vec![0]]]);

        // 3 levels with no links at second level
        check(vec![
            vec![vec![1, 2]],
            vec![vec![0, 2], vec![], vec![2]],
            vec![vec![0, 1], vec![], vec![1]],
        ]);

        // 3 levels with no links at last level
        check(vec![
            vec![vec![1, 2], vec![2], vec![]],
            vec![vec![0, 2], vec![1], vec![]],
            vec![vec![0, 1]],
        ]);

        // 4 levels with random nonexistent links
        check(vec![
            vec![vec![1, 2, 5, 6]],
            vec![vec![0, 2, 7, 8], vec![], vec![34, 45, 10]],
            vec![vec![0, 1, 1, 2], vec![3, 5, 9], vec![9, 8], vec![9], vec![]],
            vec![vec![0, 1, 5, 6], vec![1, 5, 0]],
            vec![vec![0, 1, 9, 18], vec![1, 5, 6], vec![5], vec![9]],
        ]);

        // fully random links
        check(random_links(100, 10, &hnsw_m));
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/hnsw.rs
lib/segment/src/index/hnsw_index/hnsw.rs
use std::ops::Deref as _;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::thread;

use atomic_refcell::{AtomicRef, AtomicRefCell};
use bitvec::prelude::BitSlice;
use bitvec::vec::BitVec;
use common::counter::hardware_counter::HardwareCounterCell;
use common::cow::BoxCow;
#[cfg(target_os = "linux")]
use common::cpu::linux_low_thread_priority;
use common::ext::BitSliceExt as _;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::{PointOffsetType, ScoredPointOffset, TelemetryDetail};
use fs_err as fs;
use itertools::EitherOrBoth;
use log::{debug, trace};
use memory::fadvise::clear_disk_cache;
use parking_lot::Mutex;
use rand::Rng;
use rayon::ThreadPool;
use rayon::prelude::*;

#[cfg(feature = "gpu")]
use super::gpu::gpu_devices_manager::LockedGpuDevice;
use super::gpu::gpu_insert_context::GpuInsertContext;
#[cfg(feature = "gpu")]
use super::gpu::gpu_vector_storage::GpuVectorStorage;
use super::point_scorer::BatchFilteredSearcher;
use crate::common::BYTES_IN_KB;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::common::operation_time_statistics::{
    OperationDurationsAggregator, ScopeDurationMeasurer,
};
use crate::data_types::query_context::VectorQueryContext;
use crate::data_types::vectors::{QueryVector, VectorInternal, VectorRef};
use crate::id_tracker::IdTrackerSS;
use crate::index::hnsw_index::HnswM;
use crate::index::hnsw_index::build_condition_checker::BuildConditionChecker;
use crate::index::hnsw_index::config::HnswGraphConfig;
#[cfg(feature = "gpu")]
use crate::index::hnsw_index::gpu::gpu_graph_builder::GPU_MAX_VISITED_FLAGS_FACTOR;
#[cfg(feature = "gpu")]
use crate::index::hnsw_index::gpu::{get_gpu_groups_count, gpu_graph_builder::build_hnsw_on_gpu};
use crate::index::hnsw_index::graph_layers::{
    GraphLayers, GraphLayersWithVectors, SearchAlgorithm,
};
use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;
use crate::index::hnsw_index::graph_layers_healer::GraphLayersHealer;
use crate::index::hnsw_index::graph_links::{GraphLinksFormatParam, StorageGraphLinksVectors};
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::query_estimator::adjust_to_available_vectors;
use crate::index::sample_estimation::sample_check_cardinality;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::index::vector_index_search_common::{
    get_oversampled_top, is_quantized_search, postprocess_search_result,
};
use crate::index::visited_pool::{VisitedListHandle, VisitedPool};
use crate::index::{PayloadIndex, VectorIndex, VectorIndexEnum};
use crate::json_path::JsonPath;
use crate::payload_storage::FilterContext;
use crate::segment_constructor::VectorIndexBuildArgs;
use crate::telemetry::VectorIndexSearchesTelemetry;
use crate::types::Condition::Field;
use crate::types::{
    ACORN_MAX_SELECTIVITY_DEFAULT, FieldCondition, Filter, HnswConfig, HnswGlobalConfig,
    QuantizationSearchParams, SearchParams,
};
use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors;
use crate::vector_storage::query::DiscoveryQuery;
use crate::vector_storage::{VectorStorage, VectorStorageEnum, new_raw_scorer};

// Fixed build-time switch, passed to the graph builders during construction.
const HNSW_USE_HEURISTIC: bool = true;

// Shared by the CPU and GPU build paths so both log an identical message.
const FINISH_MAIN_GRAPH_LOG_MESSAGE: &str = "Finish main graph in time";

/// Build first N points in HNSW graph using only a single thread, to avoid
/// disconnected components in the graph.
#[cfg(debug_assertions)] pub const SINGLE_THREADED_HNSW_BUILD_THRESHOLD: usize = 32; #[cfg(not(debug_assertions))] pub const SINGLE_THREADED_HNSW_BUILD_THRESHOLD: usize = 256; const LINK_COMPRESSION_CONVERT_EXISTING: bool = false; #[derive(Debug)] pub struct HNSWIndex { id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>, payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, config: HnswGraphConfig, path: PathBuf, graph: GraphLayers, searches_telemetry: HNSWSearchesTelemetry, is_on_disk: bool, } #[derive(Debug)] struct HNSWSearchesTelemetry { unfiltered_plain: Arc<Mutex<OperationDurationsAggregator>>, filtered_plain: Arc<Mutex<OperationDurationsAggregator>>, unfiltered_hnsw: Arc<Mutex<OperationDurationsAggregator>>, small_cardinality: Arc<Mutex<OperationDurationsAggregator>>, large_cardinality: Arc<Mutex<OperationDurationsAggregator>>, exact_filtered: Arc<Mutex<OperationDurationsAggregator>>, exact_unfiltered: Arc<Mutex<OperationDurationsAggregator>>, } impl HNSWSearchesTelemetry { fn new() -> Self { Self { unfiltered_plain: OperationDurationsAggregator::new(), filtered_plain: OperationDurationsAggregator::new(), unfiltered_hnsw: OperationDurationsAggregator::new(), small_cardinality: OperationDurationsAggregator::new(), large_cardinality: OperationDurationsAggregator::new(), exact_filtered: OperationDurationsAggregator::new(), exact_unfiltered: OperationDurationsAggregator::new(), } } } pub struct HnswIndexOpenArgs<'a> { pub path: &'a Path, pub id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, pub vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, pub quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>, pub payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, pub hnsw_config: HnswConfig, } impl HNSWIndex { pub fn open(args: HnswIndexOpenArgs<'_>) -> OperationResult<Self> { let HnswIndexOpenArgs { path, id_tracker, vector_storage, 
quantized_vectors, payload_index, hnsw_config, } = args; let config_path = HnswGraphConfig::get_config_path(path); let config = if config_path.exists() { HnswGraphConfig::load(&config_path)? } else { let vector_storage = vector_storage.borrow(); let available_vectors = vector_storage.available_vector_count(); let full_scan_threshold = vector_storage .size_of_available_vectors_in_bytes() .checked_div(available_vectors) .and_then(|avg_vector_size| { hnsw_config .full_scan_threshold .saturating_mul(BYTES_IN_KB) .checked_div(avg_vector_size) }) .unwrap_or(1); HnswGraphConfig::new( hnsw_config.m, hnsw_config.ef_construct, full_scan_threshold, hnsw_config.max_indexing_threads, hnsw_config.payload_m, available_vectors, ) }; let do_convert = LINK_COMPRESSION_CONVERT_EXISTING; let is_on_disk = hnsw_config.on_disk.unwrap_or(false); let graph = GraphLayers::load(path, is_on_disk, do_convert)?; Ok(HNSWIndex { id_tracker, vector_storage, quantized_vectors, payload_index, config, path: path.to_owned(), graph, searches_telemetry: HNSWSearchesTelemetry::new(), is_on_disk, }) } pub fn is_on_disk(&self) -> bool { self.is_on_disk } #[cfg(test)] pub(super) fn graph(&self) -> &GraphLayers { &self.graph } pub fn get_quantized_vectors(&self) -> Arc<AtomicRefCell<Option<QuantizedVectors>>> { self.quantized_vectors.clone() } pub fn build<R: Rng + ?Sized>( open_args: HnswIndexOpenArgs<'_>, build_args: VectorIndexBuildArgs<'_, R>, ) -> OperationResult<Self> { if HnswGraphConfig::get_config_path(open_args.path).exists() || GraphLayers::get_path(open_args.path).exists() { log::warn!( "HNSW index already exists at {:?}, skipping building", open_args.path ); debug_assert!(false); return Self::open(open_args); } let HnswIndexOpenArgs { path, id_tracker, vector_storage, quantized_vectors, payload_index, hnsw_config, } = open_args; let VectorIndexBuildArgs { permit, old_indices, gpu_device, rng, stopped, hnsw_global_config, feature_flags, progress, } = build_args; fs::create_dir_all(path)?; let 
id_tracker_ref = id_tracker.borrow(); let vector_storage_ref = vector_storage.borrow(); let quantized_vectors_ref = quantized_vectors.borrow(); let payload_index_ref = payload_index.borrow(); let total_vector_count = vector_storage_ref.total_vector_count(); let full_scan_threshold = vector_storage_ref .size_of_available_vectors_in_bytes() .checked_div(total_vector_count) .and_then(|avg_vector_size| { hnsw_config .full_scan_threshold .saturating_mul(BYTES_IN_KB) .checked_div(avg_vector_size) }) .unwrap_or(1); let mut config = HnswGraphConfig::new( hnsw_config.m, hnsw_config.ef_construct, full_scan_threshold, hnsw_config.max_indexing_threads, hnsw_config.payload_m, total_vector_count, ); #[allow(unused_mut)] let mut build_main_graph = config.m > 0; if !build_main_graph { debug!("skip building main HNSW graph"); } let payload_m = HnswM::new( config.payload_m.unwrap_or(config.m), config.payload_m0.unwrap_or(config.m0), ); // Progress subtasks let progress_migrate = build_main_graph.then(|| progress.subtask("migrate")); let progress_main_graph = build_main_graph.then(|| progress.subtask("main_graph")); let additional_links_params: Option<(ProgressTracker, Vec<(ProgressTracker, JsonPath)>)> = (payload_m.m > 0) .then(|| payload_index_ref.indexed_fields()) .filter(|fields| !fields.is_empty()) .map(|fields| { let progress_additional_links = progress.subtask("additional_links"); let fields = fields .into_iter() .map(|(field, payload_schema)| { let subtask_name = format!("{}:{field}", payload_schema.name()); (progress_additional_links.subtask(subtask_name), field) }) .collect::<Vec<_>>(); (progress_additional_links, fields) }); let old_index = old_indices .iter() .filter_map(|old_index| { OldIndexCandidate::evaluate( &feature_flags, old_index, &config, hnsw_global_config, &vector_storage_ref, &quantized_vectors_ref, id_tracker_ref.deref(), ) }) .max_by_key(|old_index| old_index.valid_points); // Build main index graph let deleted_bitslice = 
vector_storage_ref.deleted_vector_bitslice(); #[cfg(feature = "gpu")] let gpu_name_postfix = if let Some(gpu_device) = gpu_device { format!(" and GPU {}", gpu_device.device().name()) } else { Default::default() }; #[cfg(not(feature = "gpu"))] let gpu_name_postfix = ""; #[cfg(not(feature = "gpu"))] let _ = gpu_device; debug!( "building HNSW for {total_vector_count} vectors with {} CPUs{gpu_name_postfix}", permit.num_cpus, ); let num_entries = std::cmp::max( 1, total_vector_count .checked_div(full_scan_threshold) .unwrap_or(0) * 10, ); let mut graph_layers_builder = GraphLayersBuilder::new( total_vector_count, HnswM::new(config.m, config.m0), config.ef_construct, num_entries, HNSW_USE_HEURISTIC, ); let pool = rayon::ThreadPoolBuilder::new() .thread_name(|idx| format!("hnsw-build-{idx}")) .num_threads(permit.num_cpus as usize) .spawn_handler(|thread| { let mut b = thread::Builder::new(); if let Some(name) = thread.name() { b = b.name(name.to_owned()); } if let Some(stack_size) = thread.stack_size() { b = b.stack_size(stack_size); } b.spawn(|| { // On Linux, use lower thread priority so we interfere less with serving traffic #[cfg(target_os = "linux")] if let Err(err) = linux_low_thread_priority() { log::debug!( "Failed to set low thread priority for HNSW building, ignoring: {err}" ); } thread.run() })?; Ok(()) }) .build()?; let old_index = old_index.map(|old_index| old_index.reuse(total_vector_count)); let mut indexed_vectors = 0; for vector_id in id_tracker_ref.iter_internal_excluding(deleted_bitslice) { check_process_stopped(stopped)?; indexed_vectors += 1; let level = old_index .as_ref() .and_then(|old_index| old_index.point_level(vector_id)) .unwrap_or_else(|| graph_layers_builder.get_random_layer(rng)); graph_layers_builder.set_levels(vector_id, level); } // Try to build the main graph on GPU if possible. // Store created gpu vectors to reuse them for payload links. 
#[cfg(feature = "gpu")] let gpu_vectors = if build_main_graph { let timer = std::time::Instant::now(); let gpu_vectors = Self::create_gpu_vectors( gpu_device, &vector_storage_ref, &quantized_vectors_ref, stopped, )?; if let Some(gpu_constructed_graph) = Self::build_main_graph_on_gpu( id_tracker_ref.deref(), &vector_storage_ref, &quantized_vectors_ref, gpu_vectors.as_ref(), &graph_layers_builder, deleted_bitslice, num_entries, stopped, )? { graph_layers_builder = gpu_constructed_graph; build_main_graph = false; debug!("{FINISH_MAIN_GRAPH_LOG_MESSAGE} {:?}", timer.elapsed()); } gpu_vectors } else { None }; if build_main_graph { let progress_main_graph = progress_main_graph.unwrap(); let progress_migrate = progress_migrate.unwrap(); let mut ids = Vec::with_capacity(total_vector_count); let mut first_few_ids = Vec::with_capacity(SINGLE_THREADED_HNSW_BUILD_THRESHOLD); let mut ids_iter = id_tracker_ref.iter_internal_excluding(deleted_bitslice); if let Some(old_index) = old_index { progress_migrate.start(); let timer = std::time::Instant::now(); let mut healer = GraphLayersHealer::new( old_index.graph(), &old_index.old_to_new, config.ef_construct, ); let old_vector_storage = old_index.index.vector_storage.borrow(); let old_quantized_vectors = old_index.index.quantized_vectors.borrow(); healer.heal(&pool, &old_vector_storage, old_quantized_vectors.as_ref())?; healer.save_into_builder(&graph_layers_builder); for vector_id in ids_iter { if old_index.new_to_old[vector_id as usize].is_none() { if first_few_ids.len() < SINGLE_THREADED_HNSW_BUILD_THRESHOLD { first_few_ids.push(vector_id); } else { ids.push(vector_id); } } } debug!("Migrated in {:?}", timer.elapsed()); } else { first_few_ids.extend(ids_iter.by_ref().take(SINGLE_THREADED_HNSW_BUILD_THRESHOLD)); ids.extend(ids_iter); } drop(progress_migrate); let timer = std::time::Instant::now(); progress_main_graph.start(); let counter = progress_main_graph .track_progress(Some(first_few_ids.len() as u64 + ids.len() as u64)); let 
counter = counter.deref(); let insert_point = |vector_id| { check_process_stopped(stopped)?; // No need to accumulate hardware, since this is an internal operation let internal_hardware_counter = HardwareCounterCell::disposable(); let points_scorer = FilteredScorer::new_internal( vector_id, vector_storage_ref.deref(), quantized_vectors_ref.as_ref(), None, id_tracker_ref.deleted_point_bitslice(), internal_hardware_counter, )?; graph_layers_builder.link_new_point(vector_id, points_scorer); counter.fetch_add(1, Ordering::Relaxed); Ok::<_, OperationError>(()) }; for vector_id in first_few_ids { insert_point(vector_id)?; } if !ids.is_empty() { pool.install(|| ids.into_par_iter().try_for_each(insert_point))?; } drop(progress_main_graph); debug!("{FINISH_MAIN_GRAPH_LOG_MESSAGE} {:?}", timer.elapsed()); } else { drop(old_index); } if let Some((progress_additional_links, indexed_fields)) = additional_links_params { progress_additional_links.start(); // Calculate true average number of links per vertex in the HNSW graph // to better estimate percolation threshold let average_links_per_0_level = graph_layers_builder.get_average_connectivity_on_level(0); let average_links_per_0_level_int = (average_links_per_0_level as usize).max(1); // Estimate connectivity of the main graph let all_points = id_tracker_ref .iter_internal_excluding(deleted_bitslice) .collect::<Vec<_>>(); // According to percolation theory, random graph becomes disconnected // if 1/K points are left, where K is average number of links per point // So we need to sample connectivity relative to this bifurcation point, but // not exactly at 1/K, as at this point graph is very sensitive to noise. // // Instead, we choose sampling point at 2/K, which expects graph to still be // mostly connected, but still have some measurable disconnected components. let percolation = 1. - 2. 
/ (average_links_per_0_level_int as f32); let required_connectivity = if average_links_per_0_level_int >= 4 { let global_graph_connectivity = [ graph_layers_builder.subgraph_connectivity(&all_points, percolation), graph_layers_builder.subgraph_connectivity(&all_points, percolation), graph_layers_builder.subgraph_connectivity(&all_points, percolation), ]; debug!("graph connectivity: {global_graph_connectivity:?} @ {percolation}"); global_graph_connectivity .iter() .copied() .max_by(|a, b| a.partial_cmp(b).unwrap()) } else { // Main graph is too small to estimate connectivity, // we can't shortcut sub-graph building None }; let mut indexed_vectors_set = if config.m != 0 { // Every vector is already indexed in the main graph, so skip counting. BitVec::new() } else { BitVec::repeat(false, total_vector_count) }; let visited_pool = VisitedPool::new(); let mut block_filter_list = visited_pool.get(total_vector_count); #[cfg(feature = "gpu")] let mut gpu_insert_context = if let Some(gpu_vectors) = gpu_vectors.as_ref() { Some(GpuInsertContext::new( gpu_vectors, get_gpu_groups_count(), payload_m, config.ef_construct, false, 1..=GPU_MAX_VISITED_FLAGS_FACTOR, )?) } else { None }; #[cfg(not(feature = "gpu"))] let mut gpu_insert_context = None; for (index_pos, (field_progress, field)) in indexed_fields.into_iter().enumerate() { field_progress.start(); debug!("building additional index for field {}", &field); let is_tenant = payload_index_ref.is_tenant(&field); // It is expected, that graph will become disconnected less than // $1/m$ points left. // So blocks larger than $1/m$ are not needed. // We add multiplier for the extra safety. 
let percolation_multiplier = 4; let max_block_size = if config.m > 0 { total_vector_count / average_links_per_0_level_int * percolation_multiplier } else { usize::MAX }; let counter = field_progress.track_progress(None); for payload_block in payload_index_ref.payload_blocks(&field, full_scan_threshold) { check_process_stopped(stopped)?; if payload_block.cardinality > max_block_size { continue; } let points_to_index = Self::condition_points( payload_block.condition, id_tracker_ref.deref(), &payload_index_ref, &vector_storage_ref, stopped, ); if !is_tenant && index_pos > 0 && let Some(required_connectivity) = required_connectivity { // Always build for tenants let graph_connectivity = graph_layers_builder .subgraph_connectivity(&points_to_index, percolation); if graph_connectivity >= required_connectivity { trace!( "skip building additional HNSW links for {field}, connectivity {graph_connectivity:.4} >= {required_connectivity:.4}" ); continue; } trace!("graph connectivity: {graph_connectivity} for {field}"); } // ToDo: reuse graph layer for same payload let mut additional_graph = GraphLayersBuilder::new_with_params( total_vector_count, payload_m, config.ef_construct, 1, HNSW_USE_HEURISTIC, false, ); Self::build_filtered_graph( id_tracker_ref.deref(), &vector_storage_ref, &quantized_vectors_ref, &mut gpu_insert_context, &payload_index_ref, &pool, stopped, &mut additional_graph, points_to_index, &mut block_filter_list, &mut indexed_vectors_set, &counter, )?; graph_layers_builder.merge_from_other(additional_graph); } } let indexed_payload_vectors = indexed_vectors_set.count_ones(); debug_assert!(indexed_vectors >= indexed_payload_vectors || config.m == 0); indexed_vectors = indexed_vectors.max(indexed_payload_vectors); debug_assert!(indexed_payload_vectors <= total_vector_count); } else { debug!("skip building additional HNSW links"); } config.indexed_vector_count.replace(indexed_vectors); // Always skip loading graph to RAM on build // as it will be discarded anyway 
let is_on_disk = true; let graph_links_vectors = hnsw_config .inline_storage .unwrap_or_default() .then(|| { // NOTE: the configuration is silently ignored if try_new fails. StorageGraphLinksVectors::try_new( &vector_storage_ref, quantized_vectors_ref.as_ref(), ) }) .flatten(); let format_param = match graph_links_vectors.as_ref() { Some(v) => GraphLinksFormatParam::CompressedWithVectors(v), None => GraphLinksFormatParam::Compressed, }; let graph: GraphLayers = graph_layers_builder.into_graph_layers(path, format_param, is_on_disk)?; #[cfg(debug_assertions)] { for (idx, deleted) in deleted_bitslice.iter().enumerate() { if *deleted { graph.links.for_each_link(idx as PointOffsetType, 0, |_| { panic!("Deleted point in the graph"); }); } } } debug!("finish additional payload field indexing"); config.save(&HnswGraphConfig::get_config_path(path))?; drop(id_tracker_ref); drop(vector_storage_ref); drop(quantized_vectors_ref); drop(payload_index_ref); Ok(HNSWIndex { id_tracker, vector_storage, quantized_vectors, payload_index, config, path: path.to_owned(), graph, searches_telemetry: HNSWSearchesTelemetry::new(), is_on_disk, }) } /// Get list of points for indexing, associated with payload block filtering condition fn condition_points( condition: FieldCondition, id_tracker: &IdTrackerSS, payload_index: &StructPayloadIndex, vector_storage: &VectorStorageEnum, stopped: &AtomicBool, ) -> Vec<PointOffsetType> { let filter = Filter::new_must(Field(condition)); let disposed_hw_counter = HardwareCounterCell::disposable(); // Internal operation. 
No measurements needed let deleted_bitslice = vector_storage.deleted_vector_bitslice(); let cardinality_estimation = payload_index.estimate_cardinality(&filter, &disposed_hw_counter); payload_index .iter_filtered_points( &filter, id_tracker, &cardinality_estimation, &disposed_hw_counter, stopped, ) .filter(|&point_id| !deleted_bitslice.get_bit(point_id as usize).unwrap_or(false)) .collect() } #[allow(clippy::too_many_arguments)] #[allow(unused_variables)] #[allow(clippy::needless_pass_by_ref_mut)] fn build_filtered_graph( id_tracker: &IdTrackerSS, vector_storage: &VectorStorageEnum, quantized_vectors: &Option<QuantizedVectors>, #[allow(unused_variables)] gpu_insert_context: &mut Option<GpuInsertContext<'_>>, payload_index: &StructPayloadIndex, pool: &ThreadPool, stopped: &AtomicBool, graph_layers_builder: &mut GraphLayersBuilder, points_to_index: Vec<PointOffsetType>, block_filter_list: &mut VisitedListHandle, indexed_vectors_set: &mut BitVec, counter: &AtomicU64, ) -> OperationResult<()> { block_filter_list.next_iteration(); for block_point_id in points_to_index.iter().copied() { block_filter_list.check_and_update_visited(block_point_id); if !indexed_vectors_set.is_empty() { indexed_vectors_set.set(block_point_id as usize, true); } } #[cfg(feature = "gpu")] if let Some(gpu_constructed_graph) = Self::build_filtered_graph_on_gpu( id_tracker, vector_storage, quantized_vectors, gpu_insert_context.as_mut(), graph_layers_builder, block_filter_list, &points_to_index, stopped, )? 
{ *graph_layers_builder = gpu_constructed_graph; return Ok(()); } let insert_points = |block_point_id| { check_process_stopped(stopped)?; // This hardware counter can be discarded, since it is only used for internal operations let internal_hardware_counter = HardwareCounterCell::disposable(); let block_condition_checker = BuildConditionChecker { filter_list: block_filter_list, current_point: block_point_id, }; let points_scorer = FilteredScorer::new_internal( block_point_id, vector_storage, quantized_vectors.as_ref(), Some(BoxCow::Borrowed(&block_condition_checker)), id_tracker.deleted_point_bitslice(), internal_hardware_counter, )?; graph_layers_builder.link_new_point(block_point_id, points_scorer); counter.fetch_add(1, Ordering::Relaxed); Ok::<_, OperationError>(()) }; let first_points = points_to_index .len() .min(SINGLE_THREADED_HNSW_BUILD_THRESHOLD); // First index points in single thread so ensure warm start for parallel indexing process for point_id in points_to_index[..first_points].iter().copied() { insert_points(point_id)?; } // Once initial structure is built, index remaining points in parallel // So that each thread will insert points in different parts of the graph, // it is less likely that they will compete for the same locks if points_to_index.len() > first_points { pool.install(|| { points_to_index .into_par_iter() .skip(first_points) .try_for_each(insert_points) })?; } Ok(()) } #[cfg(feature = "gpu")] #[allow(clippy::too_many_arguments)] fn build_main_graph_on_gpu( id_tracker: &IdTrackerSS, vector_storage: &VectorStorageEnum, quantized_vectors: &Option<QuantizedVectors>, gpu_vectors: Option<&GpuVectorStorage>, graph_layers_builder: &GraphLayersBuilder, deleted_bitslice: &BitSlice, entry_points_num: usize, stopped: &AtomicBool, ) -> OperationResult<Option<GraphLayersBuilder>> { let points_scorer_builder = |vector_id| { let hardware_counter = HardwareCounterCell::disposable(); FilteredScorer::new_internal( vector_id, vector_storage, 
quantized_vectors.as_ref(), None, id_tracker.deleted_point_bitslice(), hardware_counter, ) };
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/search_context.rs
lib/segment/src/index/hnsw_index/search_context.rs
use std::collections::BinaryHeap; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::{ScoreType, ScoredPointOffset}; use num_traits::float::FloatCore; /// Structure that holds context of the search pub struct SearchContext { /// Overall nearest points found so far pub nearest: FixedLengthPriorityQueue<ScoredPointOffset>, /// Current candidates to process pub candidates: BinaryHeap<ScoredPointOffset>, } impl SearchContext { pub fn new(ef: usize) -> Self { SearchContext { nearest: FixedLengthPriorityQueue::new(ef), candidates: BinaryHeap::new(), } } pub fn lower_bound(&self) -> ScoreType { match self.nearest.top() { None => ScoreType::min_value(), Some(worst_of_the_best) => worst_of_the_best.score, } } /// Updates search context with new scored point. /// If it is closer than existing - also add it to candidates for further search pub fn process_candidate(&mut self, score_point: ScoredPointOffset) { let was_added = match self.nearest.push(score_point) { None => true, Some(removed) => removed.idx != score_point.idx, }; if was_added { self.candidates.push(score_point); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/point_scorer.rs
lib/segment/src/index/hnsw_index/point_scorer.rs
use std::sync::atomic::AtomicBool; use bitvec::slice::BitSlice; use common::counter::hardware_counter::HardwareCounterCell; use common::cow::BoxCow; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::{PointOffsetType, ScoreType, ScoredPointOffset}; use smallvec::SmallVec; use crate::common::operation_error::{CancellableResult, OperationResult, check_process_stopped}; use crate::data_types::vectors::QueryVector; use crate::payload_storage::FilterContext; use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE; use crate::vector_storage::quantized::quantized_query_scorer::InternalScorerUnsupported; use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors; use crate::vector_storage::query_scorer::QueryScorerBytes; use crate::vector_storage::{ Random, RawScorer, VectorStorage, VectorStorageEnum, check_deleted_condition, new_raw_scorer, }; /// Scorers composition: /// /// ```plaintext /// Metric /// ┌─────────────┐ /// │ - Cosine │ /// FilteredScorer RawScorer QueryScorer │ - Dot │ /// ┌─────────────────┐ ┌───────────────┐ ┌────────────────┐ ┌─┤ - Euclidean │ /// │ RawScorer ◄─────┼─┤ QueryScorer ◄─┼───│ Metric ◄───────┼─┘ └─────────────┘ /// │ │ └───────────────┘ │ │ - Vector Distance /// │ FilterContext │ - Access patterns │ Query ◄───────┼─┐ /// │ │ │ │ │ Query /// │ deleted_points │ │ TVectorStorage │ │ ┌──────────────────┐ /// │ deleted_vectors │ └────────────────┘ └─┤ - RecoQuery │ /// └─────────────────┘ │ - DiscoveryQuery │ /// │ - ContextQuery │ /// └──────────────────┘ /// - Scoring logic /// - Complex queries /// ``` /// /// The `BatchFilteredSearcher` contains an array of `RawScorer`s, a common filter and certain parameters. 
/// /// ```plaintext /// BatchFilteredSearcher RawScorer /// ┌─────────────────┐ ┌───────────────┐ /// │ [RawScorer] ◄───┼──┤ QueryScorer ◄─┼── (ditto) /// │ │ └───────────────┘ /// │ FilterContext │ /// └─────────────────┘ /// ``` pub struct FilteredScorer<'a> { raw_scorer: Box<dyn RawScorer + 'a>, filters: ScorerFilters<'a>, /// Temporary buffer for scores. scores_buffer: Vec<ScoreType>, } pub struct ScorerFilters<'a> { filter_context: Option<BoxCow<'a, dyn FilterContext + 'a>>, /// Point deleted flags should be explicitly present as `false` /// for each existing point in the segment. /// If there are no flags for some points, they are considered deleted. /// [`BitSlice`] defining flags for deleted points (and thus these vectors). point_deleted: &'a BitSlice, /// [`BitSlice`] defining flags for deleted vectors in this segment. vec_deleted: &'a BitSlice, } impl<'a> ScorerFilters<'a> { /// Return true if vector satisfies current search context for given point: /// exists, not deleted, and satisfies filter context. pub fn check_vector(&self, point_id: PointOffsetType) -> bool { check_deleted_condition(point_id, self.vec_deleted, self.point_deleted) && self .filter_context .as_ref() .is_none_or(|f| f.check(point_id)) } fn as_borrowed(&'a self) -> Self { ScorerFilters { filter_context: self.filter_context.as_ref().map(BoxCow::as_borrowed), point_deleted: self.point_deleted, vec_deleted: self.vec_deleted, } } } pub struct FilteredBytesScorer<'a> { scorer_bytes: &'a dyn QueryScorerBytes, filters: ScorerFilters<'a>, } impl<'a> FilteredBytesScorer<'a> { pub fn score_points( &self, points: &mut Vec<(PointOffsetType, &[u8])>, limit: usize, ) -> impl Iterator<Item = ScoredPointOffset> { points.retain(|(point_id, _)| self.filters.check_vector(*point_id)); if limit != 0 { points.truncate(limit); } points.iter().map(|&(idx, bytes)| ScoredPointOffset { idx, score: self.scorer_bytes.score_bytes(bytes), }) } } impl<'a> FilteredScorer<'a> { /// Create a new filtered scorer. 
/// /// If present, `quantized_vectors` will be used for scoring, otherwise `vectors` will be used. pub fn new( query: QueryVector, vectors: &'a VectorStorageEnum, quantized_vectors: Option<&'a QuantizedVectors>, filter_context: Option<BoxCow<'a, dyn FilterContext + 'a>>, point_deleted: &'a BitSlice, hardware_counter: HardwareCounterCell, ) -> OperationResult<Self> { let raw_scorer = match quantized_vectors { Some(quantized_vectors) => quantized_vectors.raw_scorer(query, hardware_counter)?, None => new_raw_scorer(query, vectors, hardware_counter)?, }; Ok(FilteredScorer { raw_scorer, filters: ScorerFilters { filter_context, point_deleted, vec_deleted: vectors.deleted_vector_bitslice(), }, scores_buffer: Vec::new(), }) } pub fn new_internal( point_id: PointOffsetType, vectors: &'a VectorStorageEnum, quantized_vectors: Option<&'a QuantizedVectors>, filter_context: Option<BoxCow<'a, dyn FilterContext + 'a>>, point_deleted: &'a BitSlice, hardware_counter: HardwareCounterCell, ) -> OperationResult<Self> { // This is a fallback function, which is used if quantized vector storage // is not capable of reconstructing the query vector. let original_query_fn = || { let query = vectors.get_vector::<Random>(point_id); let query: QueryVector = query.as_vec_ref().into(); query }; let raw_scorer = match quantized_vectors { Some(quantized_vectors) => quantized_vectors .raw_internal_scorer(point_id, hardware_counter) .or_else(|InternalScorerUnsupported(hardware_counter)| { quantized_vectors.raw_scorer(original_query_fn(), hardware_counter) })?, None => { let query = original_query_fn(); new_raw_scorer(query, vectors, hardware_counter)? } }; Ok(FilteredScorer { raw_scorer, filters: ScorerFilters { filter_context, point_deleted, vec_deleted: vectors.deleted_vector_bitslice(), }, scores_buffer: Vec::new(), }) } /// Create a new filtered scorer for testing purposes. /// /// # Panics /// /// Panics if [`new_raw_scorer`] fails. 
#[cfg(feature = "testing")] pub fn new_for_test( vector: QueryVector, vector_storage: &'a VectorStorageEnum, point_deleted: &'a BitSlice, ) -> Self { FilteredScorer { raw_scorer: new_raw_scorer(vector, vector_storage, HardwareCounterCell::new()).unwrap(), filters: ScorerFilters { filter_context: None, point_deleted, vec_deleted: vector_storage.deleted_vector_bitslice(), }, scores_buffer: Vec::new(), } } pub fn raw_scorer(&self) -> &dyn RawScorer { self.raw_scorer.as_ref() } pub fn filters(&self) -> &ScorerFilters<'a> { &self.filters } /// Return [`FilteredBytesScorer`] if the underlying scorer supports it. pub fn scorer_bytes(&self) -> Option<FilteredBytesScorer<'_>> { Some(FilteredBytesScorer { scorer_bytes: self.raw_scorer.scorer_bytes()?, filters: self.filters.as_borrowed(), }) } /// Filters and calculates scores for the given slice of points IDs. /// /// For performance reasons this method mutates `point_ids`. /// /// # Arguments /// /// * `point_ids` - list of points to score. /// **Warning**: This input will be wrecked during the execution. /// * `limit` - limits the number of points to process after filtering. /// `0` means no limit. 
pub fn score_points( &mut self, point_ids: &mut Vec<PointOffsetType>, limit: usize, ) -> impl Iterator<Item = ScoredPointOffset> { point_ids.retain(|point_id| self.filters.check_vector(*point_id)); if limit != 0 { point_ids.truncate(limit); } self.score_points_unfiltered(point_ids) } pub fn score_points_unfiltered( &mut self, point_ids: &[PointOffsetType], ) -> impl Iterator<Item = ScoredPointOffset> { if self.scores_buffer.len() < point_ids.len() { self.scores_buffer.resize(point_ids.len(), 0.0); } self.raw_scorer .score_points(point_ids, &mut self.scores_buffer[..point_ids.len()]); std::iter::zip(point_ids, &self.scores_buffer) .map(|(&idx, &score)| ScoredPointOffset { idx, score }) } pub fn score_point(&self, point_id: PointOffsetType) -> ScoreType { self.raw_scorer.score_point(point_id) } pub fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType { self.raw_scorer.score_internal(point_a, point_b) } } // We keep each scorer with its queue to reduce allocations and improve data locality. struct BatchSearch<'a> { raw_scorer: Box<dyn RawScorer + 'a>, pq: FixedLengthPriorityQueue<ScoredPointOffset>, } pub struct BatchFilteredSearcher<'a> { scorer_batch: SmallVec<[BatchSearch<'a>; 1]>, filters: ScorerFilters<'a>, } impl<'a> BatchFilteredSearcher<'a> { /// Create a new batch filtered searcher. /// /// If present, `quantized_vectors` will be used for scoring, otherwise `vectors` will be used. 
pub fn new( queries: &[&QueryVector], vectors: &'a VectorStorageEnum, quantized_vectors: Option<&'a QuantizedVectors>, filter_context: Option<BoxCow<'a, dyn FilterContext + 'a>>, top: usize, point_deleted: &'a BitSlice, hardware_counter: HardwareCounterCell, ) -> OperationResult<Self> { let scorer_batch = queries .iter() .map(|&query| { let query = query.to_owned(); let hardware_counter = hardware_counter.fork(); let raw_scorer = match quantized_vectors { Some(quantized_vectors) => { quantized_vectors.raw_scorer(query, hardware_counter) } None => new_raw_scorer(query, vectors, hardware_counter), }; let pq = FixedLengthPriorityQueue::new(top); raw_scorer.map(|raw_scorer| BatchSearch { raw_scorer, pq }) }) .collect::<Result<_, _>>()?; let filters = ScorerFilters { filter_context, point_deleted, vec_deleted: vectors.deleted_vector_bitslice(), }; Ok(Self { scorer_batch, filters, }) } /// Create a new batched filtered searcher for testing purposes. /// /// # Panics /// /// Panics if [`new_raw_scorer`] fails. 
#[cfg(feature = "testing")] pub fn new_for_test( vectors: &[QueryVector], vector_storage: &'a VectorStorageEnum, point_deleted: &'a BitSlice, top: usize, ) -> Self { let scorer_batch = vectors .iter() .map(|vector| { let raw_scorer = new_raw_scorer( vector.to_owned(), vector_storage, HardwareCounterCell::new(), ) .unwrap(); BatchSearch { raw_scorer, pq: FixedLengthPriorityQueue::new(top), } }) .collect(); Self { scorer_batch, filters: ScorerFilters { filter_context: None, point_deleted, vec_deleted: vector_storage.deleted_vector_bitslice(), }, } } pub fn peek_top_all( self, is_stopped: &AtomicBool, ) -> CancellableResult<Vec<Vec<ScoredPointOffset>>> { let iter = self .filters .point_deleted .iter_zeros() .map(|p| p as PointOffsetType); self.peek_top_iter(iter, is_stopped) } pub fn peek_top_iter( mut self, mut points: impl Iterator<Item = PointOffsetType>, is_stopped: &AtomicBool, ) -> CancellableResult<Vec<Vec<ScoredPointOffset>>> { // Reuse the same buffer for all chunks, to avoid reallocation let mut chunk = [0; VECTOR_READ_BATCH_SIZE]; let mut scores_buffer = [0.0; VECTOR_READ_BATCH_SIZE]; loop { check_process_stopped(is_stopped)?; let mut chunk_size = 0; for point_id in &mut points { check_process_stopped(is_stopped)?; if !self.filters.check_vector(point_id) { continue; } chunk[chunk_size] = point_id; chunk_size += 1; if chunk_size == VECTOR_READ_BATCH_SIZE { break; } } if chunk_size == 0 { break; } // Switching the loops improves batching performance, but slightly degrades single-query performance. for BatchSearch { raw_scorer, pq } in &mut self.scorer_batch { raw_scorer.score_points(&chunk[..chunk_size], &mut scores_buffer[..chunk_size]); for i in 0..chunk_size { pq.push(ScoredPointOffset { idx: chunk[i], score: scores_buffer[i], }); } } } let results = self .scorer_batch .into_iter() .map(|BatchSearch { pq, .. }| pq.into_sorted_vec()) .collect(); Ok(results) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/build_cache.rs
lib/segment/src/index/hnsw_index/build_cache.rs
use std::cmp::{max, min}; use std::hash::{Hash, Hasher}; use common::types::{PointOffsetType, ScoreType}; use seahash::SeaHasher; #[derive(Hash, Eq, PartialEq, Clone, Debug)] struct PointPair { a: PointOffsetType, b: PointOffsetType, } impl PointPair { pub fn new(a: PointOffsetType, b: PointOffsetType) -> Self { PointPair { a: min(a, b), b: max(a, b), } } } #[derive(Clone, Debug)] struct CacheObj { points: PointPair, value: ScoreType, } #[allow(dead_code)] #[derive(Debug)] pub struct DistanceCache { cache: Vec<Option<CacheObj>>, pub hits: usize, pub misses: usize, } #[allow(dead_code)] impl DistanceCache { fn hasher() -> impl Hasher { SeaHasher::new() } pub fn new(size: usize) -> Self { let mut cache = Vec::with_capacity(size); cache.resize(size, None); DistanceCache { cache, hits: 0, misses: 0, } } pub fn get(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> Option<ScoreType> { let points = PointPair::new(point_a, point_b); let mut s = DistanceCache::hasher(); points.hash(&mut s); let idx = s.finish() as usize % self.cache.len(); self.cache[idx].as_ref().and_then(|x| { if x.points == points { Some(x.value) } else { None } }) } pub fn put(&mut self, point_a: PointOffsetType, point_b: PointOffsetType, value: ScoreType) { let points = PointPair::new(point_a, point_b); let mut s = DistanceCache::hasher(); points.hash(&mut s); let idx = s.finish() as usize % self.cache.len(); self.cache[idx] = Some(CacheObj { points, value }); } } impl Default for DistanceCache { fn default() -> Self { DistanceCache::new(0) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_cache() { let mut cache = DistanceCache::new(1000); cache.put(100, 10, 0.8); cache.put(10, 101, 0.7); cache.put(10, 110, 0.1); assert_eq!(cache.get(12, 99), None); assert_eq!(cache.get(10, 100), Some(0.8)); assert_eq!(cache.get(10, 101), Some(0.7)); } #[test] fn test_collision() { let mut cache = DistanceCache::new(1); cache.put(1, 2, 0.8); cache.put(3, 4, 0.7); assert_eq!(cache.get(1, 2), None); 
assert_eq!(cache.get(2, 1), None); assert_eq!(cache.get(4, 3), Some(0.7)); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_layers.rs
lib/segment/src/index/hnsw_index/graph_layers.rs
//! # Search on level functions //! //! This module contains multiple variations of the SEARCH-LAYER function. //! All of them implement a beam (greedy) search for closest points within a //! single graph layer. //! //! - [`GraphLayersBase::search_on_level`] //! Regular search, as described in the original HNSW paper. //! Usually used on layer 0. //! //! - [`GraphLayersBase::search_on_level_acorn`] //! Variation of `search_on_level` that implements the ACORN-1 algorithm. //! Usually used on layer 0. //! //! - [`GraphLayersBase::search_entry_on_level`] //! Simplified version of `search_on_level` that uses beam size of 1. //! Usually used on all levels above level 0. //! //! - [`GraphLayersWithVectors::search_on_level_with_vectors`] //! Like `search_on_level`, but for graphs with [inline storage]. //! //! - [`GraphLayersWithVectors::search_entry_on_level_with_vectors`] //! Like `search_entry_on_level`, but for graphs with [inline storage]. //! //! [inline storage]: crate::types::HnswConfig::inline_storage use std::borrow::Cow; use std::cmp::max; use std::ops::ControlFlow; use std::path::{Path, PathBuf}; use std::sync::atomic::AtomicBool; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::{PointOffsetType, ScoredPointOffset}; use fs_err as fs; use io::file_operations::{atomic_save, read_bin}; use itertools::Itertools; use serde::{Deserialize, Serialize}; use super::HnswM; use super::entry_points::{EntryPoint, EntryPoints}; use super::graph_links::{GraphLinks, GraphLinksFormat}; use crate::common::operation_error::{ CancellableResult, OperationError, OperationResult, check_process_stopped, }; use crate::common::utils::rev_range; use crate::index::hnsw_index::graph_links::{GraphLinksFormatParam, serialize_graph_links}; use crate::index::hnsw_index::point_scorer::{FilteredBytesScorer, FilteredScorer, ScorerFilters}; use crate::index::hnsw_index::search_context::SearchContext; use crate::index::visited_pool::{VisitedListHandle, 
VisitedPool}; use crate::vector_storage::RawScorer; use crate::vector_storage::query_scorer::QueryScorerBytes; pub type LinkContainer = Vec<PointOffsetType>; pub type LayersContainer = Vec<LinkContainer>; pub const HNSW_GRAPH_FILE: &str = "graph.bin"; pub const HNSW_LINKS_FILE: &str = "links.bin"; pub const COMPRESSED_HNSW_LINKS_FILE: &str = "links_compressed.bin"; pub const COMPRESSED_WITH_VECTORS_HNSW_LINKS_FILE: &str = "links_comp_vec.bin"; /// Contents of the `graph.bin` file. #[derive(Deserialize, Serialize, Debug)] pub(super) struct GraphLayerData<'a> { pub(super) m: usize, pub(super) m0: usize, pub(super) ef_construct: usize, pub(super) entry_points: Cow<'a, EntryPoints>, } #[derive(Debug)] pub struct GraphLayers { pub(super) hnsw_m: HnswM, pub(super) links: GraphLinks, pub(super) entry_points: EntryPoints, pub(super) visited_pool: VisitedPool, } #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum SearchAlgorithm { Hnsw, Acorn, } pub trait GraphLayersBase { fn get_visited_list_from_pool(&self) -> VisitedListHandle<'_>; fn for_each_link<F>(&self, point_id: PointOffsetType, level: usize, f: F) where F: FnMut(PointOffsetType); fn try_for_each_link<F>( &self, point_id: PointOffsetType, level: usize, f: F, ) -> ControlFlow<(), ()> where F: FnMut(PointOffsetType) -> ControlFlow<(), ()>; /// Get M based on current level fn get_m(&self, level: usize) -> usize; /// Beam search for closest points within a single graph layer. /// /// See [module docs](self) for comparison with other search functions. 
fn search_on_level( &self, level_entry: ScoredPointOffset, level: usize, ef: usize, points_scorer: &mut FilteredScorer, is_stopped: &AtomicBool, ) -> CancellableResult<FixedLengthPriorityQueue<ScoredPointOffset>> { let mut visited_list = self.get_visited_list_from_pool(); visited_list.check_and_update_visited(level_entry.idx); let mut search_context = SearchContext::new(ef); search_context.process_candidate(level_entry); let limit = self.get_m(level); let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit); while let Some(candidate) = search_context.candidates.pop() { check_process_stopped(is_stopped)?; if candidate.score < search_context.lower_bound() { break; } points_ids.clear(); self.for_each_link(candidate.idx, level, |link| { if !visited_list.check(link) { points_ids.push(link); } }); points_scorer .score_points(&mut points_ids, limit) .for_each(|score_point| { search_context.process_candidate(score_point); visited_list.check_and_update_visited(score_point.idx); }); } Ok(search_context.nearest) } /// Variation of [`GraphLayersBase::search_on_level`] that implements the /// ACORN-1 algorithm. /// /// See [module docs](self) for comparison with other search functions. fn search_on_level_acorn( &self, level_entry: ScoredPointOffset, level: usize, ef: usize, points_scorer: &mut FilteredScorer, is_stopped: &AtomicBool, ) -> CancellableResult<FixedLengthPriorityQueue<ScoredPointOffset>> { // Each node in `hop1_visited_list` either: // a) Non-deleted node that going to be scored and added to // `search_context` for further expansion. (or already added) // b) Deleted node that scheduled for exploration for 2-hop neighbors. let mut hop1_visited_list = self.get_visited_list_from_pool(); hop1_visited_list.check_and_update_visited(level_entry.idx); // Nodes in `hop2_visited_list` are already explored as 2-hop neighbors. // Being in this list doesn't prevent the node to be handled again as // 1-hop neighbor. 
let mut hop2_visited_list = self.get_visited_list_from_pool(); let mut search_context = SearchContext::new(ef); search_context.process_candidate(level_entry); // Limits are per every explored 1-hop or 2-hop neighbors, not total. // This is necessary to avoid over-scoring when there are many // additional graph links. let hop1_limit = self.get_m(level); let hop2_limit = self.get_m(level); debug_assert_ne!(self.get_m(level), 0); // See `FilteredBytesScorer::score_points` let mut to_score = Vec::with_capacity(hop1_limit * hop2_limit.min(16)); let mut to_explore = Vec::with_capacity(hop1_limit * hop2_limit.min(16)); while let Some(candidate) = search_context.candidates.pop() { check_process_stopped(is_stopped)?; if candidate.score < search_context.lower_bound() { break; } to_explore.clear(); to_score.clear(); // Collect 1-hop neighbors (direct neighbors) _ = self.try_for_each_link(candidate.idx, level, |hop1| { if hop1_visited_list.check_and_update_visited(hop1) { return ControlFlow::Continue(()); } if points_scorer.filters().check_vector(hop1) { to_score.push(hop1); if to_score.len() >= hop1_limit { return ControlFlow::Break(()); } } else { to_explore.push(hop1); } ControlFlow::Continue(()) }); // Collect 2-hop neighbors (neighbors of neighbors) for &hop1 in to_explore.iter() { check_process_stopped(is_stopped)?; let total_limit = to_score.len() + hop2_limit; _ = self.try_for_each_link(hop1, level, |hop2| { if hop1_visited_list.check(hop2) || hop2_visited_list.check_and_update_visited(hop2) { return ControlFlow::Continue(()); } if points_scorer.filters().check_vector(hop2) { hop1_visited_list.check_and_update_visited(hop2); to_score.push(hop2); if to_score.len() >= total_limit { return ControlFlow::Break(()); } } ControlFlow::Continue(()) }); } points_scorer .score_points_unfiltered(&to_score) .for_each(|score_point| search_context.process_candidate(score_point)); } Ok(search_context.nearest) } /// Greedy searches for entry point of level `target_level`. 
/// Beam size is 1. fn search_entry( &self, entry_point: PointOffsetType, top_level: usize, target_level: usize, points_scorer: &mut FilteredScorer, is_stopped: &AtomicBool, ) -> CancellableResult<ScoredPointOffset> { let mut links_buffer = Vec::new(); let mut result = None; let mut level_entry = entry_point; for level in rev_range(top_level, target_level) { check_process_stopped(is_stopped)?; let search_result = self.search_entry_on_level(level_entry, level, points_scorer, &mut links_buffer); level_entry = search_result.idx; result = Some(search_result); } if let Some(result) = result { Ok(result) } else { // If no levels, return the entry point with it's score Ok(ScoredPointOffset { idx: entry_point, score: points_scorer.score_point(entry_point), }) } } /// Simplified version of `search_on_level` that uses beam size of 1. /// /// See [module docs](self) for comparison with other search functions. fn search_entry_on_level( &self, entry_point: PointOffsetType, level: usize, points_scorer: &mut FilteredScorer, // Temporary buffer for links to avoid unnecessary allocations. // 'links' is reused if `search_entry_on_level` is called multiple times. links: &mut Vec<PointOffsetType>, ) -> ScoredPointOffset { let limit = self.get_m(level); links.clear(); links.reserve(2 * self.get_m(0)); let mut changed = true; let mut current_point = ScoredPointOffset { idx: entry_point, score: points_scorer.score_point(entry_point), }; while changed { changed = false; links.clear(); self.for_each_link(current_point.idx, level, |link| { links.push(link); }); points_scorer .score_points(links, limit) .for_each(|score_point| { if score_point.score > current_point.score { changed = true; current_point = score_point; } }); } current_point } } pub trait GraphLayersWithVectors: GraphLayersBase { /// Returns `true` if the current graph format contains vectors. fn has_inline_vectors(&self) -> bool; /// # Panics /// /// Panics when using a format that does not support vectors. 
/// Check with [`Self::has_inline_vectors()`] before calling this method. fn links_with_vectors( &self, point_id: PointOffsetType, level: usize, ) -> (&[u8], impl Iterator<Item = (PointOffsetType, &[u8])> + '_); /// Similar to [`GraphLayersBase::search_on_level`]. /// /// See [module docs](self) for comparison with other search functions. fn search_on_level_with_vectors( &self, level_entry: ScoredPointOffset, level: usize, ef: usize, links_scorer: &FilteredBytesScorer, base_scorer: &dyn QueryScorerBytes, is_stopped: &AtomicBool, ) -> CancellableResult<FixedLengthPriorityQueue<ScoredPointOffset>> { let mut visited_list = self.get_visited_list_from_pool(); visited_list.check_and_update_visited(level_entry.idx); let mut links_search_context = SearchContext::new(ef); let mut base_search_context = SearchContext::new(ef); links_search_context.process_candidate(level_entry); let limit = self.get_m(level); let mut points: Vec<(PointOffsetType, &[u8])> = Vec::with_capacity(2 * limit); while let Some(candidate) = links_search_context.candidates.pop() { check_process_stopped(is_stopped)?; if candidate.score < links_search_context.lower_bound() { let (base_vector, _) = self.links_with_vectors(candidate.idx, level); base_search_context.process_candidate(ScoredPointOffset { idx: candidate.idx, score: base_scorer.score_bytes(base_vector), }); break; } points.clear(); let (base_vector, links_iter) = self.links_with_vectors(candidate.idx, level); links_iter.for_each(|(link, link_vector)| { if !visited_list.check(link) { points.push((link, link_vector)); } }); base_search_context.process_candidate(ScoredPointOffset { idx: candidate.idx, score: base_scorer.score_bytes(base_vector), }); links_scorer .score_points(&mut points, limit) .for_each(|score_point| { links_search_context.process_candidate(score_point); visited_list.check_and_update_visited(score_point.idx); }); } Ok(base_search_context.nearest) } /// Similar to [`GraphLayersBase::search_entry`]. 
fn search_entry_with_vectors( &self, entry_point: PointOffsetType, top_level: usize, target_level: usize, links_scorer_raw: &dyn RawScorer, links_scorer: &FilteredBytesScorer, is_stopped: &AtomicBool, ) -> CancellableResult<ScoredPointOffset> { let mut links_buffer = Vec::new(); let mut current_point = ScoredPointOffset { idx: entry_point, score: links_scorer_raw.score_point(entry_point), }; for level in rev_range(top_level, target_level) { check_process_stopped(is_stopped)?; current_point = self.search_entry_on_level_with_vectors( current_point, level, links_scorer, &mut links_buffer, ); } Ok(current_point) } /// Similar to [`GraphLayersBase::search_entry_on_level`]. /// /// See [module docs](self) for comparison with other search functions. fn search_entry_on_level_with_vectors<'a>( &'a self, entry_point: ScoredPointOffset, level: usize, links_scorer: &FilteredBytesScorer, links: &mut Vec<(PointOffsetType, &'a [u8])>, ) -> ScoredPointOffset { let limit = self.get_m(level); links.clear(); links.reserve(limit); let mut changed = true; let mut current_point = entry_point; while changed { changed = false; links.clear(); let (_, links_iter) = self.links_with_vectors(current_point.idx, level); links_iter.for_each(|(link, vector)| links.push((link, vector))); links_scorer .score_points(links, limit) .for_each(|score_point| { if score_point.score > current_point.score { changed = true; current_point = score_point; } }); } current_point } } impl GraphLayersBase for GraphLayers { fn get_visited_list_from_pool(&self) -> VisitedListHandle<'_> { self.visited_pool.get(self.links.num_points()) } fn for_each_link<F>(&self, point_id: PointOffsetType, level: usize, f: F) where F: FnMut(PointOffsetType), { self.links.links(point_id, level).for_each(f); } fn try_for_each_link<F>( &self, point_id: PointOffsetType, level: usize, f: F, ) -> ControlFlow<(), ()> where F: FnMut(PointOffsetType) -> ControlFlow<(), ()>, { self.links.links(point_id, level).try_for_each(f) } fn get_m(&self, 
level: usize) -> usize { self.hnsw_m.level_m(level) } } impl GraphLayersWithVectors for GraphLayers { fn has_inline_vectors(&self) -> bool { self.links.format().is_with_vectors() } fn links_with_vectors( &self, point_id: PointOffsetType, level: usize, ) -> (&[u8], impl Iterator<Item = (PointOffsetType, &[u8])> + '_) { self.links.links_with_vectors(point_id, level) } } /// Object contains links between nodes for HNSW search /// /// Assume all scores are similarities. Larger score = closer points impl GraphLayers { /// Returns the highest level this point is included in pub fn point_level(&self, point_id: PointOffsetType) -> usize { self.links.point_level(point_id) } fn get_entry_point( &self, filters: &ScorerFilters, custom_entry_points: Option<&[PointOffsetType]>, ) -> Option<EntryPoint> { // Try to get it from custom entry points custom_entry_points .and_then(|custom_entry_points| { custom_entry_points .iter() .filter(|&&point_id| filters.check_vector(point_id)) .map(|&point_id| { let level = self.point_level(point_id); EntryPoint { point_id, level } }) .max_by_key(|ep| ep.level) }) .or_else(|| { // Otherwise use normal entry points self.entry_points .get_entry_point(|point_id| filters.check_vector(point_id)) }) } pub fn search( &self, top: usize, ef: usize, algorithm: SearchAlgorithm, mut points_scorer: FilteredScorer, custom_entry_points: Option<&[PointOffsetType]>, is_stopped: &AtomicBool, ) -> CancellableResult<Vec<ScoredPointOffset>> { let Some(entry_point) = self.get_entry_point(points_scorer.filters(), custom_entry_points) else { return Ok(Vec::default()); }; let zero_level_entry = self.search_entry( entry_point.point_id, entry_point.level, 0, &mut points_scorer, is_stopped, )?; let ef = max(ef, top); let nearest = match algorithm { SearchAlgorithm::Hnsw => { self.search_on_level(zero_level_entry, 0, ef, &mut points_scorer, is_stopped) } SearchAlgorithm::Acorn => { self.search_on_level_acorn(zero_level_entry, 0, ef, &mut points_scorer, is_stopped) } }?; 
Ok(nearest.into_iter_sorted().take(top).collect_vec()) } #[allow(clippy::too_many_arguments)] pub fn search_with_vectors( &self, top: usize, ef: usize, links_scorer: &FilteredScorer, links_scorer_bytes: &FilteredBytesScorer, base_scorer: &dyn QueryScorerBytes, custom_entry_points: Option<&[PointOffsetType]>, is_stopped: &AtomicBool, ) -> CancellableResult<Vec<ScoredPointOffset>> { let Some(entry_point) = self.get_entry_point(links_scorer.filters(), custom_entry_points) else { return Ok(Vec::default()); }; let zero_level_entry = self.search_entry_with_vectors( entry_point.point_id, entry_point.level, 0, links_scorer.raw_scorer(), links_scorer_bytes, is_stopped, )?; let nearest = self.search_on_level_with_vectors( zero_level_entry, 0, max(top, ef), links_scorer_bytes, base_scorer, is_stopped, )?; Ok(nearest.into_iter_sorted().take(top).collect_vec()) } pub fn get_path(path: &Path) -> PathBuf { path.join(HNSW_GRAPH_FILE) } pub fn get_links_path(path: &Path, format: GraphLinksFormat) -> PathBuf { match format { GraphLinksFormat::Plain => path.join(HNSW_LINKS_FILE), GraphLinksFormat::Compressed => path.join(COMPRESSED_HNSW_LINKS_FILE), GraphLinksFormat::CompressedWithVectors => { path.join(COMPRESSED_WITH_VECTORS_HNSW_LINKS_FILE) } } } pub fn files(&self, path: &Path) -> Vec<PathBuf> { vec![ GraphLayers::get_path(path), GraphLayers::get_links_path(path, self.links.format()), ] } pub fn num_points(&self) -> usize { self.links.num_points() } } impl GraphLayers { pub fn load(dir: &Path, on_disk: bool, compress: bool) -> OperationResult<Self> { let graph_data: GraphLayerData = read_bin(&GraphLayers::get_path(dir))?; if compress { Self::convert_to_compressed(dir, HnswM::new(graph_data.m, graph_data.m0))?; } Ok(Self { hnsw_m: HnswM::new(graph_data.m, graph_data.m0), links: Self::load_links(dir, on_disk)?, entry_points: graph_data.entry_points.into_owned(), visited_pool: VisitedPool::new(), }) } fn load_links(dir: &Path, on_disk: bool) -> OperationResult<GraphLinks> { for 
format in [ GraphLinksFormat::CompressedWithVectors, GraphLinksFormat::Compressed, GraphLinksFormat::Plain, ] { let path = GraphLayers::get_links_path(dir, format); if path.exists() { return GraphLinks::load_from_file(&path, on_disk, format); } } Err(OperationError::service_error("No links file found")) } /// Convert the "plain" format into the "compressed" format. /// Note: conversion into the "compressed with vectors" format is not /// supported at the moment, though it is possible to implement. /// As far as [`super::hnsw::LINK_COMPRESSION_CONVERT_EXISTING`] is false, /// this code is not used in production. fn convert_to_compressed(dir: &Path, hnsw_m: HnswM) -> OperationResult<()> { let plain_path = Self::get_links_path(dir, GraphLinksFormat::Plain); let compressed_path = Self::get_links_path(dir, GraphLinksFormat::Compressed); let compressed_with_vectors_path = Self::get_links_path(dir, GraphLinksFormat::CompressedWithVectors); if compressed_path.exists() || compressed_with_vectors_path.exists() { return Ok(()); } let start = std::time::Instant::now(); let links = GraphLinks::load_from_file(&plain_path, true, GraphLinksFormat::Plain)?; let original_size = fs::metadata(&plain_path)?.len(); atomic_save(&compressed_path, |writer| { let edges = links.to_edges(); serialize_graph_links(edges, GraphLinksFormatParam::Compressed, hnsw_m, writer) })?; let new_size = fs::metadata(&compressed_path)?.len(); // Remove the original file fs::remove_file(&plain_path)?; log::debug!( "Compressed HNSW graph links in {:.1?}: {:.1}MB -> {:.1}MB ({:.1}%)", start.elapsed(), original_size as f64 / 1024.0 / 1024.0, new_size as f64 / 1024.0 / 1024.0, new_size as f64 / original_size as f64 * 100.0, ); Ok(()) } #[cfg(feature = "testing")] pub fn compress_ram(&mut self) { assert_eq!(self.links.format(), GraphLinksFormat::Plain); let dummy = GraphLinks::new_from_edges(Vec::new(), GraphLinksFormatParam::Plain, HnswM::new2(0)) .unwrap(); let links = std::mem::replace(&mut self.links, dummy); 
self.links = GraphLinks::new_from_edges( links.to_edges(), GraphLinksFormatParam::Compressed, self.hnsw_m, ) .unwrap(); } pub fn populate(&self) -> OperationResult<()> { self.links.populate()?; Ok(()) } } #[cfg(test)] mod tests { use rand::SeedableRng; use rand::rngs::StdRng; use rstest::rstest; use tempfile::Builder; use super::*; use crate::data_types::vectors::VectorElementType; use crate::fixtures::index_fixtures::{TestRawScorerProducer, random_vector}; use crate::index::hnsw_index::tests::{ create_graph_layer_builder_fixture, create_graph_layer_fixture, }; use crate::spaces::metric::Metric; use crate::spaces::simple::CosineMetric; use crate::types::Distance; use crate::vector_storage::{DEFAULT_STOPPED, VectorStorage}; fn search_in_graph( query: &[VectorElementType], top: usize, vector_storage: &TestRawScorerProducer, graph: &GraphLayers, ) -> Vec<ScoredPointOffset> { let scorer = vector_storage.scorer(query.to_owned()); let ef = 16; graph .search( top, ef, SearchAlgorithm::Hnsw, scorer, None, &DEFAULT_STOPPED, ) .unwrap() } const M: usize = 8; #[rstest] #[case::uncompressed(GraphLinksFormat::Plain)] #[case::compressed(GraphLinksFormat::Compressed)] #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)] fn test_search_on_level(#[case] format: GraphLinksFormat) { let dim = 8; let hnsw_m = HnswM::new2(8); let entry_points_num = 10; let num_vectors = 10; let mut rng = StdRng::seed_from_u64(42); let vector_holder = TestRawScorerProducer::new( dim, Distance::Dot, num_vectors, format.is_with_vectors(), &mut rng, ); let mut graph_links = vec![vec![Vec::new()]; num_vectors]; graph_links[0][0] = vec![1, 2, 3, 4, 5, 6]; let graph_links_vectors = vector_holder.graph_links_vectors(); let format_param = format.with_param_for_tests(graph_links_vectors.as_ref()); let graph_layers = GraphLayers { hnsw_m, links: GraphLinks::new_from_edges(graph_links.clone(), format_param, hnsw_m).unwrap(), entry_points: EntryPoints::new(entry_points_num), visited_pool: 
VisitedPool::new(), }; let linking_idx: PointOffsetType = 7; let mut scorer = vector_holder.internal_scorer(linking_idx); let nearest_on_level = graph_layers .search_on_level( ScoredPointOffset { idx: 0, score: scorer.score_point(0), }, 0, 32, &mut scorer, &DEFAULT_STOPPED, ) .unwrap(); assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1); for nearest in nearest_on_level.iter_unsorted() { // eprintln!("nearest = {:#?}", nearest); assert_eq!( nearest.score, scorer.score_internal(linking_idx, nearest.idx) ) } } #[rstest] #[case::uncompressed((GraphLinksFormat::Plain, false))] #[case::converted((GraphLinksFormat::Plain, true))] #[case::compressed((GraphLinksFormat::Compressed, false))] #[case::recompressed((GraphLinksFormat::Compressed, true))] #[case::compressed_with_vectors((GraphLinksFormat::CompressedWithVectors, false))] fn test_save_and_load(#[case] (initial_format, compress): (GraphLinksFormat, bool)) { let distance = Distance::Cosine; let num_vectors = 100; let dim = 8; let top = 5; let mut rng = StdRng::seed_from_u64(42); let dir = Builder::new().prefix("graph_dir").tempdir().unwrap(); let query = random_vector(&mut rng, dim); let (vector_holder, graph_layers_builder) = create_graph_layer_builder_fixture( num_vectors, M, dim, false, true, distance, &mut rng, ); let graph_links_vectors = vector_holder.graph_links_vectors(); let graph1 = graph_layers_builder .into_graph_layers( dir.path(), initial_format.with_param_for_tests(graph_links_vectors.as_ref()), true, ) .unwrap(); assert_eq!(graph1.links.format(), initial_format); let res1 = search_in_graph(&query, top, &vector_holder, &graph1); drop(graph1); let graph2 = GraphLayers::load(dir.path(), false, compress).unwrap(); if compress { assert_eq!(graph2.links.format(), GraphLinksFormat::Compressed); } else { assert_eq!(graph2.links.format(), initial_format); } let res2 = search_in_graph(&query, top, &vector_holder, &graph2); assert_eq!(res1, res2) } #[rstest] 
#[case::uncompressed(GraphLinksFormat::Plain)] #[case::compressed(GraphLinksFormat::Compressed)] #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)] fn test_add_points(#[case] format: GraphLinksFormat) { type M = CosineMetric; let distance = <M as Metric<VectorElementType>>::distance(); let num_vectors = 1000; let dim = 8; let mut rng = StdRng::seed_from_u64(42); let (vector_holder, graph_layers) = create_graph_layer_fixture( num_vectors, M, dim, format, false, format.is_with_vectors(), distance, &mut rng, ); let main_entry = graph_layers .entry_points .get_entry_point(|_x| true) .expect("Expect entry point to exists"); assert!(main_entry.level > 0); let num_levels = (0..num_vectors) .map(|i| graph_layers.links.point_level(i as PointOffsetType)) .max() .unwrap(); assert_eq!(main_entry.level, num_levels); let total_links_0 = (0..num_vectors) .map(|i| graph_layers.links.links(i as PointOffsetType, 0).len()) .sum::<usize>(); eprintln!("total_links_0 = {total_links_0:#?}"); eprintln!("num_vectors = {num_vectors:#?}"); assert!(total_links_0 > 0); assert!(total_links_0 as f64 / num_vectors as f64 > M as f64); let top = 5; let query = random_vector(&mut rng, dim); let processed_query = distance.preprocess_vector::<VectorElementType>(query.clone()); let scorer = vector_holder.scorer(processed_query); let mut reference_top = FixedLengthPriorityQueue::new(top); for idx in 0..vector_holder.storage().total_vector_count() as PointOffsetType { let score = scorer.score_point(idx); reference_top.push(ScoredPointOffset { idx, score }); } let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers); assert_eq!(reference_top.into_sorted_vec(), graph_search); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_layers_builder.rs
lib/segment/src/index/hnsw_index/graph_layers_builder.rs
use std::borrow::Cow; use std::cmp::{max, min}; use std::io::Write; use std::ops::ControlFlow; use std::path::Path; use std::sync::atomic::{AtomicBool, AtomicUsize}; use bitvec::prelude::BitVec; use common::ext::BitSliceExt; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::{PointOffsetType, ScoredPointOffset}; use io::file_operations::{atomic_save, atomic_save_bin}; use parking_lot::{Mutex, MutexGuard, RwLock}; use rand::Rng; use rand::distr::Uniform; use super::HnswM; use super::graph_layers::GraphLayerData; use super::graph_links::{GraphLinks, GraphLinksFormatParam}; use super::links_container::{ItemsBuffer, LinksContainer}; use crate::common::operation_error::OperationResult; use crate::index::hnsw_index::entry_points::EntryPoints; #[cfg(test)] use crate::index::hnsw_index::graph_layers::SearchAlgorithm; use crate::index::hnsw_index::graph_layers::{GraphLayers, GraphLayersBase}; use crate::index::hnsw_index::graph_links::serialize_graph_links; use crate::index::hnsw_index::point_scorer::FilteredScorer; use crate::index::visited_pool::{VisitedListHandle, VisitedPool}; pub type LockedLinkContainer = RwLock<LinksContainer>; pub type LockedLayersContainer = Vec<LockedLinkContainer>; /// Same as `GraphLayers`, but allows to build in parallel /// Convertible to `GraphLayers` pub struct GraphLayersBuilder { max_level: AtomicUsize, hnsw_m: HnswM, ef_construct: usize, // Factor of level probability level_factor: f64, // Exclude points according to "not closer than base" heuristic? 
use_heuristic: bool, links_layers: Vec<LockedLayersContainer>, entry_points: Mutex<EntryPoints>, // Fields used on construction phase only visited_pool: VisitedPool, // List of bool flags, which defines if the point is already indexed or not ready_list: BitVec<AtomicUsize>, } impl GraphLayersBase for GraphLayersBuilder { fn get_visited_list_from_pool(&self) -> VisitedListHandle<'_> { self.visited_pool.get(self.num_points()) } fn for_each_link<F>(&self, point_id: PointOffsetType, level: usize, mut f: F) where F: FnMut(PointOffsetType), { let links = self.links_layers[point_id as usize][level].read(); for link in links.iter() { if self.ready_list[link as usize] { f(link); } } } fn try_for_each_link<F>( &self, point_id: PointOffsetType, level: usize, mut f: F, ) -> ControlFlow<(), ()> where F: FnMut(PointOffsetType) -> ControlFlow<(), ()>, { let links = self.links_layers[point_id as usize][level].read(); for link in links.iter() { if self.ready_list[link as usize] { f(link)?; } } ControlFlow::Continue(()) } fn get_m(&self, level: usize) -> usize { self.hnsw_m.level_m(level) } } /// Budget of how many checks have to be done at minimum to consider subgraph-connectivity approximation correct. const SUBGRAPH_CONNECTIVITY_SEARCH_BUDGET: usize = 64; impl GraphLayersBuilder { pub fn get_entry_points(&self) -> MutexGuard<'_, EntryPoints> { self.entry_points.lock() } /// For a given sub-graph defined by points, returns connectivity estimation. /// How it works: /// - Select entry point, it would be a point with the highest level. If there are several, pick first one. /// - Start Breadth-First Search (BFS) from the entry point, on each edge flip a coin to decide if the edge is removed or not. /// - Count number of nodes reachable from the entry point. /// - Use visited points as entry points for the next layer below and repeat until layer 0 has reached. /// - Return the fraction of reachable nodes to the total number of nodes in the sub-graph. 
/// /// Coin probability `q` is a parameter of this function. By default, it is 0.5. pub fn subgraph_connectivity(&self, points: &[PointOffsetType], q: f32) -> f32 { if points.is_empty() { return 1.0; } let max_point_id = *points.iter().max().unwrap(); let mut visited: BitVec = BitVec::repeat(false, max_point_id as usize + 1); let mut point_selection: BitVec = BitVec::repeat(false, max_point_id as usize + 1); for point_id in points { point_selection.set(*point_id as usize, true); } let mut rnd = rand::rng(); // Try to get entry point from the entry points list // If not found, select the point with the highest level let entry_point = self .entry_points .lock() .get_random_entry_point(&mut rnd, |point_id| { point_selection.get_bit(point_id as usize).unwrap_or(false) }) .map(|ep| ep.point_id); // Select entry point by selecting the point with the highest level let entry_point = entry_point.unwrap_or_else(|| { points .iter() .max_by_key(|point_id| self.links_layers[**point_id as usize].len()) .cloned() .unwrap() }); let entry_layer = self.get_point_level(entry_point); let mut queue: Vec<u32> = vec![]; // Amount of points reached when searching the graph. let mut reached_points = 1; // Total points visited (also across retries). let mut spent_budget = 0; // Retry loop, in case some budget is left. loop { visited.set(entry_point as usize, true); // Points visited in the previous layer (Get used as entry point in the iteration over the next layer) let mut previous_visited_points = vec![entry_point]; // For each layer in HNSW lower than the entry point layer for current_layer in (0..=entry_layer).rev() { // Set entry points to visited points of previous layer. queue.extend_from_slice(&previous_visited_points); // Do BFS through all points on the current layer. 
while let Some(current_point) = queue.pop() { let links = self.links_layers[current_point as usize][current_layer].read(); for link in links.iter() { spent_budget += 1; // Flip a coin to decide if the edge is removed or not let coin_flip = rnd.random_range(0.0..1.0); if coin_flip < q { continue; } let is_selected = point_selection.get_bit(link as usize).unwrap_or(false); let is_visited = visited.get_bit(link as usize).unwrap_or(false); if !is_visited && is_selected { visited.set(link as usize, true); reached_points += 1; queue.push(link); previous_visited_points.push(link); } } } } // Budget exhausted, don't retry. if spent_budget > SUBGRAPH_CONNECTIVITY_SEARCH_BUDGET { break; } queue.clear(); reached_points = 1; // Reset reached points visited.fill(false); } reached_points as f32 / points.len() as f32 } pub fn into_graph_layers( self, path: &Path, format_param: GraphLinksFormatParam, on_disk: bool, ) -> OperationResult<GraphLayers> { let links_path = GraphLayers::get_links_path(path, format_param.as_format()); let edges = Self::links_layers_to_edges(self.links_layers); let links; if on_disk { // Save memory by serializing directly to disk, then re-loading as mmap. atomic_save(&links_path, |writer| { serialize_graph_links(edges, format_param, self.hnsw_m, writer) })?; links = GraphLinks::load_from_file(&links_path, true, format_param.as_format())?; } else { // Since we'll keep it in the RAM anyway, we can afford to build in the RAM too. 
links = GraphLinks::new_from_edges(edges, format_param, self.hnsw_m)?; atomic_save(&links_path, |writer| writer.write_all(links.as_bytes()))?; } let entry_points = self.entry_points.into_inner(); let data = GraphLayerData { m: self.hnsw_m.m, m0: self.hnsw_m.m0, ef_construct: self.ef_construct, entry_points: Cow::Borrowed(&entry_points), }; atomic_save_bin(&GraphLayers::get_path(path), &data)?; Ok(GraphLayers { hnsw_m: self.hnsw_m, links, entry_points, visited_pool: self.visited_pool, }) } #[cfg(feature = "testing")] pub fn into_graph_layers_ram(self, format_param: GraphLinksFormatParam<'_>) -> GraphLayers { let edges = Self::links_layers_to_edges(self.links_layers); GraphLayers { hnsw_m: self.hnsw_m, links: GraphLinks::new_from_edges(edges, format_param, self.hnsw_m).unwrap(), entry_points: self.entry_points.into_inner(), visited_pool: self.visited_pool, } } fn links_layers_to_edges(link_layers: Vec<LockedLayersContainer>) -> Vec<Vec<Vec<u32>>> { link_layers .into_iter() .map(|l| l.into_iter().map(|l| l.into_inner().into_vec()).collect()) .collect() } #[cfg(feature = "gpu")] pub fn hnsw_m(&self) -> HnswM { self.hnsw_m } #[cfg(feature = "gpu")] pub fn ef_construct(&self) -> usize { self.ef_construct } #[cfg(feature = "gpu")] pub fn links_layers(&self) -> &[LockedLayersContainer] { &self.links_layers } #[cfg(feature = "gpu")] pub fn fill_ready_list(&mut self) { self.ready_list.fill(true); } #[cfg(feature = "gpu")] pub fn set_ready(&mut self, point_id: PointOffsetType) -> bool { self.ready_list.replace(point_id as usize, true) } pub fn new_with_params( num_vectors: usize, // Initial number of points in index hnsw_m: HnswM, ef_construct: usize, entry_points_num: usize, // Depends on number of points use_heuristic: bool, reserve: bool, ) -> Self { let links_layers = std::iter::repeat_with(|| { let capacity = if reserve { hnsw_m.m0 } else { 0 }; vec![RwLock::new(LinksContainer::with_capacity(capacity))] }) .take(num_vectors) .collect(); let ready_list = 
BitVec::repeat(false, num_vectors); Self { max_level: AtomicUsize::new(0), hnsw_m, ef_construct, level_factor: 1.0 / (max(hnsw_m.m, 2) as f64).ln(), use_heuristic, links_layers, entry_points: Mutex::new(EntryPoints::new(entry_points_num)), visited_pool: VisitedPool::new(), ready_list, } } pub fn new( num_vectors: usize, // Initial number of points in index hnsw_m: HnswM, ef_construct: usize, entry_points_num: usize, // Depends on number of points use_heuristic: bool, ) -> Self { Self::new_with_params( num_vectors, hnsw_m, ef_construct, entry_points_num, use_heuristic, true, ) } pub fn merge_from_other(&mut self, other: GraphLayersBuilder) { self.max_level = AtomicUsize::new(max( self.max_level.load(std::sync::atomic::Ordering::Relaxed), other.max_level.load(std::sync::atomic::Ordering::Relaxed), )); let mut visited_list = self.visited_pool.get(self.num_points()); if other.links_layers.len() > self.links_layers.len() { self.links_layers .resize_with(other.links_layers.len(), Vec::new); } for (point_id, layers) in other.links_layers.into_iter().enumerate() { let current_layers = &mut self.links_layers[point_id]; for (level, other_links) in layers.into_iter().enumerate() { if current_layers.len() <= level { current_layers.push(other_links); } else { let other_links = other_links.into_inner(); visited_list.next_iteration(); let mut current_links = current_layers[level].write(); current_links.iter().for_each(|x| { visited_list.check_and_update_visited(x); }); for other_link in other_links .into_vec() .into_iter() .filter(|x| !visited_list.check_and_update_visited(*x)) { current_links.push(other_link); } } } } self.entry_points .lock() .merge_from_other(other.entry_points.into_inner()); } fn num_points(&self) -> usize { self.links_layers.len() } /// Generate random level for a new point, according to geometric distribution pub fn get_random_layer<R>(&self, rng: &mut R) -> usize where R: Rng + ?Sized, { let distribution = Uniform::new(0.0, 1.0).unwrap(); let sample: f64 = 
rng.sample(distribution); let picked_level = -sample.ln() * self.level_factor; picked_level.round() as usize } pub(crate) fn get_point_level(&self, point_id: PointOffsetType) -> usize { self.links_layers[point_id as usize].len() - 1 } pub fn set_levels(&mut self, point_id: PointOffsetType, level: usize) { if self.links_layers.len() <= point_id as usize { while self.links_layers.len() <= point_id as usize { self.links_layers.push(vec![]); } } let point_layers = &mut self.links_layers[point_id as usize]; while point_layers.len() <= level { let links = LinksContainer::with_capacity(self.hnsw_m.level_m(level)); point_layers.push(RwLock::new(links)); } self.max_level .fetch_max(level, std::sync::atomic::Ordering::Relaxed); } pub fn link_new_point(&self, point_id: PointOffsetType, mut points_scorer: FilteredScorer) { // Check if there is an suitable entry point // - entry point level if higher or equal // - it satisfies filters let level = self.get_point_level(point_id); let entry_point_opt = self .entry_points .lock() .get_entry_point(|point_id| points_scorer.filters().check_vector(point_id)); if let Some(entry_point) = entry_point_opt { let mut level_entry = if entry_point.level > level { // The entry point is higher than a new point // Let's find closest one on same level // greedy search for a single closest point self.search_entry( entry_point.point_id, entry_point.level, level, &mut points_scorer, &AtomicBool::new(false), ) .unwrap() } else { ScoredPointOffset { idx: entry_point.point_id, score: points_scorer.score_internal(point_id, entry_point.point_id), } }; // minimal common level for entry points let linking_level = min(level, entry_point.level); for curr_level in (0..=linking_level).rev() { level_entry = self.link_new_point_on_level( point_id, curr_level, &mut points_scorer, level_entry, ); } } else { // New point is a new empty entry (for this filter, at least) // We can't do much here, so just quit } debug_assert!( !self.ready_list[point_id as usize], 
"Point {point_id} was already marked as ready" ); self.ready_list.set_aliased(point_id as usize, true); self.entry_points .lock() .new_point(point_id, level, |point_id| { points_scorer.filters().check_vector(point_id) }); } /// Add a new point using pre-existing links. /// Mutually exclusive with [`Self::link_new_point`]. pub fn add_new_point( &self, point_id: PointOffsetType, links_by_level: Vec<Vec<PointOffsetType>>, ) { let level = self.get_point_level(point_id); debug_assert_eq!(links_by_level.len(), level + 1); for (level, neighbours) in links_by_level.iter().enumerate() { let mut links = self.links_layers[point_id as usize][level].write(); links.fill_from(neighbours.iter().copied()); } debug_assert!( !self.ready_list[point_id as usize], "Point {point_id} was already marked as ready" ); self.ready_list.set_aliased(point_id as usize, true); self.entry_points .lock() .new_point(point_id, level, |_| true); } /// Link a new point on a specific level. /// Returns an entry point for the level below. 
fn link_new_point_on_level( &self, point_id: PointOffsetType, curr_level: usize, points_scorer: &mut FilteredScorer, mut level_entry: ScoredPointOffset, ) -> ScoredPointOffset { let nearest = self .search_on_level( level_entry, curr_level, self.ef_construct, points_scorer, &AtomicBool::new(false), ) .unwrap(); if let Some(the_nearest) = nearest.iter_unsorted().max() { level_entry = *the_nearest; } if self.use_heuristic { self.link_with_heuristic(point_id, curr_level, points_scorer, nearest); } else { self.link_without_heuristic(point_id, curr_level, points_scorer, nearest); } level_entry } fn link_with_heuristic( &self, point_id: PointOffsetType, curr_level: usize, points_scorer: &FilteredScorer, nearest: FixedLengthPriorityQueue<ScoredPointOffset>, ) { let level_m = self.hnsw_m.level_m(curr_level); let scorer = |a, b| points_scorer.score_internal(a, b); let selected_nearest = { let iter = nearest.into_iter_sorted(); let mut existing_links = self.links_layers[point_id as usize][curr_level].write(); existing_links.fill_from_sorted_with_heuristic(iter, level_m, scorer); existing_links.links().to_vec() }; // Insert backlinks. 
let mut items = ItemsBuffer::default(); for &other_point in &selected_nearest { self.links_layers[other_point as usize][curr_level] .write() .connect_with_heuristic(point_id, other_point, level_m, scorer, &mut items); } } fn link_without_heuristic( &self, point_id: PointOffsetType, curr_level: usize, points_scorer: &FilteredScorer, nearest: FixedLengthPriorityQueue<ScoredPointOffset>, ) { let level_m = self.hnsw_m.level_m(curr_level); let scorer = |a, b| points_scorer.score_internal(a, b); for nearest_point in nearest.iter_unsorted() { { let mut links = self.links_layers[point_id as usize][curr_level].write(); links.connect(nearest_point.idx, point_id, level_m, scorer); } { let mut links = self.links_layers[nearest_point.idx as usize][curr_level].write(); links.connect(point_id, nearest_point.idx, level_m, scorer); } } } /// This function returns average number of links per node in HNSW graph /// on specified level. /// /// Useful for: /// - estimating memory consumption /// - percolation threshold estimation /// - debugging pub fn get_average_connectivity_on_level(&self, level: usize) -> f32 { let mut sum = 0; let mut count = 0; for links in self.links_layers.iter() { if links.len() > level { sum += links[level].read().links().len(); count += 1; } } if count == 0 { 0.0 } else { sum as f32 / count as f32 } } } #[cfg(test)] mod tests { use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use itertools::Itertools; use rand::SeedableRng; use rand::prelude::StdRng; use rstest::rstest; use super::*; use crate::fixtures::index_fixtures::{TestRawScorerProducer, random_vector}; use crate::index::hnsw_index::graph_links::{GraphLinksFormat, normalize_links}; use crate::index::hnsw_index::tests::create_graph_layer_fixture; use crate::types::Distance; use crate::vector_storage::{DEFAULT_STOPPED, VectorStorage as _}; const M: usize = 8; #[cfg(not(windows))] fn parallel_graph_build<R>( num_vectors: usize, dim: usize, use_heuristic: bool, use_quantization: bool, 
distance: Distance, rng: &mut R, ) -> (TestRawScorerProducer, GraphLayersBuilder) where R: Rng + ?Sized, { use rayon::prelude::{IntoParallelIterator, ParallelIterator}; let pool = rayon::ThreadPoolBuilder::new() .num_threads(2) .build() .unwrap(); let m = M; let ef_construct = 16; let entry_points_num = 10; let vector_holder = TestRawScorerProducer::new(dim, distance, num_vectors, use_quantization, rng); let mut graph_layers = GraphLayersBuilder::new( num_vectors, HnswM::new2(m), ef_construct, entry_points_num, use_heuristic, ); for idx in 0..(num_vectors as PointOffsetType) { let level = graph_layers.get_random_layer(rng); graph_layers.set_levels(idx, level); } pool.install(|| { (0..(num_vectors as PointOffsetType)) .into_par_iter() .for_each(|idx| { let scorer = vector_holder.internal_scorer(idx); graph_layers.link_new_point(idx, scorer); }); }); (vector_holder, graph_layers) } fn create_graph_layer<R>( num_vectors: usize, dim: usize, use_heuristic: bool, use_quantization: bool, distance: Distance, rng: &mut R, ) -> (TestRawScorerProducer, GraphLayersBuilder) where R: Rng + ?Sized, { let m = M; let ef_construct = 16; let entry_points_num = 10; let vector_holder = TestRawScorerProducer::new(dim, distance, num_vectors, use_quantization, rng); let mut graph_layers = GraphLayersBuilder::new( num_vectors, HnswM::new2(m), ef_construct, entry_points_num, use_heuristic, ); for idx in 0..(num_vectors as PointOffsetType) { let level = graph_layers.get_random_layer(rng); graph_layers.set_levels(idx, level); } for idx in 0..(num_vectors as PointOffsetType) { let scorer = vector_holder.internal_scorer(idx); graph_layers.link_new_point(idx, scorer); } (vector_holder, graph_layers) } #[cfg(not(windows))] // https://github.com/qdrant/qdrant/issues/1452 #[rstest] #[case::uncompressed(GraphLinksFormat::Plain)] #[case::compressed(GraphLinksFormat::Compressed)] #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)] fn test_parallel_graph_build(#[case] format: 
GraphLinksFormat) { let distance = Distance::Cosine; let num_vectors = 1000; let dim = 8; let mut rng = StdRng::seed_from_u64(42); // let (vector_holder, graph_layers_builder) = // create_graph_layer::<M, _>(num_vectors, dim, false, &mut rng); let (vector_holder, graph_layers_builder) = parallel_graph_build( num_vectors, dim, false, format.is_with_vectors(), distance, &mut rng, ); let main_entry = graph_layers_builder .entry_points .lock() .get_entry_point(|_x| true) .expect("Expect entry point to exists"); assert!(main_entry.level > 0); let num_levels = graph_layers_builder .links_layers .iter() .map(|x| x.len()) .max() .unwrap(); assert_eq!(main_entry.level + 1, num_levels); let total_links_0: usize = graph_layers_builder .links_layers .iter() .map(|x| x[0].read().links().len()) .sum(); assert!(total_links_0 > 0); eprintln!("total_links_0 = {total_links_0:#?}"); eprintln!("num_vectors = {num_vectors:#?}"); assert!(total_links_0 as f64 / num_vectors as f64 > M as f64); let top = 5; let query = random_vector(&mut rng, dim); let scorer = vector_holder.scorer(query.clone()); let mut reference_top = FixedLengthPriorityQueue::new(top); for idx in 0..vector_holder.storage().total_vector_count() as PointOffsetType { let score = scorer.score_point(idx); reference_top.push(ScoredPointOffset { idx, score }); } let graph = graph_layers_builder.into_graph_layers_ram( format.with_param_for_tests(vector_holder.graph_links_vectors().as_ref()), ); let scorer = vector_holder.scorer(query); let ef = 16; let graph_search = graph .search( top, ef, SearchAlgorithm::Hnsw, scorer, None, &DEFAULT_STOPPED, ) .unwrap(); assert_eq!(reference_top.into_sorted_vec(), graph_search); } #[rstest] #[case::uncompressed(GraphLinksFormat::Plain)] #[case::compressed(GraphLinksFormat::Compressed)] #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)] fn test_add_points(#[case] format: GraphLinksFormat) { let distance = Distance::Cosine; let num_vectors = 1000; let dim = 8; let mut 
rng = StdRng::seed_from_u64(42); let mut rng2 = StdRng::seed_from_u64(42); let (vector_holder, graph_layers_builder) = create_graph_layer( num_vectors, dim, false, format.is_with_vectors(), distance, &mut rng, ); let (_vector_holder_orig, graph_layers_orig) = create_graph_layer_fixture( num_vectors, M, dim, format, false, format.is_with_vectors(), distance, &mut rng2, ); // check is graph_layers_builder links are equal to graph_layers_orig let orig_len = graph_layers_orig.links.num_points(); let builder_len = graph_layers_builder.links_layers.len(); assert_eq!(orig_len, builder_len); for idx in 0..builder_len { let links_orig = &graph_layers_orig .links .links(idx as PointOffsetType, 0) .collect_vec(); let links_builder = graph_layers_builder.links_layers[idx][0].read(); let link_container_from_builder = links_builder.links().to_vec(); let m = match format { GraphLinksFormat::Plain => 0, GraphLinksFormat::Compressed | GraphLinksFormat::CompressedWithVectors => M * 2, }; assert_eq!( normalize_links(m, links_orig.clone()), normalize_links(m, link_container_from_builder), ); } let main_entry = graph_layers_builder .entry_points .lock() .get_entry_point(|_x| true) .expect("Expect entry point to exists"); assert!(main_entry.level > 0); let num_levels = graph_layers_builder .links_layers .iter() .map(|x| x.len()) .max() .unwrap(); assert_eq!(main_entry.level + 1, num_levels); let total_links_0: usize = graph_layers_builder .links_layers .iter() .map(|x| x[0].read().links().len()) .sum(); assert!(total_links_0 > 0); eprintln!("total_links_0 = {total_links_0:#?}"); eprintln!("num_vectors = {num_vectors:#?}"); assert!(total_links_0 as f64 / num_vectors as f64 > M as f64); let top = 5; let query = random_vector(&mut rng, dim); let scorer = vector_holder.scorer(query.clone()); let mut reference_top = FixedLengthPriorityQueue::new(top); for idx in 0..vector_holder.storage().total_vector_count() as PointOffsetType { let score = scorer.score_point(idx); 
reference_top.push(ScoredPointOffset { idx, score }); } let graph = graph_layers_builder.into_graph_layers_ram( format.with_param_for_tests(vector_holder.graph_links_vectors().as_ref()), ); let scorer = vector_holder.scorer(query); let ef = 16; let graph_search = graph .search( top, ef, SearchAlgorithm::Hnsw, scorer, None, &DEFAULT_STOPPED, ) .unwrap(); assert_eq!(reference_top.into_sorted_vec(), graph_search); } #[rstest] #[case::uncompressed(GraphLinksFormat::Plain)] #[case::compressed(GraphLinksFormat::Compressed)] #[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)] fn test_hnsw_graph_properties(#[case] format: GraphLinksFormat) { const NUM_VECTORS: usize = 5_000; const DIM: usize = 16; const M: usize = 16; const EF_CONSTRUCT: usize = 64; const USE_HEURISTIC: bool = true; let mut rng = StdRng::seed_from_u64(42); let vector_holder = TestRawScorerProducer::new( DIM, Distance::Cosine, NUM_VECTORS, format.is_with_vectors(), &mut rng, ); let mut graph_layers_builder = GraphLayersBuilder::new(NUM_VECTORS, HnswM::new2(M), EF_CONSTRUCT, 10, USE_HEURISTIC); for idx in 0..(NUM_VECTORS as PointOffsetType) { let scorer = vector_holder.internal_scorer(idx); let level = graph_layers_builder.get_random_layer(&mut rng); graph_layers_builder.set_levels(idx, level); graph_layers_builder.link_new_point(idx, scorer); } let graph_layers = graph_layers_builder.into_graph_layers_ram( format.with_param_for_tests(vector_holder.graph_links_vectors().as_ref()), ); let num_points = graph_layers.links.num_points(); eprintln!("number_points = {num_points:#?}"); let max_layer = (0..NUM_VECTORS) .map(|i| graph_layers.links.point_level(i as PointOffsetType)) .max() .unwrap(); eprintln!("max_layer = {:#?}", max_layer + 1); let layers910 = graph_layers.links.point_level(910); let links910 = (0..layers910 + 1) .map(|i| graph_layers.links.links(910, i).collect()) .collect::<Vec<Vec<_>>>(); eprintln!("graph_layers.links_layers[910] = {links910:#?}",); let total_edges: usize = 
(0..NUM_VECTORS) .map(|i| graph_layers.links.links(i as PointOffsetType, 0).len()) .sum();
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_layers_healer.rs
lib/segment/src/index/hnsw_index/graph_layers_healer.rs
use common::counter::hardware_counter::HardwareCounterCell;
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use common::types::{PointOffsetType, ScoredPointOffset};
use parking_lot::RwLock;
use rayon::ThreadPool;
use rayon::iter::{IntoParallelIterator as _, ParallelIterator as _};

use crate::common::operation_error::OperationResult;
use crate::index::hnsw_index::HnswM;
use crate::index::hnsw_index::graph_layers::GraphLayers;
use crate::index::hnsw_index::graph_layers_builder::{GraphLayersBuilder, LockedLayersContainer};
use crate::index::hnsw_index::links_container::{ItemsBuffer, LinksContainer};
use crate::index::visited_pool::VisitedPool;
use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors;
use crate::vector_storage::{Random, RawScorer, VectorStorage, VectorStorageEnum, new_raw_scorer};

/// Repairs an HNSW graph after points have been deleted.
///
/// Points whose links reference deleted neighbors are collected in `to_heal`;
/// for each of them, replacement "shortcut" links are searched through the
/// deleted sub-graph and merged back with the surviving links.
pub struct GraphLayersHealer<'a> {
    /// Per-point, per-level link containers copied from the source graph
    /// (each level truncated to its `level_m` link limit).
    links_layers: Vec<LockedLayersContainer>,
    /// `(point_id, level)` pairs that have at least one deleted neighbor.
    to_heal: Vec<(PointOffsetType, usize)>,
    /// Old offset -> new offset mapping; `None` marks a deleted point.
    old_to_new: &'a [Option<PointOffsetType>],
    hnsw_m: HnswM,
    /// Search breadth used when collecting shortcut candidates.
    ef_construct: usize,
    visited_pool: VisitedPool,
}

impl<'a> GraphLayersHealer<'a> {
    /// Snapshot the links of `graph_layers` and record which `(point, level)`
    /// pairs need healing (i.e. reference at least one deleted neighbor).
    pub fn new(
        graph_layers: &GraphLayers,
        old_to_new: &'a [Option<PointOffsetType>],
        ef_construct: usize,
    ) -> Self {
        let mut to_heal = Vec::new();
        let links_layers = {
            graph_layers.links.to_edges_impl(|point_id, level| {
                let level_m = graph_layers.hnsw_m.level_m(level);
                let mut container = LinksContainer::with_capacity(level_m);
                // Copy at most `level_m` links for this point/level.
                container.fill_from(graph_layers.links.links(point_id, level).take(level_m));
                // Any neighbor mapped to `None` is deleted -> schedule healing.
                if container
                    .iter()
                    .any(|neighbor| old_to_new[neighbor as usize].is_none())
                {
                    to_heal.push((point_id, level));
                }
                RwLock::new(container)
            })
        };
        Self {
            links_layers,
            to_heal,
            old_to_new,
            hnsw_m: graph_layers.hnsw_m,
            ef_construct,
            visited_pool: VisitedPool::new(),
        }
    }

    /// Whether `point` is deleted (has no new offset in `old_to_new`).
    fn point_deleted(&self, point: PointOffsetType) -> bool {
        self.old_to_new[point as usize].is_none()
    }

    /// Greedy search for non-deleted points accessible through deleted points.
    ///
    /// Unlike the regular search ([`search_on_level`]) which:
    /// - BFS (queue-based).
    /// - Deleted points are ignored.
    /// - Non-deleted points are added into the result AND the search queue.
    ///
    /// This method:
    /// - DFS (stack-based).
    /// - Deleted points are added into the search queue, but not into the
    ///   result.
    /// - Non-deleted points are added into the result, but not into the search
    ///   queue.
    ///
    /// In other words, we search in the scope of deleted points, but
    /// we want to use points on the border between deleted and non-deleted as candidates
    /// for the shortcut.
    ///
    /// [`search_on_level`]: crate::index::hnsw_index::graph_layers::GraphLayersBase::search_on_level
    fn search_shortcuts_on_level(
        &self,
        offset: PointOffsetType,
        level: usize,
        scorer: &dyn RawScorer,
    ) -> FixedLengthPriorityQueue<ScoredPointOffset> {
        let mut visited_list = self.visited_pool.get(self.links_layers.len());

        // Result of the search is stored here.
        let mut nearest = FixedLengthPriorityQueue::<ScoredPointOffset>::new(self.ef_construct);

        let limit = self.hnsw_m.level_m(level);
        let mut neighbours: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
        // Reused scratch buffer for batched scoring of `neighbours`.
        let mut scores_buffer = Vec::with_capacity(limit);

        // Candidates for the search stack.
        // ToDo: Try later, instead of using stack, we can use proper priority queue
        // ToDo: So that in the deleted sub-graph we can navigate towards the point with better scores
        let mut pending = Vec::new();

        // Find entry into "deleted" sub-graph, do not consider non-deleted neighbors
        // as they already connected to the "healing" point.
        visited_list.check_and_update_visited(offset);
        {
            let links = self.links_layers[offset as usize][level].read();
            for &point in links.links() {
                if !self.point_deleted(point) {
                    // Mark visited so the DFS below never re-reports it.
                    visited_list.check_and_update_visited(point);
                } else {
                    pending.push(ScoredPointOffset {
                        idx: point,
                        score: scorer.score_point(point),
                    });
                }
            }
        }

        // At this moment `pending` is initialized with at least one deleted point,
        // now we need to find borders of all "deleted" points sub-graphs
        while let Some(candidate) = pending.pop() {
            if nearest.is_full() && candidate.score < nearest.top().unwrap().score {
                // Stop the search branch early, if it is not promising
                continue;
            }
            if visited_list.check_and_update_visited(candidate.idx) {
                continue;
            }

            neighbours.clear();
            neighbours.extend(
                self.links_layers[candidate.idx as usize][level]
                    .read()
                    .links()
                    .iter()
                    .filter(|&&link| !visited_list.check(link)),
            );

            // Score all unvisited neighbours in one batched call.
            if scores_buffer.len() < neighbours.len() {
                scores_buffer.resize(neighbours.len(), 0.0);
            }
            scorer.score_points(&neighbours, &mut scores_buffer[..neighbours.len()]);

            for (&idx, &score) in neighbours.iter().zip(&scores_buffer) {
                if !self.point_deleted(idx) {
                    // This point is on the "border", as it is reachable from the deleted
                    // And is not deleted itself
                    nearest.push(ScoredPointOffset { idx, score });
                } else {
                    // This is just another deleted point
                    pending.push(ScoredPointOffset { idx, score });
                }
            }
        }

        nearest
    }

    /// Rebuild the links of one `(offset, level)` pair: keep surviving links,
    /// fill the freed slots with heuristic-selected shortcut candidates, and
    /// insert the corresponding backlinks.
    fn heal_point_on_level(&self, offset: PointOffsetType, level: usize, scorer: &dyn RawScorer) {
        let level_m = self.hnsw_m.level_m(level);

        // Get current links and filter out deleted ones
        let mut valid_links = Vec::with_capacity(level_m);
        valid_links.extend(
            self.links_layers[offset as usize][level]
                .read()
                .links()
                .iter()
                .filter(|&&idx| !self.point_deleted(idx)),
        );

        // First: generate list of candidates using shortcuts search
        let shortcuts = self.search_shortcuts_on_level(offset, level, scorer);

        // Second: process list of candidates with heuristic
        let mut container = LinksContainer::with_capacity(level_m);
        let scorer_fn = |a, b| scorer.score_internal(a, b);
        // Only fill the slots freed by deleted links; survivors are re-added below.
        container.fill_from_sorted_with_heuristic(
            shortcuts.into_iter_sorted(),
            level_m - valid_links.len(),
            scorer_fn,
        );
        for &link in &valid_links {
            container.push(link);
        }

        let container = container.into_vec();
        self.links_layers[offset as usize][level]
            .write()
            .fill_from(container.iter().copied());

        // Insert backlinks.
        let mut items = ItemsBuffer::default();
        for other_point in container {
            let mut other_container = self.links_layers[other_point as usize][level].write();
            // Only add the backlink if it is not already present.
            if !other_container.iter().any(|link| link == offset) {
                other_container.connect_with_heuristic(
                    offset,
                    other_point,
                    level_m,
                    scorer_fn,
                    &mut items,
                );
            }
        }
    }

    /// Heal all scheduled `(point, level)` pairs in parallel on `pool`,
    /// scoring with quantized vectors when available.
    pub fn heal(
        &mut self,
        pool: &ThreadPool,
        vector_storage: &VectorStorageEnum,
        quantized_vectors: Option<&QuantizedVectors>,
    ) -> OperationResult<()> {
        pool.install(|| {
            // Take the worklist so the parallel closure only borrows `self` immutably.
            std::mem::take(&mut self.to_heal)
                .into_par_iter()
                .try_for_each(|(offset, level)| {
                    // Internal operation. No measurements needed.
                    let internal_hardware_counter = HardwareCounterCell::disposable();
                    let query = vector_storage
                        .get_vector::<Random>(offset)
                        .as_vec_ref()
                        .into();
                    let scorer = if let Some(quantized_vectors) = quantized_vectors {
                        quantized_vectors.raw_scorer(query, internal_hardware_counter)?
                    } else {
                        new_raw_scorer(query, vector_storage, internal_hardware_counter)?
                    };
                    self.heal_point_on_level(offset, level, scorer.as_ref());
                    Ok(())
                })
        })
    }

    /// Move the healed links into `builder`, remapping every id through
    /// `old_to_new`. Deleted points (and links to them) are dropped.
    pub fn save_into_builder(self, builder: &GraphLayersBuilder) {
        for (old_offset, layers) in self.links_layers.into_iter().enumerate() {
            let Some(new_offset) = self.old_to_new[old_offset] else {
                // The point itself was deleted: nothing to save.
                continue;
            };
            let links_by_level = layers
                .into_iter()
                .map(|layer| {
                    layer
                        .into_inner()
                        .into_vec()
                        .into_iter()
                        // Drop links to deleted points, remap the rest.
                        .filter_map(|link| self.old_to_new[link as usize])
                        .collect()
                })
                .collect();
            builder.add_new_point(new_offset, links_by_level);
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/links_container.rs
lib/segment/src/index/hnsw_index/links_container.rs
use std::cell::Cell;
use std::iter::Copied;
use std::num::NonZeroU32;

use common::types::{PointOffsetType, ScoreType, ScoredPointOffset};

use crate::common::vector_utils::TrySetCapacityExact as _;

/// Links of a single HNSW point on a single level, plus bookkeeping that lets
/// [`Self::connect_with_heuristic`] skip re-scoring links already selected by
/// the neighbor-selection heuristic.
pub struct LinksContainer {
    links: Vec<PointOffsetType>,
    /// Number of links that have been processed by the heuristic.
    processed_by_heuristic: u32,
}

impl LinksContainer {
    /// Empty container preallocated for `m` links.
    pub fn with_capacity(m: usize) -> Self {
        Self {
            links: Vec::with_capacity(m),
            processed_by_heuristic: 0,
        }
    }

    /// Append a link unconditionally (no limit or heuristic applied).
    pub fn push(&mut self, link: PointOffsetType) {
        self.links.push(link);
    }

    /// Current links as a slice.
    pub fn links(&self) -> &[PointOffsetType] {
        &self.links
    }

    /// Iterate over links by value.
    pub fn iter(&self) -> Copied<std::slice::Iter<'_, u32>> {
        self.links.iter().copied()
    }

    /// Consume the container, returning the raw link vector.
    pub fn into_vec(self) -> Vec<PointOffsetType> {
        self.links
    }

    /// Put points into the container.
    pub fn fill_from(&mut self, points: impl Iterator<Item = PointOffsetType>) {
        self.links.clear();
        self.links.extend(points);
        // Arbitrary links: no heuristic guarantees hold anymore.
        self.processed_by_heuristic = 0;
    }

    /// Put `m` candidates selected by the heuristic into the container.
    ///
    /// `candidates` must be sorted by descending score. A candidate is kept
    /// only if it is closer to the target than to every already-kept link.
    pub fn fill_from_sorted_with_heuristic(
        &mut self,
        candidates: impl Iterator<Item = ScoredPointOffset>,
        level_m: usize,
        mut score: impl FnMut(PointOffsetType, PointOffsetType) -> ScoreType,
    ) {
        self.links.clear();
        if level_m == 0 {
            // Unlikely.
            self.processed_by_heuristic = 0;
            return;
        }
        'outer: for candidate in candidates {
            for &existing in &self.links {
                // Reject candidates that are closer to an existing link than
                // to the target (diversity criterion of the HNSW heuristic).
                if score(candidate.idx, existing) > candidate.score {
                    continue 'outer;
                }
            }
            self.links.push(candidate.idx);
            if self.links.len() >= level_m {
                break;
            }
        }
        self.processed_by_heuristic = self.links.len() as u32;
    }

    /// Connect new point to links, so that links contains only closest points.
    pub fn connect(
        &mut self,
        new_point_id: PointOffsetType,
        target_point_id: PointOffsetType,
        level_m: usize,
        mut score: impl FnMut(PointOffsetType, PointOffsetType) -> ScoreType,
    ) {
        // Invalidate assumptions about the heuristic eagerly.
        self.processed_by_heuristic = 0;

        // ToDo: binary search here ? (most likely does not worth it)
        let new_to_target = score(target_point_id, new_point_id);
        // Find the first position whose link scores worse than the new point;
        // links are kept sorted by descending score to the target.
        let mut id_to_insert = self.links.len();
        for (i, &item) in self.links.iter().enumerate() {
            let target_to_link = score(target_point_id, item);
            if target_to_link < new_to_target {
                id_to_insert = i;
                break;
            }
        }
        if self.links.len() < level_m {
            self.links.insert(id_to_insert, new_point_id);
        } else if id_to_insert != self.links.len() {
            // Container full: drop the worst link to make room.
            self.links.pop();
            self.links.insert(id_to_insert, new_point_id);
        }
    }

    /// Append one point to the container. If the container is full, run the heuristic.
    ///
    /// This is a reference implementation for testing.
    #[cfg(test)]
    fn connect_with_heuristic_simple(
        &mut self,
        new_point_id: PointOffsetType,
        target_point_id: PointOffsetType,
        level_m: usize,
        mut score: impl FnMut(PointOffsetType, PointOffsetType) -> ScoreType,
    ) {
        if self.links.len() < level_m {
            self.links.push(new_point_id);
        } else {
            let mut candidates = Vec::with_capacity(level_m + 1);
            for &idx in &self.links {
                candidates.push(ScoredPointOffset {
                    idx,
                    score: score(target_point_id, idx),
                });
            }
            candidates.push(ScoredPointOffset {
                idx: new_point_id,
                score: score(target_point_id, new_point_id),
            });
            candidates.sort_unstable_by(|a, b| b.score.total_cmp(&a.score));
            self.fill_from_sorted_with_heuristic(candidates.into_iter(), level_m, score);
        }
    }

    /// Append one point to the container. If the container is full, run the heuristic.
    ///
    /// The result is exactly the same as [`Self::connect_with_heuristic_simple`],
    /// but this implementation cuts some corners given that some of the links
    /// are already processed by the heuristic.
    pub fn connect_with_heuristic(
        &mut self,
        new_point_id: PointOffsetType,
        target_point_id: PointOffsetType,
        level_m: usize,
        mut score: impl FnMut(PointOffsetType, PointOffsetType) -> ScoreType,
        items: &mut ItemsBuffer,
    ) {
        if level_m == 0 {
            // Unlikely.
            return;
        }
        if self.links.len() < level_m {
            // Still room: no selection needed.
            self.links.push(new_point_id);
            return;
        }

        items.0.clear();
        items.0.try_set_capacity_exact(level_m + 1).unwrap();
        for (order, &link) in self.links.iter().enumerate() {
            items.0.push(Item {
                idx: link,
                score: Cell::new(None),
                // NOTE(review): `NonZeroU32::new(0)` is `None`, so the link at
                // position 0 loses its "processed" marker and falls back to
                // score-based comparison. Looks intentional (niche-optimized
                // `Option<NonZeroU32>` at the cost of re-scoring one item);
                // the result is unaffected since heuristic links are already
                // score-ordered — confirm against `connect_with_heuristic_simple`.
                order: if order < self.processed_by_heuristic as usize {
                    NonZeroU32::new(order as u32)
                } else {
                    None
                },
            });
        }
        items.0.push(Item {
            idx: new_point_id,
            score: Cell::new(None),
            order: None,
        });

        items.0.sort_unstable_by(|a, b| {
            if let (Some(a_order), Some(b_order)) = (a.order, b.order) {
                // Both items processed by the heuristic, compare their order
                // to avoid recomputing the score.
                return a_order.cmp(&b_order);
            }
            b.cached_score(target_point_id, &mut score)
                .total_cmp(&a.cached_score(target_point_id, &mut score))
        });

        self.links.clear();

        // The code below is similar to `fill_from_sorted_with_heuristic` with
        // two notable differences:
        //
        // (A) If both items have already been processed by the heuristic, the
        // score check is skipped as it is known to pass.
        //
        // (B) Instead of having separate input iterator (`candidates`) and
        // intermediate vector for the processed items (`self.links`), this
        // implementation reads and updates the same vector in-place:
        // - `items[read]` is the next candidate to be processed.
        // - `items[0..write]` are already processed items.
        // Since `read` ≤ `write`, there are no collisions, so this approach is
        // sound.
        let mut write = 0;
        'outer: for read in 0..items.0.len() {
            let candidate = items.0[read].clone();
            for existing in &items.0[0..write] {
                if candidate.order.is_some() && existing.order.is_some() {
                    continue; // See (A).
                }
                if score(candidate.idx, existing.idx)
                    > candidate.cached_score(target_point_id, &mut score)
                {
                    continue 'outer;
                }
            }
            self.links.push(candidate.idx);
            items.0[write] = candidate;
            write += 1;
            if write >= level_m {
                break;
            }
        }
        self.processed_by_heuristic = self.links.len() as u32;
    }
}

/// Internal buffer to avoid allocations.
#[derive(Default)]
pub struct ItemsBuffer(Vec<Item>);

/// One candidate link inside [`ItemsBuffer`].
#[derive(Debug, Clone)]
struct Item {
    idx: PointOffsetType,
    // Lazily computed score against the target point (see `cached_score`).
    score: Cell<Option<ScoreType>>,
    /// Order is used to avoid recomputing the score for items known be sorted.
    order: Option<NonZeroU32>,
}

impl Item {
    /// Get the score. This value is lazy/cached: it's computed no more than once.
    fn cached_score<F>(&self, query: PointOffsetType, score: F) -> ScoreType
    where
        F: FnOnce(PointOffsetType, PointOffsetType) -> ScoreType,
    {
        if let Some(score) = self.score.get() {
            score
        } else {
            let score = score(query, self.idx);
            self.score.set(Some(score));
            score
        }
    }
}

#[cfg(test)]
mod tests {
    use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
    use itertools::Itertools as _;
    use rand::SeedableRng as _;
    use rand::rngs::StdRng;
    use rand::seq::SliceRandom as _;

    use super::*;
    use crate::data_types::vectors::DenseVector;
    use crate::fixtures::index_fixtures::{TestRawScorerProducer, random_vector};
    use crate::types::Distance;

    /// Manual inspection helper: prints top-M candidates by score next to the
    /// candidates selected by the heuristic. Ignored in normal runs.
    #[test]
    #[ignore]
    fn test_candidate_selection_heuristics() {
        const NUM_VECTORS: usize = 100;
        const DIM: usize = 16;
        const M: usize = 16;

        let mut rng = StdRng::seed_from_u64(42);

        let vector_holder =
            TestRawScorerProducer::new(DIM, Distance::Euclid, NUM_VECTORS, false, &mut rng);

        let mut candidates: FixedLengthPriorityQueue<ScoredPointOffset> =
            FixedLengthPriorityQueue::new(NUM_VECTORS);

        let new_vector_to_insert = random_vector(&mut rng, DIM);

        let scorer = vector_holder.scorer(new_vector_to_insert);

        for i in 0..NUM_VECTORS {
            candidates.push(ScoredPointOffset {
                idx: i as PointOffsetType,
                score: scorer.score_point(i as PointOffsetType),
            });
        }

        let sorted_candidates_vec = candidates.clone().into_sorted_vec();

        for x in sorted_candidates_vec.iter().take(M) {
            eprintln!("sorted_candidates = ({}, {})", x.idx, x.score);
        }

        let mut links_container = LinksContainer::with_capacity(M);
        links_container.fill_from_sorted_with_heuristic(
            candidates.into_iter_sorted(),
            M,
            |a, b| scorer.score_internal(a, b),
        );
        let selected_candidates = links_container.links().to_vec();

        for x in selected_candidates.iter() {
            eprintln!("selected_candidates = {x}");
        }
    }

    #[test]
    fn test_connect_new_point() {
        let m = 6;

        // Hand-placed 2D points around a target (point 0). Points marked `+`
        // are expected to survive the heuristic; the others are each closer
        // to one of the `+` points than to the target, so they get rejected.
        //
        //        K(10)
        //              H(9)
        //        I(7)
        //           G(6)+        E(4)
        //        J(8)        D(3)+
        //           ◉ Target      F(5)
        //              B(1)+
        //              C(2)
        let points: Vec<DenseVector> = vec![
            vec![21.79, 07.18], // Target
            vec![20.58, 05.46], // + 1 B
            vec![21.19, 04.51], // 2 C closer to B than to the target
            vec![24.73, 08.24], // + 3 D
            vec![24.55, 09.98], // 4 E closer to D than to the target
            vec![26.11, 06.85], // 5 F closer to D than to the target
            vec![17.64, 11.14], // + 6 G
            vec![14.97, 11.52], // 7 I closer to G than to the target
            vec![14.97, 09.60], // 8 J closer to B and G than to the target
            vec![16.23, 14.32], // 9 H closer to G than to the target
            vec![12.69, 19.13], // 10 K closer to G than to the target
        ];

        // Negated Euclidean distance: higher score = closer.
        let scorer = |a: PointOffsetType, b: PointOffsetType| {
            -((points[a as usize][0] - points[b as usize][0]).powi(2)
                + (points[a as usize][1] - points[b as usize][1]).powi(2))
            .sqrt()
        };

        let mut insert_ids = (1..points.len() as PointOffsetType).collect_vec();

        let mut candidates = FixedLengthPriorityQueue::new(insert_ids.len());
        for &id in &insert_ids {
            candidates.push(ScoredPointOffset {
                idx: id,
                score: scorer(0, id),
            });
        }

        // Heuristic keeps only the diverse "anchor" points B, D, G.
        let mut res = LinksContainer::with_capacity(m);
        res.fill_from_sorted_with_heuristic(candidates.into_iter_sorted(), m, scorer);
        assert_eq!(&res.links(), &[1, 3, 6]);

        let mut rng = StdRng::seed_from_u64(42);

        // Plain `connect` (no heuristic) keeps the m closest points instead.
        let mut links_container = LinksContainer::with_capacity(m);
        insert_ids.shuffle(&mut rng);
        for &id in &insert_ids {
            links_container.connect(id, 0, m, scorer);
        }
        assert_eq!(links_container.links(), &vec![1, 2, 3, 4, 5, 6]);
    }

    /// Randomized equivalence test: the optimized `connect_with_heuristic`
    /// must match the straightforward `connect_with_heuristic_simple` after
    /// every insertion.
    #[test]
    fn test_connect_new_point_with_heuristic() {
        let mut rng = StdRng::seed_from_u64(42);
        const NUM_VECTORS: usize = 20;
        const DIM: usize = 128;
        const M: usize = 5;

        for _ in 0..1000 {
            let vector_holder =
                TestRawScorerProducer::new(DIM, Distance::Euclid, NUM_VECTORS, false, &mut rng);
            let scorer = vector_holder.scorer(random_vector(&mut rng, DIM));
            let mut candidate_indices: Vec<_> = (0..NUM_VECTORS as u32).collect();
            candidate_indices.shuffle(&mut rng);
            let query_idx = candidate_indices.pop().unwrap();

            let score = |a: u32, b: u32| scorer.score_internal(a, b);
            let scored_offfset = |idx: u32| ScoredPointOffset {
                idx,
                score: score(query_idx, idx),
            };

            // Seed both containers identically with the first 5 candidates.
            let mut container = LinksContainer::with_capacity(M);
            container.fill_from_sorted_with_heuristic(
                candidate_indices
                    .iter()
                    .copied()
                    .map(scored_offfset)
                    .take(5)
                    .sorted_by(|a, b| b.score.total_cmp(&a.score)),
                M,
                score,
            );
            let mut reference_container = LinksContainer::with_capacity(M);
            reference_container.fill_from_sorted_with_heuristic(
                candidate_indices
                    .iter()
                    .copied()
                    .map(scored_offfset)
                    .take(5)
                    .sorted_by(|a, b| b.score.total_cmp(&a.score)),
                M,
                score,
            );

            let mut items = ItemsBuffer::default();
            for &candidate_idx in candidate_indices.iter().skip(5) {
                container.connect_with_heuristic(candidate_idx, query_idx, M, score, &mut items);
                reference_container.connect_with_heuristic_simple(
                    candidate_idx,
                    query_idx,
                    M,
                    score,
                );
                assert_eq!(container.links, reference_container.links);
            }
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/mod.rs
lib/segment/src/index/hnsw_index/mod.rs
use common::defaults::thread_count_for_hnsw; mod build_cache; pub mod build_condition_checker; mod config; mod entry_points; pub mod graph_layers; pub mod graph_layers_builder; mod graph_layers_healer; pub mod graph_links; pub mod hnsw; mod links_container; pub mod point_scorer; mod search_context; #[cfg(feature = "gpu")] pub mod gpu; /// Maximum number of links per level. #[derive(Debug, Clone, Copy)] pub struct HnswM { /// M for all levels except level 0. pub m: usize, /// M for level 0. pub m0: usize, } impl HnswM { /// Explicitly set both `m` and `m0`. pub fn new(m: usize, m0: usize) -> Self { Self { m, m0 } } /// Initialize with `m0 = 2 * m`. pub fn new2(m: usize) -> Self { Self { m, m0: 2 * m } } pub fn level_m(&self, level: usize) -> usize { if level == 0 { self.m0 } else { self.m } } } /// Placeholders for GPU logic when the `gpu` feature is not enabled. #[cfg(not(feature = "gpu"))] pub mod gpu { pub mod gpu_devices_manager { /// Placeholder for GPU device to process indexing on. pub struct LockedGpuDevice<'a> { phantom: std::marker::PhantomData<&'a usize>, } } pub mod gpu_insert_context { /// Placeholder for GPU insertion context to process indexing on. pub struct GpuInsertContext<'a> { phantom: std::marker::PhantomData<&'a usize>, } } pub mod gpu_vector_storage { /// Placeholder for GPU vector storage. pub struct GpuVectorStorage {} } } #[cfg(test)] mod tests; /// Number of threads to use with rayon for HNSW index building. /// /// Uses [`thread_count_for_hnsw`] heuristic but accepts a `max_indexing_threads` parameter to /// allow configuring this. pub fn num_rayon_threads(max_indexing_threads: usize) -> usize { if max_indexing_threads == 0 { let num_cpu = common::cpu::get_num_cpus(); num_cpu.clamp(1, thread_count_for_hnsw(num_cpu)) } else { max_indexing_threads } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_devices_manager.rs
lib/segment/src/index/hnsw_index/gpu/gpu_devices_manager.rs
use std::sync::Arc;
use std::sync::atomic::AtomicBool;

use itertools::Itertools;
use parking_lot::{Mutex, MutexGuard};

use crate::common::check_stopped;
use crate::common::operation_error::OperationResult;

/// Simple non-invasive permits to use GPU devices.
///
/// NOTE(review): "Maganer" is a typo for "Manager", but it is the public type
/// name — renaming would break external callers, so it is kept as-is.
pub struct GpuDevicesMaganer {
    /// Created GPU devices; a locked `Mutex` means the device is in use.
    devices: Vec<Mutex<Arc<gpu::Device>>>,
    /// Names of ALL physical devices found (unfiltered), kept for telemetry.
    device_names: Vec<String>,
    /// If set, `lock_device` blocks (polling) until a device frees up
    /// instead of returning `None`.
    wait_free: bool,
}

/// RAII permit for a single GPU device; the device is released when dropped.
pub struct LockedGpuDevice<'a> {
    locked_device: MutexGuard<'a, Arc<gpu::Device>>,
}

impl<'a> LockedGpuDevice<'a> {
    /// Wrap an already-acquired mutex guard.
    pub fn new(locked_device: MutexGuard<'a, Arc<gpu::Device>>) -> Self {
        Self { locked_device }
    }

    /// Cheap `Arc` clone of the held device.
    pub fn device(&self) -> Arc<gpu::Device> {
        self.locked_device.clone()
    }
}

impl GpuDevicesMaganer {
    /// Enumerate physical devices, apply the name filter and type
    /// restrictions, and create up to `parallel_indexes` logical devices per
    /// selected physical device.
    ///
    /// Devices that fail to initialize are logged and skipped rather than
    /// failing the whole constructor.
    pub fn new(
        filter: &str,
        device_indexes: Option<&[usize]>,
        allow_integrated: bool,
        allow_emulated: bool,
        wait_free: bool,
        parallel_indexes: usize,
    ) -> OperationResult<Self> {
        let instance = gpu::Instance::builder().build()?;
        // Device filter is case-insensitive and comma-separated.
        let filter = filter.to_lowercase();
        let filter = filter
            .split(",")
            .map(|s| s.trim().to_owned())
            .collect::<Vec<_>>();
        // Collect physical devices that match the filter.
        let filtered_physical_devices = instance
            .physical_devices()
            .iter()
            // Apply device name filter.
            .filter(|device| {
                let device_name = device.name.to_lowercase();
                filter.iter().any(|filter| device_name.contains(filter))
            })
            // Filter out integrated and emulated devices.
            .filter(|device| {
                device.device_type == gpu::PhysicalDeviceType::Discrete
                    || (allow_integrated
                        && device.device_type == gpu::PhysicalDeviceType::Integrated)
                    || (allow_emulated && device.device_type == gpu::PhysicalDeviceType::Other)
            })
            .collect::<Vec<_>>();
        // Collect device indexes to use.
        // Note: indexes refer to positions in the FILTERED list above.
        let device_indexes: Vec<_> = if let Some(device_indexes) = device_indexes {
            device_indexes.iter().copied().unique().collect()
        } else {
            (0..filtered_physical_devices.len()).collect()
        };
        let mut devices = Vec::new();
        // One logical device per (physical device, queue index) pair —
        // presumably to allow `parallel_indexes` concurrent index builds per
        // physical device; confirm against `gpu::Device::new_with_params`.
        for queue_index in 0..parallel_indexes {
            devices.extend(
                device_indexes
                    .iter()
                    // Get vk physical device. Filter out invalid device indexes.
                    .filter_map(|&device_index| filtered_physical_devices.get(device_index))
                    // Try to create a gpu device.
                    .filter_map(|physical_device| {
                        match gpu::Device::new_with_params(
                            instance.clone(),
                            physical_device,
                            queue_index,
                            false,
                        ) {
                            Ok(device) => {
                                log::info!("Initialized GPU device: {:?}", &physical_device.name);
                                Some(Mutex::new(device))
                            }
                            Err(err) => {
                                // Best-effort: log and continue with the
                                // remaining devices.
                                log::error!(
                                    "Failed to create GPU device: {:?}, error: {:?}",
                                    &physical_device.name,
                                    err
                                );
                                None
                            }
                        }
                    }),
            );
        }
        // All found devices to include it to the telemetry.
        let device_names = instance
            .physical_devices()
            .iter()
            .map(|device| device.name.clone())
            .collect();
        Ok(Self {
            devices,
            device_names,
            wait_free,
        })
    }

    /// Try to acquire any free device.
    ///
    /// Returns `Ok(None)` when no devices are configured, or when all devices
    /// are busy and `wait_free` is false. With `wait_free` set, polls every
    /// 100 ms until a device frees up, checking `stopped` between rounds.
    pub fn lock_device(
        &self,
        stopped: &AtomicBool,
    ) -> OperationResult<Option<LockedGpuDevice<'_>>> {
        if self.devices.is_empty() {
            return Ok(None);
        }
        loop {
            for device in &self.devices {
                // Non-blocking probe: take the first free device.
                if let Some(guard) = device.try_lock() {
                    return Ok(Some(LockedGpuDevice::new(guard)));
                }
            }
            if !self.wait_free {
                return Ok(None);
            }
            // Abort politely if the operation was cancelled.
            check_stopped(stopped)?;
            std::thread::sleep(std::time::Duration::from_millis(100));
        }
    }

    /// Returns all found device names without filtering.
    pub fn all_found_device_names(&self) -> Vec<String> {
        self.device_names.clone()
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_heap_tests.rs
lib/segment/src/index/hnsw_index/gpu/gpu_heap_tests.rs
//! Unit tests for GPU heap shader. use std::collections::{BinaryHeap, HashMap}; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use common::types::{PointOffsetType, ScoredPointOffset}; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use rstest::rstest; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use crate::index::hnsw_index::gpu::GPU_TIMEOUT; use crate::index::hnsw_index::gpu::shader_builder::{ShaderBuilder, ShaderBuilderParameters}; struct GpuHeapTestConfig { ef: usize, linear: bool, } impl ShaderBuilderParameters for GpuHeapTestConfig { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([ ( "shared_buffer.comp".to_string(), include_str!("shaders/shared_buffer.comp").to_string(), ), ( "bheap.comp".to_string(), include_str!("shaders/bheap.comp").to_string(), ), ]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); defines.insert("EF".to_owned(), Some(self.ef.to_string())); if self.linear { defines.insert("BHEAP_LINEAR".to_owned(), None); } defines } } #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] struct GpuHeapTestParams { input_counts: u32, } #[rstest] fn test_gpu_nearest_heap(#[values(true, false)] linear: bool) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let ef = 100; let points_count = 1024; let groups_count = 8; let inputs_count = points_count; let mut rng = StdRng::seed_from_u64(42); let inputs_data: Vec<ScoredPointOffset> = (0..inputs_count * groups_count) .map(|i| ScoredPointOffset { idx: i as PointOffsetType, score: rng.random_range(-1.0..1.0), }) .collect(); let instance = gpu::GPU_TEST_INSTANCE.clone(); let device = gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap(); let gpu_nearest_heap = GpuHeapTestConfig { ef, linear }; let shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/tests/test_nearest_heap.comp")) 
.with_parameters(&gpu_nearest_heap) .build("tests/test_nearest_heap.comp") .unwrap(); let input_points_buffer = gpu::Buffer::new( device.clone(), "Nearest heap input points buffer", gpu::BufferType::Storage, inputs_count * groups_count * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); let upload_staging_buffer = gpu::Buffer::new( device.clone(), "Nearest heap upload staging buffer", gpu::BufferType::CpuToGpu, inputs_count * groups_count * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); upload_staging_buffer .upload(inputs_data.as_slice(), 0) .unwrap(); let mut context = gpu::Context::new(device.clone()).unwrap(); context .copy_gpu_buffer( upload_staging_buffer.clone(), input_points_buffer.clone(), 0, 0, input_points_buffer.size(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let test_params_buffer = gpu::Buffer::new( device.clone(), "Nearest heap test params buffer", gpu::BufferType::Uniform, std::mem::size_of::<GpuHeapTestParams>(), ) .unwrap(); upload_staging_buffer .upload( &GpuHeapTestParams { input_counts: inputs_count as u32, }, 0, ) .unwrap(); context .copy_gpu_buffer( upload_staging_buffer, test_params_buffer.clone(), 0, 0, test_params_buffer.size(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let scores_output_buffer = gpu::Buffer::new( device.clone(), "Nearest heap scores output buffer", gpu::BufferType::Storage, inputs_count * groups_count * std::mem::size_of::<f32>(), ) .unwrap(); let sorted_output_buffer = gpu::Buffer::new( device.clone(), "Nearest heap sorted output buffer", gpu::BufferType::Storage, ef * groups_count * std::mem::size_of::<PointOffsetType>(), ) .unwrap(); let descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_uniform_buffer(0) .add_storage_buffer(1) .add_storage_buffer(2) .add_storage_buffer(3) .build(device.clone()) .unwrap(); let descriptor_set = gpu::DescriptorSet::builder(descriptor_set_layout.clone()) .add_uniform_buffer(0, 
test_params_buffer.clone()) .add_storage_buffer(1, input_points_buffer.clone()) .add_storage_buffer(2, scores_output_buffer.clone()) .add_storage_buffer(3, sorted_output_buffer.clone()) .build() .unwrap(); let pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, descriptor_set_layout.clone()) .add_shader(shader.clone()) .build(device.clone()) .unwrap(); context .bind_pipeline(pipeline, std::slice::from_ref(&descriptor_set)) .unwrap(); context.dispatch(groups_count, 1, 1).unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let download_staging_buffer = gpu::Buffer::new( device.clone(), "Nearest heap download staging buffer", gpu::BufferType::GpuToCpu, std::cmp::max(scores_output_buffer.size(), sorted_output_buffer.size()), ) .unwrap(); context .copy_gpu_buffer( scores_output_buffer.clone(), download_staging_buffer.clone(), 0, 0, scores_output_buffer.size(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let scores_output = download_staging_buffer .download_vec::<f32>(0, inputs_count * groups_count) .unwrap(); let mut scores_output_cpu = vec![0.0; inputs_count * groups_count]; let mut sorted_output_cpu = vec![PointOffsetType::default(); ef * groups_count]; for group in 0..groups_count { let mut queue = FixedLengthPriorityQueue::<ScoredPointOffset>::new(ef); for i in 0..inputs_count { let scored_point = inputs_data[group * inputs_count + i]; queue.push(scored_point); scores_output_cpu[group * inputs_count + i] = queue.top().unwrap().score; } let sorted = queue.into_sorted_vec(); for i in 0..ef { sorted_output_cpu[group * ef + i] = sorted[i].idx; } } let nearest_gpu_count = gpu_nearest_heap.ef * groups_count; context .copy_gpu_buffer( sorted_output_buffer.clone(), download_staging_buffer.clone(), 0, 0, nearest_gpu_count * std::mem::size_of::<PointOffsetType>(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let nearest_gpu = download_staging_buffer 
.download_vec::<PointOffsetType>(0, nearest_gpu_count) .unwrap(); let mut sorted_output_gpu = Vec::new(); for group in 0..groups_count { let mut nearest_group = Vec::new(); for i in 0..ef { nearest_group.push(nearest_gpu[group * gpu_nearest_heap.ef + i]); } sorted_output_gpu.extend(nearest_group); } assert_eq!(scores_output, scores_output_cpu); assert_eq!(sorted_output_gpu, sorted_output_cpu); } #[rstest] fn test_gpu_candidates_heap(#[values(true, false)] linear: bool) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let capacity = 128; let points_count = 128; let groups_count = 8; let inputs_count = points_count; let mut rng = StdRng::seed_from_u64(42); let inputs_data: Vec<ScoredPointOffset> = (0..inputs_count * groups_count) .map(|i| ScoredPointOffset { idx: i as PointOffsetType, score: rng.random_range(-1.0..1.0), }) .collect(); let instance = gpu::GPU_TEST_INSTANCE.clone(); let device = gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap(); let gpu_candidates_heap = GpuHeapTestConfig { ef: capacity, linear, }; let shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/tests/test_candidates_heap.comp")) .with_parameters(&gpu_candidates_heap) .build("tests/test_candidates_heap.comp") .unwrap(); let input_points_buffer = gpu::Buffer::new( device.clone(), "Input points buffer", gpu::BufferType::Storage, inputs_count * groups_count * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); let upload_staging_buffer = gpu::Buffer::new( device.clone(), "Candidates heap upload staging buffer", gpu::BufferType::CpuToGpu, inputs_count * groups_count * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); upload_staging_buffer .upload(inputs_data.as_slice(), 0) .unwrap(); let mut context = gpu::Context::new(device.clone()).unwrap(); context .copy_gpu_buffer( upload_staging_buffer.clone(), input_points_buffer.clone(), 0, 0, input_points_buffer.size(), ) .unwrap(); 
context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let test_params_buffer = gpu::Buffer::new( device.clone(), "Test params buffer", gpu::BufferType::Uniform, std::mem::size_of::<GpuHeapTestParams>(), ) .unwrap(); upload_staging_buffer .upload( &GpuHeapTestParams { input_counts: inputs_count as u32, }, 0, ) .unwrap(); context .copy_gpu_buffer( upload_staging_buffer, test_params_buffer.clone(), 0, 0, test_params_buffer.size(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let scores_output_buffer = gpu::Buffer::new( device.clone(), "Scores output buffer", gpu::BufferType::Storage, inputs_count * groups_count * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); let descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_uniform_buffer(0) .add_storage_buffer(1) .add_storage_buffer(2) .build(device.clone()) .unwrap(); let descriptor_set = gpu::DescriptorSet::builder(descriptor_set_layout.clone()) .add_uniform_buffer(0, test_params_buffer.clone()) .add_storage_buffer(1, input_points_buffer.clone()) .add_storage_buffer(2, scores_output_buffer.clone()) .build() .unwrap(); let pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, descriptor_set_layout.clone()) .add_shader(shader.clone()) .build(device.clone()) .unwrap(); context .bind_pipeline(pipeline, std::slice::from_ref(&descriptor_set)) .unwrap(); context.dispatch(groups_count, 1, 1).unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let mut scores_cpu = vec![]; for group in 0..groups_count { let mut heap = BinaryHeap::<ScoredPointOffset>::new(); for i in 0..inputs_count { let scored_point = inputs_data[group * inputs_count + i]; heap.push(scored_point); } while !heap.is_empty() { scores_cpu.push(heap.pop().unwrap()); } } let download_staging_buffer = gpu::Buffer::new( device.clone(), "Candidates heap download staging buffer", gpu::BufferType::GpuToCpu, scores_output_buffer.size(), ) .unwrap(); context .copy_gpu_buffer( 
scores_output_buffer.clone(), download_staging_buffer.clone(), 0, 0, scores_output_buffer.size(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let scores_gpu = download_staging_buffer .download_vec::<ScoredPointOffset>(0, inputs_count * groups_count) .unwrap(); assert_eq!(scores_gpu, scores_cpu); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_links.rs
lib/segment/src/index/hnsw_index/gpu/gpu_links.rs
use std::collections::HashMap; use std::sync::Arc; use std::sync::atomic::AtomicBool; use common::types::PointOffsetType; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use super::shader_builder::ShaderBuilderParameters; use crate::common::check_stopped; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::hnsw_index::gpu::GPU_TIMEOUT; use crate::index::hnsw_index::graph_layers::GraphLayersBase; use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder; /// Size of transfer buffer for links. const LINKS_TRANSFER_BUFFER_SIZE: usize = 32 * 1024 * 1024; #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] struct GpuLinksParamsBuffer { m: u32, links_capacity: u32, } /// GPU resources for links. pub struct GpuLinks { m: usize, links_capacity: usize, max_patched_points: usize, device: Arc<gpu::Device>, links_buffer: Arc<gpu::Buffer>, params_buffer: Arc<gpu::Buffer>, patch_buffer: Arc<gpu::Buffer>, patched_points: Vec<(PointOffsetType, usize)>, descriptor_set_layout: Arc<gpu::DescriptorSetLayout>, descriptor_set: Arc<gpu::DescriptorSet>, } impl ShaderBuilderParameters for GpuLinks { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([( "links.comp".to_string(), include_str!("shaders/links.comp").to_string(), )]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); defines.insert( "LINKS_CAPACITY".to_owned(), Some(self.links_capacity.to_string()), ); defines } } impl GpuLinks { pub fn new( device: Arc<gpu::Device>, m: usize, links_capacity: usize, points_count: usize, ) -> gpu::GpuResult<Self> { let links_buffer = gpu::Buffer::new( device.clone(), "Links buffer", gpu::BufferType::Storage, points_count * (links_capacity + 1) * std::mem::size_of::<PointOffsetType>(), )?; let params_buffer = gpu::Buffer::new( device.clone(), "Links params buffer", gpu::BufferType::Uniform, std::mem::size_of::<GpuLinksParamsBuffer>(), )?; let 
max_patched_points = LINKS_TRANSFER_BUFFER_SIZE / ((links_capacity + 1) * std::mem::size_of::<PointOffsetType>()); let links_patch_capacity = max_patched_points * (links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); let patch_buffer = gpu::Buffer::new( device.clone(), "Links patch buffer", gpu::BufferType::CpuToGpu, links_patch_capacity + std::mem::size_of::<GpuLinksParamsBuffer>(), )?; let params = GpuLinksParamsBuffer { m: m as u32, links_capacity: links_capacity as u32, }; patch_buffer.upload(&params, 0)?; let mut upload_context = gpu::Context::new(device.clone())?; upload_context.copy_gpu_buffer( patch_buffer.clone(), params_buffer.clone(), 0, 0, std::mem::size_of::<GpuLinksParamsBuffer>(), )?; upload_context.clear_buffer(links_buffer.clone())?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; let descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_uniform_buffer(0) .add_storage_buffer(1) .build(device.clone())?; let descriptor_set = gpu::DescriptorSet::builder(descriptor_set_layout.clone()) .add_uniform_buffer(0, params_buffer.clone()) .add_storage_buffer(1, links_buffer.clone()) .build()?; Ok(Self { m, links_capacity, max_patched_points, device, links_buffer, params_buffer, patch_buffer, patched_points: vec![], descriptor_set_layout, descriptor_set, }) } pub fn update_params( &mut self, gpu_context: &mut gpu::Context, m: usize, ) -> OperationResult<()> { self.m = m; let params = GpuLinksParamsBuffer { m: m as u32, links_capacity: self.links_capacity as u32, }; let links_patch_capacity = self.max_patched_points * (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); self.patch_buffer.upload(&params, links_patch_capacity)?; gpu_context.copy_gpu_buffer( self.patch_buffer.clone(), self.params_buffer.clone(), links_patch_capacity, 0, std::mem::size_of::<GpuLinksParamsBuffer>(), )?; gpu_context.run()?; gpu_context.wait_finish(GPU_TIMEOUT)?; Ok(()) } pub fn clear(&mut self, gpu_context: &mut gpu::Context) -> 
OperationResult<()> { if !self.patched_points.is_empty() { self.patched_points.clear(); } gpu_context.clear_buffer(self.links_buffer.clone())?; gpu_context.run()?; gpu_context.wait_finish(GPU_TIMEOUT)?; Ok(()) } pub fn links_buffer(&self) -> Arc<gpu::Buffer> { self.links_buffer.clone() } pub fn upload_links( &mut self, level: usize, graph_layers_builder: &GraphLayersBuilder, gpu_context: &mut gpu::Context, stopped: &AtomicBool, ) -> OperationResult<()> { self.update_params(gpu_context, graph_layers_builder.get_m(level))?; self.clear(gpu_context)?; let timer = std::time::Instant::now(); let points: Vec<_> = (0..graph_layers_builder.links_layers().len()) .filter(|&point_id| { graph_layers_builder.get_point_level(point_id as PointOffsetType) >= level }) .filter(|&point_id| { !graph_layers_builder.links_layers()[point_id][level] .read() .links() .is_empty() }) .collect(); for points_slice in points.chunks(self.max_patched_points) { check_stopped(stopped)?; for &point_id in points_slice { let links = graph_layers_builder.links_layers()[point_id][level].read(); self.set_links(point_id as PointOffsetType, links.links())?; } self.apply_gpu_patches(gpu_context)?; gpu_context.run()?; gpu_context.wait_finish(GPU_TIMEOUT)?; } log::trace!("Upload links on level {level} time: {:?}", timer.elapsed()); Ok(()) } pub fn download_links( &mut self, level: usize, graph_layers_builder: &GraphLayersBuilder, gpu_context: &mut gpu::Context, stopped: &AtomicBool, ) -> OperationResult<()> { let timer = std::time::Instant::now(); // Collect bad links to check if there are any errors in the links. 
let mut bad_links = Vec::new(); let links_patch_capacity = self.max_patched_points * (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); let download_buffer = gpu::Buffer::new( self.device.clone(), "Download links staging buffer", gpu::BufferType::GpuToCpu, links_patch_capacity, )?; let points = (0..graph_layers_builder.links_layers().len() as PointOffsetType) .filter(|&point_id| graph_layers_builder.get_point_level(point_id) >= level) .collect::<Vec<_>>(); for chunk_index in 0..points.len().div_ceil(self.max_patched_points) { check_stopped(stopped)?; let start = chunk_index * self.max_patched_points; let end = (start + self.max_patched_points).min(points.len()); let chunk_size = end - start; for (i, &point_id) in points[start..end].iter().enumerate() { let links_size = (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); gpu_context.copy_gpu_buffer( self.links_buffer.clone(), download_buffer.clone(), point_id as usize * links_size, i * links_size, links_size, )?; } gpu_context.run()?; gpu_context.wait_finish(GPU_TIMEOUT)?; let links = download_buffer .download_vec::<PointOffsetType>(0, chunk_size * (self.links_capacity + 1))?; for (index, chunk) in links.chunks(self.links_capacity + 1).enumerate() { let point_id = points[start + index] as usize; let links_count = chunk[0] as usize; let links = &chunk[1..=links_count]; let mut dst = graph_layers_builder.links_layers()[point_id][level].write(); dst.fill_from(links.iter().copied().filter(|&other_point_id| { let is_correct_link = level < graph_layers_builder.links_layers()[other_point_id as usize].len(); if !is_correct_link { bad_links.push(other_point_id); } is_correct_link })); } } if !bad_links.is_empty() { log::warn!( "Incorrect links on level {} were found. 
Amount of incorrect links: {}, zeroes: {}", level, bad_links.len(), bad_links.iter().filter(|&&point_id| point_id == 0).count() ); } log::trace!( "Download links for level {} in time {:?}", level, timer.elapsed() ); Ok(()) } pub fn descriptor_set_layout(&self) -> Arc<gpu::DescriptorSetLayout> { self.descriptor_set_layout.clone() } pub fn descriptor_set(&self) -> Arc<gpu::DescriptorSet> { self.descriptor_set.clone() } fn apply_gpu_patches(&mut self, gpu_context: &mut gpu::Context) -> OperationResult<()> { for (i, &(patched_point_id, patched_links_count)) in self.patched_points.iter().enumerate() { let patch_start_index = i * (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); let patch_size = (patched_links_count + 1) * std::mem::size_of::<PointOffsetType>(); let links_start_index = patched_point_id as usize * (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); gpu_context.copy_gpu_buffer( self.patch_buffer.clone(), self.links_buffer.clone(), patch_start_index, links_start_index, patch_size, )?; } self.patched_points.clear(); Ok(()) } fn set_links( &mut self, point_id: PointOffsetType, links: &[PointOffsetType], ) -> OperationResult<()> { if self.patched_points.len() >= self.max_patched_points { return Err(OperationError::service_error("Gpu links patches are full")); } let mut patch_start_index = self.patched_points.len() * (self.links_capacity + 1) * std::mem::size_of::<PointOffsetType>(); self.patch_buffer .upload(&(links.len() as u32), patch_start_index)?; patch_start_index += std::mem::size_of::<PointOffsetType>(); self.patch_buffer.upload(links, patch_start_index)?; self.patched_points.push((point_id, links.len())); Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/batched_points.rs
lib/segment/src/index/hnsw_index/gpu/batched_points.rs
use std::ops::Range; use std::sync::atomic::AtomicU32; use common::types::PointOffsetType; use crate::common::operation_error::OperationResult; /// Pending point params (entry and level). #[derive(Debug)] pub struct PointLinkingData { pub point_id: PointOffsetType, pub level: usize, pub entry: AtomicU32, } /// Batch of points to insert in one shader invocation. #[derive(Debug)] pub struct Batch<'a> { pub points: &'a [PointLinkingData], pub level: usize, } /// Batched points for GPU processing. /// Each batch is one shader invocation. /// All points in one batch have the same level. /// Size of batch is limited by subgroups count. pub struct BatchedPoints { points: Vec<PointLinkingData>, batches: Vec<Range<usize>>, first_point_id: Option<PointOffsetType>, levels_count: usize, remap: Vec<PointOffsetType>, } impl BatchedPoints { pub fn new( level_fn: impl Fn(PointOffsetType) -> usize, mut ids: Vec<PointOffsetType>, groups_count: usize, ) -> OperationResult<Self> { Self::sort_points_by_level(&level_fn, &mut ids); let mut remap = vec![0; ids.iter().max().copied().unwrap_or_default() as usize + 1]; for (remapped_id, id) in ids.iter().enumerate() { remap[*id as usize] = remapped_id as PointOffsetType; } let first_point_id = if !ids.is_empty() { Some(ids.remove(0)) } else { None }; let batches = Self::build_initial_batches(&level_fn, &ids, groups_count); let mut points = Vec::with_capacity(ids.len()); for batch in batches.iter() { for i in batch.clone() { let point_id = ids[i]; let level = level_fn(point_id); points.push(PointLinkingData { point_id, level, entry: first_point_id.unwrap_or_default().into(), }); } } Ok(Self { points, batches, first_point_id, levels_count: first_point_id .map(|first_point_id| level_fn(first_point_id) + 1) .unwrap_or_default(), remap, }) } pub fn first_point_id(&self) -> Option<PointOffsetType> { self.first_point_id } pub fn levels_count(&self) -> usize { self.levels_count } pub fn remap(&self) -> &[PointOffsetType] { &self.remap } pub fn 
points(&self) -> &[PointLinkingData] { &self.points } pub fn iter_batches(&self, skip_count: usize) -> impl Iterator<Item = Batch<'_>> { self.batches .iter() .filter(move |batch| batch.end > skip_count) .map(move |batch| { let intersected_batch = std::cmp::max(batch.start, skip_count)..batch.end; let level = self.points[intersected_batch.start].level; Batch { points: &self.points[intersected_batch], level, } }) } pub fn sort_points_by_level( level_fn: impl Fn(PointOffsetType) -> usize, ids: &mut [PointOffsetType], ) { ids.sort_by(|&a, &b| { let a_level = level_fn(a); let b_level = level_fn(b); match b_level.cmp(&a_level) { std::cmp::Ordering::Less => std::cmp::Ordering::Less, std::cmp::Ordering::Greater => std::cmp::Ordering::Greater, std::cmp::Ordering::Equal => a.cmp(&b), } }); } fn build_initial_batches( level_fn: impl Fn(PointOffsetType) -> usize, ids: &[PointOffsetType], groups_count: usize, ) -> Vec<Range<usize>> { let num_vectors = ids.len(); let mut batches: Vec<_> = (0..num_vectors.div_ceil(groups_count)) .map(|start| { groups_count * start..std::cmp::min(groups_count * (start + 1), num_vectors) }) .collect(); let mut batch_index = 0usize; while batch_index < batches.len() { let batch = batches[batch_index].clone(); let point_id = ids[batch.start]; let batch_level = level_fn(point_id); for i in 1..batch.len() { let point_id = ids[batch.start + i]; let level = level_fn(point_id); // divide batch by level. all batches must be on the same level if level != batch_level { let batch1 = batch.start..batch.start + i; let batch2 = batch.start + i..batch.end; batches[batch_index] = batch1; batches.insert(batch_index + 1, batch2); break; } } batch_index += 1; } for batch_pair in batches.windows(2) { if batch_pair.len() == 2 { assert_eq!(batch_pair[0].end, batch_pair[1].start); } } batches } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_visited_flags.rs
lib/segment/src/index/hnsw_index/gpu/gpu_visited_flags.rs
use std::collections::HashMap;
use std::sync::Arc;

use common::types::PointOffsetType;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

use super::GPU_TIMEOUT;
use super::shader_builder::ShaderBuilderParameters;
use crate::common::operation_error::{OperationError, OperationResult};

// Number of `PointOffsetType` entries uploaded per staging-buffer chunk when
// transferring the remap table in `init`. Also sizes the staging buffer.
const UPLOAD_REMAP_BUFFER_COUNT: usize = 1024 * 1024;

// Uniform-buffer layout shared with the `visited_flags.comp` shader.
#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C)]
struct GpuVisitedFlagsParamsBuffer {
    // Current "visited" generation. `clear` bumps this instead of zeroing the
    // whole flags buffer; presumably the shader treats a flag as visited only
    // when it equals the current generation — see shaders/visited_flags.comp.
    generation: u32,
}

/// GPU resources for visited flags.
pub struct GpuVisitedFlags {
    device: Arc<gpu::Device>,
    // CPU-side copy of the params; uploaded to `params_buffer` on init/clear.
    params: GpuVisitedFlagsParamsBuffer,
    // Uniform buffer (binding 0) holding `GpuVisitedFlagsParamsBuffer`.
    params_buffer: Arc<gpu::Buffer>,
    // CPU-to-GPU staging buffer used to upload `params`.
    params_staging_buffer: Arc<gpu::Buffer>,
    // Staging buffer for the remap table; only allocated when `remap_buffer` is used.
    remap_staging_buffer: Option<Arc<gpu::Buffer>>,
    // Storage buffer (binding 1) with one flag byte per (group, point slot).
    visited_flags_buffer: Arc<gpu::Buffer>,
    descriptor_set_layout: Arc<gpu::DescriptorSetLayout>,
    descriptor_set: Arc<gpu::DescriptorSet>,
    // Flag slots per group; exported to the shader as VISITED_FLAGS_CAPACITY.
    capacity: usize,
    // Optional point-id remap table (binding 2). Present only when the flags
    // buffer is too large and multiple points share one flag slot (factor > 1).
    remap_buffer: Option<Arc<gpu::Buffer>>,
}

impl ShaderBuilderParameters for GpuVisitedFlags {
    fn shader_includes(&self) -> HashMap<String, String> {
        HashMap::from([(
            "visited_flags.comp".to_string(),
            include_str!("shaders/visited_flags.comp").to_string(),
        )])
    }

    fn shader_defines(&self) -> HashMap<String, Option<String>> {
        let mut defines = HashMap::new();
        defines.insert(
            "VISITED_FLAGS_CAPACITY".to_owned(),
            Some(self.capacity.to_string()),
        );
        // Compile the remap code path into the shader only when a remap buffer exists.
        if self.remap_buffer.is_some() {
            defines.insert("VISITED_FLAGS_REMAP".to_owned(), None);
        }
        defines
    }
}

impl GpuVisitedFlags {
    /// Allocates all GPU buffers and descriptor sets for visited flags,
    /// zero-fills the flags buffer, and uploads the initial params
    /// (generation = 1).
    ///
    /// `factor_range` bounds how many points may share one visited flag when
    /// the full-size flags buffer does not fit into GPU memory
    /// (see `create_flags_buffer`).
    pub fn new(
        device: Arc<gpu::Device>,
        groups_count: usize,
        points_capacity: usize,
        factor_range: std::ops::RangeInclusive<usize>,
    ) -> OperationResult<Self> {
        let params_buffer = gpu::Buffer::new(
            device.clone(),
            "Visited flags params buffer",
            gpu::BufferType::Uniform,
            std::mem::size_of::<GpuVisitedFlagsParamsBuffer>(),
        )?;
        let params_staging_buffer = gpu::Buffer::new(
            device.clone(),
            "Visited flags params staging buffer",
            gpu::BufferType::CpuToGpu,
            std::mem::size_of::<GpuVisitedFlagsParamsBuffer>(),
        )?;
        let (visited_flags_buffer, remap_buffer, capacity) =
            Self::create_flags_buffer(device.clone(), groups_count, points_capacity, factor_range)?;
        // Generation starts at 1 so that the zero-filled flags buffer reads as "not visited".
        let params = GpuVisitedFlagsParamsBuffer { generation: 1 };
        params_staging_buffer.upload(&params, 0)?;
        // One-shot GPU context: clear flags and push initial params, then wait.
        let mut upload_context = gpu::Context::new(device.clone())?;
        upload_context.clear_buffer(visited_flags_buffer.clone())?;
        upload_context.copy_gpu_buffer(
            params_staging_buffer.clone(),
            params_buffer.clone(),
            0,
            0,
            std::mem::size_of::<GpuVisitedFlagsParamsBuffer>(),
        )?;
        upload_context.run()?;
        upload_context.wait_finish(GPU_TIMEOUT)?;
        // Binding 2 (remap table) is only declared when a remap buffer exists;
        // this must stay in sync with the VISITED_FLAGS_REMAP shader define.
        let mut descriptor_set_layout_builder = gpu::DescriptorSetLayout::builder()
            .add_uniform_buffer(0)
            .add_storage_buffer(1);
        if remap_buffer.is_some() {
            descriptor_set_layout_builder = descriptor_set_layout_builder.add_storage_buffer(2);
        }
        let descriptor_set_layout = descriptor_set_layout_builder.build(device.clone())?;
        let mut descriptor_set_builder = gpu::DescriptorSet::builder(descriptor_set_layout.clone())
            .add_uniform_buffer(0, params_buffer.clone())
            .add_storage_buffer(1, visited_flags_buffer.clone());
        if let Some(remap_buffer) = remap_buffer.clone() {
            descriptor_set_builder = descriptor_set_builder.add_storage_buffer(2, remap_buffer);
        }
        let descriptor_set = descriptor_set_builder.build()?;
        let remap_staging_buffer = if remap_buffer.is_some() {
            Some(gpu::Buffer::new(
                device.clone(),
                "Visited flags remap staging buffer",
                gpu::BufferType::CpuToGpu,
                UPLOAD_REMAP_BUFFER_COUNT * std::mem::size_of::<PointOffsetType>(),
            )?)
        } else {
            None
        };
        Ok(Self {
            device,
            params,
            params_buffer,
            params_staging_buffer,
            remap_staging_buffer,
            visited_flags_buffer,
            descriptor_set_layout,
            descriptor_set,
            capacity,
            remap_buffer,
        })
    }

    /// Resets the visited flags for a (re)build: uploads the point-id remap
    /// table (if any), zero-fills the flags buffer, and resets the generation
    /// counter back to 1.
    pub fn init(&mut self, points_remap: &[PointOffsetType]) -> OperationResult<()> {
        let mut context = gpu::Context::new(self.device.clone())?;
        if let Some(remap_buffer) = self.remap_buffer.clone() {
            let Some(remap_staging_buffer) = &self.remap_staging_buffer else {
                return Err(OperationError::from(gpu::GpuError::Other(
                    "Remap staging buffer is not initialized".to_string(),
                )));
            };
            // Upload the remap table chunk by chunk through the staging buffer.
            // NOTE(review): every chunk is copied to destination offset 0 —
            // if `copy_gpu_buffer`'s 4th argument is the dst offset, chunks
            // after the first overwrite each other; verify intended behavior
            // for remap tables larger than UPLOAD_REMAP_BUFFER_COUNT entries.
            for chunk in points_remap.chunks(UPLOAD_REMAP_BUFFER_COUNT) {
                remap_staging_buffer.upload(chunk, 0)?;
                context.copy_gpu_buffer(
                    remap_staging_buffer.clone(),
                    remap_buffer.clone(),
                    0,
                    0,
                    std::mem::size_of_val(chunk),
                )?;
                context.run()?;
                context.wait_finish(GPU_TIMEOUT)?;
            }
        }
        context.clear_buffer(self.visited_flags_buffer.clone())?;
        self.params.generation = 1;
        self.params_staging_buffer.upload(&self.params, 0)?;
        context.copy_gpu_buffer(
            self.params_staging_buffer.clone(),
            self.params_buffer.clone(),
            0,
            0,
            std::mem::size_of::<GpuVisitedFlagsParamsBuffer>(),
        )?;
        context.run()?;
        context.wait_finish(GPU_TIMEOUT)?;
        Ok(())
    }

    /// Logically clears all visited flags by bumping the generation counter.
    /// Only when the u8 generation space is exhausted (255) does it fall back
    /// to physically zero-filling the flags buffer and restarting at 1.
    /// Records commands into `gpu_context`; the caller runs them.
    pub fn clear(&mut self, gpu_context: &mut gpu::Context) -> OperationResult<()> {
        if self.params.generation == 255 {
            self.params.generation = 1;
            gpu_context.clear_buffer(self.visited_flags_buffer.clone())?;
        } else {
            self.params.generation += 1;
        }
        self.params_staging_buffer.upload(&self.params, 0)?;
        gpu_context.copy_gpu_buffer(
            self.params_staging_buffer.clone(),
            self.params_buffer.clone(),
            0,
            0,
            self.params_buffer.size(),
        )?;
        Ok(())
    }

    pub fn descriptor_set_layout(&self) -> Arc<gpu::DescriptorSetLayout> {
        self.descriptor_set_layout.clone()
    }

    pub fn descriptor_set(&self) -> Arc<gpu::DescriptorSet> {
        self.descriptor_set.clone()
    }

    pub fn visited_flags_buffer(&self) -> Arc<gpu::Buffer> {
        self.visited_flags_buffer.clone()
    }

    /// Allocates the flags buffer, trying strategies in order:
    /// 1. If `factor_range` starts at 1 and a full-size buffer (one byte per
    ///    point per group) fits the device limit, allocate it directly —
    ///    no remap needed.
    /// 2. Otherwise allocate a remap table and shrink the per-group capacity
    ///    by doubling `factor` (points sharing one flag) until a buffer fits
    ///    both the device limit and available memory.
    ///
    /// Returns `(flags_buffer, optional_remap_buffer, per_group_capacity)`,
    /// or `OutOfMemory` if no factor in range produced an allocatable buffer.
    fn create_flags_buffer(
        device: Arc<gpu::Device>,
        groups_count: usize,
        points_capacity: usize,
        factor_range: std::ops::RangeInclusive<usize>,
    ) -> OperationResult<(Arc<gpu::Buffer>, Option<Arc<gpu::Buffer>>, usize)> {
        // Round capacities up to u32 alignment for the shader-side layout.
        let alignment = std::mem::size_of::<u32>();
        let points_count = points_capacity.next_multiple_of(alignment);
        let flags_size = groups_count * points_count * std::mem::size_of::<u8>();
        if flags_size < device.max_buffer_size() && *factor_range.start() == 1 {
            let visited_flags_buffer_result = gpu::Buffer::new(
                device.clone(),
                "Visited flags buffer",
                gpu::BufferType::Storage,
                flags_size,
            );
            match visited_flags_buffer_result {
                Ok(visited_flags_buffer) => return Ok((visited_flags_buffer, None, points_count)),
                // Out of memory here is not fatal: fall through to the remap strategy.
                Err(gpu::GpuError::OutOfMemory) => {}
                Err(e) => return Err(OperationError::from(e)),
            }
        }
        let remap_buffer = gpu::Buffer::new(
            device.clone(),
            "Visited flags remap buffer",
            gpu::BufferType::Storage,
            points_capacity * std::mem::size_of::<u32>(),
        )?;
        let mut factor = *factor_range.start();
        while factor <= *factor_range.end() {
            let capacity = (points_count / factor).next_multiple_of(alignment);
            if capacity == 0 {
                break;
            }
            let flags_size = groups_count * capacity * std::mem::size_of::<u8>();
            if flags_size > device.max_buffer_size() {
                factor *= 2;
                continue;
            }
            let visited_flags_buffer_result = gpu::Buffer::new(
                device.clone(),
                "Visited flags buffer",
                gpu::BufferType::Storage,
                flags_size,
            );
            if let Ok(visited_flags_buffer) = visited_flags_buffer_result {
                return Ok((visited_flags_buffer, Some(remap_buffer), capacity));
            }
            factor *= 2;
        }
        Err(OperationError::from(gpu::GpuError::OutOfMemory))
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/shader_builder.rs
lib/segment/src/index/hnsw_index/gpu/shader_builder.rs
use std::collections::HashMap;
use std::sync::Arc;

use crate::common::operation_error::OperationResult;

/// Accumulates GLSL compute-shader source fragments, include files, and
/// preprocessor defines, then compiles the result for one GPU device.
pub struct ShaderBuilder {
    device: Arc<gpu::Device>,
    shader_code: String,
    includes: HashMap<String, String>,
    defines: HashMap<String, Option<String>>,
}

/// Implemented by GPU components that contribute include files and
/// preprocessor defines to a shader under construction.
pub trait ShaderBuilderParameters {
    fn shader_includes(&self) -> HashMap<String, String>;
    fn shader_defines(&self) -> HashMap<String, Option<String>>;
}

impl ShaderBuilder {
    /// Creates a builder for `device`, pre-seeded with the includes every
    /// shader uses (`common.comp`, `extensions.comp`) and the device's
    /// `SUBGROUP_SIZE` define.
    pub fn new(device: Arc<gpu::Device>) -> Self {
        let mut includes = HashMap::new();
        includes.insert(
            "common.comp".to_string(),
            include_str!("shaders/common.comp").to_string(),
        );
        includes.insert(
            "extensions.comp".to_string(),
            include_str!("shaders/extensions.comp").to_string(),
        );

        let subgroup_size = device.subgroup_size().to_string();
        let defines = HashMap::from([("SUBGROUP_SIZE".to_owned(), Some(subgroup_size))]);

        Self {
            device,
            shader_code: String::new(),
            includes,
            defines,
        }
    }

    /// Merges the includes and defines contributed by `parameters` into this
    /// builder. Entries with the same key override previously added ones.
    pub fn with_parameters<T: ShaderBuilderParameters>(&mut self, parameters: &T) -> &mut Self {
        let extra_includes = parameters.shader_includes();
        let extra_defines = parameters.shader_defines();
        self.includes.extend(extra_includes);
        self.defines.extend(extra_defines);
        self
    }

    /// Appends one shader source fragment, followed by a newline separator.
    pub fn with_shader_code(&mut self, shader_code: &str) -> &mut Self {
        self.shader_code.push_str(shader_code);
        self.shader_code.push('\n');
        self
    }

    /// Compiles the accumulated source with the collected defines/includes
    /// and wraps it into a shader object for this builder's device.
    /// Compilation time is logged at debug level.
    pub fn build(&self, shader_name: &str) -> OperationResult<Arc<gpu::Shader>> {
        let started_at = std::time::Instant::now();
        let compiled = self.device.instance().compile_shader(
            &self.shader_code,
            shader_name,
            Some(&self.defines),
            Some(&self.includes),
        )?;
        log::debug!("Shader compilation took: {:?}", started_at.elapsed());
        let shader = gpu::Shader::new(self.device.clone(), &compiled)?;
        Ok(shader)
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/mod.rs
lib/segment/src/index/hnsw_index/gpu/mod.rs
pub mod batched_points;
pub mod gpu_devices_manager;
pub mod gpu_graph_builder;
pub mod gpu_insert_context;
pub mod gpu_level_builder;
pub mod gpu_links;
pub mod gpu_vector_storage;
pub mod gpu_visited_flags;
pub mod shader_builder;

#[cfg(test)]
mod gpu_heap_tests;

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

use batched_points::BatchedPoints;
use gpu_devices_manager::GpuDevicesMaganer;
use lazy_static::lazy_static;
use parking_lot::RwLock;

use super::graph_layers_builder::GraphLayersBuilder;
use crate::index::hnsw_index::HnswM;

// Global, lazily-initialized pool of GPU devices shared by all HNSW builds.
// `None` until a devices manager is installed by configuration code.
lazy_static! {
    pub static ref GPU_DEVICES_MANAGER: RwLock<Option<GpuDevicesMaganer>> = RwLock::new(None);
}

/// Each GPU operation has a timeout by Vulkan API specification.
/// Choose large enough timeout.
/// We cannot use too small timeout and check stopper in the loop because
/// GPU resources should be alive while GPU operation is in progress.
static GPU_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60);

/// Warps count for GPU.
/// In other words, how many parallel points can be indexed by GPU.
static GPU_GROUPS_COUNT: AtomicUsize = AtomicUsize::new(GPU_GROUPS_COUNT_DEFAULT);
pub const GPU_GROUPS_COUNT_DEFAULT: usize = 512;

/// Global option from settings to force half precision on GPU for `f32` values.
static GPU_FORCE_HALF_PRECISION: AtomicBool = AtomicBool::new(false);

/// Sets the global half-precision flag (relaxed atomic store).
pub fn set_gpu_force_half_precision(force_half_precision: bool) {
    GPU_FORCE_HALF_PRECISION.store(force_half_precision, Ordering::Relaxed);
}

/// Reads the global half-precision flag (relaxed atomic load).
pub fn get_gpu_force_half_precision() -> bool {
    GPU_FORCE_HALF_PRECISION.load(Ordering::Relaxed)
}

/// Overrides the global parallel-groups count; `None` keeps the current value.
pub fn set_gpu_groups_count(groups_count: Option<usize>) {
    if let Some(groups_count) = groups_count {
        GPU_GROUPS_COUNT.store(groups_count, Ordering::Relaxed);
    }
}

/// Reads the global parallel-groups count (relaxed atomic load).
pub fn get_gpu_groups_count() -> usize {
    GPU_GROUPS_COUNT.load(Ordering::Relaxed)
}

/// Creates a `GraphLayersBuilder` pre-populated for GPU construction:
/// registers the first batched point as the entry point at the top level,
/// marks it ready, and assigns each batched point its level.
/// Returns an empty builder when `batched_points` has no points.
fn create_graph_layers_builder(
    batched_points: &BatchedPoints,
    num_vectors: usize,
    hnsw_m: HnswM,
    ef: usize,
    entry_points_num: usize,
) -> GraphLayersBuilder {
    // create graph layers builder
    let mut graph_layers_builder =
        GraphLayersBuilder::new(num_vectors, hnsw_m, ef, entry_points_num, true);
    if let Some(first_point_id) = batched_points.first_point_id() {
        // set first entry point
        graph_layers_builder.get_entry_points().new_point(
            first_point_id,
            batched_points.levels_count() - 1,
            |_| true,
        );
        graph_layers_builder.set_ready(first_point_id);
        // set levels
        graph_layers_builder.set_levels(first_point_id, batched_points.levels_count() - 1);
        for batch in batched_points.iter_batches(0) {
            for linking_point in batch.points {
                graph_layers_builder.set_levels(linking_point.point_id, batch.level);
            }
        }
    }
    graph_layers_builder
}

#[cfg(test)]
mod tests {
    use ahash::HashSet;
    use common::counter::hardware_counter::HardwareCounterCell;
    use common::types::PointOffsetType;
    use rand::SeedableRng;
    use rand::rngs::StdRng;

    use super::batched_points::BatchedPoints;
    use crate::data_types::vectors::DenseVector;
    use crate::fixtures::index_fixtures::TestRawScorerProducer;
    use crate::fixtures::payload_fixtures::random_vector;
    use crate::index::hnsw_index::HnswM;
    use crate::index::hnsw_index::graph_layers::{GraphLayers, SearchAlgorithm};
    use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;
    use crate::index::hnsw_index::graph_links::GraphLinksFormatParam;
    use crate::types::Distance;
    use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
    use crate::vector_storage::{DEFAULT_STOPPED, Random, VectorStorage, VectorStorageEnum};

    // Shared fixture for GPU graph tests: a vector storage, a reference
    // CPU-built graph, and a set of random query vectors.
    pub struct GpuGraphTestData {
        pub vector_storage: VectorStorageEnum,
        pub vector_holder: TestRawScorerProducer,
        pub graph_layers_builder: GraphLayersBuilder,
        pub search_vectors: Vec<DenseVector>,
    }

    // Builds the fixture: seeds an RNG, fills a volatile dense storage with
    // random cosine vectors, links a reference CPU HNSW graph in level order,
    // and generates `search_counts` random query vectors.
    pub fn create_gpu_graph_test_data(
        num_vectors: usize,
        dim: usize,
        hnsw_m: HnswM,
        ef: usize,
        search_counts: usize,
    ) -> GpuGraphTestData {
        // Generate random vectors
        let mut rng = StdRng::seed_from_u64(42);
        let vector_holder =
            TestRawScorerProducer::new(dim, Distance::Cosine, num_vectors, false, &mut rng);
        // upload vectors to storage
        let mut storage = new_volatile_dense_vector_storage(dim, Distance::Cosine);
        for idx in 0..num_vectors as PointOffsetType {
            let v = vector_holder.storage().get_vector::<Random>(idx);
            storage
                .insert_vector(idx, v.as_vec_ref(), &HardwareCounterCell::new())
                .unwrap();
        }
        // Build HNSW index
        let mut graph_layers_builder = GraphLayersBuilder::new(num_vectors, hnsw_m, ef, 1, true);
        for idx in 0..(num_vectors as PointOffsetType) {
            let level = graph_layers_builder.get_random_layer(&mut rng);
            graph_layers_builder.set_levels(idx, level);
        }
        // Link points from the highest level down, matching GPU batch order.
        let mut ids: Vec<_> = (0..num_vectors as PointOffsetType).collect();
        BatchedPoints::sort_points_by_level(
            |point_id| graph_layers_builder.get_point_level(point_id),
            &mut ids,
        );
        for &idx in &ids {
            let scorer = vector_holder.internal_scorer(idx);
            graph_layers_builder.link_new_point(idx, scorer);
        }
        let search_vectors = (0..search_counts)
            .map(|_| random_vector(&mut rng, dim))
            .collect();
        GpuGraphTestData {
            vector_storage: storage,
            vector_holder,
            graph_layers_builder,
            search_vectors,
        }
    }

    // Asserts two builders are structurally identical: same point levels and
    // byte-identical link lists on every layer, from the top level down.
    pub fn compare_graph_layers_builders(
        graph_a: &GraphLayersBuilder,
        graph_b: &GraphLayersBuilder,
    ) {
        assert_eq!(graph_a.links_layers().len(), graph_b.links_layers().len());
        let num_vectors = graph_a.links_layers().len();
        for point_id in 0..num_vectors as PointOffsetType {
            let levels_a = graph_a.get_point_level(point_id);
            let levels_b = graph_b.get_point_level(point_id);
            assert_eq!(levels_a, levels_b);
            for level in (0..levels_a + 1).rev() {
                let links_a = graph_a.links_layers()[point_id as usize][level]
                    .read()
                    .links()
                    .to_vec();
                let links_b = graph_b.links_layers()[point_id as usize][level]
                    .read()
                    .links()
                    .to_vec();
                // Log before asserting so the failing location is identifiable.
                if links_a != links_b {
                    log::error!("Wrong links point_id={point_id} at level {level}");
                }
                assert_eq!(links_a, links_b);
            }
        }
    }

    // Searches every fixture query in both the candidate graph and the
    // reference graph and asserts that the overlap of top results reaches
    // at least the `accuracy` fraction of all requested results.
    pub fn check_graph_layers_builders_quality(
        graph: GraphLayersBuilder,
        test: GpuGraphTestData,
        top: usize,
        ef: usize,
        accuracy: f32,
    ) {
        let graph: GraphLayers = graph.into_graph_layers_ram(GraphLinksFormatParam::Plain);
        let ref_graph: GraphLayers = test
            .graph_layers_builder
            .into_graph_layers_ram(GraphLinksFormatParam::Plain);
        let mut total_sames = 0;
        let total_top = top * test.search_vectors.len();
        for search_vector in &test.search_vectors {
            let scorer = test.vector_holder.scorer(search_vector.clone());
            let search_result_gpu = graph
                .search(
                    top,
                    ef,
                    SearchAlgorithm::Hnsw,
                    scorer,
                    None,
                    &DEFAULT_STOPPED,
                )
                .unwrap();
            let scorer = test.vector_holder.scorer(search_vector.clone());
            let search_result_cpu = ref_graph
                .search(
                    top,
                    ef,
                    SearchAlgorithm::Hnsw,
                    scorer,
                    None,
                    &DEFAULT_STOPPED,
                )
                .unwrap();
            let mut gpu_set = HashSet::default();
            let mut cpu_set = HashSet::default();
            for (gpu_id, cpu_id) in search_result_gpu.iter().zip(search_result_cpu.iter()) {
                gpu_set.insert(gpu_id.idx);
                cpu_set.insert(cpu_id.idx);
            }
            total_sames += gpu_set.intersection(&cpu_set).count();
        }
        assert!(
            total_sames as f32 >= total_top as f32 * accuracy,
            "sames: {total_sames}, total_top: {total_top}, div {}",
            total_sames as f32 / total_top as f32,
        );
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_insert_context.rs
lib/segment/src/index/hnsw_index/gpu/gpu_insert_context.rs
use std::collections::HashMap; use std::sync::Arc; use std::sync::atomic::AtomicBool; use common::types::PointOffsetType; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use super::GPU_TIMEOUT; use super::gpu_links::GpuLinks; use super::gpu_vector_storage::GpuVectorStorage; use super::gpu_visited_flags::GpuVisitedFlags; use super::shader_builder::ShaderBuilderParameters; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::hnsw_index::HnswM; use crate::index::hnsw_index::gpu::shader_builder::ShaderBuilder; use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder; /// If EF is less than this value, we use linear search instead of binary heap. const MIN_POINTS_FOR_BINARY_HEAP: usize = 512; #[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] pub struct GpuRequest { pub id: PointOffsetType, pub entry: PointOffsetType, } /// Structure to perform insert and update entries operations on GPU. /// It handles all GPU resources and shaders instead of gpu vector storage, /// which may be shared between multiple hnsw constructions. 
pub struct GpuInsertContext<'a> { context: gpu::Context, groups_count: usize, gpu_vector_storage: &'a GpuVectorStorage, gpu_links: GpuLinks, gpu_visited_flags: GpuVisitedFlags, insert_resources: GpuInsertResources, greedy_pipeline: Arc<gpu::Pipeline>, insert_pipeline: Arc<gpu::Pipeline>, updates_timer: std::time::Duration, updates_count: usize, patches_timer: std::time::Duration, patches_count: usize, } struct GpuInsertResources { requests_buffer: Arc<gpu::Buffer>, requests_staging_buffer: Arc<gpu::Buffer>, responses_buffer: Arc<gpu::Buffer>, responses_staging_buffer: Arc<gpu::Buffer>, insert_atomics_buffer: Arc<gpu::Buffer>, greedy_descriptor_set_layout: Arc<gpu::DescriptorSetLayout>, greedy_descriptor_set: Arc<gpu::DescriptorSet>, insert_descriptor_set_layout: Arc<gpu::DescriptorSetLayout>, insert_descriptor_set: Arc<gpu::DescriptorSet>, exact: bool, ef: usize, } impl ShaderBuilderParameters for GpuInsertResources { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([ ( "shared_buffer.comp".to_string(), include_str!("shaders/shared_buffer.comp").to_string(), ), ( "bheap.comp".to_string(), include_str!("shaders/bheap.comp").to_string(), ), ( "search_context.comp".to_string(), include_str!("shaders/search_context.comp").to_string(), ), ]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); defines.insert("EF".to_owned(), Some(self.ef.to_string())); if self.exact { defines.insert("EXACT".to_owned(), None); } if self.ef < MIN_POINTS_FOR_BINARY_HEAP { defines.insert("BHEAP_LINEAR".to_owned(), None); } defines } } impl GpuInsertResources { #[allow(clippy::too_many_arguments)] pub fn new( gpu_vector_storage: &GpuVectorStorage, groups_count: usize, points_capacity: usize, ef: usize, exact: bool, ) -> OperationResult<Self> { let device = gpu_vector_storage.device(); let requests_buffer = gpu::Buffer::new( device.clone(), "Search requests buffer", gpu::BufferType::Storage, groups_count * 
std::mem::size_of::<GpuRequest>(), )?; let requests_staging_buffer = gpu::Buffer::new( device.clone(), "Search context upload staging buffer", gpu::BufferType::CpuToGpu, requests_buffer.size(), )?; let responses_buffer = gpu::Buffer::new( device.clone(), "Search responses buffer", gpu::BufferType::Storage, groups_count * std::mem::size_of::<PointOffsetType>(), )?; let responses_staging_buffer = gpu::Buffer::new( device.clone(), "Search context download staging buffer", gpu::BufferType::GpuToCpu, responses_buffer.size(), )?; let insert_atomics_buffer = gpu::Buffer::new( device.clone(), "Insert atomics buffer", gpu::BufferType::Storage, points_capacity * std::mem::size_of::<u32>(), )?; let greedy_descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_storage_buffer(0) .add_storage_buffer(1) .build(device.clone())?; let greedy_descriptor_set = gpu::DescriptorSet::builder(greedy_descriptor_set_layout.clone()) .add_storage_buffer(0, requests_buffer.clone()) .add_storage_buffer(1, responses_buffer.clone()) .build()?; let insert_descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_storage_buffer(0) .add_storage_buffer(1) .add_storage_buffer(2) .build(device.clone())?; let insert_descriptor_set = gpu::DescriptorSet::builder(insert_descriptor_set_layout.clone()) .add_storage_buffer(0, requests_buffer.clone()) .add_storage_buffer(1, responses_buffer.clone()) .add_storage_buffer(2, insert_atomics_buffer.clone()) .build()?; Ok(Self { requests_buffer, requests_staging_buffer, responses_buffer, responses_staging_buffer, insert_atomics_buffer, greedy_descriptor_set_layout, greedy_descriptor_set, insert_descriptor_set_layout, insert_descriptor_set, exact, ef, }) } } impl<'a> GpuInsertContext<'a> { #[allow(clippy::too_many_arguments)] pub fn new( gpu_vector_storage: &'a GpuVectorStorage, // Parallel inserts count. groups_count: usize, hnsw_m: HnswM, ef: usize, // If true, guarantee equality of result with CPU version for both single-threaded case. 
// Required for tests. exact: bool, // If points count is very big, we share visited flags buffer between multiple points. // This parameter sets a factor how many points can share one visited flag. visited_flags_factor_range: std::ops::RangeInclusive<usize>, ) -> OperationResult<Self> { debug_assert!(groups_count > 0 && gpu_vector_storage.num_vectors() > 0); let device = gpu_vector_storage.device(); let points_count = gpu_vector_storage.num_vectors(); let insert_resources = GpuInsertResources::new(gpu_vector_storage, groups_count, points_count, ef, exact)?; let gpu_links = GpuLinks::new(device.clone(), hnsw_m.m, hnsw_m.m0, points_count)?; let gpu_visited_flags = GpuVisitedFlags::new( device.clone(), groups_count, points_count, visited_flags_factor_range, )?; let greedy_search_shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/run_greedy_search.comp")) .with_parameters(gpu_vector_storage) .with_parameters(&gpu_links) .with_parameters(&gpu_visited_flags) .with_parameters(&insert_resources) .build("run_greedy_search.comp")?; let insert_shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/run_insert_vector.comp")) .with_parameters(gpu_vector_storage) .with_parameters(&gpu_links) .with_parameters(&gpu_visited_flags) .with_parameters(&insert_resources) .build("run_insert_vector.comp")?; let greedy_pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, insert_resources.greedy_descriptor_set_layout.clone()) .add_descriptor_set_layout(1, gpu_vector_storage.descriptor_set_layout()) .add_descriptor_set_layout(2, gpu_links.descriptor_set_layout()) .add_descriptor_set_layout(3, gpu_visited_flags.descriptor_set_layout()) .add_shader(greedy_search_shader.clone()) .build(device.clone())?; let insert_pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, insert_resources.insert_descriptor_set_layout.clone()) .add_descriptor_set_layout(1, gpu_vector_storage.descriptor_set_layout()) 
.add_descriptor_set_layout(2, gpu_links.descriptor_set_layout()) .add_descriptor_set_layout(3, gpu_visited_flags.descriptor_set_layout()) .add_shader(insert_shader.clone()) .build(device.clone())?; let mut context = gpu::Context::new(device)?; context.clear_buffer(insert_resources.insert_atomics_buffer.clone())?; context.run()?; context.wait_finish(GPU_TIMEOUT)?; Ok(Self { insert_resources, gpu_vector_storage, gpu_links, gpu_visited_flags, context, groups_count, greedy_pipeline, insert_pipeline, updates_timer: Default::default(), updates_count: 0, patches_timer: Default::default(), patches_count: 0, }) } pub fn init(&mut self, remap: &[PointOffsetType]) -> OperationResult<()> { self.gpu_visited_flags.init(remap)?; self.gpu_links.clear(&mut self.context)?; self.context .clear_buffer(self.insert_resources.insert_atomics_buffer.clone())?; Ok(()) } pub fn download_responses(&mut self, count: usize) -> OperationResult<Vec<PointOffsetType>> { self.context.copy_gpu_buffer( self.insert_resources.responses_buffer.clone(), self.insert_resources.responses_staging_buffer.clone(), 0, 0, count * std::mem::size_of::<PointOffsetType>(), )?; self.context.run()?; self.context.wait_finish(GPU_TIMEOUT)?; Ok(self .insert_resources .responses_staging_buffer .download_vec::<PointOffsetType>(0, count)?) 
} pub fn greedy_search( &mut self, requests: &[GpuRequest], prev_results_count: usize, ) -> OperationResult<Vec<PointOffsetType>> { if requests.len() > self.groups_count { return Err(OperationError::service_error( "Too many gpu greedy search requests", )); } let timer = std::time::Instant::now(); // upload requests self.insert_resources .requests_staging_buffer .upload(requests, 0)?; self.context.copy_gpu_buffer( self.insert_resources.requests_staging_buffer.clone(), self.insert_resources.requests_buffer.clone(), 0, 0, std::mem::size_of_val(requests), )?; // download previous results if prev_results_count > 0 { self.context.copy_gpu_buffer( self.insert_resources.responses_buffer.clone(), self.insert_resources.responses_staging_buffer.clone(), 0, 0, prev_results_count * std::mem::size_of::<PointOffsetType>(), )?; } self.context.run()?; self.context.wait_finish(GPU_TIMEOUT)?; self.context.bind_pipeline( self.greedy_pipeline.clone(), &[ self.insert_resources.greedy_descriptor_set.clone(), self.gpu_vector_storage.descriptor_set(), self.gpu_links.descriptor_set(), self.gpu_visited_flags.descriptor_set(), ], )?; self.context.dispatch(requests.len(), 1, 1)?; self.context .barrier_buffers(std::slice::from_ref( &self.insert_resources.responses_buffer, )) .unwrap(); self.context.run()?; self.context.wait_finish(GPU_TIMEOUT)?; self.updates_timer += timer.elapsed(); self.updates_count += 1; if prev_results_count > 0 { Ok(self .insert_resources .responses_staging_buffer .download_vec::<PointOffsetType>(0, prev_results_count)?) 
} else { Ok(vec![]) } } pub fn run_insert_vector( &mut self, requests: &[GpuRequest], prev_results_count: usize, ) -> OperationResult<Vec<PointOffsetType>> { if requests.len() > self.groups_count { return Err(OperationError::service_error("Too many gpu patch requests")); } let timer = std::time::Instant::now(); self.gpu_visited_flags.clear(&mut self.context)?; // upload requests self.insert_resources .requests_staging_buffer .upload(requests, 0)?; self.context.copy_gpu_buffer( self.insert_resources.requests_staging_buffer.clone(), self.insert_resources.requests_buffer.clone(), 0, 0, std::mem::size_of_val(requests), )?; // download previous results if prev_results_count > 0 { self.context.copy_gpu_buffer( self.insert_resources.responses_buffer.clone(), self.insert_resources.responses_staging_buffer.clone(), 0, 0, prev_results_count * std::mem::size_of::<PointOffsetType>(), )?; } self.context.run()?; self.context.wait_finish(GPU_TIMEOUT)?; self.context.bind_pipeline( self.insert_pipeline.clone(), &[ self.insert_resources.insert_descriptor_set.clone(), self.gpu_vector_storage.descriptor_set(), self.gpu_links.descriptor_set(), self.gpu_visited_flags.descriptor_set(), ], )?; self.context.dispatch(requests.len(), 1, 1)?; self.context .barrier_buffers(&[ self.insert_resources.responses_buffer.clone(), self.insert_resources.insert_atomics_buffer.clone(), self.gpu_links.links_buffer(), self.gpu_visited_flags.visited_flags_buffer(), ]) .unwrap(); self.context.run()?; self.context.wait_finish(GPU_TIMEOUT)?; self.patches_timer += timer.elapsed(); self.patches_count += 1; if prev_results_count > 0 { let gpu_responses = self .insert_resources .responses_staging_buffer .download_vec::<PointOffsetType>(0, prev_results_count)?; Ok(gpu_responses) } else { Ok(vec![]) } } pub fn upload_links( &mut self, level: usize, graph_layers_builder: &GraphLayersBuilder, stopped: &AtomicBool, ) -> OperationResult<()> { self.gpu_links .upload_links(level, graph_layers_builder, &mut self.context, 
stopped) } pub fn download_links( &mut self, level: usize, graph_layers_builder: &GraphLayersBuilder, stopped: &AtomicBool, ) -> OperationResult<()> { self.gpu_links .download_links(level, graph_layers_builder, &mut self.context, stopped) } pub fn clear(&mut self, new_m: usize) -> OperationResult<()> { self.gpu_links.update_params(&mut self.context, new_m)?; self.gpu_links.clear(&mut self.context)?; Ok(()) } pub fn log_measurements(&self) { log::debug!( "Gpu graph patches time: {:?}, count {:?}, avg {:?}", &self.patches_timer, self.patches_count, self.patches_timer .checked_div(self.patches_count as u32) .unwrap_or_default(), ); log::debug!( "Gpu graph update entries time: {:?}, count {:?}, avg {:?}", &self.updates_timer, self.updates_count, self.updates_timer .checked_div(self.updates_count as u32) .unwrap_or_default(), ); } } #[cfg(test)] mod tests { use common::counter::hardware_counter::HardwareCounterCell; use common::types::ScoredPointOffset; use itertools::Itertools; use rand::SeedableRng; use rand::rngs::StdRng; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use super::*; use crate::fixtures::index_fixtures::TestRawScorerProducer; use crate::index::hnsw_index::graph_layers::GraphLayersBase; use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder; use crate::index::hnsw_index::links_container::LinksContainer; use crate::types::Distance; use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage; use crate::vector_storage::{DEFAULT_STOPPED, Random, VectorStorage}; #[derive(Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] struct TestSearchRequest { id: PointOffsetType, entry: PointOffsetType, } struct TestData { groups_count: usize, m: usize, ef: usize, gpu_vector_storage: GpuVectorStorage, vector_holder: TestRawScorerProducer, graph_layers_builder: GraphLayersBuilder, } fn create_test_data( num_vectors: usize, groups_count: usize, dim: usize, m: usize, ef: usize, ) -> 
TestData { // Generate random vectors let mut rng = StdRng::seed_from_u64(42); let vector_holder = TestRawScorerProducer::new( dim, Distance::Dot, num_vectors + groups_count, false, &mut rng, ); // upload vectors to storage let mut storage = new_volatile_dense_vector_storage(dim, Distance::Dot); for idx in 0..(num_vectors + groups_count) as PointOffsetType { let v = vector_holder.storage().get_vector::<Random>(idx); storage .insert_vector(idx, v.as_vec_ref(), &HardwareCounterCell::new()) .unwrap(); } // Build HNSW index let mut graph_layers_builder = GraphLayersBuilder::new(num_vectors, HnswM::new(m, m), ef, 1, true); for idx in 0..(num_vectors as PointOffsetType) { let level = graph_layers_builder.get_random_layer(&mut rng); graph_layers_builder.set_levels(idx, level); } for idx in 0..(num_vectors as PointOffsetType) { graph_layers_builder.link_new_point(idx, vector_holder.internal_scorer(idx)); } // Create GPU search context let instance = gpu::GPU_TEST_INSTANCE.clone(); let device = gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap(); let gpu_vector_storage = GpuVectorStorage::new(device.clone(), &storage, None, false, &false.into()).unwrap(); TestData { groups_count, m, ef, gpu_vector_storage, vector_holder, graph_layers_builder, } } fn create_insert_context(test_data: &TestData) -> GpuInsertContext<'_> { let total_num_vectors = test_data.gpu_vector_storage.num_vectors() + test_data.groups_count; let point_ids = (0..total_num_vectors as PointOffsetType).collect_vec(); let mut gpu_insert_context = GpuInsertContext::new( &test_data.gpu_vector_storage, test_data.groups_count, HnswM::new(test_data.m, test_data.m), test_data.ef, true, 1..=32, ) .unwrap(); gpu_insert_context.init(&point_ids).unwrap(); gpu_insert_context .upload_links(0, &test_data.graph_layers_builder, &false.into()) .unwrap(); gpu_insert_context } #[test] fn test_gpu_hnsw_search_on_level() { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) 
.try_init(); let num_vectors = 1024; let groups_count = 8; let dim = 64; let m = 16; let ef = 32; let test = create_test_data(num_vectors, groups_count, dim, m, ef); let device = test.gpu_vector_storage.device(); let mut gpu_insert_context = create_insert_context(&test); let search_responses_buffer = gpu::Buffer::new( device.clone(), "Search responses buffer", gpu::BufferType::Storage, groups_count * ef * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); let download_staging_buffer = gpu::Buffer::new( device.clone(), "Search context download staging buffer", gpu::BufferType::GpuToCpu, search_responses_buffer.size(), ) .unwrap(); let search_shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/tests/test_hnsw_search.comp")) .with_parameters(gpu_insert_context.gpu_vector_storage) .with_parameters(&gpu_insert_context.gpu_links) .with_parameters(&gpu_insert_context.gpu_visited_flags) .with_parameters(&gpu_insert_context.insert_resources) .build("tests/test_hnsw_search.comp") .unwrap(); let search_descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_storage_buffer(0) .add_storage_buffer(1) .build(device.clone()) .unwrap(); let search_descriptor_set = gpu::DescriptorSet::builder(search_descriptor_set_layout.clone()) .add_storage_buffer( 0, gpu_insert_context.insert_resources.requests_buffer.clone(), ) .add_storage_buffer(1, search_responses_buffer.clone()) .build() .unwrap(); let search_pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, search_descriptor_set_layout.clone()) .add_descriptor_set_layout( 1, gpu_insert_context .gpu_vector_storage .descriptor_set_layout(), ) .add_descriptor_set_layout(2, gpu_insert_context.gpu_links.descriptor_set_layout()) .add_descriptor_set_layout( 3, gpu_insert_context.gpu_visited_flags.descriptor_set_layout(), ) .add_shader(search_shader.clone()) .build(device.clone()) .unwrap(); // create request data let mut search_requests = vec![]; for i in 0..groups_count { 
search_requests.push(GpuRequest { id: (num_vectors + i) as PointOffsetType, entry: 0, }); } let mut search = |requests: &[GpuRequest]| { gpu_insert_context .gpu_visited_flags .clear(&mut gpu_insert_context.context) .unwrap(); gpu_insert_context .insert_resources .requests_staging_buffer .upload(requests, 0) .unwrap(); gpu_insert_context .context .copy_gpu_buffer( gpu_insert_context .insert_resources .requests_staging_buffer .clone(), gpu_insert_context.insert_resources.requests_buffer.clone(), 0, 0, std::mem::size_of_val(requests), ) .unwrap(); gpu_insert_context.context.run().unwrap(); gpu_insert_context.context.wait_finish(GPU_TIMEOUT).unwrap(); gpu_insert_context .context .bind_pipeline( search_pipeline.clone(), &[ search_descriptor_set.clone(), gpu_insert_context.gpu_vector_storage.descriptor_set(), gpu_insert_context.gpu_links.descriptor_set(), gpu_insert_context.gpu_visited_flags.descriptor_set(), ], ) .unwrap(); gpu_insert_context .context .dispatch(requests.len(), 1, 1) .unwrap(); gpu_insert_context.context.run().unwrap(); gpu_insert_context.context.wait_finish(GPU_TIMEOUT).unwrap(); // Download response gpu_insert_context .context .copy_gpu_buffer( search_responses_buffer.clone(), download_staging_buffer.clone(), 0, 0, requests.len() * ef * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); gpu_insert_context.context.run().unwrap(); gpu_insert_context.context.wait_finish(GPU_TIMEOUT).unwrap(); download_staging_buffer .download_vec::<ScoredPointOffset>(0, requests.len() * ef) .unwrap() .chunks(ef) .map(|r| { r.iter() .take_while(|s| s.idx != PointOffsetType::MAX) .cloned() .collect_vec() }) .collect_vec() }; let gpu_responses_1 = search(&search_requests); // restart search to check reset let gpu_responses_2 = search(&search_requests); // Check response for i in 0..groups_count { let mut scorer = test .vector_holder .internal_scorer((num_vectors + i) as PointOffsetType); let entry = ScoredPointOffset { idx: 0, score: scorer.score_point(0), }; let 
search_result = test .graph_layers_builder .search_on_level(entry, 0, ef, &mut scorer, &DEFAULT_STOPPED) .unwrap() .into_sorted_vec(); for (cpu, (gpu_1, gpu_2)) in search_result .iter() .zip(gpu_responses_1[i].iter().zip(gpu_responses_2[i].iter())) { assert_eq!(cpu.idx, gpu_1.idx); assert_eq!(cpu.idx, gpu_2.idx); assert!((cpu.score - gpu_1.score).abs() < 1e-5); assert!((cpu.score - gpu_2.score).abs() < 1e-5); } } } #[test] fn test_gpu_greedy_search() { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let num_vectors = 1024; let groups_count = 8; let dim = 64; let m = 16; let ef = 32; let test = create_test_data(num_vectors, groups_count, dim, m, ef); let mut gpu_insert_context = create_insert_context(&test); // create request data let mut search_requests = vec![]; for i in 0..groups_count { search_requests.push(GpuRequest { id: (num_vectors + i) as PointOffsetType, entry: 0, }); } gpu_insert_context .greedy_search(&search_requests, 0) .unwrap(); let gpu_responses = gpu_insert_context.download_responses(groups_count).unwrap(); // Check response for (i, &gpu_search_result) in gpu_responses.iter().enumerate() { let mut scorer = test .vector_holder .internal_scorer((num_vectors + i) as PointOffsetType); let search_result = test.graph_layers_builder .search_entry_on_level(0, 0, &mut scorer, &mut Vec::new()); assert_eq!(search_result.idx, gpu_search_result); } } #[test] fn test_gpu_heuristic() { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let num_vectors = 1024; let groups_count = 8; let dim = 64; let m = 16; let ef = 32; let test = create_test_data(num_vectors, groups_count, dim, m, ef); let device = test.gpu_vector_storage.device(); let mut gpu_insert_context = create_insert_context(&test); // create request data let mut search_requests = vec![]; for i in 0..groups_count { search_requests.push(TestSearchRequest { id: (num_vectors + i) as PointOffsetType, entry: 0, }); } 
// upload search requests to GPU let search_requests_buffer = gpu::Buffer::new( device.clone(), "Search requests buffer", gpu::BufferType::Storage, search_requests.len() * std::mem::size_of::<TestSearchRequest>(), ) .unwrap(); let upload_staging_buffer = gpu::Buffer::new( device.clone(), "Search context upload staging buffer", gpu::BufferType::CpuToGpu, search_requests.len() * std::mem::size_of::<TestSearchRequest>(), ) .unwrap(); upload_staging_buffer .upload(search_requests.as_slice(), 0) .unwrap(); gpu_insert_context .context .copy_gpu_buffer( upload_staging_buffer.clone(), search_requests_buffer.clone(), 0, 0, search_requests_buffer.size(), ) .unwrap(); gpu_insert_context.context.run().unwrap(); gpu_insert_context.context.wait_finish(GPU_TIMEOUT).unwrap(); // create response and response staging buffers let responses_buffer = gpu::Buffer::new( device.clone(), "Search responses buffer", gpu::BufferType::Storage, groups_count * ef * std::mem::size_of::<ScoredPointOffset>(), ) .unwrap(); let responses_staging_buffer = gpu::Buffer::new( device.clone(), "Search responses staging buffer", gpu::BufferType::GpuToCpu, responses_buffer.size(), ) .unwrap(); // Create test pipeline let shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("shaders/tests/test_heuristic.comp")) .with_parameters(gpu_insert_context.gpu_vector_storage) .with_parameters(&gpu_insert_context.gpu_links) .with_parameters(&gpu_insert_context.gpu_visited_flags) .with_parameters(&gpu_insert_context.insert_resources) .build("tests/test_heuristic.comp") .unwrap(); let descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_storage_buffer(0) .add_storage_buffer(1) .build(device.clone()) .unwrap(); let descriptor_set = gpu::DescriptorSet::builder(descriptor_set_layout.clone()) .add_storage_buffer(0, search_requests_buffer.clone()) .add_storage_buffer(1, responses_buffer.clone()) .build() .unwrap(); let pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, 
descriptor_set_layout.clone()) .add_descriptor_set_layout( 1, gpu_insert_context .gpu_vector_storage .descriptor_set_layout(), ) .add_descriptor_set_layout(2, gpu_insert_context.gpu_links.descriptor_set_layout()) .add_descriptor_set_layout( 3, gpu_insert_context.gpu_visited_flags.descriptor_set_layout(), ) .add_shader(shader.clone())
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_graph_builder.rs
lib/segment/src/index/hnsw_index/gpu/gpu_graph_builder.rs
use std::sync::atomic::AtomicBool; use common::types::PointOffsetType; use crate::common::check_stopped; use crate::common::operation_error::OperationResult; use crate::index::hnsw_index::gpu::batched_points::BatchedPoints; use crate::index::hnsw_index::gpu::create_graph_layers_builder; use crate::index::hnsw_index::gpu::gpu_insert_context::GpuInsertContext; use crate::index::hnsw_index::gpu::gpu_level_builder::build_level_on_gpu; use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder; use crate::index::hnsw_index::point_scorer::FilteredScorer; /// Maximum count of point IDs per visited flag. pub static GPU_MAX_VISITED_FLAGS_FACTOR: usize = 32; /// Build HNSW graph on GPU. #[allow(clippy::too_many_arguments)] pub fn build_hnsw_on_gpu<'a, 'b>( gpu_insert_context: &mut GpuInsertContext<'b>, // Graph with all settings like m, ef, levels, etc. reference_graph: &GraphLayersBuilder, // Parallel inserts count. groups_count: usize, // Number of entry points of hnsw graph. entry_points_num: usize, // Amount of first points to link on CPU. cpu_linked_points: usize, // Point IDs to insert. // In payload blocks we need to use subset of all points. ids: Vec<PointOffsetType>, // Scorer builder for CPU build. points_scorer_builder: impl Fn(PointOffsetType) -> OperationResult<FilteredScorer<'a>> + Send + Sync, stopped: &AtomicBool, ) -> OperationResult<GraphLayersBuilder> { let num_vectors = reference_graph.links_layers().len(); let hnsw_m = reference_graph.hnsw_m(); let ef = std::cmp::max(reference_graph.ef_construct(), hnsw_m.m0); // Divide points into batches. // One batch is one shader invocation. let batched_points = BatchedPoints::new( |point_id| reference_graph.get_point_level(point_id), ids, groups_count, )?; let mut graph_layers_builder = create_graph_layers_builder(&batched_points, num_vectors, hnsw_m, ef, entry_points_num); // Link first points on CPU. 
let mut cpu_linked_points_count = 0; for batch in batched_points.iter_batches(0) { for point in batch.points { check_stopped(stopped)?; let points_scorer = points_scorer_builder(point.point_id)?; graph_layers_builder.link_new_point(point.point_id, points_scorer); cpu_linked_points_count += 1; if cpu_linked_points_count >= cpu_linked_points { break; } } if cpu_linked_points_count >= cpu_linked_points { break; } } // Mark all points as ready, as GPU will fill layer by layer. graph_layers_builder.fill_ready_list(); // Check if all points are linked on CPU. // If there are no batches left, we can return result before gpu resources creation. if batched_points .iter_batches(cpu_linked_points_count) .next() .is_none() { return Ok(graph_layers_builder); } gpu_insert_context.init(batched_points.remap())?; // Build all levels on GPU level by level. for level in (0..batched_points.levels_count()).rev() { log::trace!("Starting GPU level {level}"); gpu_insert_context.upload_links(level, &graph_layers_builder, stopped)?; build_level_on_gpu( gpu_insert_context, &batched_points, cpu_linked_points, level, stopped, )?; gpu_insert_context.download_links(level, &graph_layers_builder, stopped)?; } gpu_insert_context.log_measurements(); Ok(graph_layers_builder) } #[cfg(test)] mod tests { use std::borrow::Borrow; use super::*; use crate::index::hnsw_index::HnswM; use crate::index::hnsw_index::gpu::gpu_vector_storage::GpuVectorStorage; use crate::index::hnsw_index::gpu::tests::{ GpuGraphTestData, check_graph_layers_builders_quality, compare_graph_layers_builders, create_gpu_graph_test_data, }; fn build_gpu_graph( test: &GpuGraphTestData, groups_count: usize, cpu_linked_points_count: usize, exact: bool, repeats: usize, ) -> Vec<GraphLayersBuilder> { let num_vectors = test.graph_layers_builder.links_layers().len(); let instance = gpu::GPU_TEST_INSTANCE.clone(); let device = gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap(); let gpu_vector_storage = 
GpuVectorStorage::new( device.clone(), test.vector_storage.borrow(), None, false, &false.into(), ) .unwrap(); let mut gpu_search_context = GpuInsertContext::new( &gpu_vector_storage, groups_count, test.graph_layers_builder.hnsw_m(), test.graph_layers_builder.ef_construct(), exact, 1..=GPU_MAX_VISITED_FLAGS_FACTOR, ) .unwrap(); let ids: Vec<_> = (0..num_vectors as PointOffsetType).collect(); (0..repeats) .map(|_| { build_hnsw_on_gpu( &mut gpu_search_context, &test.graph_layers_builder, groups_count, 1, cpu_linked_points_count, ids.clone(), |point_id| Ok(test.vector_holder.internal_scorer(point_id)), &false.into(), ) .unwrap() }) .collect() } #[test] fn test_gpu_hnsw_equivalency() { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let num_vectors = 1024; let dim = 64; let hnsw_m = HnswM::new2(8); let ef = 32; let min_cpu_linked_points_count = 64; let test = create_gpu_graph_test_data(num_vectors, dim, hnsw_m, ef, 0); let graph_layers_builders = build_gpu_graph(&test, 1, min_cpu_linked_points_count, true, 2); for graph_layers_builder in graph_layers_builders.iter() { compare_graph_layers_builders(&test.graph_layers_builder, graph_layers_builder); } } #[test] fn test_gpu_hnsw_quality_exact() { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let num_vectors = 1024; let dim = 64; let hnsw_m = HnswM::new2(8); let ef = 32; let groups_count = 4; let searches_count = 20; let top = 10; let min_cpu_linked_points_count = 64; let test = create_gpu_graph_test_data(num_vectors, dim, hnsw_m, ef, searches_count); let graph_layers_builders = build_gpu_graph(&test, groups_count, min_cpu_linked_points_count, true, 1); let graph_layers_builder = graph_layers_builders.into_iter().next().unwrap(); check_graph_layers_builders_quality(graph_layers_builder, test, top, ef, 0.8) } #[test] fn test_gpu_hnsw_quality() { let _ = env_logger::builder() .is_test(true) 
.filter_level(log::LevelFilter::Trace) .try_init(); let num_vectors = 1024; let dim = 64; let hnsw_m = HnswM::new2(8); let ef = 32; let groups_count = 4; let searches_count = 20; let top = 10; let min_cpu_linked_points_count = 64; let test = create_gpu_graph_test_data(num_vectors, dim, hnsw_m, ef, searches_count); let graph_layers_builders = build_gpu_graph(&test, groups_count, min_cpu_linked_points_count, false, 1); let graph_layers_builder = graph_layers_builders.into_iter().next().unwrap(); check_graph_layers_builders_quality(graph_layers_builder, test, top, ef, 0.8) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_level_builder.rs
lib/segment/src/index/hnsw_index/gpu/gpu_level_builder.rs
use std::sync::atomic::{AtomicBool, Ordering};

use common::types::PointOffsetType;

use super::batched_points::{Batch, BatchedPoints};
use super::gpu_insert_context::GpuInsertContext;
use crate::common::check_stopped;
use crate::common::operation_error::OperationResult;
use crate::index::hnsw_index::gpu::gpu_insert_context::GpuRequest;

/// Build one HNSW level on GPU.
///
/// Iterates over batches (skipping the first `skip_count` points, which were
/// already linked on CPU). Batches whose insert level is below the current
/// `level` only have their entry points advanced via greedy search; batches
/// at or above the current level are fully inserted.
///
/// The submissions are pipelined one batch deep: the GPU call for batch N
/// returns the entries computed for batch N-1, which are then applied to the
/// previous batch. After the loop, the responses for the last batch are
/// downloaded and applied explicitly.
pub fn build_level_on_gpu(
    gpu_search_context: &mut GpuInsertContext,
    batched_points: &BatchedPoints,
    skip_count: usize,
    level: usize,
    stopped: &AtomicBool,
) -> OperationResult<()> {
    let mut prev_batch = None;
    for batch in batched_points.iter_batches(skip_count) {
        check_stopped(stopped)?;
        if level > batch.level {
            // Points of this batch live below the current level:
            // only move their entry points closer (greedy search).
            gpu_batched_update_entries(gpu_search_context, &batch, prev_batch.as_ref())?;
        } else {
            // Points of this batch belong to this level: insert them.
            gpu_batched_insert(gpu_search_context, &batch, prev_batch.as_ref())?;
        }
        prev_batch = Some(batch);
    }

    // Drain the pipeline: fetch and apply responses for the final batch.
    if let Some(prev_batch) = prev_batch {
        let new_entries = gpu_search_context.download_responses(prev_batch.points.len())?;
        gpu_batched_apply_entries(&prev_batch, new_entries);
    }

    Ok(())
}

/// Submit a greedy-search request for `batch` and apply the returned entries
/// (which correspond to `prev_batch`, if any) — see the pipelining note on
/// [`build_level_on_gpu`].
fn gpu_batched_update_entries(
    gpu_search_context: &mut GpuInsertContext,
    batch: &Batch,
    prev_batch: Option<&Batch>,
) -> OperationResult<()> {
    // One request per point: its id and its current entry point.
    let mut requests = Vec::with_capacity(batch.points.len());
    for linking_point in batch.points {
        requests.push(GpuRequest {
            id: linking_point.point_id,
            entry: linking_point.entry.load(Ordering::Relaxed),
        })
    }

    let prev_batch_len = prev_batch
        .map(|prev_batch| prev_batch.points.len())
        .unwrap_or(0);
    // `new_entries` holds the responses for the previously submitted batch.
    let new_entries = gpu_search_context.greedy_search(&requests, prev_batch_len)?;
    if let Some(prev_batch) = prev_batch {
        gpu_batched_apply_entries(prev_batch, new_entries);
    }
    Ok(())
}

/// Submit an insert request for `batch` and apply the returned entries
/// (which correspond to `prev_batch`, if any) — see the pipelining note on
/// [`build_level_on_gpu`].
fn gpu_batched_insert(
    gpu_search_context: &mut GpuInsertContext,
    batch: &Batch,
    prev_batch: Option<&Batch>,
) -> OperationResult<()> {
    // One request per point: its id and its current entry point.
    let mut requests = Vec::with_capacity(batch.points.len());
    for linking_point in batch.points {
        requests.push(GpuRequest {
            id: linking_point.point_id,
            entry: linking_point.entry.load(Ordering::Relaxed),
        })
    }

    let prev_batch_len = prev_batch
        .map(|prev_batch| prev_batch.points.len())
        .unwrap_or(0);
    // `new_entries` holds the responses for the previously submitted batch.
    let new_entries = gpu_search_context.run_insert_vector(&requests, prev_batch_len)?;
    if let Some(prev_batch) = prev_batch {
        gpu_batched_apply_entries(prev_batch, new_entries);
    }
    Ok(())
}

/// Store the freshly computed entry point of each point in `batch`.
/// Panics if the response count does not match the batch size.
fn gpu_batched_apply_entries(batch: &Batch, new_entries: Vec<PointOffsetType>) {
    assert_eq!(batch.points.len(), new_entries.len());
    for (linking_point, new_entry) in batch.points.iter().zip(new_entries) {
        linking_point.entry.store(new_entry, Ordering::Relaxed);
    }
}

#[cfg(test)]
mod tests {
    use std::borrow::Borrow;

    use common::types::PointOffsetType;
    use rstest::rstest;

    use super::*;
    use crate::index::hnsw_index::HnswM;
    use crate::index::hnsw_index::gpu::batched_points::BatchedPoints;
    use crate::index::hnsw_index::gpu::create_graph_layers_builder;
    use crate::index::hnsw_index::gpu::gpu_vector_storage::GpuVectorStorage;
    use crate::index::hnsw_index::gpu::tests::{
        GpuGraphTestData, check_graph_layers_builders_quality, compare_graph_layers_builders,
        create_gpu_graph_test_data,
    };
    use crate::index::hnsw_index::graph_layers::GraphLayersBase;
    use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;

    /// Builds a whole graph by calling `build_level_on_gpu` per level
    /// (no CPU-linked warm-up points) and returns the resulting builder.
    fn build_gpu_graph(
        test: &GpuGraphTestData,
        groups_count: usize,
        visited_flags_factor: usize,
    ) -> GraphLayersBuilder {
        let num_vectors = test.graph_layers_builder.links_layers().len();
        let hnsw_m = test.graph_layers_builder.hnsw_m();
        let ef = test.graph_layers_builder.ef_construct();

        let batched_points = BatchedPoints::new(
            |point_id| test.graph_layers_builder.get_point_level(point_id),
            (0..num_vectors as PointOffsetType).collect(),
            groups_count,
        )
        .unwrap();

        let mut graph_layers_builder =
            create_graph_layers_builder(&batched_points, num_vectors, hnsw_m, ef, 1);
        graph_layers_builder.fill_ready_list();

        let instance = gpu::GPU_TEST_INSTANCE.clone();
        let device = gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap();
        let gpu_vector_storage = GpuVectorStorage::new(
            device.clone(),
            test.vector_storage.borrow(),
            None,
            false,
            &false.into(),
        )
        .unwrap();

        let mut gpu_search_context = GpuInsertContext::new(
            &gpu_vector_storage,
            groups_count,
            hnsw_m,
            ef,
            true,
            visited_flags_factor..=32,
        )
        .unwrap();

        gpu_search_context.init(batched_points.remap()).unwrap();

        // Build from the top level down to level 0, clearing GPU state
        // and downloading the produced links after each level.
        for level in (0..batched_points.levels_count()).rev() {
            let level_m = graph_layers_builder.get_m(level);
            gpu_search_context.clear(level_m).unwrap();
            build_level_on_gpu(
                &mut gpu_search_context,
                &batched_points,
                0,
                level,
                &false.into(),
            )
            .unwrap();
            gpu_search_context
                .download_links(level, &graph_layers_builder, &false.into())
                .unwrap();
        }

        graph_layers_builder
    }

    /// With a single group the GPU level builder must reproduce the
    /// CPU-built reference graph exactly.
    #[test]
    fn test_gpu_hnsw_level_equivalency() {
        let _ = env_logger::builder()
            .is_test(true)
            .filter_level(log::LevelFilter::Trace)
            .try_init();

        let num_vectors = 1024;
        let dim = 64;
        let hnsw_m = HnswM::new2(8);
        let ef = 32;

        let test = create_gpu_graph_test_data(num_vectors, dim, hnsw_m, ef, 0);
        let graph_layers_builder = build_gpu_graph(&test, 1, 1);

        compare_graph_layers_builders(&test.graph_layers_builder, &graph_layers_builder);
    }

    /// With parallel groups the graph differs from the reference, but search
    /// quality must stay above the threshold for both visited-flags factors.
    #[rstest]
    fn test_gpu_hnsw_level_quality(#[values(1, 2)] visited_flags_factor: usize) {
        let _ = env_logger::builder()
            .is_test(true)
            .filter_level(log::LevelFilter::Trace)
            .try_init();

        let num_vectors = 1024;
        let dim = 64;
        let hnsw_m = HnswM::new2(8);
        let ef = 32;
        let groups_count = 4;
        let searches_count = 20;
        let top = 10;

        let test = create_gpu_graph_test_data(num_vectors, dim, hnsw_m, ef, searches_count);
        let graph_layers_builder = build_gpu_graph(&test, groups_count, visited_flags_factor);

        check_graph_layers_builders_quality(graph_layers_builder, test, top, ef, 0.8)
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/gpu_quantization.rs
lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/gpu_quantization.rs
use std::collections::HashMap; use std::sync::Arc; use common::types::PointOffsetType; use quantization::encoded_vectors_binary::{BitsStoreType, EncodedVectorsBin}; use quantization::{EncodedStorage, EncodedVectors, EncodedVectorsPQ, EncodedVectorsU8}; use super::{GpuVectorStorage, STORAGES_COUNT}; use crate::common::operation_error::OperationResult; use crate::index::hnsw_index::gpu::GPU_TIMEOUT; use crate::index::hnsw_index::gpu::shader_builder::ShaderBuilderParameters; pub const START_QUANTIZATION_BINDING: usize = STORAGES_COUNT; pub const MAX_QUANTIZATION_BINDINGS: usize = 2; /// Additional data for quantization in GPU. /// Add quantized vectors are stored as reqular vectors with for instance `u8` type. /// But quantization requires additional data for decoding. /// For instance, centroids for product quantization. /// This structures provides all additional data for quantization. /// From gpu buffers to shader defines and additional shader files. pub enum GpuQuantization { Binary(GpuBinaryQuantization), Scalar(GpuScalarQuantization), Product(GpuProductQuantization), } impl ShaderBuilderParameters for GpuQuantization { fn shader_includes(&self) -> HashMap<String, String> { match self { GpuQuantization::Binary(params) => params.shader_includes(), GpuQuantization::Scalar(params) => params.shader_includes(), GpuQuantization::Product(params) => params.shader_includes(), } } fn shader_defines(&self) -> HashMap<String, Option<String>> { match self { GpuQuantization::Binary(params) => params.shader_defines(), GpuQuantization::Scalar(params) => params.shader_defines(), GpuQuantization::Product(params) => params.shader_defines(), } } } impl GpuQuantization { pub fn new_bq<T: BitsStoreType, TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsBin<T, TStorage>, ) -> GpuQuantization { GpuQuantization::Binary(GpuBinaryQuantization::new(device, quantized_storage)) } pub fn new_sq<TStorage: EncodedStorage>( device: Arc<gpu::Device>, 
quantized_storage: &EncodedVectorsU8<TStorage>, ) -> OperationResult<GpuQuantization> { Ok(GpuQuantization::Scalar(GpuScalarQuantization::new( device, quantized_storage, )?)) } pub fn new_pq<TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsPQ<TStorage>, ) -> OperationResult<GpuQuantization> { Ok(GpuQuantization::Product(GpuProductQuantization::new( device, quantized_storage, )?)) } /// Adds additional bindings to descriptor set. pub fn add_descriptor_set( &self, descriptor_set_builder: gpu::DescriptorSetBuilder, ) -> gpu::DescriptorSetBuilder { match self { GpuQuantization::Binary(bq) => bq.add_descriptor_set(descriptor_set_builder), GpuQuantization::Scalar(sq) => sq.add_descriptor_set(descriptor_set_builder), GpuQuantization::Product(pq) => pq.add_descriptor_set(descriptor_set_builder), } } /// Adds additional bindings to descriptor set layout. pub fn add_descriptor_set_layout( &self, descriptor_set_layout_builder: gpu::DescriptorSetLayoutBuilder, ) -> gpu::DescriptorSetLayoutBuilder { match self { GpuQuantization::Binary(bq) => { bq.add_descriptor_set_layout(descriptor_set_layout_builder) } GpuQuantization::Scalar(sq) => { sq.add_descriptor_set_layout(descriptor_set_layout_builder) } GpuQuantization::Product(pq) => { pq.add_descriptor_set_layout(descriptor_set_layout_builder) } } } } /// Additional data for binary quantization in GPU. /// It imnplements all shader definitions for BQ. pub struct GpuBinaryQuantization { /// How many bits are added while aligning. /// We need to subtract them after XOR+popcnt operation. skip_count: usize, } impl ShaderBuilderParameters for GpuBinaryQuantization { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([( "vector_storage_bq.comp".to_string(), include_str!("../shaders/vector_storage_bq.comp").to_string(), )]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); // Define that we are using quantization. 
defines.insert("VECTOR_STORAGE_QUANTIZATION".to_owned(), None); // Define that quantization is binary. defines.insert("VECTOR_STORAGE_ELEMENT_BQ".to_owned(), None); // Provide skip count for BQ. defines.insert( "BQ_SKIP_COUNT".to_owned(), Some(self.skip_count.to_string()), ); defines } } impl GpuBinaryQuantization { fn new<T: BitsStoreType, TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsBin<T, TStorage>, ) -> Self { let orig_dim = quantized_storage.get_vector_parameters().dim; // Bytes count for quantized vector. let quantized_vector_len = if quantized_storage.vectors_count() > 0 { quantized_storage.get_quantized_vector(0).len() } else { 0 }; // Find bits count for aligned gpu vector. let gpu_bits_count = GpuVectorStorage::gpu_vector_capacity(&device, quantized_vector_len) * u8::BITS as usize; Self { skip_count: gpu_bits_count - orig_dim, } } #[allow(clippy::unused_self)] fn add_descriptor_set( &self, descriptor_set_builder: gpu::DescriptorSetBuilder, ) -> gpu::DescriptorSetBuilder { descriptor_set_builder } #[allow(clippy::unused_self)] fn add_descriptor_set_layout( &self, descriptor_set_layout_builder: gpu::DescriptorSetLayoutBuilder, ) -> gpu::DescriptorSetLayoutBuilder { descriptor_set_layout_builder } } pub struct GpuScalarQuantization { multiplier: f32, diff: f32, offsets_buffer: Arc<gpu::Buffer>, offsets_buffer_binding: usize, } impl ShaderBuilderParameters for GpuScalarQuantization { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([( "vector_storage_sq.comp".to_string(), include_str!("../shaders/vector_storage_sq.comp").to_string(), )]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); // Define that we are using quantization. defines.insert("VECTOR_STORAGE_QUANTIZATION".to_owned(), None); // Define that quantization is scalar. defines.insert("VECTOR_STORAGE_ELEMENT_SQ".to_owned(), None); // Provide shader binding of SQ offsets. 
defines.insert( "VECTOR_STORAGE_SQ_OFFSETS_BINDING".to_owned(), Some(self.offsets_buffer_binding.to_string()), ); // Provide multiplier and diff for quantization. defines.insert( "SQ_MULTIPLIER".to_owned(), Some(self.multiplier.to_string()), ); defines.insert("SQ_DIFF".to_owned(), Some(self.diff.to_string())); defines } } impl GpuScalarQuantization { fn new<TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsU8<TStorage>, ) -> OperationResult<Self> { Ok(GpuScalarQuantization { multiplier: quantized_storage.get_multiplier(), diff: quantized_storage.get_shift(), offsets_buffer: GpuScalarQuantization::create_sq_offsets_buffer( device, quantized_storage, )?, offsets_buffer_binding: START_QUANTIZATION_BINDING, }) } fn create_sq_offsets_buffer<TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsU8<TStorage>, ) -> OperationResult<Arc<gpu::Buffer>> { let sq_offsets_buffer = gpu::Buffer::new( device.clone(), "SQ offsets buffer", gpu::BufferType::Storage, std::cmp::max(quantized_storage.vectors_count(), 1) * std::mem::size_of::<f32>(), )?; let sq_offsets_staging_buffer = gpu::Buffer::new( device.clone(), "SQ offsets staging buffer", gpu::BufferType::CpuToGpu, sq_offsets_buffer.size(), )?; let mut upload_context = gpu::Context::new(device.clone())?; for i in 0..quantized_storage.vectors_count() { let (offset, _) = quantized_storage.get_quantized_vector_offset_and_code(i as PointOffsetType); sq_offsets_staging_buffer.upload(&offset, i * std::mem::size_of::<f32>())?; } upload_context.copy_gpu_buffer( sq_offsets_staging_buffer, sq_offsets_buffer.clone(), 0, 0, sq_offsets_buffer.size(), )?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; Ok(sq_offsets_buffer) } pub fn add_descriptor_set( &self, descriptor_set_builder: gpu::DescriptorSetBuilder, ) -> gpu::DescriptorSetBuilder { descriptor_set_builder .add_storage_buffer(self.offsets_buffer_binding, self.offsets_buffer.clone()) } pub fn 
add_descriptor_set_layout( &self, descriptor_set_layout_builder: gpu::DescriptorSetLayoutBuilder, ) -> gpu::DescriptorSetLayoutBuilder { descriptor_set_layout_builder.add_storage_buffer(self.offsets_buffer_binding) } } pub struct GpuProductQuantization { centroids_buffer: Arc<gpu::Buffer>, centroids_buffer_binding: usize, vector_division_buffer: Arc<gpu::Buffer>, vector_division_buffer_binding: usize, divisions_count: usize, centroids_dim: usize, } impl ShaderBuilderParameters for GpuProductQuantization { fn shader_includes(&self) -> HashMap<String, String> { HashMap::from([( "vector_storage_pq.comp".to_string(), include_str!("../shaders/vector_storage_pq.comp").to_string(), )]) } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); // Define that we are using quantization. defines.insert("VECTOR_STORAGE_QUANTIZATION".to_owned(), None); // Define that quantization is product. defines.insert("VECTOR_STORAGE_ELEMENT_PQ".to_owned(), None); // Provide shader binding of PQ centroids. defines.insert( "VECTOR_STORAGE_PQ_CENTROIDS_BINDING".to_owned(), Some(self.centroids_buffer_binding.to_string()), ); // Provide shader binding of PQ vector division. defines.insert( "VECTOR_STORAGE_PQ_DIVISIONS_BINDING".to_owned(), Some(self.vector_division_buffer_binding.to_string()), ); // Provide vector divisions count and centroids count for quantization. 
defines.insert( "PQ_DIVISIONS_COUNT".to_owned(), Some(self.divisions_count.to_string()), ); defines.insert( "PQ_CENTROIDS_DIM".to_owned(), Some(self.centroids_dim.to_string()), ); defines } } impl GpuProductQuantization { fn new<TStorage: EncodedStorage>( device: Arc<gpu::Device>, quantized_storage: &EncodedVectorsPQ<TStorage>, ) -> OperationResult<Self> { let centroids_buffer = gpu::Buffer::new( device.clone(), "PQ centroids buffer", gpu::BufferType::Storage, quantized_storage .get_metadata() .centroids .iter() .map(|c| c.len()) .sum::<usize>() * std::mem::size_of::<f32>(), )?; let centroids_staging_buffer = gpu::Buffer::new( device.clone(), "PQ centroids staging buffer", gpu::BufferType::CpuToGpu, centroids_buffer.size(), )?; let vector_division_buffer = gpu::Buffer::new( device.clone(), "PQ vector division buffer", gpu::BufferType::Storage, std::cmp::max(quantized_storage.get_metadata().vector_division.len(), 1) * std::mem::size_of::<u32>() * 2, )?; let vector_division_staging_buffer = gpu::Buffer::new( device.clone(), "PQ vector division staging buffer", gpu::BufferType::CpuToGpu, vector_division_buffer.size(), )?; let mut upload_context = gpu::Context::new(device.clone())?; let mut centroids_offset = 0; for centroids in &quantized_storage.get_metadata().centroids { centroids_staging_buffer.upload(centroids.as_slice(), centroids_offset)?; centroids_offset += centroids.len() * std::mem::size_of::<f32>(); } upload_context.copy_gpu_buffer( centroids_staging_buffer, centroids_buffer.clone(), 0, 0, centroids_buffer.size(), )?; let vector_division: Vec<_> = quantized_storage .get_metadata() .vector_division .iter() .flat_map(|range| [range.start as u32, range.end as u32].into_iter()) .collect(); vector_division_staging_buffer.upload(vector_division.as_slice(), 0)?; upload_context.copy_gpu_buffer( vector_division_staging_buffer, vector_division_buffer.clone(), 0, 0, vector_division_buffer.size(), )?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; 
Ok(Self { centroids_buffer, vector_division_buffer, divisions_count: quantized_storage.get_metadata().vector_division.len(), centroids_dim: quantized_storage .get_metadata() .centroids .first() .map(|c| c.len()) .unwrap_or_default(), centroids_buffer_binding: START_QUANTIZATION_BINDING, vector_division_buffer_binding: START_QUANTIZATION_BINDING + 1, }) } pub fn add_descriptor_set( &self, descriptor_set_builder: gpu::DescriptorSetBuilder, ) -> gpu::DescriptorSetBuilder { let descriptor_set_builder = descriptor_set_builder .add_storage_buffer(self.centroids_buffer_binding, self.centroids_buffer.clone()); descriptor_set_builder.add_storage_buffer( self.vector_division_buffer_binding, self.vector_division_buffer.clone(), ) } pub fn add_descriptor_set_layout( &self, descriptor_set_layout_builder: gpu::DescriptorSetLayoutBuilder, ) -> gpu::DescriptorSetLayoutBuilder { let descriptor_set_layout_builder = descriptor_set_layout_builder.add_storage_buffer(self.centroids_buffer_binding); descriptor_set_layout_builder.add_storage_buffer(self.vector_division_buffer_binding) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/tests.rs
lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/tests.rs
//! Unit tests for GPU vector storage, covering f32, f16, u8 dense/multivectors with SQ, BQ, PQ. use std::path::Path; use common::counter::hardware_counter::HardwareCounterCell; use parking_lot::RwLock; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use rocksdb::DB; use rstest::rstest; use super::*; use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db}; use crate::data_types::vectors::{MultiDenseVectorInternal, QueryVector, VectorRef}; use crate::fixtures::index_fixtures::random_vector; use crate::fixtures::payload_fixtures::random_dense_byte_vector; use crate::index::hnsw_index::gpu::shader_builder::ShaderBuilder; use crate::types::{ BinaryQuantization, BinaryQuantizationConfig, BinaryQuantizationEncoding, Distance, ProductQuantization, ProductQuantizationConfig, QuantizationConfig, ScalarQuantization, ScalarQuantizationConfig, }; use crate::vector_storage::dense::simple_dense_vector_storage::{ open_simple_dense_byte_vector_storage, open_simple_dense_full_vector_storage, open_simple_dense_half_vector_storage, }; use crate::vector_storage::multi_dense::simple_multi_dense_vector_storage::{ open_simple_multi_dense_vector_storage_byte, open_simple_multi_dense_vector_storage_full, open_simple_multi_dense_vector_storage_half, }; use crate::vector_storage::quantized::quantized_vectors::QuantizedVectorsStorageType; use crate::vector_storage::{DEFAULT_STOPPED, RawScorer, new_raw_scorer_for_test}; #[derive(Debug, Clone, Copy)] enum TestElementType { Float32, Float16, Uint8, } #[derive(Debug, Clone, Copy)] enum TestStorageType { Dense(TestElementType), Multi(TestElementType), } impl TestStorageType { pub fn element_type(&self) -> TestElementType { match self { TestStorageType::Dense(element_type) => *element_type, TestStorageType::Multi(element_type) => *element_type, } } } #[rstest] #[case::cosine_f32( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::dot_f32( Distance::Dot, 
TestStorageType::Dense(TestElementType::Float32), 256, 512 )] #[case::euclid_f32( Distance::Euclid, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::manhattan_f32( Distance::Manhattan, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::small_dimension( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 17, 2057 )] #[case::cosine_f16( Distance::Cosine, TestStorageType::Dense(TestElementType::Float16), 273, 2057 )] #[case::cosine_u8( Distance::Cosine, TestStorageType::Dense(TestElementType::Uint8), 273, 2057 )] #[case::cosine_multi_f32( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 67, 2057 )] #[case::cosine_multi_u8( Distance::Cosine, TestStorageType::Multi(TestElementType::Uint8), 273, 2057 )] fn test_gpu_vector_storage_sq( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let quantization_config = QuantizationConfig::Scalar(ScalarQuantization { scalar: ScalarQuantizationConfig { always_ram: Some(true), r#type: crate::types::ScalarType::Int8, quantile: Some(0.99), }, }); let precision = get_precision(storage_type, dim, distance); log::info!( "Testing SQ distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, Some(quantization_config.clone()), false, false, precision, ); } #[rstest] #[case::cosine_f32_one_bit( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 273, 2057, BinaryQuantizationEncoding::OneBit )] #[case::dot_f32_one_and_half_bits( Distance::Dot, TestStorageType::Dense(TestElementType::Float32), 256, 512, BinaryQuantizationEncoding::OneAndHalfBits )] #[case::euclid_f32( Distance::Euclid, TestStorageType::Dense(TestElementType::Float32), 273, 2057, BinaryQuantizationEncoding::OneBit )] 
#[case::manhattan_f32_two_bits( Distance::Manhattan, TestStorageType::Dense(TestElementType::Float32), 273, 2057, BinaryQuantizationEncoding::TwoBits )] #[case::small_dimension( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 17, 2057, BinaryQuantizationEncoding::OneBit )] #[case::cosine_f16( Distance::Cosine, TestStorageType::Dense(TestElementType::Float16), 273, 2057, BinaryQuantizationEncoding::OneBit )] #[case::cosine_u8( Distance::Cosine, TestStorageType::Dense(TestElementType::Uint8), 273, 2057, BinaryQuantizationEncoding::OneBit )] #[case::cosine_multi_f32( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 67, 2057, BinaryQuantizationEncoding::OneBit )] #[case::cosine_multi_u8( Distance::Cosine, TestStorageType::Multi(TestElementType::Uint8), 273, 2057, BinaryQuantizationEncoding::OneBit )] fn test_gpu_vector_storage_bq( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, #[case] encoding: BinaryQuantizationEncoding, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let quantization_config = QuantizationConfig::Binary(BinaryQuantization { binary: BinaryQuantizationConfig { always_ram: Some(true), encoding: Some(encoding), query_encoding: None, }, }); let precision = get_precision(storage_type, dim, distance); log::info!( "Testing BQ distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, Some(quantization_config.clone()), false, false, precision, ); } #[rstest] #[case::cosine_f32( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 17, 2057 )] #[case::dot_f32( Distance::Dot, TestStorageType::Dense(TestElementType::Float32), 17, 512 )] #[case::euclid_f32( Distance::Euclid, TestStorageType::Dense(TestElementType::Float32), 17, 2057 )] #[case::manhattan_f32( Distance::Manhattan, 
TestStorageType::Dense(TestElementType::Float32), 17, 2057 )] #[case::large_dimension( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 129, 1095 )] #[case::cosine_f16( Distance::Cosine, TestStorageType::Dense(TestElementType::Float16), 17, 2057 )] #[case::cosine_u8( Distance::Cosine, TestStorageType::Dense(TestElementType::Uint8), 17, 2057 )] #[case::cosine_multi_f32( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 17, 2057 )] #[case::cosine_multi_u8( Distance::Cosine, TestStorageType::Multi(TestElementType::Uint8), 17, 2057 )] fn test_gpu_vector_storage_pq( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let quantization_config = QuantizationConfig::Product(ProductQuantization { product: ProductQuantizationConfig { always_ram: Some(true), compression: crate::types::CompressionRatio::X8, }, }); let precision = get_precision(storage_type, dim, distance); log::info!( "Testing PQ distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, Some(quantization_config.clone()), false, false, precision, ); } #[rstest] #[case::cosine_f32( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::dot_f32( Distance::Dot, TestStorageType::Dense(TestElementType::Float32), 256, 512 )] #[case::euclid_f32( Distance::Euclid, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::manhattan_f32( Distance::Manhattan, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::small_dimension( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 17, 2057 )] #[case::cosine_f16( Distance::Cosine, TestStorageType::Dense(TestElementType::Float16), 273, 2057 )] #[case::cosine_u8( Distance::Cosine, 
TestStorageType::Dense(TestElementType::Uint8), 273, 2057 )] #[case::cosine_multi_f32( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 67, 2057 )] #[case::cosine_multi_u8( Distance::Cosine, TestStorageType::Multi(TestElementType::Uint8), 273, 2057 )] fn test_gpu_vector_storage( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let precision = get_precision(storage_type, dim, distance); log::info!( "Testing distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, None, false, false, precision, ); } #[rstest] #[case::cosine_dense( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::cosine_multi( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 67, 2057 )] fn test_gpu_vector_storage_force_half( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let precision = 5.0 * get_precision(storage_type, dim, distance); log::info!( "Testing distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, None, true, // force half precision false, precision, ); } #[rstest] #[case::dense_f32( Distance::Cosine, TestStorageType::Dense(TestElementType::Float32), 273, 2057 )] #[case::dense_f16( Distance::Cosine, TestStorageType::Dense(TestElementType::Float16), 273, 2057 )] #[case::multi_f32( Distance::Cosine, TestStorageType::Multi(TestElementType::Float32), 67, 2057 )] #[case::multi_f16( Distance::Cosine, TestStorageType::Multi(TestElementType::Float16), 67, 2057 )] fn 
test_gpu_vector_storage_without_half( #[case] distance: Distance, #[case] storage_type: TestStorageType, #[case] dim: usize, #[case] num_vectors: usize, ) { let _ = env_logger::builder() .is_test(true) .filter_level(log::LevelFilter::Trace) .try_init(); let precision = 5.0 * get_precision(storage_type, dim, distance); log::info!( "Testing distance {distance:?}, element type {storage_type:?}, dim {dim} with precision {precision}" ); test_gpu_vector_storage_impl( storage_type, num_vectors, dim, distance, None, true, // force half precision true, // skip half support precision, ); } fn get_precision(storage_type: TestStorageType, dim: usize, distance: Distance) -> f32 { let distance_persision = match distance { Distance::Cosine => 0.01, Distance::Dot => 0.01, Distance::Euclid => dim as f32 * 0.001, Distance::Manhattan => dim as f32 * 0.001, }; match storage_type.element_type() { TestElementType::Float32 => distance_persision, TestElementType::Float16 => distance_persision * 5.0, TestElementType::Uint8 => distance_persision * 10.0, } } fn create_vector_storage( path: &Path, storage_type: TestStorageType, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let db = open_db(path, &[DB_VECTOR_CF]).unwrap(); match storage_type { TestStorageType::Dense(TestElementType::Float32) => { create_vector_storage_f32(db, num_vectors, dim, distance) } TestStorageType::Dense(TestElementType::Float16) => { create_vector_storage_f16(db, num_vectors, dim, distance) } TestStorageType::Dense(TestElementType::Uint8) => { create_vector_storage_u8(db, num_vectors, dim, distance) } TestStorageType::Multi(TestElementType::Float32) => { create_vector_storage_f32_multi(db, num_vectors, dim, distance) } TestStorageType::Multi(TestElementType::Float16) => { create_vector_storage_f16_multi(db, num_vectors, dim, distance) } TestStorageType::Multi(TestElementType::Uint8) => { create_vector_storage_u8_multi(db, num_vectors, dim, distance) } } } fn create_vector_storage_f32( db: 
Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let mut vector_storage = open_simple_dense_full_vector_storage(db, DB_VECTOR_CF, dim, distance, &false.into()) .unwrap(); for i in 0..num_vectors { let vec = random_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementType>(vec); let vec_ref = VectorRef::from(&vec); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } fn create_vector_storage_f16( db: Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let mut vector_storage = open_simple_dense_half_vector_storage(db, DB_VECTOR_CF, dim, distance, &false.into()) .unwrap(); for i in 0..num_vectors { let vec = random_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementTypeHalf>(vec); let vec_ref = VectorRef::from(&vec); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } fn create_vector_storage_u8( db: Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let mut vector_storage = open_simple_dense_byte_vector_storage(db, DB_VECTOR_CF, dim, distance, &false.into()) .unwrap(); for i in 0..num_vectors { let vec = random_dense_byte_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementTypeByte>(vec); let vec_ref = VectorRef::from(&vec); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } fn create_vector_storage_f32_multi( db: Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let multivector_config = Default::default(); let mut vector_storage = 
open_simple_multi_dense_vector_storage_full( db, DB_VECTOR_CF, dim, distance, multivector_config, &false.into(), ) .unwrap(); for i in 0..num_vectors { let mut vectors = vec![]; let num_vectors_per_points = 1 + rnd.random::<u8>() % 3; for _ in 0..num_vectors_per_points { let vec = random_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementType>(vec); vectors.extend(vec); } let multivector = MultiDenseVectorInternal::new(vectors, dim); let vec_ref = VectorRef::from(&multivector); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } fn create_vector_storage_f16_multi( db: Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let multivector_config = Default::default(); let mut vector_storage = open_simple_multi_dense_vector_storage_half( db, DB_VECTOR_CF, dim, distance, multivector_config, &false.into(), ) .unwrap(); for i in 0..num_vectors { let mut vectors = vec![]; let num_vectors_per_points = 1 + rnd.random::<u8>() % 3; for _ in 0..num_vectors_per_points { let vec = random_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementTypeHalf>(vec); vectors.extend(vec); } let multivector = MultiDenseVectorInternal::new(vectors, dim); let vec_ref = VectorRef::from(&multivector); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } fn create_vector_storage_u8_multi( db: Arc<RwLock<DB>>, num_vectors: usize, dim: usize, distance: Distance, ) -> VectorStorageEnum { let mut rnd = StdRng::seed_from_u64(42); let multivector_config = Default::default(); let mut vector_storage = open_simple_multi_dense_vector_storage_byte( db, DB_VECTOR_CF, dim, distance, multivector_config, &false.into(), ) .unwrap(); for i in 0..num_vectors { let mut vectors = vec![]; let num_vectors_per_points = 1 + rnd.random::<u8>() % 3; for _ in 
0..num_vectors_per_points { let vec = random_dense_byte_vector(&mut rnd, dim); let vec = distance.preprocess_vector::<VectorElementTypeByte>(vec); vectors.extend(vec); } let multivector = MultiDenseVectorInternal::new(vectors, dim); let vec_ref = VectorRef::from(&multivector); vector_storage .insert_vector(i as PointOffsetType, vec_ref, &HardwareCounterCell::new()) .unwrap(); } vector_storage } #[cfg(test)] #[allow(clippy::too_many_arguments)] fn test_gpu_vector_storage_impl( storage_type: TestStorageType, num_vectors: usize, dim: usize, distance: Distance, quantization_config: Option<QuantizationConfig>, force_half_precision: bool, skip_half_support: bool, precision: f32, ) { let test_point_id: PointOffsetType = 0; let dir = tempfile::Builder::new().prefix("db_dir").tempdir().unwrap(); let storage = create_vector_storage(dir.path(), storage_type, num_vectors, dim, distance); let quantized_vectors = quantization_config.as_ref().map(|quantization_config| { QuantizedVectors::create( &storage, quantization_config, QuantizedVectorsStorageType::Immutable, dir.path(), 1, &DEFAULT_STOPPED, ) .unwrap() }); let instance = gpu::GPU_TEST_INSTANCE.clone(); let device = gpu::Device::new_with_params( instance.clone(), &instance.physical_devices()[0], 0, skip_half_support, ) .unwrap(); let gpu_vector_storage = GpuVectorStorage::new( device.clone(), &storage, quantized_vectors.as_ref(), force_half_precision, &DEFAULT_STOPPED, ) .unwrap(); assert_eq!(gpu_vector_storage.num_vectors(), num_vectors); assert_eq!( gpu_vector_storage.element_type, if let Some(_quantization_config) = quantization_config.as_ref() { VectorStorageDatatype::Uint8 } else { match storage_type.element_type() { TestElementType::Float32 => { if force_half_precision && device.has_half_precision() { VectorStorageDatatype::Float16 } else { VectorStorageDatatype::Float32 } } TestElementType::Float16 => { if device.has_half_precision() { VectorStorageDatatype::Float16 } else { VectorStorageDatatype::Float32 } } 
TestElementType::Uint8 => VectorStorageDatatype::Uint8, } } ); let scores_buffer = gpu::Buffer::new( device.clone(), "Scores buffer", gpu::BufferType::Storage, num_vectors * std::mem::size_of::<f32>(), ) .unwrap(); let descriptor_set_layout = gpu::DescriptorSetLayout::builder() .add_storage_buffer(0) .build(device.clone()) .unwrap(); let descriptor_set = gpu::DescriptorSet::builder(descriptor_set_layout.clone()) .add_storage_buffer(0, scores_buffer.clone()) .build() .unwrap(); let shader = ShaderBuilder::new(device.clone()) .with_shader_code(include_str!("../shaders/tests/test_vector_storage.comp")) .with_parameters(&gpu_vector_storage) .build("tests/test_vector_storage.comp") .unwrap(); let pipeline = gpu::Pipeline::builder() .add_descriptor_set_layout(0, descriptor_set_layout.clone()) .add_descriptor_set_layout(1, gpu_vector_storage.descriptor_set_layout.clone()) .add_shader(shader.clone()) .build(device.clone()) .unwrap(); let mut context = gpu::Context::new(device.clone()).unwrap(); context .bind_pipeline( pipeline, &[descriptor_set, gpu_vector_storage.descriptor_set.clone()], ) .unwrap(); context.dispatch(num_vectors, 1, 1).unwrap(); let timer = std::time::Instant::now(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); log::trace!("GPU scoring time = {:?}", timer.elapsed()); let staging_buffer = gpu::Buffer::new( device.clone(), "Scores staging buffer", gpu::BufferType::GpuToCpu, num_vectors * std::mem::size_of::<f32>(), ) .unwrap(); context .copy_gpu_buffer( scores_buffer, staging_buffer.clone(), 0, 0, num_vectors * std::mem::size_of::<f32>(), ) .unwrap(); context.run().unwrap(); context.wait_finish(GPU_TIMEOUT).unwrap(); let gpu_scores = staging_buffer.download_vec(0, num_vectors).unwrap(); let query = QueryVector::Nearest(storage.get_vector::<Random>(test_point_id).to_owned()); let hardware_counter = HardwareCounterCell::new(); let scorer: Box<dyn RawScorer> = if let Some(quantized_vectors) = quantized_vectors.as_ref() { quantized_vectors 
.raw_scorer(query, hardware_counter) .unwrap() } else { new_raw_scorer_for_test(query, &storage).unwrap() }; for (point_id, gpu_score) in gpu_scores.iter().enumerate() { let score = scorer.score_internal( test_point_id as PointOffsetType, point_id as PointOffsetType, ); assert!((score - gpu_score).abs() < precision); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/mod.rs
lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/mod.rs
mod gpu_multivectors; mod gpu_quantization; #[cfg(test)] mod tests; use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; use std::sync::atomic::AtomicBool; use common::types::PointOffsetType; use gpu_multivectors::GpuMultivectors; use gpu_quantization::GpuQuantization; use quantization::encoded_vectors_binary::{BitsStoreType, EncodedVectorsBin}; use quantization::{EncodedStorage, EncodedVectors, EncodedVectorsPQ, EncodedVectorsU8}; use zerocopy::IntoBytes; use super::shader_builder::ShaderBuilderParameters; use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped}; use crate::data_types::primitive::PrimitiveVectorElement; use crate::data_types::vectors::{VectorElementType, VectorElementTypeByte, VectorElementTypeHalf}; use crate::index::hnsw_index::gpu::GPU_TIMEOUT; use crate::types::{Distance, VectorStorageDatatype}; use crate::vector_storage::quantized::quantized_vectors::{ QuantizedVectorStorage, QuantizedVectors, }; use crate::vector_storage::{ DenseVectorStorage, MultiVectorStorage, Random, VectorStorage, VectorStorageEnum, }; pub const ELEMENTS_PER_SUBGROUP: usize = 4; pub const UPLOAD_CHUNK_SIZE: usize = 64 * 1024 * 1024; pub const STORAGES_COUNT: usize = 4; /// GPU storage for vectors. pub struct GpuVectorStorage { device: Arc<gpu::Device>, num_vectors: usize, descriptor_set_layout: Arc<gpu::DescriptorSetLayout>, descriptor_set: Arc<gpu::DescriptorSet>, dim: usize, element_type: VectorStorageDatatype, distance: Distance, /// Additional quantization data. quantization: Option<GpuQuantization>, /// Additional multivectors data. 
multivectors: Option<GpuMultivectors>, } impl ShaderBuilderParameters for GpuVectorStorage { fn shader_includes(&self) -> HashMap<String, String> { let mut includes = HashMap::from([ ( "vector_storage.comp".to_string(), include_str!("../shaders/vector_storage.comp").to_string(), ), ( "vector_storage_dense.comp".to_string(), include_str!("../shaders/vector_storage_dense.comp").to_string(), ), ( "vector_storage_f16.comp".to_string(), include_str!("../shaders/vector_storage_f16.comp").to_string(), ), ( "vector_storage_f32.comp".to_string(), include_str!("../shaders/vector_storage_f32.comp").to_string(), ), ( "vector_storage_u8.comp".to_string(), include_str!("../shaders/vector_storage_u8.comp").to_string(), ), ]); if let Some(quantization) = &self.quantization { includes.extend(quantization.shader_includes()); } if let Some(multivectors) = &self.multivectors { includes.extend(multivectors.shader_includes()); } includes } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); match self.element_type { VectorStorageDatatype::Float32 => { defines.insert("VECTOR_STORAGE_ELEMENT_FLOAT32".to_owned(), None); } VectorStorageDatatype::Float16 => { defines.insert("VECTOR_STORAGE_ELEMENT_FLOAT16".to_owned(), None); } VectorStorageDatatype::Uint8 => { defines.insert("VECTOR_STORAGE_ELEMENT_UINT8".to_owned(), None); } } match self.distance { Distance::Cosine => { defines.insert("COSINE_DISTANCE".to_owned(), None); } Distance::Euclid => { defines.insert("EUCLID_DISTANCE".to_owned(), None); } Distance::Dot => { defines.insert("DOT_DISTANCE".to_owned(), None); } Distance::Manhattan => { defines.insert("MANHATTAN_DISTANCE".to_owned(), None); } } if let Some(quantization) = &self.quantization { defines.extend(quantization.shader_defines()); } if let Some(multivectors) = &self.multivectors { defines.extend(multivectors.shader_defines()); } defines.insert("DIM".to_owned(), Some(self.dim.to_string())); defines.insert( "STORAGES_COUNT".to_owned(), 
Some(STORAGES_COUNT.to_string()), ); defines } } impl GpuVectorStorage { pub fn new( device: Arc<gpu::Device>, vector_storage: &VectorStorageEnum, quantized_storage: Option<&QuantizedVectors>, // Force half precision for `f32` vectors. force_half_precision: bool, stopped: &AtomicBool, ) -> OperationResult<Self> { if let Some(quantized_storage) = quantized_storage { Self::new_quantized( device, vector_storage.distance(), quantized_storage.get_storage(), stopped, ) } else { Self::new_from_vector_storage(device, vector_storage, force_half_precision, stopped) } } fn new_quantized( device: Arc<gpu::Device>, distance: Distance, quantized_storage: &QuantizedVectorStorage, stopped: &AtomicBool, ) -> OperationResult<Self> { match quantized_storage { QuantizedVectorStorage::ScalarRam(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::ScalarMmap(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::ScalarChunkedMmap(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::PQRam(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::PQMmap(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::PQChunkedMmap(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::BinaryRam(quantized_storage) => Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::BinaryMmap(quantized_storage) => 
Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::BinaryChunkedMmap(quantized_storage) => Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage, None, stopped, ), QuantizedVectorStorage::ScalarRamMulti(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::ScalarMmapMulti(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::ScalarChunkedMmapMulti(quantized_storage) => Self::new_sq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::PQRamMulti(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::PQMmapMulti(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::PQChunkedMmapMulti(quantized_storage) => Self::new_pq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::BinaryRamMulti(quantized_storage) => Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), 
Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::BinaryMmapMulti(quantized_storage) => Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), QuantizedVectorStorage::BinaryChunkedMmapMulti(quantized_storage) => Self::new_bq( device.clone(), distance, quantized_storage.vectors_count(), quantized_storage.inner_storage(), Some(GpuMultivectors::new_quantized(device, quantized_storage)?), stopped, ), } } pub fn new_sq<TStorage: EncodedStorage>( device: Arc<gpu::Device>, distance: Distance, num_vectors: usize, quantized_storage: &EncodedVectorsU8<TStorage>, multivectors: Option<GpuMultivectors>, stopped: &AtomicBool, ) -> OperationResult<Self> { Self::new_typed::<VectorElementTypeByte>( device.clone(), distance, quantized_storage.vectors_count(), num_vectors, quantized_storage .get_quantized_vector_offset_and_code(0) .1 .len(), (0..quantized_storage.vectors_count()).map(|id| { let (_, vector) = quantized_storage.get_quantized_vector_offset_and_code(id as PointOffsetType); Cow::Borrowed(vector) }), Some(GpuQuantization::new_sq(device, quantized_storage)?), multivectors, stopped, ) } fn new_pq<TStorage: EncodedStorage>( device: Arc<gpu::Device>, distance: Distance, num_vectors: usize, quantized_storage: &EncodedVectorsPQ<TStorage>, multivectors: Option<GpuMultivectors>, stopped: &AtomicBool, ) -> OperationResult<Self> { Self::new_typed::<VectorElementTypeByte>( device.clone(), distance, quantized_storage.vectors_count(), num_vectors, quantized_storage.get_quantized_vector(0).len(), (0..quantized_storage.vectors_count()).map(|id| { let vector = quantized_storage.get_quantized_vector(id as PointOffsetType); Cow::Borrowed(vector) }), Some(GpuQuantization::new_pq(device, quantized_storage)?), multivectors, stopped, ) } fn new_bq<T: BitsStoreType, TStorage: EncodedStorage>( device: 
Arc<gpu::Device>, distance: Distance, num_vectors: usize, quantized_storage: &EncodedVectorsBin<T, TStorage>, multivectors: Option<GpuMultivectors>, stopped: &AtomicBool, ) -> OperationResult<Self> { Self::new_typed::<VectorElementTypeByte>( device.clone(), distance, quantized_storage.vectors_count(), num_vectors, quantized_storage.get_quantized_vector(0).len(), (0..quantized_storage.vectors_count()).map(|id| { Cow::Borrowed(quantized_storage.get_quantized_vector(id as PointOffsetType)) }), Some(GpuQuantization::new_bq(device, quantized_storage)), multivectors, stopped, ) } fn new_from_vector_storage( device: Arc<gpu::Device>, vector_storage: &VectorStorageEnum, force_half_precision: bool, stopped: &AtomicBool, ) -> OperationResult<Self> { match vector_storage { #[cfg(feature = "rocksdb")] VectorStorageEnum::DenseSimple(vector_storage) => { Self::new_dense_f32(device, vector_storage, force_half_precision, stopped) } #[cfg(feature = "rocksdb")] VectorStorageEnum::DenseSimpleByte(vector_storage) => { Self::new_dense(device, vector_storage, stopped) } #[cfg(feature = "rocksdb")] VectorStorageEnum::DenseSimpleHalf(vector_storage) => { Self::new_dense_f16(device, vector_storage, stopped) } VectorStorageEnum::DenseVolatile(vector_storage) => { Self::new_dense_f32(device, vector_storage, force_half_precision, stopped) } #[cfg(test)] VectorStorageEnum::DenseVolatileByte(vector_storage) => { Self::new_dense(device, vector_storage, stopped) } #[cfg(test)] VectorStorageEnum::DenseVolatileHalf(vector_storage) => { Self::new_dense_f16(device, vector_storage, stopped) } VectorStorageEnum::DenseMemmap(vector_storage) => Self::new_dense_f32( device, vector_storage.as_ref(), force_half_precision, stopped, ), VectorStorageEnum::DenseMemmapByte(vector_storage) => { Self::new_dense(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::DenseMemmapHalf(vector_storage) => { Self::new_dense_f16(device, vector_storage.as_ref(), stopped) } 
VectorStorageEnum::DenseAppendableMemmap(vector_storage) => Self::new_dense_f32( device, vector_storage.as_ref(), force_half_precision, stopped, ), VectorStorageEnum::DenseAppendableMemmapByte(vector_storage) => { Self::new_dense(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::DenseAppendableMemmapHalf(vector_storage) => { Self::new_dense_f16(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::DenseAppendableInRam(vector_storage) => Self::new_dense_f32( device, vector_storage.as_ref(), force_half_precision, stopped, ), VectorStorageEnum::DenseAppendableInRamByte(vector_storage) => { Self::new_dense(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::DenseAppendableInRamHalf(vector_storage) => { Self::new_dense_f16(device, vector_storage.as_ref(), stopped) } #[cfg(feature = "rocksdb")] VectorStorageEnum::SparseSimple(_) => Err(OperationError::from( gpu::GpuError::NotSupported("Sparse vectors are not supported on GPU".to_string()), )), VectorStorageEnum::SparseVolatile(_) => Err(OperationError::from( gpu::GpuError::NotSupported("Sparse vectors are not supported on GPU".to_string()), )), VectorStorageEnum::SparseMmap(_) => Err(OperationError::from( gpu::GpuError::NotSupported("Sparse vectors are not supported on GPU".to_string()), )), #[cfg(feature = "rocksdb")] VectorStorageEnum::MultiDenseSimple(vector_storage) => Self::new_multi_f32( device.clone(), vector_storage, force_half_precision, stopped, ), #[cfg(feature = "rocksdb")] VectorStorageEnum::MultiDenseSimpleByte(vector_storage) => { Self::new_multi(device, vector_storage, stopped) } #[cfg(feature = "rocksdb")] VectorStorageEnum::MultiDenseSimpleHalf(vector_storage) => { Self::new_multi_f16(device, vector_storage, stopped) } VectorStorageEnum::MultiDenseVolatile(vector_storage) => Self::new_multi_f32( device.clone(), vector_storage, force_half_precision, stopped, ), #[cfg(test)] VectorStorageEnum::MultiDenseVolatileByte(vector_storage) => { Self::new_multi(device, 
vector_storage, stopped) } #[cfg(test)] VectorStorageEnum::MultiDenseVolatileHalf(vector_storage) => { Self::new_multi_f16(device, vector_storage, stopped) } VectorStorageEnum::MultiDenseAppendableMemmap(vector_storage) => Self::new_multi_f32( device.clone(), vector_storage.as_ref(), force_half_precision, stopped, ), VectorStorageEnum::MultiDenseAppendableMemmapByte(vector_storage) => { Self::new_multi(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::MultiDenseAppendableMemmapHalf(vector_storage) => { Self::new_multi_f16(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::MultiDenseAppendableInRam(vector_storage) => Self::new_multi_f32( device.clone(), vector_storage.as_ref(), force_half_precision, stopped, ), VectorStorageEnum::MultiDenseAppendableInRamByte(vector_storage) => { Self::new_multi(device, vector_storage.as_ref(), stopped) } VectorStorageEnum::MultiDenseAppendableInRamHalf(vector_storage) => { Self::new_multi_f16(device, vector_storage.as_ref(), stopped) } } } fn new_dense_f32<TVectorStorage: DenseVectorStorage<VectorElementType>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, force_half_precision: bool, stopped: &AtomicBool, ) -> OperationResult<Self> { if force_half_precision && device.has_half_precision() { Self::new_typed::<VectorElementTypeHalf>( device, vector_storage.distance(), vector_storage.total_vector_count(), vector_storage.total_vector_count(), vector_storage.vector_dim(), (0..vector_storage.total_vector_count()).map(|id| { VectorElementTypeHalf::slice_from_float_cow(Cow::Borrowed( vector_storage.get_dense::<Random>(id as PointOffsetType), )) }), None, None, stopped, ) } else { Self::new_dense(device, vector_storage, stopped) } } fn new_dense_f16<TVectorStorage: DenseVectorStorage<VectorElementTypeHalf>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, stopped: &AtomicBool, ) -> OperationResult<Self> { if device.has_half_precision() { Self::new_dense(device, vector_storage, stopped) } else 
{ Self::new_typed::<VectorElementType>( device, vector_storage.distance(), vector_storage.total_vector_count(), vector_storage.total_vector_count(), vector_storage.vector_dim(), (0..vector_storage.total_vector_count()).map(|id| { VectorElementTypeHalf::slice_to_float_cow(Cow::Borrowed( vector_storage.get_dense::<Random>(id as PointOffsetType), )) }), None, None, stopped, ) } } fn new_dense<TElement: PrimitiveVectorElement, TVectorStorage: DenseVectorStorage<TElement>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, stopped: &AtomicBool, ) -> OperationResult<Self> { Self::new_typed::<TElement>( device, vector_storage.distance(), vector_storage.total_vector_count(), vector_storage.total_vector_count(), vector_storage.vector_dim(), (0..vector_storage.total_vector_count()) .map(|id| Cow::Borrowed(vector_storage.get_dense::<Random>(id as PointOffsetType))), None, None, stopped, ) } fn new_multi_f32<TVectorStorage: MultiVectorStorage<VectorElementType>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, force_half_precision: bool, stopped: &AtomicBool, ) -> OperationResult<Self> { if force_half_precision && device.has_half_precision() { Self::new_typed::<VectorElementTypeHalf>( device.clone(), vector_storage.distance(), (0..vector_storage.total_vector_count()) .map(|id| { vector_storage .get_multi::<Random>(id as PointOffsetType) .vectors_count() }) .sum(), vector_storage.total_vector_count(), vector_storage.vector_dim(), vector_storage.iterate_inner_vectors().map(|vector| { VectorElementTypeHalf::slice_from_float_cow(Cow::Borrowed(vector)) }), None, Some(GpuMultivectors::new_multidense(device, vector_storage)?), stopped, ) } else { Self::new_multi(device, vector_storage, stopped) } } fn new_multi_f16<TVectorStorage: MultiVectorStorage<VectorElementTypeHalf>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, stopped: &AtomicBool, ) -> OperationResult<Self> { if device.has_half_precision() { Self::new_multi(device, vector_storage, stopped) } 
else { Self::new_typed::<VectorElementType>( device.clone(), vector_storage.distance(), (0..vector_storage.total_vector_count()) .map(|id| { vector_storage .get_multi::<Random>(id as PointOffsetType) .vectors_count() }) .sum(), vector_storage.total_vector_count(), vector_storage.vector_dim(), vector_storage .iterate_inner_vectors() .map(|vector| VectorElementTypeHalf::slice_to_float_cow(Cow::Borrowed(vector))), None, Some(GpuMultivectors::new_multidense(device, vector_storage)?), stopped, ) } } fn new_multi<TElement: PrimitiveVectorElement, TVectorStorage: MultiVectorStorage<TElement>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, stopped: &AtomicBool, ) -> OperationResult<Self> { Self::new_typed::<TElement>( device.clone(), vector_storage.distance(), (0..vector_storage.total_vector_count()) .map(|id| { vector_storage .get_multi::<Random>(id as PointOffsetType) .vectors_count() }) .sum(), vector_storage.total_vector_count(), vector_storage.vector_dim(), vector_storage.iterate_inner_vectors().map(Cow::Borrowed), None, Some(GpuMultivectors::new_multidense(device, vector_storage)?), stopped, ) } #[allow(clippy::too_many_arguments)] fn new_typed<'a, TElement: PrimitiveVectorElement>( device: Arc<gpu::Device>, distance: Distance, dense_count: usize, num_vectors: usize, dim: usize, vectors: impl Iterator<Item = Cow<'a, [TElement]>> + Clone, quantization: Option<GpuQuantization>, multivectors: Option<GpuMultivectors>, stopped: &AtomicBool, ) -> OperationResult<Self> { check_process_stopped(stopped)?; let timer = std::time::Instant::now(); let gpu_vector_capacity = Self::gpu_vector_capacity(&device, dim); let gpu_vector_size = gpu_vector_capacity * std::mem::size_of::<TElement>(); let upload_points_count = UPLOAD_CHUNK_SIZE / gpu_vector_size; let points_in_storage_count = Self::points_in_storage_count(dense_count); let vectors_buffer: Vec<Arc<gpu::Buffer>> = (0..STORAGES_COUNT) .map(|_| -> gpu::GpuResult<Arc<gpu::Buffer>> { gpu::Buffer::new( device.clone(), 
"Vector storage buffer", gpu::BufferType::Storage, std::cmp::max(points_in_storage_count, 1) * gpu_vector_size, ) }) .collect::<gpu::GpuResult<Vec<_>>>()?; log::trace!("Storage buffer size {}", vectors_buffer[0].size()); let mut upload_context = gpu::Context::new(device.clone())?; // Fill all vector storages with zeros. for buffer in vectors_buffer.iter() { upload_context.clear_buffer(buffer.clone())?; } upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; let staging_buffer = gpu::Buffer::new( device.clone(), "Vector storage upload staging buffer", gpu::BufferType::CpuToGpu, std::cmp::max(upload_points_count, 1) * gpu_vector_size, )?; // fill staging buffer with zeros let zero_vector = vec![TElement::default(); gpu_vector_capacity]; for i in 0..upload_points_count { staging_buffer.upload(zero_vector.as_bytes(), i * gpu_vector_capacity)?; } log::trace!( "GPU staging buffer size {}, `upload_points_count` = {}", staging_buffer.size(), upload_points_count ); // Upload vectors to GPU. // Upload storage-by storage and iterate over all vectors for each storage. 
for (storage_index, vector_buffer) in vectors_buffer.iter().enumerate() { let mut gpu_offset = 0; let mut upload_size = 0; let mut upload_points = 0; for vector in vectors.clone().skip(storage_index).step_by(STORAGES_COUNT) { check_process_stopped(stopped)?; staging_buffer.upload(vector.as_bytes(), upload_points * gpu_vector_size)?; upload_size += gpu_vector_size; upload_points += 1; if upload_points == upload_points_count { upload_context.copy_gpu_buffer( staging_buffer.clone(), vector_buffer.clone(), 0, gpu_offset, upload_size, )?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; log::trace!( "Uploaded {} vectors, {} MB", upload_points, upload_size / 1024 / 1024, ); gpu_offset += upload_size; upload_size = 0; upload_points = 0; } } if upload_points > 0 { upload_context.copy_gpu_buffer( staging_buffer.clone(), vectors_buffer[storage_index].clone(), 0, gpu_offset, upload_size, )?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; log::trace!( "Uploaded {} vectors, {} MB", upload_points, upload_size / 1024 / 1024, ); } } log::trace!( "Upload vector data to GPU time = {:?}, vector data size {} MB, element type: {:?}", timer.elapsed(), STORAGES_COUNT * points_in_storage_count * gpu_vector_size / 1024 / 1024, TElement::datatype(), ); let descriptor_set_layout = Self::create_descriptor_set_layout(device.clone(), &quantization, &multivectors)?; let descriptor_set = Self::create_descriptor_set( descriptor_set_layout.clone(), &vectors_buffer, &quantization, &multivectors, )?; Ok(Self { device, descriptor_set_layout, descriptor_set, dim: gpu_vector_capacity, num_vectors, element_type: TElement::datatype(), distance, quantization, multivectors, }) } fn create_descriptor_set_layout( device: Arc<gpu::Device>, quantization: &Option<GpuQuantization>, multivectors: &Option<GpuMultivectors>, ) -> OperationResult<Arc<gpu::DescriptorSetLayout>> { let mut descriptor_set_layout_builder = gpu::DescriptorSetLayout::builder(); for i in 0..STORAGES_COUNT { 
descriptor_set_layout_builder = descriptor_set_layout_builder.add_storage_buffer(i); } descriptor_set_layout_builder = if let Some(quantization) = &quantization { quantization.add_descriptor_set_layout(descriptor_set_layout_builder) } else { descriptor_set_layout_builder }; descriptor_set_layout_builder = if let Some(multivectors) = &multivectors { multivectors.add_descriptor_set_layout(descriptor_set_layout_builder) } else { descriptor_set_layout_builder }; Ok(descriptor_set_layout_builder.build(device.clone())?) } fn create_descriptor_set( descriptor_set_layout: Arc<gpu::DescriptorSetLayout>, vectors_buffer: &[Arc<gpu::Buffer>],
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/gpu_multivectors.rs
lib/segment/src/index/hnsw_index/gpu/gpu_vector_storage/gpu_multivectors.rs
use std::collections::HashMap; use std::sync::Arc; use common::types::PointOffsetType; use quantization::EncodedVectors; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use super::STORAGES_COUNT; use super::gpu_quantization::MAX_QUANTIZATION_BINDINGS; use crate::common::operation_error::OperationResult; use crate::data_types::primitive::PrimitiveVectorElement; use crate::index::hnsw_index::gpu::GPU_TIMEOUT; use crate::index::hnsw_index::gpu::shader_builder::ShaderBuilderParameters; use crate::vector_storage::quantized::quantized_multivector_storage::{ MultivectorOffsetsStorage, QuantizedMultivectorStorage, }; use crate::vector_storage::{MultiVectorStorage, Random}; // Multivector shader binding is after vectot data and quantization data bindings. const START_MULTIVECTORS_BINDING: usize = STORAGES_COUNT + MAX_QUANTIZATION_BINDINGS; /// Shader struct for multivector offsets with start id and count of vectors in multivector. #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] struct GpuMultivectorOffset { start: u32, count: u32, } /// Gpu data for multivectors pub struct GpuMultivectors { /// Positions of multivector offsets. offsets_buffer: Arc<gpu::Buffer>, /// Shader binding for offsets. offsets_binding: usize, } impl ShaderBuilderParameters for GpuMultivectors { fn shader_includes(&self) -> HashMap<String, String> { // No additional includes for multivectors. // Cause all multivector logic is defined in `vector_storage.comp` shader. Default::default() } fn shader_defines(&self) -> HashMap<String, Option<String>> { let mut defines = HashMap::new(); // Set enabled flag for multivectors. defines.insert("MULTIVECTORS".to_owned(), None); // Provide shader binding of multivector offsets. defines.insert( "MULTIVECTOR_OFFSETS_BINDING".to_owned(), Some(self.offsets_binding.to_string()), ); defines } } impl GpuMultivectors { /// Construct multivectors data from quantized storage. 
pub fn new_quantized< QuantizedStorage: EncodedVectors, TMultivectorOffsetsStorage: MultivectorOffsetsStorage, >( device: Arc<gpu::Device>, quantized_storage: &QuantizedMultivectorStorage< QuantizedStorage, TMultivectorOffsetsStorage, >, ) -> OperationResult<GpuMultivectors> { Self::new_impl( device, (0..quantized_storage.vectors_count()) .map(|id| quantized_storage.inner_vector_offset(id as PointOffsetType)) .map(|x| GpuMultivectorOffset { start: x.start, count: x.count, }), ) } /// Construct multivectors data from vector storage. pub fn new_multidense<T: PrimitiveVectorElement, TVectorStorage: MultiVectorStorage<T>>( device: Arc<gpu::Device>, vector_storage: &TVectorStorage, ) -> OperationResult<GpuMultivectors> { Self::new_impl( device, (0..vector_storage.total_vector_count()) // map ID to count of vectors in multivector .map(|id| { vector_storage .get_multi::<Random>(id as PointOffsetType) .vectors_count() }) // Map count of vectors to start and count of vectors in multivector. .scan(0, |acc, count| { let start = *acc; *acc += count; Some(GpuMultivectorOffset { start: start as u32, count: count as u32, }) }), ) } /// Adds multivector data to the descriptor set builder. pub fn add_descriptor_set( &self, descriptor_set_builder: gpu::DescriptorSetBuilder, ) -> gpu::DescriptorSetBuilder { descriptor_set_builder.add_storage_buffer(self.offsets_binding, self.offsets_buffer.clone()) } /// Adds multivector data to the descriptor set layout builder. 
pub fn add_descriptor_set_layout( &self, descriptor_set_layout_builder: gpu::DescriptorSetLayoutBuilder, ) -> gpu::DescriptorSetLayoutBuilder { descriptor_set_layout_builder.add_storage_buffer(self.offsets_binding) } fn new_impl( device: Arc<gpu::Device>, vector_offsets: impl Iterator<Item = GpuMultivectorOffset> + Clone, ) -> OperationResult<GpuMultivectors> { let multivectors_count = vector_offsets.clone().count(); let offsets = gpu::Buffer::new( device.clone(), "Multivector offsets buffer", gpu::BufferType::Storage, std::cmp::max(multivectors_count, 1) * std::mem::size_of::<GpuMultivectorOffset>(), )?; let offsets_staging_buffer = gpu::Buffer::new( device.clone(), "Multivector offsets staging buffer", gpu::BufferType::CpuToGpu, offsets.size(), )?; for (point_id, offset) in vector_offsets.enumerate() { offsets_staging_buffer.upload( &offset, point_id * std::mem::size_of::<GpuMultivectorOffset>(), )?; } let mut upload_context = gpu::Context::new(device.clone())?; upload_context.copy_gpu_buffer( offsets_staging_buffer, offsets.clone(), 0, 0, offsets.size(), )?; upload_context.run()?; upload_context.wait_finish(GPU_TIMEOUT)?; Ok(GpuMultivectors { offsets_buffer: offsets, offsets_binding: START_MULTIVECTORS_BINDING, }) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/tests/mod.rs
lib/segment/src/index/hnsw_index/tests/mod.rs
mod test_compact_graph_layer; mod test_graph_connectivity; use common::types::PointOffsetType; use rand::Rng; use super::graph_links::GraphLinksFormat; use crate::fixtures::index_fixtures::TestRawScorerProducer; use crate::index::hnsw_index::HnswM; use crate::index::hnsw_index::graph_layers::GraphLayers; use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder; use crate::types::Distance; pub(crate) fn create_graph_layer_builder_fixture<R: Rng + ?Sized>( num_vectors: usize, m: usize, dim: usize, use_heuristic: bool, use_quantization: bool, distance: Distance, rng: &mut R, ) -> (TestRawScorerProducer, GraphLayersBuilder) { let ef_construct = 16; let entry_points_num = 10; let vector_holder = TestRawScorerProducer::new(dim, distance, num_vectors, use_quantization, rng); let mut graph_layers_builder = GraphLayersBuilder::new( num_vectors, HnswM::new2(m), ef_construct, entry_points_num, use_heuristic, ); for idx in 0..(num_vectors as PointOffsetType) { let level = graph_layers_builder.get_random_layer(rng); graph_layers_builder.set_levels(idx, level); graph_layers_builder.link_new_point(idx, vector_holder.internal_scorer(idx)); } (vector_holder, graph_layers_builder) } #[expect(clippy::too_many_arguments)] pub(crate) fn create_graph_layer_fixture<R: Rng + ?Sized>( num_vectors: usize, m: usize, dim: usize, format: GraphLinksFormat, use_heuristic: bool, use_quantization: bool, distance: Distance, rng: &mut R, ) -> (TestRawScorerProducer, GraphLayers) { let (vector_holder, graph_layers_builder) = create_graph_layer_builder_fixture( num_vectors, m, dim, use_heuristic, use_quantization, distance, rng, ); let graph_layers = graph_layers_builder.into_graph_layers_ram( format.with_param_for_tests(vector_holder.graph_links_vectors().as_ref()), ); (vector_holder, graph_layers) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/tests/test_compact_graph_layer.rs
lib/segment/src/index/hnsw_index/tests/test_compact_graph_layer.rs
use std::cmp::max;

use common::types::ScoredPointOffset;
use itertools::Itertools;
use rand::SeedableRng;
use rand::prelude::StdRng;
use rstest::rstest;

use crate::fixtures::index_fixtures::random_vector;
use crate::index::hnsw_index::graph_layers::{GraphLayersBase, SearchAlgorithm};
use crate::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;
use crate::index::hnsw_index::graph_links::GraphLinksFormat;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::tests::create_graph_layer_builder_fixture;
use crate::types::Distance;
use crate::vector_storage::DEFAULT_STOPPED;

/// Reference search implemented directly on top of [`GraphLayersBuilder`]:
/// pick an entry point, descend to level 0, then search level 0 with an
/// `ef`-sized beam and return the `top` best results.
///
/// Used below as the ground truth to compare against the serialized graph.
fn search_in_builder(
    builder: &GraphLayersBuilder,
    top: usize,
    ef: usize,
    mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
    // No valid entry point (e.g. everything filtered out) -> empty result.
    let Some(entry_point) = builder
        .get_entry_points()
        .get_entry_point(|point_id| points_scorer.filters().check_vector(point_id))
    else {
        return vec![];
    };

    // Greedy descent from the entry point's level down to level 0.
    let zero_level_entry = builder
        .search_entry(
            entry_point.point_id,
            entry_point.level,
            0,
            &mut points_scorer,
            &DEFAULT_STOPPED,
        )
        .unwrap();

    // Beam search on level 0; the beam must be at least `top` wide.
    let nearest = builder
        .search_on_level(
            zero_level_entry,
            0,
            max(top, ef),
            &mut points_scorer,
            &DEFAULT_STOPPED,
        )
        .unwrap();
    nearest.into_iter_sorted().take(top).collect_vec()
}

/// Check that HNSW index with raw and compacted links gives the same results
#[rstest]
#[case::uncompressed(GraphLinksFormat::Plain)]
#[case::compressed(GraphLinksFormat::Compressed)]
#[case::compressed_with_vectors(GraphLinksFormat::CompressedWithVectors)]
fn test_compact_graph_layers(#[case] format: GraphLinksFormat) {
    let num_vectors = 1000;
    let num_queries = 100;
    let m = 16;
    let dim = 8;
    let top = 5;
    let ef = 100;
    // Fixed seed so both search passes see the same graph and queries.
    let mut rng = StdRng::seed_from_u64(42);

    let (vector_holder, graph_layers_builder) = create_graph_layer_builder_fixture(
        num_vectors,
        m,
        dim,
        false,
        format.is_with_vectors(),
        Distance::Cosine,
        &mut rng,
    );

    let queries = (0..num_queries)
        .map(|_| random_vector(&mut rng, dim))
        .collect_vec();

    // Ground truth: search directly on the builder before serialization.
    let reference_results = queries
        .iter()
        .map(|query| {
            let scorer = vector_holder.scorer(query.clone());
            search_in_builder(&graph_layers_builder, top, ef, scorer)
        })
        .collect_vec();

    // Serialize the builder into the format under test.
    let graph_layers = graph_layers_builder.into_graph_layers_ram(
        format.with_param_for_tests(vector_holder.graph_links_vectors().as_ref()),
    );

    // Same queries against the serialized graph must give identical results.
    let results = queries
        .iter()
        .map(|query| {
            let scorer = vector_holder.scorer(query.clone());
            graph_layers
                .search(
                    top,
                    ef,
                    SearchAlgorithm::Hnsw,
                    scorer,
                    None,
                    &DEFAULT_STOPPED,
                )
                .unwrap()
        })
        .collect_vec();

    assert_eq!(reference_results, results);
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/tests/test_graph_connectivity.rs
lib/segment/src/index/hnsw_index/tests/test_graph_connectivity.rs
use std::sync::Arc;
use std::sync::atomic::AtomicBool;

use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::PointOffsetType;
use rand::rng;
use tempfile::Builder;

use crate::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use crate::entry::entry_point::SegmentEntry;
use crate::fixtures::index_fixtures::random_vector;
use crate::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use crate::index::hnsw_index::num_rayon_threads;
use crate::segment_constructor::VectorIndexBuildArgs;
use crate::segment_constructor::simple_segment_constructor::build_simple_segment;
use crate::types::{Distance, HnswConfig, HnswGlobalConfig, SeqNumberType};

/// Builds an HNSW index over 1000 random vectors and checks that every point
/// has at least one *inbound* link on level 0, i.e. the level-0 graph has no
/// unreachable (orphaned) points.
#[test]
fn test_graph_connectivity() {
    let stopped = AtomicBool::new(false);

    let dim = 32;
    let m = 16;
    let num_vectors: u64 = 1_000;
    let ef_construct = 100;
    let distance = Distance::Cosine;
    // High enough that the index is actually built (not a full-scan fallback)
    // for 1000 points — TODO confirm against `full_scan_threshold` semantics.
    let full_scan_threshold = 10_000;

    let mut rng = rng();

    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();

    let hw_counter = HardwareCounterCell::new();

    // Populate a plain segment with random vectors.
    let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
    for n in 0..num_vectors {
        let idx = n.into();
        let vector = random_vector(&mut rng, dim);
        segment
            .upsert_point(
                n as SeqNumberType,
                idx,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
    }

    let payload_index_ptr = segment.payload_index.clone();

    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold,
        max_indexing_threads: 4,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };

    let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));

    // Build the HNSW index from the segment's storages.
    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
                .vector_storage
                .clone(),
            quantized_vectors: Default::default(),
            payload_index: payload_index_ptr,
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();

    // Invert the level-0 adjacency: reverse_links[target] lists all sources
    // that link to `target`.
    let mut reverse_links = vec![vec![]; num_vectors as usize];
    for point_id in 0..num_vectors {
        for link in hnsw_index
            .graph()
            .links
            .links(point_id as PointOffsetType, 0)
        {
            reverse_links[link as usize].push(point_id);
        }
    }
    // Every point must be the target of at least one link.
    for point_id in 0..num_vectors {
        assert!(
            !reverse_links[point_id as usize].is_empty(),
            "Point {point_id} has no inbound links"
        );
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_links/serializer.rs
lib/segment/src/index/hnsw_index/graph_links/serializer.rs
use std::alloc::Layout;
use std::cmp::Reverse;
use std::io::{Seek, Write};

use common::bitpacking::packed_bits;
use common::bitpacking_links::{MIN_BITS_PER_VALUE, pack_links};
use common::bitpacking_ordered;
use common::types::PointOffsetType;
use common::zeros::WriteZerosExt;
use integer_encoding::{VarInt, VarIntWriter};
use itertools::Either;
use zerocopy::IntoBytes as AsBytes;
use zerocopy::little_endian::U64 as LittleU64;

use super::GraphLinksFormatParam;
use super::header::{HEADER_VERSION_COMPRESSED, HeaderCompressed, HeaderPlain};
use crate::common::operation_error::{OperationError, OperationResult};
use crate::index::hnsw_index::HnswM;
use crate::index::hnsw_index::graph_links::header::{
    HEADER_VERSION_COMPRESSED_WITH_VECTORS, HeaderCompressedWithVectors, PackedVectorLayout,
};

/// Serializes the per-point, per-level adjacency lists (`edges[point][level]`)
/// into the on-disk GraphLinks layout selected by `format_param`.
///
/// The writer must support seeking: a zeroed header is written first and
/// rewritten with real values at the end, once sizes are known.
///
/// # Errors
///
/// Returns a service error for inconsistent vector layouts or vector size
/// mismatches (CompressedWithVectors only), or any I/O error from `writer`.
pub fn serialize_graph_links<W: Write + Seek>(
    mut edges: Vec<Vec<Vec<PointOffsetType>>>,
    format_param: GraphLinksFormatParam,
    hnsw_m: HnswM,
    writer: &mut W,
) -> OperationResult<()> {
    // Bit width needed to encode any point id (used for the unsorted part of
    // packed links); never below the packing scheme's minimum.
    let bits_per_unsorted =
        packed_bits(u32::try_from(edges.len().saturating_sub(1)).unwrap()).max(MIN_BITS_PER_VALUE);

    // For CompressedWithVectors, validate that vector sizes are multiples of
    // their alignments, so consecutively written vectors stay aligned.
    let vectors_layout = match format_param {
        GraphLinksFormatParam::Plain => None,
        GraphLinksFormatParam::Compressed => None,
        GraphLinksFormatParam::CompressedWithVectors(v) => {
            let vectors_layout = v.vectors_layout();
            if vectors_layout.base.size() % vectors_layout.base.align() != 0 {
                return Err(OperationError::service_error(
                    "Base vector size must be a multiple of its alignment",
                ));
            }
            if vectors_layout.link.size() % vectors_layout.link.align() != 0 {
                return Err(OperationError::service_error(
                    "Link vector size must be a multiple of its alignment",
                ));
            }
            Some(vectors_layout)
        }
    };

    // create map from index in `offsets` to point_id
    let mut back_index: Vec<PointOffsetType> = (0..edges.len()).map(|i| i as _).collect();
    // sort by max layer and use this map to build `reindex`
    back_index.sort_unstable_by_key(|&i| Reverse(edges[i as usize].len()));

    // After the sort, the first entry has the most layers.
    let levels_count = back_index.first().map_or(0, |&id| edges[id as usize].len());
    let mut point_count_by_level = vec![0; levels_count];
    for point in &edges {
        point_count_by_level[point.len() - 1] += 1;
    }

    // 1. Write header (placeholder, will be rewritten later)
    writer.write_zeros(match &format_param {
        GraphLinksFormatParam::Plain => size_of::<HeaderPlain>(),
        GraphLinksFormatParam::Compressed => size_of::<HeaderCompressed>(),
        GraphLinksFormatParam::CompressedWithVectors(_) => size_of::<HeaderCompressedWithVectors>(),
    })?;

    // 2. Write level offsets
    // Each level's offset is the running total of entries on all previous
    // levels; level N contains every point whose max level is >= N, hence the
    // suffix sums.
    let mut total_offsets_len = 0;
    {
        let mut suffix_sum = point_count_by_level.iter().sum::<u64>();
        for &value in point_count_by_level.iter() {
            writer.write_all(total_offsets_len.as_bytes())?;
            total_offsets_len += suffix_sum;
            suffix_sum -= value;
        }
        // One extra slot: `offsets` stores N+1 boundaries for N ranges.
        total_offsets_len += 1;
    }

    // 3. Write reindex (aka map from point id to index in `offsets`)
    {
        let mut reindex = vec![0; back_index.len()];
        for i in 0..back_index.len() {
            reindex[back_index[i] as usize] = i as PointOffsetType;
        }
        writer.write_all(reindex.as_bytes())?;
    }

    // 4. Write neighbors padding (if applicable)
    // Aligns the start of the neighbors section so embedded vectors are
    // naturally aligned when the file is mapped.
    if let Some(vectors_layout) = vectors_layout.as_ref() {
        let pos = writer.stream_position()? as usize;
        let alignment = std::cmp::max(vectors_layout.base.align(), vectors_layout.link.align());
        writer.write_zeros(pos.next_multiple_of(alignment) - pos)?;
    }

    // 5. Write neighbors (and calculate `offsets`)
    let mut links_buf = Vec::new();
    let mut offset = 0; // elements for Plain, bytes for Compressed/CompressedWithVectors
    let mut offsets = Vec::with_capacity(total_offsets_len as usize);
    offsets.push(0);
    #[expect(clippy::needless_range_loop)]
    // Range loop is intentional: `level` is used both to index
    // `point_count_by_level`/`back_index` and to `mem::take` from `edges`.
    for level in 0..levels_count {
        // Number of points that exist on this level (points are sorted by
        // max level descending, so they form a prefix of `back_index`).
        let count = point_count_by_level.iter().skip(level).sum::<u64>() as usize;
        let (level_m, mut iter) = match level {
            // On level 0 every point is present, in plain id order.
            0 => (hnsw_m.m0, Either::Left((0..count).map(|x| x as u32))),
            // On higher levels, iterate in reindexed (sorted) order.
            _ => (hnsw_m.m, Either::Right(back_index[..count].iter().copied())),
        };
        iter.try_for_each(|id| {
            let mut raw_links = std::mem::take(&mut edges[id as usize][level]);
            match format_param {
                GraphLinksFormatParam::Plain => {
                    writer.write_all(raw_links.as_bytes())?;
                    offset += raw_links.len();
                }
                GraphLinksFormatParam::Compressed => {
                    pack_links(&mut links_buf, &mut raw_links, bits_per_unsorted, level_m);
                    writer.write_all(&links_buf)?;
                    offset += links_buf.len();
                }
                GraphLinksFormatParam::CompressedWithVectors(vectors) => {
                    // Unwrap safety: `vectors_layout` is `Some` for `CompressedWithVectors`.
                    let vectors_layout = vectors_layout.as_ref().unwrap();

                    // 1. Base vector (`B` in the doc, only on level 0).
                    if level == 0 {
                        let vector = vectors.get_base_vector(id)?;
                        if vector.len() != vectors_layout.base.size() {
                            return Err(OperationError::service_error("Vector size mismatch"));
                        }
                        writer.write_all(vector)?;
                        offset += vector.len();
                    }

                    // 2. The varint-encoded length (`#` in the doc).
                    writer.write_varint(raw_links.len() as u64)?;
                    offset += VarInt::required_space(raw_links.len() as u64);

                    // 3. Compressed links (`c` in the doc)
                    pack_links(&mut links_buf, &mut raw_links, bits_per_unsorted, level_m);
                    writer.write_all(&links_buf)?;
                    offset += links_buf.len();

                    // 4. Padding to align link vectors (`_` in the doc).
                    let padding = offset.next_multiple_of(vectors_layout.link.align()) - offset;
                    writer.write_zeros(padding)?;
                    offset += padding;

                    // 5. Link vectors (`L` in the doc).
                    //    Write them in the same order as `raw_links`.
                    for i in raw_links {
                        let vector = vectors.get_link_vector(i)?;
                        if vector.len() != vectors_layout.link.size() {
                            return Err(OperationError::service_error("Vector size mismatch"));
                        }
                        writer.write_all(vector)?;
                        offset += vector.len();
                    }

                    // 6. Padding to align the next base vector (`_` in the doc).
                    if level == 0 {
                        let padding = offset.next_multiple_of(vectors_layout.base.align()) - offset;
                        writer.write_zeros(padding)?;
                        offset += padding;
                    }
                }
            }
            offsets.push(offset as u64);
            links_buf.clear();
            Ok(())
        })?;
    }

    drop(back_index);

    // 6. Write offsets (and get some info for the header)
    let (offsets_padding, offsets_parameters) = match &format_param {
        GraphLinksFormatParam::Plain => {
            // Pad so the u64 offsets table is 8-byte aligned in the file.
            let len = writer.stream_position()? as usize;
            let offsets_padding = len.next_multiple_of(size_of::<u64>()) - len;
            writer.write_zeros(offsets_padding)?;
            writer.write_all(offsets.as_bytes())?;
            (Some(offsets_padding), None)
        }
        GraphLinksFormatParam::Compressed | GraphLinksFormatParam::CompressedWithVectors(_) => {
            // Offsets are monotonically increasing, so they compress well.
            let (compressed_offsets, offsets_parameters) = bitpacking_ordered::compress(&offsets);
            writer.write_all(&compressed_offsets)?;
            (None, Some(offsets_parameters))
        }
    };

    // 7. Write header (not a placeholder anymore)
    writer.seek(std::io::SeekFrom::Start(0))?;
    match format_param {
        GraphLinksFormatParam::Plain => {
            let header = HeaderPlain {
                point_count: edges.len() as u64,
                levels_count: levels_count as u64,
                total_neighbors_count: offset as u64,
                total_offset_count: offsets.len() as u64,
                offsets_padding_bytes: offsets_padding.unwrap() as u64,
                zero_padding: [0; 24],
            };
            writer.write_all(header.as_bytes())?;
        }
        GraphLinksFormatParam::Compressed => {
            let header = HeaderCompressed {
                version: LittleU64::from(HEADER_VERSION_COMPRESSED),
                point_count: LittleU64::new(edges.len() as u64),
                total_neighbors_bytes: LittleU64::new(offset as u64),
                offsets_parameters: offsets_parameters.unwrap(),
                levels_count: LittleU64::new(levels_count as u64),
                m: LittleU64::new(hnsw_m.m as u64),
                m0: LittleU64::new(hnsw_m.m0 as u64),
                zero_padding: [0; 5],
            };
            writer.write_all(header.as_bytes())?;
        }
        GraphLinksFormatParam::CompressedWithVectors(_) => {
            let vectors_layout = vectors_layout.as_ref().unwrap();
            let header = HeaderCompressedWithVectors {
                version: LittleU64::from(HEADER_VERSION_COMPRESSED_WITH_VECTORS),
                point_count: LittleU64::new(edges.len() as u64),
                total_neighbors_bytes: LittleU64::new(offset as u64),
                offsets_parameters: offsets_parameters.unwrap(),
                levels_count: LittleU64::new(levels_count as u64),
                m: LittleU64::new(hnsw_m.m as u64),
                m0: LittleU64::new(hnsw_m.m0 as u64),
                base_vector_layout: pack_layout(&vectors_layout.base),
                link_vector_layout: pack_layout(&vectors_layout.link),
                zero_padding: [0; 3],
            };
            writer.write_all(header.as_bytes())?;
        }
    };

    Ok(())
}

/// Converts a [`Layout`] into its fixed-size on-disk representation.
///
/// # Panics
///
/// Panics if the alignment does not fit into a `u8`.
fn pack_layout(layout: &Layout) -> PackedVectorLayout {
    PackedVectorLayout {
        size: LittleU64::new(layout.size() as u64),
        alignment: u8::try_from(layout.align()).expect("Alignment must fit in u8"),
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_links/view.rs
lib/segment/src/index/hnsw_index/graph_links/view.rs
use std::alloc::Layout;
use std::iter::{Copied, Zip};
use std::num::NonZero;

use common::bitpacking::packed_bits;
use common::bitpacking_links::{
    MIN_BITS_PER_VALUE, PackedLinksIterator, iterate_packed_links, packed_links_size,
};
use common::bitpacking_ordered;
use common::types::PointOffsetType;
use integer_encoding::VarInt as _;
use itertools::{Either, Itertools as _};
use zerocopy::native_endian::U64 as NativeU64;
use zerocopy::{FromBytes, Immutable};

use super::GraphLinksFormat;
use super::header::{HEADER_VERSION_COMPRESSED, HeaderCompressed, HeaderPlain};
use crate::common::operation_error::{OperationError, OperationResult};
use crate::index::hnsw_index::HnswM;
use crate::index::hnsw_index::graph_links::header::{
    HEADER_VERSION_COMPRESSED_WITH_VECTORS, HeaderCompressedWithVectors,
};

/// An (almost) zero-copy, non-owning view into serialized graph links stored
/// as a `&[u8]` slice.
#[derive(Debug)]
pub(super) struct GraphLinksView<'a> {
    /// Map from point id to its index in `offsets` (for levels above 0).
    pub(super) reindex: &'a [PointOffsetType],
    /// Format-specific neighbor data and offset lookup.
    pub(super) compression: CompressionInfo<'a>,
    /// Level offsets, copied into RAM for faster access.
    /// Has at least two elements:
    /// - [`super::serialize_graph_links`] always writes `0` as the first element.
    /// - Additional element is added during deserialization.
    pub(super) level_offsets: Vec<u64>,
}

/// An iterator type returned by [`GraphLinksView::links`].
pub type LinksIterator<'a> = Either<Copied<std::slice::Iter<'a, u32>>, PackedLinksIterator<'a>>;

/// An iterator type returned by [`super::GraphLinks::links_with_vectors`].
/// Iterates over pairs of ([`PointOffsetType`], `&[u8]`). The second element is
/// quantized vector bytes.
pub type LinksWithVectorsIterator<'a> =
    Zip<PackedLinksIterator<'a>, std::slice::ChunksExact<'a, u8>>;

#[derive(Debug)]
pub(super) enum CompressionInfo<'a> {
    Uncompressed {
        /// Uncompressed links.
        ///
        /// A flat array of `u32` values (neighbor ids).
        /// ```text
        /// [uuuuuuuuuuuuuuuuuuuu][uuuuuuuuuuuuuuuuuuuu][uuuuuuuuuuuuuuuuuuuu]...
        /// [neighbors for node 0][neighbors for node 1][neighbors for node 2]...
        /// ```
        /// Where:
        /// 1. `u` are uncompressed links (i.e. it represents `Vec<u32>`).
        neighbors: &'a [u32],
        offsets: &'a [NativeU64],
    },
    Compressed {
        /// Compressed links.
        ///
        /// Similar to [`CompressionInfo::Uncompressed`], but compressed.
        ///
        /// ```text
        /// [cccccccccccccccccccc][cccccccccccccccccccc][cccccccccccccccccccc]...
        /// [neighbors for node 0][neighbors for node 1][neighbors for node 2]...
        /// ```
        /// Where
        /// 1. `c` are compressed links (i.e. a compressed form of `Vec<u32>`).
        neighbors: &'a [u8],
        offsets: bitpacking_ordered::Reader<'a>,
        hnsw_m: HnswM,
        bits_per_unsorted: u8,
    },
    CompressedWithVectors {
        /// Compressed links with vectors.
        ///
        /// Similar to [`CompressionInfo::Compressed`], but includes vectors.
        /// - Each node on level 0 has a fixed-size "base" vector.
        /// - Each link is accompanied by a fixed-size "link" vector.
        ///
        /// ```text
        /// [BBBB#ccccccc_LLLLLL_][BBBB#ccccccc_LLLLLL_][BBBB#ccccccc_LLLLLL_]
        /// [neighbors for node 0][neighbors for node 1][neighbors for node 2]...
        /// ```
        /// Where:
        /// 1. `B` is a base vector (i.e. `Vec<u8>` of fixed size).
        ///    Only present on level 0, omitted on higher levels.
        /// 2. `#` is a varint-encoded length.
        ///    This value == number of links == number of link vectors.
        /// 3. `c` are compressed links (i.e. a compressed form of `Vec<u32>`).
        /// 4. `_` is a padding to make link vectors aligned.
        /// 5. `L` are encoded link vectors, one per link (i.e. `Vec<Vec<u8>>`).
        /// 6. `_` is a padding to make the next base vector aligned.
        ///    Only present on level 0, omitted on higher levels.
        neighbors: &'a [u8],
        offsets: bitpacking_ordered::Reader<'a>,
        hnsw_m: HnswM,
        bits_per_unsorted: u8,
        base_vector_layout: Layout,
        /// `NonZero` to avoid handling unlikely corner cases.
        link_vector_size: NonZero<usize>,
        link_vector_alignment: u8,
    },
}

impl GraphLinksView<'_> {
    /// Parses `data` according to `format` and returns a borrowed view.
    pub(super) fn load(
        data: &[u8],
        format: GraphLinksFormat,
    ) -> OperationResult<GraphLinksView<'_>> {
        match format {
            GraphLinksFormat::Compressed => Self::load_compressed(data),
            GraphLinksFormat::Plain => Self::load_plain(data),
            GraphLinksFormat::CompressedWithVectors => Self::load_compressed_with_vectors(data),
        }
    }

    /// Parses the plain (uncompressed) layout:
    /// header, level offsets, reindex, neighbors, padding, offsets.
    fn load_plain(data: &[u8]) -> OperationResult<GraphLinksView<'_>> {
        let (header, data) =
            HeaderPlain::ref_from_prefix(data).map_err(|_| error_unsufficent_size())?;
        let (level_offsets, data) =
            read_level_offsets(data, header.levels_count, header.total_offset_count)?;
        let (reindex, data) = get_slice::<PointOffsetType>(data, header.point_count)?;
        let (neighbors, data) = get_slice::<u32>(data, header.total_neighbors_count)?;
        let (_, data) = get_slice::<u8>(data, header.offsets_padding_bytes)?;
        let (offsets, _bytes) = get_slice::<NativeU64>(data, header.total_offset_count)?;

        Ok(GraphLinksView {
            reindex,
            compression: CompressionInfo::Uncompressed { neighbors, offsets },
            level_offsets,
        })
    }

    /// Parses the compressed layout:
    /// header, level offsets, reindex, packed neighbors, compressed offsets.
    fn load_compressed(data: &[u8]) -> OperationResult<GraphLinksView<'_>> {
        let (header, data) =
            HeaderCompressed::ref_from_prefix(data).map_err(|_| error_unsufficent_size())?;
        debug_assert_eq!(header.version.get(), HEADER_VERSION_COMPRESSED);
        let (level_offsets, data) = read_level_offsets(
            data,
            header.levels_count.get(),
            header.offsets_parameters.length.get(),
        )?;
        let (reindex, data) = get_slice::<PointOffsetType>(data, header.point_count.get())?;
        let (neighbors, data) = get_slice::<u8>(data, header.total_neighbors_bytes.get())?;
        let (offsets, _bytes) = bitpacking_ordered::Reader::new(header.offsets_parameters, data)
            .map_err(|e| {
                OperationError::service_error(format!("Can't create decompressor: {e}"))
            })?;

        Ok(GraphLinksView {
            reindex,
            compression: CompressionInfo::Compressed {
                neighbors,
                offsets,
                hnsw_m: HnswM::new(header.m.get() as usize, header.m0.get() as usize),
                // Must match the writer's computation in `serialize_graph_links`.
                bits_per_unsorted: MIN_BITS_PER_VALUE.max(packed_bits(
                    u32::try_from(header.point_count.get().saturating_sub(1)).map_err(|_| {
                        OperationError::service_error("Too many points in GraphLinks file")
                    })?,
                )),
            },
            level_offsets,
        })
    }

    /// Parses the compressed-with-vectors layout. Same as
    /// [`Self::load_compressed`], plus vector layouts from the header and an
    /// alignment padding before the neighbors section.
    fn load_compressed_with_vectors(data: &[u8]) -> OperationResult<GraphLinksView<'_>> {
        let total_len = data.len();
        let (header, data) = HeaderCompressedWithVectors::ref_from_prefix(data)
            .map_err(|_| error_unsufficent_size())?;
        debug_assert_eq!(header.version.get(), HEADER_VERSION_COMPRESSED_WITH_VECTORS);
        let base_vector_layout = header.base_vector_layout.try_into_layout()?;
        let link_vector_layout = header.link_vector_layout.try_into_layout()?;
        let (level_offsets, data) = read_level_offsets(
            data,
            header.levels_count.get(),
            header.offsets_parameters.length.get(),
        )?;
        let (reindex, data) = get_slice::<PointOffsetType>(data, header.point_count.get())?;
        // Skip the alignment padding written by the serializer (step 4);
        // `pos` is the absolute file offset of the current slice start.
        let (_, data) = get_slice::<u8>(data, {
            let pos = total_len - data.len();
            let alignment = std::cmp::max(link_vector_layout.align(), base_vector_layout.align());
            (pos.next_multiple_of(alignment) - pos) as u64
        })?;
        let (neighbors, data) = get_slice::<u8>(data, header.total_neighbors_bytes.get())?;
        let (offsets, _bytes) = bitpacking_ordered::Reader::new(header.offsets_parameters, data)
            .map_err(|e| {
                OperationError::service_error(format!("Can't create decompressor: {e}"))
            })?;

        Ok(GraphLinksView {
            reindex,
            compression: CompressionInfo::CompressedWithVectors {
                neighbors,
                offsets,
                hnsw_m: HnswM::new(header.m.get() as usize, header.m0.get() as usize),
                bits_per_unsorted: MIN_BITS_PER_VALUE.max(packed_bits(
                    u32::try_from(header.point_count.get().saturating_sub(1)).map_err(|_| {
                        OperationError::service_error("Too many points in GraphLinks file")
                    })?,
                )),
                base_vector_layout,
                link_vector_size: NonZero::try_from(link_vector_layout.size()).map_err(|_| {
                    OperationError::service_error("Zero link vector size in GraphLinks file")
                })?,
                link_vector_alignment: link_vector_layout.align() as u8,
            },
            level_offsets,
        })
    }

    /// Note: it is safe to use `idx + 1` on the result of this function,
    /// because `level_offsets` always contains an additional element.
    #[inline]
    fn offset_idx(&self, point_id: PointOffsetType, level: usize) -> usize {
        if level == 0 {
            // Level 0 stores all points in plain id order.
            point_id as usize
        } else {
            // Higher levels store points in reindexed order.
            self.level_offsets[level] as usize + self.reindex[point_id as usize] as usize
        }
    }

    /// Returns `true` if [`Self::links`] would return an empty iterator.
    pub(super) fn links_empty(&self, point_id: PointOffsetType, level: usize) -> bool {
        let idx = self.offset_idx(point_id, level);
        match self.compression {
            CompressionInfo::Uncompressed { offsets, .. } => {
                offsets[idx].get() == offsets[idx + 1].get()
            }
            CompressionInfo::Compressed { ref offsets, .. } => {
                offsets.get(idx + 1).unwrap() == offsets.get(idx).unwrap()
            }
            CompressionInfo::CompressedWithVectors { .. } => {
                // Not intended to be used outside of tests.
                self.links(point_id, level).next().is_none()
            }
        }
    }

    /// Returns an iterator over the neighbor ids of `point_id` on `level`.
    pub(super) fn links(&self, point_id: PointOffsetType, level: usize) -> LinksIterator<'_> {
        let idx = self.offset_idx(point_id, level);
        match self.compression {
            CompressionInfo::Uncompressed { neighbors, offsets } => {
                let neighbors_range = offsets[idx].get() as usize..offsets[idx + 1].get() as usize;
                Either::Left(neighbors[neighbors_range].iter().copied())
            }
            CompressionInfo::Compressed {
                neighbors,
                ref offsets,
                ref hnsw_m,
                bits_per_unsorted,
            } => {
                let neighbors_range =
                    offsets.get(idx).unwrap() as usize..offsets.get(idx + 1).unwrap() as usize;
                Either::Right(iterate_packed_links(
                    &neighbors[neighbors_range],
                    bits_per_unsorted,
                    hnsw_m.level_m(level),
                ))
            }
            CompressionInfo::CompressedWithVectors { .. } => {
                // Not intended to be used outside of tests.
                Either::Right(self.links_with_vectors(point_id, level).1)
            }
        }
    }

    /// Returns a tuple of three elements:
    /// - Base vector (only on level 0, empty slice on higher levels).
    /// - Links iterator.
    /// - Link vectors iterator.
    ///
    /// Both iterators have same length and can be combined into
    /// [`LinksWithVectorsIterator`].
    ///
    /// # Panics
    ///
    /// Panics when using a format that does not support vectors.
    pub(super) fn links_with_vectors(
        &self,
        point_id: PointOffsetType,
        level: usize,
    ) -> (
        &[u8],
        PackedLinksIterator<'_>,
        std::slice::ChunksExact<'_, u8>,
    ) {
        let idx = self.offset_idx(point_id, level);
        match self.compression {
            CompressionInfo::Uncompressed { .. } => unimplemented!(),
            CompressionInfo::Compressed { .. } => unimplemented!(),
            CompressionInfo::CompressedWithVectors {
                neighbors,
                ref offsets,
                ref hnsw_m,
                bits_per_unsorted,
                base_vector_layout,
                link_vector_size,
                link_vector_alignment,
            } => {
                let start = offsets.get(idx).unwrap() as usize;
                let end = offsets.get(idx + 1).unwrap() as usize;
                // Hint the OS that the whole record will be read sequentially.
                memory::madvise::will_need_multiple_pages(&neighbors[start..end]);
                let mut pos = start;

                // 1. Base vector (`B` in the doc, only on level 0).
                let mut base_vector: &[u8] = &[];
                if level == 0 {
                    base_vector = &neighbors[pos..pos + base_vector_layout.size()];
                    debug_assert!(
                        base_vector
                            .as_ptr()
                            .addr()
                            .is_multiple_of(base_vector_layout.align())
                    );
                    pos += base_vector_layout.size();
                }

                // 2. The varint-encoded length (`#` in the doc).
                let (neighbors_count, neighbors_count_size) =
                    u64::decode_var(&neighbors[pos..end]).unwrap();
                pos += neighbors_count_size;

                // 3. Compressed links (`c` in the doc).
                let links_size = packed_links_size(
                    &neighbors[pos..end],
                    bits_per_unsorted,
                    hnsw_m.level_m(level),
                    neighbors_count as usize,
                );
                let links = iterate_packed_links(
                    &neighbors[pos..pos + links_size],
                    bits_per_unsorted,
                    hnsw_m.level_m(level),
                );
                pos += links_size;

                // 4. Padding to align link vectors (`_` in the doc).
                pos = pos.next_multiple_of(link_vector_alignment as usize);

                // 5. Link vectors (`L` in the doc).
                let link_vector_bytes = (neighbors_count as usize) * link_vector_size.get();
                let link_vectors = &neighbors[pos..pos + link_vector_bytes];
                debug_assert!(link_vectors.as_ptr().addr() % link_vector_alignment as usize == 0);

                (
                    base_vector,
                    links,
                    link_vectors.chunks_exact(link_vector_size.get()),
                )
            }
        }
    }

    /// Returns the highest level on which `point_id` exists.
    ///
    /// Works by comparing the point's reindexed position against the number
    /// of points present on each level (derived from `level_offsets` deltas).
    pub(super) fn point_level(&self, point_id: PointOffsetType) -> usize {
        let reindexed_point_id = u64::from(self.reindex[point_id as usize]);
        for (level, (&a, &b)) in self
            .level_offsets
            .iter()
            .skip(1)
            .tuple_windows()
            .enumerate()
        {
            if reindexed_point_id >= b - a {
                return level;
            }
        }
        // See the doc comment on `level_offsets`.
        self.level_offsets.len() - 2
    }

    #[cfg(test)]
    pub(super) fn sorted_count(&self, level: usize) -> usize {
        match self.compression {
            CompressionInfo::Uncompressed { .. } => 0,
            CompressionInfo::Compressed { hnsw_m, .. } => hnsw_m.level_m(level),
            CompressionInfo::CompressedWithVectors { hnsw_m, .. } => hnsw_m.level_m(level),
        }
    }
}

/// Reads `levels_count` level offsets from `bytes` and appends the final
/// boundary (`total_offset_count - 1`), so the result always has one extra
/// element (see the doc comment on `GraphLinksView::level_offsets`).
fn read_level_offsets(
    bytes: &[u8],
    levels_count: u64,
    total_offset_count: u64,
) -> OperationResult<(Vec<u64>, &[u8])> {
    let (level_offsets, bytes) = get_slice::<u64>(bytes, levels_count)?;
    let mut result = Vec::with_capacity(level_offsets.len() + 1);
    result.extend_from_slice(level_offsets);
    result.push(total_offset_count.checked_sub(1).ok_or_else(|| {
        OperationError::service_error("Total offset count should be at least 1 in GraphLinks file")
    })?);
    Ok((result, bytes))
}

/// Splits `length` elements of `T` off the front of `data`, zero-copy.
fn get_slice<T: FromBytes + Immutable>(data: &[u8], length: u64) -> OperationResult<(&[T], &[u8])> {
    <[T]>::ref_from_prefix_with_elems(data, length as usize).map_err(|_| error_unsufficent_size())
}

/// Common error for a truncated or undersized GraphLinks file.
fn error_unsufficent_size() -> OperationError {
    OperationError::service_error("Unsufficent file size for GraphLinks file")
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/hnsw_index/graph_links/header.rs
lib/segment/src/index/hnsw_index/graph_links/header.rs
use std::alloc::Layout;

use common::bitpacking_ordered;
use zerocopy::little_endian::U64 as LittleU64;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

use crate::common::operation_error::{OperationError, OperationResult};

/// File header for the plain format.
#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C)]
pub(super) struct HeaderPlain {
    pub(super) point_count: u64,
    pub(super) levels_count: u64,
    pub(super) total_neighbors_count: u64,
    pub(super) total_offset_count: u64,
    /// Either 0 or 4.
    pub(super) offsets_padding_bytes: u64,
    pub(super) zero_padding: [u8; 24],
}

/// File header for the compressed format.
#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C, align(8))]
pub(super) struct HeaderCompressed {
    pub(super) point_count: LittleU64,
    /// Should be [`HEADER_VERSION_COMPRESSED`].
    pub(super) version: LittleU64,
    pub(super) levels_count: LittleU64,
    pub(super) total_neighbors_bytes: LittleU64,
    pub(super) offsets_parameters: bitpacking_ordered::Parameters,
    pub(super) m: LittleU64,
    pub(super) m0: LittleU64,
    pub(super) zero_padding: [u8; 5], // for 8-byte alignment
}

/// File header for the compressed format with embedded vectors.
#[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C, align(8))]
pub(super) struct HeaderCompressedWithVectors {
    pub(super) point_count: LittleU64,
    /// Should be [`HEADER_VERSION_COMPRESSED_WITH_VECTORS`].
    pub(super) version: LittleU64,
    pub(super) levels_count: LittleU64,
    pub(super) total_neighbors_bytes: LittleU64,
    pub(super) offsets_parameters: bitpacking_ordered::Parameters,
    pub(super) m: LittleU64,
    pub(super) m0: LittleU64,
    pub(super) base_vector_layout: PackedVectorLayout,
    pub(super) link_vector_layout: PackedVectorLayout,
    pub(super) zero_padding: [u8; 3], // for 8-byte alignment
}

// Version markers stored in the second u64 of the versioned headers.
// NOTE(review): the high-bit-heavy values presumably make them easy to
// distinguish from plausible counts in unversioned files — confirm.
pub(super) const HEADER_VERSION_COMPRESSED: u64 = 0xFFFF_FFFF_FFFF_FF01;
pub(super) const HEADER_VERSION_COMPRESSED_WITH_VECTORS: u64 = 0xFFFF_FFFF_FFFF_FF02;

/// Packed representation of [`Layout`].
#[derive(Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C)]
pub(super) struct PackedVectorLayout {
    pub(super) size: LittleU64,
    pub(super) alignment: u8,
}

impl PackedVectorLayout {
    /// Converts back into a [`Layout`], validating size/alignment invariants.
    ///
    /// # Errors
    ///
    /// Returns a service error if the stored values do not form a valid
    /// layout (e.g. alignment is not a power of two).
    pub(super) fn try_into_layout(self) -> OperationResult<Layout> {
        Layout::from_size_align(self.size.get() as usize, self.alignment as usize)
            .map_err(|_| OperationError::service_error("Invalid vector layout"))
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/optimized_filter.rs
lib/segment/src/index/query_optimization/optimized_filter.rs
use common::types::PointOffsetType; pub type ConditionCheckerFn<'a> = Box<dyn Fn(PointOffsetType) -> bool + 'a>; pub enum OptimizedCondition<'a> { Checker(ConditionCheckerFn<'a>), /// Nested filter Filter(OptimizedFilter<'a>), } pub struct OptimizedMinShould<'a> { pub conditions: Vec<OptimizedCondition<'a>>, pub min_count: usize, } pub struct OptimizedFilter<'a> { /// At least one of those conditions should match pub should: Option<Vec<OptimizedCondition<'a>>>, /// At least minimum amount of given conditions should match pub min_should: Option<OptimizedMinShould<'a>>, /// All conditions must match pub must: Option<Vec<OptimizedCondition<'a>>>, /// All conditions must NOT match pub must_not: Option<Vec<OptimizedCondition<'a>>>, } pub fn check_optimized_filter(filter: &OptimizedFilter, point_id: PointOffsetType) -> bool { check_should(&filter.should, point_id) && check_min_should(&filter.min_should, point_id) && check_must(&filter.must, point_id) && check_must_not(&filter.must_not, point_id) } pub fn check_condition(condition: &OptimizedCondition, point_id: PointOffsetType) -> bool { match condition { OptimizedCondition::Filter(filter) => check_optimized_filter(filter, point_id), OptimizedCondition::Checker(checker) => checker(point_id), } } fn check_should(should: &Option<Vec<OptimizedCondition>>, point_id: PointOffsetType) -> bool { let check = |condition| check_condition(condition, point_id); match should { None => true, Some(conditions) => conditions.iter().any(check), } } fn check_min_should(min_should: &Option<OptimizedMinShould>, point_id: PointOffsetType) -> bool { let check = |condition| check_condition(condition, point_id); match min_should { None => true, Some(OptimizedMinShould { conditions, min_count, }) => { conditions .iter() .filter(|cond| check(cond)) .take(*min_count) .count() == *min_count } } } fn check_must(must: &Option<Vec<OptimizedCondition>>, point_id: PointOffsetType) -> bool { let check = |condition| check_condition(condition, point_id); 
match must { None => true, Some(conditions) => conditions.iter().all(check), } } fn check_must_not(must: &Option<Vec<OptimizedCondition>>, point_id: PointOffsetType) -> bool { let check = |condition| !check_condition(condition, point_id); match must { None => true, Some(conditions) => conditions.iter().all(check), } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/payload_provider.rs
lib/segment/src/index/query_optimization/payload_provider.rs
use std::ops::Deref;
use std::sync::Arc;

use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;

use crate::payload_storage::PayloadStorage;
use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
use crate::types::{OwnedPayloadRef, Payload};

/// Shared, cheaply-clonable accessor over the segment's payload storage.
///
/// Keeps a reusable empty payload so that points without a stored payload can
/// still be handed to callbacks without allocating.
#[derive(Clone)]
pub struct PayloadProvider {
    payload_storage: Arc<AtomicRefCell<PayloadStorageEnum>>,
    // Returned (by reference) for points that have no payload in storage.
    empty_payload: Payload,
}

impl PayloadProvider {
    /// Wraps the given payload storage handle.
    pub fn new(payload_storage: Arc<AtomicRefCell<PayloadStorageEnum>>) -> Self {
        Self {
            payload_storage,
            empty_payload: Default::default(),
        }
    }

    /// Runs `callback` with the payload of `point_id` and returns its result.
    ///
    /// If the point has no payload, the callback receives a reference to the
    /// shared empty payload instead. The storage is borrowed (not locked) for
    /// the duration of the call.
    pub fn with_payload<F, G>(
        &self,
        point_id: PointOffsetType,
        callback: F,
        hw_counter: &HardwareCounterCell,
    ) -> G
    where
        F: FnOnce(OwnedPayloadRef) -> G,
    {
        let payload_storage_guard = self.payload_storage.borrow();
        // Dispatch on the concrete storage backend; available variants depend
        // on enabled cargo features.
        let payload_ptr_opt = match payload_storage_guard.deref() {
            #[cfg(feature = "testing")]
            PayloadStorageEnum::InMemoryPayloadStorage(s) => {
                s.payload_ptr(point_id).map(OwnedPayloadRef::from)
            }
            #[cfg(feature = "rocksdb")]
            PayloadStorageEnum::SimplePayloadStorage(s) => {
                s.payload_ptr(point_id).map(OwnedPayloadRef::from)
            }
            // Warn: Possible panic here
            // Currently, it is possible that `read_payload` fails with Err,
            // but it seems like a very rare possibility which might only happen
            // if something is wrong with disk or storage is corrupted.
            //
            // In both cases it means that service can't be of use any longer.
            // It is as good as dead. Therefore it is tolerable to just panic here.
            // Downside is - API user won't be notified of the failure.
            // It will just timeout.
            //
            // The alternative:
            // Rewrite condition checking code to support error reporting.
            // Which may lead to slowdown and assumes a lot of changes.
            #[cfg(feature = "rocksdb")]
            PayloadStorageEnum::OnDiskPayloadStorage(s) => s
                .read_payload(point_id, hw_counter)
                .unwrap_or_else(|err| panic!("Payload storage is corrupted: {err}"))
                .map(OwnedPayloadRef::from),
            PayloadStorageEnum::MmapPayloadStorage(s) => {
                let payload = s
                    .get(point_id, hw_counter)
                    .unwrap_or_else(|err| panic!("Payload storage is corrupted: {err}"));
                Some(OwnedPayloadRef::from(payload))
            }
        };
        // Fall back to the shared empty payload when nothing is stored.
        let payload = if let Some(payload_ptr) = payload_ptr_opt {
            payload_ptr
        } else {
            OwnedPayloadRef::from(&self.empty_payload)
        };
        callback(payload)
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/condition_converter.rs
lib/segment/src/index/query_optimization/condition_converter.rs
use std::collections::HashMap;

use ahash::AHashSet;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use match_converter::get_match_checkers;
use ordered_float::OrderedFloat;
use serde_json::Value;

use crate::index::field_index::FieldIndex;
use crate::index::field_index::null_index::MutableNullIndex;
use crate::index::query_optimization::optimized_filter::ConditionCheckerFn;
use crate::index::query_optimization::payload_provider::PayloadProvider;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::payload_storage::query_checker::{
    check_field_condition, check_is_empty_condition, check_is_null_condition, check_payload,
    select_nested_indexes,
};
use crate::types::{
    Condition, DateTimePayloadType, FieldCondition, FloatPayloadType, GeoBoundingBox, GeoPolygon,
    GeoRadius, IntPayloadType, OwnedPayloadRef, PayloadContainer, Range, RangeInterface,
};
use crate::vector_storage::VectorStorage;

mod match_converter;

impl StructPayloadIndex {
    /// Compiles a single `Condition` into a boxed per-point checker function.
    ///
    /// Prefers field indexes where one can serve the condition; otherwise falls
    /// back to reading and checking the raw payload for every point.
    pub fn condition_converter<'a>(
        &'a self,
        condition: &'a Condition,
        payload_provider: PayloadProvider,
        hw_counter: &HardwareCounterCell,
    ) -> ConditionCheckerFn<'a> {
        let id_tracker = self.id_tracker.borrow();
        let field_indexes = &self.field_indexes;
        match condition {
            // Try each index on the field in turn; first one that can serve
            // the condition wins. Otherwise fall back to payload reads.
            Condition::Field(field_condition) => field_indexes
                .get(&field_condition.key)
                .and_then(|indexes| {
                    indexes.iter().find_map(move |index| {
                        let hw_acc = hw_counter.new_accumulator();
                        field_condition_index(index, field_condition, hw_acc)
                    })
                })
                .unwrap_or_else(|| {
                    let hw = hw_counter.fork();
                    Box::new(move |point_id| {
                        payload_provider.with_payload(
                            point_id,
                            |payload| {
                                check_field_condition(field_condition, &payload, field_indexes, &hw)
                            },
                            &hw,
                        )
                    })
                }),
            // Use dedicated null index for `is_empty` check if it is available.
            // Otherwise we might use another index just to check if a field is not empty; if we
            // don't have an indexed value we must still check the payload to see if it's empty.
            Condition::IsEmpty(is_empty) => {
                let field_indexes = field_indexes.get(&is_empty.is_empty.key);
                let (primary_null_index, fallback_index) = field_indexes
                    .map(|field_indexes| get_is_empty_indexes(field_indexes))
                    .unwrap_or((None, None));
                if let Some(null_index) = primary_null_index {
                    get_null_index_is_empty_checker(null_index, true)
                } else {
                    // Fallback to reading payload, in case we don't yet have null-index
                    let hw = hw_counter.fork();
                    let fallback = Box::new(move |point_id| {
                        payload_provider.with_payload(
                            point_id,
                            |payload| check_is_empty_condition(is_empty, &payload),
                            &hw,
                        )
                    });
                    if let Some(fallback_index) = fallback_index {
                        get_fallback_is_empty_checker(fallback_index, true, fallback)
                    } else {
                        fallback
                    }
                }
            }
            Condition::IsNull(is_null) => {
                let field_indexes = field_indexes.get(&is_null.is_null.key);
                let is_null_checker = field_indexes.and_then(|field_indexes| {
                    field_indexes
                        .iter()
                        .find_map(|index| get_is_null_checker(index, true))
                });
                if let Some(checker) = is_null_checker {
                    checker
                } else {
                    // Fallback to reading payload
                    let hw = hw_counter.fork();
                    Box::new(move |point_id| {
                        payload_provider.with_payload(
                            point_id,
                            |payload| check_is_null_condition(is_null, &payload),
                            &hw,
                        )
                    })
                }
            }
            // ToDo: It might be possible to make this condition faster by using `VisitedPool` instead of HashSet
            Condition::HasId(has_id) => {
                // Resolve external ids to internal offsets up front; unknown
                // external ids are simply dropped.
                let segment_ids: AHashSet<_> = has_id
                    .has_id
                    .iter()
                    .filter_map(|external_id| id_tracker.internal_id(*external_id))
                    .collect();
                Box::new(move |point_id| segment_ids.contains(&point_id))
            }
            Condition::HasVector(has_vector) => {
                if let Some(vector_storage) =
                    self.vector_storages.get(&has_vector.has_vector).cloned()
                {
                    Box::new(move |point_id| !vector_storage.borrow().is_deleted_vector(point_id))
                } else {
                    // Unknown vector name: no point can have it.
                    Box::new(|_point_id| false)
                }
            }
            Condition::Nested(nested) => {
                // Select indexes for nested fields. Trim nested part from key, so
                // that nested condition can address fields without nested part.
                //
                // Example:
                // Index for field `nested.field` will be stored under key `nested.field`
                // And we have a query:
                // {
                //   "nested": {
                //     "path": "nested",
                //     "filter": {
                //       ...
                //       "match": {"key": "field", "value": "value"}
                //     }
                //   }
                // }
                // In this case we want to use `nested.field`, but we only have `field` in query.
                // Therefore we need to trim `nested` part from key. So that query executor
                // can address proper index for nested field.
                let nested_path = nested.array_key();
                let nested_indexes = select_nested_indexes(&nested_path, field_indexes);
                let hw = hw_counter.fork();
                Box::new(move |point_id| {
                    payload_provider.with_payload(
                        point_id,
                        |payload| {
                            let field_values = payload.get_value(&nested_path);
                            for value in field_values {
                                if let Value::Object(object) = value {
                                    let get_payload = || OwnedPayloadRef::from(object);
                                    if check_payload(
                                        Box::new(get_payload),
                                        // None because has_id in nested is not supported. So retrieving
                                        // IDs through the tracker would always return None.
                                        None,
                                        // Same as above, nested conditions don't support has_vector.
                                        &HashMap::new(),
                                        &nested.nested.filter,
                                        point_id,
                                        &nested_indexes,
                                        &hw,
                                    ) {
                                        // If at least one nested object matches, return true
                                        return true;
                                    }
                                }
                            }
                            false
                        },
                        &hw,
                    )
                })
            }
            Condition::CustomIdChecker(cond) => {
                // Pre-materialize the set of matching internal ids.
                let segment_ids: AHashSet<_> = id_tracker
                    .iter_external()
                    .filter(|&point_id| cond.0.check(point_id))
                    .filter_map(|external_id| id_tracker.internal_id(external_id))
                    .collect();
                Box::new(move |internal_id| segment_ids.contains(&internal_id))
            }
            // Nested `Filter` conditions are handled by the optimizer before
            // reaching this converter.
            Condition::Filter(_) => unreachable!(),
        }
    }
}

/// Tries to build an index-backed checker for `field_condition` using `index`.
///
/// Returns `None` when this index type cannot serve the condition (the caller
/// then tries the next index, or falls back to payload reads).
pub fn field_condition_index<'a>(
    index: &'a FieldIndex,
    field_condition: &FieldCondition,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'a>> {
    match field_condition {
        FieldCondition {
            r#match: Some(cond_match),
            ..
        } => get_match_checkers(index, cond_match.clone(), hw_acc),
        FieldCondition {
            range: Some(cond), ..
        } => get_range_checkers(index, *cond, hw_acc),
        FieldCondition {
            geo_radius: Some(geo_radius),
            ..
        } => get_geo_radius_checkers(index, *geo_radius, hw_acc),
        FieldCondition {
            geo_bounding_box: Some(geo_bounding_box),
            ..
        } => get_geo_bounding_box_checkers(index, *geo_bounding_box, hw_acc),
        FieldCondition {
            geo_polygon: Some(geo_polygon),
            ..
        } => get_geo_polygon_checkers(index, geo_polygon.clone(), hw_acc),
        FieldCondition {
            is_empty: Some(is_empty),
            ..
        } => get_is_empty_checker(index, *is_empty),
        FieldCondition {
            is_null: Some(is_null),
            ..
        } => get_is_null_checker(index, *is_null),
        FieldCondition {
            key: _,
            r#match: None,
            range: None,
            geo_radius: None,
            geo_bounding_box: None,
            geo_polygon: None,
            // We can't use index for this condition, since some indices don't count values,
            // like boolean index, where [true, true, true] is the same as [true]. Count should be 3 but they think is 1.
            //
            // TODO: Try to use the indices that actually support counting values.
            values_count: _,
            is_empty: None,
            is_null: None,
        } => None,
    }
}

/// Geo-polygon containment checker; only the geo index can serve it.
pub fn get_geo_polygon_checkers(
    index: &FieldIndex,
    geo_polygon: GeoPolygon,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let polygon_wrapper = geo_polygon.convert();
    let hw_counter = hw_acc.get_counter_cell();
    match index {
        FieldIndex::GeoIndex(geo_index) => Some(Box::new(move |point_id: PointOffsetType| {
            geo_index.check_values_any(point_id, &hw_counter, |value| {
                polygon_wrapper.check_point(value)
            })
        })),
        FieldIndex::BoolIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::IntIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}

/// Geo-radius checker; only the geo index can serve it.
pub fn get_geo_radius_checkers(
    index: &FieldIndex,
    geo_radius: GeoRadius,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let hw_counter = hw_acc.get_counter_cell();
    match index {
        FieldIndex::GeoIndex(geo_index) => Some(Box::new(move |point_id: PointOffsetType| {
            geo_index.check_values_any(point_id, &hw_counter, |value| geo_radius.check_point(value))
        })),
        FieldIndex::BoolIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::IntIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}

/// Geo bounding-box checker; only the geo index can serve it.
pub fn get_geo_bounding_box_checkers(
    index: &FieldIndex,
    geo_bounding_box: GeoBoundingBox,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let hw_counter = hw_acc.get_counter_cell();
    match index {
        FieldIndex::GeoIndex(geo_index) => Some(Box::new(move |point_id: PointOffsetType| {
            geo_index.check_values_any(point_id, &hw_counter, |value| {
                geo_bounding_box.check_point(value)
            })
        })),
        FieldIndex::BoolIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::IntIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}

/// Dispatches a range condition to the float or datetime implementation.
pub fn get_range_checkers(
    index: &FieldIndex,
    range: RangeInterface,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    match range {
        RangeInterface::Float(range) => get_float_range_checkers(index, range, hw_acc),
        RangeInterface::DateTime(range) => get_datetime_range_checkers(index, range, hw_acc),
    }
}

/// Numeric range checker; served by the int index (range truncated to
/// integers) or the float index.
pub fn get_float_range_checkers(
    index: &FieldIndex,
    range: Range<OrderedFloat<FloatPayloadType>>,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let hw_counter = hw_acc.get_counter_cell();
    match index {
        FieldIndex::IntIndex(num_index) => {
            // NOTE(review): `f.0 as IntPayloadType` truncates fractional bounds —
            // presumably intentional for matching against integer values; confirm.
            let range = range.map(|f| f.0 as IntPayloadType);
            Some(Box::new(move |point_id: PointOffsetType| {
                num_index.check_values_any(point_id, |value| range.check_range(*value), &hw_counter)
            }))
        }
        FieldIndex::FloatIndex(num_index) => Some(Box::new(move |point_id: PointOffsetType| {
            num_index.check_values_any(
                point_id,
                |value| range.check_range(OrderedFloat(*value)),
                &hw_counter,
            )
        })),
        FieldIndex::BoolIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::GeoIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}

/// Datetime range checker; only the datetime index can serve it (range is
/// compared as timestamps).
pub fn get_datetime_range_checkers(
    index: &FieldIndex,
    range: Range<DateTimePayloadType>,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    match index {
        FieldIndex::DatetimeIndex(num_index) => {
            let range = range.map(|dt| dt.timestamp());
            let hw_counter = hw_acc.get_counter_cell();
            Some(Box::new(move |point_id: PointOffsetType| {
                num_index.check_values_any(point_id, |value| range.check_range(*value), &hw_counter)
            }))
        }
        FieldIndex::BoolIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::GeoIndex(_)
        | FieldIndex::IntIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}

/// Splits a field's indexes into the dedicated null index (if any) and a
/// fallback index usable for emptiness checks.
///
/// When several non-null indexes exist, the last one wins as fallback.
fn get_is_empty_indexes(
    indexes: &[FieldIndex],
) -> (Option<&MutableNullIndex>, Option<&FieldIndex>) {
    let mut primary_null_index: Option<&MutableNullIndex> = None;
    let mut fallback_index: Option<&FieldIndex> = None;
    for index in indexes {
        match index {
            FieldIndex::NullIndex(null_index) => {
                primary_null_index = Some(null_index);
            }
            _ => {
                fallback_index = Some(index);
            }
        }
    }
    (primary_null_index, fallback_index)
}

/// Checker backed by the null index: compares its emptiness verdict to `is_empty`.
fn get_null_index_is_empty_checker(
    null_index: &MutableNullIndex,
    is_empty: bool,
) -> ConditionCheckerFn<'_> {
    Box::new(move |point_id: PointOffsetType| null_index.values_is_empty(point_id) == is_empty)
}

/// Emptiness checker that trusts `index` for non-empty points and defers to
/// `fallback_checker` (a payload read) when the index has no values.
fn get_fallback_is_empty_checker<'a>(
    index: &'a FieldIndex,
    is_empty: bool,
    fallback_checker: ConditionCheckerFn<'a>,
) -> ConditionCheckerFn<'a> {
    Box::new(move |point_id: PointOffsetType| {
        if index.values_is_empty(point_id) {
            // If value is empty in index, it can still be non-empty in payload
            fallback_checker(point_id) == is_empty
        } else {
            // Value IS in index, so we can trust the index
            // If `is_empty` is true, we should return false, because the value is not empty
            !is_empty
        }
    })
}

/// Get a checker that checks if the field is empty
///
/// * `index` - index to check
/// * `is_empty` - if the field should be empty
fn get_is_empty_checker(index: &FieldIndex, is_empty: bool) -> Option<ConditionCheckerFn<'_>> {
    match index {
        FieldIndex::NullIndex(null_index) => {
            Some(get_null_index_is_empty_checker(null_index, is_empty))
        }
        FieldIndex::IntIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::GeoIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::BoolIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_) => None,
    }
}

/// Null-ness checker; only the null index can serve it.
fn get_is_null_checker(index: &FieldIndex, is_null: bool) -> Option<ConditionCheckerFn<'_>> {
    match index {
        FieldIndex::NullIndex(null_index) => Some(Box::new(move |point_id: PointOffsetType| {
            null_index.values_is_null(point_id) == is_null
        })),
        FieldIndex::IntIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::GeoIndex(_)
        | FieldIndex::FullTextIndex(_)
        | FieldIndex::BoolIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_) => None,
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/optimizer.rs
lib/segment/src/index/query_optimization/optimizer.rs
use std::cmp::Reverse;

use common::counter::hardware_counter::HardwareCounterCell;
use itertools::Itertools;

use crate::index::field_index::CardinalityEstimation;
use crate::index::query_estimator::{
    combine_min_should_estimations, combine_must_estimations, combine_should_estimations,
    invert_estimation,
};
use crate::index::query_optimization::optimized_filter::{
    OptimizedCondition, OptimizedFilter, OptimizedMinShould,
};
use crate::index::query_optimization::payload_provider::PayloadProvider;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::types::{Condition, Filter, MinShould};

impl StructPayloadIndex {
    /// Converts user-provided filtering condition into optimized representation
    ///
    /// Optimizations:
    ///
    /// * Convert each condition into a checker function
    /// * Use column index, avoid reading Payload, if possible
    /// * Re-order operations using estimated cardinalities
    ///
    /// ToDo: Add optimizations between clauses
    ///
    /// # Arguments
    ///
    /// * `filter` - original filter
    /// * `payload_provider` - provides the payload storage
    /// * `total` - total number of points in segment (used for cardinality estimation)
    ///
    /// # Result
    ///
    /// Optimized query + Cardinality estimation
    pub fn optimize_filter<'a>(
        &'a self,
        filter: &'a Filter,
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> (OptimizedFilter<'a>, CardinalityEstimation) {
        // Per-clause estimations, combined with `must` semantics at the end
        // (all present clauses apply simultaneously).
        let mut filter_estimations: Vec<CardinalityEstimation> = vec![];

        // Empty clause vectors are normalized to `None` so checkers can skip them.
        let optimized_filter = OptimizedFilter {
            should: filter.should.as_ref().and_then(|conditions| {
                if !conditions.is_empty() {
                    let (optimized_conditions, estimation) = self.optimize_should(
                        conditions,
                        payload_provider.clone(),
                        total,
                        hw_counter,
                    );
                    filter_estimations.push(estimation);
                    Some(optimized_conditions)
                } else {
                    None
                }
            }),
            min_should: filter.min_should.as_ref().and_then(
                |MinShould {
                     conditions,
                     min_count,
                 }| {
                    if !conditions.is_empty() {
                        let (optimized_conditions, estimation) = self.optimize_min_should(
                            conditions,
                            *min_count,
                            payload_provider.clone(),
                            total,
                            hw_counter,
                        );
                        filter_estimations.push(estimation);
                        Some(OptimizedMinShould {
                            conditions: optimized_conditions,
                            min_count: *min_count,
                        })
                    } else {
                        None
                    }
                },
            ),
            must: filter.must.as_ref().and_then(|conditions| {
                if !conditions.is_empty() {
                    let (optimized_conditions, estimation) =
                        self.optimize_must(conditions, payload_provider.clone(), total, hw_counter);
                    filter_estimations.push(estimation);
                    Some(optimized_conditions)
                } else {
                    None
                }
            }),
            must_not: filter.must_not.as_ref().and_then(|conditions| {
                if !conditions.is_empty() {
                    let (optimized_conditions, estimation) =
                        self.optimize_must_not(conditions, payload_provider, total, hw_counter);
                    filter_estimations.push(estimation);
                    Some(optimized_conditions)
                } else {
                    None
                }
            }),
        };

        (
            optimized_filter,
            combine_must_estimations(&filter_estimations, total),
        )
    }

    /// Converts each condition into a checker (or nested optimized filter)
    /// paired with its cardinality estimation.
    pub fn convert_conditions<'a>(
        &'a self,
        conditions: &'a [Condition],
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> Vec<(OptimizedCondition<'a>, CardinalityEstimation)> {
        conditions
            .iter()
            .map(|condition| match condition {
                Condition::Filter(filter) => {
                    let (optimized_filter, estimation) =
                        self.optimize_filter(filter, payload_provider.clone(), total, hw_counter);
                    (OptimizedCondition::Filter(optimized_filter), estimation)
                }
                _ => {
                    let estimation = self.condition_cardinality(condition, None, hw_counter);
                    let condition_checker =
                        self.condition_converter(condition, payload_provider.clone(), hw_counter);
                    (OptimizedCondition::Checker(condition_checker), estimation)
                }
            })
            .collect()
    }

    /// `should`: order by descending expected cardinality so `any()` can
    /// short-circuit on a likely match as early as possible.
    fn optimize_should<'a>(
        &'a self,
        conditions: &'a [Condition],
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> (Vec<OptimizedCondition<'a>>, CardinalityEstimation) {
        let mut converted =
            self.convert_conditions(conditions, payload_provider, total, hw_counter);
        // More probable conditions first
        converted.sort_by_key(|(_, estimation)| Reverse(estimation.exp));
        let (conditions, estimations): (Vec<_>, Vec<_>) = converted.into_iter().unzip();
        (conditions, combine_should_estimations(&estimations, total))
    }

    /// `min_should`: sort direction depends on how many matches are required.
    fn optimize_min_should<'a>(
        &'a self,
        conditions: &'a [Condition],
        min_count: usize,
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> (Vec<OptimizedCondition<'a>>, CardinalityEstimation) {
        let mut converted =
            self.convert_conditions(conditions, payload_provider, total, hw_counter);
        // More probable conditions first if min_count < number of conditions
        // NOTE(review): the comment says "number of conditions" but the code
        // compares against *half* the count — confirm which is intended.
        if min_count < conditions.len() / 2 {
            converted.sort_by_key(|(_, estimation)| Reverse(estimation.exp));
        } else {
            // Less probable conditions first
            converted.sort_by_key(|(_, estimation)| estimation.exp);
        }
        let (conditions, estimations): (Vec<_>, Vec<_>) = converted.into_iter().unzip();
        (
            conditions,
            combine_min_should_estimations(&estimations, min_count, total),
        )
    }

    /// `must`: order by ascending expected cardinality so `all()` can
    /// short-circuit on a likely non-match as early as possible.
    fn optimize_must<'a>(
        &'a self,
        conditions: &'a [Condition],
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> (Vec<OptimizedCondition<'a>>, CardinalityEstimation) {
        let mut converted =
            self.convert_conditions(conditions, payload_provider, total, hw_counter);
        // Less probable conditions first
        converted.sort_by_key(|(_, estimation)| estimation.exp);
        let (conditions, estimations): (Vec<_>, Vec<_>) = converted.into_iter().unzip();
        (conditions, combine_must_estimations(&estimations, total))
    }

    /// `must_not`: conditions are evaluated negated; estimations are inverted
    /// before being combined with `must` semantics.
    fn optimize_must_not<'a>(
        &'a self,
        conditions: &'a [Condition],
        payload_provider: PayloadProvider,
        total: usize,
        hw_counter: &HardwareCounterCell,
    ) -> (Vec<OptimizedCondition<'a>>, CardinalityEstimation) {
        let mut converted =
            self.convert_conditions(conditions, payload_provider, total, hw_counter);
        // More probable conditions first, as it will be reverted
        // NOTE(review): this sorts ascending by `exp` (least probable original
        // condition first), which contradicts the comment above; confirm the
        // intended short-circuit order for negated conditions.
        converted.sort_by_key(|(_, estimation)| estimation.exp);
        let (conditions, estimations): (Vec<_>, Vec<_>) = converted.into_iter().unzip();
        (
            conditions,
            combine_must_estimations(
                &estimations
                    .into_iter()
                    .map(|estimation| invert_estimation(&estimation, total))
                    .collect_vec(),
                total,
            ),
        )
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/mod.rs
lib/segment/src/index/query_optimization/mod.rs
//! Query optimization: compiles user filters into per-point checker functions
//! and orders their evaluation using cardinality estimations.

pub mod condition_converter;
pub mod optimized_filter;
pub mod optimizer;
pub mod payload_provider;
pub mod rescore_formula;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/rescore_formula/parsed_formula.rs
lib/segment/src/index/query_optimization/rescore_formula/parsed_formula.rs
use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::str::FromStr; use common::types::ScoreType; use ordered_float::OrderedFloat; use serde::Serialize; use serde_json::Value; use crate::common::operation_error::{OperationError, OperationResult}; use crate::common::utils::unordered_hash_unique; use crate::json_path::{JsonPath, JsonPathItem}; use crate::types::{Condition, DateTimePayloadType, GeoPoint}; const SCORE_KEYWORD: &str = "score"; const DEFAULT_DECAY_MIDPOINT: f32 = 0.5; const DEFAULT_DECAY_SCALE: f32 = 1.0; pub type ConditionId = usize; pub type PreciseScore = f64; pub type PreciseScoreOrdered = OrderedFloat<PreciseScore>; #[derive(Debug, Clone, PartialEq, Serialize)] pub struct ParsedFormula { /// Variables used in the formula pub payload_vars: HashSet<JsonPath>, /// Conditions used in the formula. Their index in the array is used as a variable id pub conditions: Vec<Condition>, /// Defaults to use when variable is not found pub defaults: HashMap<VariableId, Value>, /// Root of the formula expression pub formula: ParsedExpression, } impl Hash for ParsedFormula { fn hash<H: Hasher>(&self, state: &mut H) { let Self { payload_vars, conditions, defaults, formula, } = self; unordered_hash_unique(state, payload_vars.iter()); conditions.hash(state); unordered_hash_unique(state, defaults.iter()); formula.hash(state); } } #[derive(Debug, Clone, PartialEq, Hash, Serialize)] pub enum ParsedExpression { // Terminal Constant(PreciseScoreOrdered), Variable(VariableId), GeoDistance { origin: GeoPoint, key: JsonPath, }, Datetime(DatetimeExpression), // Nested Mult(Vec<ParsedExpression>), Sum(Vec<ParsedExpression>), Div { left: Box<ParsedExpression>, right: Box<ParsedExpression>, by_zero_default: Option<PreciseScoreOrdered>, }, Neg(Box<ParsedExpression>), Sqrt(Box<ParsedExpression>), Pow { base: Box<ParsedExpression>, exponent: Box<ParsedExpression>, }, Exp(Box<ParsedExpression>), Log10(Box<ParsedExpression>), Ln(Box<ParsedExpression>), 
Abs(Box<ParsedExpression>), Decay { kind: DecayKind, /// Value to decay x: Box<ParsedExpression>, /// Value at which the decay function is the highest target: Option<Box<ParsedExpression>>, /// Constant to shape the decay function lambda: PreciseScoreOrdered, }, } #[derive(Debug, Copy, Clone, PartialEq, Serialize, Hash)] pub enum DecayKind { /// Linear decay function Lin, /// Gaussian decay function Gauss, /// Exponential decay function Exp, } #[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, PartialOrd, Ord)] pub enum VariableId { /// Score index Score(usize), /// Payload field Payload(JsonPath), /// Condition index Condition(ConditionId), } impl VariableId { pub fn unparse(self) -> String { match self { VariableId::Score(index) => format!("${SCORE_KEYWORD}[{index}]"), VariableId::Payload(path) => path.to_string(), VariableId::Condition(_) => unreachable!("there are no defaults for conditions"), } } } #[derive(Debug, Clone, PartialEq, Hash, Serialize)] pub enum DatetimeExpression { Constant(DateTimePayloadType), PayloadVariable(JsonPath), } impl ParsedExpression { pub fn new_div( left: ParsedExpression, right: ParsedExpression, by_zero_default: Option<PreciseScore>, ) -> Self { ParsedExpression::Div { left: Box::new(left), right: Box::new(right), by_zero_default: by_zero_default.map(OrderedFloat), } } pub fn new_neg(expression: ParsedExpression) -> Self { ParsedExpression::Neg(Box::new(expression)) } pub fn new_geo_distance(origin: GeoPoint, key: JsonPath) -> Self { ParsedExpression::GeoDistance { origin, key } } pub fn new_pow(base: ParsedExpression, exponent: ParsedExpression) -> Self { ParsedExpression::Pow { base: Box::new(base), exponent: Box::new(exponent), } } pub fn new_sqrt(expression: ParsedExpression) -> Self { ParsedExpression::Sqrt(Box::new(expression)) } pub fn new_log10(expression: ParsedExpression) -> Self { ParsedExpression::Log10(Box::new(expression)) } pub fn new_ln(expression: ParsedExpression) -> Self { 
ParsedExpression::Ln(Box::new(expression)) } pub fn new_payload_id(path: JsonPath) -> Self { ParsedExpression::Variable(VariableId::Payload(path)) } pub fn new_score_id(index: usize) -> Self { ParsedExpression::Variable(VariableId::Score(index)) } pub fn new_condition_id(index: ConditionId) -> Self { ParsedExpression::Variable(VariableId::Condition(index)) } /// Transforms the constant part of the decay function into a single `lambda` value. /// /// Graphical representation of the formulas: /// https://www.desmos.com/calculator/htg0vrfmks pub fn decay_params_to_lambda( midpoint: Option<f32>, scale: Option<f32>, kind: DecayKind, ) -> OperationResult<PreciseScore> { let midpoint = PreciseScore::from(midpoint.unwrap_or(DEFAULT_DECAY_MIDPOINT)); let scale = PreciseScore::from(scale.unwrap_or(DEFAULT_DECAY_SCALE)); match kind { DecayKind::Lin => { if !(0.0..=1.0).contains(&midpoint) { return Err(OperationError::validation_error(format!( "Linear decay midpoint should be in the range [0.0, 1.0], got {midpoint}." ))); } } DecayKind::Gauss | DecayKind::Exp => { if midpoint <= 0.0 || midpoint >= 1.0 { return Err(OperationError::validation_error(format!( "Decay midpoint should be in the range (0.0, 1.0), got {midpoint}." ))); } } } if scale <= 0.0 { return Err(OperationError::validation_error(format!( "Decay scale should be non-zero positive, got {scale}." ))); } let lambda = match kind { DecayKind::Lin => (1.0 - midpoint) / scale, DecayKind::Exp => midpoint.ln() / scale, DecayKind::Gauss => midpoint.ln() / scale.powi(2), }; Ok(lambda) } /// Converts the already computed lambda value to parameters which will result in /// the same lambda when used in a decay function on the peer node. /// /// Returns a tuple of (midpoint, scale) parameters. 
pub fn decay_lambda_to_params(lambda: PreciseScore, kind: DecayKind) -> (ScoreType, ScoreType) { match kind { DecayKind::Lin => { // We assume lambda is in the range (0, 1) debug_assert!(0.0 < lambda && lambda < 1.0); // Linear lambda is (1.0 - midpoint) / scale, // setting scale to 1.0 allows us to ignore the division, // and only set the midpoint to some value. // // (1.0 - midpoint) / 1.0 = lambda // 1.0 - midpoint = lambda // midpoint = 1.0 - lambda (1.0 - lambda as ScoreType, 1.0) } DecayKind::Gauss => { // We assume lambda is non-zero negative debug_assert!(lambda < 0.0); // Gauss lambda is scale^2 / ln(midpoint) // setting midpoint to 1/e (0.3678...) allows us to ignore the division, since ln(1/e) = -1 // Then we set scale to sqrt(-lambda) // // ln(1/e) / scale^2 = lambda // -1.0 / scale^2 = lambda // scale^2 = -1.0 / lambda // scale = sqrt(-1.0 / lambda) ( 1.0 / std::f32::consts::E, (-1.0 / lambda).sqrt() as ScoreType, ) } DecayKind::Exp => { // We assume lambda is non-zero negative debug_assert!(lambda < 0.0); // Exponential lambda is ln(midpoint) / scale // setting midpoint to 1/e (0.3678...) allows us to ignore the division, since ln(1/e) = -1 // Then we set scale to -1 / lambda // // ln(1/e) / scale = lambda // -1.0 / scale = lambda // scale = -1.0 / lambda (1.0 / std::f32::consts::E, -1.0 / lambda as ScoreType) } } } } impl FromStr for VariableId { type Err = String; fn from_str(var_str: &str) -> Result<Self, Self::Err> { let var_id = match var_str.strip_prefix("$") { Some(score) => { // parse as reserved word let json_path = score .parse::<JsonPath>() .map_err(|_| format!("Invalid reserved variable: {var_str}"))?; match json_path.first_key.as_str() { SCORE_KEYWORD => match &json_path.rest[..] 
{ // Default prefetch index, like "$score" [] => VariableId::Score(0), // Specifies prefetch index, like "$score[2]" [JsonPathItem::Index(idx)] => VariableId::Score(*idx), _ => { // Only direct index is supported return Err(format!("Invalid reserved variable: {var_str}")); } }, _ => { // No other reserved words are supported return Err(format!("Invalid reserved word: {var_str}")); } } } None => { // parse as regular payload variable let parsed = var_str .parse() .map_err(|_| format!("Invalid payload variable: {var_str}"))?; VariableId::Payload(parsed) } }; Ok(var_id) } } #[cfg(test)] mod tests { use common::math::is_close; use super::*; #[test] fn test_variable_id_from_str() { // Test score variables assert_eq!( VariableId::from_str("$score").unwrap(), VariableId::Score(0) ); assert_eq!( VariableId::from_str("$score[0]").unwrap(), VariableId::Score(0) ); assert_eq!( VariableId::from_str("$score[1]").unwrap(), VariableId::Score(1) ); assert!(VariableId::from_str("$score.invalid").is_err()); assert!(VariableId::from_str("$score[1][2]").is_err()); assert!(VariableId::from_str("$score[]").is_err()); // Test invalid reserved words assert!(VariableId::from_str("$invalid").is_err()); // Test payload variables assert_eq!( VariableId::from_str("field").unwrap(), VariableId::Payload("field".parse().unwrap()) ); assert_eq!( VariableId::from_str("field.nested").unwrap(), VariableId::Payload("field.nested".parse().unwrap()) ); assert_eq!( VariableId::from_str("field[0]").unwrap(), VariableId::Payload("field[0]".parse().unwrap()) ); assert!(VariableId::from_str("").is_err()); } /// Tests that lambda can be communicated to peers in the form of its components, and be recalculated appropriately fn check_lambda_round_trip(lambda: PreciseScore, kind: DecayKind) { let (midpoint, scale) = ParsedExpression::decay_lambda_to_params(lambda, kind); // Convert back to lambda let lambda_roundtrip = ParsedExpression::decay_params_to_lambda(Some(midpoint), Some(scale), kind).unwrap(); // Check 
that the roundtrip conversion preserves the value assert!( is_close(lambda, lambda_roundtrip), "Lambda roundtrip failed for {kind:?}: {lambda} -> ({midpoint}, {scale}) -> {lambda_roundtrip}", ); } proptest::proptest! { #[test] fn test_lin_decay_lambda_params_roundtrip( lambda in 0.000001..1.0f64 ) { check_lambda_round_trip(lambda, DecayKind::Lin); } #[test] fn test_exp_decay_lambda_params_roundtrip( lambda in -1_000_000.0..-0.000_000_1f64 ) { check_lambda_round_trip(lambda, DecayKind::Exp); } #[test] fn test_gauss_decay_lambda_params_roundtrip( lambda in -100_000_000.0..-0.0f64 ) { check_lambda_round_trip(lambda, DecayKind::Gauss); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/rescore_formula/value_retriever.rs
lib/segment/src/index/query_optimization/rescore_formula/value_retriever.rs
#![allow(dead_code)] // TODO: remove this use std::collections::{HashMap, HashSet}; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use serde_json::{Number, Value}; use crate::common::utils::MultiValue; use crate::index::field_index::FieldIndex; use crate::index::query_optimization::payload_provider::PayloadProvider; use crate::index::struct_payload_index::StructPayloadIndex; use crate::json_path::JsonPath; use crate::types::{DateTimePayloadType, PayloadContainer, UuidPayloadType}; pub type VariableRetrieverFn<'a> = Box<dyn Fn(PointOffsetType) -> MultiValue<Value> + 'a>; impl StructPayloadIndex { /// Prepares optimized functions to extract each of the variables, given a point id. pub(super) fn retrievers_map<'a, 'q>( &'a self, variables: HashSet<JsonPath>, hw_counter: &'q HardwareCounterCell, ) -> HashMap<JsonPath, VariableRetrieverFn<'q>> where 'a: 'q, { let payload_provider = PayloadProvider::new(self.payload.clone()); // prepare extraction of the variables from field indices or payload. let mut var_retrievers = HashMap::new(); for key in variables { let payload_provider = payload_provider.clone(); let retriever = variable_retriever( &self.field_indexes, &key, payload_provider.clone(), hw_counter, ); var_retrievers.insert(key, retriever); } var_retrievers } } fn variable_retriever<'a, 'q>( indices: &'a HashMap<JsonPath, Vec<FieldIndex>>, json_path: &JsonPath, payload_provider: PayloadProvider, hw_counter: &'q HardwareCounterCell, ) -> VariableRetrieverFn<'q> where 'a: 'q, { indices .get(json_path) .and_then(|indices| indices.iter().find_map(indexed_variable_retriever)) // TODO(scoreboost): optimize by reusing the same payload for all variables? 
.unwrap_or_else(|| { // if the variable is not found in the index, try to find it in the payload let key = json_path.clone(); payload_variable_retriever(payload_provider, key, hw_counter) }) } fn payload_variable_retriever( payload_provider: PayloadProvider, json_path: JsonPath, hw_counter: &HardwareCounterCell, ) -> VariableRetrieverFn<'_> { let retriever_fn = move |point_id: PointOffsetType| { payload_provider.with_payload( point_id, |payload| { let values = payload.get_value_cloned(&json_path); if json_path.has_wildcard_suffix() { return values; } // Not using array wildcard `[]` on a key which has an array value will return the whole // array as one value, let's flatten the array if that is the case. // // This is the same thing we do for indexing payload values let mut multi_value = MultiValue::new(); for value in values { if let Value::Array(array) = value { multi_value.extend(array); } else { multi_value.push(value); } } multi_value }, hw_counter, ) }; Box::new(retriever_fn) } /// Returns function to extract all the values a point has in the index /// /// If there is no appropriate index, returns None fn indexed_variable_retriever(index: &FieldIndex) -> Option<VariableRetrieverFn<'_>> { match index { FieldIndex::IntIndex(numeric_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { numeric_index .get_values(point_id) .into_iter() .flatten() .map(|v| Value::Number(Number::from(v))) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::IntMapIndex(map_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { map_index .get_values(point_id) .into_iter() .flatten() .map(|v| Value::Number(Number::from(*v))) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::FloatIndex(numeric_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { numeric_index .get_values(point_id) .into_iter() .flatten() .filter_map(|v| Some(Value::Number(Number::from_f64(v)?))) .collect() }; 
Some(Box::new(extract_fn)) } FieldIndex::DatetimeIndex(numeric_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { numeric_index .get_values(point_id) .into_iter() .flatten() .filter_map(|v| { serde_json::to_value(DateTimePayloadType::from_timestamp(v)?).ok() }) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::KeywordIndex(keyword_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { keyword_index .get_values(point_id) .into_iter() .flatten() .filter_map(|v| serde_json::to_value(v).ok()) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::GeoIndex(geo_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { geo_index .get_values(point_id) .into_iter() .flatten() .filter_map(|v| serde_json::to_value(v).ok()) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::BoolIndex(bool_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { bool_index .get_point_values(point_id) .into_iter() .map(Value::Bool) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::UuidMapIndex(uuid_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { uuid_index .get_values(point_id) .into_iter() .flatten() .map(|value| Value::String(UuidPayloadType::from_u128(*value).to_string())) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::UuidIndex(uuid_index) => { let extract_fn = move |point_id: PointOffsetType| -> MultiValue<Value> { uuid_index .get_values(point_id) .into_iter() .flatten() .map(|value| Value::String(UuidPayloadType::from_u128(value).to_string())) .collect() }; Some(Box::new(extract_fn)) } FieldIndex::FullTextIndex(_) => None, // Better get it from the payload FieldIndex::NullIndex(_) => None, // There should be other index for the same field } } #[cfg(test)] #[cfg(feature = "testing")] mod tests { use std::collections::HashMap; use std::sync::Arc; use atomic_refcell::AtomicRefCell; use 
common::counter::hardware_counter::HardwareCounterCell; use serde_json::{Value, from_value, json}; use crate::common::utils::MultiValue; use crate::index::field_index::geo_index::GeoMapIndex; use crate::index::field_index::numeric_index::NumericIndex; use crate::index::field_index::{FieldIndex, FieldIndexBuilderTrait}; use crate::index::query_optimization::payload_provider::PayloadProvider; use crate::index::query_optimization::rescore_formula::value_retriever::variable_retriever; use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage; use crate::payload_storage::payload_storage_enum::PayloadStorageEnum; use crate::types::Payload; pub fn fixture_payload_provider() -> PayloadProvider { // Create an in-memory payload storage and populate it with some payload maps containing numbers and geo points. let mut in_memory_storage = InMemoryPayloadStorage::default(); // For point id 0: a payload with a numeric value. let payload0: Payload = from_value(json!({ "value": 42 })) .unwrap(); // For point id 1: a payload with a geo point. let payload1: Payload = from_value(json!({ "location": { "lat": 10.0, "lon": 20.0 } })) .unwrap(); // For point id 2: a payload containing both a number and a geo point. let payload2: Payload = from_value(json!({ "value": [99, 55], "location": { "lat": 15.5, "lon": 25.5 } })) .unwrap(); // For point id 3: a payload with an array of 1 number, and an array of 1 geo point. let payload3: Payload = from_value(json!({ "value": [42.5], "location": [{ "lat": 16.5, "lon": 26.5 }] })) .unwrap(); // Insert the payloads into the in-memory storage. in_memory_storage.payload.insert(0, payload0); in_memory_storage.payload.insert(1, payload1); in_memory_storage.payload.insert(2, payload2); in_memory_storage.payload.insert(3, payload3); // Wrap the in-memory storage in a PayloadStorageEnum. 
let storage_enum = PayloadStorageEnum::InMemoryPayloadStorage(in_memory_storage); let arc_storage = Arc::new(AtomicRefCell::new(storage_enum)); PayloadProvider::new(arc_storage) } #[test] fn test_variable_retriever_from_payload() { let payload_provider = fixture_payload_provider(); let no_indices = Default::default(); let hw_counter = Default::default(); // Test retrieving a number from the payload. let retriever = variable_retriever( &no_indices, &"value".try_into().unwrap(), payload_provider.clone(), &hw_counter, ); for id in 0..=3 { let value = retriever(id); match id { 0 => assert_eq!(value, [json!(42)].into()), 1 => assert_eq!(value, MultiValue::<Value>::new()), 2 => assert_eq!(value, [json!(99), json!(55)].into()), 3 => assert_eq!(value, [json!(42.5)].into()), _ => unreachable!(), } } // Test retrieving a geo point from the payload. let retriever = variable_retriever( &no_indices, &"location".try_into().unwrap(), payload_provider.clone(), &hw_counter, ); for id in 0..=3 { let value = retriever(id); match id { 0 => assert_eq!(value, MultiValue::<Value>::new()), 1 => assert_eq!(value, [json!({ "lat": 10.0, "lon": 20.0 })].into()), 2 => assert_eq!(value, [json!({ "lat": 15.5, "lon": 25.5 })].into()), 3 => assert_eq!(value, [json!({ "lat": 16.5, "lon": 26.5 })].into()), _ => unreachable!(), } } } #[test] fn test_variable_retriever_from_index() { // Empty payload provider. let payload_provider = PayloadProvider::new(Arc::new(AtomicRefCell::new( PayloadStorageEnum::InMemoryPayloadStorage(InMemoryPayloadStorage::default()), ))); let hw_counter = HardwareCounterCell::new(); // Create a field index for a number. 
let dir = tempfile::tempdir().unwrap(); let mut builder = NumericIndex::builder_mmap(dir.path(), false); builder.add_point(0, &[&42.into()], &hw_counter).unwrap(); builder.add_point(1, &[], &hw_counter).unwrap(); builder .add_point(2, &[&99.into(), &55.into()], &hw_counter) .unwrap(); let numeric_index = builder.finalize().unwrap(); let numeric_index = FieldIndex::IntIndex(numeric_index); // Create a field index for a geo point. let dir = tempfile::tempdir().unwrap(); let mut builder = GeoMapIndex::builder_mmap(dir.path(), false); builder.add_point(0, &[], &hw_counter).unwrap(); builder .add_point(1, &[&json!({ "lat": 10.0, "lon": 20.0})], &hw_counter) .unwrap(); builder .add_point(2, &[&json!({"lat": 15.5, "lon": 25.5})], &hw_counter) .unwrap(); let geo_index = builder.finalize().unwrap(); let geo_index = FieldIndex::GeoIndex(geo_index); // Create a field index for datetime let dir = tempfile::tempdir().unwrap(); let mut builder = NumericIndex::builder_mmap(dir.path(), false); builder .add_point(0, &[&json!("2023-01-01T00:00:00Z")], &hw_counter) .unwrap(); builder .add_point( 1, &[&json!("2023-01-02"), &json!("2023-01-03T00:00:00Z")], &hw_counter, ) .unwrap(); builder.add_point(2, &[], &hw_counter).unwrap(); let datetime_index = builder.finalize().unwrap(); let datetime_index = FieldIndex::DatetimeIndex(datetime_index); let mut indices = HashMap::new(); indices.insert("value".try_into().unwrap(), vec![numeric_index]); indices.insert("location".try_into().unwrap(), vec![geo_index]); indices.insert("creation".try_into().unwrap(), vec![datetime_index]); let hw_counter = Default::default(); // Test retrieving a number from the index. 
let retriever = variable_retriever( &indices, &"value".try_into().unwrap(), payload_provider.clone(), &hw_counter, ); for id in 0..=2 { let value = retriever(id); match id { 0 => assert_eq!(value, [json!(42)].into()), 1 => assert_eq!(value, MultiValue::<Value>::new()), 2 => assert_eq!(value, [json!(99), json!(55)].into()), _ => unreachable!(), } } // Test retrieving a geo point from the index. let retriever = variable_retriever( &indices, &"location".try_into().unwrap(), payload_provider.clone(), &hw_counter, ); for id in 0..=2 { let value = retriever(id); match id { 0 => assert_eq!(value, MultiValue::<Value>::new()), 1 => assert_eq!(value, [json!({ "lat": 10.0, "lon": 20.0 })].into()), 2 => assert_eq!(value, [json!({ "lat": 15.5, "lon": 25.5 })].into()), _ => unreachable!(), } } // Test retrieving a datetime from the index. let retriever = variable_retriever( &indices, &"creation".try_into().unwrap(), payload_provider.clone(), &hw_counter, ); for id in 0..=2 { let value = retriever(id); match id { 0 => assert_eq!(value, [json!("2023-01-01T00:00:00Z")].into()), 1 => assert_eq!( value, [json!("2023-01-02T00:00:00Z"), json!("2023-01-03T00:00:00Z")].into() ), 2 => assert_eq!(value, MultiValue::<Value>::new()), _ => unreachable!(), } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/rescore_formula/mod.rs
lib/segment/src/index/query_optimization/rescore_formula/mod.rs
mod formula_scorer; pub mod parsed_formula; mod value_retriever;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/rescore_formula/formula_scorer.rs
lib/segment/src/index/query_optimization/rescore_formula/formula_scorer.rs
//! Per-point evaluation of a parsed rescore formula.
//!
//! [`FormulaScorer`] bundles the parsed expression together with everything it
//! needs to resolve variables for a single point: prefetch scores, payload
//! value retrievers, condition checkers, and user-supplied default values.

use std::collections::HashMap;
use std::ops::Neg;

use ahash::AHashMap;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, ScoreType};
use geo::{Distance, Haversine};
use serde_json::Value;

use super::parsed_formula::{
    DatetimeExpression, DecayKind, ParsedExpression, ParsedFormula, PreciseScore, VariableId,
};
use super::value_retriever::VariableRetrieverFn;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::index::query_optimization::optimized_filter::{OptimizedCondition, check_condition};
use crate::index::query_optimization::payload_provider::PayloadProvider;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::json_path::JsonPath;
use crate::types::{DateTimePayloadType, GeoPoint};

// Score used when a prefetch score is missing and no default was provided.
const DEFAULT_SCORE: PreciseScore = 0.0;
// Target used by decay expressions when no explicit target was given.
const DEFAULT_DECAY_TARGET: PreciseScore = 0.0;

/// A scorer to evaluate the same formula for many points
pub struct FormulaScorer<'a> {
    /// The formula to evaluate
    formula: ParsedExpression,
    /// One hashmap for each prefetch results
    prefetches_scores: &'a [AHashMap<PointOffsetType, ScoreType>],
    /// Payload key -> retriever function
    payload_retrievers: HashMap<JsonPath, VariableRetrieverFn<'a>>,
    /// Condition id -> checker function
    condition_checkers: Vec<OptimizedCondition<'a>>,
    /// Default values for all variables
    defaults: HashMap<VariableId, Value>,
}

/// Human-readable type name, used when building `VariableTypeError` messages.
pub trait FriendlyName {
    fn friendly_name() -> &'static str;
}

impl FriendlyName for PreciseScore {
    fn friendly_name() -> &'static str {
        "number"
    }
}

impl FriendlyName for GeoPoint {
    fn friendly_name() -> &'static str {
        "geo point"
    }
}

impl FriendlyName for DateTimePayloadType {
    fn friendly_name() -> &'static str {
        "datetime"
    }
}

impl StructPayloadIndex {
    /// Builds a [`FormulaScorer`] for `parsed_formula`, wiring payload
    /// retrievers and condition checkers from this payload index.
    ///
    /// `prefetches_scores` holds one score map per prefetch, indexed by
    /// `VariableId::Score(idx)` during evaluation.
    pub fn formula_scorer<'s, 'q>(
        &'s self,
        parsed_formula: &'q ParsedFormula,
        prefetches_scores: &'q [AHashMap<PointOffsetType, ScoreType>],
        hw_counter: &'q HardwareCounterCell,
    ) -> FormulaScorer<'q>
    where
        's: 'q,
    {
        let ParsedFormula {
            payload_vars,
            conditions,
            defaults,
            formula,
        } = parsed_formula;

        let payload_retrievers = self.retrievers_map(payload_vars.clone(), hw_counter);
        let payload_provider = PayloadProvider::new(self.payload.clone());
        let total = self.available_point_count();
        // Keep only the checker closures; cardinality estimations are not
        // needed for scoring.
        let condition_checkers = self
            .convert_conditions(conditions, payload_provider, total, hw_counter)
            .into_iter()
            .map(|(checker, _estimation)| checker)
            .collect();

        FormulaScorer {
            formula: formula.clone(),
            prefetches_scores,
            payload_retrievers,
            condition_checkers,
            defaults: defaults.clone(),
        }
    }
}

impl FormulaScorer<'_> {
    /// Evaluate the formula for the given point
    ///
    /// Evaluation happens in `PreciseScore` (f64); the result is narrowed to
    /// f32 here and rejected with `NonFiniteNumber` if narrowing produces
    /// an infinity.
    pub fn score(&self, point_id: PointOffsetType) -> OperationResult<ScoreType> {
        self.eval_expression(&self.formula, point_id)
            .and_then(|score| {
                let score_f32 = score as f32;
                if !score_f32.is_finite() {
                    return Err(OperationError::NonFiniteNumber {
                        expression: format!("{score} as f32 = {score_f32}"),
                    });
                }
                Ok(score_f32)
            })
    }

    /// Evaluate the expression recursively
    ///
    /// Operations which can produce NaN/∞ (div, pow, sqrt, exp, log10, ln)
    /// surface that as `OperationError::NonFiniteNumber` instead of letting a
    /// non-finite value propagate upward.
    fn eval_expression(
        &self,
        expression: &ParsedExpression,
        point_id: PointOffsetType,
    ) -> OperationResult<PreciseScore> {
        match expression {
            ParsedExpression::Constant(c) => Ok(c.0),
            ParsedExpression::Variable(v) => match v {
                VariableId::Score(prefetch_idx) => Ok(self
                    .prefetches_scores
                    .get(*prefetch_idx)
                    .and_then(|scores| scores.get(&point_id))
                    .map(|score| PreciseScore::from(*score))
                    .or_else(|| {
                        self.defaults
                            // if there is no score, or it isn't a number, we use the default score
                            .get(&VariableId::Score(*prefetch_idx))
                            .and_then(|value| value.as_f64())
                    })
                    .unwrap_or(DEFAULT_SCORE)),
                VariableId::Payload(path) => {
                    self.get_parsed_payload_value(path, point_id, |value| {
                        value.as_f64().ok_or("Value is not a number")
                    })
                }
                VariableId::Condition(id) => {
                    // Conditions evaluate to 1.0 (true) or 0.0 (false).
                    let value = check_condition(&self.condition_checkers[*id], point_id);
                    let score = if value { 1.0 } else { 0.0 };
                    Ok(score)
                }
            },
            ParsedExpression::GeoDistance { origin, key } => {
                // Haversine great-circle distance between the fixed origin and
                // the point's payload geo value.
                let value = self.get_parsed_payload_value(
                    key,
                    point_id,
                    serde_json::from_value::<GeoPoint>,
                )?;
                Ok(Haversine.distance((*origin).into(), value.into()))
            }
            ParsedExpression::Datetime(dt_expr) => {
                let datetime = match dt_expr {
                    DatetimeExpression::Constant(dt) => *dt,
                    DatetimeExpression::PayloadVariable(json_path) => {
                        self.get_parsed_payload_value(json_path, point_id, |value| {
                            value
                                // datetime index also returns the Serialize impl of datetime which is a string
                                .as_str()
                                .ok_or("Value is not a string")?
                                .parse::<DateTimePayloadType>()
                                .map_err(|e| e.to_string())
                        })?
                    }
                };
                // Convert from i64 to f64.
                // f64's 53 bits of sign + mantissa for microseconds means a span of exact equivalence of
                // about 285 years, after which precision starts dropping
                let float_micros = datetime.timestamp() as PreciseScore;
                // Convert to seconds
                let float_seconds = float_micros / 1_000_000.0;
                Ok(float_seconds)
            }
            ParsedExpression::Mult(expressions) => {
                let mut product = 1.0;
                for expr in expressions {
                    let value = self.eval_expression(expr, point_id)?;
                    // shortcut on multiplication by zero
                    if value == 0.0 {
                        return Ok(0.0);
                    }
                    product *= value;
                }
                Ok(product)
            }
            ParsedExpression::Sum(expressions) => expressions.iter().try_fold(0.0, |acc, expr| {
                let value = self.eval_expression(expr, point_id)?;
                Ok(acc + value)
            }),
            ParsedExpression::Div {
                left,
                right,
                by_zero_default,
            } => {
                let left = self.eval_expression(left, point_id)?;
                // shortcut on numerator zero
                if left == 0.0 {
                    return Ok(0.0);
                }
                let right = self.eval_expression(right, point_id)?;
                // User may provide a fallback value for division by zero.
                if right == 0.0
                    && let Some(default) = by_zero_default
                {
                    return Ok(default.0);
                }
                let div_value = left / right;
                if div_value.is_finite() {
                    return Ok(div_value);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("{left}/{right} = {div_value}"),
                })
            }
            ParsedExpression::Neg(expr) => {
                let value = self.eval_expression(expr, point_id)?;
                Ok(value.neg())
            }
            ParsedExpression::Sqrt(expr) => {
                let value = self.eval_expression(expr, point_id)?;
                let sqrt_value = value.sqrt();
                if sqrt_value.is_finite() {
                    return Ok(sqrt_value);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("√{value} = {sqrt_value}"),
                })
            }
            ParsedExpression::Pow { base, exponent } => {
                let base_value = self.eval_expression(base, point_id)?;
                let exponent_value = self.eval_expression(exponent, point_id)?;
                let power = base_value.powf(exponent_value);
                if power.is_finite() {
                    return Ok(power);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("{base_value}^{exponent_value} = {power}"),
                })
            }
            ParsedExpression::Exp(parsed_expression) => {
                let value = self.eval_expression(parsed_expression, point_id)?;
                let exp_value = value.exp();
                if exp_value.is_finite() {
                    return Ok(exp_value);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("exp({value}) = {exp_value}"),
                })
            }
            ParsedExpression::Log10(expr) => {
                let value = self.eval_expression(expr, point_id)?;
                let log_value = value.log10();
                if log_value.is_finite() {
                    return Ok(log_value);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("log10({value}) = {log_value}"),
                })
            }
            ParsedExpression::Ln(expr) => {
                let value = self.eval_expression(expr, point_id)?;
                let ln_value = value.ln();
                if ln_value.is_finite() {
                    return Ok(ln_value);
                }
                Err(OperationError::NonFiniteNumber {
                    expression: format!("ln({value}) = {ln_value}"),
                })
            }
            ParsedExpression::Abs(expr) => {
                let value = self.eval_expression(expr, point_id)?;
                Ok(value.abs())
            }
            // Interactive formulas in https://www.desmos.com/calculator/htg0vrfmks
            ParsedExpression::Decay {
                kind,
                target,
                lambda,
                x,
            } => {
                let x = self.eval_expression(x, point_id)?;
                let target = if let Some(target) = target {
                    self.eval_expression(target, point_id)?
                } else {
                    DEFAULT_DECAY_TARGET
                };
                let decay = match kind {
                    DecayKind::Exp => exp_decay(x, target, lambda.0),
                    DecayKind::Gauss => gauss_decay(x, target, lambda.0),
                    DecayKind::Lin => linear_decay(x, target, lambda.0),
                };
                // All decay functions have a range of [0, 1], no need to check for bounds
                debug_assert!((0.0..=1.0).contains(&decay));
                Ok(decay)
            }
        }
    }

    /// Fetches the raw payload value(s) for `json_path` at `point_id`.
    ///
    /// Returns `None` when no retriever exists for the path or it yields no
    /// values; a single value is returned as-is, multiple values are wrapped
    /// in a JSON array.
    fn get_payload_value(&self, json_path: &JsonPath, point_id: PointOffsetType) -> Option<Value> {
        self.payload_retrievers
            .get(json_path)
            .and_then(|retriever| {
                let mut multi_value = retriever(point_id);
                match multi_value.len() {
                    0 => None,
                    1 => Some(multi_value.pop().unwrap()),
                    _ => Some(Value::Array(multi_value.into_iter().collect())),
                }
            })
    }

    /// Tries to get a value from payload or from the defaults. Then tries to convert it to the desired type.
    ///
    /// Both a missing value (no payload value and no default) and a failed
    /// conversion are reported as `OperationError::VariableTypeError`, with
    /// `T::friendly_name()` naming the expected type.
    fn get_parsed_payload_value<T, F, E>(
        &self,
        json_path: &JsonPath,
        point_id: PointOffsetType,
        from_value: F,
    ) -> OperationResult<T>
    where
        F: Fn(Value) -> Result<T, E>,
        E: ToString,
        T: FriendlyName,
    {
        let value = self
            .get_payload_value(json_path, point_id)
            .or_else(|| {
                self.defaults
                    .get(&VariableId::Payload(json_path.clone()))
                    .cloned()
            })
            .ok_or_else(|| OperationError::VariableTypeError {
                field_name: json_path.clone(),
                expected_type: T::friendly_name().to_owned(),
                description: "No value found in a payload nor defaults".to_string(),
            })?;

        from_value(value).map_err(|e| OperationError::VariableTypeError {
            field_name: json_path.clone(),
            expected_type: T::friendly_name().to_owned(),
            description: e.to_string(),
        })
    }
}

// Exponential decay of the absolute distance from `target`.
// NOTE(review): `lambda` is precomputed in `parsed_formula`; the caller's
// debug_assert expects the result in [0, 1], which holds only for lambda <= 0
// here — confirm the sign convention upstream.
fn exp_decay(x: PreciseScore, target: PreciseScore, lambda: PreciseScore) -> PreciseScore {
    let diff = (x - target).abs();
    (lambda * diff).exp()
}

// Gaussian (bell-shaped) decay of the distance from `target`.
// NOTE(review): same lambda sign convention as `exp_decay` — the squared diff
// makes taking abs() unnecessary.
fn gauss_decay(x: PreciseScore, target: PreciseScore, lambda: PreciseScore) -> PreciseScore {
    let diff = x - target;
    (lambda * diff * diff).exp()
}

// Linear decay, clamped at zero once the distance exceeds the decay span.
// NOTE(review): unlike exp/gauss above, this negates lambda itself, so it
// presumably expects a non-negative lambda — confirm upstream.
fn linear_decay(x: PreciseScore, target: PreciseScore, lambda: PreciseScore) -> PreciseScore {
    let diff = (x - target).abs();
    (-lambda * diff + 1.0).max(0.0)
}

#[cfg(test)]
#[cfg(feature = "testing")] mod tests { use std::collections::HashMap; use rstest::rstest; use serde_json::json; use smallvec::smallvec; use super::*; use crate::index::query_optimization::rescore_formula::parsed_formula::PreciseScoreOrdered; use crate::json_path::JsonPath; const FIELD_NAME: &str = "number"; const NO_VALUE_FIELD_NAME: &str = "no_number"; const ARRAY_OF_ONE_FIELD_NAME: &str = "array_of_one"; const ARRAY_FIELD_NAME: &str = "array"; const GEO_FIELD_NAME: &str = "geo_point"; const NO_VALUE_GEO_POINT: &str = "no_value_geo_point"; const NO_VALUE_DATETIME: &str = "no_value_datetime"; // self_cell just to be able to create FormulaScorer with a "reference" to fixture scores self_cell::self_cell!( struct ScorerFixture { owner: Vec<AHashMap<u32, f32>>, #[covariant] dependent: FormulaScorer, } ); fn make_formula_scorer(defaults: &HashMap<VariableId, Value>) -> ScorerFixture { let scores = vec![ [(0, 1.0)].into_iter().collect(), [(0, 2.0)].into_iter().collect(), ]; ScorerFixture::new(scores, |prefetches_scores| { let mut payload_retrievers: HashMap<JsonPath, VariableRetrieverFn> = HashMap::new(); payload_retrievers.insert( JsonPath::new(FIELD_NAME), Box::new(|_| smallvec![json!(85.0)]), ); payload_retrievers.insert( JsonPath::new(ARRAY_OF_ONE_FIELD_NAME), Box::new(|_| smallvec![json!(1.2)]), ); payload_retrievers.insert( JsonPath::new(ARRAY_FIELD_NAME), Box::new(|_| smallvec![json!(1.2), json!(2.3)]), ); payload_retrievers.insert( JsonPath::new(GEO_FIELD_NAME), Box::new(|_| { smallvec![json!({"lat": 25.628482424190565, "lon": -100.23881855976})] }), ); let condition_checkers = vec![ OptimizedCondition::Checker(Box::new(|_| true)), OptimizedCondition::Checker(Box::new(|_| false)), ]; FormulaScorer { formula: ParsedExpression::Constant(PreciseScoreOrdered::from(0.0)), prefetches_scores, payload_retrievers, condition_checkers, defaults: defaults.clone(), } }) } #[rstest] // Basic expressions, just variables 
#[case(ParsedExpression::Constant(PreciseScoreOrdered::from(5.0)), 5.0)] #[case(ParsedExpression::new_score_id(0), 1.0)] #[case(ParsedExpression::new_score_id(1), 2.0)] #[case(ParsedExpression::new_payload_id(JsonPath::new(FIELD_NAME)), 85.0)] #[case(ParsedExpression::new_condition_id(0), 1.0)] #[case(ParsedExpression::new_condition_id(1), 0.0)] // Operations #[case(ParsedExpression::Sum(vec![ ParsedExpression::Constant(PreciseScoreOrdered::from(1.0)), ParsedExpression::new_score_id(0), ParsedExpression::new_payload_id(JsonPath::new(FIELD_NAME)), ParsedExpression::new_condition_id(0), ]), 1.0 + 1.0 + 85.0 + 1.0)] #[case(ParsedExpression::Mult(vec![ ParsedExpression::Constant(PreciseScoreOrdered::from(2.0)), ParsedExpression::new_score_id(0), ParsedExpression::new_payload_id(JsonPath::new(FIELD_NAME)), ParsedExpression::new_condition_id(0), ]), 2.0 * 1.0 * 85.0 * 1.0)] #[case(ParsedExpression::new_div( ParsedExpression::Constant(PreciseScoreOrdered::from(10.0)), ParsedExpression::new_score_id(0), None ), 10.0 / 1.0)] #[case(ParsedExpression::new_neg(ParsedExpression::Constant(PreciseScoreOrdered::from(10.0))), -10.0)] // Error cases #[case(ParsedExpression::new_geo_distance( GeoPoint::new_unchecked(-100.43383200156751, 25.717877679163667), JsonPath::new(GEO_FIELD_NAME) ), 21926.494151786308)] #[should_panic( expected = r#"VariableTypeError { field_name: JsonPath { first_key: "number", rest: [] }, expected_type: "geo point", "# )] #[case(ParsedExpression::new_geo_distance(GeoPoint::new_unchecked(-100.43383200156751, 25.717877679163667), JsonPath::new(FIELD_NAME)), 0.0)] #[should_panic(expected = r#"NonFiniteNumber { expression: "-1^0.4 = NaN" }"#)] #[case(ParsedExpression::new_pow(ParsedExpression::Constant(PreciseScoreOrdered::from(-1.0)), ParsedExpression::Constant(PreciseScoreOrdered::from(0.4))), 0.0)] #[should_panic(expected = r#"NonFiniteNumber { expression: "√-3 = NaN" }"#)] 
#[case(ParsedExpression::new_sqrt(ParsedExpression::Constant(PreciseScoreOrdered::from(-3.0))), 0.0)] #[should_panic(expected = r#"NonFiniteNumber { expression: "1/0 = inf" }"#)] #[case( ParsedExpression::new_div( ParsedExpression::Constant(PreciseScoreOrdered::from(1.0)), ParsedExpression::Constant(PreciseScoreOrdered::from(0.0)), None ), 0.0 )] #[should_panic(expected = r#"NonFiniteNumber { expression: "log10(0) = -inf" }"#)] #[case( ParsedExpression::new_log10(ParsedExpression::Constant(PreciseScoreOrdered::from(0.0))), 0.0 )] #[should_panic(expected = r#"NonFiniteNumber { expression: "ln(0) = -inf" }"#)] #[case( ParsedExpression::new_ln(ParsedExpression::Constant(PreciseScoreOrdered::from(0.0))), 0.0 )] #[test] fn test_evaluation(#[case] expr: ParsedExpression, #[case] expected: PreciseScore) { let defaults = HashMap::new(); let scorer_fixture = make_formula_scorer(&defaults); let scorer = scorer_fixture.borrow_dependent(); assert_eq!(scorer.eval_expression(&expr, 0).unwrap(), expected); } // Default values #[rstest] // Defined default score #[case(ParsedExpression::new_score_id(3), Ok(1.5))] // score idx not defined #[case(ParsedExpression::new_score_id(10), Ok(DEFAULT_SCORE))] // missing value in payload #[case( ParsedExpression::new_payload_id(JsonPath::new(ARRAY_OF_ONE_FIELD_NAME)), Ok(1.2) )] #[case( ParsedExpression::new_payload_id(JsonPath::new(ARRAY_FIELD_NAME)), Err(OperationError::VariableTypeError { field_name: JsonPath::new("array"), expected_type: "number".into(), description: "Value is not a number".into() }) )] #[case( ParsedExpression::new_payload_id(JsonPath::new(NO_VALUE_FIELD_NAME)), Ok(85.0) )] // missing value and no default value provided #[case( ParsedExpression::new_payload_id(JsonPath::new("missing_field")), Err(OperationError::VariableTypeError { field_name: JsonPath::new("missing_field"), expected_type: PreciseScore::friendly_name().to_string(), description: "No value found in a payload nor defaults".to_string(), }) )] // geo distance 
with default value #[case(ParsedExpression::new_geo_distance(GeoPoint::new_unchecked(-100.43383200156751, 25.717877679163667), JsonPath::new(NO_VALUE_GEO_POINT)), Ok(90951.29600298218))] // datetime expression constant #[case( ParsedExpression::Datetime(DatetimeExpression::Constant("2025-03-18".parse().unwrap())), Ok("2025-03-18".parse::<DateTimePayloadType>().unwrap().timestamp() as PreciseScore / 1_000_000.0) )] // datetime expression with payload variable that doesn't exist in payload and no default #[case( ParsedExpression::Datetime(DatetimeExpression::PayloadVariable(JsonPath::new("missing_datetime"))), Err(OperationError::VariableTypeError { field_name: JsonPath::new("missing_datetime"), expected_type: DateTimePayloadType::friendly_name().to_string(), description: "No value found in a payload nor defaults".to_string(), }) )] // datetime expression with payload variable that doesn't exist in payload but has default #[case( ParsedExpression::Datetime(DatetimeExpression::PayloadVariable(JsonPath::new(NO_VALUE_DATETIME))), Ok("2025-03-19T12:00:00".parse::<DateTimePayloadType>().unwrap().timestamp() as PreciseScore / 1_000_000.0) )] #[test] fn test_default_values( #[case] expr: ParsedExpression, #[case] expected: OperationResult<PreciseScore>, ) { let defaults = [ (VariableId::Score(3), json!(1.5)), ( VariableId::Payload(JsonPath::new(NO_VALUE_FIELD_NAME)), json!(85.0), ), ( VariableId::Payload(JsonPath::new(NO_VALUE_GEO_POINT)), json!({"lat": 25.0, "lon": -100.0}), ), ( VariableId::Payload(JsonPath::new(NO_VALUE_DATETIME)), json!("2025-03-19T12:00:00"), ), ] .into_iter() .collect(); let scorer_fixture = make_formula_scorer(&defaults); let scorer = scorer_fixture.borrow_dependent(); assert_eq!(scorer.eval_expression(&expr, 0), expected); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/query_optimization/condition_converter/match_converter.rs
lib/segment/src/index/query_optimization/condition_converter/match_converter.rs
//! Converts `Match` payload conditions into per-point checker closures
//! backed by a concrete field index.

use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::types::PointOffsetType;
use indexmap::IndexSet;
use uuid::Uuid;

use crate::index::field_index::FieldIndex;
use crate::index::query_optimization::optimized_filter::ConditionCheckerFn;
use crate::payload_storage::condition_checker::INDEXSET_ITER_THRESHOLD;
use crate::types::{
    AnyVariants, Match, MatchAny, MatchExcept, MatchPhrase, MatchText, MatchTextAny, MatchValue,
    ValueVariants,
};

/// Dispatches a `Match` condition to the checker builder for its variant.
///
/// Returns `None` when the given index cannot serve this kind of match
/// (e.g. a text query against a non-full-text index).
pub fn get_match_checkers(
    index: &FieldIndex,
    cond_match: Match,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    match cond_match {
        Match::Value(MatchValue { value }) => get_match_value_checker(value, index, hw_acc),
        Match::Text(MatchText { text }) => {
            get_match_text_checker(text, TextQueryType::Text, index, hw_acc)
        }
        Match::TextAny(MatchTextAny { text_any }) => {
            get_match_text_checker(text_any, TextQueryType::TextAny, index, hw_acc)
        }
        Match::Phrase(MatchPhrase { phrase }) => {
            get_match_text_checker(phrase, TextQueryType::Phrase, index, hw_acc)
        }
        Match::Any(MatchAny { any }) => get_match_any_checker(any, index, hw_acc),
        Match::Except(MatchExcept { except }) => get_match_except_checker(except, index, hw_acc),
    }
}

/// Builds a checker for an exact-value match against the matching index kind.
///
/// A point matches when ANY of its values for the field equals the target.
/// Unsupported (value-type, index-type) combinations are spelled out
/// explicitly — presumably so adding a new index or value variant forces a
/// decision here instead of silently falling into a `_` arm.
fn get_match_value_checker(
    value_variant: ValueVariants,
    index: &FieldIndex,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    match (value_variant, index) {
        (ValueVariants::String(keyword), FieldIndex::KeywordIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            Some(Box::new(move |point_id: PointOffsetType| {
                index.check_values_any(point_id, &hw_counter, |k| k == keyword)
            }))
        }
        (ValueVariants::String(value), FieldIndex::UuidMapIndex(index)) => {
            // Non-UUID strings cannot match a UUID index at all -> `None` via `?`.
            let uuid = Uuid::parse_str(&value).map(|uuid| uuid.as_u128()).ok()?;
            let hw_counter = hw_acc.get_counter_cell();
            Some(Box::new(move |point_id: PointOffsetType| {
                index.check_values_any(point_id, &hw_counter, |i| i == &uuid)
            }))
        }
        (ValueVariants::Integer(value), FieldIndex::IntMapIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            Some(Box::new(move |point_id: PointOffsetType| {
                index.check_values_any(point_id, &hw_counter, |i| *i == value)
            }))
        }
        (ValueVariants::Bool(is_true), FieldIndex::BoolIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            // Note: the bool index takes the target value directly instead of
            // a predicate closure.
            Some(Box::new(move |point_id: PointOffsetType| {
                index.check_values_any(point_id, is_true, &hw_counter)
            }))
        }
        // All remaining combinations are type mismatches -> no checker.
        (ValueVariants::Bool(_), FieldIndex::DatetimeIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::FloatIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::FullTextIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::GeoIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::IntIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::IntMapIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::KeywordIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::UuidIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::UuidMapIndex(_))
        | (ValueVariants::Bool(_), FieldIndex::NullIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::BoolIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::DatetimeIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::FloatIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::FullTextIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::GeoIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::IntIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::KeywordIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::UuidIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::UuidMapIndex(_))
        | (ValueVariants::Integer(_), FieldIndex::NullIndex(_))
        | (ValueVariants::String(_), FieldIndex::BoolIndex(_))
        | (ValueVariants::String(_), FieldIndex::DatetimeIndex(_))
        | (ValueVariants::String(_), FieldIndex::FloatIndex(_))
        | (ValueVariants::String(_), FieldIndex::FullTextIndex(_))
        | (ValueVariants::String(_), FieldIndex::GeoIndex(_))
        | (ValueVariants::String(_), FieldIndex::IntIndex(_))
        | (ValueVariants::String(_), FieldIndex::IntMapIndex(_))
        | (ValueVariants::String(_), FieldIndex::UuidIndex(_))
        | (ValueVariants::String(_), FieldIndex::NullIndex(_)) => None,
    }
}
/// Builds a checker for a `MatchAny` ("value is one of …") condition.
///
/// For small target lists a linear scan of the list is used; from
/// `INDEXSET_ITER_THRESHOLD` elements on, `IndexSet::contains` is used
/// instead.
fn get_match_any_checker(
    any_variant: AnyVariants,
    index: &FieldIndex,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    match (any_variant, index) {
        (AnyVariants::Strings(list), FieldIndex::KeywordIndex(index)) => {
            if list.len() < INDEXSET_ITER_THRESHOLD {
                let hw_counter = hw_acc.get_counter_cell();
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        list.iter().any(|s| s.as_str() == value)
                    })
                }))
            } else {
                let hw_counter = hw_acc.get_counter_cell();
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| list.contains(value))
                }))
            }
        }
        (AnyVariants::Strings(list), FieldIndex::UuidMapIndex(index)) => {
            // If ANY entry is not a valid UUID the whole list cannot be
            // served by this index -> `None` via `?`.
            let list = list
                .iter()
                .map(|s| Uuid::parse_str(s).map(|uuid| uuid.as_u128()).ok())
                .collect::<Option<IndexSet<_>>>()?;
            let hw_counter = hw_acc.get_counter_cell();
            if list.len() < INDEXSET_ITER_THRESHOLD {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        list.iter().any(|i| i == value)
                    })
                }))
            } else {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| list.contains(value))
                }))
            }
        }
        (AnyVariants::Integers(list), FieldIndex::IntMapIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            if list.len() < INDEXSET_ITER_THRESHOLD {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        list.iter().any(|i| i == value)
                    })
                }))
            } else {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| list.contains(value))
                }))
            }
        }
        // Type mismatches: no checker can be built.
        (AnyVariants::Integers(_), FieldIndex::BoolIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::DatetimeIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::FloatIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::FullTextIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::GeoIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::IntIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::KeywordIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::UuidIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::UuidMapIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::NullIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::BoolIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::DatetimeIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::FloatIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::FullTextIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::GeoIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::IntIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::IntMapIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::UuidIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::NullIndex(_)) => None,
    }
}

/// Builds a checker for a `MatchExcept` ("value is NOT one of …") condition.
///
/// A point matches when any of its values is outside the excluded list. When
/// the index type does not correspond to the excluded value type, every point
/// that has at least one value for the field is considered a match (its
/// values cannot possibly be in a list of a different type).
fn get_match_except_checker(
    except: AnyVariants,
    index: &FieldIndex,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let checker: Option<ConditionCheckerFn> = match (except, index) {
        (AnyVariants::Strings(list), FieldIndex::KeywordIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            if list.len() < INDEXSET_ITER_THRESHOLD {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        !list.iter().any(|s| s.as_str() == value)
                    })
                }))
            } else {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| !list.contains(value))
                }))
            }
        }
        (AnyVariants::Strings(list), FieldIndex::UuidMapIndex(index)) => {
            // Any non-UUID entry -> fall through to the "has any value"
            // fallback below via `?`.
            let list = list
                .iter()
                .map(|s| Uuid::parse_str(s).map(|uuid| uuid.as_u128()).ok())
                .collect::<Option<IndexSet<_>>>()?;
            let hw_counter = hw_acc.get_counter_cell();
            if list.len() < INDEXSET_ITER_THRESHOLD {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        !list.iter().any(|i| i == value)
                    })
                }))
            } else {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| !list.contains(value))
                }))
            }
        }
        (AnyVariants::Integers(list), FieldIndex::IntMapIndex(index)) => {
            let hw_counter = hw_acc.get_counter_cell();
            if list.len() < INDEXSET_ITER_THRESHOLD {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| {
                        !list.iter().any(|i| i == value)
                    })
                }))
            } else {
                Some(Box::new(move |point_id: PointOffsetType| {
                    index.check_values_any(point_id, &hw_counter, |value| !list.contains(value))
                }))
            }
        }
        // Type mismatches: handled by the fallback after this match.
        (AnyVariants::Strings(_), FieldIndex::IntIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::DatetimeIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::IntMapIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::FloatIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::GeoIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::FullTextIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::BoolIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::UuidIndex(_))
        | (AnyVariants::Strings(_), FieldIndex::NullIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::IntIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::DatetimeIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::KeywordIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::FloatIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::GeoIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::FullTextIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::BoolIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::UuidIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::UuidMapIndex(_))
        | (AnyVariants::Integers(_), FieldIndex::NullIndex(_)) => None,
    };

    if checker.is_none() {
        return Some(Box::new(|point_id: PointOffsetType| {
            // If there is any other value of any other index, then it's a match
            index.values_count(point_id) > 0
        }));
    };
    checker
}

// Which full-text query flavor to parse the input text as.
enum TextQueryType {
    Phrase,
    Text,
    TextAny,
}

/// Builds a checker for text-based matches; only full-text indices apply.
///
/// If the query fails to parse (e.g. no tokens survive tokenization), a
/// constant-false checker is returned rather than `None`, so the condition
/// still evaluates instead of being treated as unservable.
fn get_match_text_checker(
    text: String,
    query_type: TextQueryType,
    index: &FieldIndex,
    hw_acc: HwMeasurementAcc,
) -> Option<ConditionCheckerFn<'_>> {
    let hw_counter = hw_acc.get_counter_cell();
    match index {
        FieldIndex::FullTextIndex(full_text_index) => {
            let query_opt = match query_type {
                TextQueryType::Phrase => full_text_index.parse_phrase_query(&text, &hw_counter),
                TextQueryType::Text => full_text_index.parse_text_query(&text, &hw_counter),
                TextQueryType::TextAny => full_text_index.parse_text_any_query(&text, &hw_counter),
            };
            let Some(parsed_query) = query_opt else {
                return Some(Box::new(|_| false));
            };
            Some(Box::new(move |point_id: PointOffsetType| {
                full_text_index.check_match(&parsed_query, point_id)
            }))
        }
        // Text queries are meaningless against non-full-text indices.
        FieldIndex::BoolIndex(_)
        | FieldIndex::DatetimeIndex(_)
        | FieldIndex::FloatIndex(_)
        | FieldIndex::GeoIndex(_)
        | FieldIndex::IntIndex(_)
        | FieldIndex::IntMapIndex(_)
        | FieldIndex::KeywordIndex(_)
        | FieldIndex::UuidIndex(_)
        | FieldIndex::UuidMapIndex(_)
        | FieldIndex::NullIndex(_) => None,
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/geo_hash.rs
lib/segment/src/index/field_index/geo_hash.rs
use std::fmt::Display; use std::ops::{Index, Range}; use ecow::EcoString; use geo::{Coord, Distance, Haversine, Intersects, LineString, Point, Polygon}; use geohash::{Direction, GeohashError, decode, decode_bbox, encode}; use itertools::Itertools; use ordered_float::OrderedFloat; use crate::common::operation_error::{OperationError, OperationResult}; use crate::types::{GeoBoundingBox, GeoPoint, GeoPolygon, GeoRadius}; /// Packed representation of a geohash string. /// /// Geohash string is a base32 encoded string. /// It means that each character can be represented with 5 bits. /// Also, the length of the string is encoded as 4 bits (because max size is `GEOHASH_MAX_LENGTH = 12`). /// So, the packed representation is 64 bits long: 5bits * 12chars + 4bits = 64 bits. /// /// Characters are stored in reverse order to keep lexicographical order. /// Length is stored in the last 4 bits. /// Unused bits are set to 0. /// /// Example for `dr5ruj447`: /// /// ```text /// Bit no. 63 4 3 0 /// | | | | /// Bit value 01100 10111 00101 10111 11010 10001 00100 00100 00111 00000 00000 00000 1001 /// Decoded 'd' 'r' '5' 'r' 'u' 'j' '4' '4' '7' 9 /// Meaning s[0] s[1] s[2] s[3] s[4] s[5] s[6] s[7] s[8] s[9] s[10] s[11] length /// ``` #[repr(C)] #[derive(Default, Clone, Copy, Debug, PartialEq, Hash, Ord, PartialOrd, Eq)] pub struct GeoHash { packed: u64, } // code from geohash crate // the alphabet for the base32 encoding used in geohashing #[rustfmt::skip] const BASE32_CODES: [u8; 32] = [ b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'j', b'k', b'm', b'n', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', ]; /// Max size of geo-hash used for indexing. 
size=12 is about 6cm2 pub const GEOHASH_MAX_LENGTH: usize = 12; const LON_RANGE: Range<f64> = -180.0..180.0; const LAT_RANGE: Range<f64> = -90.0..90.0; const COORD_EPS: f64 = 1e-12; impl Index<usize> for GeoHash { type Output = u8; fn index(&self, i: usize) -> &Self::Output { assert!(i < self.len()); let index = (self.packed >> Self::shift_value(i)) & 0b11111; &BASE32_CODES[index as usize] } } impl TryFrom<EcoString> for GeoHash { type Error = GeohashError; fn try_from(hash: EcoString) -> Result<Self, Self::Error> { Self::new(hash.as_bytes()) } } impl TryFrom<String> for GeoHash { type Error = GeohashError; fn try_from(hash: String) -> Result<Self, Self::Error> { Self::new(hash.as_bytes()) } } impl From<GeoHash> for EcoString { fn from(hash: GeoHash) -> Self { hash.iter().map(char::from).collect() } } impl Display for GeoHash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { EcoString::from(*self).fmt(f) } } pub struct GeoHashIterator { packed_chars: u64, } impl Iterator for GeoHashIterator { type Item = u8; fn next(&mut self) -> Option<Self::Item> { let len = self.packed_chars & 0b1111; if len > 0 { // take first character from the packed value let char_index = (self.packed_chars >> 59) & 0b11111; // shift packed value to the left to get the next character self.packed_chars = (self.packed_chars << 5) | (len - 1); // get character from the base32 alphabet Some(BASE32_CODES[char_index as usize]) } else { None } } } impl GeoHash { pub fn new<H>(s: H) -> Result<Self, GeohashError> where H: AsRef<[u8]>, { let s = s.as_ref(); if s.len() > GEOHASH_MAX_LENGTH { return Err(GeohashError::InvalidLength(s.len())); } let mut packed: u64 = 0; for (i, c) in s.iter().enumerate() { let index = BASE32_CODES.iter().position(|x| x == c).unwrap() as u64; packed |= index << Self::shift_value(i); } packed |= s.len() as u64; Ok(Self { packed }) } pub fn iter(&self) -> GeoHashIterator { if !self.is_empty() { GeoHashIterator { packed_chars: self.packed, } } else { 
GeoHashIterator { packed_chars: 0 } } } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn len(&self) -> usize { (self.packed & 0b1111) as usize } pub fn truncate(&self, new_len: usize) -> Self { assert!(new_len <= self.len()); if new_len == self.len() { return *self; } if new_len == 0 { return Self { packed: 0 }; } let mut packed = self.packed; // Clear all bits after `new_len`-th character and clear length bits let shift = Self::shift_value(new_len - 1); packed = (packed >> shift) << shift; packed |= new_len as u64; // set new length Self { packed } } pub fn starts_with(&self, other: GeoHash) -> bool { if self.len() < other.len() { // other is longer than self return false; } if other.is_empty() { // empty string is a prefix of any string return true; } let self_shifted = self.packed >> Self::shift_value(other.len() - 1); let other_shifted = other.packed >> Self::shift_value(other.len() - 1); self_shifted == other_shifted } // Returns the shift value. If we apply this shift to the packed value, we get the value of the `i`-th character. 
fn shift_value(i: usize) -> usize { assert!(i < GEOHASH_MAX_LENGTH); // first 4 bits is size, then 5 bits per character in reverse order (for lexicographical order) 5 * (GEOHASH_MAX_LENGTH - 1 - i) + 4 } } impl From<GeoPoint> for Coord<f64> { fn from(point: GeoPoint) -> Self { Self { x: point.lon.0, y: point.lat.0, } } } pub fn common_hash_prefix(geo_hashes: &[GeoHash]) -> Option<GeoHash> { if geo_hashes.is_empty() { return None; } let first = &geo_hashes[0]; let mut prefix: usize = first.len(); for geo_hash in geo_hashes.iter().skip(1) { for i in 0..prefix { if first[i] != geo_hash[i] { prefix = i; break; } } } Some(first.truncate(prefix)) } /// Fix longitude for spherical overflow /// lon: 181.0 -> -179.0 fn sphere_lon(lon: f64) -> f64 { let mut res_lon = lon; if res_lon > LON_RANGE.end { res_lon = LON_RANGE.start + res_lon - LON_RANGE.end; } if res_lon < LON_RANGE.start { res_lon = LON_RANGE.end + res_lon - LON_RANGE.start; } res_lon } /// Fix latitude for spherical overflow fn sphere_lat(lat: f64) -> f64 { let mut res_lat = lat; if res_lat > LAT_RANGE.end { res_lat = LAT_RANGE.end - COORD_EPS; } if res_lat < LAT_RANGE.start { res_lat = LAT_RANGE.start + COORD_EPS; } res_lat } /// Get neighbour geohash even from the other side of coordinates fn sphere_neighbor(hash: GeoHash, direction: Direction) -> Result<GeoHash, GeohashError> { let hash_str = EcoString::from(hash); let (coord, lon_err, lat_err) = decode(hash_str.as_str())?; let (dlat, dlng) = direction.to_tuple(); let lon = sphere_lon(coord.x + 2f64 * lon_err.abs() * dlng); let lat = sphere_lat(coord.y + 2f64 * lat_err.abs() * dlat); let neighbor_coord = Coord { x: lon, y: lat }; let encoded_string = encode(neighbor_coord, hash_str.len())?; GeoHash::try_from(encoded_string) } pub fn encode_max_precision(lon: f64, lat: f64) -> Result<GeoHash, GeohashError> { let encoded_string = encode((lon, lat).into(), GEOHASH_MAX_LENGTH)?; GeoHash::try_from(encoded_string) } pub fn geo_hash_to_box(geo_hash: GeoHash) -> 
GeoBoundingBox { let rectangle = decode_bbox(EcoString::from(geo_hash).as_str()).unwrap(); let top_left = GeoPoint { lon: OrderedFloat(rectangle.min().x), lat: OrderedFloat(rectangle.max().y), }; let bottom_right = GeoPoint { lon: OrderedFloat(rectangle.max().x), lat: OrderedFloat(rectangle.min().y), }; GeoBoundingBox { top_left, bottom_right, } } #[derive(Debug)] struct GeohashBoundingBox { north_west: GeoHash, south_west: GeoHash, #[allow(dead_code)] south_east: GeoHash, // field is not involved in the calculations, but is kept for symmetry north_east: GeoHash, } impl GeohashBoundingBox { /// Calculate geo-hashes covering the rectangular region with given precision /// /// # Arguments /// /// * `precision` - precision of cover /// * `max_regions` - stop early if maximal amount of regions exceeded /// /// # Result /// /// * None - if there are more regions than a limit /// * Some(list of geo-hashes covering the region /// fn geohash_regions(&self, precision: usize, max_regions: usize) -> Option<Vec<GeoHash>> { let mut seen: Vec<GeoHash> = Vec::new(); let mut from_row: GeoHash = self.north_west.truncate(precision); let mut to_row: GeoHash = self.north_east.truncate(precision); let to_column = self.south_west.truncate(precision); loop { let mut current = from_row; loop { seen.push(current); if seen.len() > max_regions { return None; } if current == to_row { break; } current = sphere_neighbor(current, Direction::E).unwrap(); } if from_row == to_column { break; } from_row = sphere_neighbor(from_row, Direction::S).unwrap(); to_row = sphere_neighbor(to_row, Direction::S).unwrap(); } Some(seen) } } impl From<GeoBoundingBox> for GeohashBoundingBox { fn from(bounding_box: GeoBoundingBox) -> Self { let GeoPoint { lat: OrderedFloat(max_lat), lon: OrderedFloat(min_lon), } = bounding_box.top_left; let GeoPoint { lat: OrderedFloat(min_lat), lon: OrderedFloat(max_lon), } = bounding_box.bottom_right; // Unwrap is acceptable, as data should be validated before let north_west = 
encode_max_precision(min_lon, max_lat).unwrap(); let south_west = encode_max_precision(min_lon, min_lat).unwrap(); let south_east = encode_max_precision(max_lon, min_lat).unwrap(); let north_east = encode_max_precision(max_lon, max_lat).unwrap(); Self { north_west, south_west, south_east, north_east, } } } /// Check if geohash tile intersects the circle fn check_circle_intersection(geohash: &str, circle: &GeoRadius) -> bool { let precision = geohash.len(); if precision == 0 { return true; } let rect = decode_bbox(geohash).unwrap(); let c0 = rect.min(); let c1 = rect.max(); let bbox_center = Point::new((c0.x + c1.x) / 2f64, (c0.y + c1.y) / 2f64); let half_diagonal = Haversine.distance(bbox_center, Point(c0)); half_diagonal + circle.radius.0 > Haversine.distance( bbox_center, Point::new(circle.center.lon.0, circle.center.lat.0), ) } /// Check if geohash tile intersects the polygon fn check_polygon_intersection(geohash: &str, polygon: &Polygon) -> bool { let precision = geohash.len(); if precision == 0 { return true; } let rect = decode_bbox(geohash).unwrap(); rect.intersects(polygon) } fn create_hashes( mapping_fn: impl Fn(usize) -> Option<Vec<GeoHash>>, ) -> OperationResult<Vec<GeoHash>> { (0..=GEOHASH_MAX_LENGTH) .map(mapping_fn) .take_while(|hashes| hashes.is_some()) .last() .ok_or_else(|| OperationError::service_error("no hash coverage for any precision"))? .ok_or_else(|| OperationError::service_error("geo-hash coverage is empty")) } /// Return as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain the whole circle. 
pub fn circle_hashes(circle: &GeoRadius, max_regions: usize) -> OperationResult<Vec<GeoHash>> { if max_regions == 0 { return Err(OperationError::service_error( "max_regions cannot be equal to zero", )); } let geo_bounding_box = minimum_bounding_rectangle_for_circle(circle); if geo_bounding_box.top_left.lat.is_nan() || geo_bounding_box.top_left.lon.is_nan() || geo_bounding_box.bottom_right.lat.is_nan() || geo_bounding_box.bottom_right.lat.is_nan() { return Err(OperationError::service_error("Invalid circle")); } let full_geohash_bounding_box: GeohashBoundingBox = geo_bounding_box.into(); let mapping_fn = |precision| { full_geohash_bounding_box .geohash_regions(precision, max_regions) .map(|hashes| { hashes .into_iter() .filter(|hash| { check_circle_intersection(EcoString::from(*hash).as_str(), circle) }) .collect_vec() }) }; create_hashes(mapping_fn) } /// Return as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain the whole rectangle. pub fn rectangle_hashes( rectangle: &GeoBoundingBox, max_regions: usize, ) -> OperationResult<Vec<GeoHash>> { if max_regions == 0 { return Err(OperationError::service_error( "max_regions cannot be equal to zero", )); } let full_geohash_bounding_box: GeohashBoundingBox = (*rectangle).into(); let mapping_fn = |precision| full_geohash_bounding_box.geohash_regions(precision, max_regions); create_hashes(mapping_fn) } /// Return as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain a boundary defined by closed LineString. 
fn boundary_hashes(boundary: &LineString, max_regions: usize) -> OperationResult<Vec<GeoHash>> { let geo_bounding_box = minimum_bounding_rectangle_for_boundary(boundary); let full_geohash_bounding_box: GeohashBoundingBox = geo_bounding_box.into(); let polygon = Polygon::new(boundary.clone(), vec![]); let mapping_fn = |precision| { full_geohash_bounding_box .geohash_regions(precision, max_regions) .map(|hashes| { hashes .into_iter() .filter(|hash| { check_polygon_intersection(EcoString::from(*hash).as_str(), &polygon) }) .collect_vec() }) }; create_hashes(mapping_fn) } /// A function used for cardinality estimation. /// /// The first return value is as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain the polygon's exterior. /// The second return value is all as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain each polygon's interior. pub fn polygon_hashes_estimation( polygon: &GeoPolygon, max_regions: usize, ) -> (Vec<GeoHash>, Vec<Vec<GeoHash>>) { assert_ne!(max_regions, 0, "max_regions cannot be equal to zero"); let polygon_wrapper = polygon.convert().polygon; let exterior_hashes = boundary_hashes(&polygon_wrapper.exterior().clone(), max_regions); let interiors_hashes = polygon_wrapper .interiors() .iter() .map(|interior| boundary_hashes(interior, max_regions).unwrap()) .collect_vec(); (exterior_hashes.unwrap(), interiors_hashes) } /// Return as-high-as-possible with maximum of `max_regions` /// number of geo-hash guaranteed to contain the whole polygon. 
pub fn polygon_hashes(polygon: &GeoPolygon, max_regions: usize) -> OperationResult<Vec<GeoHash>> { if max_regions == 0 { return Err(OperationError::service_error( "max_regions cannot be equal to zero", )); } let polygon_wrapper = polygon.convert().polygon; let geo_bounding_box = minimum_bounding_rectangle_for_boundary(polygon_wrapper.exterior()); let full_geohash_bounding_box: GeohashBoundingBox = geo_bounding_box.into(); let mapping_fn = |precision| { full_geohash_bounding_box .geohash_regions(precision, max_regions) .map(|hashes| { hashes .into_iter() .filter(|hash| { check_polygon_intersection( EcoString::from(*hash).as_str(), &polygon_wrapper, ) }) .collect_vec() }) }; create_hashes(mapping_fn) } /// A globally-average value is usually considered to be 6,371 kilometres (3,959 mi) with a 0.3% variability (±10 km). /// <https://en.wikipedia.org/wiki/Earth_radius>. const EARTH_RADIUS_METERS: f64 = 6371.0 * 1000.; /// Returns the GeoBoundingBox that defines the MBR /// <http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates#Longitude> fn minimum_bounding_rectangle_for_circle(circle: &GeoRadius) -> GeoBoundingBox { // circle.radius is in meter let angular_radius: f64 = circle.radius.0 / EARTH_RADIUS_METERS; let angular_lat = circle.center.lat.to_radians(); let mut min_lat = (angular_lat - angular_radius).to_degrees(); let mut max_lat = (angular_lat + angular_radius).to_degrees(); let (min_lon, max_lon) = if LAT_RANGE.start < min_lat && max_lat < LAT_RANGE.end { // Poles are not within the query, default scenario let angular_lon = circle.center.lon.to_radians(); let delta_lon = (angular_radius.sin() / angular_lat.cos()).asin(); let min_lon = (angular_lon - delta_lon).to_degrees(); let max_lon = (angular_lon + delta_lon).to_degrees(); (min_lon, max_lon) } else { // poles are within circle - use whole cup if LAT_RANGE.start > min_lat { min_lat = LAT_RANGE.start + COORD_EPS; } if max_lat > LAT_RANGE.end { max_lat = LAT_RANGE.end - COORD_EPS; } (LON_RANGE.start + 
COORD_EPS, LON_RANGE.end - COORD_EPS) }; let top_left = GeoPoint { lat: OrderedFloat(max_lat), lon: OrderedFloat(sphere_lon(min_lon)), }; let bottom_right = GeoPoint { lat: OrderedFloat(min_lat), lon: OrderedFloat(sphere_lon(max_lon)), }; GeoBoundingBox { top_left, bottom_right, } } fn minimum_bounding_rectangle_for_boundary(boundary: &LineString) -> GeoBoundingBox { let mut min_lon = f64::MAX; let mut max_lon = f64::MIN; let mut min_lat = f64::MAX; let mut max_lat = f64::MIN; for point in boundary.coords() { if point.x < min_lon { min_lon = point.x; } if point.x > max_lon { max_lon = point.x; } if point.y < min_lat { min_lat = point.y; } if point.y > max_lat { max_lat = point.y; } } let top_left = GeoPoint { lon: OrderedFloat(min_lon), lat: OrderedFloat(max_lat), }; let bottom_right = GeoPoint { lon: OrderedFloat(max_lon), lat: OrderedFloat(min_lat), }; GeoBoundingBox { top_left, bottom_right, } } #[cfg(test)] mod tests { use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use super::*; use crate::types::test_utils::{build_polygon, build_polygon_with_interiors}; const BERLIN: GeoPoint = GeoPoint { lat: OrderedFloat(52.52437), lon: OrderedFloat(13.41053), }; const NYC: GeoPoint = GeoPoint { lat: OrderedFloat(40.75798), lon: OrderedFloat(-73.991516), }; #[test] fn geohash_ordering() { let mut v: Vec<&[u8]> = vec![ b"dr5ru", b"uft56", b"hhbcd", b"uft560000000", b"h", b"hbcd", b"887hh1234567", b"", b"hwx98", b"hbc", b"dr5rukz", ]; let mut hashes = v.iter().map(|s| GeoHash::new(s).unwrap()).collect_vec(); hashes.sort_unstable(); v.sort_unstable(); for (a, b) in hashes.iter().zip(v) { assert_eq!(a.to_string().as_bytes(), b); } // special case for hash which ends with 0 // "uft56" and "uft560000000" have the same encoded chars, but different length assert_eq!( GeoHash::new(b"uft5600") .unwrap() .cmp(&GeoHash::new(b"uft560000000").unwrap()), "uft5600".cmp("uft560000000"), ); assert_eq!( GeoHash::new(b"") .unwrap() .cmp(&GeoHash::new(b"000000000000").unwrap()), 
"".cmp("000000000000"), ); } #[test] fn geohash_starts_with() { let samples: [&[u8]; 6] = [ b"", b"uft5601", b"uft560100000", b"uft56010000r", b"uft5602", b"uft560200000", ]; for a in samples.iter() { let a_hash = GeoHash::new(a).unwrap(); for b in samples.iter() { let b_hash = GeoHash::new(b).unwrap(); if a.starts_with(b) { assert!( a_hash.starts_with(b_hash), "{a:?} expected to start with {b:?}", ); } else { assert!( !a_hash.starts_with(b_hash), "{a:?} expected to not start with {b:?}", ); } } } } #[test] fn geohash_encode_longitude_first() { let center_hash = GeoHash::new(encode(Coord::from(NYC), GEOHASH_MAX_LENGTH).unwrap()); assert_eq!(center_hash.ok(), GeoHash::new(b"dr5ru7c02wnv").ok()); let center_hash = GeoHash::new(encode(Coord::from(NYC), 6).unwrap()); assert_eq!(center_hash.ok(), GeoHash::new(b"dr5ru7").ok()); let center_hash = GeoHash::new(encode(Coord::from(BERLIN), GEOHASH_MAX_LENGTH).unwrap()); assert_eq!(center_hash.ok(), GeoHash::new(b"u33dc1v0xupz").ok()); let center_hash = GeoHash::new(encode(Coord::from(BERLIN), 6).unwrap()); assert_eq!(center_hash.ok(), GeoHash::new(b"u33dc1").ok()); } #[test] fn rectangle_geo_hash_nyc() { // data from https://www.titanwolf.org/Network/q/a98ba365-14c5-48f4-8839-86a0962e0ab9/y let near_nyc_circle = GeoRadius { center: NYC, radius: OrderedFloat(800.0), }; let bounding_box = minimum_bounding_rectangle_for_circle(&near_nyc_circle); let rectangle: GeohashBoundingBox = bounding_box.into(); assert_eq!(rectangle.north_west, GeoHash::new(b"dr5ruj4477kd").unwrap()); assert_eq!(rectangle.south_west, GeoHash::new(b"dr5ru46ne2ux").unwrap()); assert_eq!(rectangle.south_east, GeoHash::new(b"dr5ru6ryw0cp").unwrap()); assert_eq!(rectangle.north_east, GeoHash::new(b"dr5rumpfq534").unwrap()); } #[test] fn top_level_rectangle_geo_area() { let rect = GeohashBoundingBox { north_west: GeoHash::new(b"u").unwrap(), south_west: GeoHash::new(b"s").unwrap(), south_east: GeoHash::new(b"t").unwrap(), north_east: 
GeoHash::new(b"v").unwrap(), }; let mut geo_area = rect.geohash_regions(1, 100).unwrap(); let mut expected = vec![ GeoHash::new(b"u").unwrap(), GeoHash::new(b"s").unwrap(), GeoHash::new(b"v").unwrap(), GeoHash::new(b"t").unwrap(), ]; geo_area.sort_unstable(); expected.sort_unstable(); assert_eq!(geo_area, expected); } #[test] fn nyc_rectangle_geo_area_high_precision() { let rect = GeohashBoundingBox { north_west: GeoHash::new(b"dr5ruj4477kd").unwrap(), south_west: GeoHash::new(b"dr5ru46ne2ux").unwrap(), south_east: GeoHash::new(b"dr5ru6ryw0cp").unwrap(), north_east: GeoHash::new(b"dr5rumpfq534").unwrap(), }; // calling `rect.geohash_regions()` is too expensive assert!(rect.geohash_regions(12, 100).is_none()); } #[test] fn nyc_rectangle_geo_area_medium_precision() { let rect = GeohashBoundingBox { north_west: GeoHash::new(b"dr5ruj4").unwrap(), south_west: GeoHash::new(b"dr5ru46").unwrap(), south_east: GeoHash::new(b"dr5ru6r").unwrap(), north_east: GeoHash::new(b"dr5rump").unwrap(), }; let geo_area = rect.geohash_regions(7, 1000).unwrap(); assert_eq!(14 * 12, geo_area.len()); } #[test] fn nyc_rectangle_geo_area_low_precision() { let rect = GeohashBoundingBox { north_west: GeoHash::new(b"dr5ruj").unwrap(), south_west: GeoHash::new(b"dr5ru4").unwrap(), south_east: GeoHash::new(b"dr5ru6").unwrap(), north_east: GeoHash::new(b"dr5rum").unwrap(), }; let mut geo_area = rect.geohash_regions(6, 100).unwrap(); let mut expected = vec![ GeoHash::new(b"dr5ru4").unwrap(), GeoHash::new(b"dr5ru5").unwrap(), GeoHash::new(b"dr5ru6").unwrap(), GeoHash::new(b"dr5ru7").unwrap(), GeoHash::new(b"dr5ruh").unwrap(), GeoHash::new(b"dr5ruj").unwrap(), GeoHash::new(b"dr5rum").unwrap(), GeoHash::new(b"dr5ruk").unwrap(), ]; expected.sort_unstable(); geo_area.sort_unstable(); assert_eq!(geo_area, expected); } #[test] fn rectangle_hashes_nyc() { // conversion to lon/lat http://geohash.co/ // "dr5ruj4477kd" let top_left = GeoPoint { lon: OrderedFloat(-74.00101399), lat: OrderedFloat(40.76517460), }; 
// "dr5ru6ryw0cp" let bottom_right = GeoPoint { lon: OrderedFloat(-73.98201792), lat: OrderedFloat(40.75078539), }; let near_nyc_rectangle = GeoBoundingBox { top_left, bottom_right, }; let nyc_hashes_result = rectangle_hashes(&near_nyc_rectangle, 200); let nyc_hashes = nyc_hashes_result.unwrap(); assert_eq!(nyc_hashes.len(), 168); assert!(nyc_hashes.iter().all(|h| h.len() == 7)); // geohash precision let mut nyc_hashes_result = rectangle_hashes(&near_nyc_rectangle, 10); nyc_hashes_result.as_mut().unwrap().sort_unstable(); let mut expected = vec![ GeoHash::new(b"dr5ruj").unwrap(), GeoHash::new(b"dr5ruh").unwrap(), GeoHash::new(b"dr5ru5").unwrap(), GeoHash::new(b"dr5ru4").unwrap(), GeoHash::new(b"dr5rum").unwrap(), GeoHash::new(b"dr5ruk").unwrap(), GeoHash::new(b"dr5ru7").unwrap(), GeoHash::new(b"dr5ru6").unwrap(), ]; expected.sort_unstable(); assert_eq!(nyc_hashes_result.unwrap(), expected); // Graphical proof using https://www.movable-type.co.uk/scripts/geohash.html // dr5rgy dr5run dr5ruq dr5ruw // dr5rgv dr5ruj dr5rum dr5rut // dr5rgu dr5ruh dr5ruk dr5rus // dr5rgg dr5ru5 dr5ru7 dr5rue // dr5rgf dr5ru4 dr5ru6 dr5rud // dr5rgc dr5ru1 dr5ru3 dr5ru9 // XXXXXX XXXXXX XXXXXX XXXXXX // XXXXXX dr5ruj dr5rum XXXXXX // XXXXXX dr5ruh dr5ruk XXXXXX // XXXXXX dr5ru5 dr5ru7 XXXXXX // XXXXXX dr5ru4 Xr5ru6 XXXXXX // XXXXXX XXXXXX XXXXXX XXXXXX // falls back to finest region that encompasses the whole area let nyc_hashes_result = rectangle_hashes(&near_nyc_rectangle, 7); assert_eq!( nyc_hashes_result.unwrap(), [GeoHash::new(b"dr5ru").unwrap()], ); } #[test] fn rectangle_hashes_crossing_antimeridian() { // conversion to lon/lat http://geohash.co/ // "ztnv2hjxn03k" let top_left = GeoPoint { lat: OrderedFloat(74.071028), lon: OrderedFloat(167.0), }; // "dr5ru7c02wnv" let bottom_right = GeoPoint { lat: OrderedFloat(40.75798), lon: OrderedFloat(-73.991516), }; let crossing_usa_rectangle = GeoBoundingBox { top_left, bottom_right, }; let usa_hashes_result = 
rectangle_hashes(&crossing_usa_rectangle, 200); let usa_hashes = usa_hashes_result.unwrap(); assert_eq!(usa_hashes.len(), 84); assert!(usa_hashes.iter().all(|h| h.len() == 2)); // low geohash precision let mut usa_hashes_result = rectangle_hashes(&crossing_usa_rectangle, 10); usa_hashes_result.as_mut().unwrap().sort_unstable(); let mut expected = vec![ GeoHash::new(b"8").unwrap(), GeoHash::new(b"9").unwrap(), GeoHash::new(b"b").unwrap(), GeoHash::new(b"c").unwrap(), GeoHash::new(b"d").unwrap(), GeoHash::new(b"f").unwrap(), GeoHash::new(b"x").unwrap(), GeoHash::new(b"z").unwrap(), ]; expected.sort_unstable(); assert_eq!(usa_hashes_result.unwrap(), expected); // Graphical proof using https://www.movable-type.co.uk/scripts/geohash.html // n p 0 1 4 5 // y z b c f g // w x 8 9 d e // q r 2 3 6 7 // - - - - - - // | z b c f | // | x 8 9 d | // - - - - - - } #[test] fn polygon_hashes_nyc() { // conversion to lon/lat http://geohash.co/ // "dr5ruj4477kd" let near_nyc_polygon = build_polygon(vec![ (-74.00101399, 40.76517460), (-73.98201792, 40.76517460), (-73.98201792, 40.75078539), (-74.00101399, 40.75078539), (-74.00101399, 40.76517460), ]); let nyc_hashes_result = polygon_hashes(&near_nyc_polygon, 200); let nyc_hashes = nyc_hashes_result.unwrap(); assert_eq!(nyc_hashes.len(), 168); assert!(nyc_hashes.iter().all(|h| h.len() == 7)); // geohash precision let mut nyc_hashes_result = polygon_hashes(&near_nyc_polygon, 10); nyc_hashes_result.as_mut().unwrap().sort_unstable(); let mut expected = vec![ GeoHash::new(b"dr5ruj").unwrap(), GeoHash::new(b"dr5ruh").unwrap(), GeoHash::new(b"dr5ru5").unwrap(), GeoHash::new(b"dr5ru4").unwrap(), GeoHash::new(b"dr5rum").unwrap(), GeoHash::new(b"dr5ruk").unwrap(), GeoHash::new(b"dr5ru7").unwrap(), GeoHash::new(b"dr5ru6").unwrap(), ]; expected.sort_unstable(); assert_eq!(nyc_hashes_result.unwrap(), expected); // falls back to finest region that encompasses the whole area let nyc_hashes_result = polygon_hashes(&near_nyc_polygon, 7); 
assert_eq!( nyc_hashes_result.unwrap(), [GeoHash::new(b"dr5ru").unwrap()], ); } #[test] fn random_circles() { let mut rnd = StdRng::seed_from_u64(42); for _ in 0..1000 { let r_meters = rnd.random_range(1.0..10000.0); let query = GeoRadius { center: GeoPoint::new_unchecked( rnd.random_range(LON_RANGE), rnd.random_range(LAT_RANGE), ), radius: OrderedFloat(r_meters), }; let max_hashes = rnd.random_range(1..32); let hashes = circle_hashes(&query, max_hashes); assert!(hashes.unwrap().len() <= max_hashes); } } #[test] fn test_check_polygon_intersection() { fn check_intersection(geohash: &str, polygon: &GeoPolygon, expected: bool) { let intersect = check_polygon_intersection(geohash, &polygon.convert().polygon); assert_eq!(intersect, expected); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/field_index_base.rs
lib/segment/src/index/field_index/field_index_base.rs
use std::fmt::Formatter; use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use serde_json::Value; use super::bool_index::BoolIndex; use super::bool_index::mutable_bool_index::MutableBoolIndexBuilder; use super::facet_index::FacetIndexEnum; use super::full_text_index::mmap_text_index::FullTextMmapIndexBuilder; use super::full_text_index::text_index::{FullTextGridstoreIndexBuilder, FullTextIndex}; use super::geo_index::{GeoMapIndexGridstoreBuilder, GeoMapIndexMmapBuilder}; #[cfg(feature = "rocksdb")] use super::map_index::MapIndexBuilder; use super::map_index::{MapIndex, MapIndexGridstoreBuilder, MapIndexMmapBuilder}; #[cfg(feature = "rocksdb")] use super::numeric_index::NumericIndexBuilder; use super::numeric_index::{ NumericIndex, NumericIndexGridstoreBuilder, NumericIndexMmapBuilder, StreamRange, }; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::data_types::order_by::OrderValue; use crate::index::field_index::geo_index::GeoMapIndex; use crate::index::field_index::null_index::MutableNullIndex; use crate::index::field_index::null_index::mutable_null_index::MutableNullIndexBuilder; use crate::index::field_index::numeric_index::NumericIndexInner; use crate::index::field_index::{CardinalityEstimation, PayloadBlockCondition}; use crate::index::payload_config::{ FullPayloadIndexType, IndexMutability, PayloadIndexType, StorageType, }; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{ DateTimePayloadType, FieldCondition, FloatPayloadType, IntPayloadType, Match, MatchPhrase, MatchText, PayloadKeyType, RangeInterface, UuidIntType, UuidPayloadType, }; pub trait PayloadFieldIndex { /// Return number of points with at least one value indexed in here fn count_indexed_points(&self) -> usize; /// Remove db content or files of the current payload index fn wipe(self) -> OperationResult<()>; /// Return function that flushes all pending updates to disk. 
fn flusher(&self) -> Flusher; fn files(&self) -> Vec<PathBuf>; fn immutable_files(&self) -> Vec<PathBuf>; /// Get iterator over points fitting given `condition` /// Return `None` if condition does not match the index type fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>>; /// Return estimation of amount of points which satisfy given condition. /// Returns `None` if the condition does not match the index type fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation>; /// Iterate conditions for payload blocks with minimum size of `threshold` /// Required for building HNSW index fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_>; } pub trait ValueIndexer { type ValueType; /// Add multiple values associated with a single point /// This function should be called only once for each point fn add_many( &mut self, id: PointOffsetType, values: Vec<Self::ValueType>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Extract index-able value from payload `Value` fn get_value(value: &Value) -> Option<Self::ValueType>; /// Try to extract index-able values from payload `Value`, even if it is an array fn get_values(value: &Value) -> Vec<Self::ValueType> { match value { Value::Array(values) => values.iter().filter_map(|x| Self::get_value(x)).collect(), _ => Self::get_value(value).map(|x| vec![x]).unwrap_or_default(), } } /// Add point with payload to index fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.remove_point(id)?; let mut flatten_values: Vec<_> = vec![]; for value in payload.iter() { match value { Value::Array(values) => { flatten_values.extend(values.iter().filter_map(|x| Self::get_value(x))); } _ => { if let Some(x) = 
Self::get_value(value) { flatten_values.push(x); } } } } self.add_many(id, flatten_values, hw_counter) } /// remove a point from the index fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()>; } /// Common interface for all possible types of field indexes /// Enables polymorphism on field indexes pub enum FieldIndex { IntIndex(NumericIndex<IntPayloadType, IntPayloadType>), DatetimeIndex(NumericIndex<IntPayloadType, DateTimePayloadType>), IntMapIndex(MapIndex<IntPayloadType>), KeywordIndex(MapIndex<str>), FloatIndex(NumericIndex<FloatPayloadType, FloatPayloadType>), GeoIndex(GeoMapIndex), FullTextIndex(FullTextIndex), BoolIndex(BoolIndex), UuidIndex(NumericIndex<UuidIntType, UuidPayloadType>), UuidMapIndex(MapIndex<UuidIntType>), NullIndex(MutableNullIndex), } impl std::fmt::Debug for FieldIndex { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { FieldIndex::IntIndex(_index) => write!(f, "IntIndex"), FieldIndex::DatetimeIndex(_index) => write!(f, "DatetimeIndex"), FieldIndex::IntMapIndex(_index) => write!(f, "IntMapIndex"), FieldIndex::KeywordIndex(_index) => write!(f, "KeywordIndex"), FieldIndex::FloatIndex(_index) => write!(f, "FloatIndex"), FieldIndex::GeoIndex(_index) => write!(f, "GeoIndex"), FieldIndex::BoolIndex(_index) => write!(f, "BoolIndex"), FieldIndex::FullTextIndex(_index) => write!(f, "FullTextIndex"), FieldIndex::UuidIndex(_index) => write!(f, "UuidIndex"), FieldIndex::UuidMapIndex(_index) => write!(f, "UuidMapIndex"), FieldIndex::NullIndex(_index) => write!(f, "NullIndex"), } } } impl FieldIndex { /// Try to check condition for a payload given a field index. /// Required because some index parameters may influence the condition checking logic. /// For example, full text index may have different tokenizers. 
/// /// Returns `None` if there is no special logic for the given index /// returns `Some(true)` if condition is satisfied /// returns `Some(false)` if condition is not satisfied pub fn special_check_condition( &self, condition: &FieldCondition, payload_value: &Value, hw_counter: &HardwareCounterCell, ) -> Option<bool> { match self { FieldIndex::IntIndex(_) => None, FieldIndex::DatetimeIndex(_) => None, FieldIndex::IntMapIndex(_) => None, FieldIndex::KeywordIndex(_) => None, FieldIndex::FloatIndex(_) => None, FieldIndex::GeoIndex(_) => None, FieldIndex::BoolIndex(_) => None, FieldIndex::FullTextIndex(full_text_index) => match &condition.r#match { Some(Match::Text(MatchText { text })) => Some( full_text_index.check_payload_match::<false>(payload_value, text, hw_counter), ), Some(Match::Phrase(MatchPhrase { phrase })) => Some( full_text_index.check_payload_match::<true>(payload_value, phrase, hw_counter), ), _ => None, }, FieldIndex::UuidIndex(_) => None, FieldIndex::UuidMapIndex(_) => None, FieldIndex::NullIndex(_) => None, } } fn get_payload_field_index(&self) -> &dyn PayloadFieldIndex { match self { FieldIndex::IntIndex(payload_field_index) => payload_field_index.inner(), FieldIndex::DatetimeIndex(payload_field_index) => payload_field_index.inner(), FieldIndex::IntMapIndex(payload_field_index) => payload_field_index, FieldIndex::KeywordIndex(payload_field_index) => payload_field_index, FieldIndex::FloatIndex(payload_field_index) => payload_field_index.inner(), FieldIndex::GeoIndex(payload_field_index) => payload_field_index, FieldIndex::BoolIndex(payload_field_index) => payload_field_index, FieldIndex::FullTextIndex(payload_field_index) => payload_field_index, FieldIndex::UuidIndex(payload_field_index) => payload_field_index.inner(), FieldIndex::UuidMapIndex(payload_field_index) => payload_field_index, FieldIndex::NullIndex(payload_field_index) => payload_field_index, } } pub fn wipe(self) -> OperationResult<()> { match self { FieldIndex::IntIndex(index) => 
index.wipe(), FieldIndex::DatetimeIndex(index) => index.wipe(), FieldIndex::IntMapIndex(index) => index.wipe(), FieldIndex::KeywordIndex(index) => index.wipe(), FieldIndex::FloatIndex(index) => index.wipe(), FieldIndex::GeoIndex(index) => index.wipe(), FieldIndex::BoolIndex(index) => index.wipe(), FieldIndex::FullTextIndex(index) => index.wipe(), FieldIndex::UuidIndex(index) => index.wipe(), FieldIndex::UuidMapIndex(index) => index.wipe(), FieldIndex::NullIndex(index) => index.wipe(), } } pub fn count_indexed_points(&self) -> usize { self.get_payload_field_index().count_indexed_points() } pub fn flusher(&self) -> Flusher { self.get_payload_field_index().flusher() } pub fn files(&self) -> Vec<PathBuf> { self.get_payload_field_index().files() } pub fn immutable_files(&self) -> Vec<PathBuf> { self.get_payload_field_index().immutable_files() } pub fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { self.get_payload_field_index().filter(condition, hw_counter) } pub fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { self.get_payload_field_index() .estimate_cardinality(condition, hw_counter) } pub fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { self.get_payload_field_index() .payload_blocks(threshold, key) } pub fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { FieldIndex::IntIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::DatetimeIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::IntMapIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::KeywordIndex(payload_field_index) => { 
payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::FloatIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::GeoIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::BoolIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::FullTextIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::UuidIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::UuidMapIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } FieldIndex::NullIndex(payload_field_index) => { payload_field_index.add_point(id, payload, hw_counter) } } } pub fn remove_point(&mut self, point_id: PointOffsetType) -> OperationResult<()> { match self { FieldIndex::IntIndex(index) => index.mut_inner().remove_point(point_id), FieldIndex::DatetimeIndex(index) => index.mut_inner().remove_point(point_id), FieldIndex::IntMapIndex(index) => index.remove_point(point_id), FieldIndex::KeywordIndex(index) => index.remove_point(point_id), FieldIndex::FloatIndex(index) => index.mut_inner().remove_point(point_id), FieldIndex::GeoIndex(index) => index.remove_point(point_id), FieldIndex::BoolIndex(index) => index.remove_point(point_id), FieldIndex::FullTextIndex(index) => index.remove_point(point_id), FieldIndex::UuidIndex(index) => index.remove_point(point_id), FieldIndex::UuidMapIndex(index) => index.remove_point(point_id), FieldIndex::NullIndex(index) => index.remove_point(point_id), } } pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry { match self { FieldIndex::IntIndex(index) => index.get_telemetry_data(), FieldIndex::DatetimeIndex(index) => index.get_telemetry_data(), FieldIndex::IntMapIndex(index) => index.get_telemetry_data(), FieldIndex::KeywordIndex(index) => index.get_telemetry_data(), FieldIndex::FloatIndex(index) => 
index.get_telemetry_data(), FieldIndex::GeoIndex(index) => index.get_telemetry_data(), FieldIndex::BoolIndex(index) => index.get_telemetry_data(), FieldIndex::FullTextIndex(index) => index.get_telemetry_data(), FieldIndex::UuidIndex(index) => index.get_telemetry_data(), FieldIndex::UuidMapIndex(index) => index.get_telemetry_data(), FieldIndex::NullIndex(index) => index.get_telemetry_data(), } } pub fn values_count(&self, point_id: PointOffsetType) -> usize { match self { FieldIndex::IntIndex(index) => index.values_count(point_id), FieldIndex::DatetimeIndex(index) => index.values_count(point_id), FieldIndex::IntMapIndex(index) => index.values_count(point_id), FieldIndex::KeywordIndex(index) => index.values_count(point_id), FieldIndex::FloatIndex(index) => index.values_count(point_id), FieldIndex::GeoIndex(index) => index.values_count(point_id), FieldIndex::BoolIndex(index) => index.values_count(point_id), FieldIndex::FullTextIndex(index) => index.values_count(point_id), FieldIndex::UuidIndex(index) => index.values_count(point_id), FieldIndex::UuidMapIndex(index) => index.values_count(point_id), FieldIndex::NullIndex(index) => index.values_count(point_id), } } pub fn values_is_empty(&self, point_id: PointOffsetType) -> bool { match self { FieldIndex::IntIndex(index) => index.values_is_empty(point_id), FieldIndex::DatetimeIndex(index) => index.values_is_empty(point_id), FieldIndex::IntMapIndex(index) => index.values_is_empty(point_id), FieldIndex::KeywordIndex(index) => index.values_is_empty(point_id), FieldIndex::FloatIndex(index) => index.values_is_empty(point_id), FieldIndex::GeoIndex(index) => index.values_is_empty(point_id), FieldIndex::BoolIndex(index) => index.values_is_empty(point_id), FieldIndex::FullTextIndex(index) => index.values_is_empty(point_id), FieldIndex::UuidIndex(index) => index.values_is_empty(point_id), FieldIndex::UuidMapIndex(index) => index.values_is_empty(point_id), FieldIndex::NullIndex(index) => index.values_is_empty(point_id), } } pub fn 
as_numeric(&self) -> Option<NumericFieldIndex<'_>> { match self { FieldIndex::IntIndex(index) => Some(NumericFieldIndex::IntIndex(index.inner())), FieldIndex::DatetimeIndex(index) => Some(NumericFieldIndex::IntIndex(index.inner())), FieldIndex::FloatIndex(index) => Some(NumericFieldIndex::FloatIndex(index.inner())), FieldIndex::IntMapIndex(_) | FieldIndex::KeywordIndex(_) | FieldIndex::GeoIndex(_) | FieldIndex::BoolIndex(_) | FieldIndex::UuidMapIndex(_) | FieldIndex::UuidIndex(_) | FieldIndex::FullTextIndex(_) | FieldIndex::NullIndex(_) => None, } } pub fn as_facet_index(&self) -> Option<FacetIndexEnum<'_>> { match self { FieldIndex::KeywordIndex(index) => Some(FacetIndexEnum::Keyword(index)), FieldIndex::IntMapIndex(index) => Some(FacetIndexEnum::Int(index)), FieldIndex::UuidMapIndex(index) => Some(FacetIndexEnum::Uuid(index)), FieldIndex::BoolIndex(index) => Some(FacetIndexEnum::Bool(index)), FieldIndex::UuidIndex(_) | FieldIndex::IntIndex(_) | FieldIndex::DatetimeIndex(_) | FieldIndex::FloatIndex(_) | FieldIndex::GeoIndex(_) | FieldIndex::FullTextIndex(_) | FieldIndex::NullIndex(_) => None, } } pub fn is_on_disk(&self) -> bool { match self { FieldIndex::IntIndex(index) => index.is_on_disk(), FieldIndex::DatetimeIndex(index) => index.is_on_disk(), FieldIndex::IntMapIndex(index) => index.is_on_disk(), FieldIndex::KeywordIndex(index) => index.is_on_disk(), FieldIndex::FloatIndex(index) => index.is_on_disk(), FieldIndex::GeoIndex(index) => index.is_on_disk(), FieldIndex::BoolIndex(index) => index.is_on_disk(), FieldIndex::FullTextIndex(index) => index.is_on_disk(), FieldIndex::UuidIndex(index) => index.is_on_disk(), FieldIndex::UuidMapIndex(index) => index.is_on_disk(), FieldIndex::NullIndex(index) => index.is_on_disk(), } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self { FieldIndex::IntIndex(index) => index.is_rocksdb(), FieldIndex::DatetimeIndex(index) => index.is_rocksdb(), FieldIndex::IntMapIndex(index) => index.is_rocksdb(), 
FieldIndex::KeywordIndex(index) => index.is_rocksdb(), FieldIndex::FloatIndex(index) => index.is_rocksdb(), FieldIndex::GeoIndex(index) => index.is_rocksdb(), FieldIndex::BoolIndex(index) => index.is_rocksdb(), FieldIndex::FullTextIndex(index) => index.is_rocksdb(), FieldIndex::UuidIndex(index) => index.is_rocksdb(), FieldIndex::UuidMapIndex(index) => index.is_rocksdb(), FieldIndex::NullIndex(_) => false, } } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { match self { FieldIndex::IntIndex(index) => index.populate(), FieldIndex::DatetimeIndex(index) => index.populate(), FieldIndex::IntMapIndex(index) => index.populate(), FieldIndex::KeywordIndex(index) => index.populate(), FieldIndex::FloatIndex(index) => index.populate(), FieldIndex::GeoIndex(index) => index.populate(), FieldIndex::BoolIndex(index) => index.populate(), FieldIndex::FullTextIndex(index) => index.populate(), FieldIndex::UuidIndex(index) => index.populate(), FieldIndex::UuidMapIndex(index) => index.populate(), FieldIndex::NullIndex(index) => index.populate(), } } /// Drop disk cache. 
pub fn clear_cache(&self) -> OperationResult<()> { match self { FieldIndex::IntIndex(index) => index.clear_cache(), FieldIndex::DatetimeIndex(index) => index.clear_cache(), FieldIndex::IntMapIndex(index) => index.clear_cache(), FieldIndex::KeywordIndex(index) => index.clear_cache(), FieldIndex::FloatIndex(index) => index.clear_cache(), FieldIndex::GeoIndex(index) => index.clear_cache(), FieldIndex::BoolIndex(index) => index.clear_cache(), FieldIndex::FullTextIndex(index) => index.clear_cache(), FieldIndex::UuidIndex(index) => index.clear_cache(), FieldIndex::UuidMapIndex(index) => index.clear_cache(), FieldIndex::NullIndex(index) => index.clear_cache(), } } pub fn get_full_index_type(&self) -> FullPayloadIndexType { let index_type = match self { FieldIndex::IntIndex(_) => PayloadIndexType::IntIndex, FieldIndex::DatetimeIndex(_) => PayloadIndexType::DatetimeIndex, FieldIndex::IntMapIndex(_) => PayloadIndexType::IntMapIndex, FieldIndex::KeywordIndex(_) => PayloadIndexType::KeywordIndex, FieldIndex::FloatIndex(_) => PayloadIndexType::FloatIndex, FieldIndex::GeoIndex(_) => PayloadIndexType::GeoIndex, FieldIndex::FullTextIndex(_) => PayloadIndexType::FullTextIndex, FieldIndex::BoolIndex(_) => PayloadIndexType::BoolIndex, FieldIndex::UuidIndex(_) => PayloadIndexType::UuidIndex, FieldIndex::UuidMapIndex(_) => PayloadIndexType::UuidMapIndex, FieldIndex::NullIndex(_) => PayloadIndexType::NullIndex, }; FullPayloadIndexType { index_type, mutability: self.get_mutability_type(), storage_type: self.get_storage_type(), } } fn get_mutability_type(&self) -> IndexMutability { match self { FieldIndex::IntIndex(index) => index.get_mutability_type(), FieldIndex::DatetimeIndex(index) => index.get_mutability_type(), FieldIndex::IntMapIndex(index) => index.get_mutability_type(), FieldIndex::KeywordIndex(index) => index.get_mutability_type(), FieldIndex::FloatIndex(index) => index.get_mutability_type(), FieldIndex::GeoIndex(index) => index.get_mutability_type(), 
FieldIndex::FullTextIndex(index) => index.get_mutability_type(), FieldIndex::BoolIndex(index) => index.get_mutability_type(), FieldIndex::UuidIndex(index) => index.get_mutability_type(), FieldIndex::UuidMapIndex(index) => index.get_mutability_type(), FieldIndex::NullIndex(index) => index.get_mutability_type(), } } fn get_storage_type(&self) -> StorageType { match self { FieldIndex::IntIndex(index) => index.get_storage_type(), FieldIndex::DatetimeIndex(index) => index.get_storage_type(), FieldIndex::IntMapIndex(index) => index.get_storage_type(), FieldIndex::KeywordIndex(index) => index.get_storage_type(), FieldIndex::FloatIndex(index) => index.get_storage_type(), FieldIndex::GeoIndex(index) => index.get_storage_type(), FieldIndex::FullTextIndex(index) => index.get_storage_type(), FieldIndex::BoolIndex(index) => index.get_storage_type(), FieldIndex::UuidIndex(index) => index.get_storage_type(), FieldIndex::UuidMapIndex(index) => index.get_storage_type(), FieldIndex::NullIndex(index) => index.get_storage_type(), } } } /// Common interface for all index builders. pub trait FieldIndexBuilderTrait { /// The resulting type of the index. type FieldIndexType; /// Start building the index, e.g. create a database column or a directory. /// Expected to be called exactly once before any other method. fn init(&mut self) -> OperationResult<()>; fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; fn finalize(self) -> OperationResult<Self::FieldIndexType>; /// Create an empty index for testing purposes. 
#[cfg(test)] fn make_empty(mut self) -> OperationResult<Self::FieldIndexType> where Self: Sized, { self.init()?; self.finalize() } } /// Builders for all index types pub enum FieldIndexBuilder { #[cfg(feature = "rocksdb")] IntIndex(NumericIndexBuilder<IntPayloadType, IntPayloadType>), IntMmapIndex(NumericIndexMmapBuilder<IntPayloadType, IntPayloadType>), IntGridstoreIndex(NumericIndexGridstoreBuilder<IntPayloadType, IntPayloadType>), #[cfg(feature = "rocksdb")] DatetimeIndex(NumericIndexBuilder<IntPayloadType, DateTimePayloadType>), DatetimeMmapIndex(NumericIndexMmapBuilder<IntPayloadType, DateTimePayloadType>), DatetimeGridstoreIndex(NumericIndexGridstoreBuilder<IntPayloadType, DateTimePayloadType>), #[cfg(feature = "rocksdb")] IntMapIndex(MapIndexBuilder<IntPayloadType>), IntMapMmapIndex(MapIndexMmapBuilder<IntPayloadType>), IntMapGridstoreIndex(MapIndexGridstoreBuilder<IntPayloadType>), #[cfg(feature = "rocksdb")] KeywordIndex(MapIndexBuilder<str>), KeywordMmapIndex(MapIndexMmapBuilder<str>), KeywordGridstoreIndex(MapIndexGridstoreBuilder<str>), #[cfg(feature = "rocksdb")] FloatIndex(NumericIndexBuilder<FloatPayloadType, FloatPayloadType>), FloatMmapIndex(NumericIndexMmapBuilder<FloatPayloadType, FloatPayloadType>), FloatGridstoreIndex(NumericIndexGridstoreBuilder<FloatPayloadType, FloatPayloadType>), #[cfg(feature = "rocksdb")] GeoIndex(super::geo_index::GeoMapIndexBuilder), GeoMmapIndex(GeoMapIndexMmapBuilder), GeoGridstoreIndex(GeoMapIndexGridstoreBuilder), #[cfg(feature = "rocksdb")] FullTextIndex(super::full_text_index::text_index::FullTextIndexRocksDbBuilder), FullTextMmapIndex(FullTextMmapIndexBuilder), FullTextGridstoreIndex(FullTextGridstoreIndexBuilder), #[cfg(feature = "rocksdb")] BoolIndex(super::bool_index::simple_bool_index::BoolIndexBuilder), BoolMmapIndex(MutableBoolIndexBuilder), #[cfg(feature = "rocksdb")] UuidIndex(MapIndexBuilder<UuidIntType>), UuidMmapIndex(MapIndexMmapBuilder<UuidIntType>), 
UuidGridstoreIndex(MapIndexGridstoreBuilder<UuidIntType>), NullIndex(MutableNullIndexBuilder), } impl FieldIndexBuilderTrait for FieldIndexBuilder { type FieldIndexType = FieldIndex; fn init(&mut self) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] Self::IntIndex(index) => index.init(), Self::IntMmapIndex(index) => index.init(), Self::IntGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::DatetimeIndex(index) => index.init(), Self::DatetimeMmapIndex(index) => index.init(), Self::DatetimeGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::IntMapIndex(index) => index.init(), Self::IntMapMmapIndex(index) => index.init(), Self::IntMapGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::KeywordIndex(index) => index.init(), Self::KeywordMmapIndex(index) => index.init(), Self::KeywordGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::FloatIndex(index) => index.init(), Self::FloatMmapIndex(index) => index.init(), Self::FloatGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::GeoIndex(index) => index.init(), Self::GeoMmapIndex(index) => index.init(), Self::GeoGridstoreIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::BoolIndex(index) => index.init(), Self::BoolMmapIndex(index) => index.init(), #[cfg(feature = "rocksdb")] Self::FullTextIndex(index) => index.init(), Self::FullTextMmapIndex(builder) => builder.init(), Self::FullTextGridstoreIndex(builder) => builder.init(), #[cfg(feature = "rocksdb")] Self::UuidIndex(index) => index.init(), Self::UuidMmapIndex(index) => index.init(), Self::UuidGridstoreIndex(index) => index.init(), Self::NullIndex(index) => index.init(), } } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] Self::IntIndex(index) => index.add_point(id, payload, hw_counter), Self::IntMmapIndex(index) => 
index.add_point(id, payload, hw_counter), Self::IntGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::DatetimeIndex(index) => index.add_point(id, payload, hw_counter), Self::DatetimeMmapIndex(index) => index.add_point(id, payload, hw_counter), Self::DatetimeGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::IntMapIndex(index) => index.add_point(id, payload, hw_counter), Self::IntMapMmapIndex(index) => index.add_point(id, payload, hw_counter), Self::IntMapGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::KeywordIndex(index) => index.add_point(id, payload, hw_counter), Self::KeywordMmapIndex(index) => index.add_point(id, payload, hw_counter), Self::KeywordGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::FloatIndex(index) => index.add_point(id, payload, hw_counter), Self::FloatMmapIndex(index) => index.add_point(id, payload, hw_counter), Self::FloatGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::GeoIndex(index) => index.add_point(id, payload, hw_counter), Self::GeoMmapIndex(index) => index.add_point(id, payload, hw_counter), Self::GeoGridstoreIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::BoolIndex(index) => index.add_point(id, payload, hw_counter), Self::BoolMmapIndex(index) => index.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] Self::FullTextIndex(index) => index.add_point(id, payload, hw_counter), Self::FullTextMmapIndex(builder) => { FieldIndexBuilderTrait::add_point(builder, id, payload, hw_counter) } Self::FullTextGridstoreIndex(builder) => { FieldIndexBuilderTrait::add_point(builder, id, payload, hw_counter) } #[cfg(feature = "rocksdb")] Self::UuidIndex(index) => index.add_point(id, payload, hw_counter), Self::UuidMmapIndex(index) => 
index.add_point(id, payload, hw_counter), Self::UuidGridstoreIndex(index) => index.add_point(id, payload, hw_counter), Self::NullIndex(index) => index.add_point(id, payload, hw_counter), } } fn finalize(self) -> OperationResult<FieldIndex> { Ok(match self { #[cfg(feature = "rocksdb")] Self::IntIndex(index) => FieldIndex::IntIndex(index.finalize()?), Self::IntMmapIndex(index) => FieldIndex::IntIndex(index.finalize()?), Self::IntGridstoreIndex(index) => FieldIndex::IntIndex(index.finalize()?), #[cfg(feature = "rocksdb")] Self::DatetimeIndex(index) => FieldIndex::DatetimeIndex(index.finalize()?), Self::DatetimeMmapIndex(index) => FieldIndex::DatetimeIndex(index.finalize()?),
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/immutable_point_to_values.rs
lib/segment/src/index/field_index/immutable_point_to_values.rs
use std::ops::Range; use common::types::PointOffsetType; // Flatten points-to-values map // It's an analogue of `Vec<Vec<N>>` but more RAM efficient because it stores values in a single Vec. // This structure doesn't support adding new values, only removing. // It's used in immutable field indices like `ImmutableMapIndex`, `ImmutableNumericIndex`, etc to store points-to-values map. #[derive(Debug, Clone, Default)] pub struct ImmutablePointToValues<N: Default> { // ranges in `point_to_values_container` which contains values for each point // `u32` is used instead of `usize` because it's more RAM efficient // We can expect that we will never have more than 4 billion values per segment point_to_values: Vec<Range<u32>>, // flattened values point_to_values_container: Vec<N>, } impl<N: Default> ImmutablePointToValues<N> { pub fn new(src: Vec<Vec<N>>) -> Self { let mut point_to_values = Vec::with_capacity(src.len()); let all_values_count = src.iter().fold(0, |acc, values| acc + values.len()); let mut point_to_values_container = Vec::with_capacity(all_values_count); for values in src { let container_len = point_to_values_container.len() as u32; let range = container_len..container_len + values.len() as u32; point_to_values.push(range.clone()); point_to_values_container.extend(values); } Self { point_to_values, point_to_values_container, } } pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl FnMut(&N) -> bool) -> bool { let Some(range) = self.point_to_values.get(idx as usize).cloned() else { return false; }; let range = range.start as usize..range.end as usize; if let Some(values) = self.point_to_values_container.get(range) { values.iter().any(check_fn) } else { false } } pub fn get_values(&self, idx: PointOffsetType) -> Option<impl Iterator<Item = &N> + '_> { let range = self.point_to_values.get(idx as usize)?.clone(); let range = range.start as usize..range.end as usize; Some(self.point_to_values_container[range].iter()) } pub fn get_values_count(&self, 
idx: PointOffsetType) -> Option<usize> { self.point_to_values .get(idx as usize) .map(|range| (range.end - range.start) as usize) } pub fn remove_point(&mut self, idx: PointOffsetType) -> Vec<N> { if self.point_to_values.len() <= idx as usize { return Default::default(); } let removed_values_range = self.point_to_values[idx as usize].clone(); self.point_to_values[idx as usize] = Default::default(); let mut result = Vec::with_capacity(removed_values_range.len()); for value_index in removed_values_range { // deleted values still use RAM, but it's not a problem because optimizers will actually reduce RAM usage let value = std::mem::take(&mut self.point_to_values_container[value_index as usize]); result.push(value); } result } } #[cfg(test)] mod tests { use super::*; #[test] fn test_immutable_point_to_values_remove() { let mut values = vec![ vec![0, 1, 2, 3, 4], vec![5, 6, 7, 8, 9], vec![0, 1, 2, 3, 4], vec![5, 6, 7, 8, 9], vec![10, 11, 12], vec![], vec![13], vec![14, 15], ]; let mut point_to_values = ImmutablePointToValues::new(values.clone()); let check = |point_to_values: &ImmutablePointToValues<_>, values: &[Vec<_>]| { for (idx, values) in values.iter().enumerate() { let values_vec: Option<Vec<_>> = point_to_values .get_values(idx as PointOffsetType) .map(|i| i.copied().collect()); assert_eq!(values_vec, Some(values.clone()),); } }; check(&point_to_values, values.as_slice()); point_to_values.remove_point(0); values[0].clear(); check(&point_to_values, values.as_slice()); point_to_values.remove_point(3); values[3].clear(); check(&point_to_values, values.as_slice()); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/mmap_point_to_values.rs
lib/segment/src/index/field_index/mmap_point_to_values.rs
use std::path::{Path, PathBuf};

use common::counter::conditioned_counter::ConditionedCounter;
use common::types::PointOffsetType;
use memmap2::Mmap;
use memory::fadvise::clear_disk_cache;
use memory::madvise::{AdviceSetting, Madviseable};
use memory::mmap_ops::{create_and_ensure_length, open_write_mmap};
use ordered_float::OrderedFloat;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

use crate::common::operation_error::{OperationError, OperationResult};
use crate::types::{FloatPayloadType, GeoPoint, IntPayloadType, UuidIntType};

const POINT_TO_VALUES_PATH: &str = "point_to_values.bin";
const NOT_ENOUGHT_BYTES_ERROR_MESSAGE: &str = "Not enough bytes to operate with memmapped file `point_to_values.bin`. Is the storage corrupted?";
// Header lives at the start of the file; ranges begin at this offset.
const PADDING_SIZE: usize = 4096;

/// Trait for values that can be stored in memmapped file. It's used in `MmapPointToValues` to store values.
pub trait MmapValue {
    /// Lifetime `'a` is required to define lifetime for `&'a str` case
    type Referenced<'a>: Sized + Clone;

    /// Serialized size in bytes of `value` inside the mmapped file.
    fn mmapped_size(value: Self::Referenced<'_>) -> usize;

    /// Deserialize a value from the prefix of `bytes`; `None` on malformed data.
    fn read_from_mmap(bytes: &[u8]) -> Option<Self::Referenced<'_>>;

    /// Serialize `value` into the prefix of `bytes`; `None` if the buffer is too short.
    fn write_to_mmap(value: Self::Referenced<'_>, bytes: &mut [u8]) -> Option<()>;

    /// Borrow the underlying value from its referenced form.
    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self;

    /// Produce the referenced form of `self`.
    fn as_referenced(&self) -> Self::Referenced<'_>;
}

impl MmapValue for IntPayloadType {
    type Referenced<'a> = &'a Self;

    fn mmapped_size(_value: Self::Referenced<'_>) -> usize {
        std::mem::size_of::<Self>()
    }

    fn read_from_mmap(bytes: &[u8]) -> Option<Self::Referenced<'_>> {
        Some(Self::ref_from_prefix(bytes).ok()?.0)
    }

    fn write_to_mmap(value: Self::Referenced<'_>, bytes: &mut [u8]) -> Option<()> {
        value.write_to_prefix(bytes).ok()
    }

    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self {
        value
    }

    fn as_referenced(&self) -> Self::Referenced<'_> {
        self
    }
}

impl MmapValue for FloatPayloadType {
    // Floats are small enough to be passed by value.
    type Referenced<'a> = Self;

    fn mmapped_size(_value: Self) -> usize {
        std::mem::size_of::<Self>()
    }

    fn read_from_mmap(bytes: &[u8]) -> Option<Self> {
        Some(*Self::ref_from_prefix(bytes).ok()?.0)
    }

    fn write_to_mmap(value: Self, bytes: &mut [u8]) -> Option<()> {
        value.write_to_prefix(bytes).ok()
    }

    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self {
        value
    }

    fn as_referenced(&self) -> Self::Referenced<'_> {
        *self
    }
}

impl MmapValue for UuidIntType {
    type Referenced<'a> = &'a Self;

    fn mmapped_size(_value: Self::Referenced<'_>) -> usize {
        std::mem::size_of::<Self>()
    }

    fn read_from_mmap(bytes: &[u8]) -> Option<Self::Referenced<'_>> {
        Some(Self::ref_from_prefix(bytes).ok()?.0)
    }

    fn write_to_mmap(value: Self::Referenced<'_>, bytes: &mut [u8]) -> Option<()> {
        value.write_to_prefix(bytes).ok()
    }

    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self {
        value
    }

    fn as_referenced(&self) -> Self::Referenced<'_> {
        self
    }
}

impl MmapValue for GeoPoint {
    type Referenced<'a> = Self;

    // Stored as two consecutive f64: lon then lat.
    fn mmapped_size(_value: Self) -> usize {
        2 * std::mem::size_of::<f64>()
    }

    fn read_from_mmap(bytes: &[u8]) -> Option<Self> {
        let (lon, bytes) = f64::read_from_prefix(bytes).ok()?;
        let (lat, _) = f64::read_from_prefix(bytes).ok()?;

        Some(Self {
            lon: OrderedFloat(lon),
            lat: OrderedFloat(lat),
        })
    }

    fn write_to_mmap(value: Self, bytes: &mut [u8]) -> Option<()> {
        value.lon.write_to_prefix(bytes).ok()?;
        bytes
            .get_mut(std::mem::size_of::<f64>()..)
            .and_then(|bytes| value.lat.write_to_prefix(bytes).ok())
    }

    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self {
        value
    }

    fn as_referenced(&self) -> Self::Referenced<'_> {
        *self
    }
}

impl MmapValue for str {
    type Referenced<'a> = &'a str;

    // Layout: u32 byte length, then the raw UTF-8 bytes.
    fn mmapped_size(value: &str) -> usize {
        value.len() + std::mem::size_of::<u32>()
    }

    fn read_from_mmap(bytes: &[u8]) -> Option<&str> {
        let (size, bytes) = u32::read_from_prefix(bytes).ok()?;
        let bytes = bytes.get(..size as usize)?;
        std::str::from_utf8(bytes).ok()
    }

    fn write_to_mmap(value: &str, bytes: &mut [u8]) -> Option<()> {
        u32::write_to_prefix(&(value.len() as u32), bytes).ok()?;
        bytes
            .get_mut(std::mem::size_of::<u32>()..std::mem::size_of::<u32>() + value.len())?
            .copy_from_slice(value.as_bytes());
        Some(())
    }

    fn from_referenced<'a>(value: &'a Self::Referenced<'_>) -> &'a Self {
        value
    }

    fn as_referenced(&self) -> Self::Referenced<'_> {
        self
    }
}

/// Flattened memmapped points-to-values map
/// It's an analogue of `Vec<Vec<N>>` but in memmapped file.
/// This structure doesn't support adding new values, only removing.
/// It's used in mmap field indices like `MmapMapIndex`, `MmapNumericIndex`, etc to store points-to-values map.
/// This structure is not generic to avoid boxing lifetimes for `&str` values.
pub struct MmapPointToValues<T: MmapValue + ?Sized> {
    file_name: PathBuf,
    mmap: Mmap,
    header: Header,
    phantom: std::marker::PhantomData<T>,
}

/// Memory and IO overhead of accessing mmap index.
pub const MMAP_PTV_ACCESS_OVERHEAD: usize = size_of::<MmapRange>();

// Per-point slot in the ranges table: byte offset of the first value and value count.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct MmapRange {
    start: u64,
    count: u64,
}

// File header written at offset 0.
#[repr(C)]
#[derive(Copy, Clone, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct Header {
    ranges_start: u64,
    points_count: u64,
}

impl<T: MmapValue + ?Sized> MmapPointToValues<T> {
    /// Create the file at `path` from a (re-iterable) stream of `(point_id, values)`.
    ///
    /// The file is sized from the maximum point id seen (ids are used as direct
    /// indices into the ranges table), so sparse ids cost table space.
    pub fn from_iter<'a>(
        path: &Path,
        iter: impl Iterator<Item = (PointOffsetType, impl Iterator<Item = T::Referenced<'a>>)> + Clone,
    ) -> OperationResult<Self> {
        // calculate file size
        let points_count = iter
            .clone()
            .map(|(point_id, _)| (point_id + 1) as usize)
            .max()
            .unwrap_or(0);
        let ranges_size = points_count * std::mem::size_of::<MmapRange>();
        let values_size = iter
            .clone()
            .map(|v| v.1.map(|v| T::mmapped_size(v)).sum::<usize>())
            .sum::<usize>();
        let file_size = PADDING_SIZE + ranges_size + values_size;

        // create new file and mmap
        let file_name = path.join(POINT_TO_VALUES_PATH);
        create_and_ensure_length(&file_name, file_size)?;
        let mut mmap = open_write_mmap(&file_name, AdviceSetting::Global, false)?;

        // fill mmap file data
        let header = Header {
            ranges_start: PADDING_SIZE as u64,
            points_count: points_count as u64,
        };
        header
            .write_to_prefix(mmap.as_mut())
            .map_err(|_| OperationError::service_error(NOT_ENOUGHT_BYTES_ERROR_MESSAGE))?;

        // counter for values offset
        let mut point_values_offset = header.ranges_start as usize + ranges_size;
        for (point_id, values) in iter {
            let start = point_values_offset;
            let mut values_count = 0;
            for value in values {
                values_count += 1;
                let bytes = mmap.get_mut(point_values_offset..).ok_or_else(|| {
                    OperationError::service_error(NOT_ENOUGHT_BYTES_ERROR_MESSAGE)
                })?;
                T::write_to_mmap(value.clone(), bytes).ok_or_else(|| {
                    OperationError::service_error(NOT_ENOUGHT_BYTES_ERROR_MESSAGE)
                })?;
                point_values_offset += T::mmapped_size(value);
            }

            // Record this point's slot in the ranges table.
            let range = MmapRange {
                start: start as u64,
                count: values_count as u64,
            };
            mmap.get_mut(
                header.ranges_start as usize + point_id as usize * std::mem::size_of::<MmapRange>()..,
            )
            .and_then(|bytes| range.write_to_prefix(bytes).ok())
            .ok_or_else(|| OperationError::service_error(NOT_ENOUGHT_BYTES_ERROR_MESSAGE))?;
        }

        mmap.flush()?;
        Ok(Self {
            file_name,
            mmap: mmap.make_read_only()?,
            header,
            phantom: std::marker::PhantomData,
        })
    }

    /// Open an existing file created by [`Self::from_iter`].
    /// `populate` pre-faults pages into the page cache at open time.
    pub fn open(path: &Path, populate: bool) -> OperationResult<Self> {
        let file_name = path.join(POINT_TO_VALUES_PATH);
        let mmap = open_write_mmap(&file_name, AdviceSetting::Global, populate)?;
        let (header, _) = Header::read_from_prefix(mmap.as_ref()).map_err(|_| {
            OperationError::InconsistentStorage {
                description: NOT_ENOUGHT_BYTES_ERROR_MESSAGE.to_owned(),
            }
        })?;
        Ok(Self {
            file_name,
            mmap: mmap.make_read_only()?,
            header,
            phantom: std::marker::PhantomData,
        })
    }

    pub fn files(&self) -> Vec<PathBuf> {
        vec![self.file_name.clone()]
    }

    pub fn immutable_files(&self) -> Vec<PathBuf> {
        // `MmapPointToValues` is immutable
        vec![self.file_name.clone()]
    }

    /// Returns `true` if any value of `point_id` satisfies `check_fn`,
    /// charging the bytes read to the hardware counter.
    pub fn check_values_any(
        &self,
        point_id: PointOffsetType,
        check_fn: impl Fn(T::Referenced<'_>) -> bool,
        hw_counter: &ConditionedCounter,
    ) -> bool {
        let hw_cell = hw_counter.payload_index_io_read_counter();

        // Measure IO overhead of `self.get_range()`
        hw_cell.incr_delta(MMAP_PTV_ACCESS_OVERHEAD);

        self.get_range(point_id)
            .map(|range| {
                let mut value_offset = range.start as usize;
                for _ in 0..range.count {
                    // NOTE(review): these unwraps panic on a truncated/corrupted
                    // file, unlike the error paths in `from_iter`/`open` — confirm
                    // this is the intended failure mode here.
                    let bytes = self.mmap.get(value_offset..).unwrap();
                    let value = T::read_from_mmap(bytes).unwrap();
                    let mmap_size = T::mmapped_size(value.clone());

                    hw_cell.incr_delta(mmap_size);

                    if check_fn(value) {
                        return true;
                    }
                    value_offset += mmap_size;
                }
                false
            })
            .unwrap_or(false)
    }

    /// Lazy iterator over the values of `point_id`; `None` for unknown points.
    pub fn get_values<'a>(
        &'a self,
        point_id: PointOffsetType,
    ) -> Option<impl Iterator<Item = T::Referenced<'a>> + 'a> {
        // first, get range of values for point
        let range = self.get_range(point_id)?;

        // second, define iteration step for values
        // iteration step gets remainder range from memmapped file and returns left range
        let bytes: &[u8] = self.mmap.as_ref();
        let read_value = move |range: MmapRange| -> Option<(T::Referenced<'a>, MmapRange)> {
            if range.count > 0 {
                let bytes = bytes.get(range.start as usize..)?;
                T::read_from_mmap(bytes).map(|value| {
                    let range = MmapRange {
                        start: range.start + T::mmapped_size(value.clone()) as u64,
                        count: range.count - 1,
                    };
                    (value, range)
                })
            } else {
                None
            }
        };

        // finally, return iterator
        Some(
            std::iter::successors(read_value(range), move |range| read_value(range.1))
                .map(|(value, _)| value),
        )
    }

    /// Number of values stored for `point_id`, without reading the values themselves.
    pub fn get_values_count(&self, point_id: PointOffsetType) -> Option<usize> {
        self.get_range(point_id).map(|range| range.count as usize)
    }

    /// Size of the ranges table, i.e. `max point id + 1` at build time.
    pub fn len(&self) -> usize {
        self.header.points_count as usize
    }

    #[allow(dead_code)]
    pub fn is_empty(&self) -> bool {
        self.header.points_count == 0
    }

    // Read the `MmapRange` slot for `point_id`; `None` if out of table bounds
    // or the file is too short.
    fn get_range(&self, point_id: PointOffsetType) -> Option<MmapRange> {
        if point_id < self.header.points_count as PointOffsetType {
            let range_offset = (self.header.ranges_start as usize)
                + (point_id as usize) * std::mem::size_of::<MmapRange>();
            MmapRange::read_from_prefix(self.mmap.get(range_offset..)?)
                .ok()
                .map(|(range, _)| range)
        } else {
            None
        }
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) {
        self.mmap.populate();
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        clear_disk_cache(&self.file_name)?;
        Ok(())
    }

    /// Iterate `(point_id, values)` over the whole table; `None` values should
    /// not occur for ids below `len()`, but the `Option` mirrors `get_values`.
    pub fn iter(
        &self,
    ) -> impl Iterator<
        Item = (
            PointOffsetType,
            Option<impl Iterator<Item = T::Referenced<'_>> + '_>,
        ),
    > + Clone {
        (0..self.len() as PointOffsetType).map(|idx| (idx, self.get_values(idx)))
    }
}

#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use tempfile::Builder;

    use super::*;

    #[test]
    fn test_mmap_point_to_values_string() {
        let values: Vec<Vec<String>> = vec![
            vec![
                "fox".to_owned(),
                "driver".to_owned(),
                "point".to_owned(),
                "it".to_owned(),
                "box".to_owned(),
            ],
            vec![
                "alice".to_owned(),
                "red".to_owned(),
                "yellow".to_owned(),
                "blue".to_owned(),
                "apple".to_owned(),
            ],
            vec![
                "box".to_owned(),
                "qdrant".to_owned(),
                "line".to_owned(),
                "bash".to_owned(),
                "reproduction".to_owned(),
            ],
            vec![
                "list".to_owned(),
                "vitamin".to_owned(),
                "one".to_owned(),
                "two".to_owned(),
                "three".to_owned(),
            ],
            vec![
                "tree".to_owned(),
                "metallic".to_owned(),
                "ownership".to_owned(),
            ],
            vec![],
            vec!["slice".to_owned()],
            vec!["red".to_owned(), "pink".to_owned()],
        ];

        let dir = Builder::new()
            .prefix("mmap_point_to_values")
            .tempdir()
            .unwrap();
        MmapPointToValues::<str>::from_iter(
            dir.path(),
            values
                .iter()
                .enumerate()
                .map(|(id, values)| (id as PointOffsetType, values.iter().map(|s| s.as_str()))),
        )
        .unwrap();
        // Round-trip: reopen the file and compare every point's values.
        let point_to_values = MmapPointToValues::<str>::open(dir.path(), false).unwrap();

        for (idx, values) in values.iter().enumerate() {
            let iter = point_to_values.get_values(idx as PointOffsetType);
            let v: Vec<String> = iter
                .map(|iter| iter.map(|s: &str| s.to_owned()).collect_vec())
                .unwrap_or_default();
            assert_eq!(&v, values);
        }
    }

    #[test]
    fn test_mmap_point_to_values_geo() {
        let values: Vec<Vec<GeoPoint>> = vec![
            vec![
                GeoPoint::new_unchecked(6.0, 2.0),
                GeoPoint::new_unchecked(4.0, 3.0),
                GeoPoint::new_unchecked(2.0, 5.0),
                GeoPoint::new_unchecked(8.0, 7.0),
                GeoPoint::new_unchecked(1.0, 9.0),
            ],
            vec![
                GeoPoint::new_unchecked(8.0, 1.0),
                GeoPoint::new_unchecked(3.0, 3.0),
                GeoPoint::new_unchecked(5.0, 9.0),
                GeoPoint::new_unchecked(1.0, 8.0),
                GeoPoint::new_unchecked(7.0, 2.0),
            ],
            vec![
                GeoPoint::new_unchecked(6.0, 3.0),
                GeoPoint::new_unchecked(4.0, 4.0),
                GeoPoint::new_unchecked(3.0, 7.0),
                GeoPoint::new_unchecked(1.0, 2.0),
                GeoPoint::new_unchecked(4.0, 8.0),
            ],
            vec![
                GeoPoint::new_unchecked(1.0, 3.0),
                GeoPoint::new_unchecked(3.0, 9.0),
                GeoPoint::new_unchecked(7.0, 0.0),
            ],
            vec![],
            vec![GeoPoint::new_unchecked(8.0, 5.0)],
            vec![GeoPoint::new_unchecked(9.0, 4.0)],
        ];

        let dir = Builder::new()
            .prefix("mmap_point_to_values")
            .tempdir()
            .unwrap();
        MmapPointToValues::<GeoPoint>::from_iter(
            dir.path(),
            values
                .iter()
                .enumerate()
                .map(|(id, values)| (id as PointOffsetType, values.iter().cloned())),
        )
        .unwrap();
        // Round-trip: reopen the file and compare every point's values.
        let point_to_values = MmapPointToValues::<GeoPoint>::open(dir.path(), false).unwrap();

        for (idx, values) in values.iter().enumerate() {
            let iter = point_to_values.get_values(idx as PointOffsetType);
            let v: Vec<GeoPoint> = iter.map(|iter| iter.collect_vec()).unwrap_or_default();
            assert_eq!(&v, values);
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/utils.rs
lib/segment/src/index/field_index/utils.rs
use std::ops::Bound;
use std::ops::Bound::{Excluded, Included};

/// Checks that `start`/`end` form a range that can contain at least one value,
/// following the same conventions as `BTreeMap::range`.
///
/// Returns `false` when:
/// - both bounds are excluded and `start >= end` (nothing can lie strictly
///   between them when they are equal or inverted), or
/// - for any other bounded pair, `start > end` (inverted range).
///
/// Any range with an unbounded side is considered valid.
pub fn check_boundaries<T>(start: &Bound<T>, end: &Bound<T>) -> bool
where
    T: PartialOrd,
{
    match (start, end) {
        // range start and end are equal and excluded in BTreeMap
        (Excluded(s), Excluded(e)) if s >= e => false,
        // range start is greater than range end
        (Included(s) | Excluded(s), Included(e) | Excluded(e)) if s > e => false,
        _ => true,
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/facet_index.rs
lib/segment/src/index/field_index/facet_index.rs
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;

use super::bool_index::BoolIndex;
use super::map_index::{IdIter, MapIndex};
use crate::data_types::facets::{FacetHit, FacetValueRef};
use crate::types::{IntPayloadType, UuidIntType};

/// Common interface for payload indices that can answer facet queries.
pub trait FacetIndex {
    /// Get all values for a point
    fn get_point_values(
        &self,
        point_id: PointOffsetType,
    ) -> impl Iterator<Item = FacetValueRef<'_>> + '_;

    /// Get all values in the index
    fn iter_values(&self) -> impl Iterator<Item = FacetValueRef<'_>> + '_;

    /// Get all value->point_ids mappings
    fn iter_values_map<'a>(
        &'a self,
        hw_acc: &'a HardwareCounterCell,
    ) -> impl Iterator<Item = (FacetValueRef<'a>, IdIter<'a>)> + 'a;

    /// Get all value->count mappings
    fn iter_counts_per_value(&self) -> impl Iterator<Item = FacetHit<FacetValueRef<'_>>> + '_;
}

/// Type-erased borrow of one concrete facet-capable index.
///
/// Dispatches each `FacetIndex` method to the concrete index behind the
/// variant, boxing the returned iterator since the trait methods use
/// `impl Trait` return types that differ per implementation.
pub enum FacetIndexEnum<'a> {
    Keyword(&'a MapIndex<str>),
    Int(&'a MapIndex<IntPayloadType>),
    Uuid(&'a MapIndex<UuidIntType>),
    Bool(&'a BoolIndex),
}

impl<'a> FacetIndexEnum<'a> {
    /// All facet values stored for `point_id` in the underlying index.
    pub fn get_point_values(
        &self,
        point_id: PointOffsetType,
    ) -> Box<dyn Iterator<Item = FacetValueRef<'a>> + 'a> {
        match self {
            FacetIndexEnum::Keyword(index) => {
                Box::new(FacetIndex::get_point_values(*index, point_id))
            }
            FacetIndexEnum::Int(index) => Box::new(FacetIndex::get_point_values(*index, point_id)),
            FacetIndexEnum::Uuid(index) => Box::new(FacetIndex::get_point_values(*index, point_id)),
            FacetIndexEnum::Bool(index) => Box::new(FacetIndex::get_point_values(*index, point_id)),
        }
    }

    /// All distinct facet values present in the underlying index.
    pub fn iter_values(&self) -> Box<dyn Iterator<Item = FacetValueRef<'a>> + 'a> {
        match self {
            FacetIndexEnum::Keyword(index) => Box::new(FacetIndex::iter_values(*index)),
            FacetIndexEnum::Int(index) => Box::new(FacetIndex::iter_values(*index)),
            FacetIndexEnum::Uuid(index) => Box::new(FacetIndex::iter_values(*index)),
            FacetIndexEnum::Bool(index) => Box::new(FacetIndex::iter_values(*index)),
        }
    }

    /// All value -> point-id-iterator mappings; reads are charged to `hw_counter`.
    pub fn iter_values_map<'b>(
        &'b self,
        hw_counter: &'b HardwareCounterCell,
    ) -> Box<dyn Iterator<Item = (FacetValueRef<'b>, IdIter<'b>)> + 'b> {
        match self {
            FacetIndexEnum::Keyword(index) => {
                Box::new(FacetIndex::iter_values_map(*index, hw_counter))
            }
            FacetIndexEnum::Int(index) => Box::new(FacetIndex::iter_values_map(*index, hw_counter)),
            FacetIndexEnum::Uuid(index) => {
                Box::new(FacetIndex::iter_values_map(*index, hw_counter))
            }
            FacetIndexEnum::Bool(index) => {
                Box::new(FacetIndex::iter_values_map(*index, hw_counter))
            }
        }
    }

    /// All value -> count facet hits from the underlying index.
    pub fn iter_counts_per_value(
        &'a self,
    ) -> Box<dyn Iterator<Item = FacetHit<FacetValueRef<'a>>> + 'a> {
        match self {
            FacetIndexEnum::Keyword(index) => Box::new(FacetIndex::iter_counts_per_value(*index)),
            FacetIndexEnum::Int(index) => Box::new(FacetIndex::iter_counts_per_value(*index)),
            FacetIndexEnum::Uuid(index) => Box::new(FacetIndex::iter_counts_per_value(*index)),
            FacetIndexEnum::Bool(index) => Box::new(FacetIndex::iter_counts_per_value(*index)),
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/mod.rs
lib/segment/src/index/field_index/mod.rs
use std::ops::Deref;

use ahash::AHashSet;
use common::types::PointOffsetType;

use crate::types::{Condition, FieldCondition, PointIdType, VectorNameBuf};

pub mod bool_index;
pub(super) mod facet_index;
mod field_index_base;
pub mod full_text_index;
pub mod geo_hash;
pub mod geo_index;
mod histogram;
mod immutable_point_to_values;
pub mod index_selector;
pub mod map_index;
mod mmap_point_to_values;
pub mod null_index;
pub mod numeric_index;
mod stat_tools;
#[cfg(test)]
mod tests;
mod utils;

pub use field_index_base::*;

use crate::utils::maybe_arc::MaybeArc;

/// A `HasId` filter condition together with its segment-local resolution.
#[derive(Debug, Clone, PartialEq)]
pub struct ResolvedHasId {
    /// Original IDs, as provided in filtering condition
    pub point_ids: MaybeArc<AHashSet<PointIdType>>,
    /// Resolved point offsets, which are specific to the segment.
    pub resolved_point_offsets: Vec<PointOffsetType>,
}

/// A condition that can drive the primary (first-pass) point selection
/// during filtered search.
#[derive(Debug, Clone, PartialEq)]
pub enum PrimaryCondition {
    Condition(Box<FieldCondition>),
    Ids(ResolvedHasId),
    HasVector(VectorNameBuf),
}

impl From<FieldCondition> for PrimaryCondition {
    fn from(condition: FieldCondition) -> Self {
        PrimaryCondition::Condition(Box::new(condition))
    }
}

/// A field condition paired with the number of points it is expected to match.
#[derive(Debug, Clone)]
pub struct PayloadBlockCondition {
    pub condition: FieldCondition,
    pub cardinality: usize,
}

/// Min/expected/max bounds on how many points a query condition matches,
/// plus the clauses usable for primary selection.
#[derive(Debug, Clone, PartialEq)]
pub struct CardinalityEstimation {
    /// Conditions that could be used to make a primary point selection.
    pub primary_clauses: Vec<PrimaryCondition>,
    /// Minimal possible matched points in best case for a query
    pub min: usize,
    /// Expected number of matched points for a query, assuming even random distribution of stored data
    pub exp: usize,
    /// The largest possible number of matched points in a worst case for a query
    pub max: usize,
}

impl CardinalityEstimation {
    /// Estimation for a condition whose match count is known exactly.
    pub const fn exact(count: usize) -> Self {
        CardinalityEstimation {
            primary_clauses: vec![],
            min: count,
            exp: count,
            max: count,
        }
    }

    /// Generate estimation for unknown filter
    pub const fn unknown(total: usize) -> Self {
        CardinalityEstimation {
            primary_clauses: vec![],
            min: 0,
            exp: total / 2,
            max: total,
        }
    }

    /// Push a primary clause to the estimation
    pub fn with_primary_clause(mut self, clause: PrimaryCondition) -> Self {
        self.primary_clauses.push(clause);
        self
    }

    #[cfg(test)]
    pub const fn equals_min_exp_max(&self, other: &Self) -> bool {
        self.min == other.min && self.exp == other.exp && self.max == other.max
    }

    /// Checks that the given condition is a primary condition of the estimation.
    pub fn is_primary(&self, condition: &Condition) -> bool {
        self.primary_clauses
            .iter()
            .any(|primary_condition| match primary_condition {
                PrimaryCondition::Condition(primary_field_condition) => match condition {
                    Condition::Field(field_condition) => {
                        primary_field_condition.as_ref() == field_condition
                    }
                    _ => false,
                },
                PrimaryCondition::Ids(ids) => match condition {
                    // Compare against the original (unresolved) id set.
                    Condition::HasId(has_id) => ids.point_ids.deref() == has_id.has_id.deref(),
                    _ => false,
                },
                PrimaryCondition::HasVector(has_vector) => match condition {
                    Condition::HasVector(vector_condition) => {
                        has_vector == &vector_condition.has_vector
                    }
                    _ => false,
                },
            })
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/stat_tools.rs
lib/segment/src/index/field_index/stat_tools.rs
use std::f64::consts::{E, PI};

/// This function estimates how many real points were selected with the filter.
/// It is assumed that each real point has, on average, X values in correspondence. As a response
/// to the execution of the query it is possible to establish only the number of matched associated
/// values.
///
/// # Arguments
///
/// * `total_points` - total number of the unique points in the whole collection
/// * `total_values` - total number of payload values in the collection
/// * `selected_values_count` - amount of values selected during the query
///
/// # Result
///
/// Expected amount of unique points contained in selected values
/// The result might overflow at some corner cases
/// so it is better to limit its value with min and max
///
/// # Panics
///
/// Panics if `total_values < total_points` (each point must have at least one value).
pub fn estimate_multi_value_selection_cardinality(
    total_points: usize,
    total_values: usize,
    selected_values_count: usize,
) -> f64 {
    // Value >= 1.0
    assert!(total_values >= total_points);
    let values_per_point = total_values as f64 / total_points as f64;

    // Probability to select each unique value
    let prob_select = 1. - prob_not_select(total_values, values_per_point, selected_values_count);
    prob_select * total_points as f64
}

/// Fast approximate computation of $ln(n!)$
/// See: <https://en.wikipedia.org/wiki/Stirling%27s_approximation>
///
/// NOTE(review): for `n < 1` this returns `1.0`, but mathematically
/// `ln(0!) = ln(1) = 0`. Presumably a deliberate fudge for the estimation
/// formulas below — confirm before "fixing".
fn approx_fact_log(n: f64) -> f64 {
    if n < 1.0 {
        return 1.0; // By definition
    }
    (2. * PI * n).sqrt().ln() + n * (n / E).ln()
}

/// Probability of each individual unique point to be selected with the query
///
/// Straight equation:
/// $\prod_{i=0}^{N-1} \frac{total - avg - i}{total - i}$
/// , where `N` - number of selected points
///
/// Proof:
///
/// $$
/// \prod_{i=0}^{N-1} \frac{total - avg - i}{total - i}
/// = \frac{\prod_{i=0}^{N-1} (total - avg - i)}{\prod_{i=0}^{N-1}(total - i)}
/// = \frac{\prod_{i=1}^{N} (total - avg - i + 1)}{\prod_{i=1}^{N}(total - i + 1)}\\
/// = \frac{\prod_{i=1}^{N} (total - avg - (N - i + 1) + 1)}{\prod_{i=1}^{N}(total - (N - i + 1) + 1)}
/// = \frac{\prod_{i=1}^{N} (i + total - avg - N)}{\prod_{i=1}^{N}(i + total - N)}\\
/// = \frac{\prod_{i=1}^{total - avg} i}{\prod_{i=1}^{total - avg - N} i} \frac{\prod_{i=1}^{total - N} i}{\prod_{i=1}^{total} i}
/// = \frac{(total - avg)!(total - N)!}{(total - avg - N)!(total)!}
/// = \exp(\ln{\frac{(total - avg)!(total - N)!}{(total - avg - N)!(total)!}})\\
/// = \exp(\ln((total - avg)!(total - N)!) - \ln((total - avg - N)!(total)!))
/// = \exp( \ln((total - avg)!) + \ln((total - N)!) - \ln((total - avg - N)!) - \ln(total!))
/// $$
///
/// Hint: use <https://latex.codecogs.com/eqneditor/editor.php> to render formula
fn prob_not_select(total: usize, avg: f64, selected: usize) -> f64 {
    let total = total as f64;
    let selected = selected as f64;
    (approx_fact_log(total - avg) + approx_fact_log(total - selected)
        - approx_fact_log(total - avg - selected)
        - approx_fact_log(total))
    .exp()
}

/// Calculate number of selected points, based on the amount of matched values.
/// Assuming that values are randomly distributed among points and each point can have multiple values.
/// Math is based on: <https://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives>
pub fn number_of_selected_points(points: usize, values: usize) -> usize {
    let prob_of_selection = 1. - (-(values as f64 / points as f64)).exp();
    (prob_of_selection * points as f64).round() as usize
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use rand::SeedableRng;
    use rand::prelude::StdRng;
    use rand::seq::SliceRandom;

    use super::*;

    #[test]
    fn test_selected_points_est() {
        let res = number_of_selected_points(100, 1000);
        assert!(res > 95);
        assert!(res <= 100);

        let res = number_of_selected_points(1000, 10);
        assert!(res > 5);
        assert!(res <= 10);
    }

    // Monte-Carlo reference: shuffle `uniq * avg` values deterministically,
    // take the first `selected`, count distinct originating points.
    fn simulate(uniq: usize, avg: usize, selected: usize) -> usize {
        let mut data: Vec<_> = vec![];
        for i in 0..uniq {
            for _ in 0..avg {
                data.push(i);
            }
        }
        data.shuffle(&mut StdRng::seed_from_u64(42));
        let mut unique_selected: HashSet<_> = Default::default();
        for x in data.into_iter().take(selected) {
            unique_selected.insert(x);
        }
        unique_selected.len()
    }

    #[test]
    fn approx_factorial() {
        let approx = approx_fact_log(10.).exp();
        let real = f64::from(2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10);
        let error = (approx / real - 1.0).abs();
        assert!(error < 0.01);
    }

    #[test]
    fn test_estimation_corner_cases() {
        let count = estimate_multi_value_selection_cardinality(10, 20, 20);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
        let count = estimate_multi_value_selection_cardinality(100, 100, 100);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
        let count = estimate_multi_value_selection_cardinality(100, 100, 50);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
        let count = estimate_multi_value_selection_cardinality(10, 10, 10);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
        let count = estimate_multi_value_selection_cardinality(1, 1, 1);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
        let count = estimate_multi_value_selection_cardinality(1, 1, 0);
        assert!(!count.is_nan());
        eprintln!("count = {count:#?}");
    }

    #[test]
    fn test_estimation_1() {
        let total = 2000;
        let unique = 1000;
        let selected = 50;
        let estimation = estimate_multi_value_selection_cardinality(unique, total, selected);
        let experiment = simulate(unique, total / unique, selected);
        let error = (estimation / experiment as f64 - 1.0).abs();
        assert!(error < 0.05);
    }

    #[test]
    fn test_estimation_2() {
        let total = 2000;
        let unique = 1000;
        let selected = 300;
        let estimation = estimate_multi_value_selection_cardinality(unique, total, selected);
        let experiment = simulate(unique, total / unique, selected);
        let error = (estimation / experiment as f64 - 1.0).abs();
        assert!(error < 0.05);
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/histogram.rs
lib/segment/src/index/field_index/histogram.rs
use std::collections::BTreeMap;
use std::collections::Bound::{Excluded, Included, Unbounded};
use std::ops::Bound;
use std::path::{Path, PathBuf};

use common::types::PointOffsetType;
use io::file_operations::{atomic_save_bin, atomic_save_json, read_bin, read_json};
use itertools::Itertools;
use num_traits::Num;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};

use crate::common::operation_error::OperationResult;
use crate::index::field_index::utils::check_boundaries;

const MIN_BUCKET_SIZE: usize = 10;
const CONFIG_PATH: &str = "histogram_config.json";
const BORDERS_PATH: &str = "histogram_borders.bin";

/// Number of points counted on the left/right side of a histogram border.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Counts {
    pub left: usize,
    pub right: usize,
}

/// A histogram border: a value plus the point offset that owns it,
/// so equal values from different points remain distinct keys.
#[allow(clippy::derive_ord_xor_partial_ord)]
#[derive(PartialEq, PartialOrd, Debug, Clone, Serialize, Deserialize)]
#[repr(C)]
pub struct Point<T> {
    pub val: T,
    pub idx: PointOffsetType,
}

impl<T> Point<T> {
    pub fn new(val: T, idx: PointOffsetType) -> Self {
        Self { val, idx }
    }
}

impl<T: PartialEq> Eq for Point<T> {}

impl<T: PartialOrd + Copy> Ord for Point<T> {
    // Order by value first, then by point offset as a tie-breaker.
    // NOTE(review): the `unwrap` panics for incomparable values (e.g. NaN
    // when `T = f64`); presumably NaN never reaches the histogram — confirm
    // upstream validation.
    fn cmp(&self, other: &Point<T>) -> std::cmp::Ordering {
        (self.val, self.idx)
            .partial_cmp(&(other.val, other.idx))
            .unwrap()
    }
}

/// A trait that should represent common properties of integer and floating point types.
/// In particular, i64 and f64.
pub trait Numericable: Num + PartialEq + PartialOrd + Copy { fn min_value() -> Self; fn max_value() -> Self; fn to_f64(self) -> f64; fn from_f64(x: f64) -> Self; fn from_u128(x: u128) -> Self; fn min(self, b: Self) -> Self { if self < b { self } else { b } } fn max(self, b: Self) -> Self { if self > b { self } else { b } } fn abs_diff(self, b: Self) -> Self { if self > b { self - b } else { b - self } } } impl Numericable for i64 { fn min_value() -> Self { i64::MIN } fn max_value() -> Self { i64::MAX } fn to_f64(self) -> f64 { self as f64 } fn from_f64(x: f64) -> Self { x as Self } fn from_u128(x: u128) -> Self { x as i64 } fn abs_diff(self, b: Self) -> Self { i64::abs_diff(self, b) as i64 } } impl Numericable for f64 { fn min_value() -> Self { f64::MIN } fn max_value() -> Self { f64::MAX } fn to_f64(self) -> f64 { self } fn from_f64(x: f64) -> Self { x } fn from_u128(x: u128) -> Self { x as Self } } impl Numericable for u128 { fn min_value() -> Self { u128::MIN } fn max_value() -> Self { u128::MAX } fn to_f64(self) -> f64 { self as f64 } fn from_f64(x: f64) -> Self { x as u128 } fn from_u128(x: u128) -> Self { x } fn abs_diff(self, b: Self) -> Self { u128::abs_diff(self, b) } } #[derive(Default, Debug, PartialEq)] pub struct Histogram<T: Numericable + Serialize + DeserializeOwned> { max_bucket_size: usize, precision: f64, total_count: usize, borders: BTreeMap<Point<T>, Counts>, } #[derive(Debug, Serialize, Deserialize)] struct HistogramConfig { max_bucket_size: usize, precision: f64, total_count: usize, } impl<T: Numericable + Serialize + DeserializeOwned> Histogram<T> { pub fn new(max_bucket_size: usize, precision: f64) -> Self { assert!(precision < 1.0); assert!(precision > 0.0); Self { max_bucket_size, precision, total_count: 0, borders: BTreeMap::default(), } } pub fn load(path: &Path) -> OperationResult<Self> { let config_path = path.join(CONFIG_PATH); let borders_path = path.join(BORDERS_PATH); let histogram_config: HistogramConfig = 
read_json(&config_path)?; let histogram_buckets: Vec<(Point<T>, Counts)> = read_bin(&borders_path)?; Ok(Self { max_bucket_size: histogram_config.max_bucket_size, precision: histogram_config.precision, total_count: histogram_config.total_count, borders: histogram_buckets.into_iter().collect(), }) } pub fn save(&self, path: &Path) -> OperationResult<()> { let config_path = path.join(CONFIG_PATH); let borders_path = path.join(BORDERS_PATH); atomic_save_json( &config_path, &HistogramConfig { max_bucket_size: self.max_bucket_size, precision: self.precision, total_count: self.total_count, }, )?; let borders: Vec<(Point<T>, Counts)> = self .borders .iter() .map(|(k, v)| (k.clone(), v.clone())) .collect(); atomic_save_bin(&borders_path, &borders)?; Ok(()) } pub fn files(path: &Path) -> Vec<PathBuf> { vec![path.join(CONFIG_PATH), path.join(BORDERS_PATH)] } pub fn immutable_files(path: &Path) -> Vec<PathBuf> { vec![path.join(CONFIG_PATH)] } #[cfg(test)] pub fn total_count(&self) -> usize { self.total_count } #[cfg(test)] pub fn borders(&self) -> &BTreeMap<Point<T>, Counts> { &self.borders } pub fn current_bucket_size(&self) -> usize { let bucket_size = (self.total_count as f64 * self.precision) as usize; bucket_size.clamp(MIN_BUCKET_SIZE, self.max_bucket_size) } pub fn get_total_count(&self) -> usize { self.total_count } /// Infers boundaries for bucket of given size and starting point. 
/// Returns `to` range of values starting provided `from`value which is expected to contain /// `range_size` values /// /// Returns `Unbounded` if there are no points stored pub fn get_range_by_size(&self, from: Bound<T>, range_size: usize) -> Bound<T> { let from_ = match from { Included(val) => Included(Point { val, idx: PointOffsetType::MIN, }), Excluded(val) => Excluded(Point { val, idx: PointOffsetType::MAX, }), Unbounded => Unbounded, }; let mut reached_count = 0; for (border, counts) in self.borders.range((from_, Unbounded)) { if reached_count + counts.left > range_size { // required size reached return Included(border.val); } else { // Size not yet reached reached_count += counts.left; } } Unbounded } pub fn estimate(&self, from: Bound<T>, to: Bound<T>) -> (usize, usize, usize) { let from_ = match &from { Included(val) => Included(Point { val: *val, idx: PointOffsetType::MIN, }), Excluded(val) => Excluded(Point { val: *val, idx: PointOffsetType::MAX, }), Unbounded => Unbounded, }; let to_ = match &to { Included(val) => Included(Point { val: *val, idx: PointOffsetType::MAX, }), Excluded(val) => Excluded(Point { val: *val, idx: PointOffsetType::MIN, }), Unbounded => Unbounded, }; // Value for range fraction estimation let from_val = match from { Included(val) => val, Excluded(val) => val, Unbounded => T::min_value(), }; let to_val = match to { Included(val) => val, Excluded(val) => val, Unbounded => T::max_value(), }; let left_border = { if matches!(from_, Unbounded) { None } else { self.borders.range((Unbounded, from_.clone())).next_back() } }; let right_border = { if matches!(to_, Unbounded) { None } else { self.borders.range((to_.clone(), Unbounded)).next() } }; if !check_boundaries(&from_, &to_) { return (0, 0, 0); } left_border .into_iter() .chain(self.borders.range((from_, to_))) .chain(right_border) .tuple_windows() .map( |((a, a_count), (b, _b_count)): ((&Point<T>, &Counts), (&Point<T>, _))| { let val_range = (b.val - a.val).to_f64(); if val_range == 
0. { // Zero-length range is always covered let estimates = a_count.right + 1; return (estimates, estimates, estimates); } if a_count.right == 0 { // Range covers most-right border return (1, 1, 1); } let cover_range = (to_val.min(b.val) - from_val.max(a.val)).to_f64(); let covered_frac = cover_range / val_range; let estimate = (a_count.right as f64 * covered_frac).round() as usize + 1; let min_estimate = if cover_range == val_range { a_count.right + 1 } else { 0 }; let max_estimate = a_count.right + 1; (min_estimate, estimate, max_estimate) }, ) .reduce(|a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2)) .unwrap_or((0, 0, 0)) } pub fn remove<F, G>(&mut self, val: &Point<T>, left_neighbour: F, right_neighbour: G) where F: Fn(&Point<T>) -> Option<Point<T>>, G: Fn(&Point<T>) -> Option<Point<T>>, { let (mut close_neighbors, (mut far_left_neighbor, mut far_right_neighbor)) = { let mut left_iterator = self .borders .range((Unbounded, Included(val.clone()))) .map(|(k, v)| (k.clone(), v.clone())); let mut right_iterator = self .borders .range((Excluded(val.clone()), Unbounded)) .map(|(k, v)| (k.clone(), v.clone())); ( (left_iterator.next_back(), right_iterator.next()), (left_iterator.next_back(), right_iterator.next()), ) }; let (to_remove, to_create, removed) = match &mut close_neighbors { (None, None) => (None, None, false), // histogram is empty (Some((left_border, left_border_count)), None) => { if left_border == val { // ....| // ...| if left_border_count.left == 0 { // ...|| // ...| (Some(left_border.clone()), None, true) } else { // ...|..| // ...|.| if let Some((_fln, fln_count)) = &mut far_left_neighbor { fln_count.right -= 1 } let (new_border, new_border_count) = ( left_neighbour(left_border).unwrap(), Counts { left: left_border_count.left - 1, right: 0, }, ); ( Some(left_border.clone()), Some((new_border, new_border_count)), true, ) } } else { (None, None, false) } } (None, Some((right_border, right_border_count))) => { if right_border == val { // |... // |.. 
if right_border_count.right == 0 { // ||... // |... (Some(right_border.clone()), None, true) } else { // |..|... // |.|... if let Some((_frn, frn_count)) = &mut far_right_neighbor { frn_count.left -= 1 } let (new_border, new_border_count) = ( right_neighbour(right_border).unwrap(), Counts { left: 0, right: right_border_count.right - 1, }, ); ( Some(right_border.clone()), Some((new_border, new_border_count)), true, ) } } else { (None, None, false) } } (Some((left_border, left_border_count)), Some((right_border, right_border_count))) => { // ...|...x.|... if left_border == val { // ...|....|... // ... |...|... if left_border_count.right == 0 { // ...||... // ... |... right_border_count.left = left_border_count.left; (Some(left_border.clone()), None, true) } else if right_border_count.left + left_border_count.left <= self.current_bucket_size() && far_left_neighbor.is_some() { // ...|.l..r... // ...|. ..r... if let Some((_fln, fln_count)) = &mut far_left_neighbor { fln_count.right += right_border_count.left; right_border_count.left = fln_count.right; } (Some(left_border.clone()), None, true) } else { // ...|..|... // ... |.|... right_border_count.left -= 1; let (new_border, new_border_count) = ( right_neighbour(left_border).unwrap(), Counts { left: left_border_count.left, right: left_border_count.right - 1, }, ); ( Some(left_border.clone()), Some((new_border, new_border_count)), true, ) } } else if right_border == val { // ...|....|... // ...|...| ... if right_border_count.left == 0 { // ...||... // ...| ... left_border_count.right = right_border_count.left; (Some(right_border.clone()), None, true) } else if left_border_count.right + right_border_count.right <= self.current_bucket_size() && far_right_neighbor.is_some() { // ...l..r.|... // ...l.. .|... if let Some((_frn, frn_count)) = &mut far_right_neighbor { frn_count.left += left_border_count.right; left_border_count.right = frn_count.left; } (Some(right_border.clone()), None, true) } else { // ...|..|... 
// ...|.| ... left_border_count.right -= 1; let (new_border, new_border_count) = ( left_neighbour(right_border).unwrap(), Counts { left: right_border_count.right, right: right_border_count.left - 1, }, ); ( Some(right_border.clone()), Some((new_border, new_border_count)), true, ) } } else if right_border_count.left == 0 { // ...||... // ...||... (None, None, false) } else { // ...|...|... // ...|. .|... right_border_count.left -= 1; left_border_count.right -= 1; (None, None, true) } } }; if removed { self.total_count -= 1; } let (left_border_opt, right_border_opt) = close_neighbors; if let Some((k, v)) = left_border_opt { self.borders.insert(k, v); } if let Some((k, v)) = right_border_opt { self.borders.insert(k, v); } if let Some((k, v)) = far_left_neighbor { self.borders.insert(k, v); } if let Some((k, v)) = far_right_neighbor { self.borders.insert(k, v); } if let Some(remove_border) = to_remove { self.borders.remove(&remove_border); } if let Some((new_border, new_border_count)) = to_create { self.borders.insert(new_border, new_border_count); } } /// Warn: `val` should be unique pub fn insert<F, G>(&mut self, val: Point<T>, left_neighbour: F, right_neighbour: G) where F: Fn(&Point<T>) -> Option<Point<T>>, G: Fn(&Point<T>) -> Option<Point<T>>, { self.total_count += 1; if self.borders.len() < 2 { self.borders.insert(val, Counts { left: 0, right: 0 }); return; } let (mut close_neighbors, (mut far_left_neighbor, mut far_right_neighbor)) = { let mut left_iterator = self .borders .range((Unbounded, Included(val.clone()))) .map(|(k, v)| (k.clone(), v.clone())); let mut right_iterator = self .borders .range((Excluded(val.clone()), Unbounded)) .map(|(k, v)| (k.clone(), v.clone())); ( (left_iterator.next_back(), right_iterator.next()), (left_iterator.next_back(), right_iterator.next()), ) }; let (to_remove, to_create) = match &mut close_neighbors { (None, Some((right_border, right_border_count))) => { // x|.....|... 
let new_count = right_border_count.right + 1; let (new_border, mut new_border_count) = ( val, Counts { left: 0, right: new_count, }, ); if new_count > self.current_bucket_size() { // Too many values, can't move the border // x|.....|... // ||.....|... new_border_count.right = 0; (None, Some((new_border, new_border_count))) } else { // x|.....|... // |......|... if let Some((_frn, frn_count)) = &mut far_right_neighbor { frn_count.left = new_count; } ( Some(right_border.clone()), Some((new_border, new_border_count)), ) } } (Some((left_border, left_border_count)), None) => { // ...|.....|x let new_count = left_border_count.left + 1; let (new_border, mut new_border_count) = ( val, Counts { left: new_count, right: 0, }, ); if new_count > self.current_bucket_size() { // Too many values, can't move the border // ...|.....|x // ...|.....|| new_border_count.left = 0; (None, Some((new_border, new_border_count))) } else { // ...|.....|x // ...|......| if let Some((_fln, fln_count)) = &mut far_left_neighbor { fln_count.right = new_count } ( Some(left_border.clone()), Some((new_border, new_border_count)), ) } } (Some((left_border, left_border_count)), Some((right_border, right_border_count))) => { assert_eq!(left_border_count.right, right_border_count.left); let new_count = left_border_count.right + 1; if new_count > self.current_bucket_size() { // Too many values, let's adjust // Decide which border to move let left_dist = val.val.abs_diff(left_border.val); let right_dist = val.val.abs_diff(right_border.val); if left_dist < right_dist { // left border closer: // ...|..x.........|... let (new_border, mut new_border_count) = ( right_neighbour(left_border).unwrap(), Counts { left: left_border_count.left + 1, right: left_border_count.right, }, ); if left_border_count.left < self.current_bucket_size() && far_left_neighbor.is_some() { //we can move // ...|..x.........|... // ....|.x.........|... 
if let Some((_fln, fln_count)) = &mut far_left_neighbor { fln_count.right = new_border_count.left } ( Some(left_border.clone()), Some((new_border, new_border_count)), ) } else { // Can't be moved anymore, create an additional one // ...|..x.........|... // ...||.x.........|... new_border_count.left = 0; left_border_count.right = 0; (None, Some((new_border, new_border_count))) } } else { // right border closer // ...|........x...|... let (new_border, mut new_border_count) = ( left_neighbour(right_border).unwrap(), Counts { left: right_border_count.left, right: right_border_count.right + 1, }, ); if right_border_count.right < self.current_bucket_size() && far_right_neighbor.is_some() { // it's ok, we can move // 1: ...|........x...|... // 2: ...|........x..|.... if let Some((_frn, frn_count)) = &mut far_right_neighbor { frn_count.left = new_border_count.right } ( Some(right_border.clone()), Some((new_border, new_border_count)), ) } else { // Can't be moved anymore, create a new one // 1: ...|........x...|... // 2: ...|........x..||... new_border_count.right = 0; right_border_count.left = 0; (None, Some((new_border, new_border_count))) } } } else { left_border_count.right = new_count; right_border_count.left = new_count; (None, None) } } (None, None) => unreachable!(), }; let (left_border_opt, right_border_opt) = close_neighbors; if let Some((k, v)) = left_border_opt { self.borders.insert(k, v); } if let Some((k, v)) = right_border_opt { self.borders.insert(k, v); } if let Some((k, v)) = far_left_neighbor { self.borders.insert(k, v); } if let Some((k, v)) = far_right_neighbor { self.borders.insert(k, v); } if let Some(remove_border) = to_remove { self.borders.remove(&remove_border); } if let Some((new_border, new_border_count)) = to_create { self.borders.insert(new_border, new_border_count); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/index_selector.rs
lib/segment/src/index/field_index/index_selector.rs
use std::path::{Path, PathBuf}; #[cfg(feature = "rocksdb")] use std::sync::Arc; use gridstore::Blob; use super::bool_index::BoolIndex; use super::bool_index::mutable_bool_index::MutableBoolIndex; #[cfg(feature = "rocksdb")] use super::bool_index::simple_bool_index::SimpleBoolIndex; use super::geo_index::{GeoMapIndexGridstoreBuilder, GeoMapIndexMmapBuilder}; use super::histogram::Numericable; use super::map_index::{MapIndex, MapIndexGridstoreBuilder, MapIndexKey, MapIndexMmapBuilder}; use super::mmap_point_to_values::MmapValue; use super::numeric_index::{ Encodable, NumericIndexGridstoreBuilder, NumericIndexIntoInnerValue, NumericIndexMmapBuilder, }; use super::{FieldIndexBuilder, ValueIndexer}; use crate::common::operation_error::{OperationError, OperationResult}; use crate::data_types::index::TextIndexParams; use crate::index::field_index::FieldIndex; use crate::index::field_index::full_text_index::text_index::FullTextIndex; use crate::index::field_index::geo_index::GeoMapIndex; use crate::index::field_index::null_index::MutableNullIndex; use crate::index::field_index::numeric_index::NumericIndex; use crate::index::payload_config::{FullPayloadIndexType, PayloadIndexType}; use crate::json_path::JsonPath; use crate::types::{PayloadFieldSchema, PayloadSchemaParams}; /// Selects index and index builder types based on field type. 
#[derive(Copy, Clone)] pub enum IndexSelector<'a> { /// In-memory index on RocksDB, appendable or non-appendable #[cfg(feature = "rocksdb")] RocksDb(IndexSelectorRocksDb<'a>), /// On disk or in-memory index on mmaps, non-appendable Mmap(IndexSelectorMmap<'a>), /// In-memory index on gridstore, appendable Gridstore(IndexSelectorGridstore<'a>), } #[cfg(feature = "rocksdb")] #[derive(Copy, Clone)] pub struct IndexSelectorRocksDb<'a> { pub db: &'a Arc<parking_lot::RwLock<rocksdb::DB>>, pub is_appendable: bool, } #[derive(Copy, Clone)] pub struct IndexSelectorMmap<'a> { pub dir: &'a Path, pub is_on_disk: bool, } #[derive(Copy, Clone)] pub struct IndexSelectorGridstore<'a> { pub dir: &'a Path, } impl IndexSelector<'_> { /// Loads the correct index based on `index_type`. pub fn new_index_with_type( &self, field: &JsonPath, payload_schema: &PayloadFieldSchema, index_type: &FullPayloadIndexType, path: &Path, total_point_count: usize, create_if_missing: bool, ) -> OperationResult<Option<FieldIndex>> { let index = match (&index_type.index_type, payload_schema.expand().as_ref()) { (PayloadIndexType::IntIndex, PayloadSchemaParams::Integer(params)) => { // IntIndex only gets created if `range` is true. This will only throw an error if storage is corrupt. // // Note that `params.range == None` means the index was created without directly specifying these parameters. // In those cases it defaults to `true` so we don't need to cover this case. if params.range == Some(false) { log::warn!( "Inconsistent payload schema: Int index configured but schema.range is false" ); } self.numeric_new(field, create_if_missing)? .map(FieldIndex::IntIndex) } (PayloadIndexType::IntMapIndex, PayloadSchemaParams::Integer(params)) => { // IntMapIndex only gets created if `lookup` is true. This will only throw an error if storage is corrupt. // // Note that `params.lookup == None` means the index was created without directly specifying these parameters. 
// In those cases it defaults to `true` so we don't need to cover this case. if params.lookup == Some(false) { log::warn!( "Inconsistent payload schema: IntMap index configured but schema.lookup is false", ); } self.map_new(field, create_if_missing)? .map(FieldIndex::IntMapIndex) } (PayloadIndexType::DatetimeIndex, PayloadSchemaParams::Datetime(_)) => self .numeric_new(field, create_if_missing)? .map(FieldIndex::DatetimeIndex), (PayloadIndexType::KeywordIndex, PayloadSchemaParams::Keyword(_)) => self .map_new(field, create_if_missing)? .map(FieldIndex::KeywordIndex), (PayloadIndexType::FloatIndex, PayloadSchemaParams::Float(_)) => self .numeric_new(field, create_if_missing)? .map(FieldIndex::FloatIndex), (PayloadIndexType::GeoIndex, PayloadSchemaParams::Geo(_)) => self .geo_new(field, create_if_missing)? .map(FieldIndex::GeoIndex), (PayloadIndexType::FullTextIndex, PayloadSchemaParams::Text(params)) => self .text_new(field, params.clone(), create_if_missing)? .map(FieldIndex::FullTextIndex), (PayloadIndexType::BoolIndex, PayloadSchemaParams::Bool(_)) => self .bool_new(field, create_if_missing)? .map(FieldIndex::BoolIndex), (PayloadIndexType::UuidIndex, PayloadSchemaParams::Uuid(_)) => self .map_new(field, create_if_missing)? .map(FieldIndex::UuidMapIndex), (PayloadIndexType::UuidMapIndex, PayloadSchemaParams::Uuid(_)) => self .map_new(field, create_if_missing)? .map(FieldIndex::UuidMapIndex), (PayloadIndexType::NullIndex, _) => MutableNullIndex::open( &null_dir(path, field), total_point_count, create_if_missing, )? .map(FieldIndex::NullIndex), // Storage inconsistency. Should never happen. (index_type, schema) => { return Err(OperationError::service_error(format!( "Payload index storage inconsistent. Schema defines {schema:?} but storage is {index_type:?}" ))); } }; Ok(index) } /// Selects index type based on field type. 
pub fn new_index( &self, field: &JsonPath, payload_schema: &PayloadFieldSchema, create_if_missing: bool, ) -> OperationResult<Option<Vec<FieldIndex>>> { let indexes = match payload_schema.expand().as_ref() { PayloadSchemaParams::Keyword(_) => self .map_new(field, create_if_missing)? .map(|index| vec![FieldIndex::KeywordIndex(index)]), PayloadSchemaParams::Integer(integer_params) => { let use_lookup = integer_params.lookup.unwrap_or(true); let use_range = integer_params.range.unwrap_or(true); let lookup = if use_lookup { match self.map_new(field, create_if_missing)? { Some(index) => Some(FieldIndex::IntMapIndex(index)), None => return Ok(None), } } else { None }; let range = if use_range { match self.numeric_new(field, create_if_missing)? { Some(index) => Some(FieldIndex::IntIndex(index)), None => return Ok(None), } } else { None }; Some(lookup.into_iter().chain(range).collect()) } PayloadSchemaParams::Float(_) => self .numeric_new(field, create_if_missing)? .map(|index| vec![FieldIndex::FloatIndex(index)]), PayloadSchemaParams::Geo(_) => self .geo_new(field, create_if_missing)? .map(|index| vec![FieldIndex::GeoIndex(index)]), PayloadSchemaParams::Text(text_index_params) => self .text_new(field, text_index_params.clone(), create_if_missing)? .map(|index| vec![FieldIndex::FullTextIndex(index)]), PayloadSchemaParams::Bool(_) => self .bool_new(field, create_if_missing)? .map(|index| vec![FieldIndex::BoolIndex(index)]), PayloadSchemaParams::Datetime(_) => self .numeric_new(field, create_if_missing)? .map(|index| vec![FieldIndex::DatetimeIndex(index)]), PayloadSchemaParams::Uuid(_) => self .map_new(field, create_if_missing)? .map(|index| vec![FieldIndex::UuidMapIndex(index)]), }; Ok(indexes) } /// Selects index builder based on field type. 
pub fn index_builder( &self, field: &JsonPath, payload_schema: &PayloadFieldSchema, ) -> OperationResult<Vec<FieldIndexBuilder>> { let builders = match payload_schema.expand().as_ref() { PayloadSchemaParams::Keyword(_) => { vec![self.map_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::KeywordIndex, FieldIndexBuilder::KeywordMmapIndex, FieldIndexBuilder::KeywordGridstoreIndex, )?] } PayloadSchemaParams::Integer(integer_params) => { let use_lookup = integer_params.lookup.unwrap_or(true); let use_range = integer_params.range.unwrap_or(true); let lookup = if use_lookup { Some(self.map_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::IntMapIndex, FieldIndexBuilder::IntMapMmapIndex, FieldIndexBuilder::IntMapGridstoreIndex, )?) } else { None }; let range = if use_range { Some(self.numeric_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::IntIndex, FieldIndexBuilder::IntMmapIndex, FieldIndexBuilder::IntGridstoreIndex, )?) } else { None }; lookup.into_iter().chain(range).collect() } PayloadSchemaParams::Float(_) => { vec![self.numeric_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::FloatIndex, FieldIndexBuilder::FloatMmapIndex, FieldIndexBuilder::FloatGridstoreIndex, )?] } PayloadSchemaParams::Geo(_) => { vec![self.geo_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::GeoIndex, FieldIndexBuilder::GeoMmapIndex, FieldIndexBuilder::GeoGridstoreIndex, )?] } PayloadSchemaParams::Text(text_index_params) => { vec![self.text_builder(field, text_index_params.clone())?] } PayloadSchemaParams::Bool(_) => { vec![self.bool_builder(field)?] } PayloadSchemaParams::Datetime(_) => { vec![self.numeric_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::DatetimeIndex, FieldIndexBuilder::DatetimeMmapIndex, FieldIndexBuilder::DatetimeGridstoreIndex, )?] 
} PayloadSchemaParams::Uuid(_) => { vec![self.map_builder( field, #[cfg(feature = "rocksdb")] FieldIndexBuilder::UuidIndex, FieldIndexBuilder::UuidMmapIndex, FieldIndexBuilder::UuidGridstoreIndex, )?] } }; Ok(builders) } fn map_new<N: MapIndexKey + ?Sized>( &self, field: &JsonPath, create_if_missing: bool, ) -> OperationResult<Option<MapIndex<N>>> where Vec<N::Owned>: Blob + Send + Sync, { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable }) => { MapIndex::new_rocksdb( Arc::clone(db), &field.to_string(), *is_appendable, create_if_missing, )? } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { MapIndex::new_mmap(&map_dir(dir, field), *is_on_disk)? } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { MapIndex::new_gridstore(map_dir(dir, field), create_if_missing)? } }) } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] fn map_builder<N: MapIndexKey + ?Sized>( &self, field: &JsonPath, #[cfg(feature = "rocksdb")] make_rocksdb: fn( super::map_index::MapIndexBuilder<N>, ) -> FieldIndexBuilder, make_mmap: fn(MapIndexMmapBuilder<N>) -> FieldIndexBuilder, make_gridstore: fn(MapIndexGridstoreBuilder<N>) -> FieldIndexBuilder, ) -> OperationResult<FieldIndexBuilder> where Vec<N::Owned>: Blob + Send + Sync, { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, .. 
}) => make_rocksdb( MapIndex::builder_rocksdb(Arc::clone(db), &field.to_string())?, ), IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { make_mmap(MapIndex::builder_mmap(&map_dir(dir, field), *is_on_disk)) } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { make_gridstore(MapIndex::builder_gridstore(map_dir(dir, field))) } }) } fn numeric_new<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P>( &self, field: &JsonPath, create_if_missing: bool, ) -> OperationResult<Option<NumericIndex<T, P>>> where Vec<T>: Blob, { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable }) => { NumericIndex::new_rocksdb( Arc::clone(db), &field.to_string(), *is_appendable, create_if_missing, )? } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { NumericIndex::new_mmap(&numeric_dir(dir, field), *is_on_disk)? } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { NumericIndex::new_gridstore(numeric_dir(dir, field), create_if_missing)? 
} }) } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] fn numeric_builder<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P>( &self, field: &JsonPath, #[cfg(feature = "rocksdb")] make_rocksdb: fn( super::numeric_index::NumericIndexBuilder<T, P>, ) -> FieldIndexBuilder, make_mmap: fn(NumericIndexMmapBuilder<T, P>) -> FieldIndexBuilder, make_gridstore: fn(NumericIndexGridstoreBuilder<T, P>) -> FieldIndexBuilder, ) -> OperationResult<FieldIndexBuilder> where NumericIndex<T, P>: ValueIndexer<ValueType = P> + NumericIndexIntoInnerValue<T, P>, Vec<T>: Blob, { match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: _, }) => Ok(make_rocksdb(NumericIndex::builder_rocksdb( Arc::clone(db), &field.to_string(), )?)), IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => Ok(make_mmap( NumericIndex::builder_mmap(&numeric_dir(dir, field), *is_on_disk), )), IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => Ok(make_gridstore( NumericIndex::builder_gridstore(numeric_dir(dir, field)), )), } } fn geo_new( &self, field: &JsonPath, create_if_missing: bool, ) -> OperationResult<Option<GeoMapIndex>> { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable }) => { GeoMapIndex::new_memory( Arc::clone(db), &field.to_string(), *is_appendable, create_if_missing, )? } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { GeoMapIndex::new_mmap(&map_dir(dir, field), *is_on_disk)? } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { GeoMapIndex::new_gridstore(map_dir(dir, field), create_if_missing)? 
} }) } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] fn geo_builder( &self, field: &JsonPath, #[cfg(feature = "rocksdb")] make_rocksdb: fn( super::geo_index::GeoMapIndexBuilder, ) -> FieldIndexBuilder, make_mmap: fn(GeoMapIndexMmapBuilder) -> FieldIndexBuilder, make_gridstore: fn(GeoMapIndexGridstoreBuilder) -> FieldIndexBuilder, ) -> OperationResult<FieldIndexBuilder> { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, .. }) => { make_rocksdb(GeoMapIndex::builder(Arc::clone(db), &field.to_string())?) } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { make_mmap(GeoMapIndex::builder_mmap(&map_dir(dir, field), *is_on_disk)) } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { make_gridstore(GeoMapIndex::builder_gridstore(map_dir(dir, field))) } }) } pub fn null_builder(dir: &Path, field: &JsonPath) -> OperationResult<FieldIndexBuilder> { // null index is always on disk and appendable Ok(FieldIndexBuilder::NullIndex(MutableNullIndex::builder( &null_dir(dir, field), )?)) } pub fn new_null_index( dir: &Path, field: &JsonPath, total_point_count: usize, create_if_missing: bool, ) -> OperationResult<Option<FieldIndex>> { // null index is always on disk and is appendable Ok( MutableNullIndex::open(&null_dir(dir, field), total_point_count, create_if_missing)? .map(FieldIndex::NullIndex), ) } fn text_new( &self, field: &JsonPath, config: TextIndexParams, create_if_missing: bool, ) -> OperationResult<Option<FullTextIndex>> { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable }) => { FullTextIndex::new_rocksdb( Arc::clone(db), config, &field.to_string(), *is_appendable, create_if_missing, )? } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { FullTextIndex::new_mmap(text_dir(dir, field), config, *is_on_disk)? 
} IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { FullTextIndex::new_gridstore(text_dir(dir, field), config, create_if_missing)? } }) } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] fn text_builder( &self, field: &JsonPath, config: TextIndexParams, ) -> OperationResult<FieldIndexBuilder> { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable }) => { FieldIndexBuilder::FullTextIndex(FullTextIndex::builder_rocksdb( Arc::clone(db), config, &field.to_string(), *is_appendable, )?) } IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk }) => { FieldIndexBuilder::FullTextMmapIndex(FullTextIndex::builder_mmap( text_dir(dir, field), config, *is_on_disk, )) } IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { FieldIndexBuilder::FullTextGridstoreIndex(FullTextIndex::builder_gridstore( text_dir(dir, field), config, )) } }) } fn bool_builder(&self, field: &JsonPath) -> OperationResult<FieldIndexBuilder> { match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: _, }) => Ok(FieldIndexBuilder::BoolIndex(SimpleBoolIndex::builder( Arc::clone(db), &field.to_string(), )?)), IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk: _ }) => { let dir = bool_dir(dir, field); Ok(FieldIndexBuilder::BoolMmapIndex(MutableBoolIndex::builder( &dir, )?)) } // Skip Gridstore for boolean index, mmap index is simpler and is also mutable IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { let dir = bool_dir(dir, field); Ok(FieldIndexBuilder::BoolMmapIndex(MutableBoolIndex::builder( &dir, )?)) } } } fn bool_new( &self, field: &JsonPath, create_if_missing: bool, ) -> OperationResult<Option<BoolIndex>> { Ok(match self { #[cfg(feature = "rocksdb")] IndexSelector::RocksDb(IndexSelectorRocksDb { db, is_appendable: _, }) => SimpleBoolIndex::new(Arc::clone(db), &field.to_string(), create_if_missing)? 
.map(BoolIndex::Simple), IndexSelector::Mmap(IndexSelectorMmap { dir, is_on_disk: _ }) => { let dir = bool_dir(dir, field); MutableBoolIndex::open(&dir, create_if_missing)?.map(BoolIndex::Mmap) } // Skip Gridstore for boolean index, mmap index is simpler and is also mutable IndexSelector::Gridstore(IndexSelectorGridstore { dir }) => { let dir = bool_dir(dir, field); MutableBoolIndex::open(&dir, create_if_missing)?.map(BoolIndex::Mmap) } }) } } fn map_dir(dir: &Path, field: &JsonPath) -> PathBuf { dir.join(format!("{}-map", &field.filename())) } fn numeric_dir(dir: &Path, field: &JsonPath) -> PathBuf { dir.join(format!("{}-numeric", &field.filename())) } fn text_dir(dir: &Path, field: &JsonPath) -> PathBuf { dir.join(format!("{}-text", &field.filename())) } fn bool_dir(dir: &Path, field: &JsonPath) -> PathBuf { dir.join(format!("{}-bool", &field.filename())) } fn null_dir(dir: &Path, field: &JsonPath) -> PathBuf { dir.join(format!("{}-null", &field.filename())) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/geo_index/mutable_geo_index.rs
lib/segment/src/index/field_index/geo_index/mutable_geo_index.rs
use std::cmp::max;
use std::collections::BTreeMap;
use std::path::PathBuf;
#[cfg(feature = "rocksdb")]
use std::sync::Arc;

use ahash::AHashSet;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use delegate::delegate;
use gridstore::Gridstore;
use gridstore::config::StorageOptions;
#[cfg(feature = "rocksdb")]
use parking_lot::RwLock;
#[cfg(feature = "rocksdb")]
use rocksdb::DB;

#[cfg(feature = "rocksdb")]
use super::GeoMapIndex;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
#[cfg(feature = "rocksdb")]
use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper;
#[cfg(feature = "rocksdb")]
use crate::common::rocksdb_wrapper::DatabaseColumnWrapper;
use crate::index::field_index::geo_hash::{GeoHash, encode_max_precision};
use crate::index::payload_config::StorageType;
use crate::types::{GeoPoint, RawGeoPoint};

/// Default options for Gridstore storage
const GRIDSTORE_OPTIONS: StorageOptions = StorageOptions {
    // Size of geo point values in index
    block_size_bytes: Some(size_of::<RawGeoPoint>()),
    // Compressing geo point values is unreasonable
    compression: Some(gridstore::config::Compression::None),
    // Scale page size down with block size, prevents overhead of first page when there's (almost) no values
    page_size_bytes: Some(size_of::<RawGeoPoint>() * 8192 * 32), // 4 to 8 MiB = block_size * region_blocks * regions,
    region_size_blocks: None,
};

/// Appendable geo index.
///
/// Queries are answered from `in_memory_index`; every mutation is first written
/// to the persistent `storage` backend and then mirrored in memory, so both
/// stay in sync.
pub struct MutableGeoMapIndex {
    in_memory_index: InMemoryGeoMapIndex,
    storage: Storage,
}

/// Persistent backing storage for the mutable geo index.
enum Storage {
    #[cfg(feature = "rocksdb")]
    RocksDb(DatabaseColumnScheduledDeleteWrapper),
    Gridstore(Gridstore<Vec<RawGeoPoint>>),
}

/// In-memory representation of the geo index.
///
/// Keeps per-geohash-prefix point/value counters plus forward
/// (point -> geo points) and inverted (geohash -> point ids) mappings.
pub struct InMemoryGeoMapIndex {
    /*
    Number of distinct points per geohash prefix, e.g.:
    {
        "d": 10,
        "dr": 10,
        "dr5": 4,
        "dr5r": 3,
        "dr5ru": 1,
        "dr5rr": 2,
        ...
    }
    */
    pub points_per_hash: BTreeMap<GeoHash, usize>,
    // Same shape as `points_per_hash`, but counts individual values (a point may
    // contribute several values).
    pub values_per_hash: BTreeMap<GeoHash, usize>,
    /*
    Inverted index: max-precision geohash -> ids of points located there, e.g.:
    {
        "dr5ru": {1},
        "dr5rr": {2, 3},
        ...
    }
    */
    pub points_map: BTreeMap<GeoHash, AHashSet<PointOffsetType>>,
    // Forward index: point offset -> its geo point values.
    pub point_to_values: Vec<Vec<GeoPoint>>,
    // Number of points with at least one value.
    pub points_count: usize,
    // Total number of stored values across all points.
    pub points_values_count: usize,
    // Largest number of values observed on a single point (zero when empty).
    pub max_values_per_point: usize,
}

impl MutableGeoMapIndex {
    /// Open and load mutable geo index from RocksDB storage
    #[cfg(feature = "rocksdb")]
    pub fn open_rocksdb(
        db: Arc<RwLock<DB>>,
        store_cf_name: &str,
        create_if_missing: bool,
    ) -> OperationResult<Option<Self>> {
        let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new(
            db,
            store_cf_name,
        ));
        if !db_wrapper.has_column_family()? {
            if create_if_missing {
                db_wrapper.recreate_column_family()?;
            } else {
                // Column family doesn't exist, cannot load
                return Ok(None);
            }
        };

        // Load in-memory index from RocksDB
        // Each DB entry is one (geohash, point id) -> geo point record.
        let mut in_memory_index = InMemoryGeoMapIndex::new();
        let mut points_to_hashes: BTreeMap<PointOffsetType, Vec<GeoHash>> = Default::default();

        for (key, value) in db_wrapper.lock_db().iter()? {
            let (geo_hash, idx) = GeoMapIndex::decode_db_key(key)?;
            let geo_point = GeoMapIndex::decode_db_value(value)?;

            if in_memory_index.point_to_values.len() <= idx as usize {
                in_memory_index
                    .point_to_values
                    .resize_with(idx as usize + 1, Vec::new);
            }

            // First value seen for this point: it becomes a counted point.
            if in_memory_index.point_to_values[idx as usize].is_empty() {
                in_memory_index.points_count += 1;
            }

            points_to_hashes.entry(idx).or_default().push(geo_hash);
            in_memory_index.point_to_values[idx as usize].push(geo_point);
            in_memory_index
                .points_map
                .entry(geo_hash)
                .or_default()
                .insert(idx);
            in_memory_index.points_values_count += 1;
        }

        // Second pass: per-prefix counters need all hashes of a point at once.
        for (_idx, geo_hashes) in points_to_hashes {
            in_memory_index.max_values_per_point =
                max(in_memory_index.max_values_per_point, geo_hashes.len());
            in_memory_index.increment_hash_point_counts(&geo_hashes);
            for geo_hash in geo_hashes {
                in_memory_index.increment_hash_value_counts(&geo_hash);
            }
        }

        Ok(Some(Self {
            in_memory_index,
            storage: Storage::RocksDb(db_wrapper),
        }))
    }

    /// Open and load mutable geo index from Gridstore storage
    ///
    /// The `create_if_missing` parameter indicates whether to create a new Gridstore if it does
    /// not exist. If false and files don't exist, the load function will indicate nothing could be
    /// loaded.
    pub fn open_gridstore(path: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> {
        let store = if create_if_missing {
            Gridstore::open_or_create(path, GRIDSTORE_OPTIONS).map_err(|err| {
                OperationError::service_error(format!(
                    "failed to open mutable geo index on gridstore: {err}"
                ))
            })?
        } else if path.exists() {
            Gridstore::open(path).map_err(|err| {
                OperationError::service_error(format!(
                    "failed to open mutable geo index on gridstore: {err}"
                ))
            })?
        } else {
            // Files don't exist, cannot load
            return Ok(None);
        };

        // Load in-memory index from Gridstore
        // Gridstore stores all values of a point in one record, so a single pass
        // suffices (unlike the RocksDB loader above).
        let mut in_memory_index = InMemoryGeoMapIndex::new();
        let hw_counter = HardwareCounterCell::disposable();
        // NOTE(review): load path charges the *write* IO counter — confirm intended.
        let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter();
        store
            .iter::<_, OperationError>(
                |idx, values: Vec<RawGeoPoint>| {
                    let geo_points = values.into_iter().map(GeoPoint::from).collect::<Vec<_>>();
                    let geo_hashes = geo_points
                        .iter()
                        .map(|geo_point| {
                            encode_max_precision(geo_point.lon.0, geo_point.lat.0).map_err(|e| {
                                OperationError::service_error(format!("Malformed geo points: {e}"))
                            })
                        })
                        .collect::<Result<Vec<_>, _>>()?;
                    for geo_point in geo_points {
                        if in_memory_index.point_to_values.len() <= idx as usize {
                            in_memory_index
                                .point_to_values
                                .resize_with(idx as usize + 1, Vec::new);
                        }
                        if in_memory_index.point_to_values[idx as usize].is_empty() {
                            in_memory_index.points_count += 1;
                        }
                        in_memory_index.point_to_values[idx as usize].push(geo_point);
                        in_memory_index.points_values_count += 1;
                    }
                    in_memory_index.max_values_per_point =
                        max(in_memory_index.max_values_per_point, geo_hashes.len());
                    in_memory_index.increment_hash_point_counts(&geo_hashes);
                    for geo_hash in geo_hashes {
                        in_memory_index.increment_hash_value_counts(&geo_hash);
                        in_memory_index
                            .points_map
                            .entry(geo_hash)
                            .or_default()
                            .insert(idx);
                    }
                    // `true` keeps the gridstore iteration going.
                    Ok(true)
                },
                hw_counter_ref,
            )
            .map_err(|err| {
                OperationError::service_error(format!(
                    "Failed to load mutable geo index from gridstore: {err}"
                ))
            })?;

        Ok(Some(Self {
            in_memory_index,
            storage: Storage::Gridstore(store),
        }))
    }

    /// Remove all data from the backing storage (in-memory state is untouched).
    #[cfg_attr(not(feature = "rocksdb"), expect(dead_code))]
    #[inline]
    pub(super) fn clear(&mut self) -> OperationResult<()> {
        match &mut self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(db_wrapper) => db_wrapper.recreate_column_family(),
            Storage::Gridstore(store) => store.clear().map_err(|err| {
                OperationError::service_error(format!("Failed to clear mutable geo index: {err}",))
            }),
        }
    }

    /// Consume the index and delete its backing storage from disk.
    #[inline]
    pub(super) fn wipe(self) -> OperationResult<()> {
        match self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(),
            Storage::Gridstore(store) => store.wipe().map_err(|err| {
                OperationError::service_error(format!("Failed to wipe mutable geo index: {err}",))
            }),
        }
    }

    /// Clear cache
    ///
    /// Only clears cache of Gridstore storage if used. Does not clear in-memory representation of
    /// index.
    pub fn clear_cache(&self) -> OperationResult<()> {
        match &self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(_) => Ok(()),
            Storage::Gridstore(index) => index.clear_cache().map_err(|err| {
                OperationError::service_error(format!(
                    "Failed to clear mutable geo index gridstore cache: {err}"
                ))
            }),
        }
    }

    /// List files owned by this index on disk (empty for RocksDB, which manages
    /// its own files).
    #[inline]
    pub(super) fn files(&self) -> Vec<PathBuf> {
        match &self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(_) => vec![],
            Storage::Gridstore(store) => store.files(),
        }
    }

    /// Return a closure which flushes pending writes of the backing storage.
    #[inline]
    pub(super) fn flusher(&self) -> Flusher {
        match &self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(db_wrapper) => db_wrapper.flusher(),
            Storage::Gridstore(store) => {
                let storage_flusher = store.flusher();
                Box::new(move || {
                    storage_flusher().map_err(|err| {
                        OperationError::service_error(format!(
                            "Failed to flush mutable geo index gridstore: {err}"
                        ))
                    })
                })
            }
        }
    }

    /// Set the geo point values for point `idx`.
    ///
    /// Writes to persistent storage first, then mirrors the change in memory.
    pub fn add_many_geo_points(
        &mut self,
        idx: PointOffsetType,
        values: &[GeoPoint],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Update persisted storage
        match &mut self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(db_wrapper) => {
                // One DB record per value, keyed by (max-precision geohash, idx).
                for added_point in values {
                    let added_geo_hash: GeoHash =
                        encode_max_precision(added_point.lon.0, added_point.lat.0).map_err(
                            |e| OperationError::service_error(format!("Malformed geo points: {e}")),
                        )?;
                    let key = GeoMapIndex::encode_db_key(added_geo_hash, idx);
                    let value = GeoMapIndex::encode_db_value(added_point);
                    db_wrapper.put(&key, value)?;
                }
            }
            // We cannot store empty value, then delete instead
            Storage::Gridstore(store) if values.is_empty() => {
                store.delete_value(idx);
            }
            Storage::Gridstore(store) => {
                let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter();
                let values = values
                    .iter()
                    .cloned()
                    .map(RawGeoPoint::from)
                    .collect::<Vec<_>>();
                store
                    .put_value(idx, &values, hw_counter_ref)
                    .map_err(|err| {
                        OperationError::service_error(format!(
                            "failed to put value in mutable geo index gridstore: {err}"
                        ))
                    })?;
            }
        }

        self.in_memory_index
            .add_many_geo_points(idx, values, hw_counter)
    }

    /// Remove all values of point `idx` from storage and memory.
    /// No-op if the point has no values.
    pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> {
        // Update persisted storage
        match &mut self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(db_wrapper) => {
                // DB keys are derived from the stored values, so look them up in
                // the in-memory forward index first.
                let Some(geo_points_to_remove) =
                    self.in_memory_index.point_to_values.get(idx as usize)
                else {
                    return Ok(());
                };
                for removed_geo_point in geo_points_to_remove {
                    let geo_hash_to_remove: GeoHash =
                        encode_max_precision(removed_geo_point.lon.0, removed_geo_point.lat.0)
                            .map_err(|e| {
                                OperationError::service_error(format!("Malformed geo points: {e}"))
                            })?;
                    let key = GeoMapIndex::encode_db_key(geo_hash_to_remove, idx);
                    db_wrapper.remove(&key)?;
                }
            }
            Storage::Gridstore(store) => {
                store.delete_value(idx);
            }
        }

        self.in_memory_index.remove_point(idx)
    }

    /// Number of points with at least one geo value.
    pub fn points_count(&self) -> usize {
        self.in_memory_index.points_count
    }

    /// Total number of stored geo values across all points.
    pub fn points_values_count(&self) -> usize {
        self.in_memory_index.points_values_count
    }

    /// Largest number of values on a single point (zero when index is empty).
    pub fn max_values_per_point(&self) -> usize {
        self.in_memory_index.max_values_per_point
    }

    /// Consume the wrapper and return just the in-memory index
    /// (used e.g. when converting to an immutable index).
    pub fn into_in_memory_index(self) -> InMemoryGeoMapIndex {
        self.in_memory_index
    }

    /// Iterate geo values of point `idx`, or `None` if the point is unknown.
    pub fn get_values(&self, idx: u32) -> Option<impl Iterator<Item = &GeoPoint> + '_> {
        self.in_memory_index
            .point_to_values
            .get(idx as usize)
            .map(|v| v.iter())
    }

    /// Whether the backing storage is RocksDB (vs Gridstore).
    #[cfg(feature = "rocksdb")]
    pub fn is_rocksdb(&self) -> bool {
        match self.storage {
            Storage::RocksDb(_) => true,
            Storage::Gridstore(_) => false,
        }
    }

    // Read-only accessors are forwarded verbatim to the in-memory index.
    delegate! {
        to self.in_memory_index {
            pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&GeoPoint) -> bool) -> bool;
            pub fn values_count(&self, idx: PointOffsetType) -> usize;
            pub fn points_per_hash(&self) -> impl Iterator<Item = (&GeoHash, usize)>;
            pub fn points_of_hash(&self, hash: &GeoHash) -> usize;
            pub fn values_of_hash(&self, hash: &GeoHash) -> usize;
            pub fn stored_sub_regions(
                &self,
                geo: GeoHash,
            ) -> impl Iterator<Item = PointOffsetType>;
        }
    }

    /// Storage backend descriptor, persisted in the payload index config.
    pub fn storage_type(&self) -> StorageType {
        match &self.storage {
            #[cfg(feature = "rocksdb")]
            Storage::RocksDb(_) => StorageType::RocksDb,
            Storage::Gridstore(_) => StorageType::Gridstore,
        }
    }
}

impl Default for InMemoryGeoMapIndex {
    fn default() -> Self {
        Self::new()
    }
}

impl InMemoryGeoMapIndex {
    /// Create an empty in-memory geo index.
    pub fn new() -> Self {
        Self {
            points_per_hash: Default::default(),
            values_per_hash: Default::default(),
            points_map: Default::default(),
            point_to_values: vec![],
            points_count: 0,
            points_values_count: 0,
            max_values_per_point: 0,
        }
    }

    /// Whether any geo value of point `idx` satisfies `check_fn`.
    /// Unknown points yield `false`.
    pub fn check_values_any(
        &self,
        idx: PointOffsetType,
        check_fn: impl Fn(&GeoPoint) -> bool,
    ) -> bool {
        self.point_to_values
            .get(idx as usize)
            .map(|values| values.iter().any(check_fn))
            .unwrap_or(false)
    }

    /// Number of geo values stored for point `idx` (zero for unknown points).
    pub fn values_count(&self, idx: PointOffsetType) -> usize {
        self.point_to_values
            .get(idx as usize)
            .map(Vec::len)
            .unwrap_or_default()
    }

    /// Iterate all (geohash prefix, point count) pairs in prefix order.
    pub fn points_per_hash(&self) -> impl Iterator<Item = (&GeoHash, usize)> {
        self.points_per_hash
            .iter()
            .map(|(hash, count)| (hash, *count))
    }

    /// Number of distinct points under the given geohash prefix.
    pub fn points_of_hash(&self, hash: &GeoHash) -> usize {
        self.points_per_hash.get(hash).copied().unwrap_or(0)
    }

    /// Number of values under the given geohash prefix.
    pub fn values_of_hash(&self, hash: &GeoHash) -> usize {
        self.values_per_hash.get(hash).copied().unwrap_or(0)
    }

    /// Remove point `idx` and roll back all counters and mappings it contributed to.
    pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> {
        if self.point_to_values.len() <= idx as usize {
            return Ok(()); // Already removed or never actually existed
        }

        let removed_geo_points = std::mem::take(&mut self.point_to_values[idx as usize]);

        if removed_geo_points.is_empty() {
            return Ok(());
        }

        self.points_count -= 1;
        self.points_values_count -= removed_geo_points.len();
        let mut removed_geo_hashes = Vec::with_capacity(removed_geo_points.len());

        for removed_geo_point in removed_geo_points {
            let removed_geo_hash: GeoHash =
                encode_max_precision(removed_geo_point.lon.0, removed_geo_point.lat.0).map_err(
                    |e| OperationError::service_error(format!("Malformed geo points: {e}")),
                )?;
            removed_geo_hashes.push(removed_geo_hash);

            // Drop the inverted-index entry; remove the whole bucket if this was
            // the last point stored under this hash.
            let is_last = if let Some(hash_ids) = self.points_map.get_mut(&removed_geo_hash) {
                hash_ids.remove(&idx);
                hash_ids.is_empty()
            } else {
                log::warn!("Geo index error: no points for hash {removed_geo_hash} was found");
                false
            };
            if is_last {
                self.points_map.remove(&removed_geo_hash);
            }

            self.decrement_hash_value_counts(&removed_geo_hash);
        }
        self.decrement_hash_point_counts(&removed_geo_hashes);
        Ok(())
    }

    /// Set the geo values of point `idx`, updating all mappings and counters.
    ///
    /// NOTE(review): values are *assigned* (not appended), and `points_count`
    /// is incremented unconditionally — assumes the point had no prior values;
    /// confirm against callers.
    pub fn add_many_geo_points(
        &mut self,
        idx: PointOffsetType,
        values: &[GeoPoint],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        if values.is_empty() {
            return Ok(());
        }

        if self.point_to_values.len() <= idx as usize {
            // That's a smart reallocation
            self.point_to_values.resize_with(idx as usize + 1, Vec::new);
        }

        self.point_to_values[idx as usize] = values.to_vec();

        let mut geo_hashes = vec![];

        let mut hw_cell_wb = hw_counter
            .payload_index_io_write_counter()
            .write_back_counter();

        for added_point in values {
            let added_geo_hash: GeoHash =
                encode_max_precision(added_point.lon.0, added_point.lat.0).map_err(|e| {
                    OperationError::service_error(format!("Malformed geo points: {e}"))
                })?;

            hw_cell_wb.incr_delta(size_of_val(&added_geo_hash));
            geo_hashes.push(added_geo_hash);
        }

        for geo_hash in &geo_hashes {
            self.points_map
                .entry(geo_hash.to_owned())
                .or_default()
                .insert(idx);

            self.increment_hash_value_counts(geo_hash);
        }

        hw_cell_wb.incr_delta(geo_hashes.len() * size_of::<PointOffsetType>());
        self.increment_hash_point_counts(&geo_hashes);

        self.points_values_count += values.len();
        self.points_count += 1;
        self.max_values_per_point = self.max_values_per_point.max(values.len());
        Ok(())
    }

    /// Returns an iterator over all point IDs which have the `geohash` prefix.
    /// Note. Point ID may be repeated multiple times in the iterator.
    pub fn stored_sub_regions(&self, geo: GeoHash) -> impl Iterator<Item = PointOffsetType> + '_ {
        // BTreeMap range starting at the prefix; stop at the first key that no
        // longer shares it.
        self.points_map
            .range(geo..)
            .take_while(move |(p, _h)| p.starts_with(geo))
            .flat_map(|(_, points)| points.iter().copied())
    }

    /// Increment the value counter of every prefix of `geo_hash` (including the
    /// empty prefix and the full hash).
    fn increment_hash_value_counts(&mut self, geo_hash: &GeoHash) {
        for i in 0..=geo_hash.len() {
            let sub_geo_hash = geo_hash.truncate(i);
            match self.values_per_hash.get_mut(&sub_geo_hash) {
                None => {
                    self.values_per_hash.insert(sub_geo_hash, 1);
                }
                Some(count) => {
                    *count += 1;
                }
            };
        }
    }

    /// Increment the point counter of every prefix of the given hashes,
    /// counting each distinct prefix only once per point.
    fn increment_hash_point_counts(&mut self, geo_hashes: &[GeoHash]) {
        let mut seen_hashes: AHashSet<GeoHash> = Default::default();

        for geo_hash in geo_hashes {
            for i in 0..=geo_hash.len() {
                let sub_geo_hash = geo_hash.truncate(i);
                if seen_hashes.contains(&sub_geo_hash) {
                    continue;
                }
                seen_hashes.insert(sub_geo_hash);
                match self.points_per_hash.get_mut(&sub_geo_hash) {
                    None => {
                        self.points_per_hash.insert(sub_geo_hash, 1);
                    }
                    Some(count) => {
                        *count += 1;
                    }
                };
            }
        }
    }

    /// Inverse of `increment_hash_value_counts`.
    fn decrement_hash_value_counts(&mut self, geo_hash: &GeoHash) {
        for i in 0..=geo_hash.len() {
            let sub_geo_hash = geo_hash.truncate(i);
            match self.values_per_hash.get_mut(&sub_geo_hash) {
                None => {
                    // Missing counter means the index got inconsistent; repair
                    // with zero in release builds, assert in debug.
                    debug_assert!(
                        false,
                        "Hash value count is not found for hash: {sub_geo_hash}",
                    );
                    self.values_per_hash.insert(sub_geo_hash, 0);
                }
                Some(count) => {
                    *count -= 1;
                }
            };
        }
    }

    /// Inverse of `increment_hash_point_counts`.
    fn decrement_hash_point_counts(&mut self, geo_hashes: &[GeoHash]) {
        let mut seen_hashes: AHashSet<GeoHash> = Default::default();
        for geo_hash in geo_hashes {
            for i in 0..=geo_hash.len() {
                let sub_geo_hash = geo_hash.truncate(i);
                if seen_hashes.contains(&sub_geo_hash) {
                    continue;
                }
                seen_hashes.insert(sub_geo_hash);
                match self.points_per_hash.get_mut(&sub_geo_hash) {
                    None => {
                        debug_assert!(
                            false,
                            "Hash point count is not found for hash: {sub_geo_hash}",
                        );
                        self.points_per_hash.insert(sub_geo_hash, 0);
                    }
                    Some(count) => {
                        *count -= 1;
                    }
                };
            }
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/geo_index/mod.rs
lib/segment/src/index/field_index/geo_index/mod.rs
use std::cmp::{max, min};
#[cfg(feature = "rocksdb")]
use std::io::Write;
use std::path::{Path, PathBuf};
#[cfg(feature = "rocksdb")]
use std::str::FromStr;
#[cfg(feature = "rocksdb")]
use std::sync::Arc;

use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use mutable_geo_index::InMemoryGeoMapIndex;
#[cfg(feature = "rocksdb")]
use parking_lot::RwLock;
#[cfg(feature = "rocksdb")]
use rocksdb::DB;
use serde_json::Value;
#[cfg(feature = "rocksdb")]
use smallvec::SmallVec;

use self::immutable_geo_index::ImmutableGeoMapIndex;
use self::mmap_geo_index::MmapGeoMapIndex;
use self::mutable_geo_index::MutableGeoMapIndex;
use super::FieldIndexBuilderTrait;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::index::field_index::geo_hash::{
    GeoHash, circle_hashes, common_hash_prefix, geo_hash_to_box, polygon_hashes,
    polygon_hashes_estimation, rectangle_hashes,
};
use crate::index::field_index::stat_tools::estimate_multi_value_selection_cardinality;
use crate::index::field_index::{
    CardinalityEstimation, PayloadBlockCondition, PayloadFieldIndex, PrimaryCondition, ValueIndexer,
};
use crate::index::payload_config::{IndexMutability, StorageType};
use crate::telemetry::PayloadIndexTelemetry;
use crate::types::{FieldCondition, GeoPoint, PayloadKeyType};

pub mod immutable_geo_index;
pub mod mmap_geo_index;
pub mod mutable_geo_index;

/// Max number of sub-regions computed for an input geo query
// TODO discuss value, should it be dynamically computed?
const GEO_QUERY_MAX_REGION: usize = 12;

/// Payload index for geo-point fields, dispatching to one of three
/// implementations: appendable (mutable), immutable in-memory, or mmap-backed.
pub enum GeoMapIndex {
    Mutable(MutableGeoMapIndex),
    Immutable(ImmutableGeoMapIndex),
    Mmap(Box<MmapGeoMapIndex>),
}

impl GeoMapIndex {
    /// Open a RocksDB-backed geo index for `field`; mutable when `is_appendable`.
    /// Returns `Ok(None)` if storage is absent and `create_if_missing` is false.
    #[cfg(feature = "rocksdb")]
    pub fn new_memory(
        db: Arc<RwLock<DB>>,
        field: &str,
        is_appendable: bool,
        create_if_missing: bool,
    ) -> OperationResult<Option<Self>> {
        let store_cf_name = GeoMapIndex::storage_cf_name(field);
        let index = if is_appendable {
            MutableGeoMapIndex::open_rocksdb(db, &store_cf_name, create_if_missing)?
                .map(GeoMapIndex::Mutable)
        } else {
            ImmutableGeoMapIndex::open_rocksdb(db, &store_cf_name)?.map(GeoMapIndex::Immutable)
        };
        Ok(index)
    }

    /// Open a mmap-backed geo index at `path`.
    /// When `is_on_disk` is false the mmap data is lifted into an immutable
    /// in-memory index instead.
    pub fn new_mmap(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> {
        let Some(mmap_index) = MmapGeoMapIndex::open(path, is_on_disk)? else {
            // Files don't exist, cannot load
            return Ok(None);
        };

        let index = if is_on_disk {
            GeoMapIndex::Mmap(Box::new(mmap_index))
        } else {
            GeoMapIndex::Immutable(ImmutableGeoMapIndex::open_mmap(mmap_index))
        };
        Ok(Some(index))
    }

    /// Open (or create) a Gridstore-backed mutable geo index in `dir`.
    pub fn new_gridstore(dir: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> {
        Ok(MutableGeoMapIndex::open_gridstore(dir, create_if_missing)?.map(GeoMapIndex::Mutable))
    }

    /// Builder producing a mutable RocksDB-backed geo index.
    #[cfg(feature = "rocksdb")]
    pub fn builder(db: Arc<RwLock<DB>>, field: &str) -> OperationResult<GeoMapIndexBuilder> {
        let index = Self::new_memory(db, field, true, true)?.ok_or_else(|| {
            OperationError::service_error("Failed to open GeoMapIndex after creating it")
        })?;
        Ok(GeoMapIndexBuilder(index))
    }

    /// Test-only builder producing an immutable RocksDB-backed geo index.
    #[cfg(all(test, feature = "rocksdb"))]
    pub fn builder_immutable(
        db: Arc<RwLock<DB>>,
        field: &str,
    ) -> OperationResult<GeoMapImmutableIndexBuilder> {
        let index = Self::new_memory(db.clone(), field, true, true)?.ok_or_else(|| {
            OperationError::service_error("Failed to open GeoMapIndex after creating it")
        })?;
        Ok(GeoMapImmutableIndexBuilder {
            index,
            field: field.to_owned(),
            db,
        })
    }

    /// Builder producing a mmap-backed geo index at `path`.
    pub fn builder_mmap(path: &Path, is_on_disk: bool) -> GeoMapIndexMmapBuilder {
        GeoMapIndexMmapBuilder {
            path: path.to_owned(),
            in_memory_index: InMemoryGeoMapIndex::new(),
            is_on_disk,
        }
    }

    /// Builder producing a Gridstore-backed geo index in `dir`.
    pub fn builder_gridstore(dir: PathBuf) -> GeoMapIndexGridstoreBuilder {
        GeoMapIndexGridstoreBuilder::new(dir)
    }

    /// Number of points with at least one geo value.
    fn points_count(&self) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.points_count(),
            GeoMapIndex::Immutable(index) => index.points_count(),
            GeoMapIndex::Mmap(index) => index.points_count(),
        }
    }

    /// Total number of stored geo values across all points.
    fn points_values_count(&self) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.points_values_count(),
            GeoMapIndex::Immutable(index) => index.points_values_count(),
            GeoMapIndex::Mmap(index) => index.points_values_count(),
        }
    }

    /// Maximum number of values per point
    ///
    /// # Warning
    ///
    /// Zero if the index is empty.
    fn max_values_per_point(&self) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.max_values_per_point(),
            GeoMapIndex::Immutable(index) => index.max_values_per_point(),
            GeoMapIndex::Mmap(index) => index.max_values_per_point(),
        }
    }

    /// Number of distinct points under the given geohash prefix.
    fn points_of_hash(&self, hash: &GeoHash, hw_counter: &HardwareCounterCell) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.points_of_hash(hash),
            GeoMapIndex::Immutable(index) => index.points_of_hash(hash),
            GeoMapIndex::Mmap(index) => index.points_of_hash(hash, hw_counter),
        }
    }

    /// Number of values under the given geohash prefix.
    fn values_of_hash(&self, hash: &GeoHash, hw_counter: &HardwareCounterCell) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.values_of_hash(hash),
            GeoMapIndex::Immutable(index) => index.values_of_hash(hash),
            GeoMapIndex::Mmap(index) => index.values_of_hash(hash, hw_counter),
        }
    }

    /// RocksDB column family name for this field's geo index.
    #[cfg(feature = "rocksdb")]
    fn storage_cf_name(field: &str) -> String {
        format!("{field}_geo")
    }

    /// Encode db key
    ///
    /// Maximum length is 23 bytes, e.g.: `dr5ruj4477kd/4294967295`
    #[cfg(feature = "rocksdb")]
    fn encode_db_key(value: GeoHash, idx: PointOffsetType) -> SmallVec<[u8; 23]> {
        let mut result = SmallVec::new();
        write!(result, "{value}/{idx}").unwrap();
        result
    }

    /// Decode a `geohash/idx` db key produced by [`Self::encode_db_key`].
    #[cfg(feature = "rocksdb")]
    fn decode_db_key<K>(s: K) -> OperationResult<(GeoHash, PointOffsetType)>
    where
        K: AsRef<[u8]>,
    {
        const DECODE_ERR: &str = "Index db parsing error: wrong data format";
        let s = s.as_ref();
        // Split on the *last* '/' so geohash content can't confuse parsing.
        let separator_pos = s
            .iter()
            .rposition(|b| b == &b'/')
            .ok_or_else(|| OperationError::service_error(DECODE_ERR))?;
        if separator_pos == s.len() - 1 {
            return Err(OperationError::service_error(DECODE_ERR));
        }
        let geohash = &s[..separator_pos];
        let idx_bytes = &s[separator_pos + 1..];
        // Use `from_ascii_radix` here once stabilized instead of intermediate string reference
        let idx = PointOffsetType::from_str(std::str::from_utf8(idx_bytes).map_err(|_| {
            OperationError::service_error("Index load error: UTF8 error while DB parsing")
        })?)
        .map_err(|_| OperationError::service_error(DECODE_ERR))?;
        Ok((GeoHash::new(geohash).map_err(OperationError::from)?, idx))
    }

    /// Decode a 16-byte big-endian (lat, lon) pair written by [`Self::encode_db_value`].
    #[cfg(feature = "rocksdb")]
    fn decode_db_value<T: AsRef<[u8]>>(value: T) -> OperationResult<GeoPoint> {
        let lat_bytes = value.as_ref()[0..8]
            .try_into()
            .map_err(|_| OperationError::service_error("invalid lat encoding"))?;
        // NOTE(review): error message below says "lat" but this is the lon slice —
        // looks like a copy-paste; message fix would change runtime output.
        let lon_bytes = value.as_ref()[8..16]
            .try_into()
            .map_err(|_| OperationError::service_error("invalid lat encoding"))?;
        let lat = f64::from_be_bytes(lat_bytes);
        let lon = f64::from_be_bytes(lon_bytes);
        Ok(GeoPoint::new_unchecked(lon, lat))
    }

    /// Encode a geo point as 16 bytes: big-endian lat (8) followed by lon (8).
    #[cfg(feature = "rocksdb")]
    fn encode_db_value(value: &GeoPoint) -> [u8; 16] {
        let mut result: [u8; 16] = [0; 16];
        result[0..8].clone_from_slice(&value.lat.to_be_bytes());
        result[8..16].clone_from_slice(&value.lon.to_be_bytes());
        result
    }

    /// Whether any geo value of point `idx` satisfies `check_fn`.
    pub fn check_values_any(
        &self,
        idx: PointOffsetType,
        hw_counter: &HardwareCounterCell,
        check_fn: impl Fn(&GeoPoint) -> bool,
    ) -> bool {
        match self {
            GeoMapIndex::Mutable(index) => index.check_values_any(idx, check_fn),
            GeoMapIndex::Immutable(index) => index.check_values_any(idx, check_fn),
            GeoMapIndex::Mmap(index) => index.check_values_any(idx, hw_counter, check_fn),
        }
    }

    /// Number of geo values stored for point `idx`.
    pub fn values_count(&self, idx: PointOffsetType) -> usize {
        match self {
            GeoMapIndex::Mutable(index) => index.values_count(idx),
            GeoMapIndex::Immutable(index) => index.values_count(idx),
            GeoMapIndex::Mmap(index) => index.values_count(idx),
        }
    }

    /// Iterate geo values of point `idx` (owned), or `None` for unknown points.
    pub fn get_values(
        &self,
        idx: PointOffsetType,
    ) -> Option<Box<dyn Iterator<Item = GeoPoint> + '_>> {
        match self {
            GeoMapIndex::Mutable(index) => index.get_values(idx).map(|x| Box::new(x.cloned()) as _),
            GeoMapIndex::Immutable(index) => {
                index.get_values(idx).map(|x| Box::new(x.cloned()) as _)
            }
            GeoMapIndex::Mmap(index) => index.get_values(idx).map(|x| Box::new(x) as _),
        }
    }

    /// Estimate how many points match a query expressed as a set of geohash
    /// regions, using per-prefix point/value counters.
    pub fn match_cardinality(
        &self,
        values: &[GeoHash],
        hw_counter: &HardwareCounterCell,
    ) -> CardinalityEstimation {
        let max_values_per_point = self.max_values_per_point();
        if max_values_per_point == 0 {
            return CardinalityEstimation::exact(0);
        }

        let Some(common_hash) = common_hash_prefix(values) else {
            return CardinalityEstimation::exact(0);
        };

        let total_points = self.points_of_hash(&common_hash, hw_counter);
        let total_values = self.values_of_hash(&common_hash, hw_counter);

        // Sum of per-region point counts, and the biggest single region.
        let (sum, maximum_per_hash) = values
            .iter()
            .map(|region| self.points_of_hash(region, hw_counter))
            .fold((0, 0), |(sum, maximum), count| {
                (sum + count, max(maximum, count))
            });

        // Assume all selected points have `max_values_per_point` value hits.
        // Therefore number of points can't be less than `total_hits / max_values_per_point`
        // Note: max_values_per_point is never zero here because we check it above
        let min_hits_by_value_groups = sum / max_values_per_point;

        // Assume that we have selected all possible duplications of the points
        let point_duplications = total_values - total_points;
        let possible_non_duplicated = sum.saturating_sub(point_duplications);

        let estimation_min = max(
            max(min_hits_by_value_groups, possible_non_duplicated),
            maximum_per_hash,
        );
        let estimation_max = min(sum, total_points);

        // estimate_multi_value_selection_cardinality might overflow at some corner cases
        // so it is better to limit its value with min and max
        let estimation_exp =
            estimate_multi_value_selection_cardinality(total_points, total_values, sum).round()
                as usize;

        CardinalityEstimation {
            primary_clauses: vec![],
            min: estimation_min,
            exp: min(estimation_max, max(estimation_min, estimation_exp)),
            max: estimation_max,
        }
    }

    /// Telemetry snapshot: sizes plus a tag for the concrete implementation.
    pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry {
        PayloadIndexTelemetry {
            field_name: None,
            points_count: self.points_count(),
            points_values_count: self.points_values_count(),
            histogram_bucket_size: None,
            index_type: match self {
                GeoMapIndex::Mutable(_) => "mutable_geo",
                GeoMapIndex::Immutable(_) => "immutable_geo",
                GeoMapIndex::Mmap(_) => "mmap_geo",
            },
        }
    }

    /// Iterate unique point ids located in any of the given geohash regions.
    fn iterator(&self, values: Vec<GeoHash>) -> Box<dyn Iterator<Item = PointOffsetType> + '_> {
        match self {
            GeoMapIndex::Mutable(index) => Box::new(
                values
                    .into_iter()
                    .flat_map(|top_geo_hash| index.stored_sub_regions(top_geo_hash))
                    .unique(),
            ),
            GeoMapIndex::Immutable(index) => Box::new(
                values
                    .into_iter()
                    .flat_map(|top_geo_hash| index.stored_sub_regions(top_geo_hash))
                    .unique(),
            ),
            GeoMapIndex::Mmap(index) => Box::new(
                values
                    .into_iter()
                    .flat_map(|top_geo_hash| index.stored_sub_regions(top_geo_hash))
                    .unique(),
            ),
        }
    }

    /// Get iterator over smallest geo-hash regions larger than `threshold` points
    fn large_hashes(&self, threshold: usize) -> impl Iterator<Item = (GeoHash, usize)> + '_ {
        let filter_condition =
            |(hash, size): &(GeoHash, usize)| *size > threshold && !hash.is_empty();
        let mut large_regions = match self {
            GeoMapIndex::Mutable(index) => index
                .points_per_hash()
                .map(|(&hash, size)| (hash, size))
                .filter(filter_condition)
                .collect_vec(),
            GeoMapIndex::Immutable(index) => index
                .points_per_hash()
                .map(|(&hash, size)| (hash, size))
                .filter(filter_condition)
                .collect_vec(),
            GeoMapIndex::Mmap(index) => index
                .points_per_hash()
                .filter(filter_condition)
                .collect_vec(),
        };

        // smallest regions first
        large_regions.sort_by(|a, b| b.cmp(a));

        let mut edge_region = vec![];

        let mut current_region = GeoHash::default();

        // Keep only regions not already covered by a previously kept prefix.
        for (region, size) in large_regions {
            if !current_region.starts_with(region) {
                current_region = region;
                edge_region.push((region, size));
            }
        }

        edge_region.into_iter()
    }

    /// Whether point `idx` has no geo values.
    pub fn values_is_empty(&self, idx: PointOffsetType) -> bool {
        self.values_count(idx) == 0
    }

    /// Whether index data is served from disk (true only for the mmap variant
    /// when configured on-disk).
    pub fn is_on_disk(&self) -> bool {
        match self {
            GeoMapIndex::Mutable(_) => false,
            GeoMapIndex::Immutable(_) => false,
            GeoMapIndex::Mmap(index) => index.is_on_disk(),
        }
    }

    /// Whether the underlying storage is RocksDB.
    #[cfg(feature = "rocksdb")]
    pub fn is_rocksdb(&self) -> bool {
        match self {
            GeoMapIndex::Mutable(index) => index.is_rocksdb(),
            GeoMapIndex::Immutable(index) => index.is_rocksdb(),
            GeoMapIndex::Mmap(_) => false,
        }
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> OperationResult<()> {
        match self {
            GeoMapIndex::Mutable(_) => {}   // Not a mmap
            GeoMapIndex::Immutable(_) => {} // Not a mmap
            GeoMapIndex::Mmap(index) => index.populate()?,
        }
        Ok(())
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        match self {
            // Only clears backing mmap storage if used, not in-memory representation
            GeoMapIndex::Mutable(index) => index.clear_cache(),
            // Only clears backing mmap storage if used, not in-memory representation
            GeoMapIndex::Immutable(index) => index.clear_cache(),
            GeoMapIndex::Mmap(index) => index.clear_cache(),
        }
    }

    /// Mutability flag persisted in the payload index config.
    pub fn get_mutability_type(&self) -> IndexMutability {
        match self {
            Self::Mutable(_) => IndexMutability::Mutable,
            Self::Immutable(_) => IndexMutability::Immutable,
            Self::Mmap(_) => IndexMutability::Immutable,
        }
    }

    /// Storage backend descriptor persisted in the payload index config.
    pub fn get_storage_type(&self) -> StorageType {
        match self {
            Self::Mutable(index) => index.storage_type(),
            Self::Immutable(index) => index.storage_type(),
            Self::Mmap(index) => StorageType::Mmap {
                is_on_disk: index.is_on_disk(),
            },
        }
    }
}

/// Builder wrapper around a mutable RocksDB-backed [`GeoMapIndex`].
#[cfg(feature = "rocksdb")]
pub struct GeoMapIndexBuilder(GeoMapIndex);

#[cfg(feature = "rocksdb")]
impl FieldIndexBuilderTrait for GeoMapIndexBuilder {
    type FieldIndexType = GeoMapIndex;

    fn init(&mut self) -> OperationResult<()> {
        match &mut self.0 {
            GeoMapIndex::Mutable(index) => index.clear(),
            GeoMapIndex::Immutable(_) => Err(OperationError::service_error(
                "Cannot use immutable index as a builder type",
            )),
            GeoMapIndex::Mmap(_) => Err(OperationError::service_error(
                "Cannot use mmap index as a builder type",
            )),
        }
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.0.add_point(id, payload, hw_counter)
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        Ok(self.0)
    }
}

/// Test-only builder: builds via a mutable index, finalized into an immutable one.
#[cfg(all(test, feature = "rocksdb"))]
pub struct GeoMapImmutableIndexBuilder {
    index: GeoMapIndex,
    field: String,
    db: Arc<RwLock<DB>>,
}

#[cfg(all(test, feature = "rocksdb"))]
impl FieldIndexBuilderTrait for GeoMapImmutableIndexBuilder {
    type FieldIndexType = GeoMapIndex;

    fn init(&mut self) -> OperationResult<()> {
        match &mut self.index {
            GeoMapIndex::Mutable(index) => index.clear(),
GeoMapIndex::Immutable(_) => Err(OperationError::service_error( "Cannot use immutable index as a builder type", )), GeoMapIndex::Mmap(_) => Err(OperationError::service_error( "Cannot use mmap index as a builder type", )), } } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.index.add_point(id, payload, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { drop(self.index); let immutable_index = GeoMapIndex::new_memory(self.db, &self.field, false, false)? .ok_or_else(|| { OperationError::service_error("Failed to open GeoMapIndex after creating it") })?; Ok(immutable_index) } } pub struct GeoMapIndexMmapBuilder { path: PathBuf, in_memory_index: InMemoryGeoMapIndex, is_on_disk: bool, } impl FieldIndexBuilderTrait for GeoMapIndexMmapBuilder { type FieldIndexType = GeoMapIndex; fn init(&mut self) -> OperationResult<()> { Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let values = payload .iter() .flat_map(|value| <GeoMapIndex as ValueIndexer>::get_values(value)) .collect::<Vec<_>>(); self.in_memory_index .add_many_geo_points(id, &values, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { Ok(GeoMapIndex::Mmap(Box::new(MmapGeoMapIndex::build( self.in_memory_index, &self.path, self.is_on_disk, )?))) } } impl ValueIndexer for GeoMapIndex { type ValueType = GeoPoint; fn add_many( &mut self, id: PointOffsetType, values: Vec<GeoPoint>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { GeoMapIndex::Mutable(index) => index.add_many_geo_points(id, &values, hw_counter), GeoMapIndex::Immutable(_) => Err(OperationError::service_error( "Can't add values to immutable geo index", )), GeoMapIndex::Mmap(_) => Err(OperationError::service_error( "Can't add values to mmap geo index", )), } } fn get_value(value: &Value) -> Option<GeoPoint> { match value { 
Value::Object(obj) => { let lon_op = obj.get("lon").and_then(|x| x.as_f64()); let lat_op = obj.get("lat").and_then(|x| x.as_f64()); if let (Some(lon), Some(lat)) = (lon_op, lat_op) { return GeoPoint::new(lon, lat).ok(); } None } _ => None, } } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { match self { GeoMapIndex::Mutable(index) => index.remove_point(id), GeoMapIndex::Immutable(index) => index.remove_point(id), GeoMapIndex::Mmap(index) => { index.remove_point(id); Ok(()) } } } } pub struct GeoMapIndexGridstoreBuilder { dir: PathBuf, index: Option<GeoMapIndex>, } impl GeoMapIndexGridstoreBuilder { fn new(dir: PathBuf) -> Self { Self { dir, index: None } } } impl FieldIndexBuilderTrait for GeoMapIndexGridstoreBuilder { type FieldIndexType = GeoMapIndex; fn init(&mut self) -> OperationResult<()> { assert!( self.index.is_none(), "index must be initialized exactly once", ); self.index.replace( GeoMapIndex::new_gridstore(self.dir.clone(), true)?.ok_or_else(|| { OperationError::service_error("Failed to open GeoMapIndex after creating it") })?, ); Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let Some(index) = &mut self.index else { return Err(OperationError::service_error( "GeoMapIndexGridstoreBuilder: index must be initialized before adding points", )); }; index.add_point(id, payload, hw_counter) } fn finalize(mut self) -> OperationResult<Self::FieldIndexType> { let Some(index) = self.index.take() else { return Err(OperationError::service_error( "GeoMapIndexGridstoreBuilder: index must be initialized to finalize", )); }; index.flusher()()?; Ok(index) } } impl PayloadFieldIndex for GeoMapIndex { fn count_indexed_points(&self) -> usize { self.points_count() } fn wipe(self) -> OperationResult<()> { match self { GeoMapIndex::Mutable(index) => index.wipe(), GeoMapIndex::Immutable(index) => index.wipe(), GeoMapIndex::Mmap(index) => index.wipe(), } } fn 
flusher(&self) -> Flusher { match self { GeoMapIndex::Mutable(index) => index.flusher(), GeoMapIndex::Immutable(index) => index.flusher(), GeoMapIndex::Mmap(index) => index.flusher(), } } fn files(&self) -> Vec<PathBuf> { match &self { GeoMapIndex::Mutable(index) => index.files(), GeoMapIndex::Immutable(index) => index.files(), GeoMapIndex::Mmap(index) => index.files(), } } fn immutable_files(&self) -> Vec<PathBuf> { match &self { GeoMapIndex::Mutable(_) => vec![], GeoMapIndex::Immutable(index) => index.immutable_files(), GeoMapIndex::Mmap(index) => index.immutable_files(), } } fn filter<'a>( &'a self, condition: &FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { if let Some(geo_bounding_box) = &condition.geo_bounding_box { let geo_hashes = rectangle_hashes(geo_bounding_box, GEO_QUERY_MAX_REGION).ok()?; let geo_condition_copy = *geo_bounding_box; return Some(Box::new(self.iterator(geo_hashes).filter(move |point| { self.check_values_any(*point, hw_counter, |geo_point| { geo_condition_copy.check_point(geo_point) }) }))); } if let Some(geo_radius) = &condition.geo_radius { let geo_hashes = circle_hashes(geo_radius, GEO_QUERY_MAX_REGION).ok()?; let geo_condition_copy = *geo_radius; return Some(Box::new(self.iterator(geo_hashes).filter(move |point| { self.check_values_any(*point, hw_counter, |geo_point| { geo_condition_copy.check_point(geo_point) }) }))); } if let Some(geo_polygon) = &condition.geo_polygon { let geo_hashes = polygon_hashes(geo_polygon, GEO_QUERY_MAX_REGION).ok()?; let geo_condition_copy = geo_polygon.convert(); return Some(Box::new(self.iterator(geo_hashes).filter(move |point| { self.check_values_any(*point, hw_counter, |geo_point| { geo_condition_copy.check_point(geo_point) }) }))); } None } fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { if let Some(geo_bounding_box) = &condition.geo_bounding_box { let 
geo_hashes = rectangle_hashes(geo_bounding_box, GEO_QUERY_MAX_REGION).ok()?; let mut estimation = self.match_cardinality(&geo_hashes, hw_counter); estimation .primary_clauses .push(PrimaryCondition::Condition(Box::new(condition.clone()))); return Some(estimation); } if let Some(geo_radius) = &condition.geo_radius { let geo_hashes = circle_hashes(geo_radius, GEO_QUERY_MAX_REGION).ok()?; let mut estimation = self.match_cardinality(&geo_hashes, hw_counter); estimation .primary_clauses .push(PrimaryCondition::Condition(Box::new(condition.clone()))); return Some(estimation); } if let Some(geo_polygon) = &condition.geo_polygon { let (exterior_hashes, interior_hashes) = polygon_hashes_estimation(geo_polygon, GEO_QUERY_MAX_REGION); // The polygon cardinality estimation should consider its exterior and interiors. // Therefore, we compute exterior estimation first and then subtract all interior estimation. let mut exterior_estimation = self.match_cardinality(&exterior_hashes, hw_counter); for interior in &interior_hashes { let interior_estimation = self.match_cardinality(interior, hw_counter); exterior_estimation.min = max(0, exterior_estimation.min - interior_estimation.max); exterior_estimation.max = max( exterior_estimation.min, exterior_estimation.max - interior_estimation.min, ); exterior_estimation.exp = max( exterior_estimation.exp - interior_estimation.exp, exterior_estimation.min, ); } exterior_estimation .primary_clauses .push(PrimaryCondition::Condition(Box::new(condition.clone()))); return Some(exterior_estimation); } None } fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { Box::new( self.large_hashes(threshold) .map(move |(geo_hash, size)| PayloadBlockCondition { condition: FieldCondition::new_geo_bounding_box( key.clone(), geo_hash_to_box(geo_hash), ), cardinality: size, }), ) } } #[cfg(test)] mod tests { use std::collections::{BTreeSet, HashSet}; use std::ops::Range; use 
common::counter::hardware_accumulator::HwMeasurementAcc; use itertools::Itertools; use ordered_float::OrderedFloat; use rand::SeedableRng; use rand::prelude::StdRng; use rstest::rstest; use serde_json::json; use tempfile::{Builder, TempDir}; use super::*; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::open_db_with_existing_cf; use crate::fixtures::payload_fixtures::random_geo_payload; use crate::json_path::JsonPath; use crate::types::test_utils::build_polygon; use crate::types::{GeoBoundingBox, GeoLineString, GeoPolygon, GeoRadius}; #[cfg(feature = "rocksdb")] type Database = std::sync::Arc<parking_lot::RwLock<DB>>; #[cfg(not(feature = "rocksdb"))] type Database = (); #[derive(Clone, Copy, PartialEq, Debug)] enum IndexType { #[cfg(feature = "rocksdb")] Mutable, MutableGridstore, #[cfg(feature = "rocksdb")] Immutable, Mmap, RamMmap, } enum IndexBuilder { #[cfg(feature = "rocksdb")] Mutable(GeoMapIndexBuilder), MutableGridstore(GeoMapIndexGridstoreBuilder), #[cfg(feature = "rocksdb")] Immutable(GeoMapImmutableIndexBuilder), Mmap(GeoMapIndexMmapBuilder), RamMmap(GeoMapIndexMmapBuilder), } impl IndexBuilder { fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::Mutable(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::MutableGridstore(builder) => { builder.add_point(id, payload, hw_counter) } #[cfg(feature = "rocksdb")] IndexBuilder::Immutable(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::Mmap(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::RamMmap(builder) => builder.add_point(id, payload, hw_counter), } } fn finalize(self) -> OperationResult<GeoMapIndex> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::Mutable(builder) => builder.finalize(), IndexBuilder::MutableGridstore(builder) => builder.finalize(), #[cfg(feature = "rocksdb")] 
IndexBuilder::Immutable(builder) => builder.finalize(), IndexBuilder::Mmap(builder) => builder.finalize(), IndexBuilder::RamMmap(builder) => { let GeoMapIndex::Mmap(index) = builder.finalize()? else { panic!("expected mmap index"); }; // Load index from mmap let index = GeoMapIndex::Immutable(ImmutableGeoMapIndex::open_mmap(*index)); Ok(index) } } } } const NYC: GeoPoint = GeoPoint::new_unchecked(-73.991516, 40.75798); const BERLIN: GeoPoint = GeoPoint::new_unchecked(13.41053, 52.52437); const POTSDAM: GeoPoint = GeoPoint::new_unchecked(13.064473, 52.390569); const TOKYO: GeoPoint = GeoPoint::new_unchecked(139.691706, 35.689487); const LOS_ANGELES: GeoPoint = GeoPoint::new_unchecked(-118.243683, 34.052235); #[cfg(feature = "rocksdb")] const FIELD_NAME: &str = "test"; fn condition_for_geo_radius(key: &str, geo_radius: GeoRadius) -> FieldCondition { FieldCondition::new_geo_radius(JsonPath::new(key), geo_radius) } fn condition_for_geo_polygon(key: &str, geo_polygon: GeoPolygon) -> FieldCondition { FieldCondition::new_geo_polygon(JsonPath::new(key), geo_polygon) } fn condition_for_geo_box(key: &str, geo_bounding_box: GeoBoundingBox) -> FieldCondition { FieldCondition::new_geo_bounding_box(JsonPath::new(key), geo_bounding_box) } #[cfg(feature = "testing")] fn create_builder(index_type: IndexType) -> (IndexBuilder, TempDir, Database) { let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap();
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/geo_index/immutable_geo_index.rs
lib/segment/src/index/field_index/geo_index/immutable_geo_index.rs
use std::path::PathBuf; use ahash::AHashSet; use common::types::PointOffsetType; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use super::mmap_geo_index::MmapGeoMapIndex; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::index::field_index::geo_hash::{GeoHash, encode_max_precision}; use crate::index::field_index::immutable_point_to_values::ImmutablePointToValues; use crate::index::payload_config::StorageType; use crate::types::GeoPoint; #[derive(Copy, Clone, Debug)] struct Counts { hash: GeoHash, points: u32, values: u32, } impl From<super::mmap_geo_index::Counts> for Counts { #[inline] fn from(counts: super::mmap_geo_index::Counts) -> Self { let super::mmap_geo_index::Counts { hash, points, values, } = counts; Self { hash, points, values, } } } pub struct ImmutableGeoMapIndex { counts_per_hash: Vec<Counts>, points_map: Vec<(GeoHash, AHashSet<PointOffsetType>)>, point_to_values: ImmutablePointToValues<GeoPoint>, points_count: usize, points_values_count: usize, max_values_per_point: usize, // Backing s torage, source of state, persists deletions storage: Storage, } enum Storage { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Mmap(Box<MmapGeoMapIndex>), } impl ImmutableGeoMapIndex { /// Open and load immutable geo index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb( db: std::sync::Arc<RwLock<DB>>, store_cf_name: &str, ) -> OperationResult<Option<Self>> { use std::collections::BTreeMap; use crate::index::field_index::geo_index::mutable_geo_index::{ InMemoryGeoMapIndex, MutableGeoMapIndex, }; let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, store_cf_name, )); let 
Some(mutable) = MutableGeoMapIndex::open_rocksdb( db_wrapper.get_database(), db_wrapper.get_column_name(), false, )? else { // Column family doesn't exist, cannot load return Ok(None); }; let InMemoryGeoMapIndex { points_per_hash, values_per_hash, points_map, point_to_values, points_count, points_values_count, max_values_per_point, .. } = mutable.into_in_memory_index(); let mut counts_per_hash: BTreeMap<GeoHash, Counts> = Default::default(); for (hash, points) in points_per_hash { counts_per_hash.insert( hash, Counts { hash, points: points as u32, values: 0, }, ); } for (hash, values) in values_per_hash { if let Some(counts) = counts_per_hash.get_mut(&hash) { counts.values = values as u32; } else { counts_per_hash.insert( hash, Counts { hash, points: 0, values: values as u32, }, ); } } Ok(Some(Self { counts_per_hash: counts_per_hash.values().cloned().collect(), points_map: points_map.iter().map(|(k, v)| (*k, v.clone())).collect(), point_to_values: ImmutablePointToValues::new(point_to_values), points_count, points_values_count, max_values_per_point, storage: Storage::RocksDb(db_wrapper), })) } /// Open and load immutable geo index from mmap storage pub fn open_mmap(index: MmapGeoMapIndex) -> Self { let counts_per_hash = index .storage .counts_per_hash .iter() .copied() .map(Counts::from) .collect(); // Get points per geo hash and filter deleted points let points_map = index .storage .points_map .iter() .copied() .map(|item| { let super::mmap_geo_index::PointKeyValue { hash, ids_start, ids_end, } = item; ( hash, index.storage.points_map_ids[ids_start as usize..ids_end as usize] .iter() .copied() // Filter deleted points .filter(|id| !index.storage.deleted.get(*id as usize).unwrap_or_default()) .collect(), ) }) .collect(); // Get point values and filter deleted points // Track deleted points to adjust point and value counts after loading let mut deleted_points: Vec<(PointOffsetType, Vec<GeoPoint>)> = Vec::with_capacity(index.deleted_count); let point_to_values = 
ImmutablePointToValues::new( index .storage .point_to_values .iter() .map(|(id, values)| { let is_deleted = index.storage.deleted.get(id as usize).unwrap_or_default(); match (is_deleted, values) { (false, Some(values)) => values.into_iter().collect(), (false, None) => vec![], (true, Some(values)) => { let geo_points: Vec<GeoPoint> = values.collect(); deleted_points.push((id, geo_points)); vec![] } (true, None) => { deleted_points.push((id, vec![])); vec![] } } }) .collect(), ); // Index is now loaded into memory, clear cache of backing mmap storage if let Err(err) = index.clear_cache() { log::warn!("Failed to clear mmap cache of ram mmap geo index: {err}"); } let _ = index; // Construct immutable geo index let mut index = Self { counts_per_hash, points_map, point_to_values, points_count: index.points_count(), points_values_count: index.points_values_count(), max_values_per_point: index.max_values_per_point(), storage: Storage::Mmap(Box::new(index)), }; // Update point and value counts based on deleted points for (_idx, removed_geo_points) in deleted_points { index.points_values_count = index .points_values_count .saturating_sub(removed_geo_points.len()); let removed_geo_hashes: Vec<_> = removed_geo_points .into_iter() .map(|geo_point| encode_max_precision(geo_point.lon.0, geo_point.lat.0).unwrap()) .collect(); for removed_geo_hash in &removed_geo_hashes { index.decrement_hash_value_counts(removed_geo_hash); } index.decrement_hash_point_counts(&removed_geo_hashes); } index } #[cfg(all(test, feature = "rocksdb"))] pub fn db_wrapper(&self) -> Option<&DatabaseColumnScheduledDeleteWrapper> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => Some(db_wrapper), Storage::Mmap(_) => None, } } pub fn files(&self) -> Vec<PathBuf> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(ref index) => index.files(), } } pub fn immutable_files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = 
"rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(index) => index.immutable_files(), } } /// Clear cache /// /// Only clears cache of mmap storage if used. Does not clear in-memory representation of /// index. pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Mmap(index) => index.clear_cache().map_err(|err| { OperationError::service_error(format!( "Failed to clear immutable geo index gridstore cache: {err}" )) }), } } pub fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => db_wrapper.remove_column_family(), Storage::Mmap(index) => index.wipe(), } } pub fn flusher(&self) -> Flusher { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => db_wrapper.flusher(), Storage::Mmap(ref index) => index.flusher(), } } pub fn points_count(&self) -> usize { self.points_count } pub fn points_values_count(&self) -> usize { self.points_values_count } pub fn max_values_per_point(&self) -> usize { self.max_values_per_point } pub fn check_values_any( &self, idx: PointOffsetType, check_fn: impl Fn(&GeoPoint) -> bool, ) -> bool { let mut counter = 0usize; self.point_to_values.check_values_any(idx, |v| { counter += 1; check_fn(v) }) } pub fn get_values(&self, idx: u32) -> Option<impl Iterator<Item = &GeoPoint> + '_> { self.point_to_values.get_values(idx) } pub fn values_count(&self, idx: PointOffsetType) -> usize { self.point_to_values .get_values_count(idx) .unwrap_or_default() } pub fn points_per_hash(&self) -> impl Iterator<Item = (&GeoHash, usize)> { self.counts_per_hash .iter() .map(|counts| (&counts.hash, counts.points as usize)) } pub fn points_of_hash(&self, hash: &GeoHash) -> usize { if let Ok(index) = self.counts_per_hash.binary_search_by(|x| x.hash.cmp(hash)) { self.counts_per_hash[index].points as usize } else { 0 } } pub fn values_of_hash(&self, hash: &GeoHash) -> usize { if let 
Ok(index) = self.counts_per_hash.binary_search_by(|x| x.hash.cmp(hash)) { self.counts_per_hash[index].values as usize } else { 0 } } pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> { let removed_geo_points = self.point_to_values.remove_point(idx); if removed_geo_points.is_empty() { return Ok(()); } self.points_count -= 1; self.points_values_count -= removed_geo_points.len(); let mut removed_geo_hashes = Vec::with_capacity(removed_geo_points.len()); for removed_geo_point in removed_geo_points { let removed_geo_hash: GeoHash = encode_max_precision(removed_geo_point.lon.0, removed_geo_point.lat.0).unwrap(); removed_geo_hashes.push(removed_geo_hash); match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => { let key = super::GeoMapIndex::encode_db_key(removed_geo_hash, idx); db_wrapper.remove(&key)?; } Storage::Mmap(ref mut index) => { index.remove_point(idx); } } if let Ok(index) = self .points_map .binary_search_by(|x| x.0.cmp(&removed_geo_hash)) { self.points_map[index].1.remove(&idx); } else { log::warn!("Geo index error: no points for hash {removed_geo_hash} were found"); }; self.decrement_hash_value_counts(&removed_geo_hash); } self.decrement_hash_point_counts(&removed_geo_hashes); Ok(()) } /// Returns an iterator over all point IDs which have the `geohash` prefix. /// Note. Point ID may be repeated multiple times in the iterator. pub fn stored_sub_regions(&self, geo: GeoHash) -> impl Iterator<Item = PointOffsetType> { let start_index = self .points_map .binary_search_by(|(p, _h)| p.cmp(&geo)) .unwrap_or_else(|index| index); self.points_map[start_index..] 
.iter() .take_while(move |(p, _h)| p.starts_with(geo)) .flat_map(|(_, points)| points.iter().copied()) } fn decrement_hash_value_counts(&mut self, geo_hash: &GeoHash) { for i in 0..=geo_hash.len() { let sub_geo_hash = geo_hash.truncate(i); if let Ok(index) = self .counts_per_hash .binary_search_by(|x| x.hash.cmp(&sub_geo_hash)) { let values_count = self.counts_per_hash[index].values; if values_count > 0 { self.counts_per_hash[index].values = values_count - 1; } else { debug_assert!(false, "Hash value count is already empty: {sub_geo_hash}"); } } else { debug_assert!( false, "Hash value count is not found for hash: {sub_geo_hash}", ); } } } fn decrement_hash_point_counts(&mut self, geo_hashes: &[GeoHash]) { let mut seen_hashes: AHashSet<GeoHash> = Default::default(); for geo_hash in geo_hashes { for i in 0..=geo_hash.len() { let sub_geo_hash = geo_hash.truncate(i); if seen_hashes.contains(&sub_geo_hash) { continue; } seen_hashes.insert(sub_geo_hash); if let Ok(index) = self .counts_per_hash .binary_search_by(|x| x.hash.cmp(&sub_geo_hash)) { let points_count = self.counts_per_hash[index].points; if points_count > 0 { self.counts_per_hash[index].points = points_count - 1; } else { debug_assert!(false, "Hash point count is already empty: {sub_geo_hash}"); } } else { debug_assert!( false, "Hash point count is not found for hash: {sub_geo_hash}", ); }; } } } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Mmap(_) => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/geo_index/mmap_geo_index.rs
lib/segment/src/index/field_index/geo_index/mmap_geo_index.rs
use std::path::{Path, PathBuf}; use common::counter::conditioned_counter::ConditionedCounter; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use fs_err as fs; use io::file_operations::{atomic_save_json, read_json}; use memmap2::MmapMut; use memory::fadvise::clear_disk_cache; use memory::madvise::AdviceSetting; use memory::mmap_ops::{create_and_ensure_length, open_write_mmap}; use memory::mmap_type::{MmapBitSlice, MmapSlice}; use serde::{Deserialize, Serialize}; use super::mutable_geo_index::InMemoryGeoMapIndex; use crate::common::Flusher; use crate::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::geo_hash::GeoHash; use crate::index::field_index::mmap_point_to_values::MmapPointToValues; use crate::types::GeoPoint; const DELETED_PATH: &str = "deleted.bin"; const COUNTS_PER_HASH: &str = "counts_per_hash.bin"; const POINTS_MAP: &str = "points_map.bin"; const POINTS_MAP_IDS: &str = "points_map_ids.bin"; const STATS_PATH: &str = "mmap_field_index_stats.json"; #[repr(C)] #[derive(Copy, Clone, Debug)] pub(super) struct Counts { pub hash: GeoHash, pub points: u32, pub values: u32, } #[repr(C)] #[derive(Copy, Clone, Debug)] pub(super) struct PointKeyValue { pub hash: GeoHash, pub ids_start: u32, pub ids_end: u32, } /// /// points_map /// ┌─────────────────────────────────────────┐ /// │ (ABC, 10, 20)|(ABD, 20, 40) │ /// └────────┬──┬──────────┬───┬──────────────┘ /// │ │ │ │ /// ┌──────┘ └────────┐ │ └───────────────────┐ /// │ │ └───┐ │ /// │ │ │ │ /// ┌─▼──────────────────▼─────▼───────────────────▼──────────┐ /// │ 1, 8, 10, 18, 129, 213, 12, 13, 14, 87, 99, 199 │ /// └─────────────────────────────────────────────────────────┘ /// points_map_ids /// pub struct MmapGeoMapIndex { path: PathBuf, pub(super) storage: Storage, pub(super) deleted_count: usize, points_values_count: usize, 
max_values_per_point: usize, is_on_disk: bool, } pub(super) struct Storage { /// Stores GeoHash, points count and values count. /// Sorted by geohash, so we binary search the region. pub(super) counts_per_hash: MmapSlice<Counts>, /// Stores GeoHash and associated range of offsets in the points_map_ids. /// Sorted by geohash, so we binary search the region. pub(super) points_map: MmapSlice<PointKeyValue>, /// A storage of associations between geo-hashes and point ids. (See the diagram above) pub(super) points_map_ids: MmapSlice<PointOffsetType>, /// One-to-many mapping of the PointOffsetType to the GeoPoint. pub(super) point_to_values: MmapPointToValues<GeoPoint>, /// Deleted flags for each PointOffsetType pub(super) deleted: MmapBitSliceBufferedUpdateWrapper, } #[derive(Debug, Clone, Serialize, Deserialize)] struct MmapGeoMapIndexStat { points_values_count: usize, max_values_per_point: usize, } impl MmapGeoMapIndex { pub fn build( dynamic_index: InMemoryGeoMapIndex, path: &Path, is_on_disk: bool, ) -> OperationResult<Self> { fs::create_dir_all(path)?; let deleted_path = path.join(DELETED_PATH); let stats_path = path.join(STATS_PATH); let counts_per_hash_path = path.join(COUNTS_PER_HASH); let points_map_path = path.join(POINTS_MAP); let points_map_ids_path = path.join(POINTS_MAP_IDS); // Create the point-to-value mapping and persist in the mmap file MmapPointToValues::<GeoPoint>::from_iter( path, dynamic_index .point_to_values .iter() .enumerate() .map(|(idx, values)| (idx as PointOffsetType, values.iter().cloned())), )?; { let points_map_file = create_and_ensure_length( &points_map_path, dynamic_index.points_map.len() * std::mem::size_of::<PointKeyValue>(), )?; let points_map_file = unsafe { MmapMut::map_mut(&points_map_file)? }; let mut points_map = unsafe { MmapSlice::<PointKeyValue>::try_from(points_map_file)? 
}; let points_map_ids_file = create_and_ensure_length( &points_map_ids_path, dynamic_index .points_map .values() .map(|v| v.len()) .sum::<usize>() * std::mem::size_of::<PointOffsetType>(), )?; let points_map_ids_file = unsafe { MmapMut::map_mut(&points_map_ids_file)? }; let mut points_map_ids = unsafe { MmapSlice::<PointOffsetType>::try_from(points_map_ids_file)? }; let mut ids_offset = 0; for (i, (hash, ids)) in dynamic_index.points_map.iter().enumerate() { points_map[i].hash = *hash; points_map[i].ids_start = ids_offset as u32; points_map[i].ids_end = (ids_offset + ids.len()) as u32; points_map_ids[ids_offset..ids_offset + ids.len()].copy_from_slice( &ids.iter() .map(|v| *v as PointOffsetType) .collect::<Vec<_>>(), ); ids_offset += ids.len(); } } { let counts_per_hash_file = create_and_ensure_length( &counts_per_hash_path, std::cmp::min( dynamic_index.points_per_hash.len(), dynamic_index.values_per_hash.len(), ) * std::mem::size_of::<Counts>(), )?; let counts_per_hash_file = unsafe { MmapMut::map_mut(&counts_per_hash_file)? }; let mut counts_per_hash = unsafe { MmapSlice::<Counts>::try_from(counts_per_hash_file)? }; for ((hash, points), dst) in dynamic_index .points_per_hash .iter() .zip(counts_per_hash.iter_mut()) { if let Some(values) = dynamic_index.values_per_hash.get(hash) { dst.hash = *hash; dst.points = *points as u32; dst.values = *values as u32; } } } { let deleted_flags_count = dynamic_index.point_to_values.len(); let deleted_file = create_and_ensure_length( &deleted_path, deleted_flags_count .div_ceil(u8::BITS as usize) .next_multiple_of(std::mem::size_of::<usize>()), )?; let mut deleted_mmap = unsafe { MmapMut::map_mut(&deleted_file)? 
}; deleted_mmap.fill(0); let mut deleted_bitflags = MmapBitSlice::from(deleted_mmap, 0); for (idx, values) in dynamic_index.point_to_values.iter().enumerate() { if values.is_empty() { deleted_bitflags.set(idx, true); } } } atomic_save_json( &stats_path, &MmapGeoMapIndexStat { points_values_count: dynamic_index.points_values_count, max_values_per_point: dynamic_index.max_values_per_point, }, )?; Self::open(path, is_on_disk)?.ok_or_else(|| { OperationError::service_error("Failed to open MmapGeoMapIndex after building it") }) } pub fn open(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> { let deleted_path = path.join(DELETED_PATH); let stats_path = path.join(STATS_PATH); let counts_per_hash_path = path.join(COUNTS_PER_HASH); let points_map_path = path.join(POINTS_MAP); let points_map_ids_path = path.join(POINTS_MAP_IDS); // If stats file doesn't exist, assume the index doesn't exist on disk if !stats_path.is_file() { return Ok(None); } let populate = !is_on_disk; let stats: MmapGeoMapIndexStat = read_json(&stats_path)?; let counts_per_hash = unsafe { MmapSlice::try_from(open_write_mmap( &counts_per_hash_path, AdviceSetting::Global, populate, )?)? }; let points_map = unsafe { MmapSlice::try_from(open_write_mmap( &points_map_path, AdviceSetting::Global, populate, )?)? }; let points_map_ids = unsafe { MmapSlice::try_from(open_write_mmap( &points_map_ids_path, AdviceSetting::Global, populate, )?)? 
}; let point_to_values = MmapPointToValues::open(path, true)?; let deleted = open_write_mmap(&deleted_path, AdviceSetting::Global, populate)?; let deleted = MmapBitSlice::from(deleted, 0); let deleted_count = deleted.count_ones(); Ok(Some(Self { path: path.to_owned(), storage: Storage { counts_per_hash, points_map, points_map_ids, point_to_values, deleted: MmapBitSliceBufferedUpdateWrapper::new(deleted), }, deleted_count, points_values_count: stats.points_values_count, max_values_per_point: stats.max_values_per_point, is_on_disk, })) } pub fn check_values_any( &self, idx: PointOffsetType, hw_counter: &HardwareCounterCell, check_fn: impl Fn(&GeoPoint) -> bool, ) -> bool { let hw_counter = self.make_conditioned_counter(hw_counter); self.storage .deleted .get(idx as usize) .filter(|b| !b) .map(|_| { self.storage .point_to_values .check_values_any(idx, |v| check_fn(&v), &hw_counter) }) .unwrap_or(false) } pub fn get_values(&self, idx: u32) -> Option<impl Iterator<Item = GeoPoint> + '_> { self.storage.point_to_values.get_values(idx) } pub fn values_count(&self, idx: PointOffsetType) -> usize { self.storage .deleted .get(idx as usize) .filter(|b| !b) .and_then(|_| self.storage.point_to_values.get_values_count(idx)) .unwrap_or(0) } pub fn points_per_hash(&self) -> impl Iterator<Item = (GeoHash, usize)> + '_ { self.storage .counts_per_hash .iter() .map(|counts| (counts.hash, counts.points as usize)) } pub fn points_of_hash(&self, hash: &GeoHash, hw_counter: &HardwareCounterCell) -> usize { let hw_counter = self.make_conditioned_counter(hw_counter); hw_counter .payload_index_io_read_counter() // Simulate binary search complexity as IO read estimation .incr_delta( (self.storage.counts_per_hash.len() as f32).log2().ceil() as usize * size_of::<Counts>(), ); if let Ok(index) = self .storage .counts_per_hash .binary_search_by(|x| x.hash.cmp(hash)) { self.storage.counts_per_hash[index].points as usize } else { 0 } } pub fn values_of_hash(&self, hash: &GeoHash, hw_counter: 
&HardwareCounterCell) -> usize { let hw_counter = self.make_conditioned_counter(hw_counter); hw_counter .payload_index_io_read_counter() // Simulate binary search complexity as IO read estimation .incr_delta( (self.storage.counts_per_hash.len() as f32).log2().ceil() as usize * size_of::<Counts>(), ); if let Ok(index) = self .storage .counts_per_hash .binary_search_by(|x| x.hash.cmp(hash)) { self.storage.counts_per_hash[index].values as usize } else { 0 } } pub fn wipe(self) -> OperationResult<()> { let files = self.files(); let path = self.path.clone(); // drop mmap handles before deleting files drop(self); for file in files { fs::remove_file(file)?; } let _ = fs::remove_dir(path); Ok(()) } pub fn files(&self) -> Vec<PathBuf> { let mut files = vec![ self.path.join(DELETED_PATH), self.path.join(COUNTS_PER_HASH), self.path.join(POINTS_MAP), self.path.join(POINTS_MAP_IDS), self.path.join(STATS_PATH), ]; files.extend(self.storage.point_to_values.files()); files } pub fn immutable_files(&self) -> Vec<PathBuf> { let mut files = vec![ self.path.join(COUNTS_PER_HASH), self.path.join(POINTS_MAP), self.path.join(POINTS_MAP_IDS), self.path.join(STATS_PATH), ]; files.extend(self.storage.point_to_values.immutable_files()); files } pub fn flusher(&self) -> Flusher { self.storage.deleted.flusher() } pub fn remove_point(&mut self, idx: PointOffsetType) { let idx = idx as usize; if let Some(deleted) = self.storage.deleted.get(idx) && !deleted { self.storage.deleted.set(idx, true); self.deleted_count += 1; } } /// Returns an iterator over all point IDs which have the `geohash` prefix. /// Note. Point ID may be repeated multiple times in the iterator. pub fn stored_sub_regions<'a>( &'a self, geohash: GeoHash, ) -> impl Iterator<Item = PointOffsetType> + 'a { let start_index = self .storage .points_map .binary_search_by(|point_key_value| point_key_value.hash.cmp(&geohash)) .unwrap_or_else(|index| index); self.storage.points_map[start_index..] 
.iter() .take_while(move |point_key_value| point_key_value.hash.starts_with(geohash)) .filter_map(|point_key_value| { Some( self.storage .points_map_ids .get(point_key_value.ids_start as usize..point_key_value.ids_end as usize)? .iter() .copied() .filter(|idx| !self.storage.deleted.get(*idx as usize).unwrap_or(true)), ) }) .flatten() } pub fn points_count(&self) -> usize { self.storage .point_to_values .len() .saturating_sub(self.deleted_count) } pub fn points_values_count(&self) -> usize { self.points_values_count } pub fn max_values_per_point(&self) -> usize { self.max_values_per_point } fn make_conditioned_counter<'a>( &self, hw_counter: &'a HardwareCounterCell, ) -> ConditionedCounter<'a> { ConditionedCounter::new(self.is_on_disk, hw_counter) } pub fn is_on_disk(&self) -> bool { self.is_on_disk } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { self.storage.counts_per_hash.populate()?; self.storage.points_map.populate()?; self.storage.points_map_ids.populate()?; self.storage.point_to_values.populate(); Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { let deleted_path = self.path.join(DELETED_PATH); let counts_per_hash_path = self.path.join(COUNTS_PER_HASH); let points_map_path = self.path.join(POINTS_MAP); let points_map_ids_path = self.path.join(POINTS_MAP_IDS); clear_disk_cache(&deleted_path)?; clear_disk_cache(&counts_per_hash_path)?; clear_disk_cache(&points_map_path)?; clear_disk_cache(&points_map_ids_path)?; self.storage.point_to_values.clear_cache()?; Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/mutable_text_index.rs
lib/segment/src/index/field_index/full_text_index/mutable_text_index.rs
use std::borrow::Cow; use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use gridstore::Gridstore; use gridstore::config::StorageOptions; use itertools::Itertools; use super::inverted_index::mutable_inverted_index::MutableInvertedIndex; use super::inverted_index::mutable_inverted_index_builder::MutableInvertedIndexBuilder; use super::inverted_index::{Document, InvertedIndex, TokenSet}; use super::text_index::FullTextIndex; use super::tokenizers::Tokenizer; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; use crate::data_types::index::TextIndexParams; use crate::index::field_index::ValueIndexer; use crate::index::payload_config::StorageType; const GRIDSTORE_OPTIONS: StorageOptions = StorageOptions { compression: Some(gridstore::config::Compression::None), page_size_bytes: None, block_size_bytes: None, region_size_blocks: None, }; pub struct MutableFullTextIndex { pub(super) inverted_index: MutableInvertedIndex, pub(super) config: TextIndexParams, pub(super) storage: Storage, pub(super) tokenizer: Tokenizer, } pub(super) enum Storage { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Gridstore(Gridstore<Vec<u8>>), } impl MutableFullTextIndex { /// Open and load mutable full text index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb( db_wrapper: DatabaseColumnScheduledDeleteWrapper, config: TextIndexParams, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let tokenizer = Tokenizer::new_from_text_index_params(&config); if !db_wrapper.has_column_family()? 
{ if create_if_missing { db_wrapper.recreate_column_family()?; } else { // Column family doesn't exist, cannot load return Ok(None); } }; let phrase_matching = config.phrase_matching.unwrap_or_default(); let db = db_wrapper.clone(); let db = db.lock_db(); let iter = db.iter()?.map(|(key, value)| { let idx = FullTextIndex::restore_key(&key); let str_tokens = FullTextIndex::deserialize_document(&value)?; Ok((idx, str_tokens)) }); Ok(Some(Self { inverted_index: MutableInvertedIndex::build_index(iter, phrase_matching)?, config, storage: Storage::RocksDb(db_wrapper), tokenizer, })) } /// Open and load mutable full text index from Gridstore storage /// /// The `create_if_missing` parameter indicates whether to create a new Gridstore if it does /// not exist. If false and files don't exist, the load function will indicate nothing could be /// loaded. pub fn open_gridstore( path: PathBuf, config: TextIndexParams, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let store = if create_if_missing { Gridstore::open_or_create(path, GRIDSTORE_OPTIONS).map_err(|err| { OperationError::service_error(format!( "failed to open mutable full text index on gridstore: {err}" )) })? } else if path.exists() { Gridstore::open(path).map_err(|err| { OperationError::service_error(format!( "failed to open mutable full text index on gridstore: {err}" )) })? 
} else { // Files don't exist, cannot load return Ok(None); }; let phrase_matching = config.phrase_matching.unwrap_or_default(); let tokenizer = Tokenizer::new_from_text_index_params(&config); let hw_counter = HardwareCounterCell::disposable(); let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter(); let mut builder = MutableInvertedIndexBuilder::new(phrase_matching); store .iter::<_, OperationError>( |idx, value: Vec<u8>| { let str_tokens = FullTextIndex::deserialize_document(&value)?; builder.add(idx, str_tokens); Ok(true) }, hw_counter_ref, ) .map_err(|err| { OperationError::service_error(format!( "Failed to load mutable full text index from gridstore: {err}" )) })?; Ok(Some(Self { inverted_index: builder.build(), config, storage: Storage::Gridstore(store), tokenizer, })) } #[inline] pub(super) fn init(&mut self) -> OperationResult<()> { match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.recreate_column_family(), Storage::Gridstore(store) => store.clear().map_err(|err| { OperationError::service_error(format!( "Failed to clear mutable full text index: {err}", )) }), } } #[inline] pub(super) fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(), Storage::Gridstore(store) => store.wipe().map_err(|err| { OperationError::service_error(format!( "Failed to wipe mutable full text index: {err}", )) }), } } /// Clear cache /// /// Only clears cache of Gridstore storage if used. Does not clear in-memory representation of /// index. 
pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Gridstore(index) => index.clear_cache().map_err(|err| { OperationError::service_error(format!( "Failed to clear mutable full text index gridstore cache: {err}" )) }), } } #[inline] pub(super) fn files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Gridstore(store) => store.files(), } } #[inline] pub(super) fn flusher(&self) -> Flusher { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.flusher(), Storage::Gridstore(store) => { let storage_flusher = store.flusher(); Box::new(move || { storage_flusher().map_err(|err| { OperationError::service_error(format!( "Failed to flush mutable full text index gridstore: {err}" )) }) }) } } } pub fn add_many( &mut self, idx: PointOffsetType, values: Vec<String>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { if values.is_empty() { return Ok(()); } let mut str_tokens: Vec<Cow<str>> = Vec::new(); for value in &values { self.tokenizer.tokenize_doc(value, |token| { str_tokens.push(token); }); } let tokens = self.inverted_index.register_tokens(&str_tokens); let phrase_matching = self.config.phrase_matching.unwrap_or_default(); if phrase_matching { let document = Document::new(tokens.clone()); self.inverted_index .index_document(idx, document, hw_counter)?; } let token_set = TokenSet::from_iter(tokens); self.inverted_index .index_tokens(idx, token_set, hw_counter)?; let tokens_to_store = if phrase_matching { // store ordered tokens str_tokens } else { // store sorted, unique tokens str_tokens.into_iter().sorted().dedup().collect() }; let db_document = FullTextIndex::serialize_document(tokens_to_store)?; // Update persisted storage match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { let db_idx = FullTextIndex::store_key(idx); 
db_wrapper.put(db_idx, db_document)?; } Storage::Gridstore(store) => { store .put_value( idx, &db_document, hw_counter.ref_payload_index_io_write_counter(), ) .map_err(|err| { OperationError::service_error(format!( "failed to put value in mutable full text index gridstore: {err}" )) })?; } } Ok(()) } #[allow(clippy::unnecessary_wraps)] pub fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { // Update persisted storage match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { if self.inverted_index.remove(id) { let db_doc_id = FullTextIndex::store_key(id); db_wrapper.remove(db_doc_id)?; } } Storage::Gridstore(store) => { if self.inverted_index.remove(id) { store.delete_value(id); } } } Ok(()) } /// Get the tokenized document stored for a given point ID. Only for testing purposes. #[cfg(test)] pub fn get_doc(&self, idx: PointOffsetType) -> Option<Vec<String>> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db) => { let db_idx = FullTextIndex::store_key(idx); db.get_pinned(&db_idx, |bytes| { FullTextIndex::deserialize_document(bytes).unwrap() }) .unwrap() } Storage::Gridstore(gridstore) => gridstore .get_value::<false>(idx, &HardwareCounterCell::disposable()) .map(|bytes| FullTextIndex::deserialize_document(&bytes).unwrap()), } } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Gridstore(_) => StorageType::Gridstore, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Gridstore(_) => false, } } } impl ValueIndexer for MutableFullTextIndex { type ValueType = String; fn add_many( &mut self, idx: PointOffsetType, values: Vec<String>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.add_many(idx, values, hw_counter) } fn get_value(value: &serde_json::Value) -> Option<String> { FullTextIndex::get_value(value) } fn 
remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { self.remove_point(id) } } #[cfg(test)] mod tests { use tempfile::Builder; use super::*; use crate::data_types::index::{TextIndexType, TokenizerType}; use crate::json_path::JsonPath; use crate::types::{FieldCondition, Match}; fn filter_request(text: &str) -> FieldCondition { FieldCondition::new_match(JsonPath::new("text"), Match::new_text(text)) } #[test] fn test_full_text_indexing() { use common::counter::hardware_accumulator::HwMeasurementAcc; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use crate::index::field_index::{PayloadFieldIndex, ValueIndexer}; let payloads: Vec<_> = vec![ serde_json::json!( "The celebration had a long way to go and even in the silent depths of Multivac's underground chambers, it hung in the air." ), serde_json::json!("If nothing else, there was the mere fact of isolation and silence."), serde_json::json!([ "For the first time in a decade, technicians were not scurrying about the vitals of the giant computer, ", "the soft lights did not wink out their erratic patterns, the flow of information in and out had halted." ]), serde_json::json!( "It would not be halted long, of course, for the needs of peace would be pressing." ), serde_json::json!( "Yet now, for a day, perhaps for a week, even Multivac might celebrate the great time, and rest." 
), ]; let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap(); let config = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: None, phrase_matching: None, on_disk: None, stopwords: None, stemmer: None, ascii_folding: None, }; { let mut index = FullTextIndex::new_gridstore(temp_dir.path().join("test_db"), config.clone(), true) .unwrap() .unwrap(); let hw_cell = HardwareCounterCell::new(); for (idx, payload) in payloads.iter().enumerate() { index .add_point(idx as PointOffsetType, &[payload], &hw_cell) .unwrap(); } assert_eq!(index.count_indexed_points(), payloads.len()); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let filter_condition = filter_request("multivac"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![0, 4]); let filter_condition = filter_request("giant computer"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![2]); let filter_condition = filter_request("the great time"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![4]); index.remove_point(2).unwrap(); index.remove_point(3).unwrap(); let filter_condition = filter_request("giant computer"); assert!( index .filter(&filter_condition, &hw_counter) .unwrap() .next() .is_none() ); assert_eq!(index.count_indexed_points(), payloads.len() - 2); let payload = serde_json::json!([ "The last question was asked for the first time, half in jest, on May 21, 2061,", "at a time when humanity first stepped into the light." 
]); index.add_point(3, &[&payload], &hw_cell).unwrap(); let payload = serde_json::json!([ "The question came about as a result of a five dollar bet over highballs, and it happened this way: " ]); index.add_point(4, &[&payload], &hw_cell).unwrap(); assert_eq!(index.count_indexed_points(), payloads.len() - 1); index.flusher()().unwrap(); } { let mut index = FullTextIndex::new_gridstore(temp_dir.path().join("test_db"), config, true) .unwrap() .unwrap(); assert_eq!(index.count_indexed_points(), 4); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let filter_condition = filter_request("multivac"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![0]); let filter_condition = filter_request("the"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![0, 1, 3, 4]); // check deletion index.remove_point(0).unwrap(); let filter_condition = filter_request("multivac"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert!(search_res.is_empty()); assert_eq!(index.count_indexed_points(), 3); index.remove_point(3).unwrap(); let filter_condition = filter_request("the"); let search_res: Vec<_> = index .filter(&filter_condition, &hw_counter) .unwrap() .collect(); assert_eq!(search_res, vec![1, 4]); assert_eq!(index.count_indexed_points(), 2); // check deletion of non-existing point index.remove_point(3).unwrap(); assert_eq!(index.count_indexed_points(), 2); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/immutable_text_index.rs
lib/segment/src/index/field_index/full_text_index/immutable_text_index.rs
use std::path::PathBuf; use common::types::PointOffsetType; use super::inverted_index::InvertedIndex; use super::inverted_index::immutable_inverted_index::ImmutableInvertedIndex; #[cfg(feature = "rocksdb")] use super::inverted_index::mutable_inverted_index::MutableInvertedIndex; use super::mmap_text_index::MmapFullTextIndex; #[cfg(feature = "rocksdb")] use super::text_index::FullTextIndex; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::data_types::index::TextIndexParams; #[cfg(feature = "rocksdb")] use crate::index::field_index::full_text_index::mutable_text_index::{self, MutableFullTextIndex}; use crate::index::field_index::full_text_index::tokenizers::Tokenizer; use crate::index::payload_config::StorageType; pub struct ImmutableFullTextIndex { pub(super) inverted_index: ImmutableInvertedIndex, pub(super) tokenizer: Tokenizer, // Backing storage, source of state, persists deletions pub(super) storage: Storage, } pub(super) enum Storage { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Mmap(Box<MmapFullTextIndex>), } impl ImmutableFullTextIndex { /// Open and load immutable full text index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb( db_wrapper: DatabaseColumnScheduledDeleteWrapper, config: TextIndexParams, ) -> OperationResult<Option<Self>> { let tokenizer = Tokenizer::new_from_text_index_params(&config); if !db_wrapper.has_column_family()? 
{ return Ok(None); }; let db = db_wrapper.clone(); let db = db.lock_db(); let phrase_matching = config.phrase_matching.unwrap_or_default(); let iter = db.iter()?.map(|(key, value)| { let idx = FullTextIndex::restore_key(&key); let tokens = FullTextIndex::deserialize_document(&value)?; Ok((idx, tokens)) }); let mutable = MutableInvertedIndex::build_index(iter, phrase_matching)?; Ok(Some(Self { inverted_index: ImmutableInvertedIndex::from(mutable), tokenizer, storage: Storage::RocksDb(db_wrapper), })) } /// Open and load immutable full text index from mmap storage pub fn open_mmap(index: MmapFullTextIndex) -> Self { let inverted_index = ImmutableInvertedIndex::from(&index.inverted_index); // ToDo(rocksdb): this is a duplication of tokenizer, // ToDo(rocksdb): But once the RocksDB is removed, we can always use the tokenizer from the index. let tokenizer = index.tokenizer.clone(); // Index is now loaded into memory, clear cache of backing mmap storage if let Err(err) = index.clear_cache() { log::warn!("Failed to clear mmap cache of ram mmap full text index: {err}"); } Self { inverted_index, storage: Storage::Mmap(Box::new(index)), tokenizer, } } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] pub fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { if self.inverted_index.remove(id) { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => { let db_doc_id = FullTextIndex::store_key(id); db_wrapper.remove(db_doc_id)?; } Storage::Mmap(ref mut index) => { index.remove_point(id); } } } Ok(()) } pub fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(), Storage::Mmap(index) => index.wipe(), } } /// Clear cache /// /// Only clears cache of mmap storage if used. Does not clear in-memory representation of /// index. 
pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Mmap(index) => index.clear_cache().map_err(|err| { OperationError::service_error(format!( "Failed to clear immutable full text index gridstore cache: {err}" )) }), } } pub fn files(&self) -> Vec<PathBuf> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(ref index) => index.files(), } } pub fn immutable_files(&self) -> Vec<PathBuf> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(ref index) => index.immutable_files(), } } pub fn flusher(&self) -> Flusher { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => db_wrapper.flusher(), Storage::Mmap(ref index) => index.flusher(), } } #[cfg(feature = "rocksdb")] pub fn from_rocksdb_mutable(mutable: MutableFullTextIndex) -> Self { let MutableFullTextIndex { inverted_index, config: _, tokenizer, storage, } = mutable; let mutable_text_index::Storage::RocksDb(db) = storage else { unreachable!( "There is no Gridstore-backed immutable text index, it should be Mmap-backed instead", ); }; Self { inverted_index: ImmutableInvertedIndex::from(inverted_index), tokenizer, storage: Storage::RocksDb(db), } } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Mmap(_) => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/text_index.rs
lib/segment/src/index/field_index/full_text_index/text_index.rs
use std::borrow::Cow; use std::path::PathBuf; #[cfg(feature = "rocksdb")] use std::sync::Arc; use ahash::AHashSet; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use serde::{Deserialize, Serialize}; use serde_json::Value; use super::immutable_text_index::ImmutableFullTextIndex; use super::inverted_index::{InvertedIndex, ParsedQuery, TokenId, TokenSet}; use super::mmap_text_index::{FullTextMmapIndexBuilder, MmapFullTextIndex}; use super::mutable_text_index::MutableFullTextIndex; use super::tokenizers::Tokenizer; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::data_types::index::TextIndexParams; use crate::index::field_index::full_text_index::inverted_index::Document; use crate::index::field_index::{ CardinalityEstimation, FieldIndexBuilderTrait, PayloadBlockCondition, PayloadFieldIndex, ValueIndexer, }; use crate::index::payload_config::{IndexMutability, StorageType}; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{FieldCondition, Match, MatchPhrase, MatchText, PayloadKeyType}; pub enum FullTextIndex { Mutable(MutableFullTextIndex), Immutable(ImmutableFullTextIndex), Mmap(Box<MmapFullTextIndex>), } impl FullTextIndex { #[cfg(feature = "rocksdb")] pub fn new_rocksdb( db: Arc<RwLock<DB>>, config: TextIndexParams, field: &str, is_appendable: bool, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let store_cf_name = Self::storage_cf_name(field); let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); let index = if is_appendable { MutableFullTextIndex::open_rocksdb(db_wrapper, 
config, create_if_missing)? .map(Self::Mutable) } else { ImmutableFullTextIndex::open_rocksdb(db_wrapper, config)?.map(Self::Immutable) }; Ok(index) } pub fn new_mmap( path: PathBuf, config: TextIndexParams, is_on_disk: bool, ) -> OperationResult<Option<Self>> { let Some(mmap_index) = MmapFullTextIndex::open(path, config, is_on_disk)? else { return Ok(None); }; let index = if is_on_disk { // Use on mmap directly Some(Self::Mmap(Box::new(mmap_index))) } else { // Load into RAM, use mmap as backing storage Some(Self::Immutable(ImmutableFullTextIndex::open_mmap( mmap_index, ))) }; Ok(index) } pub fn new_gridstore( dir: PathBuf, config: TextIndexParams, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let index = MutableFullTextIndex::open_gridstore(dir, config, create_if_missing)?; Ok(index.map(Self::Mutable)) } pub fn init(&mut self) -> OperationResult<()> { match self { Self::Mutable(index) => index.init(), Self::Immutable(_) => { debug_assert!(false, "Immutable index should be initialized before use"); Ok(()) } Self::Mmap(_) => { debug_assert!(false, "Mmap index should be initialized before use"); Ok(()) } } } #[cfg(feature = "rocksdb")] pub fn builder_rocksdb( db: Arc<RwLock<DB>>, config: TextIndexParams, field: &str, keep_appendable: bool, ) -> OperationResult<FullTextIndexRocksDbBuilder> { FullTextIndexRocksDbBuilder::new(db, config, field, keep_appendable) } pub fn builder_mmap( path: PathBuf, config: TextIndexParams, is_on_disk: bool, ) -> FullTextMmapIndexBuilder { FullTextMmapIndexBuilder::new(path, config, is_on_disk) } pub fn builder_gridstore( dir: PathBuf, config: TextIndexParams, ) -> FullTextGridstoreIndexBuilder { FullTextGridstoreIndexBuilder::new(dir, config) } #[cfg(feature = "rocksdb")] fn storage_cf_name(field: &str) -> String { format!("{field}_fts") } pub(super) fn points_count(&self) -> usize { match self { Self::Mutable(index) => index.inverted_index.points_count(), Self::Immutable(index) => index.inverted_index.points_count(), 
Self::Mmap(index) => index.inverted_index.points_count(), } } pub(super) fn get_token( &self, token: &str, hw_counter: &HardwareCounterCell, ) -> Option<TokenId> { match self { Self::Mutable(index) => index.inverted_index.get_token_id(token, hw_counter), Self::Immutable(index) => index.inverted_index.get_token_id(token, hw_counter), Self::Mmap(index) => index.inverted_index.get_token_id(token, hw_counter), } } pub(super) fn filter_query<'a>( &'a self, query: ParsedQuery, hw_counter: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { match self { Self::Mutable(index) => index.inverted_index.filter(query, hw_counter), Self::Immutable(index) => index.inverted_index.filter(query, hw_counter), Self::Mmap(index) => index.inverted_index.filter(query, hw_counter), } } fn get_tokenizer(&self) -> &Tokenizer { match self { Self::Mutable(index) => &index.tokenizer, Self::Immutable(index) => &index.tokenizer, Self::Mmap(index) => &index.tokenizer, } } fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { match self { Self::Mutable(index) => Box::new(index.inverted_index.payload_blocks(threshold, key)), Self::Immutable(index) => Box::new(index.inverted_index.payload_blocks(threshold, key)), Self::Mmap(index) => Box::new(index.inverted_index.payload_blocks(threshold, key)), } } pub(super) fn estimate_query_cardinality( &self, query: &ParsedQuery, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { match self { Self::Mutable(index) => index .inverted_index .estimate_cardinality(query, condition, hw_counter), Self::Immutable(index) => index .inverted_index .estimate_cardinality(query, condition, hw_counter), Self::Mmap(index) => index .inverted_index .estimate_cardinality(query, condition, hw_counter), } } pub fn check_match(&self, query: &ParsedQuery, point_id: PointOffsetType) -> bool { match self { Self::Mutable(index) => 
index.inverted_index.check_match(query, point_id), Self::Immutable(index) => index.inverted_index.check_match(query, point_id), Self::Mmap(index) => index.inverted_index.check_match(query, point_id), } } pub fn values_count(&self, point_id: PointOffsetType) -> usize { match self { Self::Mutable(index) => index.inverted_index.values_count(point_id), Self::Immutable(index) => index.inverted_index.values_count(point_id), Self::Mmap(index) => index.inverted_index.values_count(point_id), } } pub fn values_is_empty(&self, point_id: PointOffsetType) -> bool { match self { Self::Mutable(index) => index.inverted_index.values_is_empty(point_id), Self::Immutable(index) => index.inverted_index.values_is_empty(point_id), Self::Mmap(index) => index.inverted_index.values_is_empty(point_id), } } #[cfg(feature = "rocksdb")] pub(super) fn store_key(id: PointOffsetType) -> Vec<u8> { bincode::serialize(&id).unwrap() } #[cfg(feature = "rocksdb")] pub(super) fn restore_key(data: &[u8]) -> PointOffsetType { bincode::deserialize(data).unwrap() } pub(super) fn serialize_document(tokens: Vec<Cow<str>>) -> OperationResult<Vec<u8>> { #[derive(Serialize)] struct StoredDocument<'a> { tokens: Vec<Cow<'a, str>>, } let doc = StoredDocument { tokens }; serde_cbor::to_vec(&doc).map_err(|e| { OperationError::service_error(format!("Failed to serialize document: {e}")) }) } pub(super) fn deserialize_document(data: &[u8]) -> OperationResult<Vec<String>> { #[derive(Deserialize)] struct StoredDocument { tokens: Vec<String>, } serde_cbor::from_slice::<StoredDocument>(data) .map_err(|e| { OperationError::service_error(format!("Failed to deserialize document: {e}")) }) .map(|doc| doc.tokens) } pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry { PayloadIndexTelemetry { field_name: None, index_type: match self { FullTextIndex::Mutable(_) => "mutable_full_text", FullTextIndex::Immutable(_) => "immutable_full_text", FullTextIndex::Mmap(_) => "mmap_full_text", }, points_values_count: self.points_count(), 
points_count: self.points_count(), histogram_bucket_size: None, } } /// Tries to parse a phrase query. If there are any unseen tokens, returns `None` /// /// Preserves token order pub fn parse_phrase_query( &self, phrase: &str, hw_counter: &HardwareCounterCell, ) -> Option<ParsedQuery> { let document = self.parse_document(phrase, hw_counter)?; Some(ParsedQuery::Phrase(document)) } /// Tries to parse a query. If there are any unseen tokens, returns `None` /// /// Tokens are made unique pub fn parse_text_query( &self, text: &str, hw_counter: &HardwareCounterCell, ) -> Option<ParsedQuery> { let mut tokens = AHashSet::new(); self.get_tokenizer().tokenize_query(text, |token| { tokens.insert(self.get_token(token.as_ref(), hw_counter)); }); let tokens = tokens.into_iter().collect::<Option<TokenSet>>()?; Some(ParsedQuery::AllTokens(tokens)) } pub fn parse_text_any_query( &self, text: &str, hw_counter: &HardwareCounterCell, ) -> Option<ParsedQuery> { let mut tokens = AHashSet::new(); self.get_tokenizer().tokenize_query(text, |token| { if let Some(token_id) = self.get_token(token.as_ref(), hw_counter) { tokens.insert(token_id); } }); let tokens = tokens.into_iter().collect::<TokenSet>(); Some(ParsedQuery::AnyTokens(tokens)) } pub fn parse_tokenset(&self, text: &str, hw_counter: &HardwareCounterCell) -> TokenSet { let mut tokenset = AHashSet::new(); self.get_tokenizer().tokenize_doc(text, |token| { if let Some(token_id) = self.get_token(token.as_ref(), hw_counter) { tokenset.insert(token_id); } }); TokenSet::from(tokenset) } /// Parse document /// /// If there are any unseen tokens, returns `None` pub fn parse_document(&self, text: &str, hw_counter: &HardwareCounterCell) -> Option<Document> { let mut document_tokens = Vec::new(); let mut unknow_token = false; self.get_tokenizer().tokenize_doc(text, |token| { if let Some(token_id) = self.get_token(token.as_ref(), hw_counter) { document_tokens.push(token_id); } else { unknow_token = true } }); // Bail out if the text contains 
unknown token if unknow_token { None } else { Some(Document::new(document_tokens)) } } #[cfg(test)] pub fn query<'a>( &'a self, query: &'a str, hw_counter: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { let Some(parsed_query) = self.parse_text_query(query, hw_counter) else { return Box::new(std::iter::empty()); }; self.filter_query(parsed_query, hw_counter) } /// Checks the text directly against the payload value pub fn check_payload_match<const IS_PHRASE: bool>( &self, payload_value: &serde_json::Value, text: &str, hw_counter: &HardwareCounterCell, ) -> bool { let query_opt = if IS_PHRASE { self.parse_phrase_query(text, hw_counter) } else { self.parse_text_query(text, hw_counter) }; let Some(query) = query_opt else { return false; }; FullTextIndex::get_values(payload_value) .iter() .any(|value| match &query { ParsedQuery::AllTokens(query) => { let tokenset = self.parse_tokenset(value, hw_counter); tokenset.has_subset(query) } ParsedQuery::Phrase(query) => { let document = self.parse_document(value, hw_counter); document.map(|doc| doc.has_phrase(query)).unwrap_or(false) } ParsedQuery::AnyTokens(query) => { let tokenset = self.parse_tokenset(value, hw_counter); tokenset.has_any(query) } }) } pub fn is_on_disk(&self) -> bool { match self { FullTextIndex::Mutable(_) => false, FullTextIndex::Immutable(_) => false, FullTextIndex::Mmap(index) => index.is_on_disk(), } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self { FullTextIndex::Mutable(index) => index.is_rocksdb(), FullTextIndex::Immutable(index) => index.is_rocksdb(), FullTextIndex::Mmap(_) => false, } } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { match self { FullTextIndex::Mutable(_) => {} // Not a mmap FullTextIndex::Immutable(_) => {} // Not a mmap FullTextIndex::Mmap(index) => index.populate()?, } Ok(()) } /// Drop disk cache. 
pub fn clear_cache(&self) -> OperationResult<()> { match self { // Only clears backing mmap storage if used, not in-memory representation FullTextIndex::Mutable(index) => index.clear_cache(), // Only clears backing mmap storage if used, not in-memory representation FullTextIndex::Immutable(index) => index.clear_cache(), FullTextIndex::Mmap(index) => index.clear_cache(), } } pub fn get_mutability_type(&self) -> IndexMutability { match self { FullTextIndex::Mutable(_) => IndexMutability::Mutable, FullTextIndex::Immutable(_) => IndexMutability::Immutable, FullTextIndex::Mmap(_) => IndexMutability::Immutable, } } pub fn get_storage_type(&self) -> StorageType { match self { FullTextIndex::Mutable(index) => index.storage_type(), FullTextIndex::Immutable(index) => index.storage_type(), FullTextIndex::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } } #[cfg(feature = "rocksdb")] pub struct FullTextIndexRocksDbBuilder { mutable_index: MutableFullTextIndex, keep_appendable: bool, } #[cfg(feature = "rocksdb")] impl FullTextIndexRocksDbBuilder { pub fn new( db: Arc<RwLock<DB>>, config: TextIndexParams, field: &str, keep_appendable: bool, ) -> OperationResult<Self> { let store_cf_name = FullTextIndex::storage_cf_name(field); let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); let mutable_index = MutableFullTextIndex::open_rocksdb(db_wrapper, config, true)? 
.ok_or_else(|| { OperationError::service_error(format!( "Failed to create and open mutable full text index for field: {field}" )) })?; Ok(FullTextIndexRocksDbBuilder { mutable_index, keep_appendable, }) } } #[cfg(feature = "rocksdb")] impl FieldIndexBuilderTrait for FullTextIndexRocksDbBuilder { type FieldIndexType = FullTextIndex; fn init(&mut self) -> OperationResult<()> { self.mutable_index.init() } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.mutable_index.add_point(id, payload, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { if self.keep_appendable { return Ok(FullTextIndex::Mutable(self.mutable_index)); } Ok(FullTextIndex::Immutable( ImmutableFullTextIndex::from_rocksdb_mutable(self.mutable_index), )) } } impl ValueIndexer for FullTextIndex { type ValueType = String; fn add_many( &mut self, idx: PointOffsetType, values: Vec<String>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { Self::Mutable(index) => index.add_many(idx, values, hw_counter), Self::Immutable(_) => Err(OperationError::service_error( "Cannot add values to immutable text index", )), Self::Mmap(_) => Err(OperationError::service_error( "Cannot add values to mmap text index", )), } } fn get_value(value: &Value) -> Option<String> { value.as_str().map(ToOwned::to_owned) } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { match self { FullTextIndex::Mutable(index) => index.remove_point(id), FullTextIndex::Immutable(index) => index.remove_point(id), FullTextIndex::Mmap(index) => { index.remove_point(id); Ok(()) } } } } impl PayloadFieldIndex for FullTextIndex { fn count_indexed_points(&self) -> usize { self.points_count() } fn wipe(self) -> OperationResult<()> { match self { Self::Mutable(index) => index.wipe(), Self::Immutable(index) => index.wipe(), Self::Mmap(index) => index.wipe(), } } fn flusher(&self) -> Flusher { match self { 
Self::Mutable(index) => index.flusher(), Self::Immutable(index) => index.flusher(), Self::Mmap(index) => index.flusher(), } } fn files(&self) -> Vec<PathBuf> { match self { Self::Mutable(index) => index.files(), Self::Immutable(index) => index.files(), Self::Mmap(index) => index.files(), } } fn immutable_files(&self) -> Vec<PathBuf> { match self { Self::Mutable(_) => vec![], Self::Immutable(index) => index.immutable_files(), Self::Mmap(index) => index.immutable_files(), } } fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { let parsed_query_opt = match &condition.r#match { Some(Match::Text(MatchText { text })) => self.parse_text_query(text, hw_counter), Some(Match::Phrase(MatchPhrase { phrase })) => { self.parse_phrase_query(phrase, hw_counter) } _ => return None, }; let Some(parsed_query) = parsed_query_opt else { return Some(Box::new(std::iter::empty())); }; Some(self.filter_query(parsed_query, hw_counter)) } fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { let parsed_query_opt = match &condition.r#match { Some(Match::Text(MatchText { text })) => self.parse_text_query(text, hw_counter), Some(Match::Phrase(MatchPhrase { phrase })) => { self.parse_phrase_query(phrase, hw_counter) } _ => return None, }; let Some(parsed_query) = parsed_query_opt else { return Some(CardinalityEstimation::exact(0)); }; Some(self.estimate_query_cardinality(&parsed_query, condition, hw_counter)) } fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { self.payload_blocks(threshold, key) } } pub struct FullTextGridstoreIndexBuilder { dir: PathBuf, config: TextIndexParams, index: Option<FullTextIndex>, } impl FullTextGridstoreIndexBuilder { pub fn new(dir: PathBuf, config: TextIndexParams) -> Self { Self { dir, config, index: None, } } } 
impl ValueIndexer for FullTextGridstoreIndexBuilder { type ValueType = String; fn get_value(value: &Value) -> Option<String> { FullTextIndex::get_value(value) } fn add_many( &mut self, id: PointOffsetType, values: Vec<Self::ValueType>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let values: Vec<Value> = values.into_iter().map(Value::String).collect(); let values: Vec<&Value> = values.iter().collect(); FieldIndexBuilderTrait::add_point(self, id, &values, hw_counter) } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { let Some(index) = &mut self.index else { return Err(OperationError::service_error( "FullTextIndexGridstoreBuilder: index must be initialized before adding points", )); }; index.remove_point(id) } } impl FieldIndexBuilderTrait for FullTextGridstoreIndexBuilder { type FieldIndexType = FullTextIndex; fn init(&mut self) -> OperationResult<()> { assert!( self.index.is_none(), "index must be initialized exactly once", ); self.index.replace( FullTextIndex::new_gridstore(self.dir.clone(), self.config.clone(), true)?.ok_or_else( || { OperationError::service_error( "Failed to create and open mutable full text index on gridstore", ) }, )?, ); Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let Some(index) = &mut self.index else { return Err(OperationError::service_error( "FullTextIndexGridstoreBuilder: index must be initialized before adding points", )); }; index.add_point(id, payload, hw_counter) } fn finalize(mut self) -> OperationResult<Self::FieldIndexType> { let Some(index) = self.index.take() else { return Err(OperationError::service_error( "FullTextIndexGridstoreBuilder: index must be initialized to finalize", )); }; index.flusher()()?; Ok(index) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false