repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/mod.rs
lib/segment/src/index/field_index/full_text_index/mod.rs
mod immutable_text_index; mod inverted_index; pub mod mmap_text_index; mod mutable_text_index; pub mod stop_words; pub mod text_index; pub mod tokenizers; #[cfg(test)] mod tests;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/mmap_text_index.rs
lib/segment/src/index/field_index/full_text_index/mmap_text_index.rs
use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use fs_err as fs; use serde_json::Value; use super::inverted_index::immutable_inverted_index::ImmutableInvertedIndex; use super::inverted_index::mmap_inverted_index::MmapInvertedIndex; use super::inverted_index::mutable_inverted_index::MutableInvertedIndex; use super::inverted_index::{Document, InvertedIndex, TokenSet}; use super::text_index::FullTextIndex; use super::tokenizers::Tokenizer; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; use crate::data_types::index::TextIndexParams; use crate::index::field_index::full_text_index::immutable_text_index::{ ImmutableFullTextIndex, Storage, }; use crate::index::field_index::{FieldIndexBuilderTrait, ValueIndexer}; pub struct MmapFullTextIndex { pub(super) inverted_index: MmapInvertedIndex, pub(super) tokenizer: Tokenizer, } impl MmapFullTextIndex { pub fn open( path: PathBuf, config: TextIndexParams, is_on_disk: bool, ) -> OperationResult<Option<Self>> { let populate = !is_on_disk; let has_positions = config.phrase_matching == Some(true); let tokenizer = Tokenizer::new_from_text_index_params(&config); let inverted_index = MmapInvertedIndex::open(path, populate, has_positions)?; Ok(inverted_index.map(|inverted_index| Self { inverted_index, tokenizer, })) } pub fn files(&self) -> Vec<PathBuf> { self.inverted_index.files() } pub fn immutable_files(&self) -> Vec<PathBuf> { self.inverted_index.immutable_files() } fn path(&self) -> &PathBuf { &self.inverted_index.path } pub fn wipe(self) -> OperationResult<()> { let files = self.files(); let path = self.path().clone(); // drop mmap handles before deleting files drop(self); for file in files { fs::remove_file(file)?; } let _ = fs::remove_dir(path); Ok(()) } pub fn remove_point(&mut self, id: PointOffsetType) { self.inverted_index.remove(id); } pub fn flusher(&self) -> Flusher { self.inverted_index.flusher() } 
pub fn is_on_disk(&self) -> bool { self.inverted_index.is_on_disk() } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { self.inverted_index.populate()?; Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { self.inverted_index.clear_cache()?; Ok(()) } } pub struct FullTextMmapIndexBuilder { path: PathBuf, mutable_index: MutableInvertedIndex, config: TextIndexParams, is_on_disk: bool, tokenizer: Tokenizer, } impl FullTextMmapIndexBuilder { pub fn new(path: PathBuf, config: TextIndexParams, is_on_disk: bool) -> Self { let with_positions = config.phrase_matching.unwrap_or_default(); let tokenizer = Tokenizer::new_from_text_index_params(&config); Self { path, mutable_index: MutableInvertedIndex::new(with_positions), config, is_on_disk, tokenizer, } } } impl ValueIndexer for FullTextMmapIndexBuilder { type ValueType = String; fn get_value(value: &Value) -> Option<String> { match value { Value::String(s) => Some(s.clone()), _ => None, } } fn add_many( &mut self, id: PointOffsetType, values: Vec<Self::ValueType>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { if values.is_empty() { return Ok(()); } let mut str_tokens = Vec::new(); for value in &values { self.tokenizer.tokenize_doc(value, |token| { str_tokens.push(token); }); } let tokens = self.mutable_index.register_tokens(&str_tokens); if self.mutable_index.point_to_doc.is_some() { let document = Document::new(tokens.clone()); self.mutable_index .index_document(id, document, hw_counter)?; } let token_set = TokenSet::from_iter(tokens); self.mutable_index.index_tokens(id, token_set, hw_counter)?; Ok(()) } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { self.mutable_index.remove(id); Ok(()) } } impl FieldIndexBuilderTrait for FullTextMmapIndexBuilder { type FieldIndexType = FullTextIndex; fn init(&mut self) -> OperationResult<()> { Ok(()) } fn add_point( &mut self, id: 
PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { ValueIndexer::add_point(self, id, payload, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { let Self { path, mutable_index, config, is_on_disk, tokenizer, } = self; let immutable = ImmutableInvertedIndex::from(mutable_index); fs::create_dir_all(path.as_path())?; MmapInvertedIndex::create(path.clone(), &immutable)?; let populate = !is_on_disk; let has_positions = config.phrase_matching.unwrap_or_default(); let inverted_index = MmapInvertedIndex::open(path, populate, has_positions)?.ok_or_else(|| { OperationError::service_error( "Failed to open MmapInvertedIndex that was just created", ) })?; let mmap_index = MmapFullTextIndex { inverted_index, tokenizer: tokenizer.clone(), }; let text_index = if is_on_disk { FullTextIndex::Mmap(Box::new(mmap_index)) } else { FullTextIndex::Immutable(ImmutableFullTextIndex { inverted_index: immutable, tokenizer, storage: Storage::Mmap(Box::new(mmap_index)), }) }; Ok(text_index) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/postings_iterator.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/postings_iterator.rs
use common::types::PointOffsetType; use itertools::{Either, Itertools}; use posting_list::{PostingIterator, PostingListView, PostingValue}; use super::posting_list::PostingList; use crate::index::field_index::full_text_index::inverted_index::positions::{ PartialDocument, Positions, TokenPosition, }; use crate::index::field_index::full_text_index::inverted_index::{Document, TokenId}; pub fn intersect_postings_iterator<'a>( mut postings: Vec<&'a PostingList>, ) -> impl Iterator<Item = PointOffsetType> + 'a { let smallest_posting_idx = postings .iter() .enumerate() .min_by_key(|(_idx, posting)| posting.len()) .map(|(idx, _posting)| idx) .unwrap(); let smallest_posting = postings.remove(smallest_posting_idx); smallest_posting .iter() .filter(move |doc_id| postings.iter().all(|posting| posting.contains(*doc_id))) } pub fn merge_postings_iterator<'a>( postings: Vec<&'a PostingList>, ) -> impl Iterator<Item = PointOffsetType> + 'a { postings .into_iter() .map(PostingList::iter) .kmerge_by(|a, b| a < b) .dedup() } pub fn intersect_compressed_postings_iterator<'a, V: PostingValue + 'a>( mut postings: Vec<PostingListView<'a, V>>, is_active: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { let smallest_posting_idx = postings .iter() .enumerate() .min_by_key(|(_idx, posting)| posting.len()) .map(|(idx, _posting)| idx) .unwrap(); let smallest_posting = postings.remove(smallest_posting_idx); let smallest_posting_iterator = smallest_posting.into_iter(); let mut posting_iterators = postings .into_iter() .map(PostingListView::into_iter) .collect::<Vec<_>>(); smallest_posting_iterator .map(|elem| elem.id) .filter(move |id| { is_active(*id) && posting_iterators.iter_mut().all(|posting_iterator| { // Custom "contains" check, which leverages the fact that smallest posting is sorted, // so the next id that must be in all postings is strictly greater than the previous one. 
// // This means that the other iterators can remember the last id they returned to avoid extra work posting_iterator // potential optimization: Make posting iterator of just ids, without values (a.k.a. positions). // We are discarding them here, thus unnecessarily reading them from the tails of the posting lists. .advance_until_greater_or_equal(*id) .is_some_and(|elem| elem.id == *id) }) }) } pub fn merge_compressed_postings_iterator<'a, V: PostingValue + 'a>( postings: Vec<PostingListView<'a, V>>, is_active: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { postings .into_iter() // potential optimization: Make posting iterator of just ids, without values (a.k.a. positions). // We are discarding them here, thus unnecessarily reading them from the tails of the posting lists. .map(|view| view.into_iter().map(|elem| elem.id)) .kmerge_by(|a, b| a < b) .dedup() .filter(move |id| is_active(*id)) } /// Returns an iterator over the points that match the given phrase query. 
pub fn intersect_compressed_postings_phrase_iterator<'a>( phrase: Document, token_to_posting: impl Fn(&TokenId) -> Option<PostingListView<'a, Positions>>, is_active: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { if phrase.is_empty() { // Empty request -> no matches return Either::Left(std::iter::empty()); } let postings_opt: Option<Vec<_>> = phrase .to_token_set() .tokens() .iter() .map(|token_id| token_to_posting(token_id).map(|posting| (*token_id, posting))) .collect(); let Some(mut postings) = postings_opt else { // There are unseen tokens -> no matches return Either::Left(std::iter::empty()); }; let smallest_posting_idx = postings .iter() .enumerate() .min_by_key(|(_idx, (_token_id, posting))| posting.len()) .map(|(idx, _posting)| idx) .unwrap(); let (smallest_posting_token, smallest_posting) = postings.remove(smallest_posting_idx); let smallest_posting_iterator = smallest_posting.into_iter(); let mut posting_iterators = postings .into_iter() .map(|(token_id, posting)| (token_id, posting.into_iter())) .collect::<Vec<_>>(); let has_phrase_iter = smallest_posting_iterator .filter(move |elem| { if !is_active(elem.id) { return false; } let initial_tokens_positions = elem.value.to_token_positions(smallest_posting_token); phrase_in_all_postings( elem.id, &phrase, initial_tokens_positions, &mut posting_iterators, ) }) .map(|elem| elem.id); Either::Right(has_phrase_iter) } /// Reconstructs a partial document from the posting lists (which contain positions) /// /// Returns true if the document contains the entire phrase, in the same order. /// /// # Arguments /// /// - `initial_tokens_positions` - must be prepopulated if iterating over a posting not included in the `posting_iterators`. 
fn phrase_in_all_postings<'a>( id: PointOffsetType, phrase: &Document, initial_tokens_positions: Vec<TokenPosition>, posting_iterators: &mut Vec<(TokenId, PostingIterator<'a, Positions>)>, ) -> bool { let mut tokens_positions = initial_tokens_positions; for (token_id, posting_iterator) in posting_iterators.iter_mut() { // Custom "contains" check, which leverages the fact that smallest posting is sorted, // so the next id that must be in all postings is strictly greater than the previous one. // // This means that the other iterators can remember the last id they returned to avoid extra work let Some(other) = posting_iterator.advance_until_greater_or_equal(id) else { return false; }; if id != other.id { return false; } debug_assert!(!other.value.is_empty()); tokens_positions.extend(other.value.to_token_positions(*token_id)) } PartialDocument::new(tokens_positions).has_phrase(phrase) } pub fn check_compressed_postings_phrase<'a>( phrase: &Document, point_id: PointOffsetType, token_to_posting: impl Fn(&TokenId) -> Option<PostingListView<'a, Positions>>, ) -> bool { let Some(mut posting_iterators): Option<Vec<_>> = phrase .to_token_set() .tokens() .iter() .map(|token_id| token_to_posting(token_id).map(|posting| (*token_id, posting.into_iter()))) .collect() else { // not all tokens are present in the index return false; }; phrase_in_all_postings(point_id, phrase, Vec::new(), &mut posting_iterators) } #[cfg(test)] mod tests { use posting_list::IdsPostingList; use super::*; #[test] fn test_postings_iterator() { let mut p1 = PostingList::default(); p1.insert(1); p1.insert(2); p1.insert(3); p1.insert(4); p1.insert(5); let mut p2 = PostingList::default(); p2.insert(2); p2.insert(4); p2.insert(5); p2.insert(5); let mut p3 = PostingList::default(); p3.insert(1); p3.insert(2); p3.insert(5); p3.insert(6); p3.insert(7); let postings = vec![&p1, &p2, &p3]; let merged = intersect_postings_iterator(postings); let res = merged.collect::<Vec<_>>(); assert_eq!(res, vec![2, 5]); let 
p1_compressed: IdsPostingList = p1.iter().map(|id| (id, ())).collect(); let p2_compressed: IdsPostingList = p2.iter().map(|id| (id, ())).collect(); let p3_compressed: IdsPostingList = p3.iter().map(|id| (id, ())).collect(); let compressed_posting_reades = vec![ p1_compressed.view(), p2_compressed.view(), p3_compressed.view(), ]; let merged = intersect_compressed_postings_iterator(compressed_posting_reades, |_| true); let res = merged.collect::<Vec<_>>(); assert_eq!(res, vec![2, 5]); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mutable_inverted_index.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mutable_inverted_index.rs
use std::collections::HashMap; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use itertools::Either; use super::posting_list::PostingList; use super::postings_iterator::{intersect_postings_iterator, merge_postings_iterator}; use super::{Document, InvertedIndex, ParsedQuery, TokenId, TokenSet}; use crate::common::operation_error::OperationResult; #[cfg_attr(test, derive(Clone))] pub struct MutableInvertedIndex { pub(super) postings: Vec<PostingList>, pub vocab: HashMap<String, TokenId>, pub(super) point_to_tokens: Vec<Option<TokenSet>>, /// Optional additional structure to store positional information of tokens in the documents. /// /// Must be enabled explicitly. pub point_to_doc: Option<Vec<Option<Document>>>, pub(super) points_count: usize, } impl MutableInvertedIndex { /// Create a new inverted index with or without positional information. pub fn new(with_positions: bool) -> Self { Self { postings: Vec::new(), vocab: HashMap::new(), point_to_tokens: Vec::new(), point_to_doc: with_positions.then_some(Vec::new()), points_count: 0, } } #[cfg(feature = "rocksdb")] pub fn build_index( iter: impl Iterator<Item = OperationResult<(PointOffsetType, Vec<String>)>>, phrase_matching: bool, ) -> OperationResult<Self> { let mut builder = super::mutable_inverted_index_builder::MutableInvertedIndexBuilder::new( phrase_matching, ); builder.add_iter(iter)?; Ok(builder.build()) } fn get_tokens(&self, idx: PointOffsetType) -> Option<&TokenSet> { self.point_to_tokens.get(idx as usize)?.as_ref() } fn get_document(&self, idx: PointOffsetType) -> Option<&Document> { self.point_to_doc.as_ref()?.get(idx as usize)?.as_ref() } /// Iterate over point ids whose documents contain all given tokens fn filter_has_all(&self, tokens: TokenSet) -> impl Iterator<Item = PointOffsetType> + '_ { let postings_opt: Option<Vec<_>> = tokens .tokens() .iter() .map(|&token_id| { // if a ParsedQuery token was given an index, then it must exist in the vocabulary 
// dictionary. Posting list entry can be None but it exists. self.postings.get(token_id as usize) }) .collect(); let Some(postings) = postings_opt else { // There are unseen tokens -> no matches return Either::Left(std::iter::empty()); }; if postings.is_empty() { // Empty request -> no matches return Either::Left(std::iter::empty()); } Either::Right(intersect_postings_iterator(postings)) } fn filter_has_any(&self, tokens: TokenSet) -> impl Iterator<Item = PointOffsetType> + '_ { let postings_opt: Vec<_> = tokens .tokens() .iter() .filter_map(|&token_id| { // if a ParsedQuery token was given an index, then it must exist in the vocabulary // dictionary. Posting list entry can be None but it exists. self.postings.get(token_id as usize) }) .collect(); if postings_opt.is_empty() { // Empty request -> no matches return Either::Left(std::iter::empty()); } Either::Right(merge_postings_iterator(postings_opt)) } pub fn filter_has_phrase( &self, phrase: Document, ) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { let Some(point_to_doc) = self.point_to_doc.as_ref() else { // Return empty iterator when not enabled return Box::new(std::iter::empty()); }; let iter = self .filter_has_all(phrase.to_token_set()) .filter(move |id| { let doc = point_to_doc[*id as usize] .as_ref() .expect("if it passed the intersection filter, it must exist"); doc.has_phrase(&phrase) }); Box::new(iter) } } impl InvertedIndex for MutableInvertedIndex { fn get_vocab_mut(&mut self) -> &mut HashMap<String, TokenId> { &mut self.vocab } fn index_tokens( &mut self, point_id: PointOffsetType, tokens: TokenSet, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.points_count += 1; let mut hw_cell_wb = hw_counter .payload_index_io_write_counter() .write_back_counter(); if self.point_to_tokens.len() <= point_id as usize { let new_len = point_id as usize + 1; // Only measure the overhead of `TokenSet` here since we account for the tokens a few lines below. 
hw_cell_wb .incr_delta((new_len - self.point_to_tokens.len()) * size_of::<Option<TokenSet>>()); self.point_to_tokens.resize_with(new_len, Default::default); } for token_id in tokens.tokens() { let token_idx_usize = *token_id as usize; if self.postings.len() <= token_idx_usize { let new_len = token_idx_usize + 1; hw_cell_wb.incr_delta((new_len - self.postings.len()) * size_of::<PostingList>()); self.postings.resize_with(new_len, Default::default); } hw_cell_wb.incr_delta(size_of_val(&point_id)); self.postings .get_mut(token_idx_usize) .expect("posting must exist") .insert(point_id); } self.point_to_tokens[point_id as usize] = Some(tokens); Ok(()) } fn index_document( &mut self, point_id: PointOffsetType, ordered_document: Document, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let Some(point_to_doc) = &mut self.point_to_doc else { // Phrase matching is not enabled return Ok(()); }; let mut hw_cell_wb = hw_counter .payload_index_io_write_counter() .write_back_counter(); // Ensure container has enough capacity if point_id as usize >= point_to_doc.len() { let new_len = point_id as usize + 1; hw_cell_wb.incr_delta((new_len - point_to_doc.len()) * size_of::<Option<Document>>()); point_to_doc.resize_with(new_len, Default::default); } // Store the ordered document point_to_doc[point_id as usize] = Some(ordered_document); Ok(()) } fn remove(&mut self, point_id: PointOffsetType) -> bool { if point_id as usize >= self.point_to_tokens.len() { return false; // Already removed or never actually existed } let Some(removed_token_set) = self.point_to_tokens[point_id as usize].take() else { return false; }; if let Some(point_to_doc) = &mut self.point_to_doc { point_to_doc[point_id as usize] = None; } self.points_count -= 1; for removed_token in removed_token_set.tokens() { // unwrap safety: posting list exists and contains the point idx let posting = self.postings.get_mut(*removed_token as usize).unwrap(); posting.remove(point_id); } true } fn filter( &self, query: 
ParsedQuery, _hw_counter: &HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { match query { ParsedQuery::AllTokens(tokens) => Box::new(self.filter_has_all(tokens)), ParsedQuery::Phrase(phrase) => self.filter_has_phrase(phrase), ParsedQuery::AnyTokens(tokens) => Box::new(self.filter_has_any(tokens)), } } fn get_posting_len(&self, token_id: TokenId, _: &HardwareCounterCell) -> Option<usize> { self.postings.get(token_id as usize).map(|x| x.len()) } fn vocab_with_postings_len_iter(&self) -> impl Iterator<Item = (&str, usize)> + '_ { self.vocab.iter().filter_map(|(token, &posting_idx)| { self.postings .get(posting_idx as usize) .map(|postings| (token.as_str(), postings.len())) }) } fn check_match(&self, parsed_query: &ParsedQuery, point_id: PointOffsetType) -> bool { match parsed_query { ParsedQuery::AllTokens(query) => { let Some(doc) = self.get_tokens(point_id) else { return false; }; // Check that all tokens are in document doc.has_subset(query) } ParsedQuery::Phrase(document) => { let Some(doc) = self.get_document(point_id) else { return false; }; // Check that all tokens are in document, in order doc.has_phrase(document) } ParsedQuery::AnyTokens(query) => { let Some(doc) = self.get_tokens(point_id) else { return false; }; // Check that at least one token is in document doc.has_any(query) } } } fn values_is_empty(&self, point_id: PointOffsetType) -> bool { self.get_tokens(point_id).is_none_or(|x| x.is_empty()) } fn values_count(&self, point_id: PointOffsetType) -> usize { // Maybe we want number of documents in the future? self.get_tokens(point_id).map(|x| x.len()).unwrap_or(0) } fn points_count(&self) -> usize { self.points_count } fn get_token_id(&self, token: &str, _hw_counter: &HardwareCounterCell) -> Option<TokenId> { self.vocab.get(token).copied() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/posting_list.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/posting_list.rs
use common::types::PointOffsetType; use roaring::RoaringBitmap; #[derive(Clone, Debug, Default)] pub struct PostingList { list: RoaringBitmap, } impl PostingList { pub fn insert(&mut self, idx: PointOffsetType) { self.list.insert(idx); } pub fn remove(&mut self, idx: PointOffsetType) { self.list.remove(idx); } #[inline] pub fn len(&self) -> usize { self.list.len() as usize } #[inline] pub fn is_empty(&self) -> bool { self.list.is_empty() } #[inline] pub fn contains(&self, val: PointOffsetType) -> bool { self.list.contains(val) } #[inline] pub fn iter(&self) -> impl Iterator<Item = PointOffsetType> + '_ { self.list.iter() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mutable_inverted_index_builder.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mutable_inverted_index_builder.rs
use common::types::PointOffsetType; use super::InvertedIndex; use super::mutable_inverted_index::MutableInvertedIndex; #[cfg(feature = "rocksdb")] use crate::common::operation_error::OperationResult; use crate::index::field_index::full_text_index::inverted_index::{Document, TokenSet}; pub struct MutableInvertedIndexBuilder { index: MutableInvertedIndex, } impl MutableInvertedIndexBuilder { pub fn new(phrase_matching: bool) -> Self { let index = MutableInvertedIndex::new(phrase_matching); Self { index } } /// Add a vector to the inverted index builder pub fn add(&mut self, idx: PointOffsetType, str_tokens: impl IntoIterator<Item = String>) { self.index.points_count += 1; // resize point_to_* structures if needed if self.index.point_to_tokens.len() <= idx as usize { self.index .point_to_tokens .resize_with(idx as usize + 1, Default::default); if let Some(point_to_doc) = self.index.point_to_doc.as_mut() { point_to_doc.resize_with(idx as usize + 1, Default::default); } } let tokens = self.index.register_tokens(str_tokens); // insert as whole document if let Some(point_to_doc) = self.index.point_to_doc.as_mut() { point_to_doc[idx as usize] = Some(Document::new(tokens.clone())); } // insert as tokenset let tokens_set = TokenSet::from_iter(tokens); self.index.point_to_tokens[idx as usize] = Some(tokens_set); } #[cfg(feature = "rocksdb")] pub fn add_iter( &mut self, iter: impl Iterator<Item = OperationResult<(PointOffsetType, Vec<String>)>>, // TODO(phrase-index): add param for including phrase field ) -> OperationResult<()> { for item in iter { let (idx, str_tokens) = item?; self.add(idx, str_tokens); } Ok(()) } /// Consumes the builder and returns a MutableInvertedIndex pub fn build(mut self) -> MutableInvertedIndex { // build postings from point_to_tokens // build in order to increase point id for (idx, tokenset) in self.index.point_to_tokens.iter().enumerate() { if let Some(tokenset) = tokenset { for token_idx in tokenset.tokens() { if self.index.postings.len() <= 
*token_idx as usize { self.index .postings .resize_with(*token_idx as usize + 1, Default::default); } self.index .postings .get_mut(*token_idx as usize) .expect("posting must exist") .insert(idx as PointOffsetType); } } } self.index } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mod.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mod.rs
pub(super) mod immutable_inverted_index; pub mod immutable_postings_enum; pub(super) mod mmap_inverted_index; pub(super) mod mutable_inverted_index; pub(super) mod mutable_inverted_index_builder; mod positions; mod posting_list; mod postings_iterator; use std::cmp::min; use std::collections::HashMap; use ahash::AHashSet; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use itertools::Itertools; use crate::common::operation_error::OperationResult; use crate::index::field_index::{CardinalityEstimation, PayloadBlockCondition, PrimaryCondition}; use crate::index::query_estimator::expected_should_estimation; use crate::types::{FieldCondition, Match, PayloadKeyType}; pub type TokenId = u32; /// Contains the set of tokens that are in a document. /// /// Internally, it keeps them unique and sorted, so that we can binary-search over them #[derive(Default, Debug, Clone)] pub struct TokenSet(Vec<TokenId>); impl TokenSet { pub fn len(&self) -> usize { self.0.len() } pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn tokens(&self) -> &[TokenId] { &self.0 } pub fn inner(self) -> Vec<TokenId> { self.0 } pub fn contains(&self, token: &TokenId) -> bool { self.0.binary_search(token).is_ok() } /// Checks if the current set contains all given tokens. /// /// Returns false if the subset is empty pub fn has_subset(&self, subset: &TokenSet) -> bool { if subset.is_empty() { return false; } subset.0.iter().all(|token| self.contains(token)) } /// Checks if the current set contains any of the given tokens. 
/// Returns false if the subset is empty pub fn has_any(&self, subset: &TokenSet) -> bool { if subset.is_empty() { return false; } subset.0.iter().any(|token| self.contains(token)) } } impl From<AHashSet<TokenId>> for TokenSet { fn from(tokens: AHashSet<TokenId>) -> Self { let sorted_unique = tokens.into_iter().sorted_unstable().collect(); Self(sorted_unique) } } impl FromIterator<TokenId> for TokenSet { fn from_iter<T: IntoIterator<Item = TokenId>>(iter: T) -> Self { let tokens = iter .into_iter() .sorted_unstable() .dedup() .collect::<Vec<_>>(); Self(tokens) } } /// Contains the token ids that make up a document, in the same order that appear in the document. /// /// In contrast to `TokenSet`, it can contain the same token in multiple places. #[derive(Debug, Clone)] pub struct Document(Vec<TokenId>); impl Document { pub fn new(tokens: Vec<TokenId>) -> Self { Self(tokens) } pub fn len(&self) -> usize { self.0.len() } pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn tokens(&self) -> &[TokenId] { &self.0 } pub fn to_token_set(&self) -> TokenSet { self.0.iter().copied().collect() } /// Checks if the current document contains the given phrase. /// /// Returns false if the phrase is empty pub fn has_phrase(&self, phrase: &Document) -> bool { let doc = self.0.as_slice(); let phrase = phrase.0.as_slice(); if doc.is_empty() || phrase.is_empty() { return false; } // simple check for tokens in the same order as phrase doc.windows(phrase.len()).any(|window| window == phrase) } } impl IntoIterator for Document { type Item = TokenId; type IntoIter = std::vec::IntoIter<TokenId>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl FromIterator<TokenId> for Document { fn from_iter<T: IntoIterator<Item = TokenId>>(iter: T) -> Self { let tokens = iter.into_iter().collect::<Vec<_>>(); Self(tokens) } } #[derive(Debug, Clone)] pub enum ParsedQuery { /// All these tokens must be present in the document, regardless of order. 
/// /// In other words this should be a subset of the document's token set. AllTokens(TokenSet), /// At least one of these tokens must be present in the document. AnyTokens(TokenSet), /// All these tokens must be present in the document, in the same order as this query. Phrase(Document), } pub trait InvertedIndex { fn get_vocab_mut(&mut self) -> &mut HashMap<String, TokenId>; /// Translate the string tokens into token ids. /// If it is an unseen token, it is added to the vocabulary and a new token id is generated. /// /// The order of the tokens is preserved. fn register_tokens<'a>( &mut self, str_tokens: impl IntoIterator<Item = impl AsRef<str>> + 'a, ) -> Vec<TokenId> { str_tokens .into_iter() .map(|token| self.register_token(token)) .collect() } /// Translate the string token into token id. /// If it is an unseen token, it is added to the vocabulary and a new token id is generated. fn register_token<S: AsRef<str>>(&mut self, token_str: S) -> TokenId { let vocab = self.get_vocab_mut(); match vocab.get(token_str.as_ref()) { Some(&idx) => idx, None => { let next_token_id = vocab.len() as TokenId; vocab.insert(token_str.as_ref().to_string(), next_token_id); next_token_id } } } fn index_tokens( &mut self, idx: PointOffsetType, tokens: TokenSet, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; fn index_document( &mut self, idx: PointOffsetType, document: Document, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; fn remove(&mut self, idx: PointOffsetType) -> bool; fn filter<'a>( &'a self, query: ParsedQuery, hw_counter: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a>; fn get_posting_len(&self, token_id: TokenId, hw_counter: &HardwareCounterCell) -> Option<usize>; fn estimate_cardinality( &self, query: &ParsedQuery, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { match query { ParsedQuery::AllTokens(tokens) => { self.estimate_has_subset_cardinality(tokens, condition, 
hw_counter) } ParsedQuery::Phrase(phrase) => { self.estimate_has_phrase_cardinality(phrase, condition, hw_counter) } ParsedQuery::AnyTokens(tokens) => { self.estimate_has_any_cardinality(tokens, condition, hw_counter) } } } fn estimate_has_subset_cardinality( &self, tokens: &TokenSet, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { let points_count = self.points_count(); let posting_lengths: Option<Vec<usize>> = tokens .tokens() .iter() .map(|&vocab_idx| self.get_posting_len(vocab_idx, hw_counter)) .collect(); if posting_lengths.is_none() || points_count == 0 { // There are unseen tokens -> no matches return CardinalityEstimation::exact(0) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } let postings = posting_lengths.unwrap(); if postings.is_empty() { // Empty request -> no matches return CardinalityEstimation::exact(0) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } // Smallest posting is the largest possible cardinality let smallest_posting = postings.iter().min().copied().unwrap(); if postings.len() == 1 { return CardinalityEstimation::exact(smallest_posting) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } let expected_frac: f64 = postings .iter() .map(|posting| *posting as f64 / points_count as f64) .product(); let exp = (expected_frac * points_count as f64) as usize; CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(condition.clone()))], min: 0, // ToDo: make better estimation exp, max: smallest_posting, } } fn estimate_has_any_cardinality( &self, tokens: &TokenSet, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { let points_count = self.points_count(); let posting_lengths: Vec<_> = tokens .tokens() .iter() .filter_map(|&vocab_idx| self.get_posting_len(vocab_idx, hw_counter)) .collect(); if posting_lengths.is_empty() { // Empty request -> no 
matches return CardinalityEstimation::exact(0) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } // At least one posting is the largest possible cardinality let largest_posting = posting_lengths.iter().max().copied().unwrap(); if posting_lengths.len() == 1 { return CardinalityEstimation::exact(largest_posting) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } let sum: usize = posting_lengths.iter().sum(); let exp = expected_should_estimation(posting_lengths.into_iter(), points_count); CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(condition.clone()))], min: largest_posting, exp, max: min(sum, points_count), } } fn estimate_has_phrase_cardinality( &self, phrase: &Document, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { if phrase.is_empty() { return CardinalityEstimation::exact(0) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); } // Start with same cardinality estimation as has_subset let tokenset = phrase.to_token_set(); let subset_estimation = self.estimate_has_subset_cardinality(&tokenset, condition, hw_counter); // But we can restrict it by considering the phrase length let phrase_sq = phrase.len() * phrase.len(); CardinalityEstimation { primary_clauses: vec![PrimaryCondition::Condition(Box::new(condition.clone()))], min: subset_estimation.min / phrase_sq, exp: subset_estimation.exp / phrase_sq, max: subset_estimation.max / phrase_sq, } } fn vocab_with_postings_len_iter(&self) -> impl Iterator<Item = (&str, usize)> + '_; fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> impl Iterator<Item = PayloadBlockCondition> + '_ { let map_filter_condition = move |(token, postings_len): (&str, usize)| { if postings_len >= threshold { Some(PayloadBlockCondition { condition: FieldCondition::new_match(key.clone(), Match::new_text(token)), cardinality: postings_len, }) } else { None } 
}; // It might be very hard to predict possible combinations of conditions, // so we only build it for individual tokens self.vocab_with_postings_len_iter() .filter_map(map_filter_condition) } fn check_match(&self, parsed_query: &ParsedQuery, point_id: PointOffsetType) -> bool; fn values_is_empty(&self, point_id: PointOffsetType) -> bool; fn values_count(&self, point_id: PointOffsetType) -> usize; fn points_count(&self) -> usize; fn get_token_id(&self, token: &str, hw_counter: &HardwareCounterCell) -> Option<TokenId>; } #[cfg(test)] mod tests { use common::counter::hardware_counter::HardwareCounterCell; use rand::Rng; use rand::seq::SliceRandom; use rstest::rstest; use super::{Document, InvertedIndex, ParsedQuery, TokenId, TokenSet}; use crate::index::field_index::full_text_index::inverted_index::immutable_inverted_index::ImmutableInvertedIndex; use crate::index::field_index::full_text_index::inverted_index::mmap_inverted_index::MmapInvertedIndex; use crate::index::field_index::full_text_index::inverted_index::mutable_inverted_index::MutableInvertedIndex; fn generate_word() -> String { let mut rng = rand::rng(); // Each word is 1 to 3 characters long let len = rng.random_range(1..=3); rng.sample_iter(rand::distr::Alphanumeric) .take(len) .map(char::from) .collect() } fn generate_query() -> Vec<String> { let mut rng = rand::rng(); let len = rng.random_range(1..=2); (0..len).map(|_| generate_word()).collect() } /// Tries to parse a query. 
If there is an unknown id to a token, returns `None` fn to_parsed_query( query: Vec<String>, token_to_id: impl Fn(String) -> Option<TokenId>, ) -> Option<ParsedQuery> { let tokens = query .into_iter() .map(token_to_id) .collect::<Option<TokenSet>>()?; Some(ParsedQuery::AllTokens(tokens)) } fn to_parsed_query_any( query: Vec<String>, token_to_id: impl Fn(String) -> Option<TokenId>, ) -> Option<ParsedQuery> { let tokens = query .into_iter() .map(token_to_id) .collect::<Option<TokenSet>>()?; Some(ParsedQuery::AnyTokens(tokens)) } fn mutable_inverted_index( indexed_count: u32, deleted_count: u32, with_positions: bool, ) -> MutableInvertedIndex { let mut index = MutableInvertedIndex::new(with_positions); let hw_counter = HardwareCounterCell::new(); for idx in 0..indexed_count { // Generate 10 to 30-word documents let doc_len = rand::rng().random_range(10..=30); let tokens: Vec<String> = (0..doc_len).map(|_| generate_word()).collect(); let token_ids = index.register_tokens(&tokens); if with_positions { index .index_document(idx, Document(token_ids.clone()), &hw_counter) .unwrap(); } let token_set = TokenSet::from_iter(token_ids); index.index_tokens(idx, token_set, &hw_counter).unwrap(); } // Remove some points let mut points_to_delete = (0..indexed_count).collect::<Vec<_>>(); points_to_delete.shuffle(&mut rand::rng()); for idx in &points_to_delete[..deleted_count as usize] { index.remove(*idx); } index } #[rstest] fn test_mutable_to_immutable(#[values(false, true)] phrase_matching: bool) { let mutable = mutable_inverted_index(2000, 400, phrase_matching); // todo: test with phrase-enabled let immutable = ImmutableInvertedIndex::from(mutable.clone()); assert!(immutable.vocab.len() < mutable.vocab.len()); assert!(immutable.postings.len() < mutable.postings.len()); assert!(!immutable.vocab.is_empty()); // Check that new vocabulary token ids leads to the same posting lists assert!({ immutable.vocab.iter().all(|(key, new_token)| { let mut new_posting_iter = 
immutable.postings.iter_ids(*new_token).unwrap(); let orig_token = mutable.vocab.get(key).unwrap(); let orig_posting = mutable.postings.get(*orig_token as usize).cloned().unwrap(); let all_equal = orig_posting .iter() .zip(&mut new_posting_iter) .all(|(orig, new)| orig == new); let same_length = new_posting_iter.next().is_none(); all_equal && same_length }) }); } #[rstest] #[case(2000, 400)] #[case(2000, 2000)] #[case(1111, 1110)] #[case(1111, 0)] #[case(10, 2)] #[case(0, 0)] #[test] fn test_immutable_to_mmap_to_immutable( #[case] indexed_count: u32, #[case] deleted_count: u32, #[values(false, true)] phrase_matching: bool, ) { use std::collections::HashSet; let mutable = mutable_inverted_index(indexed_count, deleted_count, phrase_matching); let immutable = ImmutableInvertedIndex::from(mutable); let mmap_dir = tempfile::tempdir().unwrap(); let hw_counter = HardwareCounterCell::new(); MmapInvertedIndex::create(mmap_dir.path().into(), &immutable).unwrap(); let mmap = MmapInvertedIndex::open(mmap_dir.path().into(), false, phrase_matching) .unwrap() .unwrap(); let imm_mmap = ImmutableInvertedIndex::from(&mmap); // Check same vocabulary for (token, token_id) in immutable.vocab.iter() { assert_eq!(mmap.get_token_id(token, &hw_counter), Some(*token_id)); assert_eq!(imm_mmap.get_token_id(token, &hw_counter), Some(*token_id)); } // Check same postings for token_id in 0..immutable.postings.len() as TokenId { let mutable_ids = immutable .postings .iter_ids(token_id) .unwrap() .collect::<HashSet<_>>(); // Check mutable vs mmap let mmap_ids = mmap.storage.postings.iter_ids(token_id).unwrap().collect(); assert_eq!(mutable_ids, mmap_ids); // Check mutable vs immutable mmap let imm_mmap_ids = imm_mmap .postings .iter_ids(token_id) .unwrap() .collect::<HashSet<_>>(); assert_eq!(mutable_ids, imm_mmap_ids); } for (point_id, count) in immutable.point_to_tokens_count.iter().enumerate() { // Check same deleted points assert_eq!( mmap.storage.deleted_points.get(point_id).unwrap(), *count 
== 0, "point_id: {point_id}", ); // Check same count assert_eq!( *mmap.storage.point_to_tokens_count.get(point_id).unwrap(), *count ); assert_eq!(imm_mmap.point_to_tokens_count[point_id], *count); } // Check same points count assert_eq!(immutable.points_count, mmap.active_points_count); assert_eq!(immutable.points_count, imm_mmap.points_count); } #[rstest] fn test_mmap_index_congruence(#[values(false, true)] phrase_matching: bool) { let indexed_count = 10000; let deleted_count = 500; let hw_counter = HardwareCounterCell::new(); let mmap_dir = tempfile::tempdir().unwrap(); let mut mut_index = mutable_inverted_index(indexed_count, deleted_count, phrase_matching); let immutable = ImmutableInvertedIndex::from(mut_index.clone()); MmapInvertedIndex::create(mmap_dir.path().into(), &immutable).unwrap(); let mut mmap_index = MmapInvertedIndex::open(mmap_dir.path().into(), false, phrase_matching) .unwrap() .unwrap(); let mut imm_mmap_index = ImmutableInvertedIndex::from(&mmap_index); let queries: Vec<_> = (0..100).map(|_| generate_query()).collect(); let mut_parsed_queries: Vec<_> = queries .iter() .cloned() .flat_map(|query| { vec![ to_parsed_query(query.clone(), |token| mut_index.vocab.get(&token).copied()), to_parsed_query_any(query, |token| mut_index.vocab.get(&token).copied()), ] }) .collect(); let mmap_parsed_queries: Vec<_> = queries .iter() .cloned() .flat_map(|query| { vec![ to_parsed_query(query.clone(), |token| { mmap_index.get_token_id(&token, &hw_counter) }), to_parsed_query_any(query, |token| { mmap_index.get_token_id(&token, &hw_counter) }), ] }) .collect(); let imm_mmap_parsed_queries: Vec<_> = queries .into_iter() .flat_map(|query| { vec![ to_parsed_query(query.clone(), |token| { imm_mmap_index.get_token_id(&token, &hw_counter) }), to_parsed_query_any(query, |token| { imm_mmap_index.get_token_id(&token, &hw_counter) }), ] }) .collect(); check_query_congruence( &mut_parsed_queries, &mmap_parsed_queries, &imm_mmap_parsed_queries, &mut_index, &mmap_index, 
&imm_mmap_index, &hw_counter, ); // Delete random documents from both indexes let points_to_delete: Vec<_> = (0..deleted_count) .map(|_| rand::rng().random_range(0..indexed_count)) .collect(); for point_id in &points_to_delete { mut_index.remove(*point_id); mmap_index.remove(*point_id); imm_mmap_index.remove(*point_id); } // Check congruence after deletion check_query_congruence( &mut_parsed_queries, &mmap_parsed_queries, &imm_mmap_parsed_queries, &mut_index, &mmap_index, &imm_mmap_index, &hw_counter, ); } fn check_query_congruence( mut_parsed_queries: &[Option<ParsedQuery>], mmap_parsed_queries: &[Option<ParsedQuery>], imm_mmap_parsed_queries: &[Option<ParsedQuery>], mut_index: &MutableInvertedIndex, mmap_index: &MmapInvertedIndex, imm_mmap_index: &ImmutableInvertedIndex, hw_counter: &HardwareCounterCell, ) { for queries in mut_parsed_queries.iter().cloned().zip( mmap_parsed_queries .iter() .cloned() .zip(imm_mmap_parsed_queries.iter().cloned()), ) { let (Some(mut_query), (Some(imm_query), Some(imm_mmap_query))) = queries else { // Immutable index can have a smaller vocabulary, since it only contains tokens that have // non-empty posting lists. // Since we removed some documents from the mutable index, it can happen that the immutable // index returns None when parsing the query, even if the mutable index returns Some. // // In this case both queries would filter to an empty set of documents. continue; }; let mut_filtered = mut_index.filter(mut_query, hw_counter).collect::<Vec<_>>(); let imm_filtered = mmap_index.filter(imm_query, hw_counter).collect::<Vec<_>>(); let imm_mmap_filtered = imm_mmap_index .filter(imm_mmap_query, hw_counter) .collect::<Vec<_>>(); assert_eq!(mut_filtered, imm_filtered); assert_eq!(imm_filtered, imm_mmap_filtered); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/immutable_postings_enum.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/immutable_postings_enum.rs
#[cfg(test)] use common::types::PointOffsetType; use posting_list::PostingList; use super::positions::Positions; use crate::index::field_index::full_text_index::inverted_index::TokenId; #[cfg_attr(test, derive(Clone))] #[derive(Debug)] pub enum ImmutablePostings { Ids(Vec<PostingList<()>>), WithPositions(Vec<PostingList<Positions>>), } impl ImmutablePostings { pub fn len(&self) -> usize { match self { ImmutablePostings::Ids(lists) => lists.len(), ImmutablePostings::WithPositions(lists) => lists.len(), } } pub fn posting_len(&self, token: TokenId) -> Option<usize> { match self { ImmutablePostings::Ids(postings) => { postings.get(token as usize).map(|posting| posting.len()) } ImmutablePostings::WithPositions(postings) => { postings.get(token as usize).map(|posting| posting.len()) } } } #[cfg(test)] pub fn iter_ids( &self, token_id: TokenId, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + '_>> { match self { ImmutablePostings::Ids(postings) => postings.get(token_id as usize).map(|posting| { Box::new(posting.iter().map(|elem| elem.id)) as Box<dyn Iterator<Item = PointOffsetType>> }), ImmutablePostings::WithPositions(postings) => { postings.get(token_id as usize).map(|posting| { Box::new(posting.iter().map(|elem| elem.id)) as Box<dyn Iterator<Item = PointOffsetType>> }) } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/immutable_inverted_index.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/immutable_inverted_index.rs
use std::collections::HashMap; use std::fmt::Debug; use ahash::AHashMap; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use itertools::Either; use posting_list::{PostingBuilder, PostingList, PostingListView, PostingValue}; use super::immutable_postings_enum::ImmutablePostings; use super::mmap_inverted_index::MmapInvertedIndex; use super::mmap_inverted_index::mmap_postings_enum::MmapPostingsEnum; use super::mutable_inverted_index::MutableInvertedIndex; use super::positions::Positions; use super::postings_iterator::{ intersect_compressed_postings_iterator, merge_compressed_postings_iterator, }; use super::{Document, InvertedIndex, ParsedQuery, TokenId, TokenSet}; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::full_text_index::inverted_index::postings_iterator::{ check_compressed_postings_phrase, intersect_compressed_postings_phrase_iterator, }; #[cfg_attr(test, derive(Clone))] #[derive(Debug)] pub struct ImmutableInvertedIndex { pub(in crate::index::field_index::full_text_index) postings: ImmutablePostings, pub(in crate::index::field_index::full_text_index) vocab: HashMap<String, TokenId>, pub(in crate::index::field_index::full_text_index) point_to_tokens_count: Vec<usize>, pub(in crate::index::field_index::full_text_index) points_count: usize, } impl ImmutableInvertedIndex { /// Iterate over point ids whose documents contain all given tokens fn filter_has_all<'a>( &'a self, tokens: TokenSet, ) -> impl Iterator<Item = PointOffsetType> + 'a { // in case of immutable index, deleted documents are still in the postings let filter = move |idx| { self.point_to_tokens_count .get(idx as usize) .is_some_and(|x| *x > 0) }; fn intersection<'a, V: PostingValue>( postings: &'a [PostingList<V>], tokens: TokenSet, filter: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { let postings_opt: Option<Vec<_>> = tokens .tokens() .iter() 
.map(|&token_id| postings.get(token_id as usize).map(PostingList::view)) .collect(); // All tokens must have postings let Some(postings) = postings_opt else { return Either::Left(std::iter::empty()); }; // Query must not be empty if postings.is_empty() { return Either::Left(std::iter::empty()); }; Either::Right(intersect_compressed_postings_iterator(postings, filter)) } match &self.postings { ImmutablePostings::Ids(postings) => { Either::Left(intersection(postings, tokens, filter)) } ImmutablePostings::WithPositions(postings) => { Either::Right(intersection(postings, tokens, filter)) } } } /// Iterate over point ids whose documents contain at least one of the given tokens fn filter_has_any<'a>( &'a self, tokens: TokenSet, ) -> impl Iterator<Item = PointOffsetType> + 'a { // in case of immutable index, deleted documents are still in the postings let is_active = move |idx| { self.point_to_tokens_count .get(idx as usize) .is_some_and(|x| *x > 0) }; fn merge<'a, V: PostingValue>( postings: &'a [PostingList<V>], tokens: TokenSet, is_active: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { let postings: Vec<_> = tokens .tokens() .iter() .filter_map(|&token_id| postings.get(token_id as usize).map(PostingList::view)) .collect(); // Query must not be empty if postings.is_empty() { return Either::Left(std::iter::empty()); }; Either::Right(merge_compressed_postings_iterator(postings, is_active)) } match &self.postings { ImmutablePostings::Ids(postings) => Either::Left(merge(postings, tokens, is_active)), ImmutablePostings::WithPositions(postings) => { Either::Right(merge(postings, tokens, is_active)) } } } fn check_has_subset(&self, tokens: &TokenSet, point_id: PointOffsetType) -> bool { if tokens.is_empty() { return false; } // check presence of the document if self.values_is_empty(point_id) { return false; } fn check_intersection<V: PostingValue>( postings: &[PostingList<V>], tokens: &TokenSet, point_id: PointOffsetType, ) -> bool { 
// Check that all tokens are in document tokens.tokens().iter().all(|token_id| { let posting_list = &postings[*token_id as usize]; posting_list.visitor().contains(point_id) }) } match &self.postings { ImmutablePostings::Ids(postings) => check_intersection(postings, tokens, point_id), ImmutablePostings::WithPositions(postings) => { check_intersection(postings, tokens, point_id) } } } fn check_has_any(&self, tokens: &TokenSet, point_id: PointOffsetType) -> bool { if tokens.is_empty() { return false; } // check presence of the document if self.values_is_empty(point_id) { return false; } fn check_any<V: PostingValue>( postings: &[PostingList<V>], tokens: &TokenSet, point_id: PointOffsetType, ) -> bool { // Check that at least one token is in document tokens.tokens().iter().any(|token_id| { let posting_list = &postings[*token_id as usize]; posting_list.visitor().contains(point_id) }) } match &self.postings { ImmutablePostings::Ids(postings) => check_any(postings, tokens, point_id), ImmutablePostings::WithPositions(postings) => check_any(postings, tokens, point_id), } } /// Iterate over point ids whose documents contain all given tokens in the same order they are provided pub fn filter_has_phrase<'a>( &'a self, phrase: Document, ) -> impl Iterator<Item = PointOffsetType> + 'a { // in case of mmap immutable index, deleted points are still in the postings let is_active = move |idx| { self.point_to_tokens_count .get(idx as usize) .is_some_and(|x| *x > 0) }; match &self.postings { ImmutablePostings::WithPositions(postings) => { Either::Right(intersect_compressed_postings_phrase_iterator( phrase, |token_id| postings.get(*token_id as usize).map(PostingList::view), is_active, )) } // cannot do phrase matching if there's no positional information ImmutablePostings::Ids(_postings) => Either::Left(std::iter::empty()), } } /// Checks if the point document contains all given tokens in the same order they are provided pub fn check_has_phrase(&self, phrase: &Document, point_id: 
PointOffsetType) -> bool { // in case of mmap immutable index, deleted points are still in the postings if self .point_to_tokens_count .get(point_id as usize) .is_none_or(|x| *x == 0) { return false; } match &self.postings { ImmutablePostings::WithPositions(postings) => { check_compressed_postings_phrase(phrase, point_id, |token_id| { postings.get(*token_id as usize).map(PostingList::view) }) } // cannot do phrase matching if there's no positional information ImmutablePostings::Ids(_postings) => false, } } } impl InvertedIndex for ImmutableInvertedIndex { fn get_vocab_mut(&mut self) -> &mut HashMap<String, TokenId> { &mut self.vocab } fn index_tokens( &mut self, _idx: PointOffsetType, _tokens: super::TokenSet, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { Err(OperationError::service_error( "Can't add values to immutable text index", )) } fn index_document( &mut self, _idx: PointOffsetType, _document: super::Document, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { Err(OperationError::service_error( "Can't add values to immutable text index", )) } fn remove(&mut self, idx: PointOffsetType) -> bool { if self.values_is_empty(idx) { return false; // Already removed or never actually existed } self.point_to_tokens_count[idx as usize] = 0; self.points_count -= 1; true } fn filter<'a>( &'a self, query: ParsedQuery, _hw_counter: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { match query { ParsedQuery::AllTokens(tokens) => Box::new(self.filter_has_all(tokens)), ParsedQuery::Phrase(tokens) => Box::new(self.filter_has_phrase(tokens)), ParsedQuery::AnyTokens(tokens) => Box::new(self.filter_has_any(tokens)), } } fn get_posting_len(&self, token_id: TokenId, _: &HardwareCounterCell) -> Option<usize> { self.postings.posting_len(token_id) } fn vocab_with_postings_len_iter(&self) -> impl Iterator<Item = (&str, usize)> + '_ { self.vocab.iter().filter_map(|(token, &token_id)| { self.postings .posting_len(token_id) 
.map(|len| (token.as_str(), len)) }) } fn check_match(&self, parsed_query: &ParsedQuery, point_id: PointOffsetType) -> bool { match parsed_query { ParsedQuery::AllTokens(tokens) => self.check_has_subset(tokens, point_id), ParsedQuery::Phrase(phrase) => self.check_has_phrase(phrase, point_id), ParsedQuery::AnyTokens(tokens) => self.check_has_any(tokens, point_id), } } fn values_is_empty(&self, point_id: PointOffsetType) -> bool { self.point_to_tokens_count .get(point_id as usize) .is_none_or(|count| *count == 0) } fn values_count(&self, point_id: PointOffsetType) -> usize { self.point_to_tokens_count .get(point_id as usize) .copied() .unwrap_or(0) } fn points_count(&self) -> usize { self.points_count } fn get_token_id(&self, token: &str, _: &HardwareCounterCell) -> Option<TokenId> { self.vocab.get(token).copied() } } impl From<MutableInvertedIndex> for ImmutableInvertedIndex { fn from(index: MutableInvertedIndex) -> Self { let MutableInvertedIndex { postings, vocab, point_to_tokens, point_to_doc, points_count, } = index; let (postings, vocab, orig_to_new_token) = optimized_postings_and_vocab(postings, vocab); let postings = match point_to_doc { None => ImmutablePostings::Ids(create_compressed_postings(postings)), Some(point_to_doc) => { ImmutablePostings::WithPositions(create_compressed_postings_with_positions( postings, point_to_doc, &orig_to_new_token, )) } }; ImmutableInvertedIndex { postings, vocab, point_to_tokens_count: point_to_tokens .iter() .map(|tokenset| { tokenset .as_ref() .map(|tokenset| tokenset.len()) .unwrap_or(0) }) .collect(), points_count, } } } fn optimized_postings_and_vocab( postings: Vec<super::posting_list::PostingList>, vocab: HashMap<String, u32>, ) -> ( Vec<super::posting_list::PostingList>, HashMap<String, u32>, AHashMap<u32, u32>, ) { // Keep only tokens that have non-empty postings let (postings, orig_to_new_token): (Vec<_>, AHashMap<_, _>) = postings .into_iter() .enumerate() .filter_map(|(orig_token, posting)| 
(!posting.is_empty()).then_some((orig_token, posting))) .enumerate() .map(|(new_token, (orig_token, posting))| { (posting, (orig_token as TokenId, new_token as TokenId)) }) .unzip(); // Update vocab entries let mut vocab: HashMap<String, TokenId> = vocab .into_iter() .filter_map(|(key, orig_token)| { orig_to_new_token .get(&orig_token) .map(|new_token| (key, *new_token)) }) .collect(); vocab.shrink_to_fit(); (postings, vocab, orig_to_new_token) } fn create_compressed_postings( postings: Vec<super::posting_list::PostingList>, ) -> Vec<PostingList<()>> { postings .into_iter() .map(|posting| { let mut builder = PostingBuilder::new(); for id in posting.iter() { builder.add_id(id); } builder.build() }) .collect() } fn create_compressed_postings_with_positions( postings: Vec<super::posting_list::PostingList>, point_to_doc: Vec<Option<Document>>, orig_to_new_token: &AHashMap<TokenId, TokenId>, ) -> Vec<PostingList<Positions>> { // precalculate positions for each token in each document let mut point_to_tokens_positions: Vec<AHashMap<TokenId, Positions>> = point_to_doc .into_iter() .map(|doc_opt| { let Some(doc) = doc_opt else { return AHashMap::new(); }; // get positions for each token in the document let doc_len = doc.len(); (0u32..).zip(doc).fold( AHashMap::with_capacity(doc_len), |mut map: AHashMap<u32, Positions>, (position, token)| { // use translation of original token to new token from postings optimization let new_token = orig_to_new_token[&token]; map.entry(new_token).or_default().push(position); map }, ) }) .collect::<Vec<_>>(); (0u32..) 
.zip(postings) .map(|(token, posting)| { posting .iter() .map(|id| { let positions = point_to_tokens_positions[id as usize] .remove(&token) .expect( "If id is this token's posting list, it should have at least one position", ); (id, positions) }) .collect() }) .collect() } impl From<&MmapInvertedIndex> for ImmutableInvertedIndex { fn from(index: &MmapInvertedIndex) -> Self { let postings = match &index.storage.postings { MmapPostingsEnum::Ids(postings) => ImmutablePostings::Ids( postings .iter_postings() .map(PostingListView::to_owned) .collect(), ), MmapPostingsEnum::WithPositions(postings) => ImmutablePostings::WithPositions( postings .iter_postings() .map(PostingListView::to_owned) .collect(), ), }; let vocab: HashMap<String, TokenId> = index .storage .vocab .iter() .map(|(token_str, token_id)| (token_str.to_owned(), token_id[0])) .collect(); debug_assert!( postings.len() == vocab.len(), "postings and vocab must be the same size", ); ImmutableInvertedIndex { postings, vocab, point_to_tokens_count: index.storage.point_to_tokens_count.to_vec(), points_count: index.points_count(), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/positions.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/positions.rs
use posting_list::{PostingValue, UnsizedHandler, UnsizedValue}; use zerocopy::{FromBytes, IntoBytes}; use crate::index::field_index::full_text_index::inverted_index::{Document, TokenId}; /// Represents a list of positions of a token in a document. #[derive(Default, Clone, Debug)] pub struct Positions(Vec<u32>); impl Positions { pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn push(&mut self, position: u32) { self.0.push(position); } pub fn to_token_positions(&self, token_id: TokenId) -> Vec<TokenPosition> { self.0 .iter() .map(|pos| TokenPosition { token_id, position: *pos, }) .collect() } } impl PostingValue for Positions { type Handler = UnsizedHandler<Self>; } impl UnsizedValue for Positions { fn write_len(&self) -> usize { self.0.as_bytes().len() } fn write_to(&self, dst: &mut [u8]) { self.0 .as_slice() .write_to(dst) .expect("write_len should provide correct length"); } fn from_bytes(data: &[u8]) -> Self { let positions = <[u32]>::ref_from_bytes(data).expect("write_len should provide correct length"); Positions(positions.to_vec()) } } #[derive(Debug, Eq, PartialEq)] pub struct TokenPosition { token_id: TokenId, position: u32, } /// A reconstructed partial document which stores [`TokenPosition`]s, ordered by positions #[derive(Debug)] pub struct PartialDocument(Vec<TokenPosition>); impl PartialDocument { pub fn new(mut tokens_positions: Vec<TokenPosition>) -> Self { tokens_positions.sort_by_key(|tok_pos| tok_pos.position); // There should be no duplicate token with same position debug_assert!( tokens_positions .windows(2) .all(|window| window[0] != window[1]) ); Self(tokens_positions) } /// Returns true if any sequential window of tokens match the given phrase. 
pub fn has_phrase(&self, phrase: &Document) -> bool { match phrase.tokens() { // no tokens in query -> no match [] => false, // single token -> match if any token matches [token] => self.0.iter().any(|tok_pos| tok_pos.token_id == *token), // multiple tokens -> match if any sequential window matches phrase => self.sequential_windows(phrase.len()).any(|seq_window| { seq_window .zip(phrase) .all(|(doc_token, query_token)| &doc_token == query_token) }), } } /// Returns an iterator over windows which have sequential sequence of tokens. /// /// Will only return a window if: /// - the window is as large as the window size /// - all positions in the window are sequential fn sequential_windows( &self, window_size: usize, ) -> impl Iterator<Item = impl Iterator<Item = TokenId>> { debug_assert!(window_size >= 2, "Window size must be at least 2"); self.0.windows(window_size).filter_map(|window| { // make sure the positions are sequential window .windows(2) .all(|pair| pair[0].position + 1 == pair[1].position) .then_some(window.iter().map(|tok_pos| tok_pos.token_id)) }) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mmap_postings_enum.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mmap_postings_enum.rs
#[cfg(test)] use common::types::PointOffsetType; use crate::index::field_index::full_text_index::inverted_index::TokenId; use crate::index::field_index::full_text_index::inverted_index::mmap_inverted_index::mmap_postings::MmapPostings; use super::super::positions::Positions; pub enum MmapPostingsEnum { Ids(MmapPostings<()>), WithPositions(MmapPostings<Positions>), } impl MmapPostingsEnum { pub fn populate(&self) { match self { MmapPostingsEnum::Ids(postings) => postings.populate(), MmapPostingsEnum::WithPositions(postings) => postings.populate(), } } pub fn posting_len(&self, token_id: TokenId) -> Option<usize> { match self { MmapPostingsEnum::Ids(postings) => postings.get(token_id).map(|view| view.len()), MmapPostingsEnum::WithPositions(postings) => { postings.get(token_id).map(|view| view.len()) } } } #[cfg(test)] pub fn iter_ids<'a>( &'a self, token_id: TokenId, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match self { MmapPostingsEnum::Ids(postings) => postings.get(token_id).map(|view| { Box::new(view.into_iter().map(|elem| elem.id)) as Box<dyn Iterator<Item = PointOffsetType>> }), MmapPostingsEnum::WithPositions(postings) => postings.get(token_id).map(|view| { Box::new(view.into_iter().map(|elem| elem.id)) as Box<dyn Iterator<Item = PointOffsetType>> }), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mmap_postings.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mmap_postings.rs
use std::fmt::Debug; use std::io; use std::io::Write; use std::marker::PhantomData; use std::path::{Path, PathBuf}; use common::types::PointOffsetType; use common::zeros::WriteZerosExt; use fs_err::File; use memmap2::Mmap; use memory::madvise::{Advice, AdviceSetting, Madviseable}; use memory::mmap_ops::open_read_mmap; use posting_list::{ PostingChunk, PostingList, PostingListComponents, PostingListView, PostingValue, RemainderPosting, SizedTypeFor, ValueHandler, }; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Unaligned}; use crate::index::field_index::full_text_index::inverted_index::TokenId; use crate::index::field_index::full_text_index::inverted_index::positions::Positions; const ALIGNMENT: usize = 4; /// Trait marker to enrich [`posting_list::PostingValue`] for handling mmap files with the posting list. pub(in crate::index::field_index::full_text_index) trait MmapPostingValue: PostingValue< Handler: ValueHandler<Sized: FromBytes + Immutable + IntoBytes + KnownLayout + Unaligned> + Clone + Debug, > { } impl MmapPostingValue for () {} impl MmapPostingValue for Positions {} #[derive(Debug, Default, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] struct PostingsHeader { /// Number of posting lists. One posting list per term pub posting_count: usize, _reserved: [u8; 32], } /// This data structure should contain all the necessary information to /// construct `PostingListView<V>` from the mmap file. 
#[derive(Debug, Default, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] pub(in crate::index::field_index::full_text_index) struct PostingListHeader { /// Offset in bytes from the start of the mmap file /// where the posting list data starts offset: u64, /// Amount of chunks in compressed posting list chunks_count: u32, /// Length in bytes for the compressed postings data ids_data_bytes_count: u32, /// Length in bytes for the alignment bytes alignment_bytes_count: u8, /// Length in bytes for the remainder postings remainder_count: u8, _reserved: [u8; 2], /// Length in bytes for the var-sized data. Add-on for phrase matching, otherwise 0 var_size_data_bytes_count: u32, } impl PostingListHeader { fn posting_size<V: PostingValue>(&self) -> usize { self.ids_data_bytes_count as usize + self.var_size_data_bytes_count as usize + self.alignment_bytes_count as usize + self.remainder_count as usize * size_of::<RemainderPosting<SizedTypeFor<V>>>() + self.chunks_count as usize * size_of::<PostingChunk<SizedTypeFor<V>>>() + size_of::<PointOffsetType>() // last_doc_id } } /// MmapPostings Structure on disk: /// /// /// `| PostingsHeader | /// [ PostingListHeader, PostingListHeader, ... ] | /// [ CompressedMmapPostingList, CompressedMmapPostingList, ... ] |` pub struct MmapPostings<V: MmapPostingValue> { _path: PathBuf, mmap: Mmap, header: PostingsHeader, _value_type: PhantomData<V>, } impl<V: MmapPostingValue> MmapPostings<V> { fn get_header(&self, token_id: TokenId) -> Option<&PostingListHeader> { if self.header.posting_count <= token_id as usize { return None; } let header_offset = size_of::<PostingsHeader>() + token_id as usize * size_of::<PostingListHeader>(); PostingListHeader::ref_from_prefix(self.mmap.get(header_offset..)?) 
.ok() .map(|(header, _)| header) } /// Create PostingListView<V> from the given header /// /// Assume the following layout: /// /// ```ignore /// last_doc_id: &'a PointOffsetType, /// chunks_index: &'a [PostingChunk<()>], /// data: &'a [u8], /// var_size_data: &'a [u8], // might be empty in case of only ids /// _alignment: &'a [u8], // 0-3 extra bytes to align the data /// remainder_postings: &'a [PointOffsetType], /// ``` fn get_view<'a>(&'a self, header: &'a PostingListHeader) -> Option<PostingListView<'a, V>> { let bytes = self.mmap.get(header.offset as usize..)?; let (last_doc_id, bytes) = PointOffsetType::read_from_prefix(bytes).ok()?; let (chunks, bytes) = <[PostingChunk<SizedTypeFor<V>>]>::ref_from_prefix_with_elems( bytes, header.chunks_count as usize, ) .ok()?; let (id_data, bytes) = bytes.split_at(header.ids_data_bytes_count as usize); let (var_size_data, bytes) = bytes.split_at(header.var_size_data_bytes_count as usize); // skip padding let bytes = bytes.get(header.alignment_bytes_count as usize..)?; let (remainder_postings, _) = <[RemainderPosting<SizedTypeFor<V>>]>::ref_from_prefix_with_elems( bytes, header.remainder_count as usize, ) .ok()?; Some(PostingListView::from_components( id_data, chunks, var_size_data, remainder_postings, Some(last_doc_id), )) } pub fn get<'a>(&'a self, token_id: TokenId) -> Option<PostingListView<'a, V>> { let header = self.get_header(token_id)?; self.get_view(header) } /// Given a vector of compressed posting lists, this function writes them to the `path` file. /// The format of the file is compatible with the `MmapPostings` structure. pub fn create(path: PathBuf, compressed_postings: &[PostingList<V>]) -> io::Result<()> { // Create a new empty file, where we will write the compressed posting lists and the header let (file, temp_path) = tempfile::Builder::new() .prefix(path.file_name().ok_or(io::ErrorKind::InvalidInput)?) .tempfile_in(path.parent().ok_or(io::ErrorKind::InvalidInput)?)? 
.into_parts(); let file = File::from_parts::<&Path>(file, temp_path.as_ref()); let mut bufw = io::BufWriter::new(&file); let postings_header = PostingsHeader { posting_count: compressed_postings.len(), _reserved: [0; 32], }; // Write the header to the buffer bufw.write_all(postings_header.as_bytes())?; let postings_lists_headers_size = compressed_postings.len() * size_of::<PostingListHeader>(); let mut posting_offset = size_of::<PostingsHeader>() + postings_lists_headers_size; for compressed_posting in compressed_postings { let view = compressed_posting.view(); let PostingListComponents { id_data, chunks, var_size_data, remainders, last_id: _, // not used for the header } = view.components(); let id_data_len = id_data.len(); let var_size_data_len = var_size_data.len(); let data_len = id_data_len + var_size_data_len; let alignment_len = data_len.next_multiple_of(ALIGNMENT) - data_len; let posting_list_header = PostingListHeader { offset: posting_offset as u64, chunks_count: chunks.len() as u32, ids_data_bytes_count: id_data_len as u32, var_size_data_bytes_count: var_size_data_len as u32, alignment_bytes_count: alignment_len as u8, remainder_count: remainders.len() as u8, _reserved: [0; 2], }; // Write the posting list header to the buffer bufw.write_all(posting_list_header.as_bytes())?; posting_offset += posting_list_header.posting_size::<V>(); } for compressed_posting in compressed_postings { let view = compressed_posting.view(); let PostingListComponents { id_data, chunks, var_size_data, // not used with just ids postings remainders, last_id, } = view.components(); bufw.write_all( last_id .expect("posting must have at least one element") .as_bytes(), )?; for chunk in chunks { bufw.write_all(chunk.as_bytes())?; } // write all unaligned data together bufw.write_all(id_data)?; // write var_size_data if it exists if !var_size_data.is_empty() { bufw.write_all(var_size_data)?; } // write alignment padding // Example: // For data size = 5, alignment = 3 as (5 + 3 = 8) // 
alignment = 8 - 5 = 3 let data_len = id_data.len() + var_size_data.len(); bufw.write_zeros(data_len.next_multiple_of(ALIGNMENT) - data_len)?; for element in remainders { bufw.write_all(element.as_bytes())?; } } // Explicitly flush write buffer so we can catch IO errors bufw.flush()?; drop(bufw); file.sync_all()?; drop(file); temp_path.persist(path)?; Ok(()) } pub fn open(path: impl Into<PathBuf>, populate: bool) -> io::Result<Self> { let path = path.into(); let mmap = open_read_mmap(&path, AdviceSetting::Advice(Advice::Normal), populate)?; let (header, _) = PostingsHeader::read_from_prefix(&mmap).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, format!("Invalid header deserialization in {}", path.display()), ) })?; Ok(Self { _path: path, mmap, header, _value_type: PhantomData, }) } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) { self.mmap.populate(); } /// Iterate over posting lists, returning a view for each pub fn iter_postings<'a>(&'a self) -> impl Iterator<Item = PostingListView<'a, V>> { (0..self.header.posting_count as u32) // we are iterating over existing posting lists, all of them should return `Some` .filter_map(|posting_idx| self.get(posting_idx)) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mod.rs
lib/segment/src/index/field_index/full_text_index/inverted_index/mmap_inverted_index/mod.rs
use std::collections::HashMap; use std::path::PathBuf; use bitvec::vec::BitVec; use common::counter::hardware_counter::HardwareCounterCell; use common::mmap_hashmap::{MmapHashMap, READ_ENTRY_OVERHEAD}; use common::types::PointOffsetType; use itertools::Either; use memory::fadvise::clear_disk_cache; use memory::madvise::AdviceSetting; use memory::mmap_ops; use memory::mmap_type::{MmapBitSlice, MmapSlice}; use mmap_postings::{MmapPostingValue, MmapPostings}; use super::immutable_inverted_index::ImmutableInvertedIndex; use super::immutable_postings_enum::ImmutablePostings; use super::mmap_inverted_index::mmap_postings_enum::MmapPostingsEnum; use super::positions::Positions; use super::postings_iterator::{ intersect_compressed_postings_iterator, merge_compressed_postings_iterator, }; use super::{InvertedIndex, ParsedQuery, TokenId, TokenSet}; use crate::common::Flusher; use crate::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::full_text_index::inverted_index::Document; use crate::index::field_index::full_text_index::inverted_index::postings_iterator::{ check_compressed_postings_phrase, intersect_compressed_postings_phrase_iterator, }; pub(super) mod mmap_postings; pub mod mmap_postings_enum; const POSTINGS_FILE: &str = "postings.dat"; const VOCAB_FILE: &str = "vocab.dat"; const POINT_TO_TOKENS_COUNT_FILE: &str = "point_to_tokens_count.dat"; const DELETED_POINTS_FILE: &str = "deleted_points.dat"; pub struct MmapInvertedIndex { pub(in crate::index::field_index::full_text_index) path: PathBuf, pub(in crate::index::field_index::full_text_index) storage: Storage, /// Number of points which are not deleted pub(in crate::index::field_index::full_text_index) active_points_count: usize, is_on_disk: bool, } pub(in crate::index::field_index::full_text_index) struct Storage { pub(in crate::index::field_index::full_text_index) postings: 
MmapPostingsEnum, pub(in crate::index::field_index::full_text_index) vocab: MmapHashMap<str, TokenId>, pub(in crate::index::field_index::full_text_index) point_to_tokens_count: MmapSlice<usize>, pub(in crate::index::field_index::full_text_index) deleted_points: MmapBitSliceBufferedUpdateWrapper, } impl MmapInvertedIndex { pub fn create(path: PathBuf, inverted_index: &ImmutableInvertedIndex) -> OperationResult<()> { let ImmutableInvertedIndex { postings, vocab, point_to_tokens_count, points_count: _, } = inverted_index; debug_assert_eq!(vocab.len(), postings.len()); let postings_path = path.join(POSTINGS_FILE); let vocab_path = path.join(VOCAB_FILE); let point_to_tokens_count_path = path.join(POINT_TO_TOKENS_COUNT_FILE); let deleted_points_path = path.join(DELETED_POINTS_FILE); match postings { ImmutablePostings::Ids(postings) => MmapPostings::create(postings_path, postings)?, ImmutablePostings::WithPositions(postings) => { MmapPostings::create(postings_path, postings)? } } // Currently MmapHashMap maps str -> [u32], but we only need to map str -> u32. // TODO: Consider making another mmap structure for this case. 
MmapHashMap::<str, TokenId>::create( &vocab_path, vocab.iter().map(|(k, v)| (k.as_str(), std::iter::once(*v))), )?; // Save point_to_tokens_count, separated into a bitslice for None values and a slice for actual values // // None values are represented as deleted in the bitslice let deleted_bitslice: BitVec = point_to_tokens_count .iter() .map(|count| *count == 0) .collect(); MmapBitSlice::create(&deleted_points_path, &deleted_bitslice)?; // The actual values go in the slice let point_to_tokens_count_iter = point_to_tokens_count.iter().copied(); MmapSlice::create(&point_to_tokens_count_path, point_to_tokens_count_iter)?; Ok(()) } pub fn open( path: PathBuf, populate: bool, has_positions: bool, ) -> OperationResult<Option<Self>> { let postings_path = path.join(POSTINGS_FILE); let vocab_path = path.join(VOCAB_FILE); let point_to_tokens_count_path = path.join(POINT_TO_TOKENS_COUNT_FILE); let deleted_points_path = path.join(DELETED_POINTS_FILE); // If postings don't exist, assume the index doesn't exist on disk if !postings_path.is_file() { return Ok(None); } let postings = match has_positions { false => MmapPostingsEnum::Ids(MmapPostings::<()>::open(&postings_path, populate)?), true => MmapPostingsEnum::WithPositions(MmapPostings::<Positions>::open( &postings_path, populate, )?), }; let vocab = MmapHashMap::<str, TokenId>::open(&vocab_path, false)?; let point_to_tokens_count = unsafe { MmapSlice::try_from(mmap_ops::open_write_mmap( &point_to_tokens_count_path, AdviceSetting::Global, populate, )?)? 
}; let deleted = mmap_ops::open_write_mmap(&deleted_points_path, AdviceSetting::Global, populate)?; let deleted = MmapBitSlice::from(deleted, 0); let num_deleted_points = deleted.count_ones(); let deleted_points = MmapBitSliceBufferedUpdateWrapper::new(deleted); let points_count = point_to_tokens_count.len() - num_deleted_points; Ok(Some(Self { path, storage: Storage { postings, vocab, point_to_tokens_count, deleted_points, }, active_points_count: points_count, is_on_disk: !populate, })) } pub(super) fn iter_vocab(&self) -> impl Iterator<Item = (&str, &TokenId)> + '_ { // unwrap safety: we know that each token points to a token id. self.storage .vocab .iter() .map(|(k, v)| (k, v.first().unwrap())) } /// Returns whether the point id is valid and active. pub fn is_active(&self, point_id: PointOffsetType) -> bool { let is_deleted = self .storage .deleted_points .get(point_id as usize) .unwrap_or(true); !is_deleted } /// Iterate over point ids whose documents contain all given tokens pub fn filter_has_all<'a>( &'a self, tokens: TokenSet, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { // in case of mmap immutable index, deleted points are still in the postings let filter = move |idx| self.is_active(idx); fn intersection<'a, V: MmapPostingValue>( postings: &'a MmapPostings<V>, tokens: TokenSet, filter: impl Fn(u32) -> bool + 'a, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { let postings_opt: Option<Vec<_>> = tokens .tokens() .iter() .map(|&token_id| postings.get(token_id)) .collect(); let Some(posting_readers) = postings_opt else { // There are unseen tokens -> no matches return Box::new(std::iter::empty()); }; if posting_readers.is_empty() { // Empty request -> no matches return Box::new(std::iter::empty()); } Box::new(intersect_compressed_postings_iterator( posting_readers, filter, )) } match &self.storage.postings { MmapPostingsEnum::Ids(postings) => intersection(postings, tokens, filter), MmapPostingsEnum::WithPositions(postings) => 
intersection(postings, tokens, filter), } } /// Iterate over point ids whose documents contain at least one of the given tokens fn filter_has_any<'a>( &'a self, tokens: TokenSet, ) -> impl Iterator<Item = PointOffsetType> + 'a { // in case of immutable index, deleted documents are still in the postings let is_active = move |idx| self.is_active(idx); fn merge<'a, V: MmapPostingValue>( postings: &'a MmapPostings<V>, tokens: TokenSet, is_active: impl Fn(PointOffsetType) -> bool + 'a, ) -> impl Iterator<Item = PointOffsetType> + 'a { let postings: Vec<_> = tokens .tokens() .iter() .filter_map(|&token_id| postings.get(token_id)) .collect(); // Query must not be empty if postings.is_empty() { return Either::Left(std::iter::empty()); }; Either::Right(merge_compressed_postings_iterator(postings, is_active)) } match &self.storage.postings { MmapPostingsEnum::Ids(postings) => Either::Left(merge(postings, tokens, is_active)), MmapPostingsEnum::WithPositions(postings) => { Either::Right(merge(postings, tokens, is_active)) } } } fn check_has_subset(&self, tokens: &TokenSet, point_id: PointOffsetType) -> bool { // check non-empty query if tokens.is_empty() { return false; } // check presence of the document if self.values_is_empty(point_id) { return false; } fn check_intersection<V: MmapPostingValue>( postings: &MmapPostings<V>, tokens: &TokenSet, point_id: PointOffsetType, ) -> bool { // Check that all tokens are in document tokens.tokens().iter().all(|query_token| { postings .get(*query_token) // unwrap safety: all tokens exist in the vocabulary, otherwise there'd be no query tokens .unwrap() .visitor() .contains(point_id) }) } match &self.storage.postings { MmapPostingsEnum::Ids(postings) => check_intersection(postings, tokens, point_id), MmapPostingsEnum::WithPositions(postings) => { check_intersection(postings, tokens, point_id) } } } fn check_has_any(&self, tokens: &TokenSet, point_id: PointOffsetType) -> bool { if tokens.is_empty() { return false; } // check presence of 
the document if self.values_is_empty(point_id) { return false; } fn check_any<V: MmapPostingValue>( postings: &MmapPostings<V>, tokens: &TokenSet, point_id: PointOffsetType, ) -> bool { // Check that at least one token is in document tokens.tokens().iter().any(|token_id| { let posting_list = postings.get(*token_id).unwrap(); posting_list.visitor().contains(point_id) }) } match &self.storage.postings { MmapPostingsEnum::Ids(postings) => check_any(postings, tokens, point_id), MmapPostingsEnum::WithPositions(postings) => check_any(postings, tokens, point_id), } } /// Iterate over point ids whose documents contain all given tokens in the same order they are provided pub fn filter_has_phrase<'a>( &'a self, phrase: Document, ) -> impl Iterator<Item = PointOffsetType> + 'a { // in case of mmap immutable index, deleted points are still in the postings let is_active = move |idx| self.is_active(idx); match &self.storage.postings { MmapPostingsEnum::WithPositions(postings) => { Either::Right(intersect_compressed_postings_phrase_iterator( phrase, |token_id| postings.get(*token_id), is_active, )) } // cannot do phrase matching if there's no positional information MmapPostingsEnum::Ids(_postings) => Either::Left(std::iter::empty()), } } pub fn check_has_phrase(&self, phrase: &Document, point_id: PointOffsetType) -> bool { // in case of mmap immutable index, deleted points are still in the postings if !self.is_active(point_id) { return false; } match &self.storage.postings { MmapPostingsEnum::WithPositions(postings) => { check_compressed_postings_phrase(phrase, point_id, |token_id| { postings.get(*token_id) }) } // cannot do phrase matching if there's no positional information MmapPostingsEnum::Ids(_postings) => false, } } pub fn files(&self) -> Vec<PathBuf> { vec![ self.path.join(POSTINGS_FILE), self.path.join(VOCAB_FILE), self.path.join(POINT_TO_TOKENS_COUNT_FILE), self.path.join(DELETED_POINTS_FILE), ] } pub fn immutable_files(&self) -> Vec<PathBuf> { vec![ 
self.path.join(POSTINGS_FILE), self.path.join(VOCAB_FILE), self.path.join(POINT_TO_TOKENS_COUNT_FILE), ] } pub fn flusher(&self) -> Flusher { self.storage.deleted_points.flusher() } pub fn is_on_disk(&self) -> bool { self.is_on_disk } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { self.storage.postings.populate(); self.storage.vocab.populate()?; self.storage.point_to_tokens_count.populate()?; Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { let files = self.files(); for file in files { clear_disk_cache(&file)?; } Ok(()) } } impl InvertedIndex for MmapInvertedIndex { fn get_vocab_mut(&mut self) -> &mut HashMap<String, TokenId> { unreachable!("MmapInvertedIndex does not support mutable operations") } fn index_tokens( &mut self, _idx: PointOffsetType, _tokens: super::TokenSet, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { Err(OperationError::service_error( "Can't add values to mmap immutable text index", )) } fn index_document( &mut self, _idx: PointOffsetType, _document: Document, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { Err(OperationError::service_error( "Can't add values to mmap immutable text index", )) } fn remove(&mut self, idx: PointOffsetType) -> bool { let Some(is_deleted) = self.storage.deleted_points.get(idx as usize) else { return false; // Never existed }; if is_deleted { return false; // Already removed } self.storage.deleted_points.set(idx as usize, true); if let Some(count) = self.storage.point_to_tokens_count.get_mut(idx as usize) { *count = 0; // `deleted_points`'s length can be larger than `point_to_tokens_count`'s length. // Only if the index is within bounds of `point_to_tokens_count`, we decrement the active points count. 
self.active_points_count -= 1; } true } fn filter<'a>( &'a self, query: ParsedQuery, _hw_counter: &HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> { match query { ParsedQuery::AllTokens(tokens) => self.filter_has_all(tokens), ParsedQuery::Phrase(phrase) => Box::new(self.filter_has_phrase(phrase)), ParsedQuery::AnyTokens(tokens) => Box::new(self.filter_has_any(tokens)), } } fn get_posting_len( &self, token_id: TokenId, _hw_counter: &HardwareCounterCell, ) -> Option<usize> { self.storage.postings.posting_len(token_id) } fn vocab_with_postings_len_iter(&self) -> impl Iterator<Item = (&str, usize)> + '_ { self.iter_vocab().filter_map(move |(token, &token_id)| { self.storage .postings .posting_len(token_id) .map(|posting_len| (token, posting_len)) }) } fn check_match(&self, parsed_query: &ParsedQuery, point_id: PointOffsetType) -> bool { match parsed_query { ParsedQuery::AllTokens(tokens) => self.check_has_subset(tokens, point_id), ParsedQuery::Phrase(phrase) => self.check_has_phrase(phrase, point_id), ParsedQuery::AnyTokens(tokens) => self.check_has_any(tokens, point_id), } } fn values_is_empty(&self, point_id: PointOffsetType) -> bool { if self .storage .deleted_points .get(point_id as usize) .unwrap_or(true) { return true; } self.storage .point_to_tokens_count .get(point_id as usize) .map(|count| *count == 0) // if the point does not exist, it is considered empty .unwrap_or(true) } fn values_count(&self, point_id: PointOffsetType) -> usize { if self .storage .deleted_points .get(point_id as usize) .unwrap_or(true) { return 0; } self.storage .point_to_tokens_count .get(point_id as usize) .copied() // if the point does not exist, it is considered empty .unwrap_or(0) } fn points_count(&self) -> usize { self.active_points_count } fn get_token_id(&self, token: &str, hw_counter: &HardwareCounterCell) -> Option<TokenId> { if self.is_on_disk { hw_counter.payload_index_io_read_counter().incr_delta( READ_ENTRY_OVERHEAD + size_of::<TokenId>(), // Avoid 
check overhead and assume token is always read ); } self.storage .vocab .get(token) .ok() .flatten() .and_then(<[TokenId]>::first) .copied() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tests/test_congruence.rs
lib/segment/src/index/field_index/full_text_index/tests/test_congruence.rs
use std::collections::HashSet; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use rstest::rstest; use serde_json::Value; use tempfile::{Builder, TempDir}; use crate::common::operation_error::OperationResult; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::open_db_with_existing_cf; use crate::data_types::index::TextIndexParams; use crate::fixtures::payload_fixtures::random_full_text_payload; use crate::index::field_index::field_index_base::PayloadFieldIndex; use crate::index::field_index::full_text_index::inverted_index::{ Document, InvertedIndex, ParsedQuery, TokenId, TokenSet, }; use crate::index::field_index::full_text_index::mmap_text_index::FullTextMmapIndexBuilder; use crate::index::field_index::full_text_index::mutable_text_index::MutableFullTextIndex; #[cfg(feature = "rocksdb")] use crate::index::field_index::full_text_index::text_index::FullTextIndexRocksDbBuilder; use crate::index::field_index::full_text_index::text_index::{ FullTextGridstoreIndexBuilder, FullTextIndex, }; use crate::index::field_index::{FieldIndexBuilderTrait, ValueIndexer}; use crate::json_path::JsonPath; use crate::types::{FieldCondition, ValuesCount}; #[cfg(feature = "rocksdb")] type Database = std::sync::Arc<parking_lot::RwLock<rocksdb::DB>>; #[cfg(not(feature = "rocksdb"))] type Database = (); const FIELD_NAME: &str = "test"; const TYPES: &[IndexType] = &[ #[cfg(feature = "rocksdb")] IndexType::MutableRocksdb, IndexType::MutableGridstore, #[cfg(feature = "rocksdb")] IndexType::ImmRamRocksDb, IndexType::ImmMmap, IndexType::ImmRamMmap, ]; #[derive(Clone, Copy, PartialEq, Debug)] enum IndexType { #[cfg(feature = "rocksdb")] MutableRocksdb, MutableGridstore, #[cfg(feature = "rocksdb")] ImmRamRocksDb, ImmMmap, ImmRamMmap, } enum IndexBuilder { #[cfg(feature = "rocksdb")] MutableRocksdb(FullTextIndexRocksDbBuilder), MutableGridstore(FullTextGridstoreIndexBuilder), 
#[cfg(feature = "rocksdb")] ImmRamRocksdb(FullTextIndexRocksDbBuilder), ImmMmap(FullTextMmapIndexBuilder), ImmRamMmap(FullTextMmapIndexBuilder), } impl IndexBuilder { fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::MutableRocksdb(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::MutableGridstore(builder) => { FieldIndexBuilderTrait::add_point(builder, id, payload, hw_counter) } #[cfg(feature = "rocksdb")] IndexBuilder::ImmRamRocksdb(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::ImmMmap(builder) => { FieldIndexBuilderTrait::add_point(builder, id, payload, hw_counter) } IndexBuilder::ImmRamMmap(builder) => { FieldIndexBuilderTrait::add_point(builder, id, payload, hw_counter) } } } fn finalize(self) -> OperationResult<FullTextIndex> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::MutableRocksdb(builder) => builder.finalize(), IndexBuilder::MutableGridstore(builder) => builder.finalize(), #[cfg(feature = "rocksdb")] IndexBuilder::ImmRamRocksdb(builder) => builder.finalize(), IndexBuilder::ImmMmap(builder) => builder.finalize(), IndexBuilder::ImmRamMmap(builder) => builder.finalize(), } } } fn create_builder( index_type: IndexType, phrase_matching: bool, ) -> (IndexBuilder, TempDir, Database) { let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap(); #[cfg(feature = "rocksdb")] let db = open_db_with_existing_cf(&temp_dir.path().join("test_db")).unwrap(); #[cfg(not(feature = "rocksdb"))] let db = (); let config = TextIndexParams { phrase_matching: Some(phrase_matching), ..TextIndexParams::default() }; let mut builder = match index_type { #[cfg(feature = "rocksdb")] IndexType::MutableRocksdb => IndexBuilder::MutableRocksdb( FullTextIndex::builder_rocksdb(db.clone(), config, FIELD_NAME, true).unwrap(), ), IndexType::MutableGridstore => IndexBuilder::MutableGridstore( 
FullTextIndex::builder_gridstore(temp_dir.path().to_path_buf(), config), ), #[cfg(feature = "rocksdb")] IndexType::ImmRamRocksDb => IndexBuilder::ImmRamRocksdb( FullTextIndex::builder_rocksdb(db.clone(), config, FIELD_NAME, false).unwrap(), ), IndexType::ImmMmap => IndexBuilder::ImmMmap(FullTextIndex::builder_mmap( temp_dir.path().to_path_buf(), config, true, )), IndexType::ImmRamMmap => IndexBuilder::ImmRamMmap(FullTextIndex::builder_mmap( temp_dir.path().to_path_buf(), config, false, )), }; match &mut builder { #[cfg(feature = "rocksdb")] IndexBuilder::MutableRocksdb(builder) => builder.init().unwrap(), IndexBuilder::MutableGridstore(builder) => builder.init().unwrap(), #[cfg(feature = "rocksdb")] IndexBuilder::ImmRamRocksdb(builder) => builder.init().unwrap(), IndexBuilder::ImmMmap(builder) => builder.init().unwrap(), IndexBuilder::ImmRamMmap(builder) => builder.init().unwrap(), } (builder, temp_dir, db) } fn reopen_index( index: FullTextIndex, index_type: IndexType, temp_dir: &TempDir, #[allow(unused_variables)] db: &Database, phrase_matching: bool, ) -> FullTextIndex { let config = TextIndexParams { phrase_matching: Some(phrase_matching), ..TextIndexParams::default() }; // Drop the original index to ensure files are flushed drop(index); // Reopen based on index type match index_type { #[cfg(feature = "rocksdb")] IndexType::MutableRocksdb => { FullTextIndex::new_rocksdb(db.clone(), config, FIELD_NAME, true, false) .unwrap() .expect("Failed to reopen MutableRocksdb index") } IndexType::MutableGridstore => { FullTextIndex::new_gridstore(temp_dir.path().to_path_buf(), config, false) .unwrap() .expect("Failed to reopen MutableGridstore index") } #[cfg(feature = "rocksdb")] IndexType::ImmRamRocksDb => { FullTextIndex::new_rocksdb(db.clone(), config, FIELD_NAME, false, false) .unwrap() .expect("Failed to reopen ImmRamRocksDb index") } IndexType::ImmMmap => { // Reopen with is_on_disk = true (mmap directly) FullTextIndex::new_mmap(temp_dir.path().to_path_buf(), 
config, true) .unwrap() .expect("Failed to reopen ImmMmap index") } IndexType::ImmRamMmap => { // Reopen with is_on_disk = false (load into RAM) // This is the path that will call ImmutableFullTextIndex::open_mmap FullTextIndex::new_mmap(temp_dir.path().to_path_buf(), config, false) .unwrap() .expect("Failed to reopen ImmRamMmap index") } } } fn build_random_index( num_points: usize, num_keywords: usize, keyword_len: usize, index_type: IndexType, phrase_matching: bool, deleted: bool, reopen: bool, ) -> (FullTextIndex, TempDir, Database) { let mut rnd = StdRng::seed_from_u64(42); let (mut builder, temp_dir, db) = create_builder(index_type, phrase_matching); for idx in 0..num_points { let keywords = random_full_text_payload( &mut rnd, num_keywords..=num_keywords, keyword_len..=keyword_len, ); let array_payload = Value::Array(keywords); builder .add_point( idx as PointOffsetType, &[&array_payload], &HardwareCounterCell::new(), ) .unwrap(); } let mut index = builder.finalize().unwrap(); assert_eq!(index.points_count(), num_points); // Delete some points before loading into a different format if deleted { index.remove_point(20).unwrap(); index.remove_point(21).unwrap(); index.remove_point(22).unwrap(); index.remove_point(200).unwrap(); index.remove_point(250).unwrap(); index.flusher()().expect("failed to flush deletions"); } // Reopen the index if requested let index = if reopen { reopen_index(index, index_type, &temp_dir, &db, phrase_matching) } else { index }; (index, temp_dir, db) } /// Tries to parse a query. 
If there is an unknown id to a token, returns `None` pub fn to_parsed_query( query: &[String], is_phrase: bool, token_to_id: impl Fn(&str) -> Option<TokenId>, ) -> Option<ParsedQuery> { let tokens = query.iter().map(|token| token_to_id(token.as_str())); let parsed = match is_phrase { false => ParsedQuery::AllTokens(tokens.collect::<Option<TokenSet>>()?), true => ParsedQuery::Phrase(tokens.collect::<Option<Document>>()?), }; Some(parsed) } pub fn parse_query(query: &[String], is_phrase: bool, index: &FullTextIndex) -> ParsedQuery { let hw_counter = HardwareCounterCell::disposable(); match index { FullTextIndex::Mutable(index) => { let token_to_id = |token: &str| index.inverted_index.get_token_id(token, &hw_counter); to_parsed_query(query, is_phrase, token_to_id).unwrap() } FullTextIndex::Immutable(index) => { let token_to_id = |token: &str| index.inverted_index.get_token_id(token, &hw_counter); to_parsed_query(query, is_phrase, token_to_id).unwrap() } FullTextIndex::Mmap(index) => { let token_to_id = |token: &str| index.inverted_index.get_token_id(token, &hw_counter); to_parsed_query(query, is_phrase, token_to_id).unwrap() } } } #[rstest] fn test_congruence( #[values(false, true)] deleted: bool, #[values(false, true)] phrase_matching: bool, #[values(false, true)] reopen: bool, ) { const POINT_COUNT: usize = 500; const KEYWORD_COUNT: usize = 20; const KEYWORD_LEN: usize = 2; let hw_counter = HardwareCounterCell::disposable(); let (mut indices, _data): (Vec<_>, Vec<_>) = TYPES .iter() .copied() .map(|index_type| { let (index, temp_dir, db) = build_random_index( POINT_COUNT, KEYWORD_COUNT, KEYWORD_LEN, index_type, phrase_matching, deleted, reopen, ); ((index, index_type), (temp_dir, db)) }) .unzip(); // Delete some points after loading if deleted { for (index, _type) in indices.iter_mut() { index.remove_point(10).unwrap(); index.remove_point(11).unwrap(); index.remove_point(12).unwrap(); index.remove_point(100).unwrap(); index.remove_point(150).unwrap(); } } // Grab 10 
keywords to use for querying let (FullTextIndex::Mutable(index), _) = &indices[0] else { panic!("Expects mutable full text index as first"); }; let mut keywords = index .inverted_index .vocab .keys() .cloned() .collect::<Vec<_>>(); keywords.sort_unstable(); keywords.truncate(10); const EXISTING_IDS: &[PointOffsetType] = &[5, 19, 57, 223, 229, 499]; let existing_phrases = check_phrase::<KEYWORD_COUNT>(EXISTING_IDS, index, &indices, phrase_matching); for i in 1..indices.len() { let ((index_a, type_a), (index_b, type_b)) = (&indices[0], &indices[i]); eprintln!("Testing index type {type_a:?} vs {type_b:?}"); assert_eq!(index_a.points_count(), index_b.points_count()); for point_id in 0..POINT_COUNT as PointOffsetType { assert_eq!( index_a.values_count(point_id), index_b.values_count(point_id), ); assert_eq!( index_a.values_is_empty(point_id), index_b.values_is_empty(point_id), ); } assert_eq!( index_a.get_token("doesnotexist", &hw_counter), index_b.get_token("doesnotexist", &hw_counter), ); assert!( index_a.get_token(&keywords[0], &hw_counter).is_some() == index_b.get_token(&keywords[0], &hw_counter).is_some(), ); for query_range in [0..1, 2..4, 5..9, 0..10] { let keywords = &keywords[query_range]; let parsed_query_a = parse_query(keywords, false, index_a); let parsed_query_b = parse_query(keywords, false, index_b); // Mutable index behaves different versus the others on point deletion // Mutable index updates postings, the others do not. Cardinality estimations are // not expected to match because of it. 
if !deleted { let field_condition = FieldCondition::new_values_count( JsonPath::new(FIELD_NAME), ValuesCount::from(0..10), ); let cardinality_a = index_a.estimate_query_cardinality( &parsed_query_a, &field_condition, &hw_counter, ); let cardinality_b = index_b.estimate_query_cardinality( &parsed_query_b, &field_condition, &hw_counter, ); assert_eq!(cardinality_a, cardinality_b); } for point_id in 0..POINT_COUNT as PointOffsetType { assert_eq!( index_a.check_match(&parsed_query_a, point_id), index_b.check_match(&parsed_query_b, point_id), ); } assert_eq!( index_a .filter_query(parsed_query_a, &hw_counter) .collect::<HashSet<_>>(), index_b .filter_query(parsed_query_b, &hw_counter) .collect::<HashSet<_>>(), ); } if phrase_matching { for phrase in &existing_phrases { eprintln!("Phrase: {phrase:?}"); let parsed_query_a = parse_query(phrase, true, index_a); let parsed_query_b = parse_query(phrase, true, index_b); let field_condition = FieldCondition::new_values_count( JsonPath::new(FIELD_NAME), ValuesCount::from(0..10), ); assert_eq!( index_a.estimate_query_cardinality( &parsed_query_a, &field_condition, &hw_counter ), index_b.estimate_query_cardinality( &parsed_query_b, &field_condition, &hw_counter ), ); for point_id in 0..POINT_COUNT as PointOffsetType { assert_eq!( index_a.check_match(&parsed_query_a, point_id), index_b.check_match(&parsed_query_b, point_id), ); } // Assert that both indices return the same results assert_eq!( index_a .filter_query(parsed_query_a, &hw_counter) .collect::<HashSet<_>>(), index_b .filter_query(parsed_query_b, &hw_counter) .collect::<HashSet<_>>(), ); } } if !deleted { for threshold in 1..=10 { assert_eq!( index_a .payload_blocks(threshold, JsonPath::new(FIELD_NAME)) .count(), index_b .payload_blocks(threshold, JsonPath::new(FIELD_NAME)) .count(), ); } } } } /// Checks that the ids can be found when filtering and matching a phrase. 
/// /// Returns the phrases that were used fn check_phrase<const KEYWORD_COUNT: usize>( existing_ids: &[PointOffsetType], mutable_index: &MutableFullTextIndex, check_indexes: &[(FullTextIndex, IndexType)], phrase_matching: bool, ) -> Vec<Vec<String>> { // From the ids, choose a random phrase of 4 words. const PHRASE_LENGTH: usize = 4; let mut phrases = Vec::new(); let rng = &mut StdRng::seed_from_u64(43); for id in existing_ids { let doc = mutable_index.get_doc(*id).unwrap(); let rand_idx = rng.random_range(0..=KEYWORD_COUNT - PHRASE_LENGTH); let phrase = doc[rand_idx..rand_idx + PHRASE_LENGTH].to_vec(); phrases.push(phrase); } let hw_counter = HardwareCounterCell::disposable(); for (index, index_type) in check_indexes { eprintln!("Checking index type: {index_type:?}"); for (phrase, exp_id) in phrases.iter().zip(existing_ids) { eprintln!("Phrase: {phrase:?}"); let parsed_query = parse_query(phrase, phrase_matching, index); assert!(index.check_match(&parsed_query, *exp_id)); let result = index .filter_query(parsed_query, &hw_counter) .collect::<HashSet<_>>(); assert!(!result.is_empty()); assert!( result.contains(exp_id), "Expected ID {exp_id} not found in other index result: {result:?}" ); } } phrases }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tests/mod.rs
lib/segment/src/index/field_index/full_text_index/tests/mod.rs
mod test_congruence; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use tempfile::Builder; use crate::data_types::index::{TextIndexParams, TextIndexType, TokenizerType}; use crate::index::field_index::full_text_index::text_index::FullTextIndex; use crate::index::field_index::{FieldIndexBuilderTrait as _, ValueIndexer}; fn movie_titles() -> Vec<String> { vec![ "2430 A.D.".to_string(), "The Acquisitive Chuckle".to_string(), "Author! Author!".to_string(), "The Bicentennial Man".to_string(), "Big Game".to_string(), "The Billiard Ball".to_string(), "Birth of a Notion".to_string(), "Black Friar of the Flame".to_string(), "Blank!".to_string(), "Blind Alley".to_string(), "Breeds There a Man...?".to_string(), "Button, Button".to_string(), "Buy Jupiter".to_string(), "C-Chute".to_string(), "Cal".to_string(), "The Callistan Menace".to_string(), "Catch That Rabbit".to_string(), "Christmas on Ganymede".to_string(), "Darwinian Pool Room".to_string(), "Day of the Hunters".to_string(), "Death Sentence".to_string(), "Does a Bee Care?".to_string(), "Dreaming Is a Private Thing".to_string(), "The Dust of Death".to_string(), "The Dying Night".to_string(), "Each an Explorer".to_string(), "Escape!".to_string(), "Everest".to_string(), "Evidence".to_string(), "The Evitable Conflict".to_string(), "Exile to Hell".to_string(), "Eyes Do More Than See".to_string(), "The Feeling of Power".to_string(), "Feminine Intuition".to_string(), "First Law".to_string(), "Flies".to_string(), "For the Birds".to_string(), "Founding Father".to_string(), "The Fun They Had".to_string(), "Galley Slave".to_string(), "The Gentle Vultures".to_string(), "Getting Even".to_string(), "Gimmicks Three".to_string(), "Gold".to_string(), "Good Taste".to_string(), "The Greatest Asset".to_string(), "Green Patches".to_string(), "Half-Breed".to_string(), "Half-Breeds on Venus".to_string(), "Hallucination".to_string(), "The Hazing".to_string(), "Hell-Fire".to_string(), 
"Heredity".to_string(), "History".to_string(), "Homo Sol".to_string(), "Hostess".to_string(), "I Just Make Them Up, See!".to_string(), "I'm in Marsport Without Hilda".to_string(), "The Imaginary".to_string(), "The Immortal Bard".to_string(), "In a Good Cause—".to_string(), "Insert Knob A in Hole B".to_string(), "The Instability".to_string(), "It's Such a Beautiful Day".to_string(), "The Key".to_string(), "Kid Stuff".to_string(), "The Last Answer".to_string(), "The Last Question".to_string(), "The Last Trump".to_string(), "Left to Right".to_string(), "Legal Rites".to_string(), "Lenny".to_string(), "Lest We Remember".to_string(), "Let's Not".to_string(), "Liar!".to_string(), "Light Verse".to_string(), "Little Lost Robot".to_string(), "The Little Man on the Subway".to_string(), "Living Space".to_string(), "A Loint of Paw".to_string(), "The Magnificent Possession".to_string(), "Marching In".to_string(), "Marooned off Vesta".to_string(), "The Message".to_string(), "Mirror Image".to_string(), "Mother Earth".to_string(), "My Son, the Physicist".to_string(), "No Connection".to_string(), "No Refuge Could Save".to_string(), "Nobody Here But—".to_string(), "Not Final!".to_string(), "Obituary".to_string(), "Old-fashioned".to_string(), "Pâté de Foie Gras".to_string(), "The Pause".to_string(), "Ph as in Phony".to_string(), "The Portable Star".to_string(), "The Proper Study".to_string(), "Rain, Rain, Go Away".to_string(), "Reason".to_string(), "The Red Queen's Race".to_string(), "Rejection Slips".to_string(), "Ring Around the Sun".to_string(), "Risk".to_string(), "Robot AL-76 Goes Astray".to_string(), "Robot Dreams".to_string(), "Runaround".to_string(), "Sally".to_string(), "Satisfaction Guaranteed".to_string(), "The Secret Sense".to_string(), "Shah Guido G.".to_string(), "Silly Asses".to_string(), "The Singing Bell".to_string(), "Sixty Million Trillion Combinations".to_string(), "Spell My Name with an S".to_string(), "Star Light".to_string(), "A Statue for Father".to_string(), 
"Strikebreaker".to_string(), "Super-Neutron".to_string(), "Take a Match".to_string(), "The Talking Stone".to_string(), ". . . That Thou Art Mindful of Him".to_string(), "Thiotimoline".to_string(), "Time Pussy".to_string(), "Trends".to_string(), "Truth to Tell".to_string(), "The Ugly Little Boy".to_string(), "The Ultimate Crime".to_string(), "Unto the Fourth Generation".to_string(), "The Up-to-Date Sorcerer".to_string(), "Waterclap".to_string(), "The Watery Place".to_string(), "The Weapon".to_string(), "The Weapon Too Dreadful to Use".to_string(), "What If—".to_string(), "What Is This Thing Called Love?".to_string(), "What's in a Name?".to_string(), "The Winnowing".to_string(), ] } #[test] fn test_prefix_search() { let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap(); let config = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Prefix, min_token_len: None, max_token_len: None, lowercase: None, phrase_matching: None, stopwords: None, on_disk: None, stemmer: None, ascii_folding: None, }; let mut index = FullTextIndex::new_gridstore(temp_dir.path().to_path_buf(), config.clone(), true) .unwrap() .unwrap(); let hw_counter = HardwareCounterCell::new(); let texts = movie_titles(); for (i, text) in texts.iter().enumerate() { index .add_many(i as PointOffsetType, vec![text.clone()], &hw_counter) .unwrap(); } let res: Vec<_> = index.query("ROBO", &hw_counter).collect(); let query = index.parse_text_query("ROBO", &hw_counter).unwrap(); for idx in res.iter().copied() { assert!(index.check_match(&query, idx)); } assert_eq!(res.len(), 3); let res: Vec<_> = index.query("q231", &hw_counter).collect(); assert!(res.is_empty()); assert!(index.parse_text_query("q231", &hw_counter).is_none()); } #[test] fn test_phrase_matching() { let hw_counter = HardwareCounterCell::default(); // Create a text index with phrase matching enabled let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap(); let config = TextIndexParams { r#type: 
TextIndexType::Text, tokenizer: TokenizerType::default(), min_token_len: None, max_token_len: None, lowercase: Some(true), on_disk: None, phrase_matching: Some(true), // Enable phrase matching stopwords: None, stemmer: None, ascii_folding: None, }; let mut mutable_index = FullTextIndex::builder_gridstore(temp_dir.path().to_path_buf(), config.clone()) .make_empty() .unwrap(); let mut mmap_builder = FullTextIndex::builder_mmap(temp_dir.path().to_path_buf(), config.clone(), true); mmap_builder.init().unwrap(); // Add some test documents with phrases let documents = vec![ (0, "the quick brown fox jumps over the lazy dog".to_string()), (1, "brown fox quick the jumps over lazy dog".to_string()), (2, "quick brown fox runs fast".to_string()), (3, "the lazy dog sleeps peacefully".to_string()), (4, "the brown brown fox".to_string()), ]; for (point_id, text) in documents { mutable_index .add_many(point_id, vec![text.clone()], &hw_counter) .unwrap(); mmap_builder .add_many(point_id, vec![text], &hw_counter) .unwrap(); } let mmap_index = mmap_builder.finalize().unwrap(); let check_matching = |index: FullTextIndex| { // Test regular text matching (should match documents containing all tokens regardless of order) let text_query = index .parse_text_query("quick brown fox", &hw_counter) .unwrap(); assert!(index.check_match(&text_query, 0)); assert!(index.check_match(&text_query, 1)); assert!(index.check_match(&text_query, 2)); let text_results: Vec<_> = index.filter_query(text_query, &hw_counter).collect(); // Should match documents 0, 1, and 2 (all contain "quick", "brown", "fox") assert_eq!(text_results.len(), 3); assert!(text_results.contains(&0)); assert!(text_results.contains(&1)); assert!(text_results.contains(&2)); // Test phrase matching (should only match documents with exact phrase in order) let phrase_query = index .parse_phrase_query("quick brown fox", &hw_counter) .unwrap(); assert!(index.check_match(&phrase_query, 0)); assert!(index.check_match(&phrase_query, 2)); let 
phrase_results: Vec<_> = index.filter_query(phrase_query, &hw_counter).collect(); // Should only match documents 0 and 2 (contain "quick brown fox" in that exact order) assert_eq!(phrase_results.len(), 2); assert!(phrase_results.contains(&0)); assert!(phrase_results.contains(&2)); assert!(!phrase_results.contains(&1)); // Document 1 has the words but not in the right order // Test phrase that doesn't exist let missing_query = index .parse_phrase_query("fox brown quick", &hw_counter) .unwrap(); let missing_results: Vec<_> = index.filter_query(missing_query, &hw_counter).collect(); // Should match no documents (no document contains this exact phrase) assert_eq!(missing_results.len(), 0); // Test valid phrase up to a token that doesn't exist let query_with_unknown_token = index.parse_phrase_query("quick brown bird", &hw_counter); // the phrase query is not valid because it contains an unknown token assert!(query_with_unknown_token.is_none()); // Test repeated words let phrase_query = index .parse_phrase_query("brown brown fox", &hw_counter) .unwrap(); assert!(index.check_match(&phrase_query, 4)); // Should only match document 4 let filter_results: Vec<_> = index.filter_query(phrase_query, &hw_counter).collect(); assert_eq!(filter_results.len(), 1); assert!(filter_results.contains(&4)); }; check_matching(mutable_index); check_matching(mmap_index); } #[test] fn test_ascii_folding_in_full_text_index_word() { let hw_counter = HardwareCounterCell::default(); let temp_dir = Builder::new().prefix("test_dir").tempdir().unwrap(); let config_enabled = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: None, on_disk: None, phrase_matching: None, stopwords: None, stemmer: None, ascii_folding: Some(true), }; let config_disabled = TextIndexParams { ascii_folding: Some(false), ..config_enabled.clone() }; // Index with folding enabled let mut index_enabled = 
FullTextIndex::new_gridstore(temp_dir.path().to_path_buf(), config_enabled.clone(), true) .unwrap() .unwrap(); // Index with folding disabled (separate storage path) let temp_dir2 = Builder::new().prefix("test_dir").tempdir().unwrap(); let mut index_disabled = FullTextIndex::new_gridstore( temp_dir2.path().to_path_buf(), config_disabled.clone(), true, ) .unwrap() .unwrap(); // Documents containing accents let docs = vec![ (0, "ação no coração".to_string()), (1, "café com leite".to_string()), ]; for (id, text) in &docs { index_enabled .add_many(*id as PointOffsetType, vec![text.clone()], &hw_counter) .unwrap(); index_disabled .add_many(*id as PointOffsetType, vec![text.clone()], &hw_counter) .unwrap(); } // ASCII-only queries should match only when folding is enabled let query_enabled = index_enabled.parse_text_query("acao", &hw_counter).unwrap(); assert!(index_enabled.check_match(&query_enabled, 0)); let results_enabled: Vec<_> = index_enabled .filter_query(query_enabled, &hw_counter) .collect(); assert!(results_enabled.contains(&0)); let query_disabled_opt = index_disabled.parse_text_query("acao", &hw_counter); // Query might still parse, but should not match anything if let Some(query_disabled) = query_disabled_opt { let results_disabled: Vec<_> = index_disabled .filter_query(query_disabled, &hw_counter) .collect(); assert!(!results_disabled.contains(&0)); } // Non-folded query must work in both let query_acento = index_enabled.parse_text_query("ação", &hw_counter).unwrap(); assert!(index_enabled.check_match(&query_acento, 0)); let results_acento: Vec<_> = index_enabled .filter_query(query_acento, &hw_counter) .collect(); assert!(results_acento.contains(&0)); let query_acento2 = index_disabled .parse_text_query("ação", &hw_counter) .unwrap(); let results_acento2: Vec<_> = index_disabled .filter_query(query_acento2, &hw_counter) .collect(); assert!(results_acento2.contains(&0)); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/ascii_folding.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/ascii_folding.rs
// ASCII folding implementation inspired by Lucene's ASCIIFoldingFilter. // Converts non-ASCII Latin letters and various symbols to ASCII equivalents. // This aims to be compatible with Lucene's mapping list used in ASCIIFoldingFilter. use std::borrow::Cow; pub fn fold_to_ascii_cow<'a>(input: Cow<'a, str>) -> Cow<'a, str> { // Cheap check if already ASCII if input.is_ascii() { return input; } Cow::Owned(fold_to_ascii(input.as_ref())) } pub fn fold_to_ascii(input: &str) -> String { let mut out = String::with_capacity(input.len()); for ch in input.chars() { if ch.is_ascii() { out.push(ch); continue; } match fold_char(ch) { Some(repl) => out.push_str(repl), None => out.push(ch), } } out.shrink_to_fit(); out } #[inline] fn fold_char(c: char) -> Option<&'static str> { // Quick test if c < '\u{0080}' { return None; } // Main mapping translated from Lucene's ASCIIFoldingFilter let s = match c { // A/a '\u{00C0}' | '\u{00C1}' | '\u{00C2}' | '\u{00C3}' | '\u{00C4}' | '\u{00C5}' | '\u{0100}' | '\u{0102}' | '\u{0104}' | '\u{01CD}' | '\u{01DE}' | '\u{01E0}' | '\u{01FA}' | '\u{0200}' | '\u{0202}' | '\u{0226}' | '\u{023A}' | '\u{1D00}' | '\u{1E00}' | '\u{1EA0}' | '\u{1EA2}' | '\u{1EA4}' | '\u{1EA6}' | '\u{1EA8}' | '\u{1EAA}' | '\u{1EAC}' | '\u{1EAE}' | '\u{1EB0}' | '\u{1EB2}' | '\u{1EB4}' | '\u{1EB6}' | '\u{24B6}' | '\u{FF21}' => "A", '\u{00E0}' | '\u{00E1}' | '\u{00E2}' | '\u{00E3}' | '\u{00E4}' | '\u{00E5}' | '\u{0101}' | '\u{0103}' | '\u{0105}' | '\u{01CE}' | '\u{01DF}' | '\u{01E1}' | '\u{01FB}' | '\u{0201}' | '\u{0203}' | '\u{0227}' | '\u{0250}' | '\u{0259}' | '\u{025A}' | '\u{1D8F}' | '\u{1D95}' | '\u{1E01}' | '\u{1E9A}' | '\u{1EA1}' | '\u{1EA3}' | '\u{1EA5}' | '\u{1EA7}' | '\u{1EA9}' | '\u{1EAB}' | '\u{1EAD}' | '\u{1EAF}' | '\u{1EB1}' | '\u{1EB3}' | '\u{1EB5}' | '\u{1EB7}' | '\u{2090}' | '\u{2094}' | '\u{24D0}' | '\u{2C65}' | '\u{2C6F}' | '\u{FF41}' => "a", '\u{A732}' => "AA", // Ꜳ '\u{00C6}' | '\u{01E2}' | '\u{01FC}' | '\u{1D01}' => "AE", '\u{A734}' => "AO", '\u{A736}' => 
"AU", '\u{A738}' | '\u{A73A}' => "AV", '\u{A73C}' => "AY", '\u{249C}' => "(a)", '\u{A733}' => "aa", '\u{00E6}' | '\u{01E3}' | '\u{01FD}' | '\u{1D02}' => "ae", '\u{A735}' => "ao", '\u{A737}' => "au", '\u{A739}' | '\u{A73B}' => "av", '\u{A73D}' => "ay", // B/b '\u{0181}' | '\u{0182}' | '\u{0243}' | '\u{0299}' | '\u{1D03}' | '\u{1E02}' | '\u{1E04}' | '\u{1E06}' | '\u{24B7}' | '\u{FF22}' => "B", '\u{0180}' | '\u{0183}' | '\u{0253}' | '\u{1D6C}' | '\u{1D80}' | '\u{1E03}' | '\u{1E05}' | '\u{1E07}' | '\u{24D1}' | '\u{FF42}' => "b", '\u{249D}' => "(b)", // C/c '\u{00C7}' | '\u{0106}' | '\u{0108}' | '\u{010A}' | '\u{010C}' | '\u{0187}' | '\u{023B}' | '\u{0297}' | '\u{1D04}' | '\u{1E08}' | '\u{24B8}' | '\u{FF23}' => "C", '\u{00E7}' | '\u{0107}' | '\u{0109}' | '\u{010B}' | '\u{010D}' | '\u{0188}' | '\u{023C}' | '\u{0255}' | '\u{1E09}' | '\u{2184}' | '\u{24D2}' | '\u{A73E}' | '\u{A73F}' | '\u{FF43}' => "c", '\u{249E}' => "(c)", // D/d '\u{00D0}' | '\u{010E}' | '\u{0110}' | '\u{0189}' | '\u{018A}' | '\u{018B}' | '\u{1D05}' | '\u{1D06}' | '\u{1E0A}' | '\u{1E0C}' | '\u{1E0E}' | '\u{1E10}' | '\u{1E12}' | '\u{24B9}' | '\u{A779}' | '\u{FF24}' => "D", '\u{00F0}' | '\u{010F}' | '\u{0111}' | '\u{018C}' | '\u{0221}' | '\u{0256}' | '\u{0257}' | '\u{1D6D}' | '\u{1D81}' | '\u{1D91}' | '\u{1E0B}' | '\u{1E0D}' | '\u{1E0F}' | '\u{1E11}' | '\u{1E13}' | '\u{24D3}' | '\u{A77A}' | '\u{FF44}' => "d", '\u{01C4}' | '\u{01F1}' => "DZ", '\u{01C5}' | '\u{01F2}' => "Dz", '\u{249F}' => "(d)", '\u{0238}' => "db", '\u{01C6}' | '\u{01F3}' | '\u{02A3}' | '\u{02A5}' => "dz", // E/e '\u{00C8}' | '\u{00C9}' | '\u{00CA}' | '\u{00CB}' | '\u{0112}' | '\u{0114}' | '\u{0116}' | '\u{0118}' | '\u{011A}' | '\u{018E}' | '\u{0190}' | '\u{0204}' | '\u{0206}' | '\u{0228}' | '\u{0246}' | '\u{1D07}' | '\u{1E14}' | '\u{1E16}' | '\u{1E18}' | '\u{1E1A}' | '\u{1E1C}' | '\u{1EB8}' | '\u{1EBA}' | '\u{1EBC}' | '\u{1EBE}' | '\u{1EC0}' | '\u{1EC2}' | '\u{1EC4}' | '\u{1EC6}' | '\u{24BA}' | '\u{2C7B}' | '\u{FF25}' => "E", '\u{00E8}' | 
'\u{00E9}' | '\u{00EA}' | '\u{00EB}' | '\u{0113}' | '\u{0115}' | '\u{0117}' | '\u{0119}' | '\u{011B}' | '\u{01DD}' | '\u{0205}' | '\u{0207}' | '\u{0229}' | '\u{0247}' | '\u{0258}' | '\u{025B}' | '\u{025C}' | '\u{025D}' | '\u{025E}' | '\u{029A}' | '\u{1D08}' | '\u{1D92}' | '\u{1D93}' | '\u{1D94}' | '\u{1E15}' | '\u{1E17}' | '\u{1E19}' | '\u{1E1B}' | '\u{1E1D}' | '\u{1EB9}' | '\u{1EBB}' | '\u{1EBD}' | '\u{1EBF}' | '\u{1EC1}' | '\u{1EC3}' | '\u{1EC5}' | '\u{1EC7}' | '\u{2091}' | '\u{24D4}' | '\u{2C78}' | '\u{FF45}' => "e", '\u{24A0}' => "(e)", // F/f and ligatures '\u{0191}' | '\u{1E1E}' | '\u{24BB}' | '\u{A730}' | '\u{A77B}' | '\u{A7FB}' | '\u{FF26}' => "F", '\u{0192}' | '\u{1D6E}' | '\u{1D82}' | '\u{1E1F}' | '\u{1E9B}' | '\u{24D5}' | '\u{A77C}' | '\u{FF46}' => "f", '\u{24A1}' => "(f)", '\u{FB00}' => "ff", '\u{FB03}' => "ffi", '\u{FB04}' => "ffl", '\u{FB01}' => "fi", '\u{FB02}' => "fl", // G/g '\u{011C}' | '\u{011E}' | '\u{0120}' | '\u{0122}' | '\u{0193}' | '\u{01E4}' | '\u{01E6}' | '\u{01F4}' | '\u{0262}' | '\u{029B}' | '\u{1E20}' | '\u{24BC}' | '\u{A77D}' | '\u{A77E}' | '\u{FF27}' => "G", '\u{011D}' | '\u{011F}' | '\u{0121}' | '\u{0123}' | '\u{01E5}' | '\u{01E7}' | '\u{01F5}' | '\u{0260}' | '\u{0261}' | '\u{1D77}' | '\u{1D79}' | '\u{1D83}' | '\u{1E21}' | '\u{24D6}' | '\u{A77F}' | '\u{FF47}' => "g", '\u{24A2}' => "(g)", // H/h, HV '\u{0124}' | '\u{0126}' | '\u{021E}' | '\u{029C}' | '\u{1E22}' | '\u{1E24}' | '\u{1E26}' | '\u{1E28}' | '\u{1E2A}' | '\u{24BD}' | '\u{2C67}' | '\u{2C75}' | '\u{FF28}' => "H", '\u{0125}' | '\u{0127}' | '\u{021F}' | '\u{0265}' | '\u{0266}' | '\u{02AE}' | '\u{02AF}' | '\u{1E23}' | '\u{1E25}' | '\u{1E27}' | '\u{1E29}' | '\u{1E2B}' | '\u{1E96}' | '\u{24D7}' | '\u{2C68}' | '\u{2C76}' | '\u{FF48}' => "h", '\u{01F6}' => "HV", '\u{24A3}' => "(h)", '\u{0195}' => "hv", // I/i, IJ '\u{00CC}' | '\u{00CD}' | '\u{00CE}' | '\u{00CF}' | '\u{0128}' | '\u{012A}' | '\u{012C}' | '\u{012E}' | '\u{0130}' | '\u{0196}' | '\u{0197}' | '\u{01CF}' | '\u{0208}' | 
'\u{020A}' | '\u{026A}' | '\u{1D7B}' | '\u{1E2C}' | '\u{1E2E}' | '\u{1EC8}' | '\u{1ECA}' | '\u{24BE}' | '\u{A7FE}' | '\u{FF29}' => "I", '\u{00EC}' | '\u{00ED}' | '\u{00EE}' | '\u{00EF}' | '\u{0129}' | '\u{012B}' | '\u{012D}' | '\u{012F}' | '\u{0131}' | '\u{01D0}' | '\u{0209}' | '\u{020B}' | '\u{0268}' | '\u{1D09}' | '\u{1D62}' | '\u{1D7C}' | '\u{1D96}' | '\u{1E2D}' | '\u{1E2F}' | '\u{1EC9}' | '\u{1ECB}' | '\u{2071}' | '\u{24D8}' | '\u{FF49}' => "i", '\u{0132}' => "IJ", '\u{24A4}' => "(i)", '\u{0133}' => "ij", // J/j '\u{0134}' | '\u{0248}' | '\u{1D0A}' | '\u{24BF}' | '\u{FF2A}' => "J", '\u{0135}' | '\u{01F0}' | '\u{0237}' | '\u{0249}' | '\u{025F}' | '\u{0284}' | '\u{029D}' | '\u{24D9}' | '\u{2C7C}' | '\u{FF4A}' => "j", '\u{24A5}' => "(j)", // K/k '\u{0136}' | '\u{0198}' | '\u{01E8}' | '\u{1D0B}' | '\u{1E30}' | '\u{1E32}' | '\u{1E34}' | '\u{24C0}' | '\u{2C69}' | '\u{A740}' | '\u{A742}' | '\u{A744}' | '\u{FF2B}' => "K", '\u{0137}' | '\u{0199}' | '\u{01E9}' | '\u{029E}' | '\u{1D84}' | '\u{1E31}' | '\u{1E33}' | '\u{1E35}' | '\u{24DA}' | '\u{2C6A}' | '\u{A741}' | '\u{A743}' | '\u{A745}' | '\u{FF4B}' => "k", '\u{24A6}' => "(k)", // L/l, LJ/ll etc. 
'\u{0139}' | '\u{013B}' | '\u{013D}' | '\u{013F}' | '\u{0141}' | '\u{023D}' | '\u{029F}' | '\u{1D0C}' | '\u{1E36}' | '\u{1E38}' | '\u{1E3A}' | '\u{1E3C}' | '\u{24C1}' | '\u{2C60}' | '\u{2C62}' | '\u{A746}' | '\u{A748}' | '\u{A780}' | '\u{FF2C}' => "L", '\u{013A}' | '\u{013C}' | '\u{013E}' | '\u{0140}' | '\u{0142}' | '\u{019A}' | '\u{0234}' | '\u{026B}' | '\u{026C}' | '\u{026D}' | '\u{1D85}' | '\u{1E37}' | '\u{1E39}' | '\u{1E3B}' | '\u{1E3D}' | '\u{24DB}' | '\u{2C61}' | '\u{A747}' | '\u{A749}' | '\u{A781}' | '\u{FF4C}' => "l", '\u{01C7}' => "LJ", '\u{1EFA}' => "LL", '\u{01C8}' => "Lj", '\u{24A7}' => "(l)", '\u{01C9}' => "lj", '\u{1EFB}' => "ll", '\u{02AA}' => "ls", '\u{02AB}' => "lz", // M/m '\u{019C}' | '\u{1D0D}' | '\u{1E3E}' | '\u{1E40}' | '\u{1E42}' | '\u{24C2}' | '\u{2C6E}' | '\u{A7FD}' | '\u{A7FF}' | '\u{FF2D}' => "M", '\u{026F}' | '\u{0270}' | '\u{0271}' | '\u{1D6F}' | '\u{1D86}' | '\u{1E3F}' | '\u{1E41}' | '\u{1E43}' | '\u{24DC}' | '\u{FF4D}' => "m", '\u{24A8}' => "(m)", // N/n, NJ '\u{00D1}' | '\u{0143}' | '\u{0145}' | '\u{0147}' | '\u{014A}' | '\u{019D}' | '\u{01F8}' | '\u{0220}' | '\u{0274}' | '\u{1D0E}' | '\u{1E44}' | '\u{1E46}' | '\u{1E48}' | '\u{1E4A}' | '\u{24C3}' | '\u{FF2E}' => "N", '\u{00F1}' | '\u{0144}' | '\u{0146}' | '\u{0148}' | '\u{0149}' | '\u{014B}' | '\u{019E}' | '\u{01F9}' | '\u{0235}' | '\u{0272}' | '\u{0273}' | '\u{1D70}' | '\u{1D87}' | '\u{1E45}' | '\u{1E47}' | '\u{1E49}' | '\u{1E4B}' | '\u{207F}' | '\u{24DD}' | '\u{FF4E}' => "n", '\u{01CA}' => "NJ", '\u{01CB}' => "Nj", '\u{24A9}' => "(n)", '\u{01CC}' => "nj", // O/o, OE/OO/OU '\u{00D2}' | '\u{00D3}' | '\u{00D4}' | '\u{00D5}' | '\u{00D6}' | '\u{00D8}' | '\u{014C}' | '\u{014E}' | '\u{0150}' | '\u{0186}' | '\u{019F}' | '\u{01A0}' | '\u{01D1}' | '\u{01EA}' | '\u{01EC}' | '\u{01FE}' | '\u{020C}' | '\u{020E}' | '\u{022A}' | '\u{022C}' | '\u{022E}' | '\u{0230}' | '\u{1D0F}' | '\u{1D10}' | '\u{1E4C}' | '\u{1E4E}' | '\u{1E50}' | '\u{1E52}' | '\u{1ECC}' | '\u{1ECE}' | '\u{1ED0}' | '\u{1ED2}' | 
'\u{1ED4}' | '\u{1ED6}' | '\u{1ED8}' | '\u{1EDA}' | '\u{1EDC}' | '\u{1EDE}' | '\u{1EE0}' | '\u{1EE2}' | '\u{24C4}' | '\u{A74A}' | '\u{A74C}' | '\u{FF2F}' => "O", '\u{00F2}' | '\u{00F3}' | '\u{00F4}' | '\u{00F5}' | '\u{00F6}' | '\u{00F8}' | '\u{014D}' | '\u{014F}' | '\u{0151}' | '\u{01A1}' | '\u{01D2}' | '\u{01EB}' | '\u{01ED}' | '\u{01FF}' | '\u{020D}' | '\u{020F}' | '\u{022B}' | '\u{022D}' | '\u{022F}' | '\u{0231}' | '\u{0254}' | '\u{0275}' | '\u{1D16}' | '\u{1D17}' | '\u{1D97}' | '\u{1E4D}' | '\u{1E4F}' | '\u{1E51}' | '\u{1E53}' | '\u{1ECD}' | '\u{1ECF}' | '\u{1ED1}' | '\u{1ED3}' | '\u{1ED5}' | '\u{1ED7}' | '\u{1ED9}' | '\u{1EDB}' | '\u{1EDD}' | '\u{1EDF}' | '\u{1EE1}' | '\u{1EE3}' | '\u{2092}' | '\u{24DE}' | '\u{2C7A}' | '\u{A74B}' | '\u{A74D}' | '\u{FF4F}' => "o", '\u{0152}' | '\u{0276}' => "OE", '\u{A74E}' => "OO", '\u{0222}' | '\u{1D15}' => "OU", '\u{24AA}' => "(o)", '\u{0153}' | '\u{1D14}' => "oe", '\u{A74F}' => "oo", '\u{0223}' => "ou", // P/p '\u{01A4}' | '\u{1D18}' | '\u{1E54}' | '\u{1E56}' | '\u{24C5}' | '\u{2C63}' | '\u{A750}' | '\u{A752}' | '\u{A754}' | '\u{FF30}' => "P", '\u{01A5}' | '\u{1D71}' | '\u{1D7D}' | '\u{1D88}' | '\u{1E55}' | '\u{1E57}' | '\u{24DF}' | '\u{A751}' | '\u{A753}' | '\u{A755}' | '\u{A7FC}' | '\u{FF50}' => "p", '\u{24AB}' => "(p)", // Q/q '\u{024A}' | '\u{24C6}' | '\u{A756}' | '\u{A758}' | '\u{FF31}' => "Q", '\u{0138}' | '\u{024B}' | '\u{02A0}' | '\u{24E0}' | '\u{A757}' | '\u{A759}' | '\u{FF51}' => "q", '\u{24AC}' => "(q)", '\u{0239}' => "qp", // R/r '\u{0154}' | '\u{0156}' | '\u{0158}' | '\u{0210}' | '\u{0212}' | '\u{024C}' | '\u{0280}' | '\u{0281}' | '\u{1D19}' | '\u{1D1A}' | '\u{1E58}' | '\u{1E5A}' | '\u{1E5C}' | '\u{1E5E}' | '\u{24C7}' | '\u{2C64}' | '\u{A75A}' | '\u{A782}' | '\u{FF32}' => "R", '\u{0155}' | '\u{0157}' | '\u{0159}' | '\u{0211}' | '\u{0213}' | '\u{024D}' | '\u{027C}' | '\u{027D}' | '\u{027E}' | '\u{027F}' | '\u{1D63}' | '\u{1D72}' | '\u{1D73}' | '\u{1D89}' | '\u{1E59}' | '\u{1E5B}' | '\u{1E5D}' | '\u{1E5F}' | 
'\u{24E1}' | '\u{A75B}' | '\u{A783}' | '\u{FF52}' => "r", '\u{24AD}' => "(r)", // S/s and ß/ẞ and st '\u{015A}' | '\u{015C}' | '\u{015E}' | '\u{0160}' | '\u{0218}' | '\u{1E60}' | '\u{1E62}' | '\u{1E64}' | '\u{1E66}' | '\u{1E68}' | '\u{24C8}' | '\u{A731}' | '\u{A785}' | '\u{FF33}' => "S", '\u{015B}' | '\u{015D}' | '\u{015F}' | '\u{0161}' | '\u{017F}' | '\u{0219}' | '\u{023F}' | '\u{0282}' | '\u{1D74}' | '\u{1D8A}' | '\u{1E61}' | '\u{1E63}' | '\u{1E65}' | '\u{1E67}' | '\u{1E69}' | '\u{1E9C}' | '\u{1E9D}' | '\u{24E2}' | '\u{A784}' | '\u{FF53}' => "s", '\u{1E9E}' => "SS", '\u{24AE}' => "(s)", '\u{00DF}' => "ss", '\u{FB06}' => "st", // T/t, TH/th, TZ '\u{0162}' | '\u{0164}' | '\u{0166}' | '\u{01AC}' | '\u{01AE}' | '\u{021A}' | '\u{023E}' | '\u{1D1B}' | '\u{1E6A}' | '\u{1E6C}' | '\u{1E6E}' | '\u{1E70}' | '\u{24C9}' | '\u{A786}' | '\u{FF34}' => "T", '\u{0163}' | '\u{0165}' | '\u{0167}' | '\u{01AB}' | '\u{01AD}' | '\u{021B}' | '\u{0236}' | '\u{0287}' | '\u{0288}' | '\u{1D75}' | '\u{1E6B}' | '\u{1E6D}' | '\u{1E6F}' | '\u{1E71}' | '\u{1E97}' | '\u{24E3}' | '\u{2C66}' | '\u{FF54}' => "t", '\u{00DE}' | '\u{A766}' => "TH", '\u{A728}' => "TZ", '\u{24AF}' => "(t)", '\u{02A8}' => "tc", '\u{00FE}' | '\u{1D7A}' | '\u{A767}' => "th", '\u{02A6}' => "ts", '\u{A729}' => "tz", // U/u '\u{00D9}' | '\u{00DA}' | '\u{00DB}' | '\u{00DC}' | '\u{0168}' | '\u{016A}' | '\u{016C}' | '\u{016E}' | '\u{0170}' | '\u{0172}' | '\u{01AF}' | '\u{01D3}' | '\u{01D5}' | '\u{01D7}' | '\u{01D9}' | '\u{01DB}' | '\u{0214}' | '\u{0216}' | '\u{0244}' | '\u{1D1C}' | '\u{1D7E}' | '\u{1E72}' | '\u{1E74}' | '\u{1E76}' | '\u{1E78}' | '\u{1E7A}' | '\u{1EE4}' | '\u{1EE6}' | '\u{1EE8}' | '\u{1EEA}' | '\u{1EEC}' | '\u{1EEE}' | '\u{1EF0}' | '\u{24CA}' | '\u{FF35}' => "U", '\u{00F9}' | '\u{00FA}' | '\u{00FB}' | '\u{00FC}' | '\u{0169}' | '\u{016B}' | '\u{016D}' | '\u{016F}' | '\u{0171}' | '\u{0173}' | '\u{01B0}' | '\u{01D4}' | '\u{01D6}' | '\u{01D8}' | '\u{01DA}' | '\u{01DC}' | '\u{0215}' | '\u{0217}' | '\u{0289}' | 
'\u{1D64}' | '\u{1D99}' | '\u{1E73}' | '\u{1E75}' | '\u{1E77}' | '\u{1E79}' | '\u{1E7B}' | '\u{1EE5}' | '\u{1EE7}' | '\u{1EE9}' | '\u{1EEB}' | '\u{1EED}' | '\u{1EEF}' | '\u{1EF1}' | '\u{24E4}' | '\u{FF55}' => "u", '\u{24B0}' => "(u)", '\u{1D6B}' => "ue", // V/v '\u{01B2}' | '\u{0245}' | '\u{1D20}' | '\u{1E7C}' | '\u{1E7E}' | '\u{1EFC}' | '\u{24CB}' | '\u{A75E}' | '\u{A768}' | '\u{FF36}' => "V", '\u{028B}' | '\u{028C}' | '\u{1D65}' | '\u{1D8C}' | '\u{1E7D}' | '\u{1E7F}' | '\u{24E5}' | '\u{2C71}' | '\u{2C74}' | '\u{A75F}' | '\u{FF56}' => "v", '\u{A760}' => "VY", '\u{24B1}' => "(v)", '\u{A761}' => "vy", // W/w '\u{0174}' | '\u{01F7}' | '\u{1D21}' | '\u{1E80}' | '\u{1E82}' | '\u{1E84}' | '\u{1E86}' | '\u{1E88}' | '\u{24CC}' | '\u{2C72}' | '\u{FF37}' => "W", '\u{0175}' | '\u{01BF}' | '\u{028D}' | '\u{1E81}' | '\u{1E83}' | '\u{1E85}' | '\u{1E87}' | '\u{1E89}' | '\u{1E98}' | '\u{24E6}' | '\u{2C73}' | '\u{FF57}' => "w", '\u{24B2}' => "(w)", // X/x '\u{1E8A}' | '\u{1E8C}' | '\u{24CD}' | '\u{FF38}' => "X", '\u{1D8D}' | '\u{1E8B}' | '\u{1E8D}' | '\u{2093}' | '\u{24E7}' | '\u{FF58}' => "x", '\u{24B3}' => "(x)", // Y/y '\u{00DD}' | '\u{0176}' | '\u{0178}' | '\u{01B3}' | '\u{0232}' | '\u{024E}' | '\u{028F}' | '\u{1E8E}' | '\u{1EF2}' | '\u{1EF4}' | '\u{1EF6}' | '\u{1EF8}' | '\u{1EFE}' | '\u{24CE}' | '\u{FF39}' => "Y", '\u{00FD}' | '\u{00FF}' | '\u{0177}' | '\u{01B4}' | '\u{0233}' | '\u{024F}' | '\u{028E}' | '\u{1E8F}' | '\u{1E99}' | '\u{1EF3}' | '\u{1EF5}' | '\u{1EF7}' | '\u{1EF9}' | '\u{1EFF}' | '\u{24E8}' | '\u{FF59}' => "y", '\u{24B4}' => "(y)", // Z/z '\u{0179}' | '\u{017B}' | '\u{017D}' | '\u{01B5}' | '\u{021C}' | '\u{0224}' | '\u{1D22}' | '\u{1E90}' | '\u{1E92}' | '\u{1E94}' | '\u{24CF}' | '\u{2C6B}' | '\u{A762}' | '\u{FF3A}' => "Z", '\u{017A}' | '\u{017C}' | '\u{017E}' | '\u{01B6}' | '\u{021D}' | '\u{0225}' | '\u{0240}' | '\u{0290}' | '\u{0291}' | '\u{1D76}' | '\u{1D8E}' | '\u{1E91}' | '\u{1E93}' | '\u{1E95}' | '\u{24E9}' | '\u{2C6C}' | '\u{A763}' | '\u{FF5A}' => "z", 
'\u{24B5}' => "(z)", // Digits and circled/parenthesized forms '\u{2070}' | '\u{2080}' | '\u{24EA}' | '\u{24FF}' | '\u{FF10}' => "0", '\u{00B9}' | '\u{2081}' | '\u{2460}' | '\u{24F5}' | '\u{2776}' | '\u{2780}' | '\u{278A}' | '\u{FF11}' => "1", '\u{2488}' => "1.", '\u{2474}' => "(1)", '\u{00B2}' | '\u{2082}' | '\u{2461}' | '\u{24F6}' | '\u{2777}' | '\u{2781}' | '\u{278B}' | '\u{FF12}' => "2", '\u{2489}' => "2.", '\u{2475}' => "(2)", '\u{00B3}' | '\u{2083}' | '\u{2462}' | '\u{24F7}' | '\u{2778}' | '\u{2782}' | '\u{278C}' | '\u{FF13}' => "3", '\u{248A}' => "3.", '\u{2476}' => "(3)", '\u{2074}' | '\u{2084}' | '\u{2463}' | '\u{24F8}' | '\u{2779}' | '\u{2783}' | '\u{278D}' | '\u{FF14}' => "4", '\u{248B}' => "4.", '\u{2477}' => "(4)", '\u{2075}' | '\u{2085}' | '\u{2464}' | '\u{24F9}' | '\u{277A}' | '\u{2784}' | '\u{278E}' | '\u{FF15}' => "5", '\u{248C}' => "5.", '\u{2478}' => "(5)", '\u{2076}' | '\u{2086}' | '\u{2465}' | '\u{24FA}' | '\u{277B}' | '\u{2785}' | '\u{278F}' | '\u{FF16}' => "6", '\u{248D}' => "6.", '\u{2479}' => "(6)", '\u{2077}' | '\u{2087}' | '\u{2466}' | '\u{24FB}' | '\u{277C}' | '\u{2786}' | '\u{2790}' | '\u{FF17}' => "7", '\u{248E}' => "7.", '\u{247A}' => "(7)", '\u{2078}' | '\u{2088}' | '\u{2467}' | '\u{24FC}' | '\u{277D}' | '\u{2787}' | '\u{2791}' | '\u{FF18}' => "8", '\u{248F}' => "8.", '\u{247B}' => "(8)", '\u{2079}' | '\u{2089}' | '\u{2468}' | '\u{24FD}' | '\u{277E}' | '\u{2788}' | '\u{2792}' | '\u{FF19}' => "9", '\u{2490}' => "9.", '\u{247C}' => "(9)", '\u{2469}' | '\u{24FE}' | '\u{277F}' | '\u{2789}' | '\u{2793}' => "10", '\u{2491}' => "10.", '\u{247D}' => "(10)", '\u{246A}' | '\u{24EB}' => "11", '\u{2492}' => "11.", '\u{247E}' => "(11)", '\u{246B}' | '\u{24EC}' => "12", '\u{2493}' => "12.", '\u{247F}' => "(12)", '\u{246C}' | '\u{24ED}' => "13", '\u{2494}' => "13.", '\u{2480}' => "(13)", '\u{246D}' | '\u{24EE}' => "14", '\u{2495}' => "14.", '\u{2481}' => "(14)", '\u{246E}' | '\u{24EF}' => "15", '\u{2496}' => "15.", '\u{2482}' => "(15)", '\u{246F}' 
| '\u{24F0}' => "16", '\u{2497}' => "16.", '\u{2483}' => "(16)", '\u{2470}' | '\u{24F1}' => "17", '\u{2498}' => "17.", '\u{2484}' => "(17)", '\u{2471}' | '\u{24F2}' => "18", '\u{2499}' => "18.", '\u{2485}' => "(18)", '\u{2472}' | '\u{24F3}' => "19", '\u{249A}' => "19.", '\u{2486}' => "(19)", '\u{2473}' | '\u{24F4}' => "20", '\u{249B}' => "20.", '\u{2487}' => "(20)", // Quotes/dashes and punctuation '\u{00AB}' | '\u{00BB}' | '\u{201C}' | '\u{201D}' | '\u{201E}' | '\u{2033}' | '\u{2036}' | '\u{275D}' | '\u{275E}' | '\u{276E}' | '\u{276F}' | '\u{FF02}' => "\"", '\u{2018}' | '\u{2019}' | '\u{201A}' | '\u{201B}' | '\u{2032}' | '\u{2035}' | '\u{2039}' | '\u{203A}' | '\u{275B}' | '\u{275C}' | '\u{FF07}' => "'", '\u{2010}' | '\u{2011}' | '\u{2012}' | '\u{2013}' | '\u{2014}' | '\u{207B}' | '\u{208B}' | '\u{FF0D}' => "-", '\u{2045}' | '\u{2772}' | '\u{FF3B}' => "[", '\u{2046}' | '\u{2773}' | '\u{FF3D}' => "]", '\u{207D}' | '\u{208D}' | '\u{2768}' | '\u{276A}' | '\u{FF08}' => "(", '\u{2E28}' => "((", '\u{207E}' | '\u{208E}' | '\u{2769}' | '\u{276B}' | '\u{FF09}' => ")", '\u{2E29}' => "))", '\u{276C}' | '\u{2770}' | '\u{FF1C}' => "<", '\u{276D}' | '\u{2771}' | '\u{FF1E}' => ">", '\u{2774}' | '\u{FF5B}' => "{", '\u{2775}' | '\u{FF5D}' => "}", '\u{207A}' | '\u{208A}' | '\u{FF0B}' => "+", '\u{207C}' | '\u{208C}' | '\u{FF1D}' => "=", '\u{FF01}' => "!", '\u{203C}' => "!!", '\u{2049}' => "!?", '\u{FF03}' => "#", '\u{FF04}' => "$", '\u{2052}' | '\u{FF05}' => "%", '\u{FF06}' => "&", '\u{204E}' | '\u{FF0A}' => "*", '\u{FF0C}' => ",", '\u{FF0E}' => ".", '\u{2044}' | '\u{FF0F}' => "/", '\u{FF1A}' => ":", '\u{204F}' | '\u{FF1B}' => ";", '\u{FF1F}' => "?", '\u{2047}' => "??", '\u{2048}' => "?!", '\u{FF20}' => "@", '\u{FF3C}' => "\\", '\u{2038}' | '\u{FF3E}' => "^", '\u{FF3F}' => "_", '\u{2053}' | '\u{FF5E}' => "~", _ => return None, }; Some(s) } #[cfg(test)] mod tests { use super::fold_to_ascii; #[test] fn basic_examples() { assert_eq!( fold_to_ascii("ação café jalapeño Über"), "acao cafe 
jalapeno Uber" ); assert_eq!(fold_to_ascii("Æsop and Œuvre"), "AEsop and OEuvre"); assert_eq!(fold_to_ascii("straße"), "strasse"); assert_eq!(fold_to_ascii("Łódź"), "Lodz"); assert_eq!( fold_to_ascii("“quote” — test 1②⒊"), "\"quote\" - test 123." ); // japanese characters are unchanged assert_eq!( fold_to_ascii("日本語のテキストです。Qdrantのコードで単体テストで使用されています。"), "日本語のテキストです。Qdrantのコードで単体テストで使用されています。" ); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/japanese.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/japanese.rs
use std::borrow::Cow; use std::sync::LazyLock; use vaporetto::{Model, Predictor, Sentence}; use super::TokensProcessor; /// Vaporetto prediction model. Source: https://github.com/daac-tools/vaporetto-models/releases/tag/v0.5.0 const MODEL: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/tokenizer/bccwj-suw_c1.0.model" )); /// Sha512 checksum of the model to ensure integrity and make modifications or corrupt model file easier to detect. #[cfg(test)] const MODEL_CHECKSUM: [u8; 64] = [ 34, 108, 156, 130, 7, 199, 31, 24, 147, 156, 119, 202, 98, 129, 109, 101, 114, 8, 250, 182, 159, 28, 112, 122, 214, 50, 51, 191, 118, 112, 143, 237, 70, 15, 96, 45, 78, 76, 90, 62, 178, 14, 86, 194, 87, 33, 19, 79, 55, 50, 212, 99, 98, 65, 102, 171, 123, 150, 110, 229, 88, 224, 43, 203, ]; // Global initialization of the Japanese tokenizer. static GLOBAL_JAPANESE_TOKENIZER: LazyLock<JapaneseTokenizer> = LazyLock::new(JapaneseTokenizer::init); /// Tokenizer for Japanese text using vaporetto tokenizer. struct JapaneseTokenizer { predictor: Predictor, } impl JapaneseTokenizer { /// Initializes a new `JapaneseTokenizer`. Should only called once and then kept allocated somewhere for efficient reuse. fn init() -> Self { let model = Model::read_slice(MODEL).unwrap().0; let predictor = Predictor::new(model, false).unwrap(); Self { predictor } } fn tokenize<'a, C: FnMut(Cow<'a, str>)>( &self, input: &'a str, tokens_processor: &TokensProcessor, mut cb: C, ) { let Ok(mut s) = Sentence::from_raw(Cow::Borrowed(input)) else { return; }; self.predictor.predict(&mut s); // TODO(multilingual): Implement similar method to `iter_tokens()` that allows returning borrowed Cows instead of needlessly cloning here. for i in s.iter_tokens() { let surface = i.surface(); // Skip if all characters are not alphanumeric or if the surface is empty. 
if tokens_processor.is_stopword(surface) || surface.chars().all(|char| !char.is_alphabetic()) { continue; } let surface = if tokens_processor.lowercase { Cow::Owned(surface.to_lowercase()) } else { Cow::Owned(surface.to_string()) }; cb(surface); } } } /// Tokenizes the given `input` of Japanese text and calls `cb` with each tokens. pub fn tokenize<'a, C: FnMut(Cow<'a, str>)>(input: &'a str, config: &TokensProcessor, cb: C) { GLOBAL_JAPANESE_TOKENIZER.tokenize(input, config, cb); } #[cfg(test)] mod test { use std::io::Write; use sha2::{Digest, Sha512}; use super::*; #[test] fn test_assert_model_integrity() { // Sha512 checksum of model let mut sha512 = Sha512::new(); sha512.write_all(MODEL).unwrap(); let sum = sha512.finalize(); assert!( sum.as_ref() == MODEL_CHECKSUM, "Japanese Tokenizer Model integrity check failed! The file might be modified or corrupted." ); // The init() function is completely deterministic, since the model that gets loaded is included in // qdrant binary file. We test initialization here once to ensure it won't panic on runtime (eg. when a model has changed). let _ = JapaneseTokenizer::init(); } #[test] fn test_tokenization() { let input = "日本語のテキストです。Qdrantのコードで単体テストで使用されています。"; let tokens_processor = TokensProcessor::default(); let mut out = vec![]; tokenize(input, &tokens_processor, |i| { out.push(i.to_string()); }); assert_eq!( out, vec![ "日本", "語", "の", "テキスト", "です", "Qdrant", "の", "コード", "で", "単体", "テスト", "で", "使用", "さ", "れ", "て", "い", "ます", ] ); } #[test] fn test_tokenization_partially_japanese() { let input = "日本語のテキストです。It's used in Qdrant's code in a unit test"; let tokens_processor = TokensProcessor::default(); let mut out = vec![]; tokenize(input, &tokens_processor, |i| { out.push(i.to_string()); }); assert_eq!( out, vec![ "日本", "語", "の", "テキスト", "です", "It", "s", "used", "in", "Qdrant", "s", "code", "in", "a", "unit", "test" ] ); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/tokens_processor.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/tokens_processor.rs
use std::borrow::Cow; use std::sync::Arc; use super::stemmer::Stemmer; use crate::index::field_index::full_text_index::stop_words::StopwordsFilter; // TODO(rocksdb): Remove `Clone` once rocksdb has been removed! #[derive(Debug, Clone, Default)] pub struct TokensProcessor { pub lowercase: bool, pub ascii_folding: bool, stopwords_filter: Arc<StopwordsFilter>, // TDOO(rocksdb): Remove once rocksdb has been removed! stemmer: Option<Stemmer>, pub min_token_len: Option<usize>, pub max_token_len: Option<usize>, } impl TokensProcessor { pub fn new( lowercase: bool, ascii_folding: bool, stopwords_filter: Arc<StopwordsFilter>, stemmer: Option<Stemmer>, min_token_len: Option<usize>, max_token_len: Option<usize>, ) -> Self { Self { lowercase, ascii_folding, stopwords_filter, stemmer, min_token_len, max_token_len, } } #[cfg(test)] pub fn set_stopwords(&mut self, stopwords_filter: Arc<StopwordsFilter>) { self.stopwords_filter = stopwords_filter; } /// Applies stemming if enabled and applies the configured stemming algorithm. Does nothing if /// stemming is disabled. pub fn stem_if_enabled<'a>(&self, input: Cow<'a, str>) -> Cow<'a, str> { let Some(stemmer) = self.stemmer.as_ref() else { return input; }; stemmer.stem(input) } /// Applies ASCII folding if enabled. Converts accented characters to their ASCII equivalents. 
pub fn fold_if_enabled<'a>(&self, input: Cow<'a, str>) -> Cow<'a, str> { if self.ascii_folding { super::ascii_folding::fold_to_ascii_cow(input) } else { input } } pub fn is_stopword(&self, token: &str) -> bool { self.stopwords_filter.is_stopword(token) } pub fn process_token_cow<'a>( &self, mut token_cow: Cow<'a, str>, check_max_len: bool, ) -> Option<Cow<'a, str>> { let Self { lowercase, stopwords_filter, stemmer, min_token_len, max_token_len, ascii_folding, } = self; if token_cow.is_empty() { return None; } // Handle ASCII folding (normalize accents) if *ascii_folding { token_cow = super::ascii_folding::fold_to_ascii_cow(token_cow); } // Handle lowercase if *lowercase { token_cow = Cow::Owned(token_cow.to_lowercase()); } // Handle stopwords if stopwords_filter.is_stopword(&token_cow) { return None; } // Handle stemming if let Some(stemmer) = stemmer.as_ref() { token_cow = stemmer.stem(token_cow); }; // Handle token length if min_token_len.is_some_and(|min_len| token_cow.chars().count() < min_len) || (check_max_len && max_token_len.is_some_and(|max_len| token_cow.chars().count() > max_len)) { return None; } Some(token_cow) } /// Processes a token for indexing. Applies all configured options to the token. /// /// Returns `None` if: /// - The token is empty. /// - The token is a stopword. /// - The token's chars length is outside of the `min_token_len` and (optionally) `max_token_len` range. pub fn process_token<'a>(&self, token: &'a str, check_max_len: bool) -> Option<Cow<'a, str>> { self.process_token_cow(Cow::Borrowed(token), check_max_len) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/stemmer.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/stemmer.rs
use std::borrow::Cow; use std::str::FromStr; use std::sync::Arc; use rust_stemmers::Algorithm; use crate::data_types::index::{SnowballLanguage, SnowballParams, StemmingAlgorithm}; /// Abstraction to handle different stemming libraries and algorithms with a clean API. #[derive(Clone)] pub enum Stemmer { // TODO(rocksdb): Remove `Clone` and this Arc once rocksdb has been removed! Snowball(Arc<rust_stemmers::Stemmer>), } impl std::fmt::Debug for Stemmer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Snowball(_) => f.debug_tuple("Snowball").finish(), } } } impl Stemmer { pub fn from_algorithm(config: &StemmingAlgorithm) -> Self { match config { StemmingAlgorithm::Snowball(SnowballParams { r#type: _, language, }) => Self::Snowball(Arc::new(rust_stemmers::Stemmer::create(Algorithm::from( *language, )))), } } /// Construct default stemmer for a given language. /// Returns `None` if the language is not supported. pub fn try_default_from_language(language: &str) -> Option<Self> { let language = SnowballLanguage::from_str(language).ok()?; Some(Self::Snowball(Arc::new(rust_stemmers::Stemmer::create( Algorithm::from(language), )))) } pub fn stem<'a>(&self, input: Cow<'a, str>) -> Cow<'a, str> { match self { Stemmer::Snowball(algorithm) => algorithm.stem_cow(input), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/multilingual.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/multilingual.rs
use std::borrow::Cow; use charabia::normalizer::{ClassifierOption, NormalizedTokenIter, NormalizerOption}; use charabia::{Language, Script, Segment, StrDetection}; use super::{TokensProcessor, japanese}; /// Default normalizer options from charabia(https://github.com/meilisearch/charabia/blob/main/charabia/src/normalizer/mod.rs#L82) used /// in `str::tokenize()`. const DEFAULT_NORMALIZER: NormalizerOption = NormalizerOption { create_char_map: false, lossy: true, classifier: ClassifierOption { stop_words: None, separators: None, }, }; pub struct MultilingualTokenizer; impl MultilingualTokenizer { pub fn tokenize<'a, C: FnMut(Cow<'a, str>)>( input: &'a str, config: &'a TokensProcessor, cb: C, ) { let script = detect_script_of_language(input); // If the script of the input is latin and we don't need to stem early, tokenize as-is. // This skips language detection, reduces overhead, and improves performance. if script_is_latin(script) { Self::tokenize_charabia(input, config, cb); return; } // If the script of the input is Japanese, use vaporetto to segment. if detect_language(input) == Some(Language::Jpn) { japanese::tokenize(input, config, cb); return; } Self::tokenize_charabia(input, config, cb); } // Tokenize input using charabia. Automatically applies stemming and filters stopwords if configured. fn tokenize_charabia<'a, C>(input: &'a str, tokens_processor: &'a TokensProcessor, mut cb: C) where C: FnMut(Cow<'a, str>), { for token in charabia_token_iter(input) { let lemma = token.lemma; if lemma.chars().all(|char| !char.is_alphabetic()) { // Skip tokens that are not alphanumeric. continue; } if let Some(processed_token) = tokens_processor.process_token_cow(lemma, true) { cb(processed_token); } } } } // Tokenize::tokenize() function from charabia unrolled due to lifetime issues // when using .tokenize() on a `str` directly. 
fn charabia_token_iter(inp: &str) -> NormalizedTokenIter<'_, '_, '_, '_> { inp.segment().normalize(&DEFAULT_NORMALIZER) } // Detect the script of the given input using charabia. fn detect_script_of_language(input: &str) -> Script { StrDetection::new(input, Some(SUPPORTED_LANGUAGES)).script() } // Detect the script of the given input using charabia. fn detect_language(input: &str) -> Option<charabia::Language> { StrDetection::new(input, Some(SUPPORTED_LANGUAGES)).language() } /// Returns `true` if the given `script` is latin. #[inline] fn script_is_latin(script: Script) -> bool { matches!(script, Script::Latin) } /// Languages that are supported by rust-stemmers and thus should be used in language detection white-list. /// Also includes Languages that we manually need to check against, such as Japanese and Chinese. const SUPPORTED_LANGUAGES: &[charabia::Language] = &[ charabia::Language::Eng, charabia::Language::Rus, charabia::Language::Por, charabia::Language::Ita, charabia::Language::Deu, charabia::Language::Ara, charabia::Language::Dan, charabia::Language::Swe, charabia::Language::Fin, charabia::Language::Tur, charabia::Language::Nld, charabia::Language::Hun, charabia::Language::Ell, charabia::Language::Tam, charabia::Language::Ron, charabia::Language::Cmn, charabia::Language::Jpn, ]; #[cfg(test)] mod test { use charabia::Language; use super::*; use crate::data_types::index::{SnowballLanguage, SnowballParams, StemmingAlgorithm}; use crate::index::field_index::full_text_index::tokenizers::stemmer::Stemmer; #[test] fn test_lang_detection() { // Japanese let input = "日本語のテキストです。Qdrantのコードで単体テストで使用されています。"; assert_eq!(detect_language(input), Some(Language::Jpn)); let input = "This is english text. It's being used within Qdrant's code in a unit test."; assert_eq!(detect_language(input), Some(Language::Eng)); let input = "Das ist ein deutscher Text. 
Er wird in Qdrants code in einem unit Test benutzt."; // codespell:ignore ist assert_eq!(detect_language(input), Some(Language::Deu)); // Chinese traditional let input = "這是一段德文文本。它用於 Qdrant 程式碼的單元測試中。"; assert_eq!(detect_language(input), Some(Language::Cmn)); // Chinese simplified let input = "这是一段德语文本。它用于 Qdrant 代码的单元测试中。"; assert_eq!(detect_language(input), Some(Language::Cmn)); } #[test] fn test_script_detection() { let input = "日本語のテキストです。Qdrantのコードで単体テストで使用されています。"; assert!(!script_is_latin(detect_script_of_language(input))); let input = "This is english text. It's being used within Qdrant's code in a unit test."; assert!(script_is_latin(detect_script_of_language(input))); let input = "Das ist ein deutscher Text. Er wird in Qdrants code in einem unit Test benutzt."; // codespell:ignore ist assert!(script_is_latin(detect_script_of_language(input))); } fn assert_tokenization(inp: &str, expected: &str) { let tokens_processor = TokensProcessor::default(); let mut out = vec![]; MultilingualTokenizer::tokenize(inp, &tokens_processor, |i| out.push(i.to_string())); let expected: Vec<_> = expected.split('|').collect(); for i in out.iter().zip(expected.iter()) { assert_eq!(i.0, i.1); } assert_eq!(out, expected) } #[test] fn test_multilingual_tokenization() { assert_tokenization("This is a test", "this|is|a|test"); assert_tokenization( "This is english text. 
It's being used within Qdrant's code in a unit test.", "this|is|english|text|it|s|being|used|within|qdrant|s|code|in|a|unit|test", ); assert_tokenization("Dies ist ein Test", "dies|ist|ein|test"); // codespell:ignore ist assert_tokenization("これはテストです", "これ|は|テスト|です"); assert_tokenization( "日本語のテキストです。Qdrantのコードで単体テストで使用されています。", "日本|語|の|テキスト|です|Qdrant|の|コード|で|単体|テスト|で|使用|さ|れ|て|い|ます", ); } #[test] fn test_multilingual_stemming() { let tokens_processor = TokensProcessor::new( true, false, Default::default(), Some(Stemmer::from_algorithm(&StemmingAlgorithm::Snowball( SnowballParams { r#type: Default::default(), language: SnowballLanguage::English, }, ))), None, None, ); let input = "Testing this"; let mut out = vec![]; MultilingualTokenizer::tokenize(input, &tokens_processor, |i| out.push(i.to_string())); assert_eq!(out, vec!["test", "this"]); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/tokenizers/mod.rs
lib/segment/src/index/field_index/full_text_index/tokenizers/mod.rs
use std::borrow::Cow; use std::sync::Arc; mod ascii_folding; mod japanese; mod multilingual; mod stemmer; pub mod tokens_processor; use multilingual::MultilingualTokenizer; pub use stemmer::Stemmer; pub use tokens_processor::TokensProcessor; use crate::data_types::index::{TextIndexParams, TokenizerType}; use crate::index::field_index::full_text_index::stop_words::StopwordsFilter; struct WhiteSpaceTokenizer; impl WhiteSpaceTokenizer { fn tokenize<'a, C: FnMut(Cow<'a, str>)>( text: &'a str, tokens_processor: &TokensProcessor, mut callback: C, ) { for token in text.split_whitespace() { let Some(token_cow) = tokens_processor.process_token(token, true) else { continue; }; callback(token_cow); } } } struct WordTokenizer; impl WordTokenizer { fn tokenize<'a, C: FnMut(Cow<'a, str>)>( text: &'a str, tokens_processor: &TokensProcessor, mut callback: C, ) { for token in text.split(|c| !char::is_alphanumeric(c)) { let Some(token_cow) = tokens_processor.process_token(token, true) else { continue; }; callback(token_cow); } } } struct PrefixTokenizer; impl PrefixTokenizer { fn tokenize<'a, C: FnMut(Cow<'a, str>)>( text: &'a str, tokens_processor: &TokensProcessor, mut callback: C, ) { let min_ngram = tokens_processor.min_token_len.unwrap_or(1); let max_ngram = tokens_processor.max_token_len.unwrap_or(usize::MAX); text.split(|c| !char::is_alphanumeric(c)).for_each(|word| { let Some(word_cow) = tokens_processor.process_token(word, false) else { return; }; for n in min_ngram..=max_ngram { let ngram = word_cow.as_ref().char_indices().map(|(i, _)| i).nth(n); match ngram { Some(end) => callback(truncate_cow_ref(&word_cow, end)), None => { callback(word_cow); break; } } } }); } /// For querying prefixes, it makes sense to use a maximal ngram only. /// E.g. /// /// Warn: Stopwords filter is not applied here, as if we want to start searching /// for matches before the full query is typed, we need to allow search on partial words. 
/// /// For example: /// /// Document: `["theory" -> ["th", "the", "theo", "theor", "theory"]]` /// Stopwords: `["the"]` /// Query: `"the"` -> should match "theory" as it is a prefix. /// /// Docs. tokens: `"hello"` -> `["he", "hel", "hell", "hello"]` /// Query tokens: `"hel"` -> `["hel"]` /// Query tokens: `"hell"` -> `["hell"]` /// Query tokens: `"hello"` -> `["hello"]` fn tokenize_query<'a, C: FnMut(Cow<'a, str>)>( text: &'a str, tokens_processor: &TokensProcessor, mut callback: C, ) { let max_ngram = tokens_processor.max_token_len.unwrap_or(usize::MAX); text.split(|c| !char::is_alphanumeric(c)) .filter(|token| !token.is_empty()) .for_each(|word| { // Apply ASCII folding if enabled let mut word_cow = tokens_processor.fold_if_enabled(Cow::Borrowed(word)); // Handle lowercase if tokens_processor.lowercase { word_cow = Cow::Owned(word_cow.to_lowercase()); } let word_cow = tokens_processor.stem_if_enabled(word_cow); if tokens_processor .min_token_len .is_some_and(|min_len| word_cow.chars().count() < min_len) { // Tokens shorter than min_token_len don't exist in the index return; } let ngram = word_cow.char_indices().map(|(i, _)| i).nth(max_ngram); match ngram { Some(end) => callback(truncate_cow(word_cow, end)), None => { callback(word_cow); } } }); } } /// Truncates a string inside a `Cow<str>` to the given `len` preserving the `Borrowed` and `Owned` state. fn truncate_cow<'a>(inp: Cow<'a, str>, len: usize) -> Cow<'a, str> { match inp { Cow::Borrowed(b) => Cow::Borrowed(&b[..len]), Cow::Owned(mut b) => { b.truncate(len); Cow::Owned(b) } } } /// Truncates a string inside a `&Cow<str>` to the given `len` preserving the `Borrowed` and `Owned` state. /// `truncate_cow` should be preferred over this function if Cow doesn't need to be passed as reference. 
fn truncate_cow_ref<'a>(inp: &Cow<'a, str>, len: usize) -> Cow<'a, str> { match inp { Cow::Borrowed(b) => Cow::Borrowed(&b[..len]), Cow::Owned(b) => Cow::Owned(b[..len].to_string()), } } #[derive(Debug, Clone)] pub struct Tokenizer { tokenizer_type: TokenizerType, tokens_processor: TokensProcessor, } impl Tokenizer { pub fn new_from_text_index_params(params: &TextIndexParams) -> Self { let TextIndexParams { r#type: _, tokenizer, min_token_len, max_token_len, lowercase, ascii_folding, on_disk: _, phrase_matching: _, stopwords, stemmer, } = params; let lowercase = lowercase.unwrap_or(true); let ascii_folding = ascii_folding.unwrap_or(false); let stopwords_filter = Arc::new(StopwordsFilter::new(stopwords, lowercase)); let tokens_processor = TokensProcessor::new( lowercase, ascii_folding, stopwords_filter, stemmer.as_ref().map(Stemmer::from_algorithm), *min_token_len, *max_token_len, ); Self::new(*tokenizer, tokens_processor) } pub fn new(tokenizer_type: TokenizerType, tokens_processor: TokensProcessor) -> Self { Self { tokenizer_type, tokens_processor, } } pub fn tokenize_doc<'a, C: FnMut(Cow<'a, str>)>(&'a self, text: &'a str, callback: C) { match self.tokenizer_type { TokenizerType::Whitespace => { WhiteSpaceTokenizer::tokenize(text, &self.tokens_processor, callback) } TokenizerType::Word => WordTokenizer::tokenize(text, &self.tokens_processor, callback), TokenizerType::Multilingual => { MultilingualTokenizer::tokenize(text, &self.tokens_processor, callback) } TokenizerType::Prefix => { PrefixTokenizer::tokenize(text, &self.tokens_processor, callback) } } } pub fn tokenize_query<'a, C: FnMut(Cow<'a, str>)>(&'a self, text: &'a str, callback: C) { match self.tokenizer_type { TokenizerType::Whitespace => { WhiteSpaceTokenizer::tokenize(text, &self.tokens_processor, callback) } TokenizerType::Word => WordTokenizer::tokenize(text, &self.tokens_processor, callback), TokenizerType::Multilingual => { MultilingualTokenizer::tokenize(text, &self.tokens_processor, callback) } 
TokenizerType::Prefix => { PrefixTokenizer::tokenize_query(text, &self.tokens_processor, callback) } } } } #[cfg(test)] mod tests { use std::default::Default; use itertools::Itertools; use super::*; use crate::data_types::index::{ Language, Snowball, SnowballLanguage, SnowballParams, StemmingAlgorithm, StopwordsInterface, TextIndexType, }; fn make_stemmer(language: SnowballLanguage) -> Stemmer { Stemmer::from_algorithm(&StemmingAlgorithm::Snowball(SnowballParams { r#type: Snowball::Snowball, language, })) } #[test] fn test_whitespace_tokenizer() { let text = "hello world"; let tokens_processor = TokensProcessor::default(); let mut tokens = Vec::new(); WhiteSpaceTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); assert_eq!(tokens.len(), 2); assert_eq!(tokens.first(), Some(&Cow::Borrowed("hello"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("world"))); } #[test] fn test_word_tokenizer() { let text = "hello, world! Привет, мир!"; let mut tokens_processor = TokensProcessor::default(); let mut tokens = Vec::new(); WordTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); assert_eq!(tokens.len(), 4); assert_eq!(tokens.first(), Some(&Cow::Borrowed("hello"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("world"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("Привет"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("мир"))); tokens.clear(); tokens_processor.lowercase = true; WordTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); assert_eq!(tokens.len(), 4); assert_eq!(tokens.first(), Some(&Cow::Borrowed("hello"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("world"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("привет"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("мир"))); } #[test] fn test_prefix_tokenizer() { let text = "hello, мир!"; let tokens_processor = TokensProcessor::new(true, false, Default::default(), None, Some(1), Some(4)); let mut tokens = Vec::new(); 
PrefixTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 7); assert_eq!(tokens.first(), Some(&Cow::Borrowed("h"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("he"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("hel"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("hell"))); assert_eq!(tokens.get(4), Some(&Cow::Borrowed("м"))); assert_eq!(tokens.get(5), Some(&Cow::Borrowed("ми"))); assert_eq!(tokens.get(6), Some(&Cow::Borrowed("мир"))); } #[test] fn test_prefix_query_tokenizer() { let text = "hello, мир!"; let tokens_processor = TokensProcessor::new(true, false, Default::default(), None, None, Some(4)); let mut tokens = Vec::new(); PrefixTokenizer::tokenize_query(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 2); assert_eq!(tokens.first(), Some(&Cow::Borrowed("hell"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("мир"))); } #[test] fn test_multilingual_tokenizer_japanese() { let text = "本日の日付は"; let tokens_processor = TokensProcessor::default(); let mut tokens = Vec::new(); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 4); assert_eq!(tokens.first(), Some(&Cow::Borrowed("本日"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("の"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("日付"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("は"))); tokens.clear(); // Test stopwords getting applied let filter = StopwordsFilter::new(&Some(StopwordsInterface::new_custom(&["の", "は"])), false); let tokens_processor = TokensProcessor::new(true, false, Arc::new(filter), None, None, None); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 2); assert_eq!(tokens.first(), Some(&Cow::Borrowed("本日"))); assert_eq!(tokens.get(1), 
Some(&Cow::Borrowed("日付"))); } #[test] fn test_multilingual_tokenizer_chinese() { let text = "今天是星期一"; let tokens_processor = TokensProcessor::default(); let mut tokens = Vec::new(); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 4); assert_eq!(tokens.first(), Some(&Cow::Borrowed("今天"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("是"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("星期"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("一"))); tokens.clear(); // Test stopwords getting applied let filter = StopwordsFilter::new(&Some(StopwordsInterface::new_custom(&["是"])), false); let tokens_processor = TokensProcessor::new(true, false, Arc::new(filter), None, None, None); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 3); assert_eq!(tokens.first(), Some(&Cow::Borrowed("今天"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("星期"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("一"))); } #[test] fn test_multilingual_tokenizer_thai() { let text = "มาทำงานกันเถอะ"; let mut tokens = Vec::new(); let tokens_processor = TokensProcessor::default(); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 4); assert_eq!(tokens.first(), Some(&Cow::Borrowed("มา"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("ทางาน"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("กน"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("เถอะ"))); } #[test] fn test_multilingual_tokenizer_english() { let text = "What are you waiting for?"; let tokens_processor = TokensProcessor::default(); let mut tokens = Vec::new(); MultilingualTokenizer::tokenize(text, &tokens_processor, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 5); assert_eq!(tokens.first(), 
Some(&Cow::Borrowed("what"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("are"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("you"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("waiting"))); assert_eq!(tokens.get(4), Some(&Cow::Borrowed("for"))); } #[test] fn test_tokenizer() { let text = "Hello, Мир!"; let mut tokens = Vec::new(); let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Prefix, min_token_len: Some(1), max_token_len: Some(4), lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: None, stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); assert_eq!(tokens.len(), 7); assert_eq!(tokens.first(), Some(&Cow::Borrowed("h"))); assert_eq!(tokens.get(1), Some(&Cow::Borrowed("he"))); assert_eq!(tokens.get(2), Some(&Cow::Borrowed("hel"))); assert_eq!(tokens.get(3), Some(&Cow::Borrowed("hell"))); assert_eq!(tokens.get(4), Some(&Cow::Borrowed("м"))); assert_eq!(tokens.get(5), Some(&Cow::Borrowed("ми"))); assert_eq!(tokens.get(6), Some(&Cow::Borrowed("мир"))); } #[test] fn test_tokenizer_with_language_stopwords() { use crate::data_types::index::Language; let text = "The quick brown fox jumps over the lazy dog"; let mut tokens = Vec::new(); let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::Language(Language::English)), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // Check that stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("the"))); assert!(!tokens.contains(&Cow::Borrowed("over"))); // Check that non-stopwords are 
present assert!(tokens.contains(&Cow::Borrowed("quick"))); assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("fox"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("lazy"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); } #[test] fn test_tokenizer_can_handle_apostrophes_parametrized() { use crate::data_types::index::TokenizerType; let text = "you'll be in town"; let tokenizer_types = [ TokenizerType::Word, TokenizerType::Whitespace, TokenizerType::Prefix, ]; for &tokenizer_type in &tokenizer_types { let mut tokens = Vec::new(); let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: tokenizer_type, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::Language(Language::English)), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); // Check that stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("you"))); assert!(!tokens.contains(&Cow::Borrowed("ll"))); assert!(!tokens.contains(&Cow::Borrowed("you'll"))); // Check that non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("town"))); } } #[test] fn test_tokenizer_with_mixed_stopwords() { let text = "The quick brown fox jumps over the lazy dog"; let mut tokens = Vec::new(); use crate::data_types::index::Language; let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::new_set( &[Language::English], &["quick", "fox"], )), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // Check that 
English stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("the"))); assert!(!tokens.contains(&Cow::Borrowed("over"))); // Check that custom stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("quick"))); assert!(!tokens.contains(&Cow::Borrowed("fox"))); // Check that non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("lazy"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); } #[test] fn test_tokenizer_with_custom_stopwords_as_the_a() { let text = "The quick brown fox jumps over the lazy dog as a test"; let mut tokens = Vec::new(); let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::new_custom(&["as", "the", "a"])), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("as"))); assert!(!tokens.contains(&Cow::Borrowed("the"))); assert!(!tokens.contains(&Cow::Borrowed("a"))); // non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("quick"))); assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("fox"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("over"))); assert!(tokens.contains(&Cow::Borrowed("lazy"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); assert!(tokens.contains(&Cow::Borrowed("test"))); } #[test] fn test_tokenizer_with_english_stopwords_string() { let text = "The quick brown fox jumps over the lazy dog"; let mut tokens = Vec::new(); use crate::data_types::index::Language; let params = TextIndexParams { r#type: 
TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::Language(Language::English)), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // Check that English stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("the"))); assert!(!tokens.contains(&Cow::Borrowed("over"))); // Check that non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("quick"))); assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("fox"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("lazy"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); } #[test] fn test_tokenizer_with_languages_english_spanish_custom_aaa() { let text = "The quick brown fox jumps over the lazy dog I'd y de"; let mut tokens = Vec::new(); use crate::data_types::index::Language; let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::new_set( &[Language::English, Language::Spanish], &["I'd"], )), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // Check that English stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("the"))); assert!(!tokens.contains(&Cow::Borrowed("over"))); // Check that Spanish stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("y"))); assert!(!tokens.contains(&Cow::Borrowed("de"))); // Check that custom stopwords are filtered out 
assert!(!tokens.contains(&Cow::Borrowed("i'd"))); // Check that non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("quick"))); assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("fox"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("lazy"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); } #[test] fn test_tokenizer_with_case_sensitive_stopwords() { let text = "The quick brown fox jumps over the lazy dog"; let mut tokens = Vec::new(); let params = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(false), // Case sensitivity is enabled ascii_folding: None, on_disk: None, phrase_matching: None, stopwords: Some(StopwordsInterface::new_custom(&["the", "The", "LAZY"])), stemmer: None, }; let tokenizer = Tokenizer::new_from_text_index_params(&params); tokenizer.tokenize_doc(text, |token| tokens.push(token)); eprintln!("tokens = {tokens:#?}"); // Check that exact case stopwords are filtered out assert!(!tokens.contains(&Cow::Borrowed("The"))); assert!(!tokens.contains(&Cow::Borrowed("the"))); // Check that different case stopwords are not filtered out assert!(tokens.contains(&Cow::Borrowed("lazy"))); // "LAZY" is in stopwords, but "lazy" is not // Check that non-stopwords are present assert!(tokens.contains(&Cow::Borrowed("quick"))); assert!(tokens.contains(&Cow::Borrowed("brown"))); assert!(tokens.contains(&Cow::Borrowed("fox"))); assert!(tokens.contains(&Cow::Borrowed("jumps"))); assert!(tokens.contains(&Cow::Borrowed("over"))); assert!(tokens.contains(&Cow::Borrowed("dog"))); } #[test] fn test_ascii_folding_word_tokenizer_on_off() { let text = "ação café jalapeño Über"; let expected_disabled = ["ação", "café", "jalapeño", "über"] .into_iter() .map(str::to_string) .collect_vec(); let expected_enabled = ["acao", "cafe", "jalapeno", "uber"] .into_iter() .map(str::to_string) 
.collect_vec(); // ascii_folding disabled (default) let params_disabled = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: Some(false), on_disk: None, phrase_matching: None, stopwords: None, stemmer: None, }; let tokenizer_disabled = Tokenizer::new_from_text_index_params(&params_disabled); let mut tokens_disabled = Vec::new(); tokenizer_disabled.tokenize_doc(text, |token| tokens_disabled.push(token.to_string())); assert_eq!(tokens_disabled, expected_disabled); // ascii_folding enabled let params_enabled = TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::Word, min_token_len: None, max_token_len: None, lowercase: Some(true), ascii_folding: Some(true), on_disk: None, phrase_matching: None, stopwords: None, stemmer: None, }; let tokenizer_enabled = Tokenizer::new_from_text_index_params(&params_enabled); let mut tokens_enabled = Vec::new(); tokenizer_enabled.tokenize_doc(text, |token| tokens_enabled.push(token.to_string())); assert_eq!(tokens_enabled, expected_enabled); } #[test] fn test_ascii_folding_prefix_tokenizer() { let text = "ação"; // With folding disabled: prefixes should preserve accents let tokens_processor_disabled = TokensProcessor::new(true, false, Default::default(), None, Some(1), Some(4)); let mut tokens_disabled = Vec::new(); PrefixTokenizer::tokenize(text, &tokens_processor_disabled, |t| { tokens_disabled.push(t.to_string()) }); assert!( tokens_disabled.contains(&"a".to_string()) || tokens_disabled.contains(&"a".to_string()) ); // Because the first char is 'a', but next prefixes should include accented letters assert!( tokens_disabled.iter().any(|t| t.starts_with("aç")) || tokens_disabled.iter().any(|t| t.contains('ç')) ); // With folding enabled: prefixes should be ASCII-only (acao, acao prefixes) let tokens_processor_enabled = TokensProcessor::new(true, true, Default::default(), None, Some(1), Some(4)); let mut 
tokens_enabled = Vec::new(); PrefixTokenizer::tokenize(text, &tokens_processor_enabled, |t| { tokens_enabled.push(t.to_string()) }); // We expect prefixes like a, ac, aca, acao assert!(tokens_enabled.contains(&"a".to_string())); assert!(tokens_enabled.contains(&"ac".to_string())); assert!(tokens_enabled.contains(&"aca".to_string())); assert!(tokens_enabled.contains(&"acao".to_string())); assert!(tokens_enabled.iter().all(|t| t.is_ascii())); } #[test] fn test_stemming_snowball() { let input = "interestingly proceeding living"; let mut tokens_processor = TokensProcessor::new( true, false, Default::default(), Some(make_stemmer(SnowballLanguage::English)), None, None, ); let mut out = Vec::new(); WhiteSpaceTokenizer::tokenize(input, &tokens_processor, |i| out.push(i.to_string())); assert_eq!(out, vec!["interest", "proceed", "live"]); out.clear(); WordTokenizer::tokenize(input, &tokens_processor, |i| out.push(i.to_string())); assert_eq!(out, vec!["interest", "proceed", "live"]); out.clear(); MultilingualTokenizer::tokenize(input, &tokens_processor, |i| out.push(i.to_string())); assert_eq!(out, vec!["interest", "proceed", "live"]); out.clear(); tokens_processor.min_token_len = Some(3); tokens_processor.max_token_len = Some(4); PrefixTokenizer::tokenize(input, &tokens_processor, |i| out.push(i.to_string())); assert_eq!(out, vec!["int", "inte", "pro", "proc", "liv", "live"]); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/indonesian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/indonesian.rs
/// Indonesian stop words for the full-text index.
///
/// Low-information tokens that the stop-words filter drops before indexing /
/// querying. Entries are plain lowercase tokens matched exactly against
/// tokenizer output.
///
/// Fix: removed an accidental back-to-back duplicate of "sekarang" that was
/// present in the original list (harmless at lookup time, but redundant data).
pub const INDONESIAN_STOPWORDS: &[&str] = &[
    "ada", "adalah", "adanya", "adapun", "agak", "agaknya", "agar", "akan", "akankah", "akhir", "akhiri", "akhirnya", "aku", "akulah", "amat", "amatlah", "anda", "andalah", "antar", "antara", "antaranya", "apa", "apaan", "apabila", "apakah", "apalagi", "apatah", "artinya", "asal", "asalkan", "atas", "atau", "ataukah", "ataupun", "awal", "awalnya", "bagai", "bagaikan", "bagaimana", "bagaimanakah", "bagaimanapun", "bagi", "bagian", "bahkan", "bahwa", "bahwasanya", "baik", "bakal", "bakalan", "balik", "banyak", "bapak", "baru", "bawah", "beberapa", "begini", "beginian", "beginikah", "beginilah", "begitu", "begitukah", "begitulah", "begitupun", "bekerja", "belakang", "belakangan", "belum", "belumlah", "benar", "benarkah", "benarlah", "berada", "berakhir", "berakhirlah", "berakhirnya", "berapa", "berapakah", "berapalah", "berapapun", "berarti", "berawal", "berbagai", "berdatangan", "beri", "berikan", "berikut", "berikutnya", "berjumlah", "berkali-kali", "berkata", "berkehendak", "berkeinginan", "berkenaan", "berlainan", "berlalu", "berlangsung", "berlebihan", "bermacam", "bermacam-macam", "bermaksud", "bermula", "bersama", "bersama-sama", "bersiap", "bersiap-siap", "bertanya", "bertanya-tanya", "berturut", "berturut-turut", "bertutur", "berujar", "berupa", "besar", "betul", "betulkah", "biasa", "biasanya", "bila", "bilakah", "bisa", "bisakah", "boleh", "bolehkah", "bolehlah", "buat", "bukan", "bukankah", "bukanlah", "bukannya", "bulan", "bung", "cara", "caranya", "cukup", "cukupkah", "cukuplah", "cuma", "dahulu", "dalam", "dan", "dapat", "dari", "daripada", "datang", "dekat", "demi", "demikian", "demikianlah", "dengan", "depan", "di", "dia", "diakhiri", "diakhirinya", "dialah", "diantara", "diantaranya", "diberi", "diberikan", "diberikannya", "dibuat", "dibuatnya", "didapat", "didatangkan", "digunakan", "diibaratkan", "diibaratkannya", "diingat", "diingatkan", "diinginkan", "dijawab", "dijelaskan", "dijelaskannya", "dikarenakan",
    "dikatakan", "dikatakannya", "dikerjakan", "diketahui", "diketahuinya", "dikira", "dilakukan", "dilalui", "dilihat", "dimaksud", "dimaksudkan", "dimaksudkannya", "dimaksudnya", "diminta", "dimintai", "dimisalkan", "dimulai", "dimulailah", "dimulainya", "dimungkinkan", "dini", "dipastikan", "diperbuat", "diperbuatnya", "dipergunakan", "diperkirakan", "diperlihatkan", "diperlukan", "diperlukannya", "dipersoalkan", "dipertanyakan", "dipunyai", "diri", "dirinya", "disampaikan", "disebut", "disebutkan", "disebutkannya", "disini", "disinilah", "ditambahkan", "ditandaskan", "ditanya", "ditanyai", "ditanyakan", "ditegaskan", "ditujukan", "ditunjuk", "ditunjuki", "ditunjukkan", "ditunjukkannya", "ditunjuknya", "dituturkan", "dituturkannya", "diucapkan", "diucapkannya", "diungkapkan", "dong", "dua", "dulu", "empat", "enggak", "enggaknya", "entah", "entahlah", "guna", "gunakan", "hal", "hampir", "hanya", "hanyalah", "hari", "harus", "haruslah", "harusnya", "hendak", "hendaklah", "hendaknya", "hingga", "ia", "ialah", "ibarat", "ibaratkan", "ibaratnya", "ibu", "ikut", "ingat", "ingat-ingat", "ingin", "inginkah", "inginkan", "ini", "inikah", "inilah", "itu", "itukah", "itulah", "jadi", "jadilah", "jadinya", "jangan", "jangankan", "janganlah", "jauh", "jawab", "jawaban", "jawabnya", "jelas", "jelaskan", "jelaslah", "jelasnya", "jika", "jikalau", "juga", "jumlah", "jumlahnya", "justru", "kala", "kalau", "kalaulah", "kalaupun", "kalian", "kami", "kamilah", "kamu", "kamulah", "kan", "kapan", "kapankah", "kapanpun", "karena", "karenanya", "kasus", "kata", "katakan", "katakanlah", "katanya", "ke", "keadaan", "kebetulan", "kecil", "kedua", "keduanya", "keinginan", "kelamaan", "kelihatan", "kelihatannya", "kelima", "keluar", "kembali", "kemudian", "kemungkinan", "kemungkinannya", "kenapa", "kepada", "kepadanya", "kesampaian", "keseluruhan", "keseluruhannya", "keterlaluan", "ketika", "khususnya", "kini", "kinilah", "kira", "kira-kira", "kiranya", "kita", "kitalah", "kok", "kurang",
    "lagi", "lagian", "lah", "lain", "lainnya", "lalu", "lama", "lamanya", "lanjut", "lanjutnya", "lebih", "lewat", "lima", "luar", "macam", "maka", "makanya", "makin", "malah", "malahan", "mampu", "mampukah", "mana", "manakala", "manalagi", "masa", "masalah", "masalahnya", "masih", "masihkah", "masing", "masing-masing", "mau", "maupun", "melainkan", "melakukan", "melalui", "melihat", "melihatnya", "memang", "memastikan", "memberi", "memberikan", "membuat", "memerlukan", "memihak", "meminta", "memintakan", "memisalkan", "memperbuat", "mempergunakan", "memperkirakan", "memperlihatkan", "mempersiapkan", "mempersoalkan", "mempertanyakan", "mempunyai", "memulai", "memungkinkan", "menaiki", "menambahkan", "menandaskan", "menanti", "menanti-nanti", "menantikan", "menanya", "menanyai", "menanyakan", "mendapat", "mendapatkan", "mendatang", "mendatangi", "mendatangkan", "menegaskan", "mengakhiri", "mengapa", "mengatakan", "mengatakannya", "mengenai", "mengerjakan", "mengetahui", "menggunakan", "menghendaki", "mengibaratkan", "mengibaratkannya", "mengingat", "mengingatkan", "menginginkan", "mengira", "mengucapkan", "mengucapkannya", "mengungkapkan", "menjadi", "menjawab", "menjelaskan", "menuju", "menunjuk", "menunjuki", "menunjukkan", "menunjuknya", "menurut", "menuturkan", "menyampaikan", "menyangkut", "menyatakan", "menyebutkan", "menyeluruh", "menyiapkan", "merasa", "mereka", "merekalah", "merupakan", "meski", "meskipun", "meyakini", "meyakinkan", "minta", "mirip", "misal", "misalkan", "misalnya", "mula", "mulai", "mulailah", "mulanya", "mungkin", "mungkinkah", "nah", "naik", "namun", "nanti", "nantinya", "nyaris", "nyatanya", "oleh", "olehnya", "pada", "padahal", "padanya", "pak", "paling", "panjang", "pantas", "para", "pasti", "pastilah", "penting", "pentingnya", "per", "percuma", "perlu", "perlukah", "perlunya", "pernah", "persoalan", "pertama", "pertama-tama", "pertanyaan", "pertanyakan", "pihak", "pihaknya", "pukul", "pula", "pun", "punya", "rasa", "rasanya", "rata",
    "rupanya", "saat", "saatnya", "saja", "sajalah", "saling", "sama", "sama-sama", "sambil", "sampai", "sampai-sampai", "sampaikan", "sana", "sangat", "sangatlah", "satu", "saya", "sayalah", "se", "sebab", "sebabnya", "sebagai", "sebagaimana", "sebagainya", "sebagian", "sebaik", "sebaik-baiknya", "sebaiknya", "sebaliknya", "sebanyak", "sebegini", "sebegitu", "sebelum", "sebelumnya", "sebenarnya", "seberapa", "sebesar", "sebetulnya", "sebisanya", "sebuah", "sebut", "sebutlah", "sebutnya", "secara", "secukupnya", "sedang", "sedangkan", "sedemikian", "sedikit", "sedikitnya", "seenaknya", "segala", "segalanya", "segera", "seharusnya", "sehingga", "seingat", "sejak", "sejauh", "sejenak", "sejumlah", "sekadar", "sekadarnya", "sekali", "sekali-kali", "sekalian", "sekaligus", "sekalipun", "sekarang", "sekecil", "seketika", "sekiranya", "sekitar", "sekitarnya", "sekurang-kurangnya", "sekurangnya", "sela", "selain", "selaku", "selalu", "selama", "selama-lamanya", "selamanya", "selanjutnya", "seluruh", "seluruhnya", "semacam", "semakin", "semampu", "semampunya", "semasa", "semasih", "semata", "semata-mata", "semaunya", "sementara", "semisal", "semisalnya", "sempat", "semua", "semuanya", "semula", "sendiri", "sendirian", "sendirinya", "seolah", "seolah-olah", "seorang", "sepanjang", "sepantasnya", "sepantasnyalah", "seperlunya", "seperti", "sepertinya", "sepihak", "sering", "seringnya", "serta", "serupa", "sesaat", "sesama", "sesampai", "sesegera", "sesekali", "seseorang", "sesuatu", "sesuatunya", "sesudah", "sesudahnya", "setelah", "setempat", "setengah", "seterusnya", "setiap", "setiba", "setibanya", "setidak-tidaknya", "setidaknya", "setinggi", "seusai", "sewaktu", "siap", "siapa", "siapakah", "siapapun", "sini", "sinilah", "soal", "soalnya", "suatu", "sudah", "sudahkah", "sudahlah", "supaya", "tadi", "tadinya", "tahu", "tahun", "tak", "tambah", "tambahnya", "tampak", "tampaknya", "tandas", "tandasnya", "tanpa", "tanya", "tanyakan", "tanyanya", "tapi", "tegas",
    "tegasnya", "telah", "tempat", "tengah", "tentang", "tentu", "tentulah", "tentunya", "tepat", "terakhir", "terasa", "terbanyak", "terdahulu", "terdapat", "terdiri", "terhadap", "terhadapnya", "teringat", "teringat-ingat", "terjadi", "terjadilah", "terjadinya", "terkira", "terlalu", "terlebih", "terlihat", "termasuk", "ternyata", "tersampaikan", "tersebut", "tersebutlah", "tertentu", "tertuju", "terus", "terutama", "tetap", "tetapi", "tiap", "tiba", "tiba-tiba", "tidak", "tidakkah", "tidaklah", "tiga", "tinggi", "toh", "tunjuk", "turut", "tutur", "tuturnya", "ucap", "ucapnya", "ujar", "ujarnya", "umum", "umumnya", "ungkap", "ungkapnya", "untuk", "usah", "usai", "waduh", "wah", "wahai", "waktu", "waktunya", "walau", "walaupun", "wong", "yaitu", "yakin", "yakni", "yang",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/german.rs
lib/segment/src/index/field_index/full_text_index/stop_words/german.rs
/// German stop words for the full-text index.
///
/// Low-information tokens dropped by the stop-words filter. Pure data table —
/// entries are kept byte-identical to the upstream list.
///
/// NOTE(review): "anderr" looks like a typo for "anderer", but the same entry
/// appears verbatim in common upstream German stop-word lists — confirm
/// against the source list before changing it.
pub const GERMAN_STOPWORDS: &[&str] = &[ "aber", "alle", "allem", "allen", "aller", "alles", "als", "also", "am", "an", "ander", "andere", "anderem", "anderen", "anderer", "anderes", "anderm", "andern", "anderr", "anders", "auch", "auf", "aus", "bei", "bin", "bis", "bist", "da", "damit", "dann", "der", "den", "des", "dem", "die", "das", "dass", "daß", "derselbe", "derselben", "denselben", "desselben", "demselben", "dieselbe", "dieselben", "dasselbe", "dazu", "dein", "deine", "deinem", "deinen", "deiner", "deines", "denn", "derer", "dessen", "dich", "dir", "du", "dies", "diese", "diesem", "diesen", "dieser", "dieses", "doch", "dort", "durch", "ein", "eine", "einem", "einen", "einer", "eines", "einig", "einige", "einigem", "einigen", "einiger", "einiges", "einmal", "er", "ihn", "ihm", "es", "etwas", "euer", "eure", "eurem", "euren", "eurer", "eures", "für", "gegen", "gewesen", "hab", "habe", "haben", "hat", "hatte", "hatten", "hier", "hin", "hinter", "ich", "mich", "mir", "ihr", "ihre", "ihrem", "ihren", "ihrer", "ihres", "euch", "im", "in", "indem", "ins", "ist", "jede", "jedem", "jeden", "jeder", "jedes", "jene", "jenem", "jenen", "jener", "jenes", "jetzt", "kann", "kein", "keine", "keinem", "keinen", "keiner", "keines", "können", "könnte", "machen", "man", "manche", "manchem", "manchen", "mancher", "manches", "mein", "meine", "meinem", "meinen", "meiner", "meines", "mit", "muss", "musste", "nach", "nicht", "nichts", "noch", "nun", "nur", "ob", "oder", "ohne", "sehr", "sein", "seine", "seinem", "seinen", "seiner", "seines", "selbst", "sich", "sie", "ihnen", "sind", "so", "solche", "solchem", "solchen", "solcher", "solches", "soll", "sollte", "sondern", "sonst", "über", "um", "und", "uns", "unsere", "unserem", "unseren", "unser", "unseres", "unter", "viel", "vom", "von", "vor", "während", "war", "waren", "warst", "was", "weg", "weil", "weiter", "welche", "welchem", "welchen", "welcher", "welches", "wenn", "werde", "werden", "wie", "wieder", "will", "wir", "wird",
"wirst", "wo", "wollen", "wollte", "würde", "würden", "zu", "zum", "zur", "zwar", "zwischen", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/spanish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/spanish.rs
/// Spanish stop words for the full-text index.
///
/// Low-information tokens dropped by the stop-words filter. Pure data table —
/// entries are kept byte-identical to the upstream list (includes conjugated
/// forms of estar/haber/ser/tener, which is why the list is long).
pub const SPANISH_STOPWORDS: &[&str] = &[ "de", "la", "que", "el", "en", "y", "a", "los", "del", "se", "las", "por", "un", "para", "con", "no", "una", "su", "al", "lo", "como", "más", "pero", "sus", "le", "ya", "o", "este", "sí", "porque", "esta", "entre", "cuando", "muy", "sin", "sobre", "también", "me", "hasta", "hay", "donde", "quien", "desde", "todo", "nos", "durante", "todos", "uno", "les", "ni", "contra", "otros", "ese", "eso", "ante", "ellos", "e", "esto", "mí", "antes", "algunos", "qué", "unos", "yo", "otro", "otras", "otra", "él", "tanto", "esa", "estos", "mucho", "quienes", "nada", "muchos", "cual", "poco", "ella", "estar", "estas", "algunas", "algo", "nosotros", "mi", "mis", "tú", "te", "ti", "tu", "tus", "ellas", "nosotras", "vosotros", "vosotras", "os", "mío", "mía", "míos", "mías", "tuyo", "tuya", "tuyos", "tuyas", "suyo", "suya", "suyos", "suyas", "nuestro", "nuestra", "nuestros", "nuestras", "vuestro", "vuestra", "vuestros", "vuestras", "esos", "esas", "estoy", "estás", "está", "estamos", "estáis", "están", "esté", "estés", "estemos", "estéis", "estén", "estaré", "estarás", "estará", "estaremos", "estaréis", "estarán", "estaría", "estarías", "estaríamos", "estaríais", "estarían", "estaba", "estabas", "estábamos", "estabais", "estaban", "estuve", "estuviste", "estuvo", "estuvimos", "estuvisteis", "estuvieron", "estuviera", "estuvieras", "estuviéramos", "estuvierais", "estuvieran", "estuviese", "estuvieses", "estuviésemos", "estuvieseis", "estuviesen", "estando", "estado", "estada", "estados", "estadas", "estad", "he", "has", "ha", "hemos", "habéis", "han", "haya", "hayas", "hayamos", "hayáis", "hayan", "habré", "habrás", "habrá", "habremos", "habréis", "habrán", "habría", "habrías", "habríamos", "habríais", "habrían", "había", "habías", "habíamos", "habíais", "habían", "hube", "hubiste", "hubo", "hubimos", "hubisteis", "hubieron", "hubiera", "hubieras", "hubiéramos", "hubierais", "hubieran", "hubiese", "hubieses", "hubiésemos", "hubieseis",
"hubiesen", "habiendo", "habido", "habida", "habidos", "habidas", "soy", "eres", "es", "somos", "sois", "son", "sea", "seas", "seamos", "seáis", "sean", "seré", "serás", "será", "seremos", "seréis", "serán", "sería", "serías", "seríamos", "seríais", "serían", "era", "eras", "éramos", "erais", "eran", "fui", "fuiste", "fue", "fuimos", "fuisteis", "fueron", "fuera", "fueras", "fuéramos", "fuerais", "fueran", "fuese", "fueses", "fuésemos", "fueseis", "fuesen", "sintiendo", "sentido", "sentida", "sentidos", "sentidas", "siente", "sentid", "tengo", "tienes", "tiene", "tenemos", "tenéis", "tienen", "tenga", "tengas", "tengamos", "tengáis", "tengan", "tendré", "tendrás", "tendrá", "tendremos", "tendréis", "tendrán", "tendría", "tendrías", "tendríamos", "tendríais", "tendrían", "tenía", "tenías", "teníamos", "teníais", "tenían", "tuve", "tuviste", "tuvo", "tuvimos", "tuvisteis", "tuvieron", "tuviera", "tuvieras", "tuviéramos", "tuvierais", "tuvieran", "tuviese", "tuvieses", "tuviésemos", "tuvieseis", "tuviesen", "teniendo", "tenido", "tenida", "tenidos", "tenidas", "tened", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/hinglish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/hinglish.rs
/// Hinglish (romanized Hindi mixed with English) stop words for the
/// full-text index.
///
/// Low-information tokens dropped by the stop-words filter. Pure data table —
/// entries are kept byte-identical to the upstream list. Mixes English stop
/// words with romanized Hindi function words, including common spelling
/// variants ("nahi"/"nahin"/"nhi").
///
/// NOTE(review): several entries contain apostrophes ("ain't", "can't",
/// "c'mon"); whether these can ever match depends on how the tokenizer
/// treats apostrophes — confirm against the tokenizer's behavior.
pub const HINGLISH_STOPWORDS: &[&str] = &[ "a", "aadi", "aaj", "aap", "aapne", "aata", "aati", "aaya", "aaye", "ab", "abbe", "abbey", "abe", "abhi", "able", "about", "above", "accha", "according", "accordingly", "acha", "achcha", "across", "actually", "after", "afterwards", "again", "against", "agar", "ain", "aint", "ain't", "aisa", "aise", "aisi", "alag", "all", "allow", "allows", "almost", "alone", "along", "already", "also", "although", "always", "am", "among", "amongst", "an", "and", "andar", "another", "any", "anybody", "anyhow", "anyone", "anything", "anyway", "anyways", "anywhere", "ap", "apan", "apart", "apna", "apnaa", "apne", "apni", "appear", "are", "aren", "arent", "aren't", "around", "arre", "as", "aside", "ask", "asking", "at", "aur", "avum", "aya", "aye", "baad", "baar", "bad", "bahut", "bana", "banae", "banai", "banao", "banaya", "banaye", "banayi", "banda", "bande", "bandi", "bane", "bani", "bas", "bata", "batao", "bc", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "best", "better", "between", "beyond", "bhai", "bheetar", "bhi", "bhitar", "bht", "bilkul", "bohot", "bol", "bola", "bole", "boli", "bolo", "bolta", "bolte", "bolti", "both", "brief", "bro", "btw", "but", "by", "came", "can", "cannot", "cant", "can't", "cause", "causes", "certain", "certainly", "chahiye", "chaiye", "chal", "chalega", "chhaiye", "clearly", "c'mon", "com", "come", "comes", "could", "couldn", "couldnt", "couldn't", "d", "de", "dede", "dega", "degi", "dekh", "dekha", "dekhe", "dekhi", "dekho", "denge", "dhang", "di", "did", "didn", "didnt", "didn't", "dijiye", "diya", "diyaa", "diye", "diyo", "do", "does", "doesn", "doesnt", "doesn't", "doing", "done", "dono", "dont", "don't", "doosra", "doosre", "down", "downwards", "dude", "dunga", "dungi", "during", "dusra", "dusre", "dusri", "dvaara", "dvara", "dwaara", "dwara", "each", "edu", "eg", "eight", "either", "ek", "else", "elsewhere",
"enough", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere", "ex", "exactly", "example", "except", "far", "few", "fifth", "fir", "first", "five", "followed", "following", "follows", "for", "forth", "four", "from", "further", "furthermore", "gaya", "gaye", "gayi", "get", "gets", "getting", "ghar", "given", "gives", "go", "goes", "going", "gone", "good", "got", "gotten", "greetings", "haan", "had", "hadd", "hadn", "hadnt", "hadn't", "hai", "hain", "hamara", "hamare", "hamari", "hamne", "han", "happens", "har", "hardly", "has", "hasn", "hasnt", "hasn't", "have", "haven", "havent", "haven't", "having", "he", "hello", "help", "hence", "her", "here", "hereafter", "hereby", "herein", "here's", "hereupon", "hers", "herself", "he's", "hi", "him", "himself", "his", "hither", "hm", "hmm", "ho", "hoga", "hoge", "hogi", "hona", "honaa", "hone", "honge", "hongi", "honi", "hopefully", "hota", "hotaa", "hote", "hoti", "how", "howbeit", "however", "hoyenge", "hoyengi", "hu", "hua", "hue", "huh", "hui", "hum", "humein", "humne", "hun", "huye", "huyi", "i", "i'd", "idk", "ie", "if", "i'll", "i'm", "imo", "in", "inasmuch", "inc", "inhe", "inhi", "inho", "inka", "inkaa", "inke", "inki", "inn", "inner", "inse", "insofar", "into", "inward", "is", "ise", "isi", "iska", "iskaa", "iske", "iski", "isme", "isn", "isne", "isnt", "isn't", "iss", "isse", "issi", "isski", "it", "it'd", "it'll", "itna", "itne", "itni", "itno", "its", "it's", "itself", "ityaadi", "ityadi", "i've", "ja", "jaa", "jab", "jabh", "jaha", "jahaan", "jahan", "jaisa", "jaise", "jaisi", "jata", "jayega", "jidhar", "jin", "jinhe", "jinhi", "jinho", "jinhone", "jinka", "jinke", "jinki", "jinn", "jis", "jise", "jiska", "jiske", "jiski", "jisme", "jiss", "jisse", "jitna", "jitne", "jitni", "jo", "just", "jyaada", "jyada", "k", "ka", "kaafi", "kab", "kabhi", "kafi", "kaha", "kahaa", "kahaan", "kahan", "kahi", "kahin", "kahte", "kaisa", "kaise", "kaisi", "kal", "kam", "kar", "kara", "kare",
"karega", "karegi", "karen", "karenge", "kari", "karke", "karna", "karne", "karni", "karo", "karta", "karte", "karti", "karu", "karun", "karunga", "karungi", "kaun", "kaunsa", "kayi", "kch", "ke", "keep", "keeps", "keh", "kehte", "kept", "khud", "ki", "kin", "kine", "kinhe", "kinho", "kinka", "kinke", "kinki", "kinko", "kinn", "kino", "kis", "kise", "kisi", "kiska", "kiske", "kiski", "kisko", "kisliye", "kisne", "kitna", "kitne", "kitni", "kitno", "kiya", "kiye", "know", "known", "knows", "ko", "koi", "kon", "konsa", "koyi", "krna", "krne", "kuch", "kuchch", "kuchh", "kul", "kull", "kya", "kyaa", "kyu", "kyuki", "kyun", "kyunki", "lagta", "lagte", "lagti", "last", "lately", "later", "le", "least", "lekar", "lekin", "less", "lest", "let", "let's", "li", "like", "liked", "likely", "little", "liya", "liye", "ll", "lo", "log", "logon", "lol", "look", "looking", "looks", "ltd", "lunga", "m", "maan", "maana", "maane", "maani", "maano", "magar", "mai", "main", "maine", "mainly", "mana", "mane", "mani", "mano", "many", "mat", "may", "maybe", "me", "mean", "meanwhile", "mein", "mera", "mere", "merely", "meri", "might", "mightn", "mightnt", "mightn't", "mil", "mjhe", "more", "moreover", "most", "mostly", "much", "mujhe", "must", "mustn", "mustnt", "mustn't", "my", "myself", "na", "naa", "naah", "nahi", "nahin", "nai", "name", "namely", "nd", "ne", "near", "nearly", "necessary", "neeche", "need", "needn", "neednt", "needn't", "needs", "neither", "never", "nevertheless", "new", "next", "nhi", "nine", "no", "nobody", "non", "none", "noone", "nope", "nor", "normally", "not", "nothing", "novel", "now", "nowhere", "o", "obviously", "of", "off", "often", "oh", "ok", "okay", "old", "on", "once", "one", "ones", "only", "onto", "or", "other", "others", "otherwise", "ought", "our", "ours", "ourselves", "out", "outside", "over", "overall", "own", "par", "pata", "pe", "pehla", "pehle", "pehli", "people", "per", "perhaps", "phla", "phle", "phli", "placed", "please", "plus", "poora",
"poori", "provides", "pura", "puri", "q", "que", "quite", "raha", "rahaa", "rahe", "rahi", "rakh", "rakha", "rakhe", "rakhen", "rakhi", "rakho", "rather", "re", "really", "reasonably", "regarding", "regardless", "regards", "rehte", "rha", "rhaa", "rhe", "rhi", "ri", "right", "s", "sa", "saara", "saare", "saath", "sab", "sabhi", "sabse", "sahi", "said", "sakta", "saktaa", "sakte", "sakti", "same", "sang", "sara", "sath", "saw", "say", "saying", "says", "se", "second", "secondly", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sensible", "sent", "serious", "seriously", "seven", "several", "shall", "shan", "shant", "shan't", "she", "she's", "should", "shouldn", "shouldnt", "shouldn't", "should've", "si", "since", "six", "so", "soch", "some", "somebody", "somehow", "someone", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "still", "sub", "such", "sup", "sure", "t", "tab", "tabh", "tak", "take", "taken", "tarah", "teen", "teeno", "teesra", "teesre", "teesri", "tell", "tends", "tera", "tere", "teri", "th", "tha", "than", "thank", "thanks", "thanx", "that", "that'll", "thats", "that's", "the", "theek", "their", "theirs", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "theres", "there's", "thereupon", "these", "they", "they'd", "they'll", "they're", "they've", "thi", "thik", "thing", "think", "thinking", "third", "this", "tho", "thoda", "thodi", "thorough", "thoroughly", "those", "though", "thought", "three", "through", "throughout", "thru", "thus", "tjhe", "to", "together", "toh", "too", "took", "toward", "towards", "tried", "tries", "true", "truly", "try", "trying", "tu", "tujhe", "tum", "tumhara", "tumhare", "tumhari", "tune", "twice", "two", "um", "umm", "un", "under", "unhe", "unhi", "unho", "unhone", "unka", "unkaa", "unke", "unki", "unko", "unless", "unlikely", "unn", "unse", "until", "unto", "up", "upar", "upon", "us", "use", "used", "useful", "uses",
"usi", "using", "uska", "uske", "usne", "uss", "usse", "ussi", "usually", "vaala", "vaale", "vaali", "vahaan", "vahan", "vahi", "vahin", "vaisa", "vaise", "vaisi", "vala", "vale", "vali", "various", "ve", "very", "via", "viz", "vo", "waala", "waale", "waali", "wagaira", "wagairah", "wagerah", "waha", "wahaan", "wahan", "wahi", "wahin", "waisa", "waise", "waisi", "wala", "wale", "wali", "want", "wants", "was", "wasn", "wasnt", "wasn't", "way", "we", "we'd", "well", "we'll", "went", "were", "we're", "weren", "werent", "weren't", "we've", "what", "whatever", "what's", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "where's", "whereupon", "wherever", "whether", "which", "while", "who", "whoever", "whole", "whom", "who's", "whose", "why", "will", "willing", "with", "within", "without", "wo", "woh", "wohi", "won", "wont", "won't", "would", "wouldn", "wouldnt", "wouldn't", "y", "ya", "yadi", "yah", "yaha", "yahaan", "yahan", "yahi", "yahin", "ye", "yeah", "yeh", "yehi", "yes", "yet", "you", "you'd", "you'll", "your", "you're", "yours", "yourself", "yourselves", "you've", "yup", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/greek.rs
lib/segment/src/index/field_index/full_text_index/stop_words/greek.rs
pub const GREEK_STOPWORDS: &[&str] = &[ "αλλα", "αν", "αντι", "απο", "αυτα", "αυτεσ", "αυτη", "αυτο", "αυτοι", "αυτοσ", "αυτουσ", "αυτων", "αἱ", "αἳ", "αἵ", "αὐτόσ", "αὐτὸς", "αὖ", "γάρ", "γα", "γα^", "γε", "για", "γοῦν", "γὰρ", "δ'", "δέ", "δή", "δαί", "δαίσ", "δαὶ", "δαὶς", "δε", "δεν", "δι'", "διά", "διὰ", "δὲ", "δὴ", "δ’", "εαν", "ειμαι", "ειμαστε", "ειναι", "εισαι", "ειστε", "εκεινα", "εκεινεσ", "εκεινη", "εκεινο", "εκεινοι", "εκεινοσ", "εκεινουσ", "εκεινων", "ενω", "επ", "επι", "εἰ", "εἰμί", "εἰμὶ", "εἰς", "εἰσ", "εἴ", "εἴμι", "εἴτε", "η", "θα", "ισωσ", "κ", "καί", "καίτοι", "καθ", "και", "κατ", "κατά", "κατα", "κατὰ", "καὶ", "κι", "κἀν", "κἂν", "μέν", "μή", "μήτε", "μα", "με", "μεθ", "μετ", "μετά", "μετα", "μετὰ", "μη", "μην", "μἐν", "μὲν", "μὴ", "μὴν", "να", "ο", "οι", "ομωσ", "οπωσ", "οσο", "οτι", "οἱ", "οἳ", "οἷς", "οὐ", "οὐδ", "οὐδέ", "οὐδείσ", "οὐδεὶς", "οὐδὲ", "οὐδὲν", "οὐκ", "οὐχ", "οὐχὶ", "οὓς", "οὔτε", "οὕτω", "οὕτως", "οὕτωσ", "οὖν", "οὗ", "οὗτος", "οὗτοσ", "παρ", "παρά", "παρα", "παρὰ", "περί", "περὶ", "ποια", "ποιεσ", "ποιο", "ποιοι", "ποιοσ", "ποιουσ", "ποιων", "ποτε", "που", "ποῦ", "προ", "προσ", "πρόσ", "πρὸ", "πρὸς", "πως", "πωσ", "σε", "στη", "στην", "στο", "στον", "σόσ", "σύ", "σύν", "σὸς", "σὺ", "σὺν", "τά", "τήν", "τί", "τίς", "τίσ", "τα", "ταῖς", "τε", "την", "τησ", "τι", "τινα", "τις", "τισ", "το", "τοί", "τοι", "τοιοῦτος", "τοιοῦτοσ", "τον", "τοτε", "του", "τούσ", "τοὺς", "τοῖς", "τοῦ", "των", "τό", "τόν", "τότε", "τὰ", "τὰς", "τὴν", "τὸ", "τὸν", "τῆς", "τῆσ", "τῇ", "τῶν", "τῷ", "ωσ", "ἀλλ'", "ἀλλά", "ἀλλὰ", "ἀλλ’", "ἀπ", "ἀπό", "ἀπὸ", "ἀφ", "ἂν", "ἃ", "ἄλλος", "ἄλλοσ", "ἄν", "ἄρα", "ἅμα", "ἐάν", "ἐγώ", "ἐγὼ", "ἐκ", "ἐμόσ", "ἐμὸς", "ἐν", "ἐξ", "ἐπί", "ἐπεὶ", "ἐπὶ", "ἐστι", "ἐφ", "ἐὰν", "ἑαυτοῦ", "ἔτι", "ἡ", "ἢ", "ἣ", "ἤ", "ἥ", "ἧς", "ἵνα", "ὁ", "ὃ", "ὃν", "ὃς", "ὅ", "ὅδε", "ὅθεν", "ὅπερ", "ὅς", "ὅσ", "ὅστις", "ὅστισ", "ὅτε", "ὅτι", "ὑμόσ", "ὑπ", "ὑπέρ", "ὑπό", "ὑπὲρ", "ὑπὸ", "ὡς", "ὡσ", "ὥς", "ὥστε", "ὦ", "ᾧ", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/hebrew.rs
lib/segment/src/index/field_index/full_text_index/stop_words/hebrew.rs
pub const HEBREW_STOPWORDS: &[&str] = &[ "אני", "את", "אתה", "אנחנו", "אתן", "אתם", "הם", "הן", "היא", "הוא", "שלי", "שלו", "שלך", "שלה", "שלנו", "שלכם", "שלכן", "שלהם", "שלהן", "לי", "לו", "לה", "לנו", "לכם", "לכן", "להם", "להן", "אותה", "אותו", "זה", "זאת", "אלה", "אלו", "תחת", "מתחת", "מעל", "בין", "עם", "עד", "נגר", "על", "אל", "מול", "של", "אצל", "כמו", "אחר", "אותו", "בלי", "לפני", "אחרי", "מאחורי", "עלי", "עליו", "עליה", "עליך", "עלינו", "עליכם", "לעיכן", "עליהם", "עליהן", "כל", "כולם", "כולן", "כך", "ככה", "כזה", "זה", "זות", "אותי", "אותה", "אותם", "אותך", "אותו", "אותן", "אותנו", "ואת", "את", "אתכם", "אתכן", "איתי", "איתו", "איתך", "איתה", "איתם", "איתן", "איתנו", "איתכם", "איתכן", "יהיה", "תהיה", "היתי", "היתה", "היה", "להיות", "עצמי", "עצמו", "עצמה", "עצמם", "עצמן", "עצמנו", "עצמהם", "עצמהן", "מי", "מה", "איפה", "היכן", "במקום שבו", "אם", "לאן", "למקום שבו", "מקום בו", "איזה", "מהיכן", "איך", "כיצד", "באיזו מידה", "מתי", "בשעה ש", "כאשר", "כש", "למרות", "לפני", "אחרי", "מאיזו סיבה", "הסיבה שבגללה", "למה", "מדוע", "לאיזו תכלית", "כי", "יש", "אין", "אך", "מנין", "מאין", "מאיפה", "יכל", "יכלה", "יכלו", "יכול", "יכולה", "יכולים", "יכולות", "יוכלו", "יוכל", "מסוגל", "לא", "רק", "אולי", "אין", "לאו", "אי", "כלל", "נגד", "אם", "עם", "אל", "אלה", "אלו", "אף", "על", "מעל", "מתחת", "מצד", "בשביל", "לבין", "באמצע", "בתוך", "דרך", "מבעד", "באמצעות", "למעלה", "למטה", "מחוץ", "מן", "לעבר", "מכאן", "כאן", "הנה", "הרי", "פה", "שם", "אך", "ברם", "שוב", "אבל", "מבלי", "בלי", "מלבד", "רק", "בגלל", "מכיוון", "עד", "אשר", "ואילו", "למרות", "אס", "כמו", "כפי", "אז", "אחרי", "כן", "לכן", "לפיכך", "מאד", "עז", "מעט", "מעטים", "במידה", "שוב", "יותר", "מדי", "גם", "כן", "נו", "אחר", "אחרת", "אחרים", "אחרות", "אשר", "או", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/kazakh.rs
lib/segment/src/index/field_index/full_text_index/stop_words/kazakh.rs
/// Kazakh stop words for the full-text index.
///
/// Low-information tokens dropped by the stop-words filter. Pure data table —
/// entries are kept byte-identical to the upstream list.
///
/// NOTE(review): the entry "сенен онан" contains a space and, given the
/// surrounding pronoun forms, looks like two words accidentally fused into
/// one entry — verify against the upstream source. The list also repeats
/// whole runs of entries (e.g. "үшін"…"таяу" appears twice) and individual
/// words ("күшім", "бәрекелді"); harmless for membership checks but worth
/// deduplicating against the source list.
pub const KAZAKH_STOPWORDS: &[&str] = &[ "ах", "ох", "эх", "ай", "эй", "ой", "тағы", "тағыда", "әрине", "жоқ", "сондай", "осындай", "осылай", "солай", "мұндай", "бұндай", "мен", "сен", "ол", "біз", "біздер", "олар", "сіз", "сіздер", "маған", "оған", "саған", "біздің", "сіздің", "оның", "бізге", "сізге", "оларға", "біздерге", "сіздерге", "оларға", "менімен", "сенімен", "онымен", "бізбен", "сізбен", "олармен", "біздермен", "сіздермен", "менің", "сенің", "біздің", "сіздің", "оның", "біздердің", "сіздердің", "олардың", "маған", "саған", "оған", "менен", "сенен", "одан", "бізден", "сізден", "олардан", "біздерден", "сіздерден", "олардан", "айтпақшы", "сонымен", "сондықтан", "бұл", "осы", "сол", "анау", "мынау", "сонау", "осынау", "ана", "мына", "сона", "әні", "міне", "өй", "үйт", "бүйт", "біреу", "кейбіреу", "кейбір", "қайсыбір", "әрбір", "бірнеше", "бірдеме", "бірнеше", "әркім", "әрне", "әрқайсы", "әрқалай", "әлдекім", "әлдене", "әлдеқайдан", "әлденеше", "әлдеқалай", "әлдеқашан", "алдақашан", "еш", "ешкім", "ешбір", "ештеме", "дәнеңе", "ешқашан", "ешқандай", "ешқайсы", "емес", "бәрі", "барлық", "барша", "бар", "күллі", "бүкіл", "түгел", "өз", "өзім", "өзің", "өзінің", "өзіме", "өзіне", "өзімнің", "өзі", "өзге", "менде", "сенде", "онда", "менен", "сенен онан", "одан", "ау", "па", "ей", "әй", "е", "уа", "уау", "уай", "я", "пай", "ә", "о", "оһо", "ой", "ие", "аһа", "ау", "беу", "мәссаған", "бәрекелді", "әттегенай", "жаракімалла", "масқарай", "астапыралла", "япырмай", "ойпырмай", "кәне", "кәнеки", "ал", "әйда", "кәні", "міне", "әні", "сорап", "қош-қош", "пфша", "пішә", "құрау-құрау", "шәйт", "шек", "моһ", "тәк", "құрау", "құр", "кә", "кәһ", "күшім", "күшім", "мышы", "пырс", "әукім", "алақай", "паһ-паһ", "бәрекелді", "ура", "әттең", "әттеген-ай", "қап", "түге", "пішту", "шіркін", "алатау", "пай-пай", "үшін", "сайын", "сияқты", "туралы", "арқылы", "бойы", "бойымен", "шамалы", "шақты", "қаралы", "ғұрлы", "ғұрлым", "шейін", "дейін", "қарай", "таман", "салым", "тарта", "жуық",
"таяу", "гөрі", "бері", "кейін", "соң", "бұрын", "бетер", "қатар", "бірге", "қоса", "арс", "гүрс", "дүрс", "қорс", "тарс", "тырс", "ырс", "барқ", "борт", "күрт", "кірт", "морт", "сарт", "шырт", "дүңк", "күңк", "қыңқ", "мыңқ", "маңқ", "саңқ", "шаңқ", "шіңк", "сыңқ", "таңқ", "тыңқ", "ыңқ", "болп", "былп", "жалп", "желп", "қолп", "ірк", "ырқ", "сарт-сұрт", "тарс-тұрс", "арс-ұрс", "жалт-жалт", "жалт-жұлт", "қалт-қалт", "қалт-құлт", "қаңқ-қаңқ", "қаңқ-құңқ", "шаңқ-шаңқ", "шаңқ-шұңқ", "арбаң-арбаң", "бүгжең-бүгжең", "арсалаң-арсалаң", "ербелең-ербелең", "батыр-бұтыр", "далаң-далаң", "тарбаң-тарбаң", "қызараң-қызараң", "қаңғыр-күңгір", "қайқаң-құйқаң", "митың-митың", "салаң-сұлаң", "ыржың-тыржың", "бірақ", "алайда", "дегенмен", "әйтпесе", "әйткенмен", "себебі", "өйткені", "сондықтан", "үшін", "сайын", "сияқты", "туралы", "арқылы", "бойы", "бойымен", "шамалы", "шақты", "қаралы", "ғұрлы", "ғұрлым", "гөрі", "бері", "кейін", "соң", "бұрын", "бетер", "қатар", "бірге", "қоса", "шейін", "дейін", "қарай", "таман", "салым", "тарта", "жуық", "таяу", "арнайы", "осындай", "ғана", "қана", "тек", "әншейін", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/japanese.rs
lib/segment/src/index/field_index/full_text_index/stop_words/japanese.rs
// Source: https://github.com/stopwords-iso/stopwords-ja/blob/master/stopwords-ja.txt // Removed a few words and added additional tokens from our tokenizer. pub const JAPANESE_STOPWORDS: &[&str] = &[ "あそこ", "あっ", "あの", "あのかた", "あの人", "あり", "あります", "ある", "あれ", "い", "いう", "いっ", "います", "いる", "う", "うち", "え", "お", "おい", "おけ", "および", "おり", "おります", "か", "かた", "かつて", "から", "が", "き", "ここ", "こちら", "こと", "この", "これ", "これら", "さ", "さらに", "し", "しかし", "する", "ず", "せ", "せる", "そこ", "そして", "その", "その他", "その後", "それ", "それぞれ", "それで", "た", "ただし", "たち", "ため", "たり", "だ", "だっ", "だれ", "つ", "つい", "て", "で", "でき", "できる", "です", "では", "でも", "と", "という", "といった", "とき", "ところ", "として", "とともに", "とも", "と共に", "どこ", "どの", "な", "ない", "なお", "なかっ", "ながら", "なっ", "など", "なに", "なら", "なり", "なる", "なん", "に", "において", "における", "について", "にて", "によって", "により", "による", "に対して", "に対する", "に関する", "の", "ので", "のみ", "は", "ば", "へ", "ほか", "ほとんど", "ほど", "ます", "また", "または", "まで", "も", "もの", "ものの", "や", "よう", "よっ", "より", "よる", "ら", "られ", "られる", "る", "れ", "れる", "を", "ん", "人", "他", "何", "共", "及び", "対し", "対する", "彼", "後", "我々", "方", "特に", "私", "私達", "貴方", "貴方方", "達", "関する", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/nepali.rs
lib/segment/src/index/field_index/full_text_index/stop_words/nepali.rs
/// Nepali stop words for the full-text index.
///
/// Low-information tokens dropped by the stop-words filter. Pure data table —
/// entries are kept byte-identical to the upstream list.
///
/// NOTE(review): "कम से कम" is a multi-word (and Hindi-looking) entry; a
/// word-level tokenizer will never emit a token containing spaces, so it may
/// be dead data — verify against the tokenizer and upstream list.
pub const NEPALI_STOPWORDS: &[&str] = &[ "छ", "र", "पनि", "छन्", "लागि", "भएको", "गरेको", "भने", "गर्न", "गर्ने", "हो", "तथा", "यो", "रहेको", "उनले", "थियो", "हुने", "गरेका", "थिए", "गर्दै", "तर", "नै", "को", "मा", "हुन्", "भन्ने", "हुन", "गरी", "त", "हुन्छ", "अब", "के", "रहेका", "गरेर", "छैन", "दिए", "भए", "यस", "ले", "गर्नु", "औं", "सो", "त्यो", "कि", "जुन", "यी", "का", "गरि", "ती", "न", "छु", "छौं", "लाई", "नि", "उप", "अक्सर", "आदि", "कसरी", "क्रमशः", "चाले", "अगाडी", "अझै", "अनुसार", "अन्तर्गत", "अन्य", "अन्यत्र", "अन्यथा", "अरु", "अरुलाई", "अर्को", "अर्थात", "अर्थात्", "अलग", "आए", "आजको", "ओठ", "आत्म", "आफू", "आफूलाई", "आफ्नै", "आफ्नो", "आयो", "उदाहरण", "उनको", "उहालाई", "एउटै", "एक", "एकदम", "कतै", "कम से कम", "कसै", "कसैले", "कहाँबाट", "कहिलेकाहीं", "का", "किन", "किनभने", "कुनै", "कुरा", "कृपया", "केही", "कोही", "गए", "गरौं", "गर्छ", "गर्छु", "गर्नुपर्छ", "गयौ", "गैर", "चार", "चाहनुहुन्छ", "चाहन्छु", "चाहिए", "छू", "जताततै", "जब", "जबकि", "जसको", "जसबाट", "जसमा", "जसलाई", "जसले", "जस्तै", "जस्तो", "जस्तोसुकै", "जहाँ", "जान", "जाहिर", "जे", "जो", "ठीक", "तत्काल", "तदनुसार", "तपाईको", "तपाई", "पर्याप्त", "पहिले", "पहिलो", "पहिल्यै", "पाँच", "पाँचौं", "तल", "तापनी", "तिनी", "तिनीहरू", "तिनीहरुको", "तिनिहरुलाई", "तिमी", "तिर", "तीन", "तुरुन्तै", "तेस्रो", "तेस्कारण", "पूर्व", "प्रति", "प्रतेक", "प्लस", "फेरी", "बने", "त्सपछि", "त्सैले", "त्यहाँ", "थिएन", "दिनुभएको", "दिनुहुन्छ", "दुई", "देखि", "बरु", "बारे", "बाहिर", "देखिन्छ", "देखियो", "देखे", "देखेको", "देखेर", "दोस्रो", "धेरै", "नजिकै", "नत्र", "नयाँ", "निम्ति", "बाहेक", "बीच", "बीचमा", "भन", "निम्न", "निम्नानुसार", "निर्दिष्ट", "नौ", "पक्का", "पक्कै", "पछि", "पछिल्लो", "पटक", "पर्छ", "पर्थ्यो", "भन्छन्", "भन्", "भन्छु", "भन्दा", "भन्नुभयो", "भर", "भित्र", "भित्री", "म", "मलाई", "मात्र", "माथि", "मुख्य", "मेरो", "यति", "यथोचित", "यदि", "यद्यपि", "यसको", "यसपछि", "यसबाहेक", "यसरी", "यसो", "यस्तो", "यहाँ", "यहाँसम्म", "या", "रही", "राखे", "राख्छ", "राम्रो", "रूप", "लगभग", "वरीपरी", "वास्तवमा", "बिरुद्ध",
"बिशेष", "सायद", "शायद", "संग", "संगै", "सक्छ", "सट्टा", "सधै", "सबै", "सबैलाई", "समय", "सम्भव", "सम्म", "सही", "साँच्चै", "सात", "साथ", "साथै", "सारा", "सोही", "स्पष्ट", "हरे", "हरेक", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/portuguese.rs
lib/segment/src/index/field_index/full_text_index/stop_words/portuguese.rs
/// Portuguese stopword list used by the full-text index when
/// `Language::Portuguese` is selected.
/// NOTE(review): "tém" looks like a typo for "têm", but it appears to match
/// the upstream (NLTK-style) source list — confirm before changing.
pub const PORTUGUESE_STOPWORDS: &[&str] = &[ "a", "à", "ao", "aos", "aquela", "aquelas", "aquele", "aqueles", "aquilo", "as", "às", "até", "com", "como", "da", "das", "de", "dela", "delas", "dele", "deles", "depois", "do", "dos", "e", "é", "ela", "elas", "ele", "eles", "em", "entre", "era", "eram", "éramos", "essa", "essas", "esse", "esses", "esta", "está", "estamos", "estão", "estar", "estas", "estava", "estavam", "estávamos", "este", "esteja", "estejam", "estejamos", "estes", "esteve", "estive", "estivemos", "estiver", "estivera", "estiveram", "estivéramos", "estiverem", "estivermos", "estivesse", "estivessem", "estivéssemos", "estou", "eu", "foi", "fomos", "for", "fora", "foram", "fôramos", "forem", "formos", "fosse", "fossem", "fôssemos", "fui", "há", "haja", "hajam", "hajamos", "hão", "havemos", "haver", "hei", "houve", "houvemos", "houver", "houvera", "houverá", "houveram", "houvéramos", "houverão", "houverei", "houverem", "houveremos", "houveria", "houveriam", "houveríamos", "houvermos", "houvesse", "houvessem", "houvéssemos", "isso", "isto", "já", "lhe", "lhes", "mais", "mas", "me", "mesmo", "meu", "meus", "minha", "minhas", "muito", "na", "não", "nas", "nem", "no", "nos", "nós", "nossa", "nossas", "nosso", "nossos", "num", "numa", "o", "os", "ou", "para", "pela", "pelas", "pelo", "pelos", "por", "qual", "quando", "que", "quem", "são", "se", "seja", "sejam", "sejamos", "sem", "ser", "será", "serão", "serei", "seremos", "seria", "seriam", "seríamos", "seu", "seus", "só", "somos", "sou", "sua", "suas", "também", "te", "tem", "tém", "temos", "tenha", "tenham", "tenhamos", "tenho", "terá", "terão", "terei", "teremos", "teria", "teriam", "teríamos", "teu", "teus", "teve", "tinha", "tinham", "tínhamos", "tive", "tivemos", "tiver", "tivera", "tiveram", "tivéramos", "tiverem", "tivermos", "tivesse", "tivessem", "tivéssemos", "tu", "tua", "tuas", "um", "uma", "você", "vocês", "vos", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/finnish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/finnish.rs
/// Finnish stopword list used by the full-text index when
/// `Language::Finnish` is selected.
/// NOTE(review): several entries are duplicated ("sinä", "minä", "minkä",
/// "ketkä", "niin", "sillä") and a few forms look misspelled ("tallä",
/// "tuotä") — these appear to be inherited from the upstream list, TODO
/// confirm against it before cleaning up. Duplicates are harmless because
/// the filter inserts words into a set.
pub const FINNISH_STOPWORDS: &[&str] = &[ "olla", "olen", "olet", "on", "olemme", "olette", "ovat", "ole", "oli", "olisi", "olisit", "olisin", "olisimme", "olisitte", "olisivat", "olit", "olin", "olimme", "olitte", "olivat", "ollut", "olleet", "en", "et", "ei", "emme", "ette", "eivät", "minä", "minun", "minut", "minua", "minussa", "minusta", "minuun", "minulla", "minulta", "minulle", "sinä", "sinun", "sinut", "sinua", "sinussa", "sinusta", "sinuun", "sinulla", "sinulta", "sinulle", "hän", "hänen", "hänet", "häntä", "hänessä", "hänestä", "häneen", "hänellä", "häneltä", "hänelle", "me", "meidän", "meidät", "meitä", "meissä", "meistä", "meihin", "meillä", "meiltä", "meille", "te", "teidän", "teidät", "teitä", "teissä", "teistä", "teihin", "teillä", "teiltä", "teille", "he", "heidän", "heidät", "heitä", "heissä", "heistä", "heihin", "heillä", "heiltä", "heille", "tämä", "tämän", "tätä", "tässä", "tästä", "tähän", "tallä", "tältä", "tälle", "tänä", "täksi", "tuo", "tuon", "tuotä", "tuossa", "tuosta", "tuohon", "tuolla", "tuolta", "tuolle", "tuona", "tuoksi", "se", "sen", "sitä", "siinä", "siitä", "siihen", "sillä", "siltä", "sille", "sinä", "siksi", "nämä", "näiden", "näitä", "näissä", "näistä", "näihin", "näillä", "näiltä", "näille", "näinä", "näiksi", "nuo", "noiden", "noita", "noissa", "noista", "noihin", "noilla", "noilta", "noille", "noina", "noiksi", "ne", "niiden", "niitä", "niissä", "niistä", "niihin", "niillä", "niiltä", "niille", "niinä", "niiksi", "kuka", "kenen", "kenet", "ketä", "kenessä", "kenestä", "keneen", "kenellä", "keneltä", "kenelle", "kenenä", "keneksi", "ketkä", "keiden", "ketkä", "keitä", "keissä", "keistä", "keihin", "keillä", "keiltä", "keille", "keinä", "keiksi", "mikä", "minkä", "minkä", "mitä", "missä", "mistä", "mihin", "millä", "miltä", "mille", "minä", "miksi", "mitkä", "joka", "jonka", "jota", "jossa", "josta", "johon", "jolla", "jolta", "jolle", "jona", "joksi", "jotka", "joiden", "joita", "joissa", "joista", "joihin", "joilla", 
"joilta", "joille", "joina", "joiksi", "että", "ja", "jos", "koska", "kuin", "mutta", "niin", "sekä", "sillä", "tai", "vaan", "vai", "vaikka", "kanssa", "mukaan", "noin", "poikki", "yli", "kun", "niin", "nyt", "itse", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/tajik.rs
lib/segment/src/index/field_index/full_text_index/stop_words/tajik.rs
/// Tajik stopword list used by the full-text index when `Language::Tajik`
/// is selected.
///
/// Fix: the original list contained many entries with trailing spaces
/// (e.g. `"болои "`, `"агар "`, `"магар "`). Tokenized words never carry
/// surrounding whitespace, so those entries could never match and were dead
/// weight; all entries are now trimmed. Order is preserved and the exact
/// duplicates produced by trimming (e.g. "аз", "азбаски") are kept — they
/// are harmless because the filter inserts words into a set.
///
/// NOTE(review): multi-word entries ("сар карда", "агар ки", …) also cannot
/// match a single token; they are kept as-is pending a decision on phrase
/// stopwords. "хом?ш" looks like mojibake (possibly "хомӯш") — confirm
/// against the upstream source before changing.
pub const TAJIK_STOPWORDS: &[&str] = &[
    "аз", "дар", "ба", "бо", "барои", "бе", "то", "ҷуз", "пеши", "назди",
    "рӯйи", "болои", "паси", "ғайри", "ҳамон", "ҳамоно", "инҷониб", "замон",
    "замоно", "эътиборан", "пеш", "қабл", "дида", "сар карда", "агар",
    "агар ки", "валекин", "ки", "лекин", "аммо", "вале", "балки", "ва",
    "ҳарчанд", "чунки", "зеро", "зеро ки", "вақте ки", "то вақте ки",
    "барои он ки", "бо нияти он ки", "лекин ва ҳол он ки", "ё", "ё ин ки",
    "бе он ки", "дар ҳолате ки", "то даме ки", "баъд аз он ки", "даме ки",
    "ба тразе ки", "аз баҳри он ки", "гар", "ар", "ба шарте", "азбаски",
    "модоме ки", "агар чи", "гарчанде ки", "бо вуҷуди он ки", "гӯё",
    "аз-баски", "чун-ки", "агар-чанд", "агар-чи", "гар-чи", "то ки",
    "чунон ки", "то даме ки", "ҳар қадар ки", "магар", "оё", "наход",
    "ҳатто", "ҳам", "бале", "оре", "хуб", "хуш", "хайр", "не", "на", "мана",
    "э", "фақат", "танҳо", "кошки", "мабодо", "ҳтимол", "ана ҳамин",
    "наход ки", "ҳатто ки", "аз афташ", "майлаш куя", "ана", "ҳа", "канӣ",
    "гӯё ки", "ҳо ана", "на ин ки", "ваҳ", "ҳой", "и", "а", "о", "эҳ", "ҳе",
    "ҳу", "аҳа", "оҳе", "уҳа", "ҳм", "нм", "оббо", "ӯббо", "ҳой-ҳой",
    "вой-вой", "ту-ту", "ҳмм", "эҳа", "тавба", "ӯҳӯ", "аҷабо", "ало", "аё",
    "ой", "ӯим", "ором", "хом?ш", "ҳай-ҳай", "бай-бай", "аз", "он", "баъд",
    "азбаски", "ӯ", "ҳангоми", "чӣ", "кадом", "ин", "ҷо", "ҳам", "ё ки",
    "бояд", "аст", "чанд", "ҳар", "бар", "чаро ки", "агар", "то кӣ",
    "бинобар", "бинобар ин", "ҳаргиз", "асло", "нахот", "нахот ки", "кошкӣ",
    "шояд", "шояд ки", "охир", "аз рӯи", "аз рӯйи", "рӯ",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/bengali.rs
lib/segment/src/index/field_index/full_text_index/stop_words/bengali.rs
/// Bengali stopword list used by the full-text index when
/// `Language::Bengali` is selected.
/// NOTE(review): some entries are repeated (e.g. "গিয়ে", "নিয়ে", "দেওয়া",
/// "নেওয়া") — likely Unicode-composition variants from the upstream source.
/// Duplicates are harmless because the filter inserts words into a set,
/// but normalizing to NFC would let them be deduplicated — TODO confirm
/// which normalization the tokenizer applies.
pub const BENGALI_STOPWORDS: &[&str] = &[ "অতএব", "অথচ", "অথবা", "অনুযায়ী", "অনেক", "অনেকে", "অনেকেই", "অন্তত", "অন্য", "অবধি", "অবশ্য", "অর্থাত", "আই", "আগামী", "আগে", "আগেই", "আছে", "আজ", "আদ্যভাগে", "আপনার", "আপনি", "আবার", "আমরা", "আমাকে", "আমাদের", "আমার", "আমি", "আর", "আরও", "ই", "ইত্যাদি", "ইহা", "উচিত", "উত্তর", "উনি", "উপর", "উপরে", "এ", "এঁদের", "এঁরা", "এই", "একই", "একটি", "একবার", "একে", "এক্", "এখন", "এখনও", "এখানে", "এখানেই", "এটা", "এটাই", "এটি", "এত", "এতটাই", "এতে", "এদের", "এব", "এবং", "এবার", "এমন", "এমনকী", "এমনি", "এর", "এরা", "এল", "এস", "এসে", "ঐ", "ও", "ওঁদের", "ওঁর", "ওঁরা", "ওই", "ওকে", "ওখানে", "ওদের", "ওর", "ওরা", "কখনও", "কত", "কবে", "কমনে", "কয়েক", "কয়েকটি", "করছে", "করছেন", "করতে", "করবে", "করবেন", "করলে", "করলেন", "করা", "করাই", "করায়", "করার", "করি", "করিতে", "করিয়া", "করিয়ে", "করে", "করেই", "করেছিলেন", "করেছে", "করেছেন", "করেন", "কাউকে", "কাছ", "কাছে", "কাজ", "কাজে", "কারও", "কারণ", "কি", "কিংবা", "কিছু", "কিছুই", "কিন্তু", "কী", "কে", "কেউ", "কেউই", "কেখা", "কেন", "কোটি", "কোন", "কোনও", "কোনো", "ক্ষেত্রে", "কয়েক", "খুব", "গিয়ে", "গিয়েছে", "গিয়ে", "গুলি", "গেছে", "গেল", "গেলে", "গোটা", "চলে", "চান", "চায়", "চার", "চালু", "চেয়ে", "চেষ্টা", "ছাড়া", "ছাড়াও", "ছিল", "ছিলেন", "জন", "জনকে", "জনের", "জন্য", "জন্যওজে", "জানতে", "জানা", "জানানো", "জানায়", "জানিয়ে", "জানিয়েছে", "জে", "জ্নজন", "টি", "ঠিক", "তখন", "তত", "তথা", "তবু", "তবে", "তা", "তাঁকে", "তাঁদের", "তাঁর", "তাঁরা", "তাঁাহারা", "তাই", "তাও", "তাকে", "তাতে", "তাদের", "তার", "তারপর", "তারা", "তারৈ", "তাহলে", "তাহা", "তাহাতে", "তাহার", "তিনঐ", "তিনি", "তিনিও", "তুমি", "তুলে", "তেমন", "তো", "তোমার", "থাকবে", "থাকবেন", "থাকা", "থাকায়", "থাকে", "থাকেন", "থেকে", "থেকেই", "থেকেও", "দিকে", "দিতে", "দিন", "দিয়ে", "দিয়েছে", "দিয়েছেন", "দিলেন", "দু", "দুই", "দুটি", "দুটো", "দেওয়া", "দেওয়ার", "দেওয়া", "দেখতে", "দেখা", "দেখে", "দেন", "দেয়", "দ্বারা", "ধরা", "ধরে", "ধামার", "নতুন", "নয়", "না", "নাই", "নাকি", "নাগাদ", "নানা", "নিজে", "নিজেই", "নিজেদের", "নিজের", 
"নিতে", "নিয়ে", "নিয়ে", "নেই", "নেওয়া", "নেওয়ার", "নেওয়া", "নয়", "পক্ষে", "পর", "পরে", "পরেই", "পরেও", "পর্যন্ত", "পাওয়া", "পাচ", "পারি", "পারে", "পারেন", "পি", "পেয়ে", "পেয়্র্", "প্রতি", "প্রথম", "প্রভৃতি", "প্রযন্ত", "প্রাথমিক", "প্রায়", "প্রায়", "ফলে", "ফিরে", "ফের", "বক্তব্য", "বদলে", "বন", "বরং", "বলতে", "বলল", "বললেন", "বলা", "বলে", "বলেছেন", "বলেন", "বসে", "বহু", "বা", "বাদে", "বার", "বি", "বিনা", "বিভিন্ন", "বিশেষ", "বিষয়টি", "বেশ", "বেশি", "ব্যবহার", "ব্যাপারে", "ভাবে", "ভাবেই", "মতো", "মতোই", "মধ্যভাগে", "মধ্যে", "মধ্যেই", "মধ্যেও", "মনে", "মাত্র", "মাধ্যমে", "মোট", "মোটেই", "যখন", "যত", "যতটা", "যথেষ্ট", "যদি", "যদিও", "যা", "যাঁর", "যাঁরা", "যাওয়া", "যাওয়ার", "যাওয়া", "যাকে", "যাচ্ছে", "যাতে", "যাদের", "যান", "যাবে", "যায়", "যার", "যারা", "যিনি", "যে", "যেখানে", "যেতে", "যেন", "যেমন", "র", "রকম", "রয়েছে", "রাখা", "রেখে", "লক্ষ", "শুধু", "শুরু", "সঙ্গে", "সঙ্গেও", "সব", "সবার", "সমস্ত", "সম্প্রতি", "সহ", "সহিত", "সাধারণ", "সামনে", "সি", "সুতরাং", "সে", "সেই", "সেখান", "সেখানে", "সেটা", "সেটাই", "সেটাও", "সেটি", "স্পষ্ট", "স্বয়ং", "হইতে", "হইবে", "হইয়া", "হওয়া", "হওয়ায়", "হওয়ার", "হচ্ছে", "হত", "হতে", "হতেই", "হন", "হবে", "হবেন", "হয়", "হয়তো", "হয়নি", "হয়ে", "হয়েই", "হয়েছিল", "হয়েছে", "হয়েছেন", "হল", "হলে", "হলেই", "হলেও", "হলো", "হাজার", "হিসাবে", "হৈলে", "হোক", "হয়", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/azerbaijani.rs
lib/segment/src/index/field_index/full_text_index/stop_words/azerbaijani.rs
/// Azerbaijani stopword list used by the full-text index when
/// `Language::Azerbaijani` is selected.
///
/// Fix: the original entry `"onların "` carried a trailing space. Tokenized
/// words never carry surrounding whitespace, so that entry could never match
/// a token; it is now trimmed to `"onların"`. Everything else is unchanged.
///
/// NOTE(review): "mirşey" looks like a typo of "birşey" (which is also
/// present) — it appears to come from the upstream source list, TODO confirm
/// before changing.
pub const AZERBAIJANI_STOPWORDS: &[&str] = &[
    "a", "ad", "altı", "altmış", "amma", "arasında", "artıq", "ay", "az",
    "bax", "belə", "bəli", "bəlkə", "beş", "bəy", "bəzən", "bəzi", "bilər",
    "bir", "biraz", "biri", "birşey", "biz", "bizim", "bizlər", "bu", "buna",
    "bundan", "bunların", "bunu", "bunun", "buradan", "bütün", "ci", "cı",
    "çox", "cu", "cü", "çünki", "da", "daha", "də", "dedi", "dək", "dən",
    "dəqiqə", "deyil", "dir", "doqquz", "doqsan", "dörd", "düz", "ə", "edən",
    "edir", "əgər", "əlbəttə", "elə", "əlli", "ən", "əslində", "et", "etdi",
    "etmə", "etmək", "faiz", "gilə", "görə", "ha", "haqqında", "harada",
    "hə", "heç", "həm", "həmin", "həmişə", "hər", "ı", "idi", "iki", "il",
    "ildə", "ilə", "ilk", "in", "indi", "isə", "istifadə", "iyirmi", "ki",
    "kim", "kimə", "kimi", "lakin", "lap", "məhz", "mən", "mənə", "mirşey",
    "nə", "nəhayət", "niyə", "o", "obirisi", "of", "olan", "olar", "olaraq",
    "oldu", "olduğu", "olmadı", "olmaz", "olmuşdur", "olsun", "olur", "on",
    "ona", "ondan", "onlar", "onlardan", "onların", "onsuzda", "onu",
    "onun", "oradan", "otuz", "öz", "özü", "qarşı", "qədər", "qırx", "saat",
    "sadəcə", "saniyə", "səhv", "səkkiz", "səksən", "sən", "sənə", "sənin",
    "siz", "sizin", "sizlər", "sonra", "təəssüf", "ü", "üç", "üçün", "var",
    "və", "xan", "xanım", "xeyr", "ya", "yalnız", "yaxşı", "yeddi", "yenə",
    "yəni", "yetmiş", "yox", "yoxdur", "yoxsa", "yüz", "zaman",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/italian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/italian.rs
/// Italian stopword list used by the full-text index when
/// `Language::Italian` is selected. Covers articles, prepositions
/// (including articulated forms), pronouns, and the conjugations of
/// avere/essere/fare/stare, matching the conventional NLTK-style list.
pub const ITALIAN_STOPWORDS: &[&str] = &[ "ad", "al", "allo", "ai", "agli", "all", "agl", "alla", "alle", "con", "col", "coi", "da", "dal", "dallo", "dai", "dagli", "dall", "dagl", "dalla", "dalle", "di", "del", "dello", "dei", "degli", "dell", "degl", "della", "delle", "in", "nel", "nello", "nei", "negli", "nell", "negl", "nella", "nelle", "su", "sul", "sullo", "sui", "sugli", "sull", "sugl", "sulla", "sulle", "per", "tra", "contro", "io", "tu", "lui", "lei", "noi", "voi", "loro", "mio", "mia", "miei", "mie", "tuo", "tua", "tuoi", "tue", "suo", "sua", "suoi", "sue", "nostro", "nostra", "nostri", "nostre", "vostro", "vostra", "vostri", "vostre", "mi", "ti", "ci", "vi", "lo", "la", "li", "le", "gli", "ne", "il", "un", "uno", "una", "ma", "ed", "se", "perché", "anche", "come", "dov", "dove", "che", "chi", "cui", "non", "più", "quale", "quanto", "quanti", "quanta", "quante", "quello", "quelli", "quella", "quelle", "questo", "questi", "questa", "queste", "si", "tutto", "tutti", "a", "c", "e", "i", "l", "o", "ho", "hai", "ha", "abbiamo", "avete", "hanno", "abbia", "abbiate", "abbiano", "avrò", "avrai", "avrà", "avremo", "avrete", "avranno", "avrei", "avresti", "avrebbe", "avremmo", "avreste", "avrebbero", "avevo", "avevi", "aveva", "avevamo", "avevate", "avevano", "ebbi", "avesti", "ebbe", "avemmo", "aveste", "ebbero", "avessi", "avesse", "avessimo", "avessero", "avendo", "avuto", "avuta", "avuti", "avute", "sono", "sei", "è", "siamo", "siete", "sia", "siate", "siano", "sarò", "sarai", "sarà", "saremo", "sarete", "saranno", "sarei", "saresti", "sarebbe", "saremmo", "sareste", "sarebbero", "ero", "eri", "era", "eravamo", "eravate", "erano", "fui", "fosti", "fu", "fummo", "foste", "furono", "fossi", "fosse", "fossimo", "fossero", "essendo", "faccio", "fai", "facciamo", "fanno", "faccia", "facciate", "facciano", "farò", "farai", "farà", "faremo", "farete", "faranno", "farei", "faresti", "farebbe", "faremmo", "fareste", "farebbero", "facevo", "facevi", "faceva", "facevamo", 
"facevate", "facevano", "feci", "facesti", "fece", "facemmo", "faceste", "fecero", "facessi", "facesse", "facessimo", "facessero", "facendo", "sto", "stai", "sta", "stiamo", "stanno", "stia", "stiate", "stiano", "starò", "starai", "starà", "staremo", "starete", "staranno", "starei", "staresti", "starebbe", "staremmo", "stareste", "starebbero", "stavo", "stavi", "stava", "stavamo", "stavate", "stavano", "stetti", "stesti", "stette", "stemmo", "steste", "stettero", "stessi", "stesse", "stessimo", "stessero", "stando", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/mod.rs
lib/segment/src/index/field_index/full_text_index/stop_words/mod.rs
//! Stopword filtering for the full-text index.
//!
//! Each submodule holds a static stopword list for one language (all
//! re-exported below). [`StopwordsFilter`] is built from an optional
//! `StopwordsInterface` — either a single `Language`, or a `Set` combining
//! several languages plus custom words — and answers exact-membership
//! queries via `is_stopword`.
//!
//! When `lowercase` is true every word is lowercased as it is inserted.
//! `is_stopword` itself does NOT lowercase the queried token; presumably
//! the tokenizer normalizes case before querying — confirm against callers.
use ahash::AHashSet; use crate::data_types::index::{Language, StopwordsInterface}; pub mod arabic; pub mod azerbaijani; pub mod basque; pub mod bengali; pub mod catalan; pub mod chinese; pub mod danish; pub mod dutch; pub mod english; pub mod finnish; pub mod french; pub mod german; pub mod greek; pub mod hebrew; pub mod hinglish; pub mod hungarian; pub mod indonesian; pub mod italian; pub mod japanese; pub mod kazakh; pub mod nepali; pub mod norwegian; pub mod portuguese; pub mod romanian; pub mod russian; pub mod slovene; pub mod spanish; pub mod swedish; pub mod tajik; pub mod turkish; pub use arabic::ARABIC_STOPWORDS; pub use azerbaijani::AZERBAIJANI_STOPWORDS; pub use basque::BASQUE_STOPWORDS; pub use bengali::BENGALI_STOPWORDS; pub use catalan::CATALAN_STOPWORDS; pub use chinese::CHINESE_STOPWORDS; pub use danish::DANISH_STOPWORDS; pub use dutch::DUTCH_STOPWORDS; pub use english::ENGLISH_STOPWORDS; pub use finnish::FINNISH_STOPWORDS; pub use french::FRENCH_STOPWORDS; pub use german::GERMAN_STOPWORDS; pub use greek::GREEK_STOPWORDS; pub use hebrew::HEBREW_STOPWORDS; pub use hinglish::HINGLISH_STOPWORDS; pub use hungarian::HUNGARIAN_STOPWORDS; pub use indonesian::INDONESIAN_STOPWORDS; pub use italian::ITALIAN_STOPWORDS; pub use japanese::JAPANESE_STOPWORDS; pub use kazakh::KAZAKH_STOPWORDS; pub use nepali::NEPALI_STOPWORDS; pub use norwegian::NORWEGIAN_STOPWORDS; pub use portuguese::PORTUGUESE_STOPWORDS; pub use romanian::ROMANIAN_STOPWORDS; pub use russian::RUSSIAN_STOPWORDS; pub use slovene::SLOVENE_STOPWORDS; pub use spanish::SPANISH_STOPWORDS; pub use swedish::SWEDISH_STOPWORDS; pub use tajik::TAJIK_STOPWORDS; pub use turkish::TURKISH_STOPWORDS; #[derive(Debug, Clone, Default)] pub struct StopwordsFilter { stopwords: AHashSet<String>, } impl StopwordsFilter { pub fn new(option: &Option<StopwordsInterface>, lowercase: bool) -> Self { let mut this = Self::default(); if let Some(option) = option { match option { StopwordsInterface::Language(lang) => { 
this.add_language_stopwords(lang, lowercase); } StopwordsInterface::Set(set) => { // Add stopwords from all languages in the languages field if let Some(languages) = set.languages.as_ref() { // If languages are provided, add their stopwords for lang in languages { this.add_language_stopwords(lang, lowercase); } } if let Some(custom) = set.custom.as_ref() { // If custom stopwords are provided, add them for word in custom { this.add_stopword(word, lowercase); } } } } } this } /// Check if a token is a stopword pub fn is_stopword(&self, token: &str) -> bool { self.stopwords.contains(token) } fn add_stopword(&mut self, word: &str, lowercase: bool) { if lowercase { self.stopwords.insert(word.to_lowercase()); } else { self.stopwords.insert(word.to_string()); } } /// Add stopwords for a specific language fn add_language_stopwords(&mut self, language: &Language, lowercase: bool) { let stopwords_array = match language { Language::Arabic => ARABIC_STOPWORDS, Language::Azerbaijani => AZERBAIJANI_STOPWORDS, Language::Basque => BASQUE_STOPWORDS, Language::Bengali => BENGALI_STOPWORDS, Language::Catalan => CATALAN_STOPWORDS, Language::Chinese => CHINESE_STOPWORDS, Language::Danish => DANISH_STOPWORDS, Language::Dutch => DUTCH_STOPWORDS, Language::English => ENGLISH_STOPWORDS, Language::Finnish => FINNISH_STOPWORDS, Language::French => FRENCH_STOPWORDS, Language::German => GERMAN_STOPWORDS, Language::Greek => GREEK_STOPWORDS, Language::Hebrew => HEBREW_STOPWORDS, Language::Hinglish => HINGLISH_STOPWORDS, Language::Hungarian => HUNGARIAN_STOPWORDS, Language::Indonesian => INDONESIAN_STOPWORDS, Language::Italian => ITALIAN_STOPWORDS, Language::Japanese => JAPANESE_STOPWORDS, Language::Kazakh => KAZAKH_STOPWORDS, Language::Nepali => NEPALI_STOPWORDS, Language::Norwegian => NORWEGIAN_STOPWORDS, Language::Portuguese => PORTUGUESE_STOPWORDS, Language::Romanian => ROMANIAN_STOPWORDS, Language::Russian => RUSSIAN_STOPWORDS, Language::Slovene => SLOVENE_STOPWORDS, Language::Spanish => 
SPANISH_STOPWORDS, Language::Swedish => SWEDISH_STOPWORDS, Language::Tajik => TAJIK_STOPWORDS, Language::Turkish => TURKISH_STOPWORDS, }; for &word in stopwords_array { self.add_stopword(word, lowercase); } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_empty_stopwords() { let filter = StopwordsFilter::new(&None, true); assert!(!filter.is_stopword("the")); assert!(!filter.is_stopword("hello")); } #[test] fn test_language_stopwords() { let option = Some(StopwordsInterface::Language(Language::English)); let filter = StopwordsFilter::new(&option, true); assert!(filter.is_stopword("the")); assert!(filter.is_stopword("and")); assert!(filter.is_stopword("of")); assert!(!filter.is_stopword("hello")); } #[test] fn test_custom_stopwords() { let option = Some(StopwordsInterface::new_custom(&["hello", "world"])); let filter = StopwordsFilter::new(&option, true); assert!(filter.is_stopword("hello")); assert!(filter.is_stopword("world")); assert!(!filter.is_stopword("the")); } #[test] fn test_mixed_stopwords() { let option = Some(StopwordsInterface::new_set( &[Language::English], &["hello", "world"], )); let filter = StopwordsFilter::new(&option, true); assert!(filter.is_stopword("hello")); assert!(filter.is_stopword("world")); assert!(filter.is_stopword("the")); assert!(filter.is_stopword("and")); assert!(filter.is_stopword("mustn't")); assert!(!filter.is_stopword("programming")); } #[test] fn test_case_sensitivity() { let option = Some(StopwordsInterface::new_custom(&["Hello", "World"])); let filter = StopwordsFilter::new(&option, false); // Should match exact case assert!(filter.is_stopword("Hello")); assert!(filter.is_stopword("World")); // Should not match different case assert!(!filter.is_stopword("hello")); assert!(!filter.is_stopword("HELLO")); assert!(!filter.is_stopword("world")); assert!(!filter.is_stopword("WORLD")); } #[test] fn test_language_stopwords_case_sensitivity() { let option = Some(StopwordsInterface::Language(Language::English)); let filter = 
StopwordsFilter::new(&option, false); // English stopwords are typically lowercase in the source arrays assert!(filter.is_stopword("the")); assert!(filter.is_stopword("and")); // Should not match uppercase versions assert!(!filter.is_stopword("The")); assert!(!filter.is_stopword("AND")); } #[test] fn test_all_languages_stopwords() { // Test a common stopword for each language let languages = vec![ (Language::Arabic, "في"), (Language::Azerbaijani, "və"), (Language::Basque, "eta"), (Language::Bengali, "এবং"), (Language::Catalan, "i"), (Language::Chinese, "的"), (Language::Danish, "og"), (Language::Dutch, "en"), (Language::English, "and"), (Language::Finnish, "ja"), (Language::French, "et"), (Language::German, "und"), (Language::Greek, "και"), (Language::Hebrew, "את"), (Language::Hinglish, "aur"), (Language::Hungarian, "és"), (Language::Indonesian, "dan"), (Language::Italian, "e"), (Language::Japanese, "ます"), (Language::Kazakh, "жоқ"), (Language::Nepali, "र"), (Language::Norwegian, "og"), (Language::Portuguese, "e"), (Language::Romanian, "ar"), (Language::Russian, "и"), (Language::Slovene, "in"), (Language::Spanish, "y"), (Language::Swedish, "och"), (Language::Tajik, "ва"), (Language::Turkish, "ve"), ]; for (language, stopword) in languages { let option = Some(StopwordsInterface::Language(language.clone())); let filter = StopwordsFilter::new(&option, true); assert!( filter.is_stopword(stopword), "Expected '{stopword}' to be a stopword in {language:?}" ); assert!( !filter.is_stopword("qdrant"), "Expected 'qdrant' not to be a stopword" ); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/danish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/danish.rs
/// Danish stopword list used by the full-text index when `Language::Danish`
/// is selected; matches the conventional NLTK-style Danish list.
pub const DANISH_STOPWORDS: &[&str] = &[ "og", "i", "jeg", "det", "at", "en", "den", "til", "er", "som", "på", "de", "med", "han", "af", "for", "ikke", "der", "var", "mig", "sig", "men", "et", "har", "om", "vi", "min", "havde", "ham", "hun", "nu", "over", "da", "fra", "du", "ud", "sin", "dem", "os", "op", "man", "hans", "hvor", "eller", "hvad", "skal", "selv", "her", "alle", "vil", "blev", "kunne", "ind", "når", "være", "dog", "noget", "ville", "jo", "deres", "efter", "ned", "skulle", "denne", "end", "dette", "mit", "også", "under", "have", "dig", "anden", "hende", "mine", "alt", "meget", "sit", "sine", "vor", "mod", "disse", "hvis", "din", "nogle", "hos", "blive", "mange", "ad", "bliver", "hendes", "været", "thi", "jer", "sådan", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/basque.rs
lib/segment/src/index/field_index/full_text_index/stop_words/basque.rs
/// Basque stopword list used by the full-text index when `Language::Basque`
/// is selected.
///
/// Fix: the original list contained dozens of entries with trailing spaces
/// (e.g. `"ala "`, `"bada "`, `"eta "`). Tokenized words never carry
/// surrounding whitespace, so those entries could never match and were dead
/// weight; all entries are now trimmed. Order is preserved and the exact
/// duplicates present in (or produced by trimming) the original list are
/// kept — they are harmless because the filter inserts words into a set,
/// and keeping them makes this change a pure whitespace fix.
pub const BASQUE_STOPWORDS: &[&str] = &[
    "ahala", "aitzitik", "al", "ala", "alabadere", "alabaina", "alabaina",
    "aldiz", "alta", "amaitu", "amaitzeko", "anitz", "antzina", "arabera",
    "arabera", "arabera", "argi", "arratsaldero", "arte", "artean", "asko",
    "aspaldiko", "aurrera", "aurrera", "azkenez", "azkenik", "azkenik",
    "ba", "bada", "bada", "bada", "bada", "badarik", "badarik", "badarik",
    "badere", "bai", "baina", "baina", "baina", "baino", "baino", "baino",
    "baino", "baita", "baizik", "baldin", "baldin", "barren", "bat",
    "batean", "batean", "batean", "batean", "batek", "baten", "batera",
    "batez", "bati", "batzuei", "batzuek", "batzuetan", "batzuk", "bazen",
    "bederen", "bederik", "beharrez", "behiala", "behin", "behin", "behin",
    "behin", "behinik", "behinola", "behintzat", "bera", "beraiek",
    "beranduago", "berau", "berauek", "beraz", "beraz", "bere", "berean",
    "berebat", "berehala", "berori", "beroriek", "berriro", "berriz",
    "bertzalde", "bertzenaz", "bestalde", "beste", "bestela", "besterik",
    "bezain", "bezala", "bide", "bien", "bigarrenez", "bigarrenik",
    "bitartean", "bitartean", "bizkitartean", "bukaeran", "bukatzeko",
    "da", "dago", "dago", "dela", "dela", "dela", "delarik", "den", "dena",
    "dena", "dezadan", "dira", "ditu", "du", "dute", "edo", "edo", "edota",
    "egin", "egin", "egun", "egun", "egunean", "emateko", "era", "erdi",
    "ere", "ere", "ere", "ere", "ere", "esan", "esan", "esanak",
    "esandakoaren", "eta", "eta", "eta", "eta", "eta", "eta", "eurak",
    "ez", "ez", "ez", "eze", "ezen", "ezer", "ezezik", "ezik", "ezpabere",
    "ezpada", "ezpere", "ezperen", "ezta", "funtsean", "gabe", "gain",
    "gainera", "gainera", "gainerontzean", "gaur", "gero", "gero", "gero",
    "geroago", "gisa", "gu", "gutxi", "guzti", "guztia", "guztiz", "haatik",
    "haiei", "haiek", "haietan", "hain", "hainbeste", "hainbestez", "hala",
    "hala", "hala", "halaber", "halako", "halatan", "han", "handik",
    "hango", "hara", "hargatik", "hari", "hark", "hartan", "hartan",
    "hasi", "hasi", "hasiera", "hasieran", "hasteaz", "hasteko", "hasteko",
    "hau", "hau", "hau", "hau", "hau", "hau", "hauei", "hauek", "hauetan",
    "hemen", "hemendik", "hemengo", "hi", "hona", "honebestez", "honek",
    "honela", "honela", "honela", "honen", "honen", "honetan", "honetaz",
    "honi", "hor", "hori", "hori", "hori", "horiei", "horiek", "horietan",
    "horko", "horra", "horratik", "horregatik", "horregatik", "horrek",
    "horrela", "horrela", "horrela", "horren", "horrenbestez", "horretan",
    "horri", "hortaz", "hortaz", "hortik", "hura", "ikusi", "ikusi",
    "izan", "izan", "izan", "jarraituz", "kariaz", "kasuaz", "kontuan",
    "laburbilduz", "laburki", "laster", "laster", "lehen", "lehen", "lehen",
    "lehen", "lehenengo", "lehenengo", "lehenik", "lehen-lehenik",
    "litzateke", "medio", "mendean", "mundura", "nahiz", "ni", "noiz",
    "nola", "non", "nondik", "nongo", "nor", "nora", "on", "ondoren",
    "ondorio", "ondorioz", "ondorioz", "orain", "ordea", "orduan",
    "orduan", "orduan", "orduko", "ordura", "orobat", "ostean", "ostera",
    "osterantzean", "pentsatuz", "ustez", "ze", "zein", "zein", "zen",
    "zen", "zenbait", "zenbat", "zer", "zeren", "zergatik", "zergatik",
    "ziren", "zituen", "zu", "zuek", "zuen", "zuten", "zuzen",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/russian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/russian.rs
/// Russian stopword list used by the full-text index when
/// `Language::Russian` is selected; matches the conventional NLTK-style
/// Russian list.
pub const RUSSIAN_STOPWORDS: &[&str] = &[ "и", "в", "во", "не", "что", "он", "на", "я", "с", "со", "как", "а", "то", "все", "она", "так", "его", "но", "да", "ты", "к", "у", "же", "вы", "за", "бы", "по", "только", "ее", "мне", "было", "вот", "от", "меня", "еще", "нет", "о", "из", "ему", "теперь", "когда", "даже", "ну", "вдруг", "ли", "если", "уже", "или", "ни", "быть", "был", "него", "до", "вас", "нибудь", "опять", "уж", "вам", "ведь", "там", "потом", "себя", "ничего", "ей", "может", "они", "тут", "где", "есть", "надо", "ней", "для", "мы", "тебя", "их", "чем", "была", "сам", "чтоб", "без", "будто", "чего", "раз", "тоже", "себе", "под", "будет", "ж", "тогда", "кто", "этот", "того", "потому", "этого", "какой", "совсем", "ним", "здесь", "этом", "один", "почти", "мой", "тем", "чтобы", "нее", "сейчас", "были", "куда", "зачем", "всех", "никогда", "можно", "при", "наконец", "два", "об", "другой", "хоть", "после", "над", "больше", "тот", "через", "эти", "нас", "про", "всего", "них", "какая", "много", "разве", "три", "эту", "моя", "впрочем", "хорошо", "свою", "этой", "перед", "иногда", "лучше", "чуть", "том", "нельзя", "такой", "им", "более", "всегда", "конечно", "всю", "между", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/arabic.rs
lib/segment/src/index/field_index/full_text_index/stop_words/arabic.rs
/// Arabic stopword list used by the full-text index when `Language::Arabic`
/// is selected. Beyond the usual particles and pronouns it also includes
/// month names, currency names, number words, weekday/ordinal words, and
/// single letters / letter names.
/// NOTE(review): the list contains repeated entries (e.g. "أما", "حيَّ",
/// "مكانَك") and multi-word entries (e.g. "لا سيما", "ما انفك") that a
/// single-token lookup can never match — presumably inherited from the
/// upstream source; confirm tokenizer behavior before pruning. Duplicates
/// are harmless because the filter inserts words into a set.
pub const ARABIC_STOPWORDS: &[&str] = &[ "إذ", "إذا", "إذما", "إذن", "أف", "أقل", "أكثر", "ألا", "إلا", "التي", "الذي", "الذين", "اللاتي", "اللائي", "اللتان", "اللتيا", "اللتين", "اللذان", "اللذين", "اللواتي", "إلى", "إليك", "إليكم", "إليكما", "إليكن", "أم", "أما", "أما", "إما", "أن", "إن", "إنا", "أنا", "أنت", "أنتم", "أنتما", "أنتن", "إنما", "إنه", "أنى", "أنى", "آه", "آها", "أو", "أولاء", "أولئك", "أوه", "آي", "أي", "أيها", "إي", "أين", "أين", "أينما", "إيه", "بخ", "بس", "بعد", "بعض", "بك", "بكم", "بكم", "بكما", "بكن", "بل", "بلى", "بما", "بماذا", "بمن", "بنا", "به", "بها", "بهم", "بهما", "بهن", "بي", "بين", "بيد", "تلك", "تلكم", "تلكما", "ته", "تي", "تين", "تينك", "ثم", "ثمة", "حاشا", "حبذا", "حتى", "حيث", "حيثما", "حين", "خلا", "دون", "ذا", "ذات", "ذاك", "ذان", "ذانك", "ذلك", "ذلكم", "ذلكما", "ذلكن", "ذه", "ذو", "ذوا", "ذواتا", "ذواتي", "ذي", "ذين", "ذينك", "ريث", "سوف", "سوى", "شتان", "عدا", "عسى", "عل", "على", "عليك", "عليه", "عما", "عن", "عند", "غير", "فإذا", "فإن", "فلا", "فمن", "في", "فيم", "فيما", "فيه", "فيها", "قد", "كأن", "كأنما", "كأي", "كأين", "كذا", "كذلك", "كل", "كلا", "كلاهما", "كلتا", "كلما", "كليكما", "كليهما", "كم", "كم", "كما", "كي", "كيت", "كيف", "كيفما", "لا", "لاسيما", "لدى", "لست", "لستم", "لستما", "لستن", "لسن", "لسنا", "لعل", "لك", "لكم", "لكما", "لكن", "لكنما", "لكي", "لكيلا", "لم", "لما", "لن", "لنا", "له", "لها", "لهم", "لهما", "لهن", "لو", "لولا", "لوما", "لي", "لئن", "ليت", "ليس", "ليسا", "ليست", "ليستا", "ليسوا", "ما", "ماذا", "متى", "مذ", "مع", "مما", "ممن", "من", "منه", "منها", "منذ", "مه", "مهما", "نحن", "نحو", "نعم", "ها", "هاتان", "هاته", "هاتي", "هاتين", "هاك", "هاهنا", "هذا", "هذان", "هذه", "هذي", "هذين", "هكذا", "هل", "هلا", "هم", "هما", "هن", "هنا", "هناك", "هنالك", "هو", "هؤلاء", "هي", "هيا", "هيت", "هيهات", "والذي", "والذين", "وإذ", "وإذا", "وإن", "ولا", "ولكن", "ولو", "وما", "ومن", "وهو", "يا", "أبٌ", "أخٌ", "حمٌ", "فو", "أنتِ", "يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", 
"نوفمبر", "ديسمبر", "جانفي", "فيفري", "مارس", "أفريل", "ماي", "جوان", "جويلية", "أوت", "كانون", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين", "دولار", "دينار", "ريال", "درهم", "ليرة", "جنيه", "قرش", "مليم", "فلس", "هللة", "سنتيم", "يورو", "ين", "يوان", "شيكل", "واحد", "اثنان", "ثلاثة", "أربعة", "خمسة", "ستة", "سبعة", "ثمانية", "تسعة", "عشرة", "أحد", "اثنا", "اثني", "إحدى", "ثلاث", "أربع", "خمس", "ست", "سبع", "ثماني", "تسع", "عشر", "ثمان", "سبت", "أحد", "اثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "أول", "ثان", "ثاني", "ثالث", "رابع", "خامس", "سادس", "سابع", "ثامن", "تاسع", "عاشر", "حادي", "أ", "ب", "ت", "ث", "ج", "ح", "خ", "د", "ذ", "ر", "ز", "س", "ش", "ص", "ض", "ط", "ظ", "ع", "غ", "ف", "ق", "ك", "ل", "م", "ن", "ه", "و", "ي", "ء", "ى", "آ", "ؤ", "ئ", "أ", "ة", "ألف", "باء", "تاء", "ثاء", "جيم", "حاء", "خاء", "دال", "ذال", "راء", "زاي", "سين", "شين", "صاد", "ضاد", "طاء", "ظاء", "عين", "غين", "فاء", "قاف", "كاف", "لام", "ميم", "نون", "هاء", "واو", "ياء", "همزة", "ي", "نا", "ك", "كن", "ه", "إياه", "إياها", "إياهما", "إياهم", "إياهن", "إياك", "إياكما", "إياكم", "إياك", "إياكن", "إياي", "إيانا", "أولالك", "تانِ", "تانِك", "تِه", "تِي", "تَيْنِ", "ثمّ", "ثمّة", "ذانِ", "ذِه", "ذِي", "ذَيْنِ", "هَؤلاء", "هَاتانِ", "هَاتِه", "هَاتِي", "هَاتَيْنِ", "هَذا", "هَذانِ", "هَذِه", "هَذِي", "هَذَيْنِ", "الألى", "الألاء", "أل", "أنّى", "أيّ", "ّأيّان", "أنّى", "أيّ", "ّأيّان", "ذيت", "كأيّ", "كأيّن", "بضع", "فلان", "وا", "آمينَ", "آهِ", "آهٍ", "آهاً", "أُفٍّ", "أُفٍّ", "أفٍّ", "أمامك", "أمامكَ", "أوّهْ", "إلَيْكَ", "إلَيْكَ", "إليكَ", "إليكنّ", "إيهٍ", "بخٍ", "بسّ", "بَسْ", "بطآن", "بَلْهَ", "حاي", "حَذارِ", "حيَّ", "حيَّ", "دونك", "رويدك", "سرعان", "شتانَ", "شَتَّانَ", "صهْ", "صهٍ", "طاق", "طَق", "عَدَسْ", "كِخ", "مكانَك", "مكانَك", "مكانَك", "مكانكم", "مكانكما", "مكانكنّ", "نَخْ", "هاكَ", "هَجْ", "هلم", "هيّا", "هَيْهات", "وا", "واهاً", "وراءَك", "وُشْكَانَ", "وَيْ", "يفعلان", "تفعلان", "يفعلون", "تفعلون", "تفعلين", "اتخذ", "ألفى", "تخذ", "ترك", 
"تعلَّم", "جعل", "حجا", "حبيب", "خال", "حسب", "خال", "درى", "رأى", "زعم", "صبر", "ظنَّ", "عدَّ", "علم", "غادر", "ذهب", "وجد", "ورد", "وهب", "أسكن", "أطعم", "أعطى", "رزق", "زود", "سقى", "كسا", "أخبر", "أرى", "أعلم", "أنبأ", "حدَث", "خبَّر", "نبَّا", "أفعل به", "ما أفعله", "بئس", "ساء", "طالما", "قلما", "لات", "لكنَّ", "ءَ", "أجل", "إذاً", "أمّا", "إمّا", "إنَّ", "أنًّ", "أى", "إى", "أيا", "ب", "ثمَّ", "جلل", "جير", "رُبَّ", "س", "علًّ", "ف", "كأنّ", "كلَّا", "كى", "ل", "لات", "لعلَّ", "لكنَّ", "لكنَّ", "م", "نَّ", "هلّا", "وا", "أل", "إلّا", "ت", "ك", "لمّا", "ن", "ه", "و", "ا", "ي", "تجاه", "تلقاء", "جميع", "حسب", "سبحان", "شبه", "لعمر", "مثل", "معاذ", "أبو", "أخو", "حمو", "فو", "مئة", "مئتان", "ثلاثمئة", "أربعمئة", "خمسمئة", "ستمئة", "سبعمئة", "ثمنمئة", "تسعمئة", "مائة", "ثلاثمائة", "أربعمائة", "خمسمائة", "ستمائة", "سبعمائة", "ثمانمئة", "تسعمائة", "عشرون", "ثلاثون", "اربعون", "خمسون", "ستون", "سبعون", "ثمانون", "تسعون", "عشرين", "ثلاثين", "اربعين", "خمسين", "ستين", "سبعين", "ثمانين", "تسعين", "بضع", "نيف", "أجمع", "جميع", "عامة", "عين", "نفس", "لا سيما", "أصلا", "أهلا", "أيضا", "بؤسا", "بعدا", "بغتة", "تعسا", "حقا", "حمدا", "خلافا", "خاصة", "دواليك", "سحقا", "سرا", "سمعا", "صبرا", "صدقا", "صراحة", "طرا", "عجبا", "عيانا", "غالبا", "فرادى", "فضلا", "قاطبة", "كثيرا", "لبيك", "معاذ", "أبدا", "إزاء", "أصلا", "الآن", "أمد", "أمس", "آنفا", "آناء", "أنّى", "أول", "أيّان", "تارة", "ثمّ", "ثمّة", "حقا", "صباح", "مساء", "ضحوة", "عوض", "غدا", "غداة", "قطّ", "كلّما", "لدن", "لمّا", "مرّة", "قبل", "خلف", "أمام", "فوق", "تحت", "يمين", "شمال", "ارتدّ", "استحال", "أصبح", "أضحى", "آض", "أمسى", "انقلب", "بات", "تبدّل", "تحوّل", "حار", "رجع", "راح", "صار", "ظلّ", "عاد", "غدا", "كان", "ما انفك", "ما برح", "مادام", "مازال", "مافتئ", "ابتدأ", "أخذ", "اخلولق", "أقبل", "انبرى", "أنشأ", "أوشك", "جعل", "حرى", "شرع", "طفق", "علق", "قام", "كرب", "كاد", "هبّ", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/chinese.rs
lib/segment/src/index/field_index/full_text_index/stop_words/chinese.rs
/// Chinese stop words for the full-text index's stop-word filter.
///
/// Contains high-frequency function words and connective phrases
/// (particles, pronouns, conjunctions, interjections, common adverbials),
/// plus a number of frequent multi-character discourse terms. Entries are
/// Simplified-Chinese strings matched as whole tokens.
// NOTE(review): assumes the tokenizer segments Chinese text into tokens that
// can match these multi-character entries — confirm against the tokenizer.
pub const CHINESE_STOPWORDS: &[&str] = &[ "一", "一下", "一些", "一切", "一则", "一天", "一定", "一方面", "一旦", "一时", "一来", "一样", "一次", "一片", "一直", "一致", "一般", "一起", "一边", "一面", "万一", "上下", "上升", "上去", "上来", "上述", "上面", "下列", "下去", "下来", "下面", "不一", "不久", "不仅", "不会", "不但", "不光", "不单", "不变", "不只", "不可", "不同", "不够", "不如", "不得", "不怕", "不惟", "不成", "不拘", "不敢", "不断", "不是", "不比", "不然", "不特", "不独", "不管", "不能", "不要", "不论", "不足", "不过", "不问", "与", "与其", "与否", "与此同时", "专门", "且", "两者", "严格", "严重", "个", "个人", "个别", "中小", "中间", "丰富", "临", "为", "为主", "为了", "为什么", "为什麽", "为何", "为着", "主张", "主要", "举行", "乃", "乃至", "么", "之", "之一", "之前", "之后", "之後", "之所以", "之类", "乌乎", "乎", "乘", "也", "也好", "也是", "也罢", "了", "了解", "争取", "于", "于是", "于是乎", "云云", "互相", "产生", "人们", "人家", "什么", "什么样", "什麽", "今后", "今天", "今年", "今後", "仍然", "从", "从事", "从而", "他", "他人", "他们", "他的", "代替", "以", "以上", "以下", "以为", "以便", "以免", "以前", "以及", "以后", "以外", "以後", "以来", "以至", "以至于", "以致", "们", "任", "任何", "任凭", "任务", "企图", "伟大", "似乎", "似的", "但", "但是", "何", "何况", "何处", "何时", "作为", "你", "你们", "你的", "使得", "使用", "例如", "依", "依照", "依靠", "促进", "保持", "俺", "俺们", "倘", "倘使", "倘或", "倘然", "倘若", "假使", "假如", "假若", "做到", "像", "允许", "充分", "先后", "先後", "先生", "全部", "全面", "兮", "共同", "关于", "其", "其一", "其中", "其二", "其他", "其余", "其它", "其实", "其次", "具体", "具体地说", "具体说来", "具有", "再者", "再说", "冒", "冲", "决定", "况且", "准备", "几", "几乎", "几时", "凭", "凭借", "出去", "出来", "出现", "分别", "则", "别", "别的", "别说", "到", "前后", "前者", "前进", "前面", "加之", "加以", "加入", "加强", "十分", "即", "即令", "即使", "即便", "即或", "即若", "却不", "原来", "又", "及", "及其", "及时", "及至", "双方", "反之", "反应", "反映", "反过来", "反过来说", "取得", "受到", "变成", "另", "另一方面", "另外", "只是", "只有", "只要", "只限", "叫", "叫做", "召开", "叮咚", "可", "可以", "可是", "可能", "可见", "各", "各个", "各人", "各位", "各地", "各种", "各级", "各自", "合理", "同", "同一", "同时", "同样", "后来", "后面", "向", "向着", "吓", "吗", "否则", "吧", "吧哒", "吱", "呀", "呃", "呕", "呗", "呜", "呜呼", "呢", "周围", "呵", "呸", "呼哧", "咋", "和", "咚", "咦", "咱", "咱们", "咳", "哇", "哈", "哈哈", "哉", "哎", "哎呀", "哎哟", "哗", "哟", "哦", "哩", "哪", "哪个", "哪些", "哪儿", "哪天", 
"哪年", "哪怕", "哪样", "哪边", "哪里", "哼", "哼唷", "唉", "啊", "啐", "啥", "啦", "啪达", "喂", "喏", "喔唷", "嗡嗡", "嗬", "嗯", "嗳", "嘎", "嘎登", "嘘", "嘛", "嘻", "嘿", "因", "因为", "因此", "因而", "固然", "在", "在下", "地", "坚决", "坚持", "基本", "处理", "复杂", "多", "多少", "多数", "多次", "大力", "大多数", "大大", "大家", "大批", "大约", "大量", "失去", "她", "她们", "她的", "好的", "好象", "如", "如上所述", "如下", "如何", "如其", "如果", "如此", "如若", "存在", "宁", "宁可", "宁愿", "宁肯", "它", "它们", "它们的", "它的", "安全", "完全", "完成", "实现", "实际", "宣布", "容易", "密切", "对", "对于", "对应", "将", "少数", "尔后", "尚且", "尤其", "就", "就是", "就是说", "尽", "尽管", "属于", "岂但", "左右", "巨大", "巩固", "己", "已经", "帮助", "常常", "并", "并不", "并不是", "并且", "并没有", "广大", "广泛", "应当", "应用", "应该", "开外", "开始", "开展", "引起", "强烈", "强调", "归", "当", "当前", "当时", "当然", "当着", "形成", "彻底", "彼", "彼此", "往", "往往", "待", "後来", "後面", "得", "得出", "得到", "心里", "必然", "必要", "必须", "怎", "怎么", "怎么办", "怎么样", "怎样", "怎麽", "总之", "总是", "总的来看", "总的来说", "总的说来", "总结", "总而言之", "恰恰相反", "您", "意思", "愿意", "慢说", "成为", "我", "我们", "我的", "或", "或是", "或者", "战斗", "所", "所以", "所有", "所谓", "打", "扩大", "把", "抑或", "拿", "按", "按照", "换句话说", "换言之", "据", "掌握", "接着", "接著", "故", "故此", "整个", "方便", "方面", "旁人", "无宁", "无法", "无论", "既", "既是", "既然", "时候", "明显", "明确", "是", "是否", "是的", "显然", "显著", "普通", "普遍", "更加", "曾经", "替", "最后", "最大", "最好", "最後", "最近", "最高", "有", "有些", "有关", "有利", "有力", "有所", "有效", "有时", "有点", "有的", "有着", "有著", "望", "朝", "朝着", "本", "本着", "来", "来着", "极了", "构成", "果然", "果真", "某", "某个", "某些", "根据", "根本", "欢迎", "正在", "正如", "正常", "此", "此外", "此时", "此间", "毋宁", "每", "每个", "每天", "每年", "每当", "比", "比如", "比方", "比较", "毫不", "没有", "沿", "沿着", "注意", "深入", "清楚", "满足", "漫说", "焉", "然则", "然后", "然後", "然而", "照", "照着", "特别是", "特殊", "特点", "现代", "现在", "甚么", "甚而", "甚至", "用", "由", "由于", "由此可见", "的", "的话", "目前", "直到", "直接", "相似", "相信", "相反", "相同", "相对", "相对而言", "相应", "相当", "相等", "省得", "看出", "看到", "看来", "看看", "看见", "真是", "真正", "着", "着呢", "矣", "知道", "确定", "离", "积极", "移动", "突出", "突然", "立即", "第", "等", "等等", "管", "紧接着", "纵", "纵令", "纵使", "纵然", "练习", "组成", "经", "经常", "经过", "结合", "结果", "给", "绝对", "继续", 
"继而", "维持", "综上所述", "罢了", "考虑", "者", "而", "而且", "而况", "而外", "而已", "而是", "而言", "联系", "能", "能否", "能够", "腾", "自", "自个儿", "自从", "自各儿", "自家", "自己", "自身", "至", "至于", "良好", "若", "若是", "若非", "范围", "莫若", "获得", "虽", "虽则", "虽然", "虽说", "行为", "行动", "表明", "表示", "被", "要", "要不", "要不是", "要不然", "要么", "要是", "要求", "规定", "觉得", "认为", "认真", "认识", "让", "许多", "论", "设使", "设若", "该", "说明", "诸位", "谁", "谁知", "赶", "起", "起来", "起见", "趁", "趁着", "越是", "跟", "转动", "转变", "转贴", "较", "较之", "边", "达到", "迅速", "过", "过去", "过来", "运用", "还是", "还有", "这", "这个", "这么", "这么些", "这么样", "这么点儿", "这些", "这会儿", "这儿", "这就是说", "这时", "这样", "这点", "这种", "这边", "这里", "这麽", "进入", "进步", "进而", "进行", "连", "连同", "适应", "适当", "适用", "逐步", "逐渐", "通常", "通过", "造成", "遇到", "遭到", "避免", "那", "那个", "那么", "那么些", "那么样", "那些", "那会儿", "那儿", "那时", "那样", "那边", "那里", "那麽", "部分", "鄙人", "采取", "里面", "重大", "重新", "重要", "鉴于", "问题", "防止", "阿", "附近", "限制", "除", "除了", "除此之外", "除非", "随", "随着", "随著", "集中", "需要", "非但", "非常", "非徒", "靠", "顺", "顺着", "首先", "高兴", "是不是", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/french.rs
lib/segment/src/index/field_index/full_text_index/stop_words/french.rs
/// French stop words for the full-text index's stop-word filter.
///
/// Covers articles, pronouns, prepositions, elided single-letter forms
/// ("c", "d", "j", "l", …), and the conjugated/participial forms of the
/// auxiliaries "être" and "avoir" (e.g. "suis", "étais", "ai", "aurait").
pub const FRENCH_STOPWORDS: &[&str] = &[ "au", "aux", "avec", "ce", "ces", "dans", "de", "des", "du", "elle", "en", "et", "eux", "il", "ils", "je", "la", "le", "les", "leur", "lui", "ma", "mais", "me", "même", "mes", "moi", "mon", "ne", "nos", "notre", "nous", "on", "ou", "par", "pas", "pour", "qu", "que", "qui", "sa", "se", "ses", "son", "sur", "ta", "te", "tes", "toi", "ton", "tu", "un", "une", "vos", "votre", "vous", "c", "d", "j", "l", "à", "m", "n", "s", "t", "y", "été", "étée", "étées", "étés", "étant", "étante", "étants", "étantes", "suis", "es", "est", "sommes", "êtes", "sont", "serai", "seras", "sera", "serons", "serez", "seront", "serais", "serait", "serions", "seriez", "seraient", "étais", "était", "étions", "étiez", "étaient", "fus", "fut", "fûmes", "fûtes", "furent", "sois", "soit", "soyons", "soyez", "soient", "fusse", "fusses", "fût", "fussions", "fussiez", "fussent", "ayant", "ayante", "ayantes", "ayants", "eu", "eue", "eues", "eus", "ai", "as", "avons", "avez", "ont", "aurai", "auras", "aura", "aurons", "aurez", "auront", "aurais", "aurait", "aurions", "auriez", "auraient", "avais", "avait", "avions", "aviez", "avaient", "eut", "eûmes", "eûtes", "eurent", "aie", "aies", "ait", "ayons", "ayez", "aient", "eusse", "eusses", "eût", "eussions", "eussiez", "eussent", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/hungarian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/hungarian.rs
/// Hungarian stop words for the full-text index's stop-word filter.
///
/// Common function words: articles, pronouns, conjunctions, postpositional
/// and adverbial forms. Includes both the abbreviated "ill." (with trailing
/// period) and bare "ill" variants of "illetve".
// NOTE(review): some entries use "õ" (o with tilde) rather than the standard
// Hungarian "ő" (o with double acute) — likely a legacy-encoding artifact;
// confirm against the tokenizer's character normalization.
pub const HUNGARIAN_STOPWORDS: &[&str] = &[ "a", "ahogy", "ahol", "aki", "akik", "akkor", "alatt", "által", "általában", "amely", "amelyek", "amelyekben", "amelyeket", "amelyet", "amelynek", "ami", "amit", "amolyan", "amíg", "amikor", "át", "abban", "ahhoz", "annak", "arra", "arról", "az", "azok", "azon", "azt", "azzal", "azért", "aztán", "azután", "azonban", "bár", "be", "belül", "benne", "cikk", "cikkek", "cikkeket", "csak", "de", "e", "eddig", "egész", "egy", "egyes", "egyetlen", "egyéb", "egyik", "egyre", "ekkor", "el", "elég", "ellen", "elõ", "elõször", "elõtt", "elsõ", "én", "éppen", "ebben", "ehhez", "emilyen", "ennek", "erre", "ez", "ezt", "ezek", "ezen", "ezzel", "ezért", "és", "fel", "felé", "hanem", "hiszen", "hogy", "hogyan", "igen", "így", "illetve", "ill.", "ill", "ilyen", "ilyenkor", "ison", "ismét", "itt", "jó", "jól", "jobban", "kell", "kellett", "keresztül", "keressünk", "ki", "kívül", "között", "közül", "legalább", "lehet", "lehetett", "legyen", "lenne", "lenni", "lesz", "lett", "maga", "magát", "majd", "majd", "már", "más", "másik", "meg", "még", "mellett", "mert", "mely", "melyek", "mi", "mit", "míg", "miért", "milyen", "mikor", "minden", "mindent", "mindenki", "mindig", "mint", "mintha", "mivel", "most", "nagy", "nagyobb", "nagyon", "ne", "néha", "nekem", "neki", "nem", "néhány", "nélkül", "nincs", "olyan", "ott", "össze", "õ", "õk", "õket", "pedig", "persze", "rá", "s", "saját", "sem", "semmi", "sok", "sokat", "sokkal", "számára", "szemben", "szerint", "szinte", "talán", "tehát", "teljes", "tovább", "továbbá", "több", "úgy", "ugyanis", "új", "újabb", "újra", "után", "utána", "utolsó", "vagy", "vagyis", "valaki", "valami", "valamint", "való", "vagyok", "van", "vannak", "volt", "voltam", "voltak", "voltunk", "vissza", "vele", "viszont", "volna", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/slovene.rs
lib/segment/src/index/field_index/full_text_index/stop_words/slovene.rs
/// Slovene stop words for the full-text index's stop-word filter.
///
/// A very large list: beyond conjunctions and particles it enumerates fully
/// inflected paradigms — number words across cases ("dve"/"dvema"/"dveh" …),
/// ordinals, demonstratives ("ta"/"tega"/"temu" …, including "le-" prefixed
/// forms), personal/possessive/reflexive pronouns, and auxiliary/modal verb
/// forms ("biti", "hoteti", "morati", …). Matching is by exact token string,
/// hence the exhaustive per-case enumeration.
pub const SLOVENE_STOPWORDS: &[&str] = &[ "ali", "ampak", "bodisi", "in", "kajti", "marveč", "namreč", "ne", "niti", "oziroma", "pa", "saj", "sicer", "temveč", "ter", "toda", "torej", "vendar", "vendarle", "zakaj", "če", "čeprav", "čeravno", "četudi", "čim", "da", "kadar", "kakor", "ker", "ki", "ko", "kot", "naj", "najsi", "odkar", "preden", "dve", "dvema", "dveh", "šest", "šestdeset", "šestindvajset", "šestintrideset", "šestnajst", "šeststo", "štiri", "štirideset", "štiriindvajset", "štirinajst", "štiristo", "deset", "devet", "devetdeset", "devetintrideset", "devetnajst", "devetsto", "dvainšestdeset", "dvaindvajset", "dvajset", "dvanajst", "dvesto", "enaindvajset", "enaintrideset", "enajst", "nič", "osem", "osemdeset", "oseminštirideset", "osemindevetdeset", "osemnajst", "pet", "petdeset", "petinštirideset", "petindevetdeset", "petindvajset", "petinosemdeset", "petinpetdeset", "petinsedemdeset", "petintrideset", "petnajst", "petsto", "sedem", "sedemdeset", "sedeminšestdeset", "sedemindvajset", "sedeminpetdeset", "sedemnajst", "sedemsto", "sto", "tisoč", "tri", "trideset", "triinšestdeset", "triindvajset", "triinpetdeset", "trinajst", "tristo", "šestdesetim", "šestim", "šestindvajsetim", "šestintridesetim", "šestnajstim", "šeststotim", "štiridesetim", "štiriindvajsetim", "štirim", "štirinajstim", "štiristotim", "desetim", "devetdesetim", "devetim", "devetintridesetim", "devetnajstim", "devetstotim", "dvainšestdesetim", "dvaindvajsetim", "dvajsetim", "dvanajstim", "dvestotim", "enaindvajsetim", "enaintridesetim", "enajstim", "osemdesetim", "oseminštiridesetim", "osemindevetdesetim", "osemnajstim", "osmim", "petdesetim", "petim", "petinštiridesetim", "petindevetdesetim", "petindvajsetim", "petinosemdesetim", "petinpetdesetim", "petinsedemdesetim", "petintridesetim", "petnajstim", "petstotim", "sedemdesetim", "sedeminšestdesetim", "sedemindvajsetim", "sedeminpetdesetim", "sedemnajstim", "sedemstotim", "sedmim", "stotim", "tisočim", "trem", "tridesetim", 
"triinšestdesetim", "triindvajsetim", "triinpetdesetim", "trinajstim", "tristotim", "šestdesetih", "šestih", "šestindvajsetih", "šestintridesetih", "šestnajstih", "šeststotih", "štiridesetih", "štirih", "štiriindvajsetih", "štirinajstih", "štiristotih", "desetih", "devetdesetih", "devetih", "devetintridesetih", "devetnajstih", "devetstotih", "dvainšestdesetih", "dvaindvajsetih", "dvajsetih", "dvanajstih", "dvestotih", "enaindvajsetih", "enaintridesetih", "enajstih", "osemdesetih", "oseminštiridesetih", "osemindevetdesetih", "osemnajstih", "osmih", "petdesetih", "petih", "petinštiridesetih", "petindevetdesetih", "petindvajsetih", "petinosemdesetih", "petinpetdesetih", "petinsedemdesetih", "petintridesetih", "petnajstih", "petstotih", "sedemdesetih", "sedeminšestdesetih", "sedemindvajsetih", "sedeminpetdesetih", "sedemnajstih", "sedemstotih", "sedmih", "stotih", "tisočih", "treh", "tridesetih", "triinšestdesetih", "triindvajsetih", "triinpetdesetih", "trinajstih", "tristotih", "šestdesetimi", "šestimi", "šestindvajsetimi", "šestintridesetimi", "šestnajstimi", "šeststotimi", "štiridesetimi", "štiriindvajsetimi", "štirimi", "štirinajstimi", "štiristotimi", "desetimi", "devetdesetimi", "devetimi", "devetintridesetimi", "devetnajstimi", "devetstotimi", "dvainšestdesetimi", "dvaindvajsetimi", "dvajsetimi", "dvanajstimi", "dvestotimi", "enaindvajsetimi", "enaintridesetimi", "enajstimi", "osemdesetimi", "oseminštiridesetimi", "osemindevetdesetimi", "osemnajstimi", "osmimi", "petdesetimi", "petimi", "petinštiridesetimi", "petindevetdesetimi", "petindvajsetimi", "petinosemdesetimi", "petinpetdesetimi", "petinsedemdesetimi", "petintridesetimi", "petnajstimi", "petstotimi", "sedemdesetimi", "sedeminšestdesetimi", "sedemindvajsetimi", "sedeminpetdesetimi", "sedemnajstimi", "sedemstotimi", "sedmimi", "stotimi", "tisočimi", "tremi", "tridesetimi", "triinšestdesetimi", "triindvajsetimi", "triinpetdesetimi", "trinajstimi", "tristotimi", "eno", "eni", "ene", "ena", "dva", "štirje", 
"trije", "en", "enega", "enemu", "enim", "enem", "eden", "dvojni", "trojni", "dvojnima", "trojnima", "dvojnih", "trojnih", "dvojne", "trojne", "dvojnim", "trojnim", "dvojnimi", "trojnimi", "dvojno", "trojno", "dvojna", "trojna", "dvojnega", "trojnega", "dvojen", "trojen", "dvojnemu", "trojnemu", "dvojnem", "trojnem", "četrti", "šestdeseti", "šesti", "šestnajsti", "štirideseti", "štiriindvajseti", "štirinajsti", "deseti", "devetdeseti", "deveti", "devetnajsti", "drugi", "dvaindevetdeseti", "dvajseti", "dvanajsti", "dvestoti", "enaindvajseti", "enajsti", "osemdeseti", "osemnajsti", "osmi", "petdeseti", "peti", "petinštirideseti", "petindvajseti", "petinosemdeseti", "petintrideseti", "petnajsti", "prvi", "sedemdeseti", "sedemindvajseti", "sedemnajsti", "sedmi", "stoti", "tisoči", "tretji", "trideseti", "triindvajseti", "triintrideseti", "trinajsti", "tristoti", "četrtima", "šestdesetima", "šestima", "šestnajstima", "štiridesetima", "štiriindvajsetima", "štirinajstima", "desetima", "devetdesetima", "devetima", "devetnajstima", "drugima", "dvaindevetdesetima", "dvajsetima", "dvanajstima", "dvestotima", "enaindvajsetima", "enajstima", "osemdesetima", "osemnajstima", "osmima", "petdesetima", "petima", "petinštiridesetima", "petindvajsetima", "petinosemdesetima", "petintridesetima", "petnajstima", "prvima", "sedemdesetima", "sedemindvajsetima", "sedemnajstima", "sedmima", "stotima", "tisočima", "tretjima", "tridesetima", "triindvajsetima", "triintridesetima", "trinajstima", "tristotima", "četrtih", "drugih", "dvaindevetdesetih", "prvih", "tretjih", "triintridesetih", "četrte", "šestdesete", "šeste", "šestnajste", "štiridesete", "štiriindvajsete", "štirinajste", "desete", "devetdesete", "devete", "devetnajste", "druge", "dvaindevetdesete", "dvajsete", "dvanajste", "dvestote", "enaindvajsete", "enajste", "osemdesete", "osemnajste", "osme", "petdesete", "pete", "petinštiridesete", "petindvajsete", "petinosemdesete", "petintridesete", "petnajste", "prve", "sedemdesete", 
"sedemindvajsete", "sedemnajste", "sedme", "stote", "tisoče", "tretje", "tridesete", "triindvajsete", "triintridesete", "trinajste", "tristote", "četrtim", "drugim", "dvaindevetdesetim", "prvim", "tretjim", "triintridesetim", "četrtimi", "drugimi", "dvaindevetdesetimi", "prvimi", "tretjimi", "triintridesetimi", "četrto", "šestdeseto", "šestnajsto", "šesto", "štirideseto", "štiriindvajseto", "štirinajsto", "deseto", "devetdeseto", "devetnajsto", "deveto", "drugo", "dvaindevetdeseto", "dvajseto", "dvanajsto", "dvestoto", "enaindvajseto", "enajsto", "osemdeseto", "osemnajsto", "osmo", "petdeseto", "petinštirideseto", "petindvajseto", "petinosemdeseto", "petintrideseto", "petnajsto", "peto", "prvo", "sedemdeseto", "sedemindvajseto", "sedemnajsto", "sedmo", "stoto", "tisočo", "tretjo", "trideseto", "triindvajseto", "triintrideseto", "trinajsto", "tristoto", "četrta", "šesta", "šestdeseta", "šestnajsta", "štirideseta", "štiriindvajseta", "štirinajsta", "deseta", "deveta", "devetdeseta", "devetnajsta", "druga", "dvaindevetdeseta", "dvajseta", "dvanajsta", "dvestota", "enaindvajseta", "enajsta", "osemdeseta", "osemnajsta", "osma", "peta", "petdeseta", "petinštirideseta", "petindvajseta", "petinosemdeseta", "petintrideseta", "petnajsta", "prva", "sedemdeseta", "sedemindvajseta", "sedemnajsta", "sedma", "stota", "tisoča", "tretja", "trideseta", "triindvajseta", "triintrideseta", "trinajsta", "tristota", "četrtega", "šestdesetega", "šestega", "šestnajstega", "štiridesetega", "štiriindvajsetega", "štirinajstega", "desetega", "devetdesetega", "devetega", "devetnajstega", "drugega", "dvaindevetdesetega", "dvajsetega", "dvanajstega", "dvestotega", "enaindvajsetega", "enajstega", "osemdesetega", "osemnajstega", "osmega", "petdesetega", "petega", "petinštiridesetega", "petindvajsetega", "petinosemdesetega", "petintridesetega", "petnajstega", "prvega", "sedemdesetega", "sedemindvajsetega", "sedemnajstega", "sedmega", "stotega", "tisočega", "tretjega", "tridesetega", 
"triindvajsetega", "triintridesetega", "trinajstega", "tristotega", "četrtemu", "šestdesetemu", "šestemu", "šestnajstemu", "štiridesetemu", "štiriindvajsetemu", "štirinajstemu", "desetemu", "devetdesetemu", "devetemu", "devetnajstemu", "drugemu", "dvaindevetdesetemu", "dvajsetemu", "dvanajstemu", "dvestotemu", "enaindvajsetemu", "enajstemu", "osemdesetemu", "osemnajstemu", "osmemu", "petdesetemu", "petemu", "petinštiridesetemu", "petindvajsetemu", "petinosemdesetemu", "petintridesetemu", "petnajstemu", "prvemu", "sedemdesetemu", "sedemindvajsetemu", "sedemnajstemu", "sedmemu", "stotemu", "tisočemu", "tretjemu", "tridesetemu", "triindvajsetemu", "triintridesetemu", "trinajstemu", "tristotemu", "četrtem", "šestdesetem", "šestem", "šestnajstem", "štiridesetem", "štiriindvajsetem", "štirinajstem", "desetem", "devetdesetem", "devetem", "devetnajstem", "drugem", "dvaindevetdesetem", "dvajsetem", "dvanajstem", "dvestotem", "enaindvajsetem", "enajstem", "osemdesetem", "osemnajstem", "osmem", "petdesetem", "petem", "petinštiridesetem", "petindvajsetem", "petinosemdesetem", "petintridesetem", "petnajstem", "prvem", "sedemdesetem", "sedemindvajsetem", "sedemnajstem", "sedmem", "stotem", "tisočem", "tretjem", "tridesetem", "triindvajsetem", "triintridesetem", "trinajstem", "tristotem", "deseteri", "dvakratni", "dvoji", "enkratni", "peteri", "stoteri", "tisočeri", "trikratni", "troji", "deseterima", "dvakratnima", "dvojima", "enkratnima", "peterima", "stoterima", "tisočerima", "trikratnima", "trojima", "deseterih", "dvakratnih", "dvojih", "enkratnih", "peterih", "stoterih", "tisočerih", "trikratnih", "trojih", "desetere", "dvakratne", "dvoje", "enkratne", "petere", "stotere", "tisočere", "trikratne", "troje", "deseterim", "dvakratnim", "dvojim", "enkratnim", "peterim", "stoterim", "tisočerim", "trikratnim", "trojim", "deseterimi", "dvakratnimi", "dvojimi", "enkratnimi", "peterimi", "stoterimi", "tisočerimi", "trikratnimi", "trojimi", "desetero", "dvakratno", "dvojo", 
"enkratno", "petero", "stotero", "tisočero", "trikratno", "trojo", "desetera", "dvakratna", "dvoja", "enkratna", "petera", "stotera", "tisočera", "trikratna", "troja", "deseterega", "dvakratnega", "dvojega", "enkratnega", "peterega", "stoterega", "tisočerega", "trikratnega", "trojega", "deseter", "dvakraten", "dvoj", "enkraten", "peter", "stoter", "tisočer", "trikraten", "troj", "deseteremu", "dvakratnemu", "dvojemu", "enkratnemu", "peteremu", "stoteremu", "tisočeremu", "trikratnemu", "trojemu", "deseterem", "dvakratnem", "dvojem", "enkratnem", "peterem", "stoterem", "tisočerem", "trikratnem", "trojem", "le-onega", "le-tega", "le-tistega", "le-toliko", "onega", "tega", "tistega", "toliko", "le-oni", "le-takšni", "le-taki", "le-te", "le-ti", "le-tisti", "oni", "takšni", "taki", "te", "ti", "tisti", "le-onima", "le-takšnima", "le-takima", "le-tema", "le-tistima", "onima", "takšnima", "takima", "tema", "tistima", "le-onih", "le-takšnih", "le-takih", "le-teh", "le-tistih", "onih", "takšnih", "takih", "teh", "tistih", "le-one", "le-takšne", "le-take", "le-tiste", "one", "takšne", "take", "tiste", "le-onim", "le-takšnim", "le-takim", "le-tem", "le-tistim", "onim", "takšnim", "takim", "tem", "tistim", "le-onimi", "le-takšnimi", "le-takimi", "le-temi", "le-tistimi", "onimi", "takšnimi", "takimi", "temi", "tistimi", "le-ono", "le-takšno", "le-tako", "le-tisto", "le-to", "ono", "takšno", "tako", "tisto", "to", "le-tej", "tej", "le-ona", "le-ta", "le-takšna", "le-taka", "le-tista", "ona", "ta", "takšna", "taka", "tista", "le-tak", "le-takšen", "tak", "takšen", "le-takšnega", "le-takega", "takšnega", "takega", "le-onemu", "le-takšnemu", "le-takemu", "le-temu", "le-tistemu", "onemu", "takšnemu", "takemu", "temu", "temuintemu", "tistemu", "le-onem", "le-takšnem", "le-takem", "le-tistem", "onem", "takšnem", "takem", "tistem", "vsakogar", "vsakomur", "vsakomer", "vsakdo", "obe", "vsaki", "vsakršni", "vsi", "obema", "vsakima", "vsakršnima", "vsema", "obeh", "vsakih", "vsakršnih", 
"vseh", "vsake", "vsakršne", "vse", "vsakim", "vsakršnim", "vsem", "vsakimi", "vsakršnimi", "vsemi", "vsako", "vsakršno", "vso", "vsej", "vsa", "vsaka", "vsakršna", "oba", "ves", "vsak", "vsakršen", "vsakega", "vsakršnega", "vsega", "vsakemu", "vsakršnemu", "vsemu", "vsakem", "vsakršnem", "enako", "istega", "koliko", "mnogo", "nekoga", "nekoliko", "precej", "kaj", "koga", "marsikaj", "marsikoga", "nekaj", "čemu", "komu", "marsičemu", "marsikomu", "nečemu", "nekomu", "česa", "marsičesa", "nečesa", "kom", "marsičim", "marsikom", "nečim", "nekom", "čem", "marsičem", "nečem", "kdo", "marsikdo", "nekdo", "čigavi", "drugačni", "enaki", "isti", "kakšni", "kaki", "kakršnikoli", "kateri", "katerikoli", "kolikšni", "koliki", "marsikateri", "nekakšni", "nekaki", "nekateri", "neki", "takile", "tele", "tile", "tolikšni", "toliki", "čigavima", "drugačnima", "enakima", "enima", "istima", "kakšnima", "kakima", "kakršnimakoli", "katerima", "katerimakoli", "kolikšnima", "kolikima", "marsikaterima", "nekakšnima", "nekakima", "nekaterima", "nekima", "takimale", "temale", "tolikšnima", "tolikima", "čigavih", "drugačnih", "enakih", "enih", "istih", "kakšnih", "kakih", "kakršnihkoli", "katerih", "katerihkoli", "kolikšnih", "kolikih", "marsikaterih", "nekakšnih", "nekakih", "nekaterih", "nekih", "takihle", "tehle", "tolikšnih", "tolikih", "čigave", "drugačne", "enake", "iste", "kakšne", "kake", "kakršnekoli", "katere", "katerekoli", "kolikšne", "kolike", "marsikatere", "nekakšne", "nekake", "nekatere", "neke", "takele", "tolikšne", "tolike", "čigavim", "drugačnim", "enakim", "istim", "kakšnim", "kakim", "kakršnimkoli", "katerim", "katerimkoli", "kolikšnim", "kolikim", "marsikaterim", "nekakšnim", "nekakim", "nekaterim", "nekim", "takimle", "temle", "tolikšnim", "tolikim", "čigavimi", "drugačnimi", "enakimi", "enimi", "istimi", "kakšnimi", "kakimi", "kakršnimikoli", "katerimi", "katerimikoli", "kolikšnimi", "kolikimi", "marsikaterimi", "nekakšnimi", "nekakimi", "nekaterimi", "nekimi", 
"takimile", "temile", "tolikšnimi", "tolikimi", "čigavo", "drugačno", "isto", "kakšno", "kako", "kakršnokoli", "katero", "katerokoli", "kolikšno", "marsikatero", "nekakšno", "nekako", "nekatero", "neko", "takole", "tole", "tolikšno", "tejle", "čigava", "drugačna", "enaka", "ista", "kakšna", "kaka", "kakršnakoli", "katera", "katerakoli", "kolikšna", "kolika", "marsikatera", "neka", "nekakšna", "nekaka", "nekatera", "takale", "tale", "tolikšna", "tolika", "čigav", "drug", "drugačen", "enak", "kak", "kakšen", "kakršenkoli", "kakršnegakoli", "kateregakoli", "kolik", "kolikšen", "nek", "nekak", "nekakšen", "takegale", "takle", "tegale", "tolik", "tolikšen", "čigavega", "drugačnega", "enakega", "kakšnega", "kakega", "katerega", "kolikšnega", "kolikega", "marsikaterega", "nekakšnega", "nekakega", "nekaterega", "nekega", "tolikšnega", "tolikega", "čigavemu", "drugačnemu", "enakemu", "istemu", "kakšnemu", "kakemu", "kakršnemukoli", "kateremu", "kateremukoli", "kolikšnemu", "kolikemu", "marsikateremu", "nekakšnemu", "nekakemu", "nekateremu", "nekemu", "takemule", "temule", "tolikšnemu", "tolikemu", "čigavem", "drugačnem", "enakem", "istem", "kakšnem", "kakem", "kakršnemkoli", "katerem", "kateremkoli", "kolikšnem", "kolikem", "marsikaterem", "nekakšnem", "nekakem", "nekaterem", "nekem", "takemle", "tolikšnem", "tolikem", "naju", "nama", "midva", "nas", "nam", "nami", "mi", "mene", "me", "meni", "mano", "menoj", "jaz", "vaju", "vama", "vidva", "vas", "vam", "vami", "vi", "tebe", "tebi", "tabo", "teboj", "njiju", "jih", "ju", "njima", "jima", "onedve", "onidve", "nje", "njih", "njim", "jim", "njimi", "njo", "jo", "njej", "nji", "ji", "je", "onadva", "njega", "ga", "njemu", "mu", "njem", "on", "čigar", "kolikor", "kar", "karkoli", "kogar", "kogarkoli", "čemur", "čemurkoli", "komur", "komurkoli", "česar", "česarkoli", "čimer", "čimerkoli", "komer", "komerkoli", "čemer", "čemerkoli", "kdor", "kdorkoli", "kakršni", "kakršnima", "kakršnih", "kakršne", "kakršnim", "kakršnimi", 
"kakršno", "kakršna", "kakršen", "kakršnega", "kakršnemu", "kakršnem", "najini", "naši", "moji", "najinima", "našima", "mojima", "najinih", "naših", "mojih", "najine", "naše", "moje", "najinim", "našim", "mojim", "najinimi", "našimi", "mojimi", "najino", "našo", "mojo", "najina", "naša", "moja", "najin", "najinega", "naš", "našega", "moj", "mojega", "najinemu", "našemu", "mojemu", "najinem", "našem", "mojem", "vajini", "vaši", "tvoji", "vajinima", "vašima", "tvojima", "vajinih", "vaših", "tvojih", "vajine", "vaše", "tvoje", "vajinim", "vašim", "tvojim", "vajinimi", "vašimi", "tvojimi", "vajino", "vašo", "tvojo", "vajina", "vaša", "tvoja", "vajin", "vajinega", "vaš", "vašega", "tvoj", "tvojega", "vajinemu", "vašemu", "tvojemu", "vajinem", "vašem", "tvojem", "njuni", "njihovi", "njeni", "njegovi", "njunima", "njihovima", "njenima", "njegovima", "njunih", "njihovih", "njenih", "njegovih", "njune", "njihove", "njene", "njegove", "njunim", "njihovim", "njenim", "njegovim", "njunimi", "njihovimi", "njenimi", "njegovimi", "njuno", "njihovo", "njeno", "njegovo", "njuna", "njihova", "njena", "njegova", "njun", "njunega", "njihov", "njihovega", "njen", "njenega", "njegov", "njegovega", "njunemu", "njihovemu", "njenemu", "njegovemu", "njunem", "njihovem", "njenem", "njegovem", "se", "si", "sebe", "sebi", "sabo", "seboj", "svoji", "svojima", "svojih", "svoje", "svojim", "svojimi", "svojo", "svoja", "svoj", "svojega", "svojemu", "svojem", "nikogar", "noben", "ničemur", "nikomur", "ničesar", "ničimer", "nikomer", "ničemer", "nihče", "nikakršni", "nobeni", "nikakršnima", "nobenima", "nikakršnih", "nobenih", "nikakršne", "nobene", "nikakršnim", "nobenim", "nikakršnimi", "nobenimi", "nikakršno", "nobeno", "nikakršna", "nobena", "nikakršen", "nikakršnega", "nobenega", "nikakršnemu", "nobenemu", "nikakršnem", "nobenem", "še", "šele", "žal", "že", "baje", "bojda", "bržčas", "bržkone", "celo", "dobesedno", "domala", "edinole", "gotovo", "itak", "ja", "kajne", "kajpada", "kajpak", 
"koli", "komaj", "le", "malone", "mar", "menda", "morda", "morebiti", "nadvse", "najbrž", "nemara", "nerad", "neradi", "nikar", "pač", "pogodu", "prav", "pravzaprav", "predvsem", "preprosto", "rad", "rada", "rade", "radi", "ravno", "res", "resda", "samo", "seveda", "skoraj", "skorajda", "spet", "sploh", "tudi", "všeč", "verjetno", "vnovič", "vred", "vsaj", "zadosti", "zapored", "zares", "zgolj", "zlasti", "zopet", "čezenj", "čeznje", "mednje", "mednju", "medse", "nadenj", "nadme", "nadnje", "name", "nanj", "nanje", "nanjo", "nanju", "nase", "nate", "obenj", "podnjo", "pome", "ponj", "ponje", "ponjo", "pote", "predenj", "predme", "prednje", "predse", "skozenj", "skoznje", "skoznjo", "skozte", "vame", "vanj", "vanje", "vanjo", "vanju", "vase", "vate", "zame", "zanj", "zanje", "zanjo", "zanju", "zase", "zate", "čez", "med", "na", "nad", "ob", "po", "pod", "pred", "raz", "skoz", "skozi", "v", "za", "zoper", "h", "k", "kljub", "nasproti", "navkljub", "navzlic", "proti", "ž", "blizu", "brez", "dno", "do", "iz", "izmed", "iznad", "izpod", "izpred", "izven", "izza", "krog", "mimo", "namesto", "naokoli", "naproti", "od", "okoli", "okrog", "onkraj", "onstran", "poleg", "povrh", "povrhu", "prek", "preko", "razen", "s", "spod", "spričo", "sredi", "vštric", "vpričo", "vrh", "vrhu", "vzdolž", "z", "zaradi", "zavoljo", "zraven", "zunaj", "o", "pri", "bi", "bova", "bomo", "bom", "bosta", "boste", "boš", "bodo", "bojo", "bo", "sva", "nisva", "smo", "nismo", "sem", "nisem", "sta", "nista", "ste", "niste", "nisi", "so", "niso", "ni", "bodiva", "bodimo", "bodita", "bodite", "bodi", "biti", "bili", "bila", "bile", "bil", "bilo", "želiva", "dovoliva", "hočeva", "marava", "morava", "moreva", "smeva", "zmoreva", "nočeva", "želimo", "dovolimo", "hočemo", "maramo", "moramo", "moremo", "smemo", "zmoremo", "nočemo", "želim", "dovolim", "hočem", "maram", "moram", "morem", "smem", "zmorem", "nočem", "želita", "dovolita", "hočeta", "marata", "morata", "moreta", "smeta", "zmoreta", "nočeta", 
"želite", "dovolite", "hočete", "marate", "morate", "morete", "smete", "zmorete", "nočete", "želiš", "dovoliš", "hočeš", "maraš", "moraš", "moreš", "smeš", "zmoreš", "nočeš", "želijo", "dovolijo", "hočejo", "marajo", "morajo", "morejo", "smejo", "zmorejo", "nočejo", "želi", "dovoli", "hoče", "mara", "mora", "more", "sme", "zmore", "noče", "hotiva", "marajva", "hotimo", "marajmo", "hotita", "marajta", "hotite", "marajte", "hoti", "maraj", "želeti", "dovoliti", "hoteti", "marati", "moči", "morati", "smeti", "zmoči", "želeni", "dovoljeni", "želena", "dovoljena", "želene", "dovoljene", "želen", "dovoljen", "želeno", "dovoljeno", "želeli", "dovolili", "hoteli", "marali", "mogli", "morali", "smeli", "zmogli", "želela", "dovolila", "hotela", "marala", "mogla", "morala", "smela", "zmogla", "želele", "dovolile", "hotele", "marale", "mogle", "morale", "smele", "zmogle", "želel", "dovolil", "hotel", "maral", "mogel", "moral", "smel", "zmogel", "želelo", "dovolilo", "hotelo", "maralo", "moglo", "moralo", "smelo", "zmogl", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/norwegian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/norwegian.rs
/// Norwegian (bokmål and nynorsk) stop-word list for the full-text index.
///
/// Duplicate entries present in the previous version of this list
/// ("som", "ikkje", "være", "si") have been removed; the relative order of
/// the remaining words is unchanged, so lookup behavior is identical.
pub const NORWEGIAN_STOPWORDS: &[&str] = &[
    "og", "i", "jeg", "det", "at", "en", "et", "den", "til", "er",
    "som", "på", "de", "med", "han", "av", "ikke", "ikkje", "der", "så",
    "var", "meg", "seg", "men", "ett", "har", "om", "vi", "min", "mitt",
    "ha", "hadde", "hun", "nå", "over", "da", "ved", "fra", "du", "ut",
    "sin", "dem", "oss", "opp", "man", "kan", "hans", "hvor", "eller", "hva",
    "skal", "selv", "sjøl", "her", "alle", "vil", "bli", "ble", "blei", "blitt",
    "kunne", "inn", "når", "være", "kom", "noen", "noe", "ville", "dere", "deres",
    "kun", "ja", "etter", "ned", "skulle", "denne", "for", "deg", "si", "sine",
    "sitt", "mot", "å", "meget", "hvorfor", "dette", "disse", "uten", "hvordan", "ingen",
    "din", "ditt", "blir", "samme", "hvilken", "hvilke", "sånn", "inni", "mellom", "vår",
    "hver", "hvem", "vors", "hvis", "både", "bare", "enn", "fordi", "før", "mange",
    "også", "slik", "vært", "båe", "begge", "siden", "dykk", "dykkar", "dei", "deira",
    "deires", "deim", "di", "då", "eg", "ein", "eit", "eitt", "elles", "honom",
    "hjå", "ho", "hoe", "henne", "hennar", "hennes", "hoss", "hossen", "ingi", "inkje",
    "korleis", "korso", "kva", "kvar", "kvarhelst", "kven", "kvi", "kvifor", "me", "medan",
    "mi", "mine", "mykje", "no", "nokon", "noka", "nokor", "noko", "nokre", "sia",
    "sidan", "so", "somt", "somme", "um", "upp", "vere", "vore", "verte", "vort",
    "varte", "vart",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/dutch.rs
lib/segment/src/index/field_index/full_text_index/stop_words/dutch.rs
/// Dutch stop-word list for the full-text index; tokens matching any entry
/// are filtered out before indexing/search.
pub const DUTCH_STOPWORDS: &[&str] = &[ "de", "en", "van", "ik", "te", "dat", "die", "in", "een", "hij", "het", "niet", "zijn", "is", "was", "op", "aan", "met", "als", "voor", "had", "er", "maar", "om", "hem", "dan", "zou", "of", "wat", "mijn", "men", "dit", "zo", "door", "over", "ze", "zich", "bij", "ook", "tot", "je", "mij", "uit", "der", "daar", "haar", "naar", "heb", "hoe", "heeft", "hebben", "deze", "u", "want", "nog", "zal", "me", "zij", "nu", "ge", "geen", "omdat", "iets", "worden", "toch", "al", "waren", "veel", "meer", "doen", "toen", "moet", "ben", "zonder", "kan", "hun", "dus", "alles", "onder", "ja", "eens", "hier", "wie", "werd", "altijd", "doch", "wordt", "wezen", "kunnen", "ons", "zelf", "tegen", "na", "reeds", "wil", "kon", "niets", "uw", "iemand", "geweest", "andere", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/catalan.rs
lib/segment/src/index/field_index/full_text_index/stop_words/catalan.rs
/// Catalan stop-word list for the full-text index.
///
/// NOTE(review): this list contains multi-word entries ("des de", "per que");
/// if the tokenizer only ever emits single tokens, these entries can never
/// match — confirm against the tokenizer's behavior.
pub const CATALAN_STOPWORDS: &[&str] = &[ "a", "abans", "ací", "ah", "així", "això", "al", "aleshores", "algun", "alguna", "algunes", "alguns", "alhora", "allà", "allí", "allò", "als", "altra", "altre", "altres", "amb", "ambdues", "ambdós", "anar", "ans", "apa", "aquell", "aquella", "aquelles", "aquells", "aquest", "aquesta", "aquestes", "aquests", "aquí", "baix", "bastant", "bé", "cada", "cadascuna", "cadascunes", "cadascuns", "cadascú", "com", "consegueixo", "conseguim", "conseguir", "consigueix", "consigueixen", "consigueixes", "contra", "d'un", "d'una", "d'unes", "d'uns", "dalt", "de", "del", "dels", "des", "des de", "després", "dins", "dintre", "donat", "doncs", "durant", "d", "e", "eh", "el", "elles", "ells", "els", "em", "en", "encara", "ens", "entre", "era", "erem", "eren", "eres", "es", "esta", "estan", "estat", "estava", "estaven", "estem", "esteu", "estic", "està", "estàvem", "estàveu", "et", "etc", "ets", "fa", "faig", "fan", "fas", "fem", "fer", "feu", "fi", "fins", "fora", "gairebé", "ha", "han", "has", "haver", "havia", "he", "hem", "heu", "hi", "ho", "i", "igual", "iguals", "inclòs", "ja", "jo", "l'hi", "la", "les", "li", "li'n", "llarg", "llavors", "m'he", "ma", "mal", "malgrat", "mateix", "mateixa", "mateixes", "mateixos", "me", "mentre", "meu", "meus", "meva", "meves", "mode", "molt", "molta", "moltes", "molts", "mon", "mons", "més", "n'he", "n'hi", "ne", "ni", "no", "nogensmenys", "només", "nosaltres", "nostra", "nostre", "nostres", "o", "oh", "oi", "on", "pas", "pel", "pels", "per", "per que", "perquè", "però", "poc", "poca", "pocs", "podem", "poden", "poder", "podeu", "poques", "potser", "primer", "propi", "puc", "qual", "quals", "quan", "quant", "que", "quelcom", "qui", "quin", "quina", "quines", "quins", "què", "s'ha", "s'han", "sa", "sabem", "saben", "saber", "sabeu", "sap", "saps", "semblant", "semblants", "sense", "ser", "ses", "seu", "seus", "seva", "seves", "si", "sobre", "sobretot", "soc", "solament", "sols", "som", "son", "sons", 
"sota", "sou", "sóc", "són", "t'ha", "t'han", "t'he", "ta", "tal", "també", "tampoc", "tan", "tant", "tanta", "tantes", "te", "tene", "tenim", "tenir", "teniu", "teu", "teus", "teva", "teves", "tinc", "ton", "tons", "tot", "tota", "totes", "tots", "un", "una", "unes", "uns", "us", "va", "vaig", "vam", "van", "vas", "veu", "vosaltres", "vostra", "vostre", "vostres", "érem", "éreu", "és", "éssent", "últim", "ús", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/english.rs
lib/segment/src/index/field_index/full_text_index/stop_words/english.rs
/// English stop-word list for the full-text index; includes contracted forms
/// ("you're", "don't") and their apostrophe-split fragments ("s", "t", "ll", …)
/// so both tokenization styles are covered.
pub const ENGLISH_STOPWORDS: &[&str] = &[ "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "you're", "you've", "you'll", "you'd", "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "she's", "her", "hers", "herself", "it", "it's", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "that'll", "these", "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "don't", "should", "should've", "now", "d", "ll", "m", "o", "re", "ve", "y", "ain", "aren", "aren't", "couldn", "couldn't", "didn", "didn't", "doesn", "doesn't", "hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "isn", "isn't", "ma", "mightn", "mightn't", "mustn", "mustn't", "needn", "needn't", "shan", "shan't", "shouldn", "shouldn't", "wasn", "wasn't", "weren", "weren't", "won", "won't", "wouldn", "wouldn't", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/romanian.rs
lib/segment/src/index/field_index/full_text_index/stop_words/romanian.rs
/// Romanian stop-word list for the full-text index; contains both diacritic
/// and diacritic-free spellings (e.g. "dacă"/"daca") so it matches text in
/// either form.
///
/// NOTE(review): hyphen-bearing entries ("dintr-", "printr-", "sa-mi") only
/// match if the tokenizer keeps hyphens attached — confirm tokenizer behavior.
pub const ROMANIAN_STOPWORDS: &[&str] = &[ "a", "abia", "acea", "aceasta", "această", "aceea", "aceeasi", "acei", "aceia", "acel", "acela", "acelasi", "acele", "acelea", "acest", "acesta", "aceste", "acestea", "acestei", "acestia", "acestui", "aceşti", "aceştia", "adica", "ai", "aia", "aibă", "aici", "al", "ala", "ale", "alea", "alt", "alta", "altceva", "altcineva", "alte", "altfel", "alti", "altii", "altul", "am", "anume", "apoi", "ar", "are", "as", "asa", "asta", "astea", "astfel", "asupra", "atare", "atat", "atata", "atatea", "atatia", "ati", "atit", "atita", "atitea", "atitia", "atunci", "au", "avea", "avem", "aveţi", "avut", "aş", "aţi", "ba", "ca", "cam", "cand", "care", "careia", "carora", "caruia", "cat", "catre", "ce", "cea", "ceea", "cei", "ceilalti", "cel", "cele", "celor", "ceva", "chiar", "ci", "cind", "cine", "cineva", "cit", "cita", "cite", "citeva", "citi", "citiva", "cu", "cui", "cum", "cumva", "cât", "câte", "câtva", "câţi", "cînd", "cît", "cîte", "cîtva", "cîţi", "că", "căci", "cărei", "căror", "cărui", "către", "da", "daca", "dacă", "dar", "dat", "dată", "dau", "de", "deasupra", "deci", "decit", "deja", "desi", "despre", "deşi", "din", "dintr", "dintr-", "dintre", "doar", "doi", "doilea", "două", "drept", "dupa", "după", "dă", "e", "ea", "ei", "el", "ele", "era", "eram", "este", "eu", "eşti", "face", "fara", "fata", "fel", "fi", "fie", "fiecare", "fii", "fim", "fiu", "fiţi", "foarte", "fost", "fără", "i", "ia", "iar", "ii", "il", "imi", "in", "inainte", "inapoi", "inca", "incit", "insa", "intr", "intre", "isi", "iti", "la", "le", "li", "lor", "lui", "lângă", "lîngă", "m", "ma", "mai", "mea", "mei", "mele", "mereu", "meu", "mi", "mie", "mine", "mod", "mult", "multa", "multe", "multi", "multă", "mulţi", "mâine", "mîine", "mă", "ne", "ni", "nici", "nimeni", "nimic", "niste", "nişte", "noastre", "noastră", "noi", "nostri", "nostru", "nou", "noua", "nouă", "noştri", "nu", "numai", "o", "or", "ori", "oricare", "orice", "oricine", "oricum", "oricând", 
"oricât", "oricînd", "oricît", "oriunde", "pai", "parca", "patra", "patru", "pe", "pentru", "peste", "pic", "pina", "poate", "pot", "prea", "prima", "primul", "prin", "printr-", "putini", "puţin", "puţina", "puţină", "până", "pînă", "sa", "sa-mi", "sa-ti", "sai", "sale", "sau", "se", "si", "sint", "sintem", "spate", "spre", "sub", "sunt", "suntem", "sunteţi", "sus", "să", "săi", "său", "t", "ta", "tale", "te", "ti", "tine", "toata", "toate", "toată", "tocmai", "tot", "toti", "totul", "totusi", "totuşi", "toţi", "trei", "treia", "treilea", "tu", "tuturor", "tăi", "tău", "u", "ul", "ului", "un", "una", "unde", "undeva", "unei", "uneia", "unele", "uneori", "unii", "unor", "unora", "unu", "unui", "unuia", "unul", "v", "va", "vi", "voastre", "voastră", "voi", "vom", "vor", "vostru", "vouă", "voştri", "vreo", "vreun", "vă", "zi", "zice", "îi", "îl", "îmi", "în", "îţi", "ăla", "ălea", "ăsta", "ăstea", "ăştia", "şi", "ţi", "ţie", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/swedish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/swedish.rs
/// Swedish stop-word list for the full-text index; tokens matching any entry
/// are filtered out before indexing/search.
pub const SWEDISH_STOPWORDS: &[&str] = &[ "och", "det", "att", "i", "en", "jag", "hon", "som", "han", "på", "den", "med", "var", "sig", "för", "så", "till", "är", "men", "ett", "om", "hade", "de", "av", "icke", "mig", "du", "henne", "då", "sin", "nu", "har", "inte", "hans", "honom", "skulle", "hennes", "där", "min", "man", "ej", "vid", "kunde", "något", "från", "ut", "när", "efter", "upp", "vi", "dem", "vara", "vad", "över", "än", "dig", "kan", "sina", "här", "ha", "mot", "alla", "under", "någon", "eller", "allt", "mycket", "sedan", "ju", "denna", "själv", "detta", "åt", "utan", "varit", "hur", "ingen", "mitt", "ni", "bli", "blev", "oss", "din", "dessa", "några", "deras", "blir", "mina", "samma", "vilken", "er", "sådan", "vår", "blivit", "dess", "inom", "mellan", "sådant", "varför", "varje", "vilka", "ditt", "vem", "vilket", "sitta", "sådana", "vart", "dina", "vars", "vårt", "våra", "ert", "era", "vilkas", ];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/full_text_index/stop_words/turkish.rs
lib/segment/src/index/field_index/full_text_index/stop_words/turkish.rs
/// Turkish stop-word list for the full-text index; tokens matching any entry
/// are filtered out before indexing/search.
pub const TURKISH_STOPWORDS: &[&str] = &[
    "acaba", "ama", "aslında", "az", "bazı", "belki", "biri", "birkaç", "birşey",
    "biz", "bu", "çok", "çünkü", "da", "daha", "de", "defa", "diye", "eğer",
    "en", "gibi", "hem", "hep", "hepsi", "her", "hiç", "için", "ile", "ise",
    "kez", "ki", "kim", "mı", "mu", "mü", "nasıl", "ne", "neden", "nerde",
    "nerede", "nereye", "niçin", "niye", "o", "sanki", "şey", "siz", "şu",
    "tüm", "ve", "veya", "ya", "yani",
];
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/null_index/mod.rs
lib/segment/src/index/field_index/null_index/mod.rs
//! Null index: payload field index that tracks, per point, whether the field
//! has any value and whether it holds a null (used for `is_empty` / `is_null`
//! filter conditions).

pub mod mutable_null_index;

// Re-export the index type so callers don't need the submodule path.
pub use mutable_null_index::MutableNullIndex;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/null_index/mutable_null_index.rs
lib/segment/src/index/field_index/null_index/mutable_null_index.rs
use std::path::{Path, PathBuf};

use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use serde_json::Value;

use crate::common::Flusher;
use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::flags::roaring_flags::RoaringFlags;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::index::field_index::{
    CardinalityEstimation, FieldIndexBuilderTrait, PayloadBlockCondition, PayloadFieldIndex,
    PrimaryCondition,
};
use crate::index::payload_config::{IndexMutability, StorageType};
use crate::telemetry::PayloadIndexTelemetry;
use crate::types::{FieldCondition, PayloadKeyType};

// Subdirectory names under `base_dir` for the two persisted flag stores.
const HAS_VALUES_DIRNAME: &str = "has_values";
const IS_NULL_DIRNAME: &str = "is_null";

/// Mutable variant of null index that uses roaring bitmaps for in-memory operations
/// and buffers updates before persisting them to DynamicMmapFlags.
pub struct MutableNullIndex {
    // Directory holding both flag subdirectories; removed on `wipe`.
    base_dir: PathBuf,
    // Roaring-bitmap views over the mmap-backed flags.
    storage: Storage,
    // Upper bound on point ids observed so far; used by `estimate_cardinality`.
    total_point_count: usize,
}

struct Storage {
    /// Points which have at least one value
    has_values_flags: RoaringFlags,
    /// Points which have null values
    is_null_flags: RoaringFlags,
}

impl MutableNullIndex {
    /// Create (or re-open) the index at `path` and wrap it in a builder.
    pub fn builder(path: &Path) -> OperationResult<MutableNullIndexBuilder> {
        Ok(MutableNullIndexBuilder(
            Self::open(path, 0, true)?.ok_or_else(|| {
                OperationError::service_error(format!(
                    "Failed to create and open mutable null index at path: {}",
                    path.display(),
                ))
            })?,
        ))
    }

    /// Open and load or create a mutable null index at the given path.
    ///
    /// # Arguments
    /// - `path` - The directory where the index files should live, must be exclusive to this index.
    /// - `total_point_count` - Total number of points in the segment.
    /// - `create_if_missing` - If true, creates the index if it doesn't exist.
    pub fn open(
        path: &Path,
        total_point_count: usize,
        create_if_missing: bool,
    ) -> OperationResult<Option<Self>> {
        let has_values_dir = path.join(HAS_VALUES_DIRNAME);
        // If has values directory doesn't exist, assume the index doesn't exist on disk
        if !has_values_dir.is_dir() && !create_if_missing {
            return Ok(None);
        }

        Ok(Some(Self::open_or_create(path, total_point_count)?))
    }

    /// Open both flag stores (creating the directory tree first) and wrap
    /// them in roaring-bitmap views.
    fn open_or_create(path: &Path, total_point_count: usize) -> OperationResult<Self> {
        fs::create_dir_all(path).map_err(|err| {
            OperationError::service_error(format!(
                "Failed to create mutable-null-index directory: {err}, path: {path:?}"
            ))
        })?;

        let has_values_path = path.join(HAS_VALUES_DIRNAME);
        let has_values_mmap = DynamicMmapFlags::open(&has_values_path, false)?;
        let has_values_flags = RoaringFlags::new(has_values_mmap);

        let is_null_path = path.join(IS_NULL_DIRNAME);
        let is_null_mmap = DynamicMmapFlags::open(&is_null_path, false)?;
        let is_null_flags = RoaringFlags::new(is_null_mmap);

        let storage = Storage {
            has_values_flags,
            is_null_flags,
        };

        Ok(Self {
            base_dir: path.to_path_buf(),
            storage,
            total_point_count,
        })
    }

    /// Classify `payload` for point `id` and record two flags: whether the
    /// point has at least one value, and whether it contains a null
    /// (a `Value::Null` directly, or a null element inside an array).
    pub fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let mut is_null = false;
        let mut has_values = false;
        for value in payload {
            match value {
                Value::Null => {
                    is_null = true;
                }
                Value::Bool(_) => {
                    has_values = true;
                }
                Value::Number(_) => {
                    has_values = true;
                }
                Value::String(_) => {
                    has_values = true;
                }
                Value::Array(array) => {
                    // A null inside the array marks the point as null;
                    // a non-empty array marks it as having values.
                    if array.iter().any(|v| v.is_null()) {
                        is_null = true;
                    }
                    if !array.is_empty() {
                        has_values = true;
                    }
                }
                Value::Object(_) => {
                    has_values = true;
                }
            }
            // Both flags are set — no further value can change the outcome.
            if is_null && has_values {
                break;
            }
        }

        self.storage.has_values_flags.set(id, has_values);
        self.storage.is_null_flags.set(id, is_null);

        // Bump total points
        self.total_point_count = std::cmp::max(self.total_point_count, id as usize + 1);

        // Account for I/O cost as if we were writing to disk now
        hw_counter.payload_index_io_write_counter().incr_delta(2);

        Ok(())
    }

    /// Clear both flags for `id`. Still bumps `total_point_count` (see note).
    pub fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> {
        // Update bitmaps immediately
        self.storage.has_values_flags.set(id, false);
        self.storage.is_null_flags.set(id, false);

        // Bump total points
        // We MUST bump the total point count when removing a point too
        // On upsert without this respective field, remove point is called rather than add point
        // Bumping the total point count ensures we correctly estimate the number of points
        // Bug: <https://github.com/qdrant/qdrant/pull/6882>
        self.total_point_count = std::cmp::max(self.total_point_count, id as usize + 1);

        // Account for I/O cost as if we were writing to disk now
        let hw_counter = HardwareCounterCell::disposable();
        hw_counter.payload_index_io_write_counter().incr_delta(2);

        Ok(())
    }

    /// This index only tracks presence, so the count is 0 or 1.
    pub fn values_count(&self, id: PointOffsetType) -> usize {
        usize::from(self.storage.has_values_flags.get(id))
    }

    pub fn values_is_empty(&self, id: PointOffsetType) -> bool {
        !self.storage.has_values_flags.get(id)
    }

    pub fn values_is_null(&self, id: PointOffsetType) -> bool {
        self.storage.is_null_flags.get(id)
    }

    pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry {
        let points_count = self.storage.has_values_flags.len();
        PayloadIndexTelemetry {
            field_name: None,
            points_count,
            points_values_count: points_count,
            histogram_bucket_size: None,
            index_type: "mutable_null_index",
        }
    }

    /// No-op: the roaring views already keep the flags in memory.
    pub fn populate(&self) -> OperationResult<()> {
        Ok(())
    }

    pub fn is_on_disk(&self) -> bool {
        false
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.storage.is_null_flags.clear_cache()?;
        self.storage.has_values_flags.clear_cache()
    }

    pub fn get_mutability_type(&self) -> IndexMutability {
        IndexMutability::Mutable
    }

    pub fn get_storage_type(&self) -> StorageType {
        StorageType::Mmap {
            is_on_disk: self.is_on_disk(),
        }
    }
}

impl PayloadFieldIndex for MutableNullIndex {
    fn count_indexed_points(&self) -> usize {
        self.storage.has_values_flags.len()
    }

    fn wipe(self) -> OperationResult<()> {
        let base_dir = self.base_dir.clone();
        // drop mmap handles before deleting files
        drop(self);
        if base_dir.is_dir() {
            fs::remove_dir_all(&base_dir)?;
        }
        Ok(())
    }

    fn flusher(&self) -> Flusher {
        let flush_has_values = self.storage.has_values_flags.flusher();
        let flush_is_null = self.storage.is_null_flags.flusher();
        Box::new(move || {
            flush_has_values()?;
            flush_is_null()?;
            Ok(())
        })
    }

    fn files(&self) -> Vec<PathBuf> {
        let mut files = self.storage.has_values_flags.files();
        files.extend(self.storage.is_null_flags.files());
        files
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        Vec::new() // everything is mutable
    }

    fn filter<'a>(
        &'a self,
        condition: &'a FieldCondition,
        _hw_counter: &'a HardwareCounterCell,
    ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> {
        // Exhaustive destructuring: adding a field to FieldCondition forces a
        // review of this match.
        let FieldCondition {
            key: _,
            r#match: _,
            range: _,
            geo_bounding_box: _,
            geo_radius: _,
            geo_polygon: _,
            values_count: _,
            is_empty,
            is_null,
        } = condition;

        if let Some(is_empty) = is_empty {
            if *is_empty {
                // Return points that don't have values
                let iter = self.storage.has_values_flags.iter_falses();
                Some(Box::new(iter))
            } else {
                // Return points that have values
                let iter = self.storage.has_values_flags.iter_trues();
                Some(Box::new(iter))
            }
        } else if let Some(is_null) = is_null {
            if *is_null {
                // Return points that have null values
                let iter = self.storage.is_null_flags.iter_trues();
                Some(Box::new(iter))
            } else {
                // Return points that don't have null values
                let iter = self.storage.is_null_flags.iter_falses();
                Some(Box::new(iter))
            }
        } else {
            None
        }
    }

    fn estimate_cardinality(
        &self,
        condition: &FieldCondition,
        _hw_counter: &HardwareCounterCell,
    ) -> Option<CardinalityEstimation> {
        let FieldCondition {
            key,
            r#match: _,
            range: _,
            geo_bounding_box: _,
            geo_radius: _,
            geo_polygon: _,
            values_count: _,
            is_empty,
            is_null,
        } = condition;

        if let Some(is_empty) = is_empty {
            if *is_empty {
                // "is empty" = total minus points that have values; inexact
                // because total_point_count may include deleted points.
                let has_values_count = self.storage.has_values_flags.count_trues();
                let estimated = self.total_point_count.saturating_sub(has_values_count);
                Some(CardinalityEstimation {
                    min: 0,
                    exp: 2 * estimated / 3, // assuming 1/3 of the points are deleted
                    max: estimated,
                    primary_clauses: vec![PrimaryCondition::from(FieldCondition::new_is_empty(
                        key.clone(),
                        true,
                    ))],
                })
            } else {
                // Flag count is exact for "has values".
                let count = self.storage.has_values_flags.count_trues();
                Some(CardinalityEstimation::exact(count).with_primary_clause(
                    PrimaryCondition::from(FieldCondition::new_is_empty(key.clone(), false)),
                ))
            }
        } else if let Some(is_null) = is_null {
            if *is_null {
                // Flag count is exact for "is null".
                let count = self.storage.is_null_flags.count_trues();
                Some(CardinalityEstimation::exact(count).with_primary_clause(
                    PrimaryCondition::from(FieldCondition::new_is_null(key.clone(), true)),
                ))
            } else {
                // "not null" = total minus null points; inexact, same as above.
                let is_null_count = self.storage.is_null_flags.count_trues();
                let estimated = self.total_point_count.saturating_sub(is_null_count);
                Some(CardinalityEstimation {
                    min: 0,
                    exp: 2 * estimated / 3, // assuming 1/3 of the points are deleted
                    max: estimated,
                    primary_clauses: vec![PrimaryCondition::from(FieldCondition::new_is_null(
                        key.clone(),
                        false,
                    ))],
                })
            }
        } else {
            None
        }
    }

    fn payload_blocks(
        &self,
        _threshold: usize,
        _key: PayloadKeyType,
    ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> {
        // No payload blocks
        Box::new(std::iter::empty())
    }
}

/// Builder newtype over an already-open `MutableNullIndex`.
pub struct MutableNullIndexBuilder(MutableNullIndex);

impl FieldIndexBuilderTrait for MutableNullIndexBuilder {
    type FieldIndexType = MutableNullIndex;

    fn init(&mut self) -> OperationResult<()> {
        // After Self is created, it is already initialized
        Ok(())
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&serde_json::Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.0.add_point(id, payload, hw_counter)
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        // Flush any remaining buffered updates
        self.0.flusher()()?;
        Ok(self.0)
    }
}

#[cfg(test)]
mod tests {
    use common::counter::hardware_accumulator::HwMeasurementAcc;
    use tempfile::TempDir;

    use super::*;
    use crate::json_path::JsonPath;

    #[test]
    fn test_build_and_use_mutable_null_index() {
        let dir = TempDir::with_prefix("test_mutable_null_index").unwrap();
        let null_value = Value::Null;
        let null_value_in_array =
            Value::Array(vec![Value::String("test".to_string()), Value::Null]);

        let mut builder = MutableNullIndex::builder(dir.path()).unwrap();

        // Points cycle through four payload shapes by `i % 4`.
        let n = 100;
        let hw_counter = HardwareCounterCell::new();
        for i in 0..n {
            match i % 4 {
                0 => builder.add_point(i, &[&null_value], &hw_counter).unwrap(),
                1 => builder
                    .add_point(i, &[&null_value_in_array], &hw_counter)
                    .unwrap(),
                2 => builder.add_point(i, &[], &hw_counter).unwrap(),
                3 => builder
                    .add_point(i, &[&Value::Bool(true)], &hw_counter)
                    .unwrap(),
                _ => unreachable!(),
            }
        }

        let null_index = builder.finalize().unwrap();

        let key = JsonPath::new("test");
        let filter_is_null = FieldCondition::new_is_null(key.clone(), true);
        let filter_is_not_empty = FieldCondition {
            key: key.clone(),
            r#match: None,
            range: None,
            geo_bounding_box: None,
            geo_radius: None,
            geo_polygon: None,
            values_count: None,
            is_empty: Some(false),
            is_null: None,
        };

        let hw_acc = HwMeasurementAcc::new();
        let hw_counter = hw_acc.get_counter_cell();

        let is_null_values: Vec<_> = null_index
            .filter(&filter_is_null, &hw_counter)
            .unwrap()
            .collect();
        let not_empty_values: Vec<_> = null_index
            .filter(&filter_is_not_empty, &hw_counter)
            .unwrap()
            .collect();

        let is_empty_values: Vec<_> = (0..n)
            .filter(|&id| null_index.values_is_empty(id))
            .collect();
        let not_null_values: Vec<_> = (0..n)
            .filter(|&id| !null_index.values_is_null(id))
            .collect();

        for i in 0..n {
            match i % 4 {
                0 => {
                    // &[&null_value]
                    assert!(is_null_values.contains(&i));
                    assert!(!not_empty_values.contains(&i));
                    assert!(!not_null_values.contains(&i));
                    assert!(is_empty_values.contains(&i));
                }
                1 => {
                    // &[&null_value_in_array]
                    assert!(is_null_values.contains(&i));
                    assert!(not_empty_values.contains(&i));
                    assert!(!not_null_values.contains(&i));
                    assert!(!is_empty_values.contains(&i));
                }
                2 => {
                    // &[]
                    assert!(!is_null_values.contains(&i));
                    assert!(!not_empty_values.contains(&i));
                    assert!(not_null_values.contains(&i));
                    assert!(is_empty_values.contains(&i));
                }
                3 => {
                    // &[&Value::Bool(true)]
                    assert!(!is_null_values.contains(&i));
                    assert!(not_empty_values.contains(&i));
                    assert!(not_null_values.contains(&i));
                    assert!(!is_empty_values.contains(&i));
                }
                _ => unreachable!(),
            }
        }

        let hw_cell = HardwareCounterCell::new();
        let is_null_cardinality = null_index
            .estimate_cardinality(&filter_is_null, &hw_cell)
            .unwrap();
        let non_empty_cardinality = null_index
            .estimate_cardinality(&filter_is_not_empty, &hw_cell)
            .unwrap();

        assert_eq!(is_null_cardinality.exp, 50);
        assert_eq!(non_empty_cardinality.exp, 50);
    }

    #[test]
    fn test_manual_buffer_flushing() {
        let dir = TempDir::with_prefix("test_manual_buffer_flushing").unwrap();
        let mut index = MutableNullIndex::builder(dir.path()).unwrap().0;
        let hw_counter = HardwareCounterCell::new();

        // Add points without automatic flushing
        for i in 0..10 {
            index
                .add_point(i as PointOffsetType, &[&Value::Bool(true)], &hw_counter)
                .unwrap();
        }

        // Manually flush via flusher
        index.flusher()().unwrap();

        // Verify data is in bitmaps
        for i in 0..10 {
            assert!(!index.values_is_empty(i as PointOffsetType));
            assert!(!index.values_is_null(i as PointOffsetType));
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/tests/histogram_test_utils.rs
lib/segment/src/index/field_index/tests/histogram_test_utils.rs
use std::collections::BTreeSet;
use std::fmt::Display;
use std::io;
use std::io::Write;

use serde::Serialize;
use serde::de::DeserializeOwned;

use crate::index::field_index::histogram::{Histogram, Numericable, Point};

/// Debug-print a histogram against the full set of indexed points (test helper).
///
/// For every point in `points_index`, one marker is written to stderr:
/// - ` l|r ` when the point is a histogram border (left/right counts),
/// - ` lxr ` when it is a border AND matches the highlighted point `pnt`,
/// - `x` when it matches `pnt` but is not a border,
/// - `.` otherwise.
/// Finishes with the histogram's total count, e.g. `[42]`.
pub fn print_results<T: Numericable + Serialize + DeserializeOwned + Display>(
    points_index: &BTreeSet<Point<T>>,
    histogram: &Histogram<T>,
    pnt: Option<Point<T>>,
) {
    for point in points_index.iter() {
        // Is this the optionally-highlighted point?
        let is_highlighted = pnt.as_ref().is_some_and(|p| p.idx == point.idx);
        if let Some(border_count) = histogram.borders().get(point) {
            if is_highlighted {
                eprint!(" {}x{} ", border_count.left, border_count.right);
            } else {
                eprint!(" {}|{} ", border_count.left, border_count.right);
            }
        } else if is_highlighted {
            eprint!("x");
        } else {
            eprint!(".");
        }
    }
    eprintln!("[{}]", histogram.total_count());
    // All output above went to stderr, so flush stderr; the previous version
    // flushed stdout, which never received any of this output.
    io::stderr().flush().unwrap();
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/tests/histogram_i64_tests.rs
lib/segment/src/index/field_index/tests/histogram_i64_tests.rs
use std::collections::BTreeSet; use std::collections::Bound::Included; use common::types::PointOffsetType; use rand::prelude::SliceRandom; use crate::index::field_index::histogram::{Histogram, Point}; use crate::index::field_index::tests::histogram_test_utils::print_results; use crate::index::field_index::tests::histogram_tests::{build_histogram, count_range}; pub fn histogram_fixture_values() -> Vec<i64> { vec![ 818038026092414779, 817430997309065005, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 814525614605207302, 813881787313817334, 813881787313817334, 813881787313817334, 813881787313817334, 813881787313817334, 797077632540739163, 797077632540739163, 797077632540739163, 797077632540739163, 797077632540739163, 797077632540739163, 797077632540739163, 796753381031937625, 796753381031937625, 796753381031937625, 796753381031937625, 796753381031937625, 796753381031937625, 794845070900594257, 794845070900594257, 794845070900594257, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793390195280971343, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793353484098340430, 793115156044318285, 793115156044318285, 793115156044318285, 793115156044318285, 793115156044318285, 793115156044318285, 793115156044318285, 792916109618579020, 792916109618579020, 792916109618579020, 789197549817824843, 789197549817824843, 789197549817824843, 789197549817824843, 789197549817824843, 789197549817824843, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 
787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787850537872655946, 787649226061383241, 787647249260742216, 784221654430516807, 784221654430516807, 784221654430516807, 784221654430516807, 784221654430516807, 782529989596677701, ] } #[test] fn test_fixture_ok() { for x in histogram_fixture_values() { assert!(x > 0); println!("{} {}", x, x as f64); } } #[test] fn test_histogram() { let max_bucket_size = 1000; let precision = 0.01; // let points = (0..100000).map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i }).collect_vec(); let mut points: Vec<_> = histogram_fixture_values(); points.shuffle(&mut rand::rng()); let points: Vec<_> = points .into_iter() .enumerate() .map(|(idx, val)| Point::new(val, idx as PointOffsetType)) .collect(); let (histogram, points_index) = build_histogram(max_bucket_size, precision, points); print_results(&points_index, &histogram, None); } pub fn request_histogram_i64(histogram: &Histogram<i64>, points_index: &BTreeSet<Point<i64>>) { let (est_min, estimation, est_max) = histogram.estimate(Included(0), Included(0)); let real = count_range(points_index, 0, 0); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(0), Included(100)); let real = count_range(points_index, 0, 100); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(-100), Included(100)); let real = count_range(points_index, -100, 100); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as 
f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); for _ in 0..100 { let from = rand::random::<i64>(); let to = from.saturating_add(rand::random::<i64>()); let (est_min, estimation, est_max) = histogram.estimate(Included(from), Included(to)); let real = count_range(points_index, from, to); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); } } #[test] fn test_build_i64_histogram() { let max_bucket_size = 1000; let precision = 0.01; let num_samples = 100_000; // let points = (0..100000).map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i }).collect_vec(); let points: Vec<_> = (0..num_samples) .map(|i| Point { val: rand::random::<i64>(), idx: i, }) .collect(); let (histogram, points_index) = build_histogram(max_bucket_size, precision, points); request_histogram_i64(&histogram, &points_index); // test_range_by_cardinality(&histogram); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/tests/mod.rs
lib/segment/src/index/field_index/tests/mod.rs
mod histogram_i64_tests; mod histogram_test_utils; mod histogram_tests;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/tests/histogram_tests.rs
lib/segment/src/index/field_index/tests/histogram_tests.rs
use std::cell::Cell; use std::collections::BTreeSet; use std::collections::Bound::{Excluded, Included, Unbounded}; use itertools::Itertools; use rand::prelude::StdRng; use rand::{Rng, SeedableRng}; use rand_distr::StandardNormal; use serde::Serialize; use serde::de::DeserializeOwned; use crate::index::field_index::histogram::{Histogram, Numericable, Point}; use crate::index::field_index::tests::histogram_test_utils::print_results; pub fn count_range<T: PartialOrd>(points_index: &BTreeSet<Point<T>>, a: T, b: T) -> usize { points_index .iter() .filter(|x| a <= x.val && x.val <= b) .count() } #[test] fn test_build_histogram_small() { let max_bucket_size = 10; let precision = 0.01; let num_samples = 1000; let mut rnd = StdRng::seed_from_u64(42); // let points = (0..100000).map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i }).collect_vec(); let points = (0..num_samples) .map(|i| Point { val: f64::round(rnd.sample::<f64, _>(StandardNormal) * 10.0), idx: i % num_samples / 2, }) .collect_vec(); let mut points_index: BTreeSet<Point<_>> = Default::default(); let mut histogram = Histogram::new(max_bucket_size, precision); for point in &points { points_index.insert(point.clone()); // print_results(&points_index, &histogram, Some(point.clone())); histogram.insert( point.clone(), |x| { points_index .range((Unbounded, Excluded(x))) .next_back() .cloned() }, |x| points_index.range((Excluded(x), Unbounded)).next().cloned(), ); } for point in &points { print_results(&points_index, &histogram, Some(point.clone())); points_index.remove(point); histogram.remove( point, |x| { points_index .range((Unbounded, Excluded(x))) .next_back() .cloned() }, |x| points_index.range((Excluded(x), Unbounded)).next().cloned(), ); } } pub fn test_range_by_cardinality(histogram: &Histogram<f64>) { let from = Unbounded; let range_size = 100; let to = histogram.get_range_by_size(from, range_size); let estimation = histogram.estimate(from, to); eprintln!("({from:?} - {to:?}) -> {estimation:?} / 
{range_size}"); assert!( (estimation.1 as i64 - range_size as i64).abs() < 2 * histogram.current_bucket_size() as i64 ); let from = Unbounded; let range_size = 1000; let to = histogram.get_range_by_size(from, range_size); let estimation = histogram.estimate(from, to); eprintln!("({from:?} - {to:?}) -> {estimation:?} / {range_size}"); assert!( (estimation.1 as i64 - range_size as i64).abs() < 2 * histogram.current_bucket_size() as i64 ); let from = Excluded(0.1); let range_size = 100; let to = histogram.get_range_by_size(from, range_size); let estimation = histogram.estimate(from, to); eprintln!("({from:?} - {to:?}) -> {estimation:?} / {range_size}"); assert!( (estimation.1 as i64 - range_size as i64).abs() < 2 * histogram.current_bucket_size() as i64 ); let from = Excluded(0.1); let range_size = 1000; let to = histogram.get_range_by_size(from, range_size); let estimation = histogram.estimate(from, to); eprintln!("({from:?} - {to:?}) -> {estimation:?} / {range_size}"); assert!( (estimation.1 as i64 - range_size as i64).abs() < 2 * histogram.current_bucket_size() as i64 ); let from = Excluded(0.1); let range_size = 100_000; let to = histogram.get_range_by_size(from, range_size); let estimation = histogram.estimate(from, to); eprintln!("({from:?} - {to:?}) -> {estimation:?} / {range_size}"); assert!(matches!(to, Unbounded)); } pub fn request_histogram(histogram: &Histogram<f64>, points_index: &BTreeSet<Point<f64>>) { let (est_min, estimation, est_max) = histogram.estimate(Included(0.0), Included(0.0)); let real = count_range(points_index, 0., 0.); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(0.0), Included(0.0001)); let real = count_range(points_index, 0., 0.0001); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); 
assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(0.0), Included(0.01)); let real = count_range(points_index, 0., 0.01); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(0.), Included(1.)); let real = count_range(points_index, 0., 1.); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(0.), Included(100.)); let real = count_range(points_index, 0., 100.); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(-100.), Included(100.)); let real = count_range(points_index, -100., 100.); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); let (est_min, estimation, est_max) = histogram.estimate(Included(20.), Included(100.)); let real = count_range(points_index, 20., 100.); eprintln!( "{real} / ({est_min}, {estimation}, {est_max}) = {}", estimation as f64 / real as f64, ); assert!(real.abs_diff(estimation) < 2 * histogram.current_bucket_size()); } pub fn build_histogram<T: Numericable + Serialize + DeserializeOwned + std::fmt::Debug>( max_bucket_size: usize, precision: f64, points: Vec<Point<T>>, ) -> (Histogram<T>, BTreeSet<Point<T>>) { let mut points_index: BTreeSet<Point<T>> = Default::default(); let mut histogram = Histogram::new(max_bucket_size, precision); let read_counter = Cell::new(0); for point in 
points { points_index.insert(point.clone()); // print_results(&points_index, &histogram, Some(point.clone())); histogram.insert( point, |x| { read_counter.set(read_counter.get() + 1); points_index .range((Unbounded, Excluded(x))) .next_back() .cloned() }, |x| { read_counter.set(read_counter.get() + 1); points_index.range((Excluded(x), Unbounded)).next().cloned() }, ); } eprintln!("read_counter.get() = {:#?}", read_counter.get()); eprintln!("histogram.borders.len() = {:#?}", histogram.borders().len()); for border in histogram.borders().iter().take(5) { eprintln!("border = {border:?}"); } (histogram, points_index) } #[test] fn test_build_histogram_round() { let max_bucket_size = 100; let precision = 0.01; let num_samples = 100_000; let mut rnd = StdRng::seed_from_u64(42); // let points = (0..100000).map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i }).collect_vec(); let points = (0..num_samples).map(|i| Point { val: f64::round(rnd.sample::<f64, _>(StandardNormal) * 100.0), idx: i, }); let (histogram, points_index) = build_histogram(max_bucket_size, precision, points.collect()); request_histogram(&histogram, &points_index); } #[test] fn test_build_histogram() { let max_bucket_size = 1000; let precision = 0.01; let num_samples = 100_000; let mut rnd = StdRng::seed_from_u64(42); // let points = (0..100000).map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i }).collect_vec(); let points = (0..num_samples) .map(|i| Point { val: rnd.sample(StandardNormal), idx: i, }) .collect_vec(); let (histogram, points_index) = build_histogram(max_bucket_size, precision, points); request_histogram(&histogram, &points_index); test_range_by_cardinality(&histogram); } #[test] fn test_save_load_histogram() { let max_bucket_size = 1000; let precision = 0.01; let num_samples = 100_000; let mut rnd = StdRng::seed_from_u64(42); let points = (0..num_samples) .map(|i| Point { val: rnd.random_range(-10.0..10.0), idx: i, }) .collect_vec(); let (histogram, _) = 
build_histogram(max_bucket_size, precision, points); let dir = tempfile::Builder::new() .prefix("histogram_dir") .tempdir() .unwrap(); histogram.save(dir.path()).unwrap(); let loaded_histogram = Histogram::<f64>::load(dir.path()).unwrap(); assert_eq!(histogram, loaded_histogram); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/map_index/mutable_map_index.rs
lib/segment/src/index/field_index/map_index/mutable_map_index.rs
use std::borrow::Borrow; use std::collections::HashMap; use std::iter; use std::path::PathBuf; #[cfg(feature = "rocksdb")] use std::sync::Arc; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use gridstore::config::StorageOptions; use gridstore::{Blob, Gridstore}; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; use roaring::RoaringBitmap; #[cfg(feature = "rocksdb")] use rocksdb::DB; #[cfg(feature = "rocksdb")] use super::MapIndex; use super::{IdIter, MapIndexKey}; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::index::payload_config::StorageType; /// Default options for Gridstore storage const fn default_gridstore_options(block_size: usize) -> StorageOptions { StorageOptions { // Size dependent on map value type block_size_bytes: Some(block_size), compression: Some(gridstore::config::Compression::None), page_size_bytes: Some(block_size * 8192 * 32), // 4 to 8 MiB = block_size * region_blocks * regions, region_size_blocks: None, } } pub struct MutableMapIndex<N: MapIndexKey + ?Sized> where Vec<N::Owned>: Blob + Send + Sync, { pub(super) map: HashMap<N::Owned, RoaringBitmap>, pub(super) point_to_values: Vec<Vec<N::Owned>>, /// Amount of point which have at least one indexed payload value pub(super) indexed_points: usize, pub(super) values_count: usize, storage: Storage<N::Owned>, } enum Storage<T> where Vec<T>: Blob + Send + Sync, { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Gridstore(Gridstore<Vec<T>>), } impl<N: MapIndexKey + ?Sized> MutableMapIndex<N> where Vec<N::Owned>: Blob + Send + Sync, { /// Open mutable map index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb( db: Arc<RwLock<DB>>, 
field_name: &str, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let store_cf_name = MapIndex::<N>::storage_cf_name(field_name); let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); Self::open_rocksdb_db_wrapper(db_wrapper, create_if_missing) } #[cfg(feature = "rocksdb")] pub fn open_rocksdb_db_wrapper( db_wrapper: DatabaseColumnScheduledDeleteWrapper, create_if_missing: bool, ) -> OperationResult<Option<Self>> { if !db_wrapper.has_column_family()? { if create_if_missing { db_wrapper.recreate_column_family()?; } else { // Column family doesn't exist, cannot load return Ok(None); } }; // Load in-memory index from RocksDB let mut map = HashMap::<_, RoaringBitmap>::new(); let mut point_to_values = Vec::new(); let mut indexed_points = 0; let mut values_count = 0; for (record, _) in db_wrapper.lock_db().iter()? { let record = std::str::from_utf8(&record).map_err(|_| { OperationError::service_error("Index load error: UTF8 error while DB parsing") })?; let (value, idx) = MapIndex::<N>::decode_db_record(record)?; if point_to_values.len() <= idx as usize { point_to_values.resize_with(idx as usize + 1, Vec::new) } let point_values = &mut point_to_values[idx as usize]; if point_values.is_empty() { indexed_points += 1; } values_count += 1; point_values.push(value.clone()); map.entry(value).or_default().insert(idx); } Ok(Some(Self { map, point_to_values, indexed_points, values_count, storage: Storage::RocksDb(db_wrapper), })) } /// Open and load mutable map index from Gridstore storage /// /// The `create_if_missing` parameter indicates whether to create a new Gridstore if it does /// not exist. If false and files don't exist, the load function will indicate nothing could be /// loaded. 
pub fn open_gridstore(path: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> { let store = if create_if_missing { let options = default_gridstore_options(N::gridstore_block_size()); Gridstore::open_or_create(path, options).map_err(|err| { OperationError::service_error(format!( "failed to open mutable map index on gridstore: {err}" )) })? } else if path.exists() { Gridstore::open(path).map_err(|err| { OperationError::service_error(format!( "failed to open mutable map index on gridstore: {err}" )) })? } else { // Files don't exist, cannot load return Ok(None); }; // Load in-memory index from Gridstore let mut map = HashMap::<_, RoaringBitmap>::new(); let mut point_to_values = Vec::new(); let mut indexed_points = 0; let mut values_count = 0; let hw_counter = HardwareCounterCell::disposable(); let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter(); store .iter::<_, ()>( |idx, values: Vec<_>| { for value in values { if point_to_values.len() <= idx as usize { point_to_values.resize_with(idx as usize + 1, Vec::new) } let point_values = &mut point_to_values[idx as usize]; if point_values.is_empty() { indexed_points += 1; } values_count += 1; point_values.push(value.clone()); map.entry(value).or_default().insert(idx); } Ok(true) }, hw_counter_ref, ) // unwrap safety: never returns an error .unwrap(); Ok(Some(Self { map, point_to_values, indexed_points, values_count, storage: Storage::Gridstore(store), })) } pub fn add_many_to_map<Q>( &mut self, idx: PointOffsetType, values: Vec<Q>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> where Q: Into<N::Owned> + Clone, { if values.is_empty() { return Ok(()); } self.values_count += values.len(); if self.point_to_values.len() <= idx as usize { self.point_to_values.resize_with(idx as usize + 1, Vec::new) } self.point_to_values[idx as usize] = Vec::with_capacity(values.len()); match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { let mut hw_cell_wb = 
hw_counter .payload_index_io_write_counter() .write_back_counter(); for value in values { let entry = self.map.entry(value.into()); self.point_to_values[idx as usize].push(entry.key().clone()); let db_record = MapIndex::encode_db_record(entry.key().borrow(), idx); entry.or_default().insert(idx); hw_cell_wb.incr_delta(db_record.len()); db_wrapper.put(db_record, [])?; } } Storage::Gridstore(store) => { let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter(); for value in values.clone() { let entry = self.map.entry(value.into()); self.point_to_values[idx as usize].push(entry.key().clone()); entry.or_default().insert(idx); } let values = values.into_iter().map(|v| v.into()).collect::<Vec<_>>(); store .put_value(idx, &values, hw_counter_ref) .map_err(|err| { OperationError::service_error(format!( "failed to put value in mutable map index gridstore: {err}" )) })?; } } self.indexed_points += 1; Ok(()) } pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> { if self.point_to_values.len() <= idx as usize { return Ok(()); } let removed_values = std::mem::take(&mut self.point_to_values[idx as usize]); if !removed_values.is_empty() { self.indexed_points -= 1; } self.values_count -= removed_values.len(); for value in &removed_values { if let Some(vals) = self.map.get_mut(value.borrow()) { vals.remove(idx); } } match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { for value in &removed_values { let key = MapIndex::encode_db_record(value.borrow(), idx); db_wrapper.remove(key)?; } } Storage::Gridstore(store) => { store.delete_value(idx); } } Ok(()) } #[inline] pub(super) fn clear(&mut self) -> OperationResult<()> { match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.recreate_column_family(), Storage::Gridstore(store) => store.clear().map_err(|err| { OperationError::service_error(format!("Failed to clear mutable map index: {err}",)) }), } } #[inline] pub(super) fn 
wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(), Storage::Gridstore(store) => store.wipe().map_err(|err| { OperationError::service_error(format!("Failed to wipe mutable map index: {err}",)) }), } } /// Clear cache /// /// Only clears cache of Gridstore storage if used. Does not clear in-memory representation of /// index. pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Gridstore(index) => index.clear_cache().map_err(|err| { OperationError::service_error(format!( "Failed to clear mutable map index gridstore cache: {err}" )) }), } } #[inline] pub(super) fn files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Gridstore(store) => store.files(), } } #[inline] pub(super) fn flusher(&self) -> Flusher { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.flusher(), Storage::Gridstore(store) => { let storage_flusher = store.flusher(); Box::new(move || { storage_flusher().map_err(|err| { OperationError::service_error(format!( "Failed to flush mutable map index gridstore: {err}" )) }) }) } } } pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&N) -> bool) -> bool { self.point_to_values .get(idx as usize) .map(|values| values.iter().any(|v| check_fn(v.borrow()))) .unwrap_or(false) } pub fn get_values(&self, idx: PointOffsetType) -> Option<impl Iterator<Item = &N> + '_> { Some( self.point_to_values .get(idx as usize)? 
.iter() .map(|v| v.borrow()), ) } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.point_to_values.get(idx as usize).map(Vec::len) } pub fn get_indexed_points(&self) -> usize { self.indexed_points } pub fn get_values_count(&self) -> usize { self.values_count } pub fn get_unique_values_count(&self) -> usize { self.map.len() } pub fn get_count_for_value(&self, value: &N) -> Option<usize> { self.map.get(value).map(|p| p.len() as usize) } pub fn iter_counts_per_value(&self) -> impl Iterator<Item = (&N, usize)> + '_ { self.map.iter().map(|(k, v)| (k.borrow(), v.len() as usize)) } pub fn iter_values_map(&self) -> impl Iterator<Item = (&N, IdIter<'_>)> { self.map .iter() .map(move |(k, v)| (k.borrow(), Box::new(v.iter()) as IdIter)) } pub fn get_iterator(&self, value: &N) -> IdIter<'_> { self.map .get(value) .map(|ids| Box::new(ids.iter()) as IdIter) .unwrap_or_else(|| Box::new(iter::empty::<PointOffsetType>())) } pub fn iter_values(&self) -> Box<dyn Iterator<Item = &N> + '_> { Box::new(self.map.keys().map(|v| v.borrow())) } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Gridstore(_) => StorageType::Gridstore, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Gridstore(_) => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/map_index/mmap_map_index.rs
lib/segment/src/index/field_index/map_index/mmap_map_index.rs
use std::borrow::Borrow; use std::iter; use std::mem::size_of; use std::path::{Path, PathBuf}; use ahash::HashMap; use common::counter::conditioned_counter::ConditionedCounter; use common::counter::hardware_counter::HardwareCounterCell; use common::counter::iterator_hw_measurement::HwMeasurementIteratorExt; use common::mmap_hashmap::{Key, MmapHashMap, READ_ENTRY_OVERHEAD}; use common::types::PointOffsetType; use fs_err as fs; use io::file_operations::{atomic_save_json, read_json}; use itertools::Itertools; use memmap2::MmapMut; use memory::fadvise::clear_disk_cache; use memory::madvise::AdviceSetting; use memory::mmap_ops::{self, create_and_ensure_length}; use memory::mmap_type::MmapBitSlice; use serde::{Deserialize, Serialize}; use super::{IdIter, MapIndexKey}; use crate::common::Flusher; use crate::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::mmap_point_to_values::MmapPointToValues; const DELETED_PATH: &str = "deleted.bin"; const HASHMAP_PATH: &str = "values_to_points.bin"; const CONFIG_PATH: &str = "mmap_field_index_config.json"; pub struct MmapMapIndex<N: MapIndexKey + Key + ?Sized> { path: PathBuf, pub(super) storage: Storage<N>, // pub(super) value_to_points: MmapHashMap<N, PointOffsetType>, // point_to_values: MmapPointToValues<N>, // pub(super) deleted: MmapBitSliceBufferedUpdateWrapper, deleted_count: usize, total_key_value_pairs: usize, is_on_disk: bool, } pub(super) struct Storage<N: MapIndexKey + Key + ?Sized> { pub(super) value_to_points: MmapHashMap<N, PointOffsetType>, point_to_values: MmapPointToValues<N>, pub(super) deleted: MmapBitSliceBufferedUpdateWrapper, } #[derive(Debug, Clone, Serialize, Deserialize)] struct MmapMapIndexConfig { total_key_value_pairs: usize, } impl<N: MapIndexKey + Key + ?Sized> MmapMapIndex<N> { /// Open and load mmap map index from the given path pub fn open(path: &Path, is_on_disk: 
bool) -> OperationResult<Option<Self>> { let hashmap_path = path.join(HASHMAP_PATH); let deleted_path = path.join(DELETED_PATH); let config_path = path.join(CONFIG_PATH); // If config doesn't exist, assume the index doesn't exist on disk if !config_path.is_file() { return Ok(None); } let config: MmapMapIndexConfig = read_json(&config_path)?; let do_populate = !is_on_disk; let hashmap = MmapHashMap::open(&hashmap_path, do_populate)?; let point_to_values = MmapPointToValues::open(path, do_populate)?; let deleted = mmap_ops::open_write_mmap(&deleted_path, AdviceSetting::Global, do_populate)?; let deleted = MmapBitSlice::from(deleted, 0); let deleted_count = deleted.count_ones(); Ok(Some(Self { path: path.to_path_buf(), storage: Storage { value_to_points: hashmap, point_to_values, deleted: MmapBitSliceBufferedUpdateWrapper::new(deleted), }, deleted_count, total_key_value_pairs: config.total_key_value_pairs, is_on_disk, })) } pub fn build( path: &Path, point_to_values: Vec<Vec<N::Owned>>, values_to_points: HashMap<N::Owned, Vec<PointOffsetType>>, is_on_disk: bool, ) -> OperationResult<Self> { fs::create_dir_all(path)?; let hashmap_path = path.join(HASHMAP_PATH); let deleted_path = path.join(DELETED_PATH); let config_path = path.join(CONFIG_PATH); atomic_save_json( &config_path, &MmapMapIndexConfig { total_key_value_pairs: point_to_values.iter().map(|v| v.len()).sum(), }, )?; MmapHashMap::create( &hashmap_path, values_to_points .iter() .map(|(value, ids)| (value.borrow(), ids.iter().copied())), )?; MmapPointToValues::<N>::from_iter( path, point_to_values.iter().enumerate().map(|(idx, values)| { ( idx as PointOffsetType, values.iter().map(|value| N::as_referenced(value.borrow())), ) }), )?; { let deleted_flags_count = point_to_values.len(); let deleted_file = create_and_ensure_length( &deleted_path, deleted_flags_count .div_ceil(u8::BITS as usize) .next_multiple_of(size_of::<usize>()), )?; let mut deleted_mmap = unsafe { MmapMut::map_mut(&deleted_file)? 
}; deleted_mmap.fill(0); let mut deleted_bitflags = MmapBitSlice::from(deleted_mmap, 0); for (idx, values) in point_to_values.iter().enumerate() { if values.is_empty() { deleted_bitflags.set(idx, true); } } } Self::open(path, is_on_disk)?.ok_or_else(|| { OperationError::service_error("Failed to open MmapMapIndex after building it") }) } pub fn flusher(&self) -> Flusher { self.storage.deleted.flusher() } pub fn wipe(self) -> OperationResult<()> { let files = self.files(); let path = self.path.clone(); // drop mmap handles before deleting files drop(self); for file in files { fs::remove_file(file)?; } let _ = fs::remove_dir(path); Ok(()) } pub fn files(&self) -> Vec<PathBuf> { let mut files = vec![ self.path.join(HASHMAP_PATH), self.path.join(DELETED_PATH), self.path.join(CONFIG_PATH), ]; files.extend(self.storage.point_to_values.files()); files } pub fn immutable_files(&self) -> Vec<PathBuf> { let mut files = vec![self.path.join(HASHMAP_PATH), self.path.join(CONFIG_PATH)]; files.extend(self.storage.point_to_values.immutable_files()); files } pub fn remove_point(&mut self, idx: PointOffsetType) { let idx = idx as usize; if let Some(deleted) = self.storage.deleted.get(idx) && !deleted { self.storage.deleted.set(idx, true); self.deleted_count += 1; } } pub fn check_values_any( &self, idx: PointOffsetType, hw_counter: &HardwareCounterCell, check_fn: impl Fn(&N) -> bool, ) -> bool { let hw_counter = self.make_conditioned_counter(hw_counter); // Measure self.deleted access. hw_counter .payload_index_io_read_counter() .incr_delta(size_of::<bool>()); self.storage .deleted .get(idx as usize) .filter(|b| !b) .is_some_and(|_| { self.storage.point_to_values.check_values_any( idx, |v| check_fn(N::from_referenced(&v)), &hw_counter, ) }) } pub fn get_values( &self, idx: PointOffsetType, ) -> Option<Box<dyn Iterator<Item = N::Referenced<'_>> + '_>> { self.storage .deleted .get(idx as usize) .filter(|b| !b) .and_then(|_| { Some(Box::new(self.storage.point_to_values.get_values(idx)?) 
as Box<dyn Iterator<Item = N::Referenced<'_>>>) }) } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.storage .deleted .get(idx as usize) .filter(|b| !b) .and_then(|_| self.storage.point_to_values.get_values_count(idx)) } pub fn get_indexed_points(&self) -> usize { self.storage .point_to_values .len() .saturating_sub(self.deleted_count) } /// Returns the number of key-value pairs in the index. /// Note that is doesn't count deleted pairs. pub fn get_values_count(&self) -> usize { self.total_key_value_pairs } pub fn get_unique_values_count(&self) -> usize { self.storage.value_to_points.keys_count() } pub fn get_count_for_value( &self, value: &N, hw_counter: &HardwareCounterCell, ) -> Option<usize> { let hw_counter = self.make_conditioned_counter(hw_counter); // Since `value_to_points.get` doesn't actually force read from disk for all values // we need to only account for the overhead of hashmap lookup hw_counter .payload_index_io_read_counter() .incr_delta(READ_ENTRY_OVERHEAD); match self.storage.value_to_points.get(value) { Ok(Some(points)) => Some(points.len()), Ok(None) => None, Err(err) => { debug_assert!( false, "Error while getting count for value {value:?}: {err:?}", ); log::error!("Error while getting count for value {value:?}: {err:?}"); None } } } pub fn get_iterator(&self, value: &N, hw_counter: &HardwareCounterCell) -> IdIter<'_> { let hw_counter = self.make_conditioned_counter(hw_counter); match self.storage.value_to_points.get(value) { Ok(Some(slice)) => { // We're iterating over the whole (mmapped) slice hw_counter .payload_index_io_read_counter() .incr_delta(size_of_val(slice) + READ_ENTRY_OVERHEAD); Box::new( slice .iter() .filter(|idx| !self.storage.deleted.get(**idx as usize).unwrap_or(false)) .copied(), ) } Ok(None) => { hw_counter .payload_index_io_read_counter() .incr_delta(READ_ENTRY_OVERHEAD); Box::new(iter::empty()) } Err(err) => { debug_assert!( false, "Error while getting iterator for value {value:?}: {err:?}", ); 
log::error!("Error while getting iterator for value {value:?}: {err:?}"); Box::new(iter::empty()) } } } pub fn iter_values(&self) -> impl Iterator<Item = &N> + '_ { self.storage.value_to_points.keys() } pub fn iter_counts_per_value(&self) -> impl Iterator<Item = (&N, usize)> + '_ { self.storage.value_to_points.iter().map(|(k, v)| { let count = v .iter() .filter(|idx| !self.storage.deleted.get(**idx as usize).unwrap_or(true)) .unique() .count(); (k, count) }) } pub fn iter_values_map<'a>( &'a self, hw_counter: &'a HardwareCounterCell, ) -> impl Iterator<Item = (&'a N, IdIter<'a>)> + 'a { let hw_counter = self.make_conditioned_counter(hw_counter); self.storage.value_to_points.iter().map(move |(k, v)| { hw_counter .payload_index_io_read_counter() .incr_delta(k.write_bytes()); ( k, Box::new( v.iter() .copied() .filter(|idx| !self.storage.deleted.get(*idx as usize).unwrap_or(true)) .measure_hw_with_acc( hw_counter.new_accumulator(), size_of::<PointOffsetType>(), |i| i.payload_index_io_read_counter(), ), ) as IdIter, ) }) } fn make_conditioned_counter<'a>( &self, hw_counter: &'a HardwareCounterCell, ) -> ConditionedCounter<'a> { ConditionedCounter::new(self.is_on_disk, hw_counter) } pub fn is_on_disk(&self) -> bool { self.is_on_disk } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { self.storage.value_to_points.populate()?; self.storage.point_to_values.populate(); Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { let value_to_points_path = self.path.join(HASHMAP_PATH); let deleted_path = self.path.join(DELETED_PATH); clear_disk_cache(&value_to_points_path)?; clear_disk_cache(&deleted_path)?; self.storage.point_to_values.clear_cache()?; Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/map_index/immutable_map_index.rs
lib/segment/src/index/field_index/map_index/immutable_map_index.rs
use std::borrow::Borrow as _; use std::collections::HashMap; use std::iter; use std::ops::Range; use std::path::PathBuf; #[cfg(feature = "rocksdb")] use std::sync::Arc; use bitvec::vec::BitVec; use common::mmap_hashmap::Key; use common::types::PointOffsetType; use gridstore::Blob; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; #[cfg(feature = "rocksdb")] use super::MapIndex; use super::mmap_map_index::MmapMapIndex; use super::{IdIter, MapIndexKey}; use crate::common::Flusher; use crate::common::operation_error::OperationResult; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::index::field_index::immutable_point_to_values::ImmutablePointToValues; use crate::index::field_index::mmap_point_to_values::MmapValue; use crate::index::payload_config::StorageType; pub struct ImmutableMapIndex<N: MapIndexKey + Key + ?Sized> { value_to_points: HashMap<N::Owned, ContainerSegment>, /// Container holding a slice of point IDs per value. `value_to_point` holds the range per value. /// Each slice MUST be sorted so that we can binary search over it. value_to_points_container: Vec<PointOffsetType>, deleted_value_to_points_container: BitVec, point_to_values: ImmutablePointToValues<N::Owned>, /// Amount of point which have at least one indexed payload value indexed_points: usize, values_count: usize, // Backing storage, source of state, persists deletions storage: Storage<N>, } enum Storage<N: MapIndexKey + Key + ?Sized> { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Mmap(Box<MmapMapIndex<N>>), } pub(super) struct ContainerSegment { /// Range in the container which holds point IDs for the value. range: Range<u32>, /// Number of available point IDs in the range, excludes number of deleted points. 
count: u32, } impl<N: MapIndexKey + ?Sized> ImmutableMapIndex<N> where Vec<N::Owned>: Blob + Send + Sync, { /// Open and load immutable numeric index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb(db: Arc<RwLock<DB>>, field_name: &str) -> OperationResult<Option<Self>> { use crate::index::field_index::map_index::mutable_map_index::MutableMapIndex; let store_cf_name = MapIndex::<N>::storage_cf_name(field_name); let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); // To avoid code duplication, use `MutableMapIndex` to load data from db // and convert to immutable state let Some(mutable) = MutableMapIndex::<N>::open_rocksdb_db_wrapper(db_wrapper.clone(), false)? else { // Column family doesn't exist, cannot load return Ok(None); }; let MutableMapIndex::<N> { map, point_to_values, indexed_points, values_count, .. } = mutable; let mut value_to_points = HashMap::new(); let mut value_to_points_container = Vec::with_capacity(values_count); // flatten values-to-points map for (value, points) in map { let points = points.into_iter().collect::<Vec<_>>(); let container_len = value_to_points_container.len() as u32; let range = container_len..container_len + points.len() as u32; value_to_points.insert( value, ContainerSegment { count: range.len() as u32, range, }, ); value_to_points_container.extend(points); } value_to_points.shrink_to_fit(); // Sort IDs in each slice of points // This is very important because we binary search for value in value_to_points.keys() { if let Some((slice, _offset)) = Self::get_mut_point_ids_slice( &value_to_points, &mut value_to_points_container, value.borrow(), ) { slice.sort_unstable(); } else { debug_assert!( false, "value {} not found in value_to_points", value.borrow(), ); } } Ok(Some(Self { value_to_points, value_to_points_container, deleted_value_to_points_container: BitVec::new(), point_to_values: ImmutablePointToValues::new(point_to_values), indexed_points, 
values_count, storage: Storage::RocksDb(db_wrapper), })) } /// Open and load immutable numeric index from mmap storage pub(super) fn open_mmap(index: MmapMapIndex<N>) -> Self { // Construct intermediate values to points map from backing storage let mapping = || { index.storage.value_to_points.iter().map(|(value, ids)| { ( value, ids.iter().copied().filter(|idx| { let is_deleted = index.storage.deleted.get(*idx as usize).unwrap_or(false); !is_deleted }), ) }) }; let mut indexed_points = 0; let mut values_count = 0; let mut value_to_points = HashMap::new(); // Create points to values mapping let mut point_to_values: Vec<Vec<N::Owned>> = vec![]; for (value, ids) in mapping() { for idx in ids { if point_to_values.len() <= idx as usize { point_to_values.resize_with(idx as usize + 1, Vec::new) } let point_values = &mut point_to_values[idx as usize]; if point_values.is_empty() { indexed_points += 1; } values_count += 1; point_values.push(value.to_owned()); } } let point_to_values = ImmutablePointToValues::new(point_to_values); // Create flattened values-to-points mapping let mut value_to_points_container = Vec::with_capacity(values_count); for (value, points) in mapping() { let points = points.into_iter().collect::<Vec<_>>(); let container_len = value_to_points_container.len() as u32; let range = container_len..container_len + points.len() as u32; value_to_points.insert( value.to_owned(), ContainerSegment { count: range.len() as u32, range, }, ); value_to_points_container.extend(points); } value_to_points.shrink_to_fit(); // Sort IDs in each slice of points // This is very important because we binary search for value in value_to_points.keys() { if let Some((slice, _offset)) = Self::get_mut_point_ids_slice( &value_to_points, &mut value_to_points_container, value.borrow(), ) { slice.sort_unstable(); } else { debug_assert!( false, "value {} not found in value_to_points", value.borrow(), ); } } debug_assert_eq!(indexed_points, index.get_indexed_points()); // Index is now 
loaded into memory, clear cache of backing mmap storage if let Err(err) = index.clear_cache() { log::warn!("Failed to clear mmap cache of ram mmap map index: {err}"); } Self { value_to_points, value_to_points_container, deleted_value_to_points_container: BitVec::new(), point_to_values, indexed_points, values_count, storage: Storage::Mmap(Box::new(index)), } } /// Return mutable slice of a container which holds point_ids for given value. /// /// The returned slice is sorted and does contain deleted values. /// The returned offset is the start of the range in the container. fn get_mut_point_ids_slice<'a>( value_to_points: &HashMap<N::Owned, ContainerSegment>, value_to_points_container: &'a mut [PointOffsetType], value: &N, ) -> Option<(&'a mut [PointOffsetType], usize)> { match value_to_points.get(value) { Some(entry) if entry.count > 0 => { let range = entry.range.start as usize..entry.range.end as usize; let vals = &mut value_to_points_container[range]; Some((vals, entry.range.start as usize)) } _ => None, } } /// Shrinks the range of values-to-points by one. /// /// Returns true if the last element was removed. fn shrink_value_range( value_to_points: &mut HashMap<N::Owned, ContainerSegment>, value: &N, ) -> bool { if let Some(entry) = value_to_points.get_mut(value) { entry.count = entry.count.saturating_sub(1); return entry.count == 0; } false } /// Removes `idx` from values-to-points-container. /// It is implemented by shrinking the range of values-to-points by one and moving the removed element /// out of the range. /// Previously last element is swapped with the removed one and then the range is shrank by one. 
/// /// /// Example: /// Before: /// /// value_to_points -> { /// "a": 0..5, /// "b": 5..10 /// } /// value_to_points_container -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] /// /// Args: /// value: "a" /// idx: 3 /// /// After: /// /// value_to_points -> { /// "a": 0..4, /// "b": 5..10 /// } /// /// value_to_points_container -> [0, 1, 2, 4, (3), 5, 6, 7, 8, 9] fn remove_idx_from_value_list( value_to_points: &mut HashMap<N::Owned, ContainerSegment>, value_to_points_container: &mut [PointOffsetType], deleted_value_to_points_container: &mut BitVec, value: &N, idx: PointOffsetType, ) { let Some((values, offset)) = Self::get_mut_point_ids_slice(value_to_points, value_to_points_container, value) else { debug_assert!(false, "value {value} not found in value_to_points"); return; }; // Finds the index of `idx` in values-to-points map which we want to remove // We mark it as removed in deleted flags if let Ok(local_pos) = values.binary_search(&idx) { let pos = offset + local_pos; if deleted_value_to_points_container.len() < pos + 1 { deleted_value_to_points_container.resize(pos + 1, false); } #[allow(unused_variables)] let did_exist = !deleted_value_to_points_container.replace(pos, true); debug_assert!(did_exist, "value {value} was already deleted"); } if Self::shrink_value_range(value_to_points, value) { value_to_points.remove(value); } } pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> { if let Some(removed_values) = self.point_to_values.get_values(idx) { let mut removed_values_count = 0; for value in removed_values { Self::remove_idx_from_value_list( &mut self.value_to_points, &mut self.value_to_points_container, &mut self.deleted_value_to_points_container, value.borrow(), idx, ); // Update persisted storage match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => { let key = MapIndex::encode_db_record(value.borrow(), idx); db_wrapper.remove(key)?; } Storage::Mmap(ref mut index) => { index.remove_point(idx); } } 
removed_values_count += 1; } if removed_values_count > 0 { self.indexed_points -= 1; } self.values_count = self .values_count .checked_sub(removed_values_count) .unwrap_or_default(); } self.point_to_values.remove_point(idx); Ok(()) } #[cfg(all(test, feature = "rocksdb"))] pub fn db_wrapper(&self) -> Option<&DatabaseColumnScheduledDeleteWrapper> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => Some(db_wrapper), Storage::Mmap(_) => None, } } #[inline] pub(super) fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(), Storage::Mmap(index) => index.wipe(), } } /// Clear cache /// /// Only clears cache of mmap storage if used. Does not clear in-memory representation of /// index. pub fn clear_cache(&self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Mmap(ref index) => index.clear_cache(), } } #[inline] pub(super) fn files(&self) -> Vec<PathBuf> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(ref index) => index.files(), } } #[inline] pub(super) fn immutable_files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(index) => index.immutable_files(), } } #[inline] pub(super) fn flusher(&self) -> Flusher { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(ref db_wrapper) => db_wrapper.flusher(), Storage::Mmap(ref index) => index.flusher(), } } pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&N) -> bool) -> bool { let mut hw_count_val = 0; self.point_to_values.check_values_any(idx, |v| { let v = v.borrow(); hw_count_val += <N as MmapValue>::mmapped_size(v.as_referenced()); check_fn(v) }) } pub fn get_values(&self, idx: PointOffsetType) -> Option<impl Iterator<Item = &N> + '_> { 
Some(self.point_to_values.get_values(idx)?.map(|v| v.borrow())) } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.point_to_values.get_values_count(idx) } pub fn get_indexed_points(&self) -> usize { self.indexed_points } pub fn get_values_count(&self) -> usize { self.values_count } pub fn get_unique_values_count(&self) -> usize { self.value_to_points.len() } pub fn get_count_for_value(&self, value: &N) -> Option<usize> { self.value_to_points .get(value) .map(|entry| entry.count as usize) } pub fn iter_counts_per_value(&self) -> impl Iterator<Item = (&N, usize)> + '_ { self.value_to_points .iter() .map(|(k, entry)| (k.borrow(), entry.count as usize)) } pub fn iter_values_map(&self) -> impl Iterator<Item = (&N, IdIter<'_>)> { self.value_to_points.keys().map(move |k| { ( k.borrow(), Box::new(self.get_iterator(k.borrow())) as IdIter, ) }) } pub fn get_iterator(&self, value: &N) -> IdIter<'_> { if let Some(entry) = self.value_to_points.get(value) { let range = entry.range.start as usize..entry.range.end as usize; let deleted_flags = self .deleted_value_to_points_container .iter() .by_vals() .skip(range.start) .chain(std::iter::repeat(false)); let values = self.value_to_points_container[range] .iter() .zip(deleted_flags) .filter(|(_, is_deleted)| !is_deleted) .map(|(idx, _)| *idx); Box::new(values) } else { Box::new(iter::empty::<PointOffsetType>()) } } pub fn iter_values(&self) -> Box<dyn Iterator<Item = &N> + '_> { Box::new(self.value_to_points.keys().map(|v| v.borrow())) } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Mmap(_) => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/map_index/mod.rs
lib/segment/src/index/field_index/map_index/mod.rs
use std::borrow::Borrow; use std::collections::hash_map::Entry; use std::fmt::{Debug, Display}; use std::hash::{BuildHasher, Hash}; use std::iter; use std::path::{Path, PathBuf}; use std::str::FromStr; #[cfg(feature = "rocksdb")] use std::sync::Arc; use ahash::HashMap; use common::counter::hardware_counter::HardwareCounterCell; use common::mmap_hashmap::Key; use common::types::PointOffsetType; use ecow::EcoString; use gridstore::Blob; use indexmap::IndexSet; use itertools::Itertools; use mmap_map_index::MmapMapIndex; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use serde_json::Value; use uuid::Uuid; use self::immutable_map_index::ImmutableMapIndex; use self::mutable_map_index::MutableMapIndex; use super::FieldIndexBuilderTrait; use super::facet_index::FacetIndex; use super::mmap_point_to_values::MmapValue; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; use crate::data_types::facets::{FacetHit, FacetValueRef}; use crate::index::field_index::stat_tools::number_of_selected_points; use crate::index::field_index::{ CardinalityEstimation, PayloadBlockCondition, PayloadFieldIndex, PrimaryCondition, ValueIndexer, }; use crate::index::payload_config::{IndexMutability, StorageType}; use crate::index::query_estimator::combine_should_estimations; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{ AnyVariants, FieldCondition, IntPayloadType, Match, MatchAny, MatchExcept, MatchValue, PayloadKeyType, UuidIntType, ValueVariants, }; pub mod immutable_map_index; pub mod mmap_map_index; pub mod mutable_map_index; /// Block size in Gridstore for keyword map index. /// Keyword(s) are stored as cbor vector. 
/// - "text" - 6 bytes /// - "some", "text", "here" - 16 bytes pub(super) const BLOCK_SIZE_KEYWORD: usize = 16; pub type IdRefIter<'a> = Box<dyn Iterator<Item = &'a PointOffsetType> + 'a>; pub type IdIter<'a> = Box<dyn Iterator<Item = PointOffsetType> + 'a>; pub trait MapIndexKey: Key + MmapValue + Eq + Display + Debug { type Owned: Borrow<Self> + Hash + Eq + Clone + FromStr + Default + 'static; fn to_owned(&self) -> Self::Owned; fn gridstore_block_size() -> usize { size_of::<Self::Owned>() } } impl MapIndexKey for str { type Owned = EcoString; fn to_owned(&self) -> Self::Owned { EcoString::from(self) } fn gridstore_block_size() -> usize { BLOCK_SIZE_KEYWORD } } impl MapIndexKey for IntPayloadType { type Owned = IntPayloadType; fn to_owned(&self) -> Self::Owned { *self } } impl MapIndexKey for UuidIntType { type Owned = UuidIntType; fn to_owned(&self) -> Self::Owned { *self } } pub enum MapIndex<N: MapIndexKey + ?Sized> where Vec<N::Owned>: Blob + Send + Sync, { Mutable(MutableMapIndex<N>), Immutable(ImmutableMapIndex<N>), Mmap(Box<MmapMapIndex<N>>), } impl<N: MapIndexKey + ?Sized> MapIndex<N> where Vec<N::Owned>: Blob + Send + Sync, { #[cfg(feature = "rocksdb")] pub fn new_rocksdb( db: Arc<RwLock<DB>>, field_name: &str, is_appendable: bool, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let index = if is_appendable { MutableMapIndex::open_rocksdb(db, field_name, create_if_missing)?.map(MapIndex::Mutable) } else { ImmutableMapIndex::open_rocksdb(db, field_name)?.map(MapIndex::Immutable) }; Ok(index) } /// Load immutable mmap based index, either in RAM or on disk pub fn new_mmap(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> { let Some(mmap_index) = MmapMapIndex::open(path, is_on_disk)? 
else { // Files don't exist, cannot load return Ok(None); }; let index = if is_on_disk { // Use on mmap directly MapIndex::Mmap(Box::new(mmap_index)) } else { // Load into RAM, use mmap as backing storage MapIndex::Immutable(ImmutableMapIndex::open_mmap(mmap_index)) }; Ok(Some(index)) } pub fn new_gridstore(dir: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> { let index = MutableMapIndex::open_gridstore(dir, create_if_missing)?; Ok(index.map(MapIndex::Mutable)) } #[cfg(feature = "rocksdb")] pub fn builder_rocksdb( db: Arc<RwLock<DB>>, field_name: &str, ) -> OperationResult<MapIndexBuilder<N>> { Ok(MapIndexBuilder(MapIndex::Mutable( MutableMapIndex::open_rocksdb(db, field_name, true)?.ok_or_else(|| { OperationError::service_error(format!( "Failed to create and load mutable map index builder for field '{field_name}'", )) })?, ))) } pub fn builder_mmap(path: &Path, is_on_disk: bool) -> MapIndexMmapBuilder<N> { MapIndexMmapBuilder { path: path.to_owned(), point_to_values: Default::default(), values_to_points: Default::default(), is_on_disk, } } pub fn builder_gridstore(dir: PathBuf) -> MapIndexGridstoreBuilder<N> { MapIndexGridstoreBuilder::new(dir) } pub fn check_values_any( &self, idx: PointOffsetType, hw_counter: &HardwareCounterCell, check_fn: impl Fn(&N) -> bool, ) -> bool { match self { MapIndex::Mutable(index) => index.check_values_any(idx, check_fn), MapIndex::Immutable(index) => index.check_values_any(idx, check_fn), MapIndex::Mmap(index) => index.check_values_any(idx, hw_counter, check_fn), } } pub fn get_values( &self, idx: PointOffsetType, ) -> Option<Box<dyn Iterator<Item = N::Referenced<'_>> + '_>> { match self { MapIndex::Mutable(index) => Some(Box::new( index.get_values(idx)?.map(|v| N::as_referenced(v)), )), MapIndex::Immutable(index) => Some(Box::new( index.get_values(idx)?.map(|v| N::as_referenced(v)), )), MapIndex::Mmap(index) => Some(Box::new(index.get_values(idx)?)), } } pub fn values_count(&self, idx: PointOffsetType) -> usize 
{ match self { MapIndex::Mutable(index) => index.values_count(idx).unwrap_or_default(), MapIndex::Immutable(index) => index.values_count(idx).unwrap_or_default(), MapIndex::Mmap(index) => index.values_count(idx).unwrap_or_default(), } } fn get_indexed_points(&self) -> usize { match self { MapIndex::Mutable(index) => index.get_indexed_points(), MapIndex::Immutable(index) => index.get_indexed_points(), MapIndex::Mmap(index) => index.get_indexed_points(), } } fn get_values_count(&self) -> usize { match self { MapIndex::Mutable(index) => index.get_values_count(), MapIndex::Immutable(index) => index.get_values_count(), MapIndex::Mmap(index) => index.get_values_count(), } } pub fn get_unique_values_count(&self) -> usize { match self { MapIndex::Mutable(index) => index.get_unique_values_count(), MapIndex::Immutable(index) => index.get_unique_values_count(), MapIndex::Mmap(index) => index.get_unique_values_count(), } } fn get_count_for_value(&self, value: &N, hw_counter: &HardwareCounterCell) -> Option<usize> { match self { MapIndex::Mutable(index) => index.get_count_for_value(value), MapIndex::Immutable(index) => index.get_count_for_value(value), MapIndex::Mmap(index) => index.get_count_for_value(value, hw_counter), } } fn get_iterator(&self, value: &N, hw_counter: &HardwareCounterCell) -> IdIter<'_> { match self { MapIndex::Mutable(index) => index.get_iterator(value), MapIndex::Immutable(index) => index.get_iterator(value), MapIndex::Mmap(index) => index.get_iterator(value, hw_counter), } } pub fn iter_values(&self) -> Box<dyn Iterator<Item = &N> + '_> { match self { MapIndex::Mutable(index) => index.iter_values(), MapIndex::Immutable(index) => index.iter_values(), MapIndex::Mmap(index) => Box::new(index.iter_values()), } } pub fn iter_counts_per_value(&self) -> Box<dyn Iterator<Item = (&N, usize)> + '_> { match self { MapIndex::Mutable(index) => Box::new(index.iter_counts_per_value()), MapIndex::Immutable(index) => Box::new(index.iter_counts_per_value()), 
MapIndex::Mmap(index) => Box::new(index.iter_counts_per_value()), } } pub fn iter_values_map<'a>( &'a self, hw_cell: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = (&'a N, IdIter<'a>)> + 'a> { match self { MapIndex::Mutable(index) => Box::new(index.iter_values_map()), MapIndex::Immutable(index) => Box::new(index.iter_values_map()), MapIndex::Mmap(index) => Box::new(index.iter_values_map(hw_cell)), } } pub fn storage_cf_name(field: &str) -> String { format!("{field}_map") } fn flusher(&self) -> Flusher { match self { MapIndex::Mutable(index) => index.flusher(), MapIndex::Immutable(index) => index.flusher(), MapIndex::Mmap(index) => index.flusher(), } } fn match_cardinality( &self, value: &N, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { let values_count = self.get_count_for_value(value, hw_counter).unwrap_or(0); CardinalityEstimation::exact(values_count) } pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry { PayloadIndexTelemetry { field_name: None, points_count: self.get_indexed_points(), points_values_count: self.get_values_count(), histogram_bucket_size: None, index_type: match self { MapIndex::Mutable(_) => "mutable_map", MapIndex::Immutable(_) => "immutable_map", MapIndex::Mmap(_) => "mmap_map", }, } } pub fn encode_db_record(value: &N, idx: PointOffsetType) -> String { format!("{value}/{idx}") } pub fn decode_db_record(s: &str) -> OperationResult<(N::Owned, PointOffsetType)> { const DECODE_ERR: &str = "Index db parsing error: wrong data format"; let separator_pos = s .rfind('/') .ok_or_else(|| OperationError::service_error(DECODE_ERR))?; if separator_pos == s.len() - 1 { return Err(OperationError::service_error(DECODE_ERR)); } let value_str = &s[..separator_pos]; let value = N::Owned::from_str(value_str).map_err(|_| OperationError::service_error(DECODE_ERR))?; let idx_str = &s[separator_pos + 1..]; let idx = PointOffsetType::from_str(idx_str) .map_err(|_| OperationError::service_error(DECODE_ERR))?; Ok((value, idx)) } pub fn 
values_is_empty(&self, idx: PointOffsetType) -> bool { self.values_count(idx) == 0 } fn wipe(self) -> OperationResult<()> { match self { MapIndex::Mutable(index) => index.wipe(), MapIndex::Immutable(index) => index.wipe(), MapIndex::Mmap(index) => index.wipe(), } } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { match self { MapIndex::Mutable(index) => index.remove_point(id), MapIndex::Immutable(index) => index.remove_point(id), MapIndex::Mmap(index) => { index.remove_point(id); Ok(()) } } } fn files(&self) -> Vec<PathBuf> { match self { MapIndex::Mutable(index) => index.files(), MapIndex::Immutable(index) => index.files(), MapIndex::Mmap(index) => index.files(), } } fn immutable_files(&self) -> Vec<PathBuf> { match self { MapIndex::Mutable(_) => vec![], MapIndex::Immutable(index) => index.immutable_files(), MapIndex::Mmap(index) => index.immutable_files(), } } /// Estimates cardinality for `except` clause /// /// # Arguments /// /// * 'excluded' - values, which are not considered as matching /// /// # Returns /// /// * `CardinalityEstimation` - estimation of cardinality fn except_cardinality<'a>( &'a self, excluded: impl Iterator<Item = &'a N>, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { // Minimal case: we exclude as many points as possible. // In this case, excluded points do not have any other values except excluded ones. // So the first step - we estimate how many other points is needed to fit unused values. // Example: // Values: 20, 20 // Unique values: 5 // Total points: 100 // Total values: 110 // total_excluded_value_count = 40 // non_excluded_values_count = 110 - 40 = 70 // max_values_per_point = 5 - 2 = 3 // min_not_excluded_by_values = 70 / 3 = 24 // min = max(24, 100 - 40) = 60 // exp = ... 
// max = min(20, 70) = 20 // Values: 60, 60 // Unique values: 5 // Total points: 100 // Total values: 200 // total_excluded_value_count = 120 // non_excluded_values_count = 200 - 120 = 80 // max_values_per_point = 5 - 2 = 3 // min_not_excluded_by_values = 80 / 3 = 27 // min = max(27, 100 - 120) = 27 // exp = ... // max = min(60, 80) = 60 // Values: 60, 60, 60 // Unique values: 5 // Total points: 100 // Total values: 200 // total_excluded_value_count = 180 // non_excluded_values_count = 200 - 180 = 20 // max_values_per_point = 5 - 3 = 2 // min_not_excluded_by_values = 20 / 2 = 10 // min = max(10, 100 - 180) = 10 // exp = ... // max = min(60, 20) = 20 let excluded_value_counts: Vec<_> = excluded .map(|val| { self.get_count_for_value(val.borrow(), hw_counter) .unwrap_or(0) }) .collect(); let total_excluded_value_count: usize = excluded_value_counts.iter().sum(); debug_assert!(total_excluded_value_count <= self.get_values_count()); let non_excluded_values_count = self .get_values_count() .saturating_sub(total_excluded_value_count); let max_values_per_point = self .get_unique_values_count() .saturating_sub(excluded_value_counts.len()); if max_values_per_point == 0 { // All points are excluded, so we can't select any point debug_assert_eq!(non_excluded_values_count, 0); return CardinalityEstimation::exact(0); } // Minimal amount of points, required to fit all unused values. // Cardinality can't be less than this value. let min_not_excluded_by_values = non_excluded_values_count.div_ceil(max_values_per_point); let min = min_not_excluded_by_values.max( self.get_indexed_points() .saturating_sub(total_excluded_value_count), ); // Maximum scenario: selected points overlap as much as possible. // From one side, all excluded values should be assigned to the same point // => we can take the value with the maximum amount of points. // From another side, all other values should be enough to fill all other points. 
let max_excluded_value_count = excluded_value_counts.iter().max().copied().unwrap_or(0); let max = self .get_indexed_points() .saturating_sub(max_excluded_value_count) .min(non_excluded_values_count); // Expected case: we assume that all points are filled equally. // So we can estimate the probability of the point to have non-excluded value. let exp = number_of_selected_points(self.get_indexed_points(), non_excluded_values_count) .max(min) .min(max); CardinalityEstimation { primary_clauses: vec![], min, exp, max, } } fn except_set<'a, K, A>( &'a self, excluded: &'a IndexSet<K, A>, hw_counter: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> where A: BuildHasher, K: Borrow<N> + Hash + Eq, { Box::new( self.iter_values() .filter(|key| !excluded.contains((*key).borrow())) .flat_map(move |key| self.get_iterator(key.borrow(), hw_counter)) .unique(), ) } pub fn is_on_disk(&self) -> bool { match self { MapIndex::Mutable(_) => false, MapIndex::Immutable(_) => false, MapIndex::Mmap(index) => index.is_on_disk(), } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self { MapIndex::Mutable(index) => index.is_rocksdb(), MapIndex::Immutable(index) => index.is_rocksdb(), MapIndex::Mmap(_) => false, } } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { match self { MapIndex::Mutable(_) => {} // Not a mmap MapIndex::Immutable(_) => {} // Not a mmap MapIndex::Mmap(index) => index.populate()?, } Ok(()) } /// Drop disk cache. 
pub fn clear_cache(&self) -> OperationResult<()> { match self { // Only clears backing mmap storage if used, not in-memory representation MapIndex::Mutable(index) => index.clear_cache()?, // Only clears backing mmap storage if used, not in-memory representation MapIndex::Immutable(index) => index.clear_cache()?, MapIndex::Mmap(index) => index.clear_cache()?, } Ok(()) } pub fn get_mutability_type(&self) -> IndexMutability { match self { Self::Mutable(_) => IndexMutability::Mutable, Self::Immutable(_) => IndexMutability::Immutable, Self::Mmap(_) => IndexMutability::Immutable, } } pub fn get_storage_type(&self) -> StorageType { match self { Self::Mutable(index) => index.storage_type(), Self::Immutable(index) => index.storage_type(), Self::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } } pub struct MapIndexBuilder<N: MapIndexKey + ?Sized>(MapIndex<N>) where Vec<N::Owned>: Blob + Send + Sync; impl<N: MapIndexKey + ?Sized> FieldIndexBuilderTrait for MapIndexBuilder<N> where MapIndex<N>: PayloadFieldIndex + ValueIndexer, Vec<N::Owned>: Blob + Send + Sync, { type FieldIndexType = MapIndex<N>; fn init(&mut self) -> OperationResult<()> { match &mut self.0 { MapIndex::Mutable(index) => index.clear(), MapIndex::Immutable(_) => unreachable!(), MapIndex::Mmap(_) => unreachable!(), } } fn add_point( &mut self, id: PointOffsetType, values: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.0.add_point(id, values, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { Ok(self.0) } } pub struct MapIndexMmapBuilder<N: MapIndexKey + ?Sized> { path: PathBuf, point_to_values: Vec<Vec<N::Owned>>, values_to_points: HashMap<N::Owned, Vec<PointOffsetType>>, is_on_disk: bool, } impl<N: MapIndexKey + ?Sized> FieldIndexBuilderTrait for MapIndexMmapBuilder<N> where Vec<N::Owned>: Blob + Send + Sync, MapIndex<N>: PayloadFieldIndex + ValueIndexer, <MapIndex<N> as ValueIndexer>::ValueType: Into<N::Owned>, { type 
FieldIndexType = MapIndex<N>; fn init(&mut self) -> OperationResult<()> { Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let mut flatten_values: Vec<_> = vec![]; for value in payload.iter() { let payload_values = <MapIndex<N> as ValueIndexer>::get_values(value); flatten_values.extend(payload_values); } let flatten_values: Vec<N::Owned> = flatten_values.into_iter().map(Into::into).collect(); if self.point_to_values.len() <= id as usize { self.point_to_values.resize_with(id as usize + 1, Vec::new); } self.point_to_values[id as usize].extend(flatten_values.clone()); let mut hw_cell_wb = hw_counter .payload_index_io_write_counter() .write_back_counter(); for value in flatten_values { let entry = self.values_to_points.entry(value); if let Entry::Vacant(e) = &entry { let size = N::mmapped_size(N::as_referenced(e.key().borrow())); hw_cell_wb.incr_delta(size); } hw_cell_wb.incr_delta(size_of_val(&id)); entry.or_default().push(id); } Ok(()) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { Ok(MapIndex::Mmap(Box::new(MmapMapIndex::build( &self.path, self.point_to_values, self.values_to_points, self.is_on_disk, )?))) } } pub struct MapIndexGridstoreBuilder<N: MapIndexKey + ?Sized> where Vec<N::Owned>: Blob + Send + Sync, { dir: PathBuf, index: Option<MapIndex<N>>, } impl<N: MapIndexKey + ?Sized> MapIndexGridstoreBuilder<N> where Vec<N::Owned>: Blob + Send + Sync, { fn new(dir: PathBuf) -> Self { Self { dir, index: None } } } impl<N: MapIndexKey + ?Sized> FieldIndexBuilderTrait for MapIndexGridstoreBuilder<N> where Vec<N::Owned>: Blob + Send + Sync, MapIndex<N>: PayloadFieldIndex + ValueIndexer, <MapIndex<N> as ValueIndexer>::ValueType: Into<N::Owned>, { type FieldIndexType = MapIndex<N>; fn init(&mut self) -> OperationResult<()> { assert!( self.index.is_none(), "index must be initialized exactly once", ); self.index.replace( MapIndex::new_gridstore(self.dir.clone(), 
true)?.ok_or_else(|| { OperationError::service_error("Failed to create mutable map index") })?, ); Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let Some(index) = &mut self.index else { return Err(OperationError::service_error( "MapIndexGridstoreBuilder: index must be initialized before adding points", )); }; index.add_point(id, payload, hw_counter) } fn finalize(mut self) -> OperationResult<Self::FieldIndexType> { let Some(index) = self.index.take() else { return Err(OperationError::service_error( "MapIndexGridstoreBuilder: index must be initialized to finalize", )); }; index.flusher()()?; Ok(index) } } impl PayloadFieldIndex for MapIndex<str> { fn count_indexed_points(&self) -> usize { self.get_indexed_points() } fn wipe(self) -> OperationResult<()> { self.wipe() } fn flusher(&self) -> Flusher { MapIndex::flusher(self) } fn files(&self) -> Vec<PathBuf> { self.files() } fn immutable_files(&self) -> Vec<PathBuf> { self.immutable_files() } fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match &condition.r#match { Some(Match::Value(MatchValue { value })) => match value { ValueVariants::String(keyword) => { Some(Box::new(self.get_iterator(keyword.as_str(), hw_counter))) } ValueVariants::Integer(_) => None, ValueVariants::Bool(_) => None, }, Some(Match::Any(MatchAny { any: any_variant })) => match any_variant { AnyVariants::Strings(keywords) => Some(Box::new( keywords .iter() .flat_map(move |keyword| self.get_iterator(keyword.as_str(), hw_counter)) .unique(), )), AnyVariants::Integers(integers) => { if integers.is_empty() { Some(Box::new(iter::empty())) } else { None } } }, Some(Match::Except(MatchExcept { except })) => match except { AnyVariants::Strings(keywords) => Some(self.except_set(keywords, hw_counter)), AnyVariants::Integers(other) => { if other.is_empty() { 
Some(Box::new(iter::empty())) } else { None } } }, _ => None, } } fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { match &condition.r#match { Some(Match::Value(MatchValue { value })) => match value { ValueVariants::String(keyword) => { let mut estimation = self.match_cardinality(keyword.as_str(), hw_counter); estimation .primary_clauses .push(PrimaryCondition::Condition(Box::new(condition.clone()))); Some(estimation) } ValueVariants::Integer(_) => None, ValueVariants::Bool(_) => None, }, Some(Match::Any(MatchAny { any: any_variant })) => match any_variant { AnyVariants::Strings(keywords) => { let estimations = keywords .iter() .map(|keyword| self.match_cardinality(keyword.as_str(), hw_counter)) .collect::<Vec<_>>(); let estimation = if estimations.is_empty() { CardinalityEstimation::exact(0) } else { combine_should_estimations(&estimations, self.get_indexed_points()) }; Some( estimation.with_primary_clause(PrimaryCondition::Condition(Box::new( condition.clone(), ))), ) } AnyVariants::Integers(integers) => { if integers.is_empty() { Some(CardinalityEstimation::exact(0).with_primary_clause( PrimaryCondition::Condition(Box::new(condition.clone())), )) } else { None } } }, Some(Match::Except(MatchExcept { except })) => match except { AnyVariants::Strings(keywords) => { Some(self.except_cardinality(keywords.iter().map(|k| k.as_str()), hw_counter)) } AnyVariants::Integers(others) => { if others.is_empty() { Some(CardinalityEstimation::exact(0).with_primary_clause( PrimaryCondition::Condition(Box::new(condition.clone())), )) } else { None } } }, _ => None, } } fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { Box::new( self.iter_values() .map(|value| { ( value, self.get_count_for_value(value, &HardwareCounterCell::disposable()) // Payload_blocks only used in HNSW building, which is unmeasured. 
.unwrap_or(0), ) }) .filter(move |(_value, count)| *count > threshold) .map(move |(value, count)| PayloadBlockCondition { condition: FieldCondition::new_match(key.clone(), value.to_string().into()), cardinality: count, }), ) } } impl PayloadFieldIndex for MapIndex<UuidIntType> { fn count_indexed_points(&self) -> usize { self.get_indexed_points() } fn wipe(self) -> OperationResult<()> { self.wipe() } fn flusher(&self) -> Flusher { MapIndex::flusher(self) } fn files(&self) -> Vec<PathBuf> { self.files() } fn immutable_files(&self) -> Vec<PathBuf> { self.immutable_files() } fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match &condition.r#match { Some(Match::Value(MatchValue { value })) => match value { ValueVariants::String(uuid_string) => { let uuid = Uuid::from_str(uuid_string).ok()?; Some(Box::new(self.get_iterator(&uuid.as_u128(), hw_counter))) } ValueVariants::Integer(_) => None, ValueVariants::Bool(_) => None, }, Some(Match::Any(MatchAny { any: any_variant })) => match any_variant { AnyVariants::Strings(uuids_string) => { let uuids: Result<IndexSet<u128>, _> = uuids_string .iter() .map(|uuid_string| Uuid::from_str(uuid_string).map(|x| x.as_u128())) .collect(); let uuids = uuids.ok()?; Some(Box::new( uuids .into_iter() .flat_map(move |uuid| self.get_iterator(&uuid, hw_counter)) .unique(), )) } AnyVariants::Integers(integers) => { if integers.is_empty() { Some(Box::new(iter::empty())) } else { None } } }, Some(Match::Except(MatchExcept { except })) => match except { AnyVariants::Strings(uuids_string) => { let uuids: Result<IndexSet<u128>, _> = uuids_string .iter() .map(|uuid_string| Uuid::from_str(uuid_string).map(|x| x.as_u128())) .collect(); let excluded_uuids = uuids.ok()?; let exclude_iter = self .iter_values() .filter(move |key| !excluded_uuids.contains(*key)) .flat_map(move |key| self.get_iterator(key, hw_counter)) .unique(); 
Some(Box::new(exclude_iter)) } AnyVariants::Integers(other) => { if other.is_empty() { Some(Box::new(iter::empty())) } else { None } } }, _ => None, } } fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell,
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/numeric_index/tests.rs
lib/segment/src/index/field_index/numeric_index/tests.rs
use common::counter::hardware_accumulator::HwMeasurementAcc; use itertools::Itertools; use rand::prelude::StdRng; use rand::{Rng, SeedableRng}; use rstest::rstest; use tempfile::{Builder, TempDir}; use super::*; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::open_db_with_existing_cf; use crate::json_path::JsonPath; #[cfg(feature = "rocksdb")] const COLUMN_NAME: &str = "test"; #[derive(Clone, Copy)] enum IndexType { #[cfg(feature = "rocksdb")] Mutable, MutableGridstore, #[cfg(feature = "rocksdb")] Immutable, Mmap, RamMmap, } enum IndexBuilder { #[cfg(feature = "rocksdb")] Mutable(NumericIndexBuilder<FloatPayloadType, FloatPayloadType>), MutableGridstore(NumericIndexGridstoreBuilder<FloatPayloadType, FloatPayloadType>), #[cfg(feature = "rocksdb")] Immutable(NumericIndexImmutableBuilder<FloatPayloadType, FloatPayloadType>), Mmap(NumericIndexMmapBuilder<FloatPayloadType, FloatPayloadType>), } impl IndexBuilder { fn finalize(self) -> OperationResult<NumericIndex<FloatPayloadType, FloatPayloadType>> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::Mutable(builder) => builder.finalize(), IndexBuilder::MutableGridstore(builder) => builder.finalize(), #[cfg(feature = "rocksdb")] IndexBuilder::Immutable(builder) => builder.finalize(), IndexBuilder::Mmap(builder) => builder.finalize(), } } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] IndexBuilder::Mutable(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::MutableGridstore(builder) => builder.add_point(id, payload, hw_counter), #[cfg(feature = "rocksdb")] IndexBuilder::Immutable(builder) => builder.add_point(id, payload, hw_counter), IndexBuilder::Mmap(builder) => builder.add_point(id, payload, hw_counter), } } } fn get_index_builder(index_type: IndexType) -> (TempDir, IndexBuilder) { let temp_dir = Builder::new() .prefix("test_numeric_index") .tempdir() 
.unwrap(); #[cfg(feature = "rocksdb")] let db = open_db_with_existing_cf(temp_dir.path()).unwrap(); let mut builder = match index_type { #[cfg(feature = "rocksdb")] IndexType::Mutable => IndexBuilder::Mutable( NumericIndex::<FloatPayloadType, FloatPayloadType>::builder_rocksdb(db, COLUMN_NAME) .unwrap(), ), IndexType::MutableGridstore => IndexBuilder::MutableGridstore(NumericIndex::< FloatPayloadType, FloatPayloadType, >::builder_gridstore( temp_dir.path().to_path_buf(), )), #[cfg(feature = "rocksdb")] IndexType::Immutable => IndexBuilder::Immutable(NumericIndex::< FloatPayloadType, FloatPayloadType, >::builder_rocksdb_immutable( db, COLUMN_NAME )), IndexType::Mmap | IndexType::RamMmap => IndexBuilder::Mmap(NumericIndex::< FloatPayloadType, FloatPayloadType, >::builder_mmap( temp_dir.path(), false )), }; match &mut builder { #[cfg(feature = "rocksdb")] IndexBuilder::Mutable(builder) => builder.init().unwrap(), IndexBuilder::MutableGridstore(builder) => builder.init().unwrap(), #[cfg(feature = "rocksdb")] IndexBuilder::Immutable(builder) => builder.init().unwrap(), IndexBuilder::Mmap(builder) => builder.init().unwrap(), } (temp_dir, builder) } fn random_index( num_points: usize, values_per_point: usize, index_type: IndexType, ) -> (TempDir, NumericIndex<FloatPayloadType, FloatPayloadType>) { let mut rng = StdRng::seed_from_u64(42); let (temp_dir, mut index_builder) = get_index_builder(index_type); let hw_counter = HardwareCounterCell::new(); for i in 0..num_points { let values = (0..values_per_point) .map(|_| Value::from(rng.random_range(0.0..100.0))) .collect_vec(); let values = values.iter().collect_vec(); index_builder .add_point(i as PointOffsetType, &values, &hw_counter) .unwrap(); } let mut index = index_builder.finalize().unwrap(); if matches!(index_type, IndexType::RamMmap) { let NumericIndexInner::Mmap(mmap_index) = index.inner else { panic!("Expected mmap index"); }; index = NumericIndex { inner: 
NumericIndexInner::Immutable(ImmutableNumericIndex::open_mmap(mmap_index)), _phantom: Default::default(), }; } (temp_dir, index) } fn cardinality_request( index: &NumericIndex<FloatPayloadType, FloatPayloadType>, query: Range<FloatPayloadType>, hw_acc: HwMeasurementAcc, ) -> CardinalityEstimation { let hw_counter = hw_acc.get_counter_cell(); let ordered_range = Range { lt: query.lt.map(OrderedFloat::from), gt: query.gt.map(OrderedFloat::from), gte: query.gte.map(OrderedFloat::from), lte: query.lte.map(OrderedFloat::from), }; let estimation = index .inner() .range_cardinality(&RangeInterface::Float(ordered_range)); let result = index .inner() .filter( &FieldCondition::new_range(JsonPath::new("unused"), ordered_range), &hw_counter, ) .unwrap() .unique() .collect_vec(); eprintln!("estimation = {estimation:#?}"); eprintln!("result.len() = {:#?}", result.len()); assert!(estimation.min <= result.len()); assert!(estimation.max >= result.len()); estimation } #[test] fn test_set_empty_payload() { let (_temp_dir, mut index) = random_index(1000, 1, IndexType::MutableGridstore); let point_id = 42; let values_count = index.inner().get_values(point_id).unwrap().count(); assert_ne!(values_count, 0); let hw_counter = HardwareCounterCell::new(); let payload = serde_json::json!(null); index.add_point(point_id, &[&payload], &hw_counter).unwrap(); let values_count = index.inner().get_values(point_id).unwrap().count(); assert_eq!(values_count, 0); } #[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] #[case(IndexType::RamMmap)] fn test_cardinality_exp(#[case] index_type: IndexType) { let (_temp_dir, index) = random_index(1000, 1, index_type); cardinality_request( &index, Range { lt: Some(20.0), gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); cardinality_request( &index, Range { lt: Some(60.0), gt: None, gte: Some(10.0), lte: 
None, }, HwMeasurementAcc::new(), ); let (_temp_dir, index) = random_index(1000, 2, index_type); cardinality_request( &index, Range { lt: Some(20.0), gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); cardinality_request( &index, Range { lt: Some(60.0), gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); cardinality_request( &index, Range { lt: None, gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); cardinality_request( &index, Range { lt: None, gt: None, gte: Some(110.0), lte: None, }, HwMeasurementAcc::new(), ); } #[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] #[case(IndexType::RamMmap)] fn test_payload_blocks(#[case] index_type: IndexType) { let (_temp_dir, index) = random_index(1000, 2, index_type); let threshold = 100; let blocks = index .inner() .payload_blocks(threshold, JsonPath::new("test")) .collect_vec(); assert!(!blocks.is_empty()); eprintln!("threshold {threshold}, blocks.len() = {:#?}", blocks.len()); let threshold = 500; let blocks = index .inner() .payload_blocks(threshold, JsonPath::new("test")) .collect_vec(); assert!(!blocks.is_empty()); eprintln!("threshold {threshold}, blocks.len() = {:#?}", blocks.len()); let threshold = 1000; let blocks = index .inner() .payload_blocks(threshold, JsonPath::new("test")) .collect_vec(); assert!(!blocks.is_empty()); eprintln!("threshold {threshold}, blocks.len() = {:#?}", blocks.len()); let threshold = 10000; let blocks = index .inner() .payload_blocks(threshold, JsonPath::new("test")) .collect_vec(); assert!(!blocks.is_empty()); eprintln!("threshold {threshold}, blocks.len() = {:#?}", blocks.len()); } #[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] 
#[case(IndexType::RamMmap)] fn test_payload_blocks_small(#[case] index_type: IndexType) { let (_temp_dir, mut index_builder) = get_index_builder(index_type); let threshold = 4; let values = vec![ vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![2.0], vec![2.0], vec![2.0], vec![2.0], ]; let hw_counter = HardwareCounterCell::new(); values.into_iter().enumerate().for_each(|(idx, values)| { let values = values.iter().map(|v| Value::from(*v)).collect_vec(); let values = values.iter().collect_vec(); index_builder .add_point(idx as PointOffsetType + 1, &values, &hw_counter) .unwrap(); }); let index = index_builder.finalize().unwrap(); let blocks = index .inner() .payload_blocks(threshold, JsonPath::new("test")) .collect_vec(); assert!(!blocks.is_empty()); } #[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] #[case(IndexType::RamMmap)] fn test_numeric_index_load_from_disk(#[case] index_type: IndexType) { let (temp_dir, mut index_builder) = get_index_builder(index_type); let values = vec![ vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![2.0], vec![2.5], vec![2.6], vec![3.0], ]; let hw_counter = HardwareCounterCell::new(); values.into_iter().enumerate().for_each(|(idx, values)| { let values = values.iter().map(|v| Value::from(*v)).collect_vec(); let values = values.iter().collect_vec(); index_builder .add_point(idx as PointOffsetType + 1, &values, &hw_counter) .unwrap(); }); let index = index_builder.finalize().unwrap(); #[cfg(feature = "rocksdb")] let db = match index.inner() { NumericIndexInner::Mutable(index) => index.db_wrapper().map(|db| db.get_database()), NumericIndexInner::Immutable(index) => index.db_wrapper().map(|db| db.get_database()), NumericIndexInner::Mmap(_) => None, }; drop(index); let new_index = match index_type { #[cfg(feature = "rocksdb")] IndexType::Mutable => { 
NumericIndexInner::<FloatPayloadType>::new_rocksdb(db.unwrap(), COLUMN_NAME, true, true) .unwrap() .unwrap() } IndexType::MutableGridstore => NumericIndexInner::<FloatPayloadType>::new_gridstore( temp_dir.path().to_path_buf(), true, ) .unwrap() .unwrap(), #[cfg(feature = "rocksdb")] IndexType::Immutable => NumericIndexInner::<FloatPayloadType>::new_rocksdb( db.unwrap(), COLUMN_NAME, false, true, ) .unwrap() .unwrap(), IndexType::Mmap => NumericIndexInner::<FloatPayloadType>::new_mmap(temp_dir.path(), true) .unwrap() .unwrap(), IndexType::RamMmap => { NumericIndexInner::<FloatPayloadType>::new_mmap(temp_dir.path(), false) .unwrap() .unwrap() } }; test_cond( &new_index, Range { gt: None, gte: None, lt: None, lte: Some(2.6), }, vec![1, 2, 3, 4, 5, 6, 7, 8], ); } #[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] #[case(IndexType::RamMmap)] fn test_numeric_index(#[case] index_type: IndexType) { let (_temp_dir, mut index_builder) = get_index_builder(index_type); let values = vec![ vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![1.0], vec![2.0], vec![2.5], vec![2.6], vec![3.0], ]; let hw_counter = HardwareCounterCell::new(); values.into_iter().enumerate().for_each(|(idx, values)| { let values = values.iter().map(|v| Value::from(*v)).collect_vec(); let values = values.iter().collect_vec(); index_builder .add_point(idx as PointOffsetType + 1, &values, &hw_counter) .unwrap(); }); let mut index = index_builder.finalize().unwrap(); test_cond( index.inner(), Range { gt: Some(1.0), gte: None, lt: None, lte: None, }, vec![6, 7, 8, 9], ); test_cond( index.inner(), Range { gt: None, gte: Some(1.0), lt: None, lte: None, }, vec![1, 2, 3, 4, 5, 6, 7, 8, 9], ); test_cond( index.inner(), Range { gt: None, gte: None, lt: Some(2.6), lte: None, }, vec![1, 2, 3, 4, 5, 6, 7], ); test_cond( index.inner(), Range { gt: None, gte: None, lt: None, lte: 
Some(2.6), }, vec![1, 2, 3, 4, 5, 6, 7, 8], ); test_cond( index.inner(), Range { gt: None, gte: Some(2.0), lt: None, lte: Some(2.6), }, vec![6, 7, 8], ); // Remove some points index.remove_point(1).unwrap(); index.remove_point(2).unwrap(); index.remove_point(5).unwrap(); test_cond( index.inner(), Range { gt: Some(1.0), gte: None, lt: None, lte: None, }, vec![6, 7, 8, 9], ); test_cond( index.inner(), Range { gt: None, gte: Some(1.0), lt: None, lte: None, }, vec![3, 4, 6, 7, 8, 9], ); test_cond( index.inner(), Range { gt: None, gte: None, lt: Some(2.6), lte: None, }, vec![3, 4, 6, 7], ); test_cond( index.inner(), Range { gt: None, gte: None, lt: None, lte: Some(2.6), }, vec![3, 4, 6, 7, 8], ); test_cond( index.inner(), Range { gt: None, gte: Some(2.0), lt: None, lte: Some(2.6), }, vec![6, 7, 8], ); } fn test_cond< T: Encodable + Numericable + PartialOrd + Clone + MmapValue + Send + Sync + Default + 'static, >( index: &NumericIndexInner<T>, rng: Range<FloatPayloadType>, result: Vec<u32>, ) where Vec<T>: Blob, { let ordered_range = Range { lt: rng.lt.map(OrderedFloat::from), gt: rng.gt.map(OrderedFloat::from), gte: rng.gte.map(OrderedFloat::from), lte: rng.lte.map(OrderedFloat::from), }; let condition = FieldCondition::new_range(JsonPath::new("unused"), ordered_range); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let offsets = index.filter(&condition, &hw_counter).unwrap().collect_vec(); assert_eq!(offsets, result); } // Check we don't panic on an empty index. See <https://github.com/qdrant/qdrant/pull/2933>. 
#[rstest] #[cfg_attr(feature = "rocksdb", case(IndexType::Mutable))] #[case(IndexType::MutableGridstore)] #[cfg_attr(feature = "rocksdb", case(IndexType::Immutable))] #[case(IndexType::Mmap)] #[case(IndexType::RamMmap)] fn test_empty_cardinality(#[case] index_type: IndexType) { let (_temp_dir, index) = random_index(0, 1, index_type); cardinality_request( &index, Range { lt: Some(20.0), gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); let (_temp_dir, index) = random_index(0, 0, index_type); cardinality_request( &index, Range { lt: Some(20.0), gt: None, gte: Some(10.0), lte: None, }, HwMeasurementAcc::new(), ); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/numeric_index/mod.rs
lib/segment/src/index/field_index/numeric_index/mod.rs
pub mod immutable_numeric_index; pub mod mmap_numeric_index; pub mod mutable_numeric_index; #[cfg(test)] mod tests; use std::cmp::{max, min}; use std::marker::PhantomData; use std::ops::Bound; use std::ops::Bound::{Excluded, Included, Unbounded}; use std::path::{Path, PathBuf}; use std::str::FromStr; #[cfg(feature = "rocksdb")] use std::sync::Arc; use chrono::DateTime; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use delegate::delegate; use gridstore::Blob; use mmap_numeric_index::MmapNumericIndex; use mutable_numeric_index::{InMemoryNumericIndex, MutableNumericIndex}; use ordered_float::OrderedFloat; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use serde::Serialize; use serde::de::DeserializeOwned; use serde_json::Value; use uuid::Uuid; use self::immutable_numeric_index::ImmutableNumericIndex; use super::FieldIndexBuilderTrait; use super::histogram::Point; use super::mmap_point_to_values::MmapValue; use super::utils::check_boundaries; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::histogram::{Histogram, Numericable}; use crate::index::field_index::stat_tools::estimate_multi_value_selection_cardinality; use crate::index::field_index::{ CardinalityEstimation, PayloadBlockCondition, PayloadFieldIndex, PrimaryCondition, ValueIndexer, }; use crate::index::key_encoding::{ decode_f64_key_ascending, decode_i64_key_ascending, decode_u128_key_ascending, encode_f64_key_ascending, encode_i64_key_ascending, encode_u128_key_ascending, }; use crate::index::payload_config::{IndexMutability, StorageType}; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{ DateTimePayloadType, FieldCondition, FloatPayloadType, IntPayloadType, Match, MatchValue, PayloadKeyType, Range, RangeInterface, UuidIntType, UuidPayloadType, ValueVariants, }; const HISTOGRAM_MAX_BUCKET_SIZE: usize = 10_000; const 
HISTOGRAM_PRECISION: f64 = 0.01; pub trait StreamRange<T> { fn stream_range( &self, range: &RangeInterface, ) -> Box<dyn DoubleEndedIterator<Item = (T, PointOffsetType)> + '_>; } pub trait Encodable: Copy + Serialize + DeserializeOwned + 'static { fn encode_key(&self, id: PointOffsetType) -> Vec<u8>; fn decode_key(key: &[u8]) -> (PointOffsetType, Self); fn cmp_encoded(&self, other: &Self) -> std::cmp::Ordering; } impl Encodable for IntPayloadType { fn encode_key(&self, id: PointOffsetType) -> Vec<u8> { encode_i64_key_ascending(*self, id) } fn decode_key(key: &[u8]) -> (PointOffsetType, Self) { decode_i64_key_ascending(key) } fn cmp_encoded(&self, other: &Self) -> std::cmp::Ordering { self.cmp(other) } } impl Encodable for u128 { fn encode_key(&self, id: PointOffsetType) -> Vec<u8> { encode_u128_key_ascending(*self, id) } fn decode_key(key: &[u8]) -> (PointOffsetType, Self) { decode_u128_key_ascending(key) } fn cmp_encoded(&self, other: &Self) -> std::cmp::Ordering { self.cmp(other) } } impl Encodable for FloatPayloadType { fn encode_key(&self, id: PointOffsetType) -> Vec<u8> { encode_f64_key_ascending(*self, id) } fn decode_key(key: &[u8]) -> (PointOffsetType, Self) { decode_f64_key_ascending(key) } fn cmp_encoded(&self, other: &Self) -> std::cmp::Ordering { if self.is_nan() && other.is_nan() { return std::cmp::Ordering::Equal; } if self.is_nan() { return std::cmp::Ordering::Less; } if other.is_nan() { return std::cmp::Ordering::Greater; } self.partial_cmp(other).unwrap() } } /// Encodes timestamps as i64 in microseconds impl Encodable for DateTimePayloadType { fn encode_key(&self, id: PointOffsetType) -> Vec<u8> { encode_i64_key_ascending(self.timestamp(), id) } fn decode_key(key: &[u8]) -> (PointOffsetType, Self) { let (id, timestamp) = decode_i64_key_ascending(key); let datetime = DateTime::from_timestamp(timestamp / 1000, (timestamp % 1000) as u32 * 1_000_000) .unwrap_or_else(|| { log::warn!("Failed to decode timestamp {timestamp}, fallback to UNIX_EPOCH"); 
DateTime::UNIX_EPOCH
                });
        (id, datetime.into())
    }

    fn cmp_encoded(&self, other: &Self) -> std::cmp::Ordering {
        self.timestamp().cmp(&other.timestamp())
    }
}

impl<T: Encodable + Numericable> Range<T> {
    /// Convert this payload `Range` into `(start, end)` bounds over
    /// `Point<T>` index keys. The point-id component is saturated to
    /// MIN/MAX so an inclusive value bound covers every point that shares
    /// the boundary value, while an exclusive bound skips all of them.
    pub(in crate::index::field_index::numeric_index) fn as_index_key_bounds(
        &self,
    ) -> (Bound<Point<T>>, Bound<Point<T>>) {
        let start_bound = match self {
            Range { gt: Some(gt), .. } => Excluded(Point::new(*gt, PointOffsetType::MAX)),
            Range { gte: Some(gte), .. } => Included(Point::new(*gte, PointOffsetType::MIN)),
            _ => Unbounded,
        };
        let end_bound = match self {
            Range { lt: Some(lt), .. } => Excluded(Point::new(*lt, PointOffsetType::MIN)),
            Range { lte: Some(lte), .. } => Included(Point::new(*lte, PointOffsetType::MAX)),
            _ => Unbounded,
        };
        (start_bound, end_bound)
    }
}

/// Numeric payload index in one of its three storage flavors:
/// appendable in-memory (`Mutable`), immutable in-RAM (`Immutable`),
/// or immutable memory-mapped (`Mmap`).
pub enum NumericIndexInner<T: Encodable + Numericable + MmapValue + Send + Sync + Default>
where
    Vec<T>: Blob,
{
    Mutable(MutableNumericIndex<T>),
    Immutable(ImmutableNumericIndex<T>),
    Mmap(MmapNumericIndex<T>),
}

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default> NumericIndexInner<T>
where
    Vec<T>: Blob,
{
    /// Open a RocksDB-backed index: mutable when `is_appendable`, immutable
    /// otherwise. Returns `Ok(None)` when the underlying store could not be
    /// opened (presumably absent — confirm in `open_rocksdb`).
    #[cfg(feature = "rocksdb")]
    pub fn new_rocksdb(
        db: Arc<RwLock<DB>>,
        field: &str,
        is_appendable: bool,
        create_if_missing: bool,
    ) -> OperationResult<Option<Self>> {
        if is_appendable {
            Ok(
                MutableNumericIndex::open_rocksdb(db, field, create_if_missing)?
                    .map(NumericIndexInner::Mutable),
            )
        } else {
            Ok(ImmutableNumericIndex::open_rocksdb(db, field)?.map(NumericIndexInner::Immutable))
        }
    }

    /// Load immutable mmap based index, either in RAM or on disk
    pub fn new_mmap(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> {
        let Some(mmap_index) = MmapNumericIndex::open(path, is_on_disk)?
else {
            // Files don't exist, cannot load
            return Ok(None);
        };

        if is_on_disk {
            // Use on mmap directly
            Ok(Some(NumericIndexInner::Mmap(mmap_index)))
        } else {
            // Load into RAM, use mmap as backing storage
            Ok(Some(NumericIndexInner::Immutable(
                ImmutableNumericIndex::open_mmap(mmap_index),
            )))
        }
    }

    /// Open (or create, when `create_if_missing`) a mutable gridstore-backed index.
    pub fn new_gridstore(dir: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> {
        Ok(MutableNumericIndex::open_gridstore(dir, create_if_missing)?
            .map(NumericIndexInner::Mutable))
    }

    // Histogram over indexed values; used below for cardinality estimation.
    fn get_histogram(&self) -> &Histogram<T> {
        match self {
            NumericIndexInner::Mutable(index) => index.get_histogram(),
            NumericIndexInner::Immutable(index) => index.get_histogram(),
            NumericIndexInner::Mmap(index) => index.get_histogram(),
        }
    }

    fn get_points_count(&self) -> usize {
        match self {
            NumericIndexInner::Mutable(index) => index.get_points_count(),
            NumericIndexInner::Immutable(index) => index.get_points_count(),
            NumericIndexInner::Mmap(index) => index.get_points_count(),
        }
    }

    fn total_unique_values_count(&self) -> usize {
        match self {
            NumericIndexInner::Mutable(index) => index.total_unique_values_count(),
            NumericIndexInner::Immutable(index) => index.total_unique_values_count(),
            NumericIndexInner::Mmap(index) => index.total_unique_values_count(),
        }
    }

    /// Closure that persists pending state when invoked.
    pub fn flusher(&self) -> Flusher {
        match self {
            NumericIndexInner::Mutable(index) => index.flusher(),
            NumericIndexInner::Immutable(index) => index.flusher(),
            NumericIndexInner::Mmap(index) => index.flusher(),
        }
    }

    /// All files backing this index on disk.
    pub fn files(&self) -> Vec<PathBuf> {
        match self {
            NumericIndexInner::Mutable(index) => index.files(),
            NumericIndexInner::Immutable(index) => index.files(),
            NumericIndexInner::Mmap(index) => index.files(),
        }
    }

    /// Files never modified after creation; none for the mutable variant.
    pub fn immutable_files(&self) -> Vec<PathBuf> {
        match self {
            NumericIndexInner::Mutable(_) => vec![],
            NumericIndexInner::Immutable(index) => index.immutable_files(),
            NumericIndexInner::Mmap(index) => index.immutable_files(),
        }
    }

    /// Remove all values of point `idx` from the index.
    pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> {
        match self {
NumericIndexInner::Mutable(index) => index.remove_point(idx),
            NumericIndexInner::Immutable(index) => index.remove_point(idx),
            NumericIndexInner::Mmap(index) => {
                // Mmap removal is infallible; adapt it to the fallible signature.
                index.remove_point(idx);
                Ok(())
            }
        }
    }

    /// Whether any value stored for point `idx` satisfies `check_fn`.
    /// `hw_counter` is only consulted by the mmap variant (which may do I/O).
    pub fn check_values_any(
        &self,
        idx: PointOffsetType,
        check_fn: impl Fn(&T) -> bool,
        hw_counter: &HardwareCounterCell,
    ) -> bool {
        match self {
            NumericIndexInner::Mutable(index) => index.check_values_any(idx, check_fn),
            NumericIndexInner::Immutable(index) => index.check_values_any(idx, check_fn),
            NumericIndexInner::Mmap(index) => index.check_values_any(idx, check_fn, hw_counter),
        }
    }

    /// Iterator over all values stored for point `idx`, if any.
    pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>> {
        match self {
            NumericIndexInner::Mutable(index) => index.get_values(idx),
            NumericIndexInner::Immutable(index) => index.get_values(idx),
            NumericIndexInner::Mmap(index) => index.get_values(idx),
        }
    }

    /// Number of values stored for point `idx`; 0 when the point is unknown.
    pub fn values_count(&self, idx: PointOffsetType) -> usize {
        match self {
            NumericIndexInner::Mutable(index) => index.values_count(idx).unwrap_or_default(),
            NumericIndexInner::Immutable(index) => index.values_count(idx).unwrap_or_default(),
            NumericIndexInner::Mmap(index) => index.values_count(idx).unwrap_or_default(),
        }
    }

    /// Maximum number of values per point
    ///
    /// # Warning
    ///
    /// Zero if the index is empty.
pub fn max_values_per_point(&self) -> usize {
        match self {
            NumericIndexInner::Mutable(index) => index.get_max_values_per_point(),
            NumericIndexInner::Immutable(index) => index.get_max_values_per_point(),
            NumericIndexInner::Mmap(index) => index.get_max_values_per_point(),
        }
    }

    /// Estimate how many *points* fall into `range`, derived from the
    /// histogram's estimate over *values*.
    fn range_cardinality(&self, range: &RangeInterface) -> CardinalityEstimation {
        let max_values_per_point = self.max_values_per_point();
        if max_values_per_point == 0 {
            // Empty index — nothing can match.
            return CardinalityEstimation::exact(0);
        }

        // Convert the external range into the index value type `T`.
        let range = match range {
            RangeInterface::Float(float_range) => float_range.map(|float| T::from_f64(float.0)),
            RangeInterface::DateTime(datetime_range) => {
                datetime_range.map(|dt| T::from_u128(dt.timestamp() as u128))
            }
        };

        // NB: despite the names, `lbound` is built from lte/lt (the *upper*
        // end of the range) and `gbound` from gte/gt (the *lower* end);
        // `estimate` receives them as (start, end) = (gbound, lbound).
        let lbound = if let Some(lte) = range.lte {
            Included(lte)
        } else if let Some(lt) = range.lt {
            Excluded(lt)
        } else {
            Unbounded
        };

        let gbound = if let Some(gte) = range.gte {
            Included(gte)
        } else if let Some(gt) = range.gt {
            Excluded(gt)
        } else {
            Unbounded
        };

        // (min, estimate, max) counts over values in the range.
        let histogram_estimation = self.get_histogram().estimate(gbound, lbound);
        let min_estimation = histogram_estimation.0;
        let max_estimation = histogram_estimation.2;

        let total_values = self.total_unique_values_count();

        // Example: points_count = 1000, total values = 2000, values_count = 500
        // min = max(1, 500 - (2000 - 1000)) = 1
        // exp = 500 / (2000 / 1000) = 250
        // max = min(1000, 500) = 500

        // Example: points_count = 1000, total values = 1200, values_count = 500
        // min = max(1, 500 - (1200 - 1000)) = 300
        // exp = 500 / (1200 / 1000) = 416
        // max = min(1000, 500) = 500

        // Note: max_values_per_point is never zero here because we check it above
        // NOTE(review): `total_values - self.get_points_count()` underflows
        // (debug panic) if total values ever drops below the point count —
        // confirm that invariant always holds.
        let expected_min = max(
            min_estimation / max_values_per_point,
            max(
                min(1, min_estimation),
                min_estimation.saturating_sub(total_values - self.get_points_count()),
            ),
        );
        let expected_max = min(self.get_points_count(), max_estimation);

        // Probabilistic estimate that accounts for multi-valued points.
        let estimation = estimate_multi_value_selection_cardinality(
            self.get_points_count(),
            total_values,
            histogram_estimation.1,
        )
        .round() as usize;
CardinalityEstimation {
            primary_clauses: vec![],
            min: expected_min,
            // Clamp the probabilistic estimate into [expected_min, expected_max].
            exp: min(expected_max, max(estimation, expected_min)),
            max: expected_max,
        }
    }

    pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry {
        PayloadIndexTelemetry {
            field_name: None,
            points_count: self.get_points_count(),
            points_values_count: self.get_histogram().get_total_count(),
            histogram_bucket_size: Some(self.get_histogram().current_bucket_size()),
            index_type: match self {
                NumericIndexInner::Mutable(_) => "mutable_numeric",
                NumericIndexInner::Immutable(_) => "immutable_numeric",
                NumericIndexInner::Mmap(_) => "mmap_numeric",
            },
        }
    }

    pub fn values_is_empty(&self, idx: PointOffsetType) -> bool {
        self.values_count(idx) == 0
    }

    /// Ids of all points that store exactly `value`.
    pub fn point_ids_by_value<'a>(
        &'a self,
        value: T,
        hw_counter: &'a HardwareCounterCell,
    ) -> Box<dyn Iterator<Item = PointOffsetType> + 'a> {
        // Inclusive id bounds select every entry carrying this exact value.
        let start = Bound::Included(Point::new(value, PointOffsetType::MIN));
        let end = Bound::Included(Point::new(value, PointOffsetType::MAX));
        match &self {
            NumericIndexInner::Mutable(mutable) => Box::new(mutable.values_range(start, end)),
            NumericIndexInner::Immutable(immutable) => Box::new(immutable.values_range(start, end)),
            NumericIndexInner::Mmap(mmap) => Box::new(mmap.values_range(start, end, hw_counter)),
        }
    }

    /// Tries to estimate the amount of points for a given key.
    pub fn estimate_points(&self, value: &T, hw_counter: &HardwareCounterCell) -> usize {
        let start = Bound::Included(Point::new(*value, PointOffsetType::MIN));
        let end = Bound::Included(Point::new(*value, PointOffsetType::MAX));

        hw_counter
            .payload_index_io_read_counter()
            // We have to do 2 times binary search in mmap and immutable storage.
.incr_delta(2 * ((self.total_unique_values_count() as f32).log2().ceil() as usize));

        match &self {
            NumericIndexInner::Mutable(mutable) => {
                let mut iter = mutable.map().range((start, end));
                let first = iter.next();
                let last = iter.next_back();
                match (first, last) {
                    // Exactly one matching entry.
                    (Some(_), None) => 1,
                    // NOTE(review): this measures the end-exclusive spread of
                    // point offsets between the first and last match, not the
                    // number of matching entries — plausible for an estimate,
                    // but confirm it is intentional.
                    (Some(start), Some(end)) => (start.idx..end.idx).len(),
                    (None, _) => 0,
                }
            }
            NumericIndexInner::Immutable(immutable) => {
                let range_size = immutable.values_range_size(start, end);
                if range_size == 0 {
                    return 0;
                }
                // Scale the entry count down by the average number of values per point.
                let avg_values_per_point =
                    self.total_unique_values_count() as f32 / self.get_points_count() as f32;
                (range_size as f32 / avg_values_per_point).max(1.0).round() as usize
            }
            NumericIndexInner::Mmap(mmap) => {
                let range_size = mmap.values_range_size(start, end);
                if range_size == 0 {
                    return 0;
                }
                let avg_values_per_point =
                    self.total_unique_values_count() as f32 / self.get_points_count() as f32;
                (range_size as f32 / avg_values_per_point).max(1.0).round() as usize
            }
        }
    }

    /// Whether values are served from disk (mmap) rather than RAM.
    pub fn is_on_disk(&self) -> bool {
        match self {
            NumericIndexInner::Mutable(_) => false,
            NumericIndexInner::Immutable(_) => false,
            NumericIndexInner::Mmap(index) => index.is_on_disk(),
        }
    }

    #[cfg(feature = "rocksdb")]
    pub fn is_rocksdb(&self) -> bool {
        match self {
            NumericIndexInner::Mutable(index) => index.is_rocksdb(),
            NumericIndexInner::Immutable(index) => index.is_rocksdb(),
            NumericIndexInner::Mmap(_) => false,
        }
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> OperationResult<()> {
        match self {
            NumericIndexInner::Mutable(_) => {}   // Not a mmap
            NumericIndexInner::Immutable(_) => {} // Not a mmap
            NumericIndexInner::Mmap(index) => index.populate()?,
        }
        Ok(())
    }

    /// Drop disk cache.
pub fn clear_cache(&self) -> OperationResult<()> {
        match self {
            // Only clears backing mmap storage if used, not in-memory representation
            NumericIndexInner::Mutable(index) => index.clear_cache()?,
            // Only clears backing mmap storage if used, not in-memory representation
            NumericIndexInner::Immutable(index) => index.clear_cache()?,
            NumericIndexInner::Mmap(index) => index.clear_cache()?,
        }
        Ok(())
    }
}

/// Public numeric-index wrapper: pairs the storage-level `NumericIndexInner<T>`
/// with the payload-facing value type `P`, tracked only at the type level.
pub struct NumericIndex<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P>
where
    Vec<T>: Blob,
{
    inner: NumericIndexInner<T>,
    _phantom: PhantomData<P>,
}

/// Conversion from the payload value type `P` into the stored value type `T`.
pub trait NumericIndexIntoInnerValue<T, P> {
    fn into_inner_value(value: P) -> T;
}

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P> NumericIndex<T, P>
where
    Vec<T>: Blob,
{
    #[cfg(feature = "rocksdb")]
    pub fn new_rocksdb(
        db: Arc<RwLock<DB>>,
        field: &str,
        is_appendable: bool,
        create_if_missing: bool,
    ) -> OperationResult<Option<Self>> {
        Ok(
            NumericIndexInner::new_rocksdb(db, field, is_appendable, create_if_missing)?.map(
                |inner| Self {
                    inner,
                    _phantom: PhantomData,
                },
            ),
        )
    }

    /// Load immutable mmap based index, either in RAM or on disk
    pub fn new_mmap(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> {
        let index = NumericIndexInner::new_mmap(path, is_on_disk)?;
        Ok(index.map(|inner| Self {
            inner,
            _phantom: PhantomData,
        }))
    }

    pub fn new_gridstore(dir: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> {
        let index = NumericIndexInner::new_gridstore(dir, create_if_missing)?;
        Ok(index.map(|inner| Self {
            inner,
            _phantom: PhantomData,
        }))
    }

    /// Builder that writes into a freshly created mutable RocksDB index.
    #[cfg(feature = "rocksdb")]
    pub fn builder_rocksdb(
        db: Arc<RwLock<DB>>,
        field: &str,
    ) -> OperationResult<NumericIndexBuilder<T, P>>
    where
        Self: ValueIndexer<ValueType = P>,
    {
        Ok(NumericIndexBuilder(
            Self::new_rocksdb(db, field, true, true)?.ok_or_else(|| {
                OperationError::service_error(format!(
                    "Failed to create and load mutable numeric index builder for field '{field}'",
                ))
            })?,
        ))
    }

    #[cfg(all(test, feature = "rocksdb"))]
pub fn builder_rocksdb_immutable(
        db: Arc<RwLock<DB>>,
        field: &str,
    ) -> NumericIndexImmutableBuilder<T, P>
    where
        Self: ValueIndexer<ValueType = P>,
    {
        NumericIndexImmutableBuilder {
            index: Self::new_rocksdb(db.clone(), field, true, true)
                // unwrap safety: only used in testing
                .unwrap()
                .unwrap(),
            field: field.to_owned(),
            db,
        }
    }

    /// Builder that accumulates values in RAM and is finalized into a mmap index.
    pub fn builder_mmap(path: &Path, is_on_disk: bool) -> NumericIndexMmapBuilder<T, P>
    where
        Self: ValueIndexer<ValueType = P> + NumericIndexIntoInnerValue<T, P>,
    {
        NumericIndexMmapBuilder {
            path: path.to_owned(),
            in_memory_index: InMemoryNumericIndex::default(),
            is_on_disk,
            _phantom: PhantomData,
        }
    }

    /// Builder for a gridstore index; the store itself is created in `init`.
    pub fn builder_gridstore(dir: PathBuf) -> NumericIndexGridstoreBuilder<T, P>
    where
        Self: ValueIndexer<ValueType = P>,
    {
        NumericIndexGridstoreBuilder::new(dir)
    }

    pub fn inner(&self) -> &NumericIndexInner<T> {
        &self.inner
    }

    pub fn mut_inner(&mut self) -> &mut NumericIndexInner<T> {
        &mut self.inner
    }

    /// Mutability flavor recorded in the payload index config.
    pub fn get_mutability_type(&self) -> IndexMutability {
        match &self.inner {
            NumericIndexInner::Mutable(_) => IndexMutability::Mutable,
            NumericIndexInner::Immutable(_) => IndexMutability::Immutable,
            NumericIndexInner::Mmap(_) => IndexMutability::Immutable,
        }
    }

    /// Storage flavor recorded in the payload index config.
    pub fn get_storage_type(&self) -> StorageType {
        match &self.inner {
            NumericIndexInner::Mutable(index) => index.storage_type(),
            NumericIndexInner::Immutable(index) => index.storage_type(),
            NumericIndexInner::Mmap(index) => StorageType::Mmap {
                is_on_disk: index.is_on_disk(),
            },
        }
    }

    // Forward common accessors straight to the inner index.
    delegate!
{
        to self.inner {
            pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&T) -> bool, hw_counter: &HardwareCounterCell) -> bool;
            pub fn wipe(self) -> OperationResult<()>;
            pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry;
            pub fn values_count(&self, idx: PointOffsetType) -> usize;
            pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>>;
            pub fn values_is_empty(&self, idx: PointOffsetType) -> bool;
            pub fn is_on_disk(&self) -> bool;
            pub fn populate(&self) -> OperationResult<()>;
            pub fn clear_cache(&self) -> OperationResult<()>;
        }
    }

    #[cfg(feature = "rocksdb")]
    delegate! {
        to self.inner {
            pub fn is_rocksdb(&self) -> bool;
        }
    }
}

/// Build-time wrapper around a mutable `NumericIndex`; the finished index
/// stays in its mutable form.
pub struct NumericIndexBuilder<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P>(
    NumericIndex<T, P>,
)
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob;

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P> FieldIndexBuilderTrait
    for NumericIndexBuilder<T, P>
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    type FieldIndexType = NumericIndex<T, P>;

    fn init(&mut self) -> OperationResult<()> {
        // Builders are always constructed over the mutable variant.
        match &mut self.0.inner {
            NumericIndexInner::Mutable(index) => index.clear(),
            NumericIndexInner::Immutable(_) => unreachable!(),
            NumericIndexInner::Mmap(_) => unreachable!(),
        }
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.0.add_point(id, payload, hw_counter)
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        self.0.inner.flusher()()?;
        Ok(self.0)
    }
}

/// Test-only builder: writes through a mutable RocksDB index, then reopens
/// the same column immutably in `finalize`.
#[cfg(all(test, feature = "rocksdb"))]
pub struct NumericIndexImmutableBuilder<
    T: Encodable + Numericable + MmapValue + Send + Sync + Default,
    P,
> where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    index: NumericIndex<T, P>,
    field: String,
    db: Arc<RwLock<DB>>,
}

#[cfg(all(test, feature = "rocksdb"))]
impl<T: Encodable + Numericable + MmapValue + Send + Sync +
Default, P> FieldIndexBuilderTrait
    for NumericIndexImmutableBuilder<T, P>
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    type FieldIndexType = NumericIndex<T, P>;

    fn init(&mut self) -> OperationResult<()> {
        match &mut self.index.inner {
            NumericIndexInner::Mutable(index) => index.clear(),
            NumericIndexInner::Immutable(_) => unreachable!(),
            NumericIndexInner::Mmap(_) => unreachable!(),
        }
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.index.add_point(id, payload, hw_counter)
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        // Flush and drop the mutable index, then reopen it as immutable.
        self.index.inner.flusher()()?;
        drop(self.index);
        let inner: NumericIndexInner<T> =
            NumericIndexInner::new_rocksdb(self.db, &self.field, false, false)?
                // unwrap safety: only used in testing
                .unwrap();
        Ok(NumericIndex {
            inner,
            _phantom: PhantomData,
        })
    }
}

/// Builder that accumulates all values in RAM and emits a mmap index on finalize.
pub struct NumericIndexMmapBuilder<T, P>
where
    T: Encodable + Numericable + MmapValue + Send + Sync + Default,
    NumericIndex<T, P>: ValueIndexer<ValueType = P> + NumericIndexIntoInnerValue<T, P>,
    Vec<T>: Blob,
{
    path: PathBuf,
    in_memory_index: InMemoryNumericIndex<T>,
    is_on_disk: bool,
    _phantom: PhantomData<P>,
}

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P> FieldIndexBuilderTrait
    for NumericIndexMmapBuilder<T, P>
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P> + NumericIndexIntoInnerValue<T, P>,
    Vec<T>: Blob,
{
    type FieldIndexType = NumericIndex<T, P>;

    fn init(&mut self) -> OperationResult<()> {
        Ok(())
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Re-adding a point replaces its previous values.
        self.in_memory_index.remove_point(id);
        let mut flatten_values: Vec<_> = vec![];
        for value in payload.iter() {
            let payload_values = <NumericIndex<T, P> as ValueIndexer>::get_values(value);
            flatten_values.extend(payload_values);
        }
        let flatten_values = flatten_values
            .into_iter()
            .map(NumericIndex::into_inner_value)
.collect();

        hw_counter
            .payload_index_io_write_counter()
            // NOTE(review): `size_of_val` on a `Vec` binding measures only the
            // vec header (ptr/len/cap), not the element bytes — confirm
            // whether the element payload should be counted here instead.
            .incr_delta(size_of_val(&flatten_values));

        self.in_memory_index.add_many_to_list(id, flatten_values);
        Ok(())
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        let inner = MmapNumericIndex::build(self.in_memory_index, &self.path, self.is_on_disk)?;
        Ok(NumericIndex {
            inner: NumericIndexInner::Mmap(inner),
            _phantom: PhantomData,
        })
    }
}

/// Builder for a gridstore-backed mutable index; the store is created in `init`.
pub struct NumericIndexGridstoreBuilder<
    T: Encodable + Numericable + MmapValue + Send + Sync + Default,
    P,
> where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    dir: PathBuf,
    // `None` until `init` runs; populated exactly once.
    index: Option<NumericIndex<T, P>>,
}

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P>
    NumericIndexGridstoreBuilder<T, P>
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    fn new(dir: PathBuf) -> Self {
        Self { dir, index: None }
    }
}

impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default, P> FieldIndexBuilderTrait
    for NumericIndexGridstoreBuilder<T, P>
where
    NumericIndex<T, P>: ValueIndexer<ValueType = P>,
    Vec<T>: Blob,
{
    type FieldIndexType = NumericIndex<T, P>;

    fn init(&mut self) -> OperationResult<()> {
        assert!(
            self.index.is_none(),
            "index must be initialized exactly once",
        );
        self.index.replace(
            NumericIndex::new_gridstore(self.dir.clone(), true)?
// unwrap safety: cannot fail because create_if_missing is true .unwrap(), ); Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let Some(index) = &mut self.index else { return Err(OperationError::service_error( "NumericIndexGridstoreBuilder: index must be initialized before adding points", )); }; index.add_point(id, payload, hw_counter) } fn finalize(mut self) -> OperationResult<Self::FieldIndexType> { let Some(index) = self.index.take() else { return Err(OperationError::service_error( "NumericIndexGridstoreBuilder: index must be initialized to finalize", )); }; index.inner.flusher()()?; Ok(index) } } impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default> PayloadFieldIndex for NumericIndexInner<T> where Vec<T>: Blob, { fn count_indexed_points(&self) -> usize { self.get_points_count() } fn wipe(self) -> OperationResult<()> { match self { NumericIndexInner::Mutable(index) => index.wipe(), NumericIndexInner::Immutable(index) => index.wipe(), NumericIndexInner::Mmap(index) => index.wipe(), } } fn flusher(&self) -> Flusher { NumericIndexInner::flusher(self) } fn files(&self) -> Vec<PathBuf> { NumericIndexInner::files(self) } fn immutable_files(&self) -> Vec<PathBuf> { NumericIndexInner::immutable_files(self) } fn filter<'a>( &'a self, condition: &FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { if let Some(Match::Value(MatchValue { value: ValueVariants::String(keyword), })) = &condition.r#match { let keyword = keyword.as_str(); if let Ok(uuid) = Uuid::from_str(keyword) { let value = T::from_u128(uuid.as_u128()); return Some(self.point_ids_by_value(value, hw_counter)); } } let range_cond = condition.range.as_ref()?; let (start_bound, end_bound) = match range_cond { RangeInterface::Float(float_range) => float_range.map(|float| T::from_f64(float.0)), RangeInterface::DateTime(datetime_range) => { 
datetime_range.map(|dt| T::from_u128(dt.timestamp() as u128))
            }
        }
        .as_index_key_bounds();

        // map.range
        // Panics if range start > end. Panics if range start == end and both bounds are Excluded.
        if !check_boundaries(&start_bound, &end_bound) {
            // Inverted or empty range — nothing can match.
            return Some(Box::new(std::iter::empty()));
        }

        Some(match self {
            NumericIndexInner::Mutable(index) => {
                Box::new(index.values_range(start_bound, end_bound))
            }
            NumericIndexInner::Immutable(index) => {
                Box::new(index.values_range(start_bound, end_bound))
            }
            NumericIndexInner::Mmap(index) => {
                Box::new(index.values_range(start_bound, end_bound, hw_counter))
            }
        })
    }

    fn estimate_cardinality(
        &self,
        condition: &FieldCondition,
        hw_counter: &HardwareCounterCell,
    ) -> Option<CardinalityEstimation> {
        // UUID string matches are estimated via their u128 key.
        if let Some(Match::Value(MatchValue {
            value: ValueVariants::String(keyword),
        })) = &condition.r#match
        {
            let keyword = keyword.as_str();
            if let Ok(uuid) = Uuid::from_str(keyword) {
                let key = T::from_u128(uuid.as_u128());
                let estimated_count = self.estimate_points(&key, hw_counter);
                return Some(
                    CardinalityEstimation::exact(estimated_count).with_primary_clause(
                        PrimaryCondition::Condition(Box::new(condition.clone())),
                    ),
                );
            }
        }
        condition.range.as_ref().map(|range| {
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/numeric_index/immutable_numeric_index.rs
lib/segment/src/index/field_index/numeric_index/immutable_numeric_index.rs
use std::collections::BTreeSet; use std::ops::Bound; use std::path::PathBuf; #[cfg(feature = "rocksdb")] use std::sync::Arc; use bitvec::vec::BitVec; use common::ext::BitSliceExt as _; use common::types::PointOffsetType; use gridstore::Blob; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use super::Encodable; use super::mmap_numeric_index::MmapNumericIndex; use super::mutable_numeric_index::InMemoryNumericIndex; use crate::common::Flusher; use crate::common::operation_error::OperationResult; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::index::field_index::histogram::{Histogram, Numericable, Point}; use crate::index::field_index::immutable_point_to_values::ImmutablePointToValues; use crate::index::field_index::mmap_point_to_values::MmapValue; use crate::index::payload_config::StorageType; pub struct ImmutableNumericIndex<T: Encodable + Numericable + MmapValue + Default> { map: NumericKeySortedVec<T>, histogram: Histogram<T>, points_count: usize, max_values_per_point: usize, point_to_values: ImmutablePointToValues<T>, // Backing storage, source of state, persists deletions storage: Storage<T>, } enum Storage<T: Encodable + Numericable + MmapValue + Default> { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Mmap(Box<MmapNumericIndex<T>>), } pub(super) struct NumericKeySortedVec<T: Encodable + Numericable> { data: Vec<Point<T>>, deleted: BitVec, deleted_count: usize, } pub(super) struct NumericKeySortedVecIterator<'a, T: Encodable + Numericable> { set: &'a NumericKeySortedVec<T>, start_index: usize, end_index: usize, } impl<T: Encodable + Numericable> NumericKeySortedVec<T> { fn from_btree_set(map: BTreeSet<Point<T>>) -> Self { Self { deleted: BitVec::repeat(false, map.len()), data: map.into_iter().collect(), deleted_count: 
0, } } fn len(&self) -> usize { self.data.len() - self.deleted_count } fn remove(&mut self, key: &Point<T>) -> bool { if let Ok(index) = self.data.binary_search(key) && let Some(is_deleted) = self.deleted.get_mut(index).as_deref_mut() { if !*is_deleted { self.deleted_count += 1; *is_deleted = true; } return true; } false } fn values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> NumericKeySortedVecIterator<'_, T> { let start_index = self.find_start_index(start_bound); let end_index = self.find_end_index(start_index, end_bound); NumericKeySortedVecIterator { set: self, start_index, end_index, } } pub(super) fn find_start_index(&self, bound: Bound<Point<T>>) -> usize { match bound { Bound::Included(bound) => self.data.binary_search(&bound).unwrap_or_else(|idx| idx), Bound::Excluded(bound) => match self.data.binary_search(&bound) { Ok(idx) => idx + 1, Err(idx) => idx, }, Bound::Unbounded => 0, } } pub(super) fn find_end_index(&self, start: usize, bound: Bound<Point<T>>) -> usize { if start >= self.data.len() { // the range `end` should never be less than `start` return start; } match bound { Bound::Included(bound) => match self.data[start..].binary_search(&bound) { Ok(idx) => idx + 1 + start, Err(idx) => idx + start, }, Bound::Excluded(bound) => { let end_bound = self.data[start..].binary_search(&bound); end_bound.unwrap_or_else(|idx| idx) + start } Bound::Unbounded => self.data.len(), } } } impl<T: Encodable + Numericable> Iterator for NumericKeySortedVecIterator<'_, T> { type Item = Point<T>; fn next(&mut self) -> Option<Self::Item> { while self.start_index < self.end_index { let key = self.set.data[self.start_index].clone(); let deleted = self.set.deleted.get_bit(self.start_index).unwrap_or(true); self.start_index += 1; if deleted { continue; } return Some(key); } None } } impl<T: Encodable + Numericable> DoubleEndedIterator for NumericKeySortedVecIterator<'_, T> { fn next_back(&mut self) -> Option<Self::Item> { while self.start_index 
< self.end_index { let key = self.set.data[self.end_index - 1].clone(); let deleted = self.set.deleted.get_bit(self.end_index - 1).unwrap_or(true); self.end_index -= 1; if deleted { continue; } return Some(key); } None } } impl<T: Encodable + Numericable + MmapValue + Send + Sync + Default> ImmutableNumericIndex<T> where Vec<T>: Blob, { /// Open and load immutable numeric index from RocksDB storage #[cfg(feature = "rocksdb")] pub(super) fn open_rocksdb(db: Arc<RwLock<DB>>, field: &str) -> OperationResult<Option<Self>> { use crate::index::field_index::numeric_index::mutable_numeric_index::MutableNumericIndex; let store_cf_name = super::numeric_index_storage_cf_name(field); let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); // Load through mutable numeric index structure let Some(mutable) = MutableNumericIndex::<T>::open_rocksdb_db_wrapper(db_wrapper.clone(), false)? else { // Column family doesn't exist, cannot load return Ok(None); }; let InMemoryNumericIndex { map, histogram, points_count, max_values_per_point, point_to_values, } = mutable.into_in_memory_index(); Ok(Some(Self { map: NumericKeySortedVec::from_btree_set(map), histogram, points_count, max_values_per_point, point_to_values: ImmutablePointToValues::new(point_to_values), storage: Storage::RocksDb(db_wrapper), })) } /// Open and load immutable numeric index from mmap storage pub(super) fn open_mmap(index: MmapNumericIndex<T>) -> Self { // Load in-memory index from mmap storage let InMemoryNumericIndex { map, histogram, points_count, max_values_per_point, point_to_values, } = InMemoryNumericIndex::from_mmap(&index); // Index is now loaded into memory, clear cache of backing mmap storage if let Err(err) = index.clear_cache() { log::warn!("Failed to clear mmap cache of ram mmap numeric index: {err}"); } Self { map: NumericKeySortedVec::from_btree_set(map), histogram, points_count, max_values_per_point, point_to_values: 
ImmutablePointToValues::new(point_to_values), storage: Storage::Mmap(Box::new(index)), } } #[cfg(all(test, feature = "rocksdb"))] pub(super) fn db_wrapper(&self) -> Option<&DatabaseColumnScheduledDeleteWrapper> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => Some(db_wrapper), Storage::Mmap(_) => None, } } #[inline] pub(super) fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.recreate_column_family(), Storage::Mmap(index) => index.wipe(), } } /// Clear cache /// /// Only clears cache of mmap storage if used. Does not clear in-memory representation of /// index. pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Mmap(index) => index.clear_cache(), } } #[inline] pub(super) fn files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(index) => index.files(), } } #[inline] pub(super) fn immutable_files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Mmap(index) => index.immutable_files(), } } #[inline] pub(super) fn flusher(&self) -> Flusher { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.flusher(), Storage::Mmap(index) => index.flusher(), } } pub(super) fn check_values_any( &self, idx: PointOffsetType, check_fn: impl Fn(&T) -> bool, ) -> bool { self.point_to_values.check_values_any(idx, |v| check_fn(v)) } pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>> { Some(Box::new( self.point_to_values .get_values(idx) .map(|iter| iter.copied())?, )) } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.point_to_values.get_values_count(idx) } pub(super) fn total_unique_values_count(&self) -> usize { self.map.len() } pub(super) fn 
values_range_size( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> usize { let iterator = self.map.values_range(start_bound, end_bound); iterator.end_index - iterator.start_index } pub(super) fn values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl Iterator<Item = PointOffsetType> { self.map .values_range(start_bound, end_bound) .map(|Point { idx, .. }| idx) } pub(super) fn orderable_values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl DoubleEndedIterator<Item = (T, PointOffsetType)> + '_ { self.map .values_range(start_bound, end_bound) .map(|Point { val, idx, .. }| (val, idx)) } #[cfg_attr(not(feature = "rocksdb"), expect(clippy::unnecessary_wraps))] pub(super) fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> { if let Some(removed_values) = self.point_to_values.get_values(idx) { let mut removed_count = 0; for value in removed_values { let key = Point::new(*value, idx); Self::remove_from_map(&mut self.map, &mut self.histogram, &key); // Update persisted storage match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { let encoded = value.encode_key(idx); db_wrapper.remove(encoded)?; } Storage::Mmap(index) => { index.remove_point(idx); } } removed_count += 1; } if removed_count > 0 { self.points_count -= 1; } } self.point_to_values.remove_point(idx); Ok(()) } pub(super) fn get_histogram(&self) -> &Histogram<T> { &self.histogram } pub(super) fn get_points_count(&self) -> usize { self.points_count } pub(super) fn get_max_values_per_point(&self) -> usize { self.max_values_per_point } fn remove_from_map( map: &mut NumericKeySortedVec<T>, histogram: &mut Histogram<T>, key: &Point<T>, ) { if map.remove(key) { histogram.remove( key, |x| Self::get_histogram_left_neighbor(map, x), |x| Self::get_histogram_right_neighbor(map, x), ); } } fn get_histogram_left_neighbor( map: &NumericKeySortedVec<T>, point: &Point<T>, ) -> 
Option<Point<T>> { map.values_range(Bound::Unbounded, Bound::Excluded(point.clone())) .next_back() } fn get_histogram_right_neighbor( map: &NumericKeySortedVec<T>, point: &Point<T>, ) -> Option<Point<T>> { map.values_range(Bound::Excluded(point.clone()), Bound::Unbounded) .next() } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, Storage::Mmap(_) => false, } } } #[cfg(test)] mod tests { use std::ops::Bound; use super::*; use crate::types::FloatPayloadType; fn check_range( key_set: &NumericKeySortedVec<FloatPayloadType>, encoded_map: &BTreeSet<Point<FloatPayloadType>>, start_bound: Bound<Point<FloatPayloadType>>, end_bound: Bound<Point<FloatPayloadType>>, ) { let set1 = key_set .values_range(start_bound.clone(), end_bound.clone()) .collect::<Vec<_>>(); let set2 = encoded_map .range((start_bound, end_bound)) .cloned() .collect::<Vec<_>>(); for (k1, k2) in set1.iter().zip(set2.iter()) { assert_eq!(k1, k2); } } fn check_ranges( key_set: &NumericKeySortedVec<FloatPayloadType>, encoded_map: &BTreeSet<Point<FloatPayloadType>>, ) { check_range(key_set, encoded_map, Bound::Unbounded, Bound::Unbounded); check_range( key_set, encoded_map, Bound::Unbounded, Bound::Included(Point::new(0.4, 2)), ); check_range( key_set, encoded_map, Bound::Unbounded, Bound::Excluded(Point::new(0.4, 2)), ); check_range( key_set, encoded_map, Bound::Included(Point::new(0.4, 2)), Bound::Unbounded, ); check_range( key_set, encoded_map, Bound::Excluded(Point::new(0.4, 2)), Bound::Unbounded, ); check_range( key_set, encoded_map, Bound::Included(Point::new(-5.0, 1)), Bound::Included(Point::new(5.0, 1)), ); check_range( key_set, encoded_map, Bound::Included(Point::new(-5.0, 1)), Bound::Excluded(Point::new(5.0, 1)), ); 
check_range( key_set, encoded_map, Bound::Excluded(Point::new(-5.0, 1)), Bound::Included(Point::new(5.0, 1)), ); check_range( key_set, encoded_map, Bound::Excluded(Point::new(-5.0, 1)), Bound::Excluded(Point::new(5.0, 1)), ); check_range( key_set, encoded_map, Bound::Included(Point::new(-5.0, 1000)), Bound::Included(Point::new(5.0, 1000)), ); check_range( key_set, encoded_map, Bound::Excluded(Point::new(-5.0, 1000)), Bound::Excluded(Point::new(5.0, 1000)), ); check_range( key_set, encoded_map, Bound::Excluded(Point::new(-50000.0, 1000)), Bound::Excluded(Point::new(50000.0, 1000)), ); } #[test] fn test_numeric_index_key_set() { let pairs = [ Point::new(0.0, 1), Point::new(0.0, 3), Point::new(-0.0, 2), Point::new(-0.0, 4), Point::new(0.4, 2), Point::new(-0.4, 3), Point::new(5.0, 1), Point::new(-5.0, 1), Point::new(f64::INFINITY, 0), Point::new(f64::NEG_INFINITY, 1), Point::new(f64::NEG_INFINITY, 2), Point::new(f64::NEG_INFINITY, 3), ]; let mut set_byte: BTreeSet<Point<FloatPayloadType>> = pairs.iter().cloned().collect(); let mut set_keys = NumericKeySortedVec::<FloatPayloadType>::from_btree_set(set_byte.clone()); check_ranges(&set_keys, &set_byte); // test deletion and ranges after deletion let deleted_key = Point::new(0.4, 2); set_keys.remove(&deleted_key); set_byte.remove(&deleted_key); check_ranges(&set_keys, &set_byte); // test deletion and ranges after deletion let deleted_key = Point::new(-5.0, 1); set_keys.remove(&deleted_key); set_byte.remove(&deleted_key); check_ranges(&set_keys, &set_byte); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/numeric_index/mutable_numeric_index.rs
lib/segment/src/index/field_index/numeric_index/mutable_numeric_index.rs
use std::collections::BTreeSet; use std::ops::Bound; use std::ops::Bound::{Excluded, Unbounded}; use std::path::PathBuf; #[cfg(feature = "rocksdb")] use std::sync::Arc; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use gridstore::config::StorageOptions; use gridstore::{Blob, Gridstore}; #[cfg(feature = "rocksdb")] use parking_lot::RwLock; #[cfg(feature = "rocksdb")] use rocksdb::DB; use super::mmap_numeric_index::MmapNumericIndex; use super::{Encodable, HISTOGRAM_MAX_BUCKET_SIZE, HISTOGRAM_PRECISION}; use crate::common::Flusher; use crate::common::operation_error::{OperationError, OperationResult}; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::DatabaseColumnWrapper; use crate::index::field_index::histogram::{Histogram, Numericable, Point}; use crate::index::field_index::mmap_point_to_values::MmapValue; use crate::index::payload_config::StorageType; /// Default options for Gridstore storage const fn default_gridstore_options<T: Sized>() -> StorageOptions { let block_size = size_of::<T>(); StorageOptions { // Size of numeric values in index block_size_bytes: Some(block_size), // Compressing numeric values is unreasonable compression: Some(gridstore::config::Compression::None), // Scale page size down with block size, prevents overhead of first page when there's (almost) no values page_size_bytes: Some(block_size * 8192 * 32), // 4 to 8 MiB = block_size * region_blocks * regions, region_size_blocks: None, } } pub struct MutableNumericIndex<T: Encodable + Numericable> where Vec<T>: Blob, { // Backing storage, source of state, persists deletions storage: Storage<T>, in_memory_index: InMemoryNumericIndex<T>, } enum Storage<T: Encodable + Numericable> where Vec<T>: Blob, { #[cfg(feature = "rocksdb")] RocksDb(DatabaseColumnScheduledDeleteWrapper), Gridstore(Gridstore<Vec<T>>), } // Numeric 
Index with insertions and deletions without persistence pub struct InMemoryNumericIndex<T: Encodable + Numericable> { pub map: BTreeSet<Point<T>>, pub histogram: Histogram<T>, pub points_count: usize, pub max_values_per_point: usize, pub point_to_values: Vec<Vec<T>>, } impl<T: Encodable + Numericable> Default for InMemoryNumericIndex<T> { fn default() -> Self { Self { map: BTreeSet::new(), histogram: Histogram::new(HISTOGRAM_MAX_BUCKET_SIZE, HISTOGRAM_PRECISION), points_count: 0, max_values_per_point: 0, point_to_values: Default::default(), } } } impl<T: Encodable + Numericable + Default> FromIterator<(PointOffsetType, T)> for InMemoryNumericIndex<T> { fn from_iter<I: IntoIterator<Item = (PointOffsetType, T)>>(iter: I) -> Self { let mut index = InMemoryNumericIndex::default(); for pair in iter { let (idx, value) = pair; if index.point_to_values.len() <= idx as usize { index .point_to_values .resize_with(idx as usize + 1, Vec::new) } index.point_to_values[idx as usize].push(value); let key = Point::new(value, idx); InMemoryNumericIndex::add_to_map(&mut index.map, &mut index.histogram, key); } for values in &index.point_to_values { if !values.is_empty() { index.points_count += 1; index.max_values_per_point = index.max_values_per_point.max(values.len()); } } index } } impl<T: Encodable + Numericable + Default + MmapValue> InMemoryNumericIndex<T> { /// Construct in-memroy index from given mmap index /// /// # Warning /// /// Expensive because this reads the full mmap index. 
pub(super) fn from_mmap(mmap_index: &MmapNumericIndex<T>) -> Self { let point_count = mmap_index.storage.point_to_values.len(); (0..point_count as PointOffsetType) .filter_map(|idx| mmap_index.get_values(idx).map(|values| (idx, values))) .flat_map(|(idx, values)| values.into_iter().map(move |value| (idx, value))) .collect::<InMemoryNumericIndex<T>>() } } impl<T: Encodable + Numericable + Default> InMemoryNumericIndex<T> { pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&T) -> bool) -> bool { self.point_to_values .get(idx as usize) .map(|values| values.iter().any(check_fn)) .unwrap_or(false) } pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>> { Some(Box::new( self.point_to_values .get(idx as usize) .map(|v| v.iter().cloned())?, )) } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.point_to_values.get(idx as usize).map(Vec::len) } pub fn total_unique_values_count(&self) -> usize { self.map.len() } pub fn values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl Iterator<Item = PointOffsetType> { self.map .range((start_bound, end_bound)) .map(|point| point.idx) } pub fn orderable_values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl DoubleEndedIterator<Item = (T, PointOffsetType)> + '_ { self.map .range((start_bound, end_bound)) .map(|point| (point.val, point.idx)) } pub fn add_many_to_list(&mut self, idx: PointOffsetType, values: Vec<T>) { if self.point_to_values.len() <= idx as usize { self.point_to_values.resize_with(idx as usize + 1, Vec::new) } for value in &values { let key = Point::new(*value, idx); Self::add_to_map(&mut self.map, &mut self.histogram, key); } if !values.is_empty() { self.points_count += 1; self.max_values_per_point = self.max_values_per_point.max(values.len()); } self.point_to_values[idx as usize] = values; } pub fn remove_point(&mut self, idx: PointOffsetType) { if let Some(values) = 
self.point_to_values.get_mut(idx as usize) { if !values.is_empty() { self.points_count = self.points_count.checked_sub(1).unwrap_or_default(); } for value in values.iter() { let key = Point::new(*value, idx); Self::remove_from_map(&mut self.map, &mut self.histogram, key); } *values = Default::default(); } } fn add_to_map(map: &mut BTreeSet<Point<T>>, histogram: &mut Histogram<T>, key: Point<T>) { let was_added = map.insert(key.clone()); // Histogram works with unique values (idx + value) only, so we need to // make sure that we don't add the same value twice. // key is a combination of value + idx, so we can use it to ensure than the pair is unique if was_added { histogram.insert( key, |x| Self::get_histogram_left_neighbor(map, x.clone()), |x| Self::get_histogram_right_neighbor(map, x.clone()), ); } } fn remove_from_map(map: &mut BTreeSet<Point<T>>, histogram: &mut Histogram<T>, key: Point<T>) { let was_removed = map.remove(&key); if was_removed { histogram.remove( &key, |x| Self::get_histogram_left_neighbor(map, x.clone()), |x| Self::get_histogram_right_neighbor(map, x.clone()), ); } } fn get_histogram_left_neighbor(map: &BTreeSet<Point<T>>, key: Point<T>) -> Option<Point<T>> { map.range((Unbounded, Excluded(key))).next_back().cloned() } fn get_histogram_right_neighbor(map: &BTreeSet<Point<T>>, key: Point<T>) -> Option<Point<T>> { map.range((Excluded(key), Unbounded)).next().cloned() } pub fn get_histogram(&self) -> &Histogram<T> { &self.histogram } pub fn get_points_count(&self) -> usize { self.points_count } pub fn get_max_values_per_point(&self) -> usize { self.max_values_per_point } } impl<T: Encodable + Numericable + Send + Sync + Default> MutableNumericIndex<T> where Vec<T>: Blob, { /// Open and load mutable numeric index from RocksDB storage #[cfg(feature = "rocksdb")] pub fn open_rocksdb( db: Arc<RwLock<DB>>, field: &str, create_if_missing: bool, ) -> OperationResult<Option<Self>> { let store_cf_name = super::numeric_index_storage_cf_name(field); let 
db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new( db, &store_cf_name, )); Self::open_rocksdb_db_wrapper(db_wrapper, create_if_missing) } #[cfg(feature = "rocksdb")] pub fn open_rocksdb_db_wrapper( db_wrapper: DatabaseColumnScheduledDeleteWrapper, create_if_missing: bool, ) -> OperationResult<Option<Self>> { if !db_wrapper.has_column_family()? { if create_if_missing { db_wrapper.recreate_column_family()?; } else { // Column family doesn't exist, cannot load return Ok(None); } }; // Load in-memory index from RocksDB let in_memory_index = db_wrapper .lock_db() .iter()? .map(|(key, value)| { let value_idx = u32::from_be_bytes(value.as_ref().try_into().map_err(|_| { OperationError::service_error("incorrect numeric index value") })?); let (idx, value) = T::decode_key(&key); if idx != value_idx { return Err(OperationError::service_error( "incorrect numeric index key-value pair", )); } Ok((idx, value)) }) .collect::<Result<InMemoryNumericIndex<_>, OperationError>>()?; Ok(Some(Self { storage: Storage::RocksDb(db_wrapper), in_memory_index, })) } /// Open and load mutable numeric index from Gridstore storage /// /// The `create_if_missing` parameter indicates whether to create a new Gridstore if it does /// not exist. If false and files don't exist, this will return `None` to indicate nothing /// could be loaded. pub fn open_gridstore(path: PathBuf, create_if_missing: bool) -> OperationResult<Option<Self>> { let store = if create_if_missing { let options = default_gridstore_options::<T>(); Gridstore::open_or_create(path, options).map_err(|err| { OperationError::service_error(format!( "failed to open mutable numeric index on gridstore: {err}" )) })? } else if path.exists() { Gridstore::open(path).map_err(|err| { OperationError::service_error(format!( "failed to open mutable numeric index on gridstore: {err}" )) })? 
} else { // Files don't exist, cannot load return Ok(None); }; // Load in-memory index from Gridstore let mut in_memory_index = InMemoryNumericIndex::default(); let hw_counter = HardwareCounterCell::disposable(); let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter(); store .iter::<_, ()>( |idx, values: Vec<T>| { in_memory_index.add_many_to_list(idx, values); Ok(true) }, hw_counter_ref, ) // unwrap safety: never returns an error .unwrap(); Ok(Some(Self { storage: Storage::Gridstore(store), in_memory_index, })) } pub fn into_in_memory_index(self) -> InMemoryNumericIndex<T> { self.in_memory_index } #[cfg(all(test, feature = "rocksdb"))] pub(super) fn db_wrapper(&self) -> Option<&DatabaseColumnScheduledDeleteWrapper> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => Some(db_wrapper), Storage::Gridstore(_) => None, } } #[inline] pub(super) fn clear(&mut self) -> OperationResult<()> { match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.recreate_column_family(), Storage::Gridstore(store) => store.clear().map_err(|err| { OperationError::service_error(format!( "Failed to clear mutable numeric index: {err}", )) }), } } #[inline] pub(super) fn wipe(self) -> OperationResult<()> { match self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.remove_column_family(), Storage::Gridstore(store) => store.wipe().map_err(|err| { OperationError::service_error(format!( "Failed to wipe mutable numeric index: {err}", )) }), } } /// Clear cache /// /// Only clears cache of Gridstore storage if used. Does not clear in-memory representation of /// index. 
pub fn clear_cache(&self) -> OperationResult<()> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => Ok(()), Storage::Gridstore(index) => index.clear_cache().map_err(|err| { OperationError::service_error(format!( "Failed to clear mutable numeric index gridstore cache: {err}" )) }), } } #[inline] pub(super) fn files(&self) -> Vec<PathBuf> { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => vec![], Storage::Gridstore(store) => store.files(), } } #[inline] pub(super) fn flusher(&self) -> Flusher { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => db_wrapper.flusher(), Storage::Gridstore(store) => { let storage_flusher = store.flusher(); Box::new(move || { storage_flusher().map_err(|err| { OperationError::service_error(format!( "Failed to flush mutable numeric index gridstore: {err}" )) }) }) } } } pub fn add_many_to_list( &mut self, idx: PointOffsetType, values: Vec<T>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { // Update persisted storage match &mut self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { let mut hw_cell_wb = hw_counter .payload_index_io_write_counter() .write_back_counter(); for value in &values { let key = value.encode_key(idx); db_wrapper.put(&key, idx.to_be_bytes())?; hw_cell_wb.incr_delta(size_of_val(&key) + size_of_val(&idx)); } } // We cannot store empty value, then delete instead Storage::Gridstore(store) if values.is_empty() => { store.delete_value(idx); } Storage::Gridstore(store) => { let hw_counter_ref = hw_counter.ref_payload_index_io_write_counter(); store .put_value(idx, &values, hw_counter_ref) .map_err(|err| { OperationError::service_error(format!( "failed to put value in mutable numeric index gridstore: {err}" )) })?; } } self.in_memory_index.add_many_to_list(idx, values); Ok(()) } pub fn remove_point(&mut self, idx: PointOffsetType) -> OperationResult<()> { // Update persisted storage match &mut self.storage { 
#[cfg(feature = "rocksdb")] Storage::RocksDb(db_wrapper) => { self.in_memory_index .get_values(idx) .map(|mut values| { values.try_for_each(|value| { let key = value.encode_key(idx); db_wrapper.remove(key) }) }) .transpose()?; } Storage::Gridstore(store) => { store.delete_value(idx); } } self.in_memory_index.remove_point(idx); Ok(()) } pub fn map(&self) -> &BTreeSet<Point<T>> { &self.in_memory_index.map } #[inline] pub fn total_unique_values_count(&self) -> usize { self.in_memory_index.total_unique_values_count() } #[inline] pub fn check_values_any(&self, idx: PointOffsetType, check_fn: impl Fn(&T) -> bool) -> bool { self.in_memory_index.check_values_any(idx, check_fn) } #[inline] pub fn get_points_count(&self) -> usize { self.in_memory_index.get_points_count() } #[inline] pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>> { self.in_memory_index.get_values(idx) } #[inline] pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { self.in_memory_index.values_count(idx) } #[inline] pub fn values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl Iterator<Item = PointOffsetType> { self.in_memory_index.values_range(start_bound, end_bound) } #[inline] pub fn orderable_values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl DoubleEndedIterator<Item = (T, PointOffsetType)> + '_ { self.in_memory_index .orderable_values_range(start_bound, end_bound) } #[inline] pub fn get_histogram(&self) -> &Histogram<T> { self.in_memory_index.get_histogram() } #[inline] pub fn get_max_values_per_point(&self) -> usize { self.in_memory_index.get_max_values_per_point() } pub fn storage_type(&self) -> StorageType { match &self.storage { #[cfg(feature = "rocksdb")] Storage::RocksDb(_) => StorageType::RocksDb, Storage::Gridstore(_) => StorageType::Gridstore, } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self.storage { Storage::RocksDb(_) => true, 
Storage::Gridstore(_) => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/numeric_index/mmap_numeric_index.rs
lib/segment/src/index/field_index/numeric_index/mmap_numeric_index.rs
use std::ops::Bound; use std::path::{Path, PathBuf}; use common::counter::conditioned_counter::ConditionedCounter; use common::counter::hardware_counter::HardwareCounterCell; use common::counter::iterator_hw_measurement::HwMeasurementIteratorExt; use common::types::PointOffsetType; use fs_err as fs; use io::file_operations::{atomic_save_json, read_json}; use memmap2::MmapMut; use memory::fadvise::clear_disk_cache; use memory::madvise::AdviceSetting; use memory::mmap_ops::{self, create_and_ensure_length}; use memory::mmap_type::{MmapBitSlice, MmapSlice}; use serde::{Deserialize, Serialize}; use super::Encodable; use super::mutable_numeric_index::InMemoryNumericIndex; use crate::common::Flusher; use crate::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::histogram::{Histogram, Numericable, Point}; use crate::index::field_index::mmap_point_to_values::{MmapPointToValues, MmapValue}; const PAIRS_PATH: &str = "data.bin"; const DELETED_PATH: &str = "deleted.bin"; const CONFIG_PATH: &str = "mmap_field_index_config.json"; pub struct MmapNumericIndex<T: Encodable + Numericable + Default + MmapValue + 'static> { path: PathBuf, pub(super) storage: Storage<T>, histogram: Histogram<T>, deleted_count: usize, max_values_per_point: usize, is_on_disk: bool, } pub(super) struct Storage<T: Encodable + Numericable + Default + MmapValue + 'static> { deleted: MmapBitSliceBufferedUpdateWrapper, // sorted pairs (id + value), sorted by value (by id if values are equal) pairs: MmapSlice<Point<T>>, pub(super) point_to_values: MmapPointToValues<T>, } #[derive(Debug, Clone, Serialize, Deserialize)] struct MmapNumericIndexConfig { max_values_per_point: usize, } pub(super) struct NumericIndexPairsIterator<'a, T: Encodable + Numericable> { pairs: &'a [Point<T>], deleted: &'a MmapBitSliceBufferedUpdateWrapper, start_index: usize, end_index: usize, } impl<T: 
Encodable + Numericable> Iterator for NumericIndexPairsIterator<'_, T> { type Item = Point<T>; fn next(&mut self) -> Option<Self::Item> { while self.start_index < self.end_index { let key = self.pairs[self.start_index].clone(); let deleted = self.deleted.get(key.idx as usize).unwrap_or(true); self.start_index += 1; if deleted { continue; } return Some(key); } None } } impl<T: Encodable + Numericable> DoubleEndedIterator for NumericIndexPairsIterator<'_, T> { fn next_back(&mut self) -> Option<Self::Item> { while self.start_index < self.end_index { let key = self.pairs[self.end_index - 1].clone(); let deleted = self.deleted.get(key.idx as usize).unwrap_or(true); self.end_index -= 1; if deleted { continue; } return Some(key); } None } } impl<T: Encodable + Numericable + Default + MmapValue> MmapNumericIndex<T> { pub fn build( in_memory_index: InMemoryNumericIndex<T>, path: &Path, is_on_disk: bool, ) -> OperationResult<Self> { fs::create_dir_all(path)?; let pairs_path = path.join(PAIRS_PATH); let deleted_path = path.join(DELETED_PATH); let config_path = path.join(CONFIG_PATH); atomic_save_json( &config_path, &MmapNumericIndexConfig { max_values_per_point: in_memory_index.max_values_per_point, }, )?; in_memory_index.histogram.save(path)?; MmapPointToValues::<T>::from_iter( path, in_memory_index .point_to_values .iter() .enumerate() .map(|(idx, values)| { ( idx as PointOffsetType, values.iter().map(|v| T::as_referenced(v)), ) }), )?; { let pairs_file = create_and_ensure_length( &pairs_path, in_memory_index.map.len() * std::mem::size_of::<Point<T>>(), )?; let pairs_mmap = unsafe { MmapMut::map_mut(&pairs_file)? }; let mut pairs = unsafe { MmapSlice::<Point<T>>::try_from(pairs_mmap)? 
}; for (src, dst) in in_memory_index.map.iter().zip(pairs.iter_mut()) { *dst = src.clone(); } } { const BITS_IN_BYTE: usize = 8; let deleted_flags_count = in_memory_index.point_to_values.len(); let deleted_file = create_and_ensure_length( &deleted_path, BITS_IN_BYTE * BITS_IN_BYTE * deleted_flags_count.div_ceil(BITS_IN_BYTE * BITS_IN_BYTE), )?; let mut deleted_mmap = unsafe { MmapMut::map_mut(&deleted_file)? }; deleted_mmap.fill(0); let mut deleted_bitflags = MmapBitSlice::from(deleted_mmap, 0); for (idx, values) in in_memory_index.point_to_values.iter().enumerate() { if values.is_empty() { deleted_bitflags.set(idx, true); } } } Self::open(path, is_on_disk)?.ok_or_else(|| { OperationError::service_error("Failed to open MmapNumericIndex after building it") }) } /// Open and load mmap numeric index from the given path pub fn open(path: &Path, is_on_disk: bool) -> OperationResult<Option<Self>> { let pairs_path = path.join(PAIRS_PATH); let deleted_path = path.join(DELETED_PATH); let config_path = path.join(CONFIG_PATH); // If config doesn't exist, assume the index doesn't exist on disk if !config_path.is_file() { return Ok(None); } let histogram = Histogram::<T>::load(path)?; let config: MmapNumericIndexConfig = read_json(&config_path)?; let deleted = mmap_ops::open_write_mmap(&deleted_path, AdviceSetting::Global, false)?; let deleted = MmapBitSlice::from(deleted, 0); let deleted_count = deleted.count_ones(); let do_populate = !is_on_disk; let map = unsafe { MmapSlice::try_from(mmap_ops::open_write_mmap( &pairs_path, AdviceSetting::Global, do_populate, )?)? 
}; let point_to_values = MmapPointToValues::open(path, do_populate)?; Ok(Some(Self { path: path.to_path_buf(), storage: Storage { pairs: map, deleted: MmapBitSliceBufferedUpdateWrapper::new(deleted), point_to_values, }, histogram, deleted_count, max_values_per_point: config.max_values_per_point, is_on_disk, })) } pub fn wipe(self) -> OperationResult<()> { let files = self.files(); let path = self.path.clone(); // drop mmap handles before deleting files drop(self); for file in files { fs::remove_file(file)?; } let _ = fs::remove_dir(path); Ok(()) } pub fn files(&self) -> Vec<PathBuf> { let mut files = vec![ self.path.join(PAIRS_PATH), self.path.join(DELETED_PATH), self.path.join(CONFIG_PATH), ]; files.extend(self.storage.point_to_values.files()); files.extend(Histogram::<T>::files(&self.path)); files } pub fn immutable_files(&self) -> Vec<PathBuf> { let mut files = vec![self.path.join(PAIRS_PATH), self.path.join(CONFIG_PATH)]; files.extend(self.storage.point_to_values.immutable_files()); files.extend(Histogram::<T>::immutable_files(&self.path)); files } pub fn flusher(&self) -> Flusher { self.storage.deleted.flusher() } pub fn check_values_any( &self, idx: PointOffsetType, check_fn: impl Fn(&T) -> bool, hw_counter: &HardwareCounterCell, ) -> bool { let hw_counter = self.make_conditioned_counter(hw_counter); if self.storage.deleted.get(idx as usize) == Some(false) { self.storage.point_to_values.check_values_any( idx, |v| check_fn(T::from_referenced(&v)), &hw_counter, ) } else { false } } pub fn get_values(&self, idx: PointOffsetType) -> Option<Box<dyn Iterator<Item = T> + '_>> { if self.storage.deleted.get(idx as usize) == Some(false) { Some(Box::new( self.storage .point_to_values .get_values(idx)? 
.map(|v| *T::from_referenced(&v)), )) } else { None } } pub fn values_count(&self, idx: PointOffsetType) -> Option<usize> { if self.storage.deleted.get(idx as usize) == Some(false) { self.storage.point_to_values.get_values_count(idx) } else { None } } /// Returns the number of key-value pairs in the index. /// Note that is doesn't count deleted pairs. pub(super) fn total_unique_values_count(&self) -> usize { self.storage.pairs.len() } pub(super) fn values_range<'a>( &'a self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, hw_counter: &'a HardwareCounterCell, ) -> impl Iterator<Item = PointOffsetType> + 'a { let hw_counter = self.make_conditioned_counter(hw_counter); self.values_range_iterator(start_bound, end_bound) .map(|Point { idx, .. }| idx) .measure_hw_with_condition_cell(hw_counter, size_of::<Point<T>>(), |i| { i.payload_index_io_read_counter() }) } pub(super) fn orderable_values_range( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> impl DoubleEndedIterator<Item = (T, PointOffsetType)> + '_ { self.values_range_iterator(start_bound, end_bound) .map(|Point { val, idx }| (val, idx)) } pub fn remove_point(&mut self, idx: PointOffsetType) { let idx = idx as usize; if idx < self.storage.deleted.len() && !self.storage.deleted.get(idx).unwrap_or(true) { self.storage.deleted.set(idx, true); self.deleted_count += 1; } } pub(super) fn get_histogram(&self) -> &Histogram<T> { &self.histogram } pub(super) fn get_points_count(&self) -> usize { self.storage.point_to_values.len() - self.deleted_count } pub(super) fn get_max_values_per_point(&self) -> usize { self.max_values_per_point } pub(super) fn values_range_size( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> usize { let iter = self.values_range_iterator(start_bound, end_bound); iter.end_index - iter.start_index } // get iterator fn values_range_iterator( &self, start_bound: Bound<Point<T>>, end_bound: Bound<Point<T>>, ) -> NumericIndexPairsIterator<'_, T> { let 
start_index = match start_bound { Bound::Included(bound) => self .storage .pairs .binary_search(&bound) .unwrap_or_else(|idx| idx), Bound::Excluded(bound) => match self.storage.pairs.binary_search(&bound) { Ok(idx) => idx + 1, Err(idx) => idx, }, Bound::Unbounded => 0, }; if start_index >= self.storage.pairs.len() { return NumericIndexPairsIterator { pairs: &self.storage.pairs, deleted: &self.storage.deleted, start_index: self.storage.pairs.len(), end_index: self.storage.pairs.len(), }; } let end_index = match end_bound { Bound::Included(bound) => match self.storage.pairs[start_index..].binary_search(&bound) { Ok(idx) => idx + 1 + start_index, Err(idx) => idx + start_index, }, Bound::Excluded(bound) => { let end_bound = self.storage.pairs[start_index..].binary_search(&bound); end_bound.unwrap_or_else(|idx| idx) + start_index } Bound::Unbounded => self.storage.pairs.len(), }; NumericIndexPairsIterator { pairs: &self.storage.pairs, deleted: &self.storage.deleted, start_index, end_index, } } fn make_conditioned_counter<'a>( &self, hw_counter: &'a HardwareCounterCell, ) -> ConditionedCounter<'a> { ConditionedCounter::new(self.is_on_disk, hw_counter) } pub fn is_on_disk(&self) -> bool { self.is_on_disk } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { self.storage.pairs.populate()?; self.storage.point_to_values.populate(); Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { let pairs_path = self.path.join(PAIRS_PATH); let deleted_path = self.path.join(DELETED_PATH); clear_disk_cache(&pairs_path)?; clear_disk_cache(&deleted_path)?; self.storage.point_to_values.clear_cache()?; Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/bool_index/mutable_bool_index.rs
lib/segment/src/index/field_index/bool_index/mutable_bool_index.rs
use std::path::{Path, PathBuf}; use common::counter::hardware_counter::HardwareCounterCell; use common::counter::iterator_hw_measurement::HwMeasurementIteratorExt; use common::types::PointOffsetType; use fs_err as fs; use roaring::RoaringBitmap; use super::BoolIndex; use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags; use crate::common::flags::roaring_flags::RoaringFlags; use crate::common::operation_error::{OperationError, OperationResult}; use crate::index::field_index::map_index::IdIter; use crate::index::field_index::{ CardinalityEstimation, FieldIndexBuilderTrait, PayloadBlockCondition, PayloadFieldIndex, PrimaryCondition, ValueIndexer, }; use crate::telemetry::PayloadIndexTelemetry; use crate::types::{FieldCondition, Match, MatchValue, PayloadKeyType, ValueVariants}; const TRUES_DIRNAME: &str = "trues"; const FALSES_DIRNAME: &str = "falses"; /// Payload index for boolean values, in-memory via roaring bitmaps, stored in memory-mapped bitslices. pub struct MutableBoolIndex { base_dir: PathBuf, indexed_count: usize, trues_count: usize, falses_count: usize, storage: Storage, } struct Storage { trues_flags: RoaringFlags, falses_flags: RoaringFlags, } impl MutableBoolIndex { pub fn builder(path: &Path) -> OperationResult<MutableBoolIndexBuilder> { Ok(MutableBoolIndexBuilder( Self::open(path, true)?.ok_or_else(|| { OperationError::service_error("Failed to create and open MutableBoolIndex") })?, )) } /// Open and load or create a boolean index at the given path. /// /// # Arguments /// - `path` - The directory where the index files should live, must be exclusive to this index. /// - `is_on_disk` - If the index should be kept on disk. Memory will be populated if false. /// - `create_if_missing` - If true, creates the index if it doesn't exist. 
pub fn open(path: &Path, create_if_missing: bool) -> OperationResult<Option<Self>> { let falses_dir = path.join(FALSES_DIRNAME); // If falses directory doesn't exist, assume the index doesn't exist on disk if !falses_dir.is_dir() && !create_if_missing { return Ok(None); } Ok(Some(Self::open_or_create(path)?)) } fn open_or_create(path: &Path) -> OperationResult<Self> { fs::create_dir_all(path).map_err(|err| { OperationError::service_error(format!( "Failed to create mmap bool index directory: {err}" )) })?; // Trues bitslice let trues_path = path.join(TRUES_DIRNAME); let trues_slice = DynamicMmapFlags::open(&trues_path, false)?; let trues_flags = RoaringFlags::new(trues_slice); // Falses bitslice let falses_path = path.join(FALSES_DIRNAME); let falses_slice = DynamicMmapFlags::open(&falses_path, false)?; let falses_flags = RoaringFlags::new(falses_slice); let trues_count = trues_flags.count_trues(); let falses_count = falses_flags.count_trues(); let indexed_count = { let trues = trues_flags.get_bitmap(); let falses = falses_flags.get_bitmap(); trues.union_len(falses) as usize }; Ok(Self { base_dir: path.to_path_buf(), storage: Storage { trues_flags, falses_flags, }, trues_count, falses_count, indexed_count, }) } fn set_or_insert(&mut self, id: u32, has_true: bool, has_false: bool) { // Set or insert the flags let prev_true = self.storage.trues_flags.set(id, has_true); let prev_false = self.storage.falses_flags.set(id, has_false); let was_indexed = prev_true || prev_false; let is_indexed = has_true || has_false; // update indexed_count match (was_indexed, is_indexed) { (false, true) => { self.indexed_count += 1; } (true, false) => { self.indexed_count = self.indexed_count.saturating_sub(1); } _ => {} } // update trues_count match (prev_true, has_true) { (false, true) => { self.trues_count += 1; } (true, false) => { self.trues_count = self.trues_count.saturating_sub(1); } _ => {} } // update falses_count match (prev_false, has_false) { (false, true) => { 
self.falses_count += 1; } (true, false) => { self.falses_count = self.falses_count.saturating_sub(1); } _ => {} } } fn get_bitmap_for(&self, value: bool) -> &RoaringBitmap { if value { self.storage.trues_flags.get_bitmap() } else { self.storage.falses_flags.get_bitmap() } } fn get_count_for(&self, value: bool) -> usize { if value { self.trues_count } else { self.falses_count } } pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry { PayloadIndexTelemetry { field_name: None, points_count: self.indexed_count, points_values_count: (self.trues_count + self.falses_count), histogram_bucket_size: None, index_type: "mmap_bool", } } pub fn values_count(&self, point_id: PointOffsetType) -> usize { let has_true = self.storage.trues_flags.get(point_id); let has_false = self.storage.falses_flags.get(point_id); usize::from(has_true) + usize::from(has_false) } pub fn check_values_any(&self, point_id: PointOffsetType, is_true: bool) -> bool { if is_true { self.storage.trues_flags.get(point_id) } else { self.storage.falses_flags.get(point_id) } } pub fn values_is_empty(&self, point_id: PointOffsetType) -> bool { !self.storage.trues_flags.get(point_id) && !self.storage.falses_flags.get(point_id) } pub fn iter_values_map<'a>( &'a self, hw_counter: &'a HardwareCounterCell, ) -> impl Iterator<Item = (bool, IdIter<'a>)> + 'a { [ ( false, Box::new(self.storage.falses_flags.iter_trues()) as IdIter, ), ( true, Box::new(self.storage.trues_flags.iter_trues()) as IdIter, ), ] .into_iter() .measure_hw_with_acc(hw_counter.new_accumulator(), u8::BITS as usize, |i| { i.payload_index_io_read_counter() }) } pub fn iter_values(&self) -> impl Iterator<Item = bool> + '_ { [ self.storage.falses_flags.iter_trues().next().map(|_| false), self.storage.trues_flags.iter_trues().next().map(|_| true), ] .into_iter() .flatten() } pub fn iter_counts_per_value(&self) -> impl Iterator<Item = (bool, usize)> + '_ { [ (false, self.storage.falses_flags.count_trues()), (true, 
self.storage.trues_flags.count_trues()), ] .into_iter() } pub(crate) fn get_point_values(&self, point_id: u32) -> Vec<bool> { [ self.storage.trues_flags.get(point_id).then_some(true), self.storage.falses_flags.get(point_id).then_some(false), ] .into_iter() .flatten() .collect() } pub fn is_on_disk(&self) -> bool { false } pub fn populate(&self) -> OperationResult<()> { // The true and false flags are always in memory Ok(()) } /// Drop disk cache. pub fn clear_cache(&self) -> OperationResult<()> { self.storage.trues_flags.clear_cache()?; self.storage.falses_flags.clear_cache() } } pub struct MutableBoolIndexBuilder(MutableBoolIndex); impl FieldIndexBuilderTrait for MutableBoolIndexBuilder { type FieldIndexType = BoolIndex; fn init(&mut self) -> OperationResult<()> { // After Self is created, it is already initialized Ok(()) } fn add_point( &mut self, id: PointOffsetType, payload: &[&serde_json::Value], hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.0.add_point(id, payload, hw_counter) } fn finalize(self) -> OperationResult<Self::FieldIndexType> { Ok(BoolIndex::Mmap(self.0)) } } impl ValueIndexer for MutableBoolIndex { type ValueType = bool; fn add_many( &mut self, id: PointOffsetType, values: Vec<Self::ValueType>, _hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { if values.is_empty() { return Ok(()); } let has_true = values.iter().any(|v| *v); let has_false = values.iter().any(|v| !*v); self.set_or_insert(id, has_true, has_false); Ok(()) } fn get_value(value: &serde_json::Value) -> Option<Self::ValueType> { value.as_bool() } fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> { self.set_or_insert(id, false, false); Ok(()) } } impl PayloadFieldIndex for MutableBoolIndex { fn count_indexed_points(&self) -> usize { self.indexed_count } fn wipe(self) -> OperationResult<()> { let base_dir = self.base_dir.clone(); // drop mmap handles before deleting files drop(self); if base_dir.is_dir() { fs::remove_dir_all(&base_dir)?; 
}; Ok(()) } fn flusher(&self) -> crate::common::Flusher { let Self { base_dir: _, indexed_count: _, trues_count: _, falses_count: _, storage, } = self; let Storage { trues_flags, falses_flags, } = storage; let trues_flusher = trues_flags.flusher(); let falses_flusher = falses_flags.flusher(); Box::new(move || { trues_flusher()?; falses_flusher()?; Ok(()) }) } fn files(&self) -> Vec<std::path::PathBuf> { let mut files = self.storage.trues_flags.files(); files.extend(self.storage.falses_flags.files()); files } fn immutable_files(&self) -> Vec<PathBuf> { Vec::new() // everything is mutable } fn filter<'a>( &'a self, condition: &'a FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match &condition.r#match { Some(Match::Value(MatchValue { value: ValueVariants::Bool(value), })) => { let iter = self .get_bitmap_for(*value) .iter() .map(|x| x as PointOffsetType) .measure_hw_with_acc_and_fraction( hw_counter.new_accumulator(), u8::BITS as usize, |i| i.payload_index_io_read_counter(), ); Some(Box::new(iter)) } _ => None, } } fn estimate_cardinality( &self, condition: &FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<CardinalityEstimation> { match &condition.r#match { Some(Match::Value(MatchValue { value: ValueVariants::Bool(value), })) => { let count = self.get_count_for(*value); hw_counter .payload_index_io_read_counter() .incr_delta(size_of::<usize>()); let estimation = CardinalityEstimation::exact(count) .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone()))); Some(estimation) } _ => None, } } fn payload_blocks( &self, threshold: usize, key: PayloadKeyType, ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> { let make_block = |count, value, key: PayloadKeyType| { if count > threshold { Some(PayloadBlockCondition { condition: FieldCondition::new_match( key, Match::Value(MatchValue { value: ValueVariants::Bool(value), }), ), cardinality: count, }) } else { None } }; 
// just two possible blocks: true and false let iter = [ make_block(self.trues_count, true, key.clone()), make_block(self.falses_count, false, key), ] .into_iter() .flatten(); Box::new(iter) } } #[cfg(test)] mod tests { use std::collections::HashSet; use tempfile::TempDir; use walkdir::WalkDir; use super::MutableBoolIndex; use crate::index::field_index::PayloadFieldIndex; #[test] fn test_files() { let dir = TempDir::with_prefix("test_mmap_bool_index").unwrap(); let index = MutableBoolIndex::open(dir.path(), true).unwrap().unwrap(); let reported = index.files().into_iter().collect::<HashSet<_>>(); let actual = WalkDir::new(dir.path()) .into_iter() .filter_map(|entry| { let entry = entry.ok()?; entry.path().is_file().then_some(entry.into_path()) }) .collect::<HashSet<_>>(); assert_eq!(reported, actual); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/bool_index/mod.rs
lib/segment/src/index/field_index/bool_index/mod.rs
use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use mutable_bool_index::MutableBoolIndex; #[cfg(feature = "rocksdb")] use simple_bool_index::SimpleBoolIndex; use super::facet_index::FacetIndex; use super::map_index::IdIter; use super::{PayloadFieldIndex, ValueIndexer}; use crate::common::operation_error::OperationResult; use crate::data_types::facets::{FacetHit, FacetValueRef}; use crate::index::payload_config::{IndexMutability, StorageType}; use crate::telemetry::PayloadIndexTelemetry; pub mod mutable_bool_index; #[cfg(feature = "rocksdb")] pub mod simple_bool_index; pub enum BoolIndex { #[cfg(feature = "rocksdb")] Simple(SimpleBoolIndex), Mmap(MutableBoolIndex), } impl BoolIndex { pub fn get_point_values(&self, point_id: PointOffsetType) -> Vec<bool> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.get_point_values(point_id), BoolIndex::Mmap(index) => index.get_point_values(point_id), } } pub fn iter_values_map<'a>( &'a self, hw_acc: &'a HardwareCounterCell, ) -> Box<dyn Iterator<Item = (bool, IdIter<'a>)> + 'a> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => Box::new(index.iter_values_map()), BoolIndex::Mmap(index) => Box::new(index.iter_values_map(hw_acc)), } } pub fn iter_values(&self) -> Box<dyn Iterator<Item = bool> + '_> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => Box::new(index.iter_values()), BoolIndex::Mmap(index) => Box::new(index.iter_values()), } } pub fn iter_counts_per_value(&self) -> Box<dyn Iterator<Item = (bool, usize)> + '_> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => Box::new(index.iter_counts_per_value()), BoolIndex::Mmap(index) => Box::new(index.iter_counts_per_value()), } } pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.get_telemetry_data(), BoolIndex::Mmap(index) => index.get_telemetry_data(), } } pub fn 
values_count(&self, point_id: PointOffsetType) -> usize { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.values_count(point_id), BoolIndex::Mmap(index) => index.values_count(point_id), } } pub fn check_values_any( &self, point_id: PointOffsetType, is_true: bool, _hw_counter: &HardwareCounterCell, ) -> bool { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.check_values_any(point_id, is_true), BoolIndex::Mmap(index) => index.check_values_any(point_id, is_true), } } pub fn values_is_empty(&self, point_id: PointOffsetType) -> bool { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.values_is_empty(point_id), BoolIndex::Mmap(index) => index.values_is_empty(point_id), } } pub fn is_on_disk(&self) -> bool { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => false, BoolIndex::Mmap(index) => index.is_on_disk(), } } #[cfg(feature = "rocksdb")] pub fn is_rocksdb(&self) -> bool { match self { BoolIndex::Simple(_) => true, BoolIndex::Mmap(_) => false, } } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => {} // Not a mmap BoolIndex::Mmap(index) => index.populate()?, } Ok(()) } /// Drop disk cache. 
pub fn clear_cache(&self) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => {} // Not a mmap BoolIndex::Mmap(index) => index.clear_cache()?, } Ok(()) } pub fn get_mutability_type(&self) -> IndexMutability { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => IndexMutability::Mutable, // Mmap bool index can be both mutable and immutable, so we pick mutable BoolIndex::Mmap(_) => IndexMutability::Mutable, } } pub fn get_storage_type(&self) -> StorageType { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => crate::index::payload_config::StorageType::RocksDb, BoolIndex::Mmap(index) => StorageType::Mmap { is_on_disk: index.is_on_disk(), }, } } } impl PayloadFieldIndex for BoolIndex { fn count_indexed_points(&self) -> usize { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.count_indexed_points(), BoolIndex::Mmap(index) => index.count_indexed_points(), } } fn wipe(self) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.wipe(), BoolIndex::Mmap(index) => index.wipe(), } } fn flusher(&self) -> crate::common::Flusher { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.flusher(), BoolIndex::Mmap(index) => index.flusher(), } } fn files(&self) -> Vec<std::path::PathBuf> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.files(), BoolIndex::Mmap(index) => index.files(), } } fn immutable_files(&self) -> Vec<std::path::PathBuf> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(_) => vec![], BoolIndex::Mmap(index) => index.immutable_files(), } } fn filter<'a>( &'a self, condition: &'a crate::types::FieldCondition, hw_counter: &'a HardwareCounterCell, ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.filter(condition, hw_counter), BoolIndex::Mmap(index) => index.filter(condition, hw_counter), } } 
fn estimate_cardinality( &self, condition: &crate::types::FieldCondition, hw_counter: &HardwareCounterCell, ) -> Option<super::CardinalityEstimation> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.estimate_cardinality(condition, hw_counter), BoolIndex::Mmap(index) => index.estimate_cardinality(condition, hw_counter), } } fn payload_blocks( &self, threshold: usize, key: crate::types::PayloadKeyType, ) -> Box<dyn Iterator<Item = super::PayloadBlockCondition> + '_> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.payload_blocks(threshold, key), BoolIndex::Mmap(index) => index.payload_blocks(threshold, key), } } } impl FacetIndex for BoolIndex { fn get_point_values( &self, point_id: PointOffsetType, ) -> impl Iterator<Item = FacetValueRef<'_>> + '_ { self.get_point_values(point_id) .into_iter() .map(FacetValueRef::Bool) } fn iter_values(&self) -> impl Iterator<Item = FacetValueRef<'_>> + '_ { self.iter_values().map(FacetValueRef::Bool) } fn iter_values_map<'a>( &'a self, hw_counter: &'a HardwareCounterCell, ) -> impl Iterator<Item = (FacetValueRef<'a>, IdIter<'a>)> + 'a { self.iter_values_map(hw_counter) .map(|(value, iter)| (FacetValueRef::Bool(value), iter)) } fn iter_counts_per_value(&self) -> impl Iterator<Item = FacetHit<FacetValueRef<'_>>> + '_ { self.iter_counts_per_value().map(|(value, count)| FacetHit { value: FacetValueRef::Bool(value), count, }) } } impl ValueIndexer for BoolIndex { type ValueType = bool; fn add_many( &mut self, id: PointOffsetType, values: Vec<Self::ValueType>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.add_many(id, values, hw_counter), BoolIndex::Mmap(index) => index.add_many(id, values, hw_counter), } } fn get_value(value: &serde_json::Value) -> Option<Self::ValueType> { match value { serde_json::Value::Bool(value) => Some(*value), _ => None, } } fn remove_point(&mut self, id: 
PointOffsetType) -> OperationResult<()> { match self { #[cfg(feature = "rocksdb")] BoolIndex::Simple(index) => index.remove_point(id), BoolIndex::Mmap(index) => index.remove_point(id), } } } #[cfg(test)] mod tests { use std::path::Path; use common::counter::hardware_accumulator::HwMeasurementAcc; use common::counter::hardware_counter::HardwareCounterCell; use itertools::Itertools; use rstest::rstest; use serde_json::json; use tempfile::Builder; use super::BoolIndex; use super::mutable_bool_index::MutableBoolIndex; #[cfg(feature = "rocksdb")] use super::simple_bool_index::SimpleBoolIndex; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::open_db_with_existing_cf; use crate::index::field_index::{FieldIndexBuilderTrait as _, PayloadFieldIndex, ValueIndexer}; use crate::json_path::JsonPath; const FIELD_NAME: &str = "bool_field"; const DB_NAME: &str = "test_db"; trait OpenIndex { fn open_at(path: &Path) -> BoolIndex; } #[cfg(feature = "rocksdb")] impl OpenIndex for SimpleBoolIndex { fn open_at(path: &Path) -> BoolIndex { let db = open_db_with_existing_cf(path).unwrap(); let index = SimpleBoolIndex::new(db.clone(), FIELD_NAME, true) .unwrap() .unwrap(); BoolIndex::Simple(index) } } impl OpenIndex for MutableBoolIndex { fn open_at(path: &Path) -> BoolIndex { MutableBoolIndex::builder(path) .unwrap() .make_empty() .unwrap() } } fn match_bool(value: bool) -> crate::types::FieldCondition { crate::types::FieldCondition::new_match( JsonPath::new(FIELD_NAME), crate::types::Match::Value(crate::types::MatchValue { value: crate::types::ValueVariants::Bool(value), }), ) } fn bools_fixture() -> Vec<serde_json::Value> { vec![ json!(true), json!(false), json!([true, false]), json!([false, true]), json!([true, true]), json!([false, false]), json!([true, false, true]), serde_json::Value::Null, json!(1), json!("test"), json!([false]), json!([true]), ] } fn filter<I: OpenIndex>(given: serde_json::Value, match_on: bool, expected_count: usize) { let tmp_dir = 
Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_counter = HardwareCounterCell::new(); index.add_point(0, &[&given], &hw_counter).unwrap(); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let count = index .filter(&match_bool(match_on), &hw_counter) .unwrap() .count(); assert_eq!(count, expected_count); } #[rstest] #[case(json!(true), 1)] #[case(json!(false), 0)] #[case(json!([true]), 1)] #[case(json!([false]), 0)] #[case(json!([true, false]), 1)] #[case(json!([false, true]), 1)] #[case(json!([false, false]), 0)] #[case(json!([true, true]), 1)] fn test_filter_true(#[case] given: serde_json::Value, #[case] expected_count: usize) { #[cfg(feature = "rocksdb")] filter::<SimpleBoolIndex>(given.clone(), true, expected_count); filter::<MutableBoolIndex>(given, true, expected_count); } #[rstest] #[case(json!(true), 0)] #[case(json!(false), 1)] #[case(json!([true]), 0)] #[case(json!([false]), 1)] #[case(json!([true, false]), 1)] #[case(json!([false, true]), 1)] #[case(json!([false, false]), 1)] #[case(json!([true, true]), 0)] fn test_filter_false(#[case] given: serde_json::Value, #[case] expected_count: usize) { #[cfg(feature = "rocksdb")] filter::<SimpleBoolIndex>(given.clone(), false, expected_count); filter::<MutableBoolIndex>(given, false, expected_count); } #[test] fn test_load_from_disk() { #[cfg(feature = "rocksdb")] load_from_disk::<SimpleBoolIndex>(); load_from_disk::<MutableBoolIndex>(); } fn load_from_disk<I: OpenIndex>() { let tmp_dir = Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_counter = HardwareCounterCell::new(); bools_fixture() .into_iter() .enumerate() .for_each(|(i, value)| { index.add_point(i as u32, &[&value], &hw_counter).unwrap(); }); index.flusher()().unwrap(); drop(index); let new_index = I::open_at(tmp_dir.path()); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let 
point_offsets = new_index .filter(&match_bool(false), &hw_counter) .unwrap() .collect_vec(); assert_eq!(point_offsets, vec![1, 2, 3, 5, 6, 10]); let point_offsets = new_index .filter(&match_bool(true), &hw_counter) .unwrap() .collect_vec(); assert_eq!(point_offsets, vec![0, 2, 3, 4, 6, 11]); assert_eq!(new_index.count_indexed_points(), 9); } #[rstest] #[case(json!(false), json!(true))] #[case(json!([false, true]), json!(true))] fn test_modify_value(#[case] before: serde_json::Value, #[case] after: serde_json::Value) { #[cfg(feature = "rocksdb")] modify_value::<SimpleBoolIndex>(before.clone(), after.clone()); modify_value::<MutableBoolIndex>(before, after); } /// Try to modify from falsy to only true fn modify_value<I: OpenIndex>(before: serde_json::Value, after: serde_json::Value) { let tmp_dir = Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_cell = HardwareCounterCell::new(); let idx = 1000; index.add_point(idx, &[&before], &hw_cell).unwrap(); let hw_acc = HwMeasurementAcc::new(); let hw_counter = hw_acc.get_counter_cell(); let point_offsets = index .filter(&match_bool(false), &hw_counter) .unwrap() .collect_vec(); assert_eq!(point_offsets, vec![idx]); index.add_point(idx, &[&after], &hw_cell).unwrap(); let point_offsets = index .filter(&match_bool(true), &hw_counter) .unwrap() .collect_vec(); assert_eq!(point_offsets, vec![idx]); let point_offsets = index .filter(&match_bool(false), &hw_counter) .unwrap() .collect_vec(); assert!(point_offsets.is_empty()); } #[test] fn test_indexed_count() { #[cfg(feature = "rocksdb")] indexed_count::<SimpleBoolIndex>(); indexed_count::<MutableBoolIndex>(); } fn indexed_count<I: OpenIndex>() { let tmp_dir = Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_counter = HardwareCounterCell::new(); bools_fixture() .into_iter() .enumerate() .for_each(|(i, value)| { index.add_point(i as u32, &[&value], &hw_counter).unwrap(); }); 
assert_eq!(index.count_indexed_points(), 9); } #[test] fn test_payload_blocks() { #[cfg(feature = "rocksdb")] payload_blocks::<SimpleBoolIndex>(); payload_blocks::<MutableBoolIndex>(); } fn payload_blocks<I: OpenIndex>() { let tmp_dir = Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_counter = HardwareCounterCell::new(); bools_fixture() .into_iter() .enumerate() .for_each(|(i, value)| { index.add_point(i as u32, &[&value], &hw_counter).unwrap(); }); let blocks = index .payload_blocks(0, JsonPath::new(FIELD_NAME)) .collect_vec(); assert_eq!(blocks.len(), 2); assert_eq!(blocks[0].cardinality, 6); assert_eq!(blocks[1].cardinality, 6); } #[test] fn test_estimate_cardinality() { #[cfg(feature = "rocksdb")] estimate_cardinality::<SimpleBoolIndex>(); estimate_cardinality::<MutableBoolIndex>(); } fn estimate_cardinality<I: OpenIndex>() { let tmp_dir = Builder::new().prefix(DB_NAME).tempdir().unwrap(); let mut index = I::open_at(tmp_dir.path()); let hw_counter = HardwareCounterCell::new(); bools_fixture() .into_iter() .enumerate() .for_each(|(i, value)| { index.add_point(i as u32, &[&value], &hw_counter).unwrap(); }); let hw_counter = HardwareCounterCell::new(); let cardinality = index .estimate_cardinality(&match_bool(true), &hw_counter) .unwrap(); assert_eq!(cardinality.exp, 6); let cardinality = index .estimate_cardinality(&match_bool(false), &hw_counter) .unwrap(); assert_eq!(cardinality.exp, 6); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/field_index/bool_index/simple_bool_index.rs
lib/segment/src/index/field_index/bool_index/simple_bool_index.rs
use std::path::PathBuf;
use std::sync::Arc;

use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use parking_lot::RwLock;
use rocksdb::DB;
use serde_json::Value;

use self::memory::{BoolMemory, BooleanItem};
use super::BoolIndex;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper;
use crate::common::rocksdb_wrapper::DatabaseColumnWrapper;
use crate::index::field_index::map_index::IdIter;
use crate::index::field_index::{
    CardinalityEstimation, FieldIndexBuilderTrait, PayloadBlockCondition, PayloadFieldIndex,
    PrimaryCondition, ValueIndexer,
};
use crate::telemetry::PayloadIndexTelemetry;
use crate::types::{FieldCondition, Match, MatchValue, PayloadKeyType, ValueVariants};

/// In-memory representation of the boolean index: one bitmap per boolean
/// value ("point has a `true`" / "point has a `false`") plus cached counters
/// kept in sync on every mutation.
mod memory {
    use bitvec::vec::BitVec;
    use common::ext::BitSliceExt as _;
    use common::types::PointOffsetType;

    /// Per-point flags packed into a single byte.
    /// This byte is also the on-disk value format in RocksDB (see `as_bytes`).
    pub struct BooleanItem {
        value: u8,
    }

    impl BooleanItem {
        // Bit set when the point has at least one `true` value.
        const HAS_TRUE: u8 = 0b0000_0001;
        // Bit set when the point has at least one `false` value.
        const HAS_FALSE: u8 = 0b0000_0010;

        /// Item with no values recorded (point not indexed).
        pub fn empty() -> Self {
            Self { value: 0 }
        }

        pub fn has_true(&self) -> bool {
            self.value & Self::HAS_TRUE != 0
        }

        pub fn has_false(&self) -> bool {
            self.value & Self::HAS_FALSE != 0
        }

        /// Set (`value == true`) or clear the given flag bit
        /// (`HAS_TRUE`/`HAS_FALSE`).
        pub fn set(&mut self, flag: u8, value: bool) {
            if value {
                self.value |= flag;
            } else {
                self.value &= !flag;
            }
        }

        pub fn from_bools(has_true: bool, has_false: bool) -> Self {
            let mut item = Self::empty();
            item.set(Self::HAS_TRUE, has_true);
            item.set(Self::HAS_FALSE, has_false);
            item
        }

        /// Serialized form, stored as the RocksDB value for the point key.
        pub fn as_bytes(&self) -> [u8; 1] {
            [self.value]
        }
    }

    impl From<u8> for BooleanItem {
        fn from(value: u8) -> Self {
            Self { value }
        }
    }

    pub struct BoolMemory {
        // Bit per point id: point has at least one `true` value.
        trues: BitVec,
        // Bit per point id: point has at least one `false` value.
        falses: BitVec,
        // Cached number of set bits in `trues`.
        trues_count: usize,
        // Cached number of set bits in `falses`.
        falses_count: usize,
        // Number of points with at least one value (true or false).
        indexed_count: usize,
    }

    impl BoolMemory {
        pub fn new() -> Self {
            Self {
                trues: BitVec::new(),
                falses: BitVec::new(),
                trues_count: 0,
                falses_count: 0,
                indexed_count: 0,
            }
        }

        /// Flags for the given point. Out-of-bounds ids read as "no values".
        pub fn get(&self, id: PointOffsetType) -> BooleanItem {
            debug_assert!(self.trues.len() == self.falses.len());
            let has_true = self.trues.get_bit(id as usize).unwrap_or(false);
            let has_false = self.falses.get_bit(id as usize).unwrap_or(false);
            BooleanItem::from_bools(has_true, has_false)
        }

        /// Overwrite the flags for `id`, growing both bitmaps if needed and
        /// keeping all cached counters consistent with the bit transitions.
        pub fn set_or_insert(&mut self, id: PointOffsetType, item: &BooleanItem) {
            if (id as usize) >= self.trues.len() {
                self.trues.resize(id as usize + 1, false);
                self.falses.resize(id as usize + 1, false);
            }
            debug_assert!(self.trues.len() == self.falses.len());

            // `replace` returns the previous bit, so each (old, new) pair
            // tells us exactly how the counter must change.
            let has_true = item.has_true();
            let had_true = self.trues.replace(id as usize, has_true);
            match (had_true, has_true) {
                (false, true) => self.trues_count += 1,
                (true, false) => self.trues_count -= 1,
                _ => {}
            }

            let has_false = item.has_false();
            let had_false = self.falses.replace(id as usize, has_false);
            match (had_false, has_false) {
                (false, true) => self.falses_count += 1,
                (true, false) => self.falses_count -= 1,
                _ => {}
            }

            // A point is "indexed" iff it carries at least one value.
            let was_indexed = had_true || had_false;
            let is_indexed = has_true || has_false;
            match (was_indexed, is_indexed) {
                (false, true) => {
                    self.indexed_count += 1;
                }
                (true, false) => {
                    self.indexed_count = self.indexed_count.saturating_sub(1);
                }
                _ => {}
            }
        }

        /// Removes the point from the index (clears both bits and updates
        /// the counters). If the id is not within bounds, does nothing.
        pub fn remove(&mut self, id: PointOffsetType) {
            if (id as usize) >= self.trues.len() {
                return;
            }
            let had_true = self.trues.replace(id as usize, false);
            let had_false = self.falses.replace(id as usize, false);
            if had_true {
                self.trues_count -= 1;
            }
            if had_false {
                self.falses_count -= 1;
            }
            if had_false || had_true {
                self.indexed_count -= 1;
            }
        }

        pub fn trues_count(&self) -> usize {
            self.trues_count
        }

        pub fn falses_count(&self) -> usize {
            self.falses_count
        }

        pub fn indexed_count(&self) -> usize {
            self.indexed_count
        }

        /// Ids of all points that have a `true` value, ascending.
        pub fn iter_has_true(&self) -> impl Iterator<Item = PointOffsetType> + '_ {
            self.trues.iter_ones().map(|v| v as PointOffsetType)
        }

        /// Ids of all points that have a `false` value, ascending.
        pub fn iter_has_false(&self) -> impl Iterator<Item = PointOffsetType> + '_ {
            self.falses.iter_ones().map(|v| v as PointOffsetType)
        }
    }
}

/// Payload index for boolean values, persisted in a RocksDB column family
pub struct SimpleBoolIndex {
    // Authoritative in-memory state; rebuilt from RocksDB on open.
    memory: BoolMemory,
    // Write-through persistence: key = point id (big-endian), value = 1 flag byte.
    db_wrapper: DatabaseColumnScheduledDeleteWrapper,
}

impl SimpleBoolIndex {
    /// Open (or, with `create_if_missing`, create) the index backed by the
    /// per-field column family. Returns `Ok(None)` when the column family is
    /// absent and creation was not requested.
    pub fn new(
        db: Arc<RwLock<DB>>,
        field_name: &str,
        create_if_missing: bool,
    ) -> OperationResult<Option<SimpleBoolIndex>> {
        let store_cf_name = Self::storage_cf_name(field_name);
        let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new(
            db,
            &store_cf_name,
        ));
        if !db_wrapper.has_column_family()? {
            if create_if_missing {
                db_wrapper.recreate_column_family()?;
            } else {
                // Column family doesn't exist, cannot load
                return Ok(None);
            }
        };

        // Load in-memory index from RocksDB
        let mut memory = BoolMemory::new();
        for (key, value) in db_wrapper.lock_db().iter()? {
            let idx = PointOffsetType::from_be_bytes(key.as_ref().try_into().unwrap());
            debug_assert_eq!(value.len(), 1);
            let item = BooleanItem::from(value[0]);
            memory.set_or_insert(idx, &item);
        }
        Ok(Some(Self { memory, db_wrapper }))
    }

    /// Builder that (re)creates the column family and fills it point by point.
    pub fn builder(db: Arc<RwLock<DB>>, field_name: &str) -> OperationResult<BoolIndexBuilder> {
        Ok(BoolIndexBuilder(
            Self::new(db, field_name, true)?.ok_or_else(|| {
                OperationError::service_error(format!(
                    "Failed to create and open SimpleBoolIndex for field: {field_name}",
                ))
            })?,
        ))
    }

    /// Column family name for the given payload field.
    fn storage_cf_name(field: &str) -> String {
        format!("{field}_binary")
    }

    pub fn get_telemetry_data(&self) -> PayloadIndexTelemetry {
        PayloadIndexTelemetry {
            field_name: None,
            points_count: self.memory.indexed_count(),
            // Each point contributes one "value" per distinct boolean it has.
            points_values_count: self.memory.trues_count() + self.memory.falses_count(),
            histogram_bucket_size: None,
            index_type: "simple_bool",
        }
    }

    /// Whether the point has the requested boolean among its values.
    pub fn check_values_any(&self, point_id: PointOffsetType, is_true: bool) -> bool {
        if is_true {
            self.values_has_true(point_id)
        } else {
            self.values_has_false(point_id)
        }
    }

    /// Number of distinct boolean values recorded for the point (0, 1, or 2).
    pub fn values_count(&self, point_id: PointOffsetType) -> usize {
        let binary_item = self.memory.get(point_id);
        usize::from(binary_item.has_true()) + usize::from(binary_item.has_false())
    }

    pub fn values_is_empty(&self, point_id: PointOffsetType) -> bool {
        self.values_count(point_id) == 0
    }

    /// Check if the point has a true value
    pub fn values_has_true(&self, point_id: PointOffsetType) -> bool {
        self.memory.get(point_id).has_true()
    }

    /// Check if the point has a false value
    pub fn values_has_false(&self, point_id: PointOffsetType) -> bool {
        self.memory.get(point_id).has_false()
    }

    /// Map of value -> iterator over point ids having that value
    /// (always yields both `false` and `true` entries, in that order).
    pub fn iter_values_map(&self) -> impl Iterator<Item = (bool, IdIter<'_>)> {
        [
            (false, Box::new(self.memory.iter_has_false()) as IdIter),
            (true, Box::new(self.memory.iter_has_true()) as IdIter),
        ]
        .into_iter()
    }

    /// Distinct values present in the index: yields `true` and/or `false`
    /// only if at least one point carries that value.
    pub fn iter_values(&self) -> impl Iterator<Item = bool> + '_ {
        [
            self.memory.iter_has_true().next().map(|_| true),
            self.memory.iter_has_false().next().map(|_| false),
        ]
        .into_iter()
        .flatten()
    }

    /// (value, point count) pairs for both booleans, `false` first.
    pub fn iter_counts_per_value(&self) -> impl Iterator<Item = (bool, usize)> + '_ {
        vec![
            (false, self.memory.falses_count()),
            (true, self.memory.trues_count()),
        ]
        .into_iter()
    }

    /// Distinct boolean values of a single point, `true` first when present.
    pub(crate) fn get_point_values(&self, point_id: u32) -> Vec<bool> {
        let boolean_item = self.memory.get(point_id);
        [
            boolean_item.has_true().then_some(true),
            boolean_item.has_false().then_some(false),
        ]
        .into_iter()
        .flatten()
        .collect()
    }
}

/// Newtype builder: wraps an already-opened `SimpleBoolIndex` and
/// implements the generic field-index building protocol.
pub struct BoolIndexBuilder(SimpleBoolIndex);

impl FieldIndexBuilderTrait for BoolIndexBuilder {
    type FieldIndexType = BoolIndex;

    fn init(&mut self) -> OperationResult<()> {
        // Start from an empty column family, discarding any previous data.
        self.0.db_wrapper.recreate_column_family()
    }

    fn add_point(
        &mut self,
        id: PointOffsetType,
        payload: &[&Value],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.0.add_point(id, payload, hw_counter)
    }

    fn finalize(self) -> OperationResult<Self::FieldIndexType> {
        Ok(BoolIndex::Simple(self.0))
    }
}

impl PayloadFieldIndex for SimpleBoolIndex {
    fn wipe(self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()
    }

    fn flusher(&self) -> crate::common::Flusher {
        self.db_wrapper.flusher()
    }

    fn files(&self) -> Vec<PathBuf> {
        // Data lives in RocksDB, not in standalone files.
        vec![]
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        vec![]
    }

    /// Iterator over matching points for a `Match::Value(Bool)` condition;
    /// `None` for any other condition kind (not supported by this index).
    fn filter<'a>(
        &'a self,
        condition: &'a crate::types::FieldCondition,
        _: &'a HardwareCounterCell,
    ) -> Option<Box<dyn Iterator<Item = PointOffsetType> + 'a>> {
        match &condition.r#match {
            Some(Match::Value(MatchValue {
                value: ValueVariants::Bool(value),
            })) => {
                if *value {
                    Some(Box::new(self.memory.iter_has_true()))
                } else {
                    Some(Box::new(self.memory.iter_has_false()))
                }
            }
            _ => None,
        }
    }

    /// Exact cardinality from the cached counters; `None` for unsupported
    /// condition kinds.
    fn estimate_cardinality(
        &self,
        condition: &FieldCondition,
        _: &HardwareCounterCell,
    ) -> Option<CardinalityEstimation> {
        match &condition.r#match {
            Some(Match::Value(MatchValue {
                value: ValueVariants::Bool(value),
            })) => {
                let count = if *value {
                    self.memory.trues_count()
                } else {
                    self.memory.falses_count()
                };
                let estimation = CardinalityEstimation::exact(count)
                    .with_primary_clause(PrimaryCondition::Condition(Box::new(condition.clone())));
                Some(estimation)
            }
            _ => None,
        }
    }

    /// Payload blocks whose cardinality exceeds `threshold`.
    fn payload_blocks(
        &self,
        threshold: usize,
        key: PayloadKeyType,
    ) -> Box<dyn Iterator<Item = PayloadBlockCondition> + '_> {
        let make_block = |count, value, key: PayloadKeyType| {
            if count > threshold {
                Some(PayloadBlockCondition {
                    condition: FieldCondition::new_match(
                        key,
                        Match::Value(MatchValue {
                            value: ValueVariants::Bool(value),
                        }),
                    ),
                    cardinality: count,
                })
            } else {
                None
            }
        };

        // just two possible blocks: true and false
        let iter = [
            make_block(self.memory.trues_count(), true, key.clone()),
            make_block(self.memory.falses_count(), false, key),
        ]
        .into_iter()
        .flatten();
        Box::new(iter)
    }

    fn count_indexed_points(&self) -> usize {
        self.memory.indexed_count()
    }
}

impl ValueIndexer for SimpleBoolIndex {
    type ValueType = bool;

    /// Collapse the point's values into (has_true, has_false) flags, then
    /// write through to memory and RocksDB. Empty input is a no-op.
    fn add_many(
        &mut self,
        id: PointOffsetType,
        values: Vec<bool>,
        _: &HardwareCounterCell,
    ) -> OperationResult<()> {
        if values.is_empty() {
            return Ok(());
        }

        let has_true = values.iter().any(|v| *v);
        let has_false = values.iter().any(|v| !*v);
        let item = BooleanItem::from_bools(has_true, has_false);

        self.memory.set_or_insert(id, &item);

        let item_bytes = item.as_bytes();
        self.db_wrapper.put(id.to_be_bytes(), item_bytes)?;
        Ok(())
    }

    fn get_value(value: &serde_json::Value) -> Option<bool> {
        value.as_bool()
    }

    /// Remove the point from memory and schedule its key for deletion in
    /// RocksDB.
    fn remove_point(&mut self, id: PointOffsetType) -> OperationResult<()> {
        self.memory.remove(id);
        self.db_wrapper.remove(id.to_be_bytes())?;
        Ok(())
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sparse_index/sparse_search_telemetry.rs
lib/segment/src/index/sparse_index/sparse_search_telemetry.rs
use std::sync::Arc;

use common::types::TelemetryDetail;
use parking_lot::Mutex;

use crate::common::operation_time_statistics::OperationDurationsAggregator;
use crate::telemetry::VectorIndexSearchesTelemetry;

/// Duration aggregators for every kind of search a sparse vector index can
/// serve. Each field is shared (`Arc<Mutex<..>>`) so concurrent search paths
/// can record timings into the same aggregator.
#[derive(Debug)]
pub struct SparseSearchesTelemetry {
    pub filtered_sparse: Arc<Mutex<OperationDurationsAggregator>>,
    pub unfiltered_sparse: Arc<Mutex<OperationDurationsAggregator>>,
    pub filtered_plain: Arc<Mutex<OperationDurationsAggregator>>,
    pub unfiltered_plain: Arc<Mutex<OperationDurationsAggregator>>,
    pub small_cardinality: Arc<Mutex<OperationDurationsAggregator>>,
}

impl SparseSearchesTelemetry {
    /// A fresh set of empty aggregators.
    pub fn new() -> Self {
        Self {
            filtered_sparse: OperationDurationsAggregator::new(),
            unfiltered_sparse: OperationDurationsAggregator::new(),
            filtered_plain: OperationDurationsAggregator::new(),
            unfiltered_plain: OperationDurationsAggregator::new(),
            small_cardinality: OperationDurationsAggregator::new(),
        }
    }

    /// Snapshot the collected statistics into the shared vector-index
    /// telemetry structure. Search kinds a sparse index never performs
    /// (HNSW, exact) are reported as defaults.
    pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> VectorIndexSearchesTelemetry {
        let snapshot =
            |agg: &Arc<Mutex<OperationDurationsAggregator>>| agg.lock().get_statistics(detail);
        VectorIndexSearchesTelemetry {
            index_name: None,
            unfiltered_plain: snapshot(&self.unfiltered_plain),
            filtered_plain: snapshot(&self.filtered_plain),
            unfiltered_hnsw: Default::default(),
            filtered_small_cardinality: snapshot(&self.small_cardinality),
            filtered_large_cardinality: Default::default(),
            filtered_exact: Default::default(),
            filtered_sparse: snapshot(&self.filtered_sparse),
            unfiltered_sparse: snapshot(&self.unfiltered_sparse),
            unfiltered_exact: Default::default(),
        }
    }
}

impl Default for SparseSearchesTelemetry {
    fn default() -> Self {
        Self::new()
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sparse_index/indices_tracker.rs
lib/segment/src/index/sparse_index/indices_tracker.rs
use std::path::{Path, PathBuf};

use ahash::AHashMap;
use io::file_operations::{atomic_save_json, read_json};
use serde::{Deserialize, Serialize};
use sparse::common::sparse_vector::{RemappedSparseVector, SparseVector};
use sparse::common::types::{DimId, DimOffset};

use crate::common::operation_error::OperationResult;

const INDICES_TRACKER_FILE_NAME: &str = "indices_tracker.json";

/// Maps externally observed sparse dimension ids to dense internal offsets,
/// assigned in order of first appearance. Persisted as JSON next to the index.
#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)]
pub struct IndicesTracker {
    pub map: AHashMap<DimId, DimOffset>,
}

impl IndicesTracker {
    /// Load a previously saved tracker from the index directory.
    pub fn open(path: &Path) -> std::io::Result<Self> {
        let file = Self::file_path(path);
        Ok(read_json(&file)?)
    }

    /// Atomically persist the tracker into the index directory.
    pub fn save(&self, path: &Path) -> OperationResult<()> {
        let file = Self::file_path(path);
        Ok(atomic_save_json(&file, self)?)
    }

    /// Location of the tracker file inside the index directory.
    pub fn file_path(path: &Path) -> PathBuf {
        path.join(INDICES_TRACKER_FILE_NAME)
    }

    /// Record every dimension id of `vector` not seen before, assigning it
    /// the next free offset (the current map size).
    pub fn register_indices(&mut self, vector: &SparseVector) {
        for index in &vector.indices {
            // Compute the candidate offset before `entry` borrows the map.
            let next_offset = self.map.len() as DimId;
            self.map.entry(*index).or_insert(next_offset);
        }
    }

    /// Internal offset for a registered dimension id, or `None` if unknown.
    pub fn remap_index(&self, index: DimId) -> Option<DimOffset> {
        self.map.get(&index).copied()
    }

    /// Rewrite the vector's dimension ids into internal offsets and sort the
    /// result by index. Ids that were never registered receive fresh
    /// placeholder offsets past the end of the map, so they cannot collide
    /// with registered offsets.
    pub fn remap_vector(&self, vector: SparseVector) -> RemappedSparseVector {
        let SparseVector { mut indices, values } = vector;
        // Placeholders are handed out by pre-increment, i.e. starting at
        // `map.len() + 1` — this mirrors the original assignment scheme.
        let mut next_placeholder = self.map.len() as DimOffset;
        for index in indices.iter_mut() {
            *index = match self.remap_index(*index) {
                Some(offset) => offset,
                None => {
                    next_placeholder += 1;
                    next_placeholder
                }
            };
        }
        let mut remapped = RemappedSparseVector { indices, values };
        remapped.sort_by_indices();
        remapped
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sparse_index/sparse_vector_index.rs
lib/segment/src/index/sparse_index/sparse_vector_index.rs
use std::borrow::Cow; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::atomic::AtomicBool; use atomic_refcell::AtomicRefCell; use common::counter::hardware_counter::HardwareCounterCell; use common::types::{PointOffsetType, ScoredPointOffset, TelemetryDetail}; use fs_err as fs; use io::storage_version::{StorageVersion as _, VERSION_FILE}; use itertools::Itertools; use semver::Version; use sparse::common::scores_memory_pool::ScoresMemoryPool; use sparse::common::sparse_vector::SparseVector; use sparse::common::types::DimId; use sparse::index::inverted_index::inverted_index_ram_builder::InvertedIndexBuilder; use sparse::index::inverted_index::{INDEX_FILE_NAME, InvertedIndex, OLD_INDEX_FILE_NAME}; use sparse::index::search_context::SearchContext; use super::indices_tracker::IndicesTracker; use super::sparse_index_config::SparseIndexType; use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped}; use crate::common::operation_time_statistics::ScopeDurationMeasurer; use crate::data_types::named_vectors::CowVector; use crate::data_types::query_context::VectorQueryContext; use crate::data_types::vectors::{QueryVector, VectorInternal, VectorRef}; use crate::id_tracker::IdTrackerSS; use crate::index::field_index::CardinalityEstimation; use crate::index::hnsw_index::point_scorer::BatchFilteredSearcher; use crate::index::query_estimator::adjust_to_available_vectors; use crate::index::sparse_index::sparse_index_config::SparseIndexConfig; use crate::index::sparse_index::sparse_search_telemetry::SparseSearchesTelemetry; use crate::index::struct_payload_index::StructPayloadIndex; use crate::index::{PayloadIndex, VectorIndex}; use crate::telemetry::VectorIndexSearchesTelemetry; use crate::types::{DEFAULT_SPARSE_FULL_SCAN_THRESHOLD, Filter, SearchParams}; use crate::vector_storage::query::TransformInto; use crate::vector_storage::{Random, VectorStorage, VectorStorageEnum, check_deleted_condition}; /// 
Whether to use the new compressed format. pub const USE_COMPRESSED: bool = true; #[derive(Debug)] pub struct SparseVectorIndex<TInvertedIndex: InvertedIndex> { config: SparseIndexConfig, id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, path: PathBuf, inverted_index: TInvertedIndex, searches_telemetry: SparseSearchesTelemetry, indices_tracker: IndicesTracker, scores_memory_pool: ScoresMemoryPool, } /// Getters for internals, used for testing. #[cfg(feature = "testing")] impl<TInvertedIndex: InvertedIndex> SparseVectorIndex<TInvertedIndex> { pub fn config(&self) -> SparseIndexConfig { self.config } pub fn id_tracker(&self) -> &Arc<AtomicRefCell<IdTrackerSS>> { &self.id_tracker } pub fn vector_storage(&self) -> &Arc<AtomicRefCell<VectorStorageEnum>> { &self.vector_storage } pub fn payload_index(&self) -> &Arc<AtomicRefCell<StructPayloadIndex>> { &self.payload_index } pub fn indices_tracker(&self) -> &IndicesTracker { &self.indices_tracker } } pub struct SparseVectorIndexOpenArgs<'a, F: FnMut()> { pub config: SparseIndexConfig, pub id_tracker: Arc<AtomicRefCell<IdTrackerSS>>, pub vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>, pub payload_index: Arc<AtomicRefCell<StructPayloadIndex>>, pub path: &'a Path, pub stopped: &'a AtomicBool, pub tick_progress: F, } impl<TInvertedIndex: InvertedIndex> SparseVectorIndex<TInvertedIndex> { /// Open a sparse vector index at a given path pub fn open<F: FnMut()>(args: SparseVectorIndexOpenArgs<F>) -> OperationResult<Self> { let SparseVectorIndexOpenArgs { config, id_tracker, vector_storage, payload_index, path, stopped, tick_progress, } = args; let config_path = SparseIndexConfig::get_config_path(path); let (config, inverted_index, indices_tracker) = if !config.index_type.is_persisted() { // RAM mutable case - build inverted index from scratch and use provided config fs::create_dir_all(path)?; let (inverted_index, 
indices_tracker) = Self::build_inverted_index( &id_tracker, &vector_storage, path, stopped, tick_progress, )?; (config, inverted_index, indices_tracker) } else { Self::try_load(path).or_else(|e| { if fs::exists(path).unwrap_or(true) { log::warn!("Failed to load {path:?}, rebuilding: {e}"); // Drop index completely. fs::remove_dir_all(path)?; } fs::create_dir_all(path)?; let (inverted_index, indices_tracker) = Self::build_inverted_index( &id_tracker, &vector_storage, path, stopped, tick_progress, )?; config.save(&config_path)?; inverted_index.save(path)?; indices_tracker.save(path)?; // Save the version as the last step to mark a successful rebuild. // NOTE: index in the original format (Qdrant <=v1.9 / sparse <=v0.1.0) lacks of the // version file. To distinguish between index in original format and partially // written index in the current format, the index file name is changed from // `inverted_index.data` to `inverted_index.dat`. TInvertedIndex::Version::save(path)?; OperationResult::Ok((config, inverted_index, indices_tracker)) })? }; let searches_telemetry = SparseSearchesTelemetry::new(); let path = path.to_path_buf(); let scores_memory_pool = ScoresMemoryPool::new(); Ok(Self { config, id_tracker, vector_storage, payload_index, path, inverted_index, searches_telemetry, indices_tracker, scores_memory_pool, }) } fn try_load( path: &Path, ) -> OperationResult<(SparseIndexConfig, TInvertedIndex, IndicesTracker)> { let mut stored_version = TInvertedIndex::Version::load(path)?; // Simple migration mechanism for 0.1.0. let old_path = path.join(OLD_INDEX_FILE_NAME); if TInvertedIndex::Version::current() == Version::new(0, 1, 0) && old_path.exists() { // Didn't have a version file, but uses 0.1.0 index. Create a version file. 
fs::rename(old_path, path.join(INDEX_FILE_NAME))?; TInvertedIndex::Version::save(path)?; stored_version = Some(TInvertedIndex::Version::current()); } if stored_version != Some(TInvertedIndex::Version::current()) { return Err(OperationError::service_error_light(format!( "Index version mismatch, expected {}, found {}", TInvertedIndex::Version::current(), stored_version.map_or_else(|| "none".to_string(), |v| v.to_string()), ))); } let loaded_config = SparseIndexConfig::load(&SparseIndexConfig::get_config_path(path))?; let inverted_index = TInvertedIndex::open(path)?; let indices_tracker = IndicesTracker::open(path)?; Ok((loaded_config, inverted_index, indices_tracker)) } fn build_inverted_index( id_tracker: &AtomicRefCell<IdTrackerSS>, vector_storage: &AtomicRefCell<VectorStorageEnum>, path: &Path, stopped: &AtomicBool, mut tick_progress: impl FnMut(), ) -> OperationResult<(TInvertedIndex, IndicesTracker)> { let borrowed_vector_storage = vector_storage.borrow(); let borrowed_id_tracker = id_tracker.borrow(); let deleted_bitslice = borrowed_vector_storage.deleted_vector_bitslice(); let mut ram_index_builder = InvertedIndexBuilder::new(); let mut indices_tracker = IndicesTracker::default(); for id in borrowed_id_tracker.iter_internal_excluding(deleted_bitslice) { check_process_stopped(stopped)?; // It is possible that the vector is not present in the storage in case of crash. 
// Because: // - the `id_tracker` is flushed before the `vector_storage` // - the sparse index is built *before* recovering the WAL when loading a segment match borrowed_vector_storage.get_vector_opt::<Random>(id) { None => { // the vector was lost in a crash but will be recovered by the WAL let point_id = borrowed_id_tracker.external_id(id); let point_version = borrowed_id_tracker.internal_version(id); log::debug!( "Sparse vector with id {id} is not found, external_id: {point_id:?}, version: {point_version:?}", ) } Some(vector) => { let vector: &SparseVector = vector.as_vec_ref().try_into()?; // do not index empty vectors if vector.is_empty() { continue; } indices_tracker.register_indices(vector); let vector = indices_tracker.remap_vector(vector.to_owned()); ram_index_builder.add(id, vector); } } tick_progress(); } Ok(( TInvertedIndex::from_ram_index(Cow::Owned(ram_index_builder.build()), path)?, indices_tracker, )) } pub fn inverted_index(&self) -> &TInvertedIndex { &self.inverted_index } /// Returns the maximum number of results that can be returned by the index for a given sparse vector /// Warning: the cost of this function grows with the number of dimensions in the query vector #[cfg(feature = "testing")] pub fn max_result_count(&self, query_vector: &SparseVector) -> usize { use sparse::index::posting_list_common::PostingListIter as _; // For tests only let hw_counter = HardwareCounterCell::disposable(); let mut unique_record_ids = std::collections::HashSet::new(); for dim_id in query_vector.indices.iter() { if let Some(dim_id) = self.indices_tracker.remap_index(*dim_id) && let Some(posting_list_iter) = self.inverted_index.get(dim_id, &hw_counter) { for element in posting_list_iter.into_std_iter() { unique_record_ids.insert(element.record_id); } } } unique_record_ids.len() } fn get_query_cardinality( &self, filter: &Filter, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { let vector_storage = self.vector_storage.borrow(); let id_tracker = 
self.id_tracker.borrow(); let payload_index = self.payload_index.borrow(); let available_vector_count = vector_storage.available_vector_count(); let query_point_cardinality = payload_index.estimate_cardinality(filter, hw_counter); adjust_to_available_vectors( query_point_cardinality, available_vector_count, id_tracker.available_point_count(), ) } // Search using raw scorer fn search_scored( &self, query_vector: &QueryVector, filter: Option<&Filter>, top: usize, prefiltered_points: &mut Option<Vec<PointOffsetType>>, vector_query_context: &VectorQueryContext, ) -> OperationResult<Vec<ScoredPointOffset>> { let vector_storage = self.vector_storage.borrow(); let id_tracker = self.id_tracker.borrow(); let deleted_point_bitslice = vector_query_context .deleted_points() .unwrap_or(id_tracker.deleted_point_bitslice()); let is_stopped = vector_query_context.is_stopped(); let searcher = BatchFilteredSearcher::new( &[query_vector], &vector_storage, None, None, top, deleted_point_bitslice, vector_query_context.hardware_counter(), )?; let hw_counter = vector_query_context.hardware_counter(); let mut results = match filter { Some(filter) => { let payload_index = self.payload_index.borrow(); let mut filtered_points = match prefiltered_points { Some(filtered_points) => filtered_points.iter().copied(), None => { let filtered_points = payload_index.query_points(filter, &hw_counter, &is_stopped); *prefiltered_points = Some(filtered_points); prefiltered_points.as_ref().unwrap().iter().copied() } }; searcher.peek_top_iter(&mut filtered_points, &is_stopped)? 
} None => searcher.peek_top_all(&is_stopped)?, }; let res = results.pop().expect("single element results"); Ok(res) } pub fn search_plain( &self, sparse_vector: &SparseVector, filter: &Filter, top: usize, prefiltered_points: &mut Option<Vec<PointOffsetType>>, vector_query_context: &VectorQueryContext, ) -> OperationResult<Vec<ScoredPointOffset>> { let vector_storage = self.vector_storage.borrow(); let id_tracker = self.id_tracker.borrow(); let payload_index = self.payload_index.borrow(); let is_stopped = vector_query_context.is_stopped(); let deleted_point_bitslice = vector_query_context .deleted_points() .unwrap_or(id_tracker.deleted_point_bitslice()); let deleted_vectors = vector_storage.deleted_vector_bitslice(); let hw_counter = vector_query_context.hardware_counter(); let ids = match prefiltered_points { Some(filtered_points) => filtered_points.iter(), None => { let filtered_points = payload_index.query_points(filter, &hw_counter, &is_stopped); *prefiltered_points = Some(filtered_points); prefiltered_points.as_ref().unwrap().iter() } } .copied() .filter(|&idx| check_deleted_condition(idx, deleted_vectors, deleted_point_bitslice)) .collect_vec(); let sparse_vector = self.indices_tracker.remap_vector(sparse_vector.clone()); let memory_handle = self.scores_memory_pool.get(); let mut hw_counter = vector_query_context.hardware_counter(); let is_index_on_disk = self.config.index_type.is_on_disk(); if is_index_on_disk { hw_counter.set_vector_io_read_multiplier(1); } else { hw_counter.set_vector_io_read_multiplier(0); } let mut search_context = SearchContext::new( sparse_vector, top, &self.inverted_index, memory_handle, &is_stopped, &hw_counter, ); let search_result = search_context.plain_search(&ids); Ok(search_result) } // search using sparse vector inverted index fn search_sparse( &self, sparse_vector: &SparseVector, filter: Option<&Filter>, top: usize, vector_query_context: &VectorQueryContext, ) -> Vec<ScoredPointOffset> { let vector_storage = 
self.vector_storage.borrow(); let id_tracker = self.id_tracker.borrow(); let deleted_point_bitslice = vector_query_context .deleted_points() .unwrap_or(id_tracker.deleted_point_bitslice()); let deleted_vectors = vector_storage.deleted_vector_bitslice(); let not_deleted_condition = |idx: PointOffsetType| -> bool { check_deleted_condition(idx, deleted_vectors, deleted_point_bitslice) }; let is_stopped = vector_query_context.is_stopped(); let sparse_vector = self.indices_tracker.remap_vector(sparse_vector.clone()); let memory_handle = self.scores_memory_pool.get(); let mut hw_counter = vector_query_context.hardware_counter(); let is_index_on_disk = self.config.index_type.is_on_disk(); if is_index_on_disk { hw_counter.set_vector_io_read_multiplier(1); } else { hw_counter.set_vector_io_read_multiplier(0); } let mut search_context = SearchContext::new( sparse_vector, top, &self.inverted_index, memory_handle, &is_stopped, &hw_counter, ); match filter { Some(filter) => { let payload_index = self.payload_index.borrow(); let filter_context = payload_index.filter_context(filter, &hw_counter); let matches_filter_condition = |idx: PointOffsetType| -> bool { not_deleted_condition(idx) && filter_context.check(idx) }; search_context.search(&matches_filter_condition) } None => search_context.search(&not_deleted_condition), } } fn search_nearest_query( &self, vector: &SparseVector, filter: Option<&Filter>, top: usize, prefiltered_points: &mut Option<Vec<PointOffsetType>>, vector_query_context: &VectorQueryContext, ) -> OperationResult<Vec<ScoredPointOffset>> { if vector.is_empty() { return Ok(vec![]); } match filter { Some(filter) => { // if cardinality is small - use plain search let query_cardinality = self.get_query_cardinality(filter, &vector_query_context.hardware_counter()); let threshold = self .config .full_scan_threshold .unwrap_or(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD); if query_cardinality.max < threshold { let _timer = 
ScopeDurationMeasurer::new(&self.searches_telemetry.small_cardinality); self.search_plain( vector, filter, top, prefiltered_points, vector_query_context, ) } else { let _timer = ScopeDurationMeasurer::new(&self.searches_telemetry.filtered_sparse); Ok(self.search_sparse(vector, Some(filter), top, vector_query_context)) } } None => { let _timer = ScopeDurationMeasurer::new(&self.searches_telemetry.unfiltered_sparse); Ok(self.search_sparse(vector, filter, top, vector_query_context)) } } } pub fn search_query( &self, query_vector: &QueryVector, filter: Option<&Filter>, top: usize, prefiltered_points: &mut Option<Vec<PointOffsetType>>, vector_query_context: &VectorQueryContext, ) -> OperationResult<Vec<ScoredPointOffset>> { if top == 0 { return Ok(vec![]); } match query_vector { QueryVector::Nearest(vector) => self.search_nearest_query( vector.try_into()?, filter, top, prefiltered_points, vector_query_context, ), QueryVector::RecommendBestScore(_) | QueryVector::RecommendSumScores(_) | QueryVector::Discovery(_) | QueryVector::Context(_) | QueryVector::FeedbackNaive(_) => { let _timer = if filter.is_some() { ScopeDurationMeasurer::new(&self.searches_telemetry.filtered_plain) } else { ScopeDurationMeasurer::new(&self.searches_telemetry.unfiltered_plain) }; self.search_scored( query_vector, filter, top, prefiltered_points, vector_query_context, ) } } } // Update statistics for idf-dot similarity pub fn fill_idf_statistics( &self, idf: &mut HashMap<DimId, usize>, hw_counter: &HardwareCounterCell, ) { for (dim_id, count) in idf.iter_mut() { if let Some(remapped_dim_id) = self.indices_tracker.remap_index(*dim_id) && let Some(posting_list_len) = self .inverted_index .posting_list_len(&remapped_dim_id, hw_counter) { *count += posting_list_len } } } } impl<TInvertedIndex: InvertedIndex> VectorIndex for SparseVectorIndex<TInvertedIndex> { fn search( &self, vectors: &[&QueryVector], filter: Option<&Filter>, top: usize, _params: Option<&SearchParams>, query_context: 
&VectorQueryContext, ) -> OperationResult<Vec<Vec<ScoredPointOffset>>> { let mut results = Vec::with_capacity(vectors.len()); let mut prefiltered_points = None; for vector in vectors { check_process_stopped(&query_context.is_stopped())?; let search_results = if query_context.is_require_idf() { let vector = (*vector).clone().transform(|mut vector| { match &mut vector { VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => { return Err(OperationError::WrongSparse); } VectorInternal::Sparse(sparse) => { query_context.remap_idf_weights(&sparse.indices, &mut sparse.values) } } Ok(vector) })?; self.search_query(&vector, filter, top, &mut prefiltered_points, query_context)? } else { self.search_query(vector, filter, top, &mut prefiltered_points, query_context)? }; results.push(search_results); } Ok(results) } fn get_telemetry_data(&self, detail: TelemetryDetail) -> VectorIndexSearchesTelemetry { self.searches_telemetry.get_telemetry_data(detail) } fn files(&self) -> Vec<PathBuf> { let config_file = SparseIndexConfig::get_config_path(&self.path); if !config_file.exists() { return vec![]; } let mut all_files = vec![ IndicesTracker::file_path(&self.path), self.path.join(VERSION_FILE), ]; all_files.retain(|f| f.exists()); all_files.push(config_file); all_files.extend_from_slice(&TInvertedIndex::files(&self.path)); all_files } fn immutable_files(&self) -> Vec<PathBuf> { let config_file = SparseIndexConfig::get_config_path(&self.path); if !config_file.exists() { return vec![]; } let mut immutable_files = vec![ self.path.join(VERSION_FILE), // TODO: Is version file immutable? 
]; immutable_files.retain(|f| f.exists()); immutable_files.push(config_file); immutable_files.extend_from_slice(&TInvertedIndex::immutable_files(&self.path)); immutable_files } fn indexed_vector_count(&self) -> usize { self.inverted_index.vector_count() } fn size_of_searchable_vectors_in_bytes(&self) -> usize { self.inverted_index.total_sparse_vectors_size() } fn update_vector( &mut self, id: PointOffsetType, vector: Option<VectorRef>, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { let (old_vector, new_vector) = { let mut vector_storage = self.vector_storage.borrow_mut(); let old_vector = vector_storage .get_vector_opt::<Random>(id) .map(CowVector::to_owned); let new_vector = if let Some(vector) = vector { vector_storage.insert_vector(id, vector, hw_counter)?; vector.to_owned() } else { let default_vector = vector_storage.default_vector(); if id as usize >= vector_storage.total_vector_count() { // Vector doesn't exist in the storage // Insert default vector to keep the sequence vector_storage.insert_vector( id, VectorRef::from(&default_vector), hw_counter, )?; } vector_storage.delete_vector(id)?; default_vector }; (old_vector, new_vector) }; if self.config.index_type != SparseIndexType::MutableRam { return Err(OperationError::service_error( "Cannot update vector in non-appendable index", )); } let vector = SparseVector::try_from(new_vector)?; let old_vector: Option<SparseVector> = old_vector.map(SparseVector::try_from).transpose()?; // do not upsert empty vectors into the index if !vector.is_empty() { self.indices_tracker.register_indices(&vector); let vector = self.indices_tracker.remap_vector(vector); let old_vector = old_vector.map(|v| self.indices_tracker.remap_vector(v)); self.inverted_index.upsert(id, vector, old_vector); } else if let Some(old_vector) = old_vector { // Make sure empty vectors do not interfere with the index if !old_vector.is_empty() { let old_vector = self.indices_tracker.remap_vector(old_vector); self.inverted_index.remove(id, 
old_vector); } } Ok(()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sparse_index/mod.rs
lib/segment/src/index/sparse_index/mod.rs
pub mod indices_tracker; pub mod sparse_index_config; pub mod sparse_search_telemetry; pub mod sparse_vector_index;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/index/sparse_index/sparse_index_config.rs
lib/segment/src/index/sparse_index/sparse_index_config.rs
use std::path::{Path, PathBuf}; use io::file_operations::{atomic_save_json, read_json}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use crate::common::anonymize::Anonymize; use crate::common::operation_error::OperationResult; use crate::types::VectorStorageDatatype; pub const SPARSE_INDEX_CONFIG_FILE: &str = "sparse_index_config.json"; /// Sparse index types #[derive( Default, Hash, Debug, Deserialize, Serialize, JsonSchema, Anonymize, Eq, PartialEq, Copy, Clone, )] pub enum SparseIndexType { /// Mutable RAM sparse index #[default] MutableRam, /// Immutable RAM sparse index ImmutableRam, /// Mmap sparse index Mmap, } impl SparseIndexType { pub fn is_appendable(self) -> bool { self == Self::MutableRam } pub fn is_immutable(self) -> bool { self != Self::MutableRam } pub fn is_on_disk(self) -> bool { self == Self::Mmap } pub fn is_persisted(self) -> bool { self == Self::Mmap || self == Self::ImmutableRam } } /// Configuration for sparse inverted index. #[derive( Debug, Deserialize, Serialize, JsonSchema, Anonymize, Copy, Clone, PartialEq, Eq, Default, )] #[serde(rename_all = "snake_case")] pub struct SparseIndexConfig { /// We prefer a full scan search upto (excluding) this number of vectors. /// /// Note: this is number of vectors, not KiloBytes. #[anonymize(false)] pub full_scan_threshold: Option<usize>, /// Type of sparse index pub index_type: SparseIndexType, /// Datatype used to store weights in the index. #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub datatype: Option<VectorStorageDatatype>, } impl SparseIndexConfig { pub fn new( full_scan_threshold: Option<usize>, index_type: SparseIndexType, datatype: Option<VectorStorageDatatype>, ) -> Self { SparseIndexConfig { full_scan_threshold, index_type, datatype, } } pub fn get_config_path(path: &Path) -> PathBuf { path.join(SPARSE_INDEX_CONFIG_FILE) } pub fn load(path: &Path) -> OperationResult<Self> { Ok(read_json(path)?) 
} pub fn save(&self, path: &Path) -> OperationResult<()> { Ok(atomic_save_json(path, self)?) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/payload_fixtures.rs
lib/segment/src/fixtures/payload_fixtures.rs
use std::ops::{Range, RangeInclusive}; use fnv::FnvBuildHasher; use indexmap::IndexSet; use itertools::Itertools; use ordered_float::OrderedFloat; use rand::Rng; use rand::distr::{Alphanumeric, SampleString}; use rand::seq::IndexedRandom; use serde_json::{Value, json}; use crate::data_types::vectors::{DenseVector, MultiDenseVectorInternal, VectorElementType}; use crate::payload_json; use crate::types::{ AnyVariants, Condition, ExtendedPointId, FieldCondition, Filter, HasIdCondition, IsEmptyCondition, Match, MatchAny, Payload, PayloadField, Range as RangeCondition, ValuesCount, }; const ADJECTIVE: &[&str] = &[ "jobless", "rightful", "breakable", "impartial", "shocking", "faded", "phobic", "overt", "like", "wide-eyed", "broad", ]; const NOUN: &[&str] = &[ "territory", "jam", "neck", "chicken", "cap", "kiss", "veil", "trail", "size", "digestion", "rod", "seed", ]; const INT_RANGE: Range<i64> = 0..500; pub const LON_RANGE: Range<f64> = -180.0..180.0; pub const LAT_RANGE: Range<f64> = -90.0..90.0; pub const STR_KEY: &str = "kvd"; pub const STR_PROJ_KEY: &str = "kvd_proj"; pub const STR_ROOT_PROJ_KEY: &str = "kvd_root_proj"; pub const INT_KEY: &str = "int"; pub const INT_KEY_2: &str = "int2"; pub const INT_KEY_3: &str = "int3"; pub const FLT_KEY: &str = "flt"; pub const FLICKING_KEY: &str = "flicking"; pub const GEO_KEY: &str = "geo"; pub const TEXT_KEY: &str = "text"; pub const BOOL_KEY: &str = "bool"; pub fn random_adj<R: Rng + ?Sized>(rnd_gen: &mut R) -> String { ADJECTIVE.choose(rnd_gen).copied().unwrap().to_string() } pub fn random_keyword<R: Rng + ?Sized>(rnd_gen: &mut R) -> String { let random_adj = ADJECTIVE.choose(rnd_gen).unwrap(); let random_noun = NOUN.choose(rnd_gen).unwrap(); format!("{random_adj} {random_noun}") } pub fn random_keyword_payload<R: Rng + ?Sized>( rnd_gen: &mut R, num_values: RangeInclusive<usize>, ) -> Value { let sample_num_values = rnd_gen.random_range(num_values); if sample_num_values > 1 { Value::Array( (0..sample_num_values) .map(|_| 
Value::String(random_keyword(rnd_gen))) .collect(), ) } else { Value::String(random_keyword(rnd_gen)) } } pub fn random_int_payload<R: Rng + ?Sized>( rnd_gen: &mut R, num_values: RangeInclusive<usize>, ) -> Vec<i64> { (0..rnd_gen.random_range(num_values)) .map(|_| rnd_gen.random_range(INT_RANGE)) .collect_vec() } pub fn random_geo_payload<R: Rng + ?Sized>( rnd_gen: &mut R, num_values: RangeInclusive<usize>, ) -> Vec<Value> { (0..rnd_gen.random_range(num_values)) .map(|_| { json!( { "lon": rnd_gen.random_range(LON_RANGE), "lat": rnd_gen.random_range(LAT_RANGE), }) }) .collect_vec() } pub fn random_full_text_payload<R: Rng + ?Sized>( rnd_gen: &mut R, num_values: RangeInclusive<usize>, value_size: RangeInclusive<usize>, ) -> Vec<Value> { (0..rnd_gen.random_range(num_values)) .map(|_| { let size = rnd_gen.random_range(value_size.clone()); let keyword = rnd_gen .sample_iter(rand::distr::Alphabetic) .map(|c| c as char) .take(size) .collect::<String>(); Value::from(keyword) }) .collect_vec() } pub fn random_bool_payload<R: Rng + ?Sized>( rnd_gen: &mut R, num_values: RangeInclusive<usize>, ) -> Vec<Value> { (0..rnd_gen.random_range(num_values)) .map(|_| Value::Bool(rnd_gen.random())) .collect_vec() } pub fn random_vector<R: Rng + ?Sized>(rng: &mut R, size: usize) -> DenseVector { (0..size).map(|_| rng.random()).collect() } pub fn random_dense_byte_vector<R: Rng + ?Sized>(rnd_gen: &mut R, size: usize) -> DenseVector { (0..size) .map(|_| { rnd_gen .random_range::<VectorElementType, _>(0.0..=255.0) .round() }) .collect() } pub fn random_multi_vector<R: Rng + ?Sized>( rng: &mut R, vector_size: usize, num_vector_per_points: usize, ) -> MultiDenseVectorInternal { let mut vectors = vec![]; for _ in 0..num_vector_per_points { let vec = random_vector(rng, vector_size); vectors.extend(vec); } MultiDenseVectorInternal::new(vectors, vector_size) } pub fn random_uncommon_condition<R: Rng + ?Sized>(rnd_gen: &mut R) -> Condition { let switch = rnd_gen.random_range(0..=3); match switch { 
0 => Condition::Field(FieldCondition::new_values_count( STR_KEY.parse().unwrap(), ValuesCount { lt: None, gt: None, gte: Some(3), lte: None, }, )), 1 => Condition::Field(FieldCondition::new_values_count( STR_KEY.parse().unwrap(), ValuesCount { lt: None, gt: None, gte: None, lte: Some(2), }, )), 2 => Condition::HasId(HasIdCondition { has_id: (0..rnd_gen.random_range(10..50)) .map(|_| ExtendedPointId::NumId(rnd_gen.random_range(0..1000))) .collect(), }), 3 => Condition::IsEmpty(IsEmptyCondition { is_empty: PayloadField { key: FLICKING_KEY.parse().unwrap(), }, }), _ => unreachable!(), } } pub fn random_simple_condition<R: Rng + ?Sized>(rnd_gen: &mut R) -> Condition { let str_or_int: bool = rnd_gen.random(); if str_or_int { let kv_or_txt: bool = rnd_gen.random(); if kv_or_txt { Condition::Field(FieldCondition::new_match( STR_KEY.parse().unwrap(), random_keyword(rnd_gen).into(), )) } else { Condition::Field(FieldCondition::new_match( TEXT_KEY.parse().unwrap(), Match::Text(random_adj(rnd_gen).into()), )) } } else { Condition::Field(FieldCondition::new_range( INT_KEY.parse().unwrap(), RangeCondition { lt: None, gt: None, gte: Some(OrderedFloat(rnd_gen.random_range(INT_RANGE) as f64)), lte: Some(OrderedFloat(rnd_gen.random_range(INT_RANGE) as f64)), }, )) } } pub fn random_condition<R: Rng + ?Sized>(rnd_gen: &mut R) -> Condition { let is_simple: bool = rnd_gen.random_range(0..100) < 80; if is_simple { random_simple_condition(rnd_gen) } else { random_uncommon_condition(rnd_gen) } } pub fn random_must_filter<R: Rng + ?Sized>(rnd_gen: &mut R, num_conditions: usize) -> Filter { let must_conditions = (0..num_conditions) .map(|_| random_simple_condition(rnd_gen)) .collect_vec(); Filter { should: None, min_should: None, must: Some(must_conditions), must_not: None, } } pub fn random_match_any_filter<R: Rng + ?Sized>( rnd_gen: &mut R, len: usize, percent_existing: f32, ) -> Filter { let num_existing = (len as f32 * (percent_existing / 100.0)) as usize; let mut values: 
IndexSet<String, FnvBuildHasher> = (0..len - num_existing) .map(|_| { let slen = rnd_gen.random_range(1..15); Alphanumeric.sample_string(rnd_gen, slen) }) .collect(); values.extend((0..num_existing).map(|_| random_keyword(rnd_gen))); Filter { should: None, must: Some(vec![Condition::Field(FieldCondition::new_match( STR_KEY.parse().unwrap(), Match::Any(MatchAny { any: AnyVariants::Strings(values), }), ))]), must_not: None, min_should: None, } } pub fn random_filter<R: Rng + ?Sized>(rnd_gen: &mut R, total_conditions: usize) -> Filter { let num_should = rnd_gen.random_range(0..=total_conditions); let num_must = total_conditions - num_should; let should_conditions = (0..num_should) .map(|_| random_condition(rnd_gen)) .collect_vec(); let should_conditions_opt = if !should_conditions.is_empty() { Some(should_conditions) } else { None }; let must_conditions = (0..num_must) .map(|_| random_condition(rnd_gen)) .collect_vec(); let must_conditions_opt = if !must_conditions.is_empty() { Some(must_conditions) } else { None }; Filter { should: should_conditions_opt, min_should: None, must: must_conditions_opt, must_not: None, } } pub fn random_nested_filter<R: Rng + ?Sized>(rnd_gen: &mut R) -> Filter { let nested_or_proj: bool = rnd_gen.random(); let nested_str_key = if nested_or_proj { format!("{}.{}.{}", STR_KEY, "nested_1", "nested_2") } else { format!("{}.{}[].{}", STR_PROJ_KEY, "nested_1", "nested_2") }; let condition = Condition::Field(FieldCondition::new_match( nested_str_key.parse().unwrap(), random_keyword(rnd_gen).into(), )); Filter::new_should(condition) } pub fn generate_diverse_payload<R: Rng + ?Sized>(rnd_gen: &mut R) -> Payload { if rnd_gen.random_range(0.0..1.0) < 0.5 { payload_json! 
{ STR_KEY: random_keyword_payload(rnd_gen, 1..=3), INT_KEY: random_int_payload(rnd_gen, 1..=3), INT_KEY_2: random_int_payload(rnd_gen, 1..=2), INT_KEY_3: random_int_payload(rnd_gen, 1..=2), FLT_KEY: rnd_gen.random_range(0.0..10.0), GEO_KEY: random_geo_payload(rnd_gen, 1..=3), TEXT_KEY: random_keyword_payload(rnd_gen, 1..=1), BOOL_KEY: random_bool_payload(rnd_gen, 1..=1), } } else { payload_json! { STR_KEY: random_keyword_payload(rnd_gen, 1..=2), INT_KEY: random_int_payload(rnd_gen, 1..=3), INT_KEY_2: random_int_payload(rnd_gen, 1..=2), INT_KEY_3: random_int_payload(rnd_gen, 1..=2), FLT_KEY: rnd_gen.random_range(0.0..10.0), GEO_KEY: random_geo_payload(rnd_gen, 1..=3), TEXT_KEY: random_keyword_payload(rnd_gen, 1..=1), BOOL_KEY: random_bool_payload(rnd_gen, 1..=2), FLICKING_KEY: random_int_payload(rnd_gen, 1..=3) } } } pub fn generate_diverse_nested_payload<R: Rng + ?Sized>(rnd_gen: &mut R) -> Payload { payload_json! { STR_KEY: { "nested_1": { "nested_2": random_keyword_payload(rnd_gen, 1..=3) } }, STR_PROJ_KEY: { "nested_1": [ { "nested_2": random_keyword_payload(rnd_gen, 1..=3) } ] }, STR_ROOT_PROJ_KEY: [ { "nested_1": [ { "nested_2": random_keyword_payload(rnd_gen, 1..=3) } ] } ], } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/payload_context_fixture.rs
lib/segment/src/fixtures/payload_context_fixture.rs
use std::collections::HashMap; use std::iter; use std::path::{Path, PathBuf}; use std::sync::Arc; use atomic_refcell::AtomicRefCell; use bitvec::prelude::{BitSlice, BitVec}; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use rand::SeedableRng; use rand::prelude::StdRng; use super::payload_fixtures::BOOL_KEY; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::fixtures::payload_fixtures::{ FLT_KEY, GEO_KEY, INT_KEY, STR_KEY, TEXT_KEY, generate_diverse_payload, }; use crate::id_tracker::IdTracker; use crate::index::PayloadIndex; use crate::index::plain_payload_index::PlainPayloadIndex; use crate::index::struct_payload_index::StructPayloadIndex; use crate::payload_storage::PayloadStorage; use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage; use crate::payload_storage::query_checker::SimpleConditionChecker; use crate::types::{PayloadSchemaType, PointIdType, SeqNumberType}; /// Warn: Use for tests only /// /// This struct mimics the interface of `PointsIterator` and `IdTracker` only for basic cases #[derive(Debug)] pub struct FixtureIdTracker { ids: Vec<PointOffsetType>, deleted: BitVec, deleted_count: usize, } impl FixtureIdTracker { pub fn new(num_points: usize) -> Self { Self { ids: (0..num_points).map(|x| x as PointOffsetType).collect(), deleted: BitVec::repeat(false, num_points), deleted_count: 0, } } } impl IdTracker for FixtureIdTracker { fn internal_version(&self, _internal_id: PointOffsetType) -> Option<SeqNumberType> { Some(0) } fn set_internal_version( &mut self, _internal_id: PointOffsetType, _version: SeqNumberType, ) -> OperationResult<()> { Ok(()) } fn internal_id(&self, external_id: PointIdType) -> Option<PointOffsetType> { match external_id { PointIdType::NumId(id) => { assert!(id < self.ids.len() as u64); let internal_id = id as PointOffsetType; (!self.is_deleted_point(internal_id)).then_some(internal_id) } PointIdType::Uuid(_) => 
unreachable!(), } } fn external_id(&self, internal_id: PointOffsetType) -> Option<PointIdType> { assert!(internal_id < self.ids.len() as PointOffsetType); let external_id = PointIdType::NumId(u64::from(internal_id)); (!self.is_deleted_point(internal_id)).then_some(external_id) } fn set_link( &mut self, _external_id: PointIdType, _internal_id: PointOffsetType, ) -> OperationResult<()> { Ok(()) } fn drop(&mut self, external_id: PointIdType) -> OperationResult<()> { let internal_id = self.internal_id(external_id).unwrap(); self.drop_internal(internal_id) } fn drop_internal(&mut self, internal_id: PointOffsetType) -> OperationResult<()> { if !self.deleted.replace(internal_id as usize, true) { self.deleted_count += 1; } self.set_internal_version(internal_id, 0)?; Ok(()) } fn iter_external(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { Box::new( self.ids .iter() .copied() .filter(|internal_id| !self.is_deleted_point(*internal_id)) .map(|internal_id| PointIdType::NumId(u64::from(internal_id))), ) } fn iter_internal(&self) -> Box<dyn Iterator<Item = PointOffsetType> + '_> { Box::new( self.ids .iter() .copied() .filter(|internal_id| !self.is_deleted_point(*internal_id)), ) } fn iter_from( &self, external_id: Option<PointIdType>, ) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { let start = match external_id { None => 0, Some(id) => match id { PointIdType::NumId(num) => num, PointIdType::Uuid(_) => unreachable!(), }, } as PointOffsetType; Box::new( self.ids .iter() .copied() .skip_while(move |internal_id| *internal_id < start) .filter(|internal_id| !self.is_deleted_point(*internal_id)) .map(|internal_id| (PointIdType::NumId(u64::from(internal_id)), internal_id)), ) } fn iter_random(&self) -> Box<dyn Iterator<Item = (PointIdType, PointOffsetType)> + '_> { unimplemented!("Not used for tests yet") } fn total_point_count(&self) -> usize { self.ids.len() } fn deleted_point_count(&self) -> usize { self.deleted_count } fn mapping_flusher(&self) -> Flusher 
{ Box::new(|| Ok(())) } fn versions_flusher(&self) -> Flusher { Box::new(|| Ok(())) } fn is_deleted_point(&self, key: PointOffsetType) -> bool { let key = key as usize; if key >= self.deleted.len() { return true; } self.deleted[key] } fn deleted_point_bitslice(&self) -> &BitSlice { &self.deleted } fn iter_internal_versions( &self, ) -> Box<dyn Iterator<Item = (PointOffsetType, SeqNumberType)> + '_> { Box::new(iter::empty()) } fn fix_inconsistencies(&mut self) -> OperationResult<Vec<PointOffsetType>> { // This structure does not support cleaning up orphan versions Ok(vec![]) } fn name(&self) -> &'static str { "fixture id tracker" } fn files(&self) -> Vec<PathBuf> { vec![] } } /// Creates in-memory payload storage and fills it with random points /// /// # Arguments /// /// * `num_points` - how many random points to insert /// /// # Result /// /// Payload storage fixture /// pub fn create_payload_storage_fixture(num_points: usize, seed: u64) -> InMemoryPayloadStorage { let mut payload_storage = InMemoryPayloadStorage::default(); let mut rng = StdRng::seed_from_u64(seed); let hw_counter = HardwareCounterCell::new(); for id in 0..num_points { let payload = generate_diverse_payload(&mut rng); payload_storage .set(id as PointOffsetType, &payload, &hw_counter) .unwrap(); } payload_storage } /// Function generates `PlainPayloadIndex` with random payload for testing /// /// # Arguments /// /// * `path` - temp directory path /// * `num_points` - how many payloads generate? 
/// /// # Result /// /// `PlainPayloadIndex` /// pub fn create_plain_payload_index(path: &Path, num_points: usize, seed: u64) -> PlainPayloadIndex { let payload_storage = create_payload_storage_fixture(num_points, seed); let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(num_points))); let condition_checker = Arc::new(SimpleConditionChecker::new( Arc::new(AtomicRefCell::new(payload_storage.into())), id_tracker.clone(), HashMap::new(), )); PlainPayloadIndex::open(condition_checker, id_tracker, path).unwrap() } /// Function generates `StructPayloadIndex` with random payload for testing. /// It will also create indexes for payloads /// /// # Arguments /// /// * `path` - temp directory path /// * `num_points` - how many payloads generate? /// /// # Result /// /// `StructPayloadIndex` /// pub fn create_struct_payload_index( path: &Path, num_points: usize, seed: u64, ) -> StructPayloadIndex { let payload_storage = Arc::new(AtomicRefCell::new( create_payload_storage_fixture(num_points, seed).into(), )); let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(num_points))); let mut index = StructPayloadIndex::open( payload_storage, id_tracker, std::collections::HashMap::new(), path, true, true, ) .unwrap(); let hw_counter = HardwareCounterCell::new(); index .set_indexed( &STR_KEY.parse().unwrap(), PayloadSchemaType::Keyword, &hw_counter, ) .unwrap(); index .set_indexed( &INT_KEY.parse().unwrap(), PayloadSchemaType::Integer, &hw_counter, ) .unwrap(); index .set_indexed( &FLT_KEY.parse().unwrap(), PayloadSchemaType::Float, &hw_counter, ) .unwrap(); index .set_indexed( &GEO_KEY.parse().unwrap(), PayloadSchemaType::Geo, &hw_counter, ) .unwrap(); index .set_indexed( &TEXT_KEY.parse().unwrap(), PayloadSchemaType::Text, &hw_counter, ) .unwrap(); index .set_indexed( &BOOL_KEY.parse().unwrap(), PayloadSchemaType::Bool, &hw_counter, ) .unwrap(); index }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/sparse_fixtures.rs
lib/segment/src/fixtures/sparse_fixtures.rs
use std::fmt::Debug; use std::path::Path; use std::sync::Arc; use std::sync::atomic::AtomicBool; use atomic_refcell::AtomicRefCell; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use rand::Rng; use sparse::common::sparse_vector::SparseVector; use sparse::common::sparse_vector_fixture::random_sparse_vector; use sparse::index::inverted_index::InvertedIndex; use crate::common::operation_error::OperationResult; use crate::fixtures::payload_context_fixture::FixtureIdTracker; use crate::index::VectorIndex; use crate::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType}; use crate::index::sparse_index::sparse_vector_index::{ SparseVectorIndex, SparseVectorIndexOpenArgs, }; use crate::index::struct_payload_index::StructPayloadIndex; use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage; use crate::vector_storage::sparse::mmap_sparse_vector_storage::MmapSparseVectorStorage; use crate::vector_storage::{VectorStorage, VectorStorageEnum}; /// Prepares a sparse vector index with a given iterator of sparse vectors pub fn fixture_sparse_index_from_iter<I: InvertedIndex>( data_dir: &Path, vectors: impl ExactSizeIterator<Item = SparseVector>, full_scan_threshold: usize, index_type: SparseIndexType, ) -> OperationResult<SparseVectorIndex<I>> { let stopped = AtomicBool::new(false); // directories let index_dir = &data_dir.join("index"); let payload_dir = &data_dir.join("payload"); let storage_dir = &data_dir.join("storage"); // setup let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(vectors.len()))); let payload_storage = InMemoryPayloadStorage::default(); let wrapped_payload_storage = Arc::new(AtomicRefCell::new(payload_storage.into())); let payload_index = StructPayloadIndex::open( wrapped_payload_storage, id_tracker.clone(), std::collections::HashMap::new(), payload_dir, true, true, )?; let wrapped_payload_index = Arc::new(AtomicRefCell::new(payload_index)); let 
vector_storage = Arc::new(AtomicRefCell::new(VectorStorageEnum::SparseMmap( MmapSparseVectorStorage::open_or_create(storage_dir)?, ))); let mut borrowed_storage = vector_storage.borrow_mut(); let num_vectors = vectors.len(); let mut num_vectors_not_empty = 0; let hw_counter = HardwareCounterCell::new(); for (idx, vec) in vectors.enumerate() { borrowed_storage .insert_vector(idx as PointOffsetType, (&vec).into(), &hw_counter) .unwrap(); num_vectors_not_empty += usize::from(!vec.is_empty()); } drop(borrowed_storage); // assert all empty points are in storage assert_eq!( vector_storage.borrow().available_vector_count(), num_vectors, ); let sparse_index_config = SparseIndexConfig::new(Some(full_scan_threshold), index_type, None); let sparse_vector_index: SparseVectorIndex<I> = SparseVectorIndex::open(SparseVectorIndexOpenArgs { config: sparse_index_config, id_tracker, vector_storage: vector_storage.clone(), payload_index: wrapped_payload_index, path: index_dir, stopped: &stopped, tick_progress: || (), })?; assert_eq!( sparse_vector_index.indexed_vector_count(), num_vectors_not_empty ); Ok(sparse_vector_index) } /// Prepares a sparse vector index with random sparse vectors pub fn fixture_sparse_index<I: InvertedIndex + Debug, R: Rng + ?Sized>( rnd: &mut R, num_vectors: usize, max_dim: usize, full_scan_threshold: usize, data_dir: &Path, ) -> SparseVectorIndex<I> { fixture_sparse_index_from_iter( data_dir, (0..num_vectors).map(|_| random_sparse_vector(rnd, max_dim)), full_scan_threshold, SparseIndexType::ImmutableRam, ) .unwrap() } #[macro_export] macro_rules! 
fixture_for_all_indices { ($test:ident::<_>($($args:tt)*)) => { eprintln!("InvertedIndexCompressedImmutableRam<f32>"); $test::< ::sparse::index::inverted_index::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam<f32> >($($args)*); eprintln!("InvertedIndexCompressedMmap<f32>"); $test::< ::sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap<f32> >($($args)*); eprintln!("InvertedIndexImmutableRam"); $test::< ::sparse::index::inverted_index::inverted_index_immutable_ram::InvertedIndexImmutableRam >($($args)*); eprintln!("InvertedIndexMmap"); $test::< ::sparse::index::inverted_index::inverted_index_mmap::InvertedIndexMmap >($($args)*); }; }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/segment_fixtures.rs
lib/segment/src/fixtures/segment_fixtures.rs
use std::path::Path; use common::counter::hardware_counter::HardwareCounterCell; use crate::data_types::named_vectors::NamedVectors; use crate::data_types::vectors::DEFAULT_VECTOR_NAME; use crate::entry::entry_point::SegmentEntry; use crate::fixtures::index_fixtures::random_vector; use crate::fixtures::payload_fixtures::generate_diverse_payload; use crate::segment::Segment; use crate::segment_constructor::simple_segment_constructor::build_simple_segment; use crate::types::Distance; pub fn random_segment(path: &Path, num_points: usize) -> Segment { let dim = 4; let distance = Distance::Dot; let mut rnd_gen = rand::rng(); let mut segment = build_simple_segment(path, dim, distance).unwrap(); let hw_counter = HardwareCounterCell::new(); for point_id in 0..num_points { let vector = random_vector(&mut rnd_gen, dim); let payload = generate_diverse_payload(&mut rnd_gen); segment .upsert_point( 100, (point_id as u64).into(), NamedVectors::from_ref(DEFAULT_VECTOR_NAME, vector.as_slice().into()), &hw_counter, ) .unwrap(); segment .set_payload(100, (point_id as u64).into(), &payload, &None, &hw_counter) .unwrap(); } segment }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/index_fixtures.rs
lib/segment/src/fixtures/index_fixtures.rs
use std::sync::atomic::AtomicBool; use bitvec::prelude::BitVec; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use rand::Rng; use crate::data_types::vectors::{DenseVector, QueryVector, VectorElementType, VectorRef}; use crate::index::hnsw_index::graph_links::StorageGraphLinksVectors; use crate::index::hnsw_index::point_scorer::FilteredScorer; use crate::types::{Distance, ScalarQuantizationConfig}; use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage; use crate::vector_storage::quantized::quantized_vectors::{ QuantizedVectors, QuantizedVectorsStorageType, }; use crate::vector_storage::{VectorStorage, VectorStorageEnum}; pub fn random_vector<R: Rng + ?Sized>(rnd_gen: &mut R, size: usize) -> DenseVector { (0..size).map(|_| rnd_gen.random_range(-1.0..1.0)).collect() } pub struct TestRawScorerProducer { storage: VectorStorageEnum, deleted_points: BitVec, quantized_vectors: Option<QuantizedVectors>, } impl TestRawScorerProducer { pub fn new<R: Rng + ?Sized>( dim: usize, distance: Distance, num_vectors: usize, use_quantization: bool, rng: &mut R, ) -> Self { let mut storage = new_volatile_dense_vector_storage(dim, distance); let hw_counter = HardwareCounterCell::new(); for offset in 0..num_vectors as PointOffsetType { let rnd_vec = random_vector(rng, dim); let rnd_vec = distance.preprocess_vector::<VectorElementType>(rnd_vec); storage .insert_vector(offset, VectorRef::from(&rnd_vec), &hw_counter) .unwrap(); } let quantized_vectors = use_quantization.then(|| { QuantizedVectors::create( &storage, &ScalarQuantizationConfig { r#type: Default::default(), quantile: None, always_ram: Some(true), } .into(), QuantizedVectorsStorageType::Immutable, // NOTE: In general case, we should keep the temporary directory // as long as the QuantizedVectors instance is alive. 
But as for // now, for this configuration, QuantizedVectors does not touch // the file system (except during the creation), so we can drop // the directory immediately. tempfile::tempdir().unwrap().path(), 1, &AtomicBool::new(false), ) .unwrap() }); TestRawScorerProducer { storage, deleted_points: BitVec::repeat(false, num_vectors), quantized_vectors, } } pub fn storage(&self) -> &VectorStorageEnum { &self.storage } pub fn quantized_vectors(&self) -> Option<&QuantizedVectors> { self.quantized_vectors.as_ref() } pub fn graph_links_vectors(&self) -> Option<StorageGraphLinksVectors<'_>> { StorageGraphLinksVectors::try_new(&self.storage, self.quantized_vectors.as_ref()) } pub fn scorer(&self, query: impl Into<QueryVector>) -> FilteredScorer<'_> { FilteredScorer::new( query.into(), &self.storage, self.quantized_vectors.as_ref(), None, &self.deleted_points, HardwareCounterCell::new(), ) .unwrap() } pub fn internal_scorer(&self, point_id: PointOffsetType) -> FilteredScorer<'_> { FilteredScorer::new_internal( point_id, &self.storage, self.quantized_vectors.as_ref(), None, &self.deleted_points, HardwareCounterCell::new(), ) .unwrap() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/mod.rs
lib/segment/src/fixtures/mod.rs
pub mod index_fixtures; pub mod payload_context_fixture; pub mod payload_fixtures; pub mod query_fixtures; pub mod segment_fixtures; pub mod sparse_fixtures;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/fixtures/query_fixtures.rs
lib/segment/src/fixtures/query_fixtures.rs
use itertools::Itertools; use rand::Rng; use crate::data_types::vectors::{QueryVector, VectorInternal}; use crate::fixtures::payload_fixtures::random_multi_vector; use crate::vector_storage::query::{ContextPair, ContextQuery, DiscoveryQuery, RecoQuery}; const MAX_EXAMPLE_PAIRS: usize = 4; pub enum QueryVariant { Nearest, RecoBestScore, RecoSumScores, Discovery, Context, } pub fn random_query<R: Rng + ?Sized>( variant: &QueryVariant, rng: &mut R, rand_vec: impl Fn(&mut R) -> VectorInternal, ) -> QueryVector { match variant { QueryVariant::Nearest => rand_vec(rng).into(), QueryVariant::Discovery => random_discovery_query(rng, rand_vec), QueryVariant::Context => random_context_query(rng, rand_vec), QueryVariant::RecoBestScore => { QueryVector::RecommendBestScore(random_reco_query(rng, rand_vec)) } QueryVariant::RecoSumScores => { QueryVector::RecommendSumScores(random_reco_query(rng, rand_vec)) } } } pub fn random_multi_vec_query<R: Rng + ?Sized>( variant: &QueryVariant, rng: &mut R, dim: usize, num_vector_per_points: usize, ) -> QueryVector { let rand_vec = move |rng: &mut R| -> VectorInternal { random_multi_vector(rng, dim, num_vector_per_points).into() }; random_query(variant, rng, rand_vec) } fn random_discovery_query<R: Rng + ?Sized>( rng: &mut R, rand_vec: impl Fn(&mut R) -> VectorInternal, ) -> QueryVector { let num_pairs: usize = rng.random_range(1..MAX_EXAMPLE_PAIRS); let target = rand_vec(rng); let pairs = (0..num_pairs) .map(|_| { let positive = rand_vec(rng); let negative = rand_vec(rng); ContextPair { positive, negative } }) .collect_vec(); DiscoveryQuery::new(target, pairs).into() } fn random_context_query<R: Rng + ?Sized>( rng: &mut R, rand_vec: impl Fn(&mut R) -> VectorInternal, ) -> QueryVector { let num_pairs: usize = rng.random_range(0..MAX_EXAMPLE_PAIRS); let pairs = (0..num_pairs) .map(|_| { let positive = rand_vec(rng); let negative = rand_vec(rng); ContextPair { positive, negative } }) .collect_vec(); 
QueryVector::Context(ContextQuery::new(pairs)) } fn random_reco_query<R: Rng + ?Sized>( rng: &mut R, rand_vec: impl Fn(&mut R) -> VectorInternal, ) -> RecoQuery<VectorInternal> { let num_examples: usize = rng.random_range(1..MAX_EXAMPLE_PAIRS); let positive = (0..num_examples).map(|_| rand_vec(rng)).collect_vec(); let negative = (0..num_examples).map(|_| rand_vec(rng)).collect_vec(); RecoQuery::new(positive, negative) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/scroll.rs
lib/segment/src/segment/scroll.rs
use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::iterator_ext::IteratorExt; use super::Segment; use crate::entry::entry_point::SegmentEntry; use crate::index::PayloadIndex; use crate::spaces::tools::peek_top_smallest_iterable; use crate::types::{Filter, PointIdType}; impl Segment { /// Estimates how many checks it would need for getting `limit` amount of points by streaming and then /// filtering, versus getting all filtered points from the index and then sorting them afterwards. /// /// If the filter is restrictive enough to yield fewer points than the amount of points a streaming /// approach would need to advance, it returns true. pub(super) fn should_pre_filter( &self, filter: &Filter, limit: Option<usize>, hw_counter: &HardwareCounterCell, ) -> bool { let query_cardinality = { let payload_index = self.payload_index.borrow(); payload_index.estimate_cardinality(filter, hw_counter) }; // ToDo: Add telemetry for this heuristics // Calculate expected number of condition checks required for // this scroll request with is stream strategy. // Example: // - cardinality = 1000 // - limit = 10 // - total = 10000 // - point filter prob = 1000 / 10000 = 0.1 // - expected_checks = 10 / 0.1 = 100 // ------------------------------- // - cardinality = 10 // - limit = 10 // - total = 10000 // - point filter prob = 10 / 10000 = 0.001 // - expected_checks = 10 / 0.001 = 10000 let available_points = self.available_point_count() + 1 /* + 1 for division-by-zero */; // Expected number of successful checks per point let check_probability = (query_cardinality.exp as f64 + 1.0/* protect from zero */) / available_points as f64; let exp_stream_checks = (limit.unwrap_or(available_points) as f64 / check_probability) as usize; // Assume it would require about `query cardinality` checks. // We are interested in approximate number of checks, so we can // use `query cardinality` as a starting point. 
let exp_index_checks = query_cardinality.max; exp_stream_checks > exp_index_checks } pub fn filtered_read_by_id_stream( &self, offset: Option<PointIdType>, limit: Option<usize>, condition: &Filter, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { let payload_index = self.payload_index.borrow(); let filter_context = payload_index.filter_context(condition, hw_counter); self.id_tracker .borrow() .iter_from(offset) .stop_if(is_stopped) .filter(move |(_, internal_id)| filter_context.check(*internal_id)) .map(|(external_id, _)| external_id) .take(limit.unwrap_or(usize::MAX)) .collect() } pub(super) fn read_by_id_stream( &self, offset: Option<PointIdType>, limit: Option<usize>, ) -> Vec<PointIdType> { self.id_tracker .borrow() .iter_from(offset) .map(|x| x.0) .take(limit.unwrap_or(usize::MAX)) .collect() } pub fn filtered_read_by_index( &self, offset: Option<PointIdType>, limit: Option<usize>, condition: &Filter, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { let payload_index = self.payload_index.borrow(); let id_tracker = self.id_tracker.borrow(); let cardinality_estimation = payload_index.estimate_cardinality(condition, hw_counter); let ids_iterator = payload_index .iter_filtered_points( condition, &*id_tracker, &cardinality_estimation, hw_counter, is_stopped, ) .filter_map(|internal_id| { let external_id = id_tracker.external_id(internal_id); match external_id { Some(external_id) => match offset { Some(offset) if external_id < offset => None, _ => Some(external_id), }, None => None, } }); let mut page = match limit { Some(limit) => peek_top_smallest_iterable(ids_iterator, limit), None => ids_iterator.collect(), }; page.sort_unstable(); page } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/tests.rs
lib/segment/src/segment/tests.rs
use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::tar_ext; use fs_err as fs; use fs_err::File; use rstest::rstest; use tempfile::Builder; use super::*; use crate::common::operation_error::OperationError::PointIdError; use crate::common::{check_named_vectors, check_vector, check_vector_name}; use crate::data_types::named_vectors::NamedVectors; use crate::data_types::query_context::QueryContext; use crate::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector}; use crate::entry::SnapshotEntry as _; use crate::entry::entry_point::SegmentEntry; use crate::segment_constructor::load_segment; use crate::segment_constructor::simple_segment_constructor::{ VECTOR1_NAME, VECTOR2_NAME, build_multivec_segment, build_simple_segment, }; use crate::types::{Distance, Filter, Payload, SnapshotFormat, WithPayload, WithVector}; #[test] fn test_search_batch_equivalence_single() { let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let dim = 4; let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap(); let hw_counter = HardwareCounterCell::new(); let vec4 = vec![1.1, 1.0, 0.0, 1.0]; segment .upsert_point(100, 4.into(), only_default_vector(&vec4), &hw_counter) .unwrap(); let vec6 = vec![1.0, 1.0, 0.5, 1.0]; segment .upsert_point(101, 6.into(), only_default_vector(&vec6), &hw_counter) .unwrap(); segment.delete_point(102, 1.into(), &hw_counter).unwrap(); let query_vector = [1.0, 1.0, 1.0, 1.0].into(); let search_result = segment .search( DEFAULT_VECTOR_NAME, &query_vector, &WithPayload::default(), &false.into(), None, 10, None, ) .unwrap(); eprintln!("search_result = {search_result:#?}"); let query_context = QueryContext::default(); let segment_query_context = query_context.get_segment_query_context(); let search_batch_result = segment .search_batch( DEFAULT_VECTOR_NAME, &[&query_vector], &WithPayload::default(), &false.into(), None, 10, None, &segment_query_context, ) .unwrap(); 
eprintln!("search_batch_result = {search_batch_result:#?}"); assert!(!search_result.is_empty()); assert_eq!(search_result, search_batch_result[0].clone()); } #[test] fn test_from_filter_attributes() { let data = r#" { "name": "John Doe", "age": 43, "metadata": { "height": 50, "width": 60 } }"#; let dir = Builder::new().prefix("payload_dir").tempdir().unwrap(); let dim = 2; let hw_counter = HardwareCounterCell::new(); let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap(); segment .upsert_point(0, 0.into(), only_default_vector(&[1.0, 1.0]), &hw_counter) .unwrap(); let payload: Payload = serde_json::from_str(data).unwrap(); segment .set_full_payload(0, 0.into(), &payload, &hw_counter) .unwrap(); let filter_valid_str = r#" { "must": [ { "key": "metadata.height", "match": { "value": 50 } } ] }"#; let filter_valid: Filter = serde_json::from_str(filter_valid_str).unwrap(); let filter_invalid_str = r#" { "must": [ { "key": "metadata.height", "match": { "value": 60 } } ] }"#; let filter_invalid: Filter = serde_json::from_str(filter_invalid_str).unwrap(); let results_with_valid_filter = segment .search( DEFAULT_VECTOR_NAME, &[1.0, 1.0].into(), &WithPayload::default(), &false.into(), Some(&filter_valid), 1, None, ) .unwrap(); assert_eq!(results_with_valid_filter.len(), 1); assert_eq!(results_with_valid_filter.first().unwrap().id, 0.into()); let results_with_invalid_filter = segment .search( DEFAULT_VECTOR_NAME, &[1.0, 1.0].into(), &WithPayload::default(), &false.into(), Some(&filter_invalid), 1, None, ) .unwrap(); assert!(results_with_invalid_filter.is_empty()); } #[rstest] #[case::regular(SnapshotFormat::Regular)] #[case::streamable(SnapshotFormat::Streamable)] fn test_snapshot(#[case] format: SnapshotFormat) { let _ = env_logger::builder().is_test(true).try_init(); let data = r#" { "name": "John Doe", "age": 43, "metadata": { "height": 50, "width": 60 } }"#; let segment_base_dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let 
hw_counter = HardwareCounterCell::new(); let mut segment = build_simple_segment(segment_base_dir.path(), 2, Distance::Dot).unwrap(); segment .upsert_point(0, 0.into(), only_default_vector(&[1.0, 1.0]), &hw_counter) .unwrap(); segment .set_full_payload( 1, 0.into(), &serde_json::from_str(data).unwrap(), &hw_counter, ) .unwrap(); let temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap(); // The segment snapshot is a part of a parent collection/shard snapshot. let parent_snapshot_tar = Builder::new() .prefix("parent_snapshot") .suffix(".tar") .tempfile() .unwrap(); let segment_id = segment .current_path .file_stem() .and_then(|f| f.to_str()) .unwrap(); segment.flush(true).unwrap(); // snapshotting! let tar = tar_ext::BuilderExt::new_seekable_owned(File::create(parent_snapshot_tar.path()).unwrap()); segment .take_snapshot(temp_dir.path(), &tar, format, None) .unwrap(); tar.blocking_finish().unwrap(); let parent_snapshot_unpacked = Builder::new().prefix("parent_snapshot").tempdir().unwrap(); tar::Archive::new(File::open(parent_snapshot_tar.path()).unwrap()) .unpack(parent_snapshot_unpacked.path()) .unwrap(); // Should be exactly one entry in the snapshot. let mut entries = fs::read_dir(parent_snapshot_unpacked.path()).unwrap(); let entry = entries.next().unwrap().unwrap(); assert!(entries.next().is_none()); match format { SnapshotFormat::Ancient => unreachable!("The old days are gone"), SnapshotFormat::Regular => { assert_eq!(entry.file_name(), format!("{segment_id}.tar").as_str()); assert!(entry.path().is_file()); } SnapshotFormat::Streamable => { assert_eq!(entry.file_name(), segment_id); assert!(entry.path().is_dir()); } } // restore snapshot Segment::restore_snapshot_in_place(&entry.path()).unwrap(); // Should be exactly one entry in the snapshot. let mut entries = fs::read_dir(parent_snapshot_unpacked.path()).unwrap(); let entry = entries.next().unwrap().unwrap(); assert!(entries.next().is_none()); // It should be unpacked entry, not tar archive. 
assert!(entry.path().is_dir()); assert_eq!(entry.file_name(), segment_id); let restored_segment = load_segment(&entry.path(), &AtomicBool::new(false)) .unwrap() .unwrap(); // validate restored snapshot is the same as original segment assert_eq!( segment.total_point_count(), restored_segment.total_point_count(), ); assert_eq!( segment.available_point_count(), restored_segment.available_point_count(), ); assert_eq!( segment.deleted_point_count(), restored_segment.deleted_point_count(), ); for id in segment.iter_points() { let vectors = segment.all_vectors(id, &hw_counter).unwrap(); let restored_vectors = restored_segment.all_vectors(id, &hw_counter).unwrap(); assert_eq!(vectors, restored_vectors); let payload = segment.payload(id, &hw_counter).unwrap(); let restored_payload = restored_segment.payload(id, &hw_counter).unwrap(); assert_eq!(payload, restored_payload); } } #[test] fn test_check_consistency() { let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let dim = 4; let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap(); let hw_counter = HardwareCounterCell::new(); let vec4 = vec![1.1, 1.0, 0.0, 1.0]; segment .upsert_point(100, 4.into(), only_default_vector(&vec4), &hw_counter) .unwrap(); let vec6 = vec![1.0, 1.0, 0.5, 1.0]; segment .upsert_point(101, 6.into(), only_default_vector(&vec6), &hw_counter) .unwrap(); // first pass on consistent data segment.check_consistency_and_repair().unwrap(); let query_vector = [1.0, 1.0, 1.0, 1.0].into(); let search_result = segment .search( DEFAULT_VECTOR_NAME, &query_vector, &WithPayload::default(), &false.into(), None, 10, None, ) .unwrap(); assert_eq!(search_result.len(), 2); assert_eq!(search_result[0].id, 6.into()); assert_eq!(search_result[1].id, 4.into()); assert!( segment .vector(DEFAULT_VECTOR_NAME, 6.into(), &hw_counter) .is_ok() ); let internal_id = segment.lookup_internal_id(6.into()).unwrap(); // make id_tracker inconsistent 
segment.id_tracker.borrow_mut().drop(6.into()).unwrap(); let search_result = segment .search( DEFAULT_VECTOR_NAME, &query_vector, &WithPayload::default(), &false.into(), None, 10, None, ) .unwrap(); // only one result because of inconsistent id_tracker assert_eq!(search_result.len(), 1); assert_eq!(search_result[0].id, 4.into()); // querying by external id is broken assert!( matches!(segment.vector(DEFAULT_VECTOR_NAME, 6.into(), &hw_counter), Err(PointIdError {missed_point_id }) if missed_point_id == 6.into()) ); // but querying by internal id still works matches!( segment.vector_by_offset(DEFAULT_VECTOR_NAME, internal_id, &hw_counter), Ok(Some(_)) ); // fix segment's data segment.check_consistency_and_repair().unwrap(); // querying by internal id now consistent matches!( segment.vector_by_offset(DEFAULT_VECTOR_NAME, internal_id, &hw_counter), Ok(None) ); } #[test] fn test_point_vector_count() { let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let dim = 1; let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap(); let hw_counter = HardwareCounterCell::new(); // Insert point ID 4 and 6, assert counts segment .upsert_point(100, 4.into(), only_default_vector(&[0.4]), &hw_counter) .unwrap(); segment .upsert_point(101, 6.into(), only_default_vector(&[0.6]), &hw_counter) .unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 2); assert_eq!(segment_info.num_vectors, 2); // Delete nonexistent point, counts should remain the same segment.delete_point(102, 1.into(), &hw_counter).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 2); assert_eq!(segment_info.num_vectors, 2); // Delete point 4, counts should decrease by 1 segment.delete_point(103, 4.into(), &hw_counter).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 1); assert_eq!(segment_info.num_vectors, 2); // We don't propagate deletes to vectors at this time // // Delete vector of point 6, 
vector count should now be zero // segment // .delete_vector(104, 6.into(), DEFAULT_VECTOR_NAME) // .unwrap(); // let segment_info = segment.info(); // assert_eq!(segment_info.num_points, 1); // assert_eq!(segment_info.num_vectors, 1); } #[test] fn test_point_vector_count_multivec() { let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let dim = 1; let mut segment = build_multivec_segment(dir.path(), dim, dim, Distance::Dot).unwrap(); let hw_counter = HardwareCounterCell::new(); // Insert point ID 4 and 6 fully, 8 and 10 partially, assert counts segment .upsert_point( 100, 4.into(), NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.4]), (VECTOR2_NAME.into(), vec![0.5]), ]), &hw_counter, ) .unwrap(); segment .upsert_point( 101, 6.into(), NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.6]), (VECTOR2_NAME.into(), vec![0.7]), ]), &hw_counter, ) .unwrap(); segment .upsert_point( 102, 8.into(), NamedVectors::from_pairs([(VECTOR1_NAME.into(), vec![0.0])]), &hw_counter, ) .unwrap(); segment .upsert_point( 103, 10.into(), NamedVectors::from_pairs([(VECTOR2_NAME.into(), vec![1.0])]), &hw_counter, ) .unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 4); assert_eq!(segment_info.num_vectors, 6); // Delete nonexistent point, counts should remain the same segment.delete_point(104, 1.into(), &hw_counter).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 4); assert_eq!(segment_info.num_vectors, 6); // Delete point 4, counts should decrease by 1 segment.delete_point(105, 4.into(), &hw_counter).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 3); assert_eq!(segment_info.num_vectors, 6); // We don't propagate deletes to vectors at this time // Delete vector 'a' of point 6, vector count should decrease by 1 segment.delete_vector(106, 6.into(), VECTOR1_NAME).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 3); 
assert_eq!(segment_info.num_vectors, 5); // Deleting it again shouldn't chain anything segment.delete_vector(107, 6.into(), VECTOR1_NAME).unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 3); assert_eq!(segment_info.num_vectors, 5); // Replace vector 'a' for point 8, counts should remain the same let internal_8 = segment.lookup_internal_id(8.into()).unwrap(); segment .replace_all_vectors( internal_8, 0, &NamedVectors::from_pairs([(VECTOR1_NAME.into(), vec![0.1])]), &hw_counter, ) .unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 3); assert_eq!(segment_info.num_vectors, 5); // Replace both vectors for point 8, adding a new vector segment .replace_all_vectors( internal_8, 0, &NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.1]), (VECTOR2_NAME.into(), vec![0.1]), ]), &hw_counter, ) .unwrap(); let segment_info = segment.info(); assert_eq!(segment_info.num_points, 3); assert_eq!(segment_info.num_vectors, 6); } /// Tests segment functions to ensure invalid requests do error #[test] fn test_vector_compatibility_checks() { let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let mut segment = build_multivec_segment(dir.path(), 4, 2, Distance::Dot).unwrap(); let hw_counter = HardwareCounterCell::new(); // Insert one point for a reference internal ID let point_id = 4.into(); segment .upsert_point( 100, point_id, NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.1, 0.2, 0.3, 0.4]), (VECTOR2_NAME.into(), vec![1.0, 0.9]), ]), &hw_counter, ) .unwrap(); let internal_id = segment.lookup_internal_id(point_id).unwrap(); // A set of broken vectors let wrong_vectors_single = [ // Incorrect dimensionality (VECTOR1_NAME, vec![]), (VECTOR1_NAME, vec![0.0, 1.0, 0.0]), (VECTOR1_NAME, vec![0.0, 1.0, 0.0, 1.0, 0.0]), (VECTOR2_NAME, vec![]), (VECTOR2_NAME, vec![0.5]), (VECTOR2_NAME, vec![0.0, 0.1, 0.2, 0.3]), // Incorrect names ("aa", vec![0.0, 0.1, 0.2, 0.3]), ("bb", vec![0.0, 0.1]), ]; let 
wrong_vectors_multi = [ // Incorrect dimensionality NamedVectors::from_ref(VECTOR1_NAME, [].as_slice().into()), NamedVectors::from_ref(VECTOR1_NAME, [0.0, 1.0, 0.0].as_slice().into()), NamedVectors::from_ref(VECTOR1_NAME, [0.0, 1.0, 0.0, 1.0, 0.0].as_slice().into()), NamedVectors::from_ref(VECTOR2_NAME, [].as_slice().into()), NamedVectors::from_ref(VECTOR2_NAME, [0.5].as_slice().into()), NamedVectors::from_ref(VECTOR2_NAME, [0.0, 0.1, 0.2, 0.3].as_slice().into()), NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.1, 0.2, 0.3]), (VECTOR2_NAME.into(), vec![1.0, 0.9]), ]), NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.1, 0.2, 0.3, 0.4]), (VECTOR2_NAME.into(), vec![1.0, 0.9, 0.0]), ]), // Incorrect names NamedVectors::from_ref("aa", [0.0, 0.1, 0.2, 0.3].as_slice().into()), NamedVectors::from_ref("bb", [0.0, 0.1].as_slice().into()), NamedVectors::from_pairs([ ("aa".into(), vec![0.1, 0.2, 0.3, 0.4]), (VECTOR2_NAME.into(), vec![1.0, 0.9]), ]), NamedVectors::from_pairs([ (VECTOR1_NAME.into(), vec![0.1, 0.2, 0.3, 0.4]), ("bb".into(), vec![1.0, 0.9]), ]), ]; let wrong_names = ["aa", "bb", ""]; for (vector_name, vector) in wrong_vectors_single.iter() { let query_vector = vector.to_owned().into(); check_vector(vector_name, &query_vector, &segment.segment_config) .err() .unwrap(); segment .search( vector_name, &query_vector, &WithPayload { enable: false, payload_selector: None, }, &WithVector::Bool(true), None, 1, None, ) .err() .unwrap(); let query_context = QueryContext::default(); let segment_query_context = query_context.get_segment_query_context(); segment .search_batch( vector_name, &[&query_vector, &query_vector], &WithPayload { enable: false, payload_selector: None, }, &WithVector::Bool(true), None, 1, None, &segment_query_context, ) .err() .unwrap(); } for vectors in wrong_vectors_multi { check_named_vectors(&vectors, &segment.segment_config) .err() .unwrap(); segment .upsert_point(101, point_id, vectors.clone(), &hw_counter) .err() .unwrap(); segment 
.update_vectors(internal_id, 0, vectors.clone(), &hw_counter) .err() .unwrap(); segment .insert_new_vectors(point_id, 0, &vectors, &hw_counter) .err() .unwrap(); segment .replace_all_vectors(internal_id, 0, &vectors, &hw_counter) .err() .unwrap(); } for wrong_name in wrong_names { check_vector_name(wrong_name, &segment.segment_config) .err() .unwrap(); segment .vector(wrong_name, point_id, &hw_counter) .err() .unwrap(); segment .delete_vector(101, point_id, wrong_name) .err() .unwrap(); segment.available_vector_count(wrong_name).err().unwrap(); segment .vector_by_offset(wrong_name, internal_id, &hw_counter) .err() .unwrap(); } } /// Test handling point versions /// /// Apply if the point version is equal or higher. Always apply if the point does not exist /// yet. /// /// Before <https://github.com/qdrant/qdrant/pull/4060> this function would reject operations /// on non-existent points if the operation ID was lower than the current segment version. That /// should not happen, and this test asserts correct behavior. 
#[test] fn test_handle_point_version() { // Create base segment with a single point let dir = Builder::new().prefix("segment_dir").tempdir().unwrap(); let dim = 4; let hw_counter = HardwareCounterCell::new(); let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap(); segment .upsert_point( 100, 1.into(), only_default_vector(&[1.1, 1.0, 0.0, 1.0]), &hw_counter, ) .unwrap(); // Do not handle operation on existing point when providing an old version let applied = segment .handle_point_version(99, Some(0), |_segment| Ok((true, None))) .unwrap(); assert!(!applied); // Do handle operation on existing point when providing the current version let applied = segment .handle_point_version(100, Some(0), |_segment| Ok((true, None))) .unwrap(); assert!(applied); // Do handle operation on existing point when providing a newer version let applied = segment .handle_point_version(101, Some(0), |_segment| Ok((true, None))) .unwrap(); assert!(applied); // Always handle operations on non-existent points let applied = segment .handle_point_version(99, None, |_segment| Ok((true, None))) .unwrap(); assert!(applied); let applied = segment .handle_point_version(100, None, |_segment| Ok((true, None))) .unwrap(); assert!(applied); let applied = segment .handle_point_version(101, None, |_segment| Ok((true, None))) .unwrap(); assert!(applied); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/sampling.rs
lib/segment/src/segment/sampling.rs
use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::iterator_ext::IteratorExt; use rand::seq::{IteratorRandom, SliceRandom}; use super::Segment; use crate::index::PayloadIndex; use crate::types::{Filter, PointIdType}; impl Segment { pub(super) fn filtered_read_by_index_shuffled( &self, limit: usize, condition: &Filter, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { let payload_index = self.payload_index.borrow(); let id_tracker = self.id_tracker.borrow(); let cardinality_estimation = payload_index.estimate_cardinality(condition, hw_counter); let ids_iterator = payload_index .iter_filtered_points( condition, &*id_tracker, &cardinality_estimation, hw_counter, is_stopped, ) .filter_map(|internal_id| id_tracker.external_id(internal_id)); let mut rng = rand::rng(); let mut shuffled = ids_iterator.choose_multiple(&mut rng, limit); shuffled.shuffle(&mut rng); shuffled } pub fn filtered_read_by_random_stream( &self, limit: usize, condition: &Filter, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { let payload_index = self.payload_index.borrow(); let filter_context = payload_index.filter_context(condition, hw_counter); self.id_tracker .borrow() .iter_random() .stop_if(is_stopped) .filter(move |(_, internal_id)| filter_context.check(*internal_id)) .map(|(external_id, _)| external_id) .take(limit) .collect() } pub(super) fn read_by_random_id(&self, limit: usize) -> Vec<PointIdType> { self.id_tracker .borrow() .iter_random() .map(|x| x.0) .take(limit) .collect() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/search.rs
lib/segment/src/segment/search.rs
use common::counter::hardware_counter::HardwareCounterCell; use common::types::ScoredPointOffset; use super::Segment; use crate::common::operation_error::{OperationError, OperationResult}; use crate::data_types::named_vectors::NamedVectors; #[cfg(feature = "testing")] use crate::data_types::query_context::QueryContext; #[cfg(feature = "testing")] use crate::data_types::vectors::QueryVector; use crate::data_types::vectors::VectorStructInternal; #[cfg(feature = "testing")] use crate::entry::entry_point::SegmentEntry; #[cfg(feature = "testing")] use crate::types::VectorName; #[cfg(feature = "testing")] use crate::types::{Filter, SearchParams}; use crate::types::{ScoredPoint, WithPayload, WithVector}; impl Segment { /// Converts raw ScoredPointOffset search result into ScoredPoint result pub(super) fn process_search_result( &self, internal_result: Vec<ScoredPointOffset>, with_payload: &WithPayload, with_vector: &WithVector, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<ScoredPoint>> { let id_tracker = self.id_tracker.borrow(); internal_result .into_iter() .filter_map(|scored_point_offset| { let point_offset = scored_point_offset.idx; let external_id = id_tracker.external_id(point_offset); match external_id { Some(point_id) => Some((point_id, scored_point_offset)), None => { log::warn!( "Point with internal ID {point_offset} not found in id tracker, skipping" ); None } } }) .map(|(point_id, scored_point_offset)| { let point_offset = scored_point_offset.idx; let point_version = id_tracker.internal_version(point_offset).ok_or_else(|| { OperationError::service_error(format!( "Corrupter id_tracker, no version for point {point_id}" )) })?; let payload = if with_payload.enable { let initial_payload = self.payload_by_offset(point_offset, hw_counter)?; let processed_payload = if let Some(i) = &with_payload.payload_selector { i.process(initial_payload) } else { initial_payload }; Some(processed_payload) } else { None }; let vector = match with_vector { 
WithVector::Bool(false) => None, WithVector::Bool(true) => { let named_vector = self.all_vectors_by_offset(point_offset, hw_counter)?; Some(VectorStructInternal::from(named_vector)) }, WithVector::Selector(vectors) => { let mut result = NamedVectors::default(); for vector_name in vectors { if let Some(vector) = self.vector_by_offset(vector_name, point_offset, hw_counter)? { result.insert(vector_name.clone(), vector); } } Some(VectorStructInternal::from(result)) } }; Ok(ScoredPoint { id: point_id, version: point_version, score: scored_point_offset.score, payload, vector, shard_key: None, order_value: None, }) }) .collect() } /// This function is a simplified version of `search_batch` intended for testing purposes. #[allow(clippy::too_many_arguments)] #[cfg(feature = "testing")] pub fn search( &self, vector_name: &VectorName, vector: &QueryVector, with_payload: &WithPayload, with_vector: &WithVector, filter: Option<&Filter>, top: usize, params: Option<&SearchParams>, ) -> OperationResult<Vec<ScoredPoint>> { let query_context = QueryContext::default(); let segment_query_context = query_context.get_segment_query_context(); let result = self.search_batch( vector_name, &[vector], with_payload, with_vector, filter, top, params, &segment_query_context, )?; Ok(result.into_iter().next().unwrap()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/formula_rescore.rs
lib/segment/src/segment/formula_rescore.rs
use std::sync::atomic::{AtomicBool, Ordering}; use ahash::{AHashMap, AHashSet}; use common::counter::hardware_counter::HardwareCounterCell; use common::iterator_ext::IteratorExt; use common::types::ScoredPointOffset; use itertools::Itertools; use super::Segment; use crate::common::operation_error::OperationResult; use crate::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula; use crate::types::ScoredPoint; impl Segment { /// Rescores points of the prefetches, and returns the internal ids with the scores. pub(super) fn do_rescore_with_formula( &self, formula: &ParsedFormula, prefetches_scores: &[Vec<ScoredPoint>], limit: usize, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<ScoredPointOffset>> { // Dedup point offsets into a hashset let mut points_to_rescore = AHashSet::with_capacity(prefetches_scores.first().map_or(0, |scores| scores.len())); // Transform prefetches results into a hashmap for faster lookup, let prefetches_scores = prefetches_scores .iter() .map(|scores| { scores .iter() .filter_map(|point| { // Discard points without internal ids let internal_id = self.get_internal_id(point.id)?; // filter_map side effect: keep all uniquely seen point offsets. 
points_to_rescore.insert(internal_id); Some((internal_id, point.score)) }) .collect::<AHashMap<_, _>>() }) .collect::<Vec<_>>(); let index_ref = self.payload_index.borrow(); let scorer = index_ref.formula_scorer(formula, &prefetches_scores, hw_counter); // Perform rescoring let mut error = None; let rescored = points_to_rescore .into_iter() .stop_if(is_stopped) .filter_map(|internal_id| { match scorer.score(internal_id) { Ok(new_score) => Some(ScoredPointOffset { idx: internal_id, score: new_score, }), Err(err) => { // in case there is an error, defer handling it and continue error = Some(err); is_stopped.store(true, Ordering::Relaxed); None } } }) // Keep only the top k results .k_largest(limit) .collect(); if let Some(err) = error { return Err(err); } Ok(rescored) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/version_tracker.rs
lib/segment/src/segment/version_tracker.rs
use std::borrow::Borrow; use std::collections::HashMap; use std::hash::Hash; use crate::json_path::JsonPath; use crate::types::{SeqNumberType, VectorNameBuf}; /// Tracks versions of different sub-structures of segment to optimize partial snapshots. #[derive(Clone, Debug, Default)] pub struct VersionTracker { /// Tracks version of *mutable* files inside vector storage. /// Should be updated when vector storage is modified. vector_storage: HashMap<VectorNameBuf, SeqNumberType>, /// Tracks version of *mutable* files inside payload storage. /// Should be updated when payload storage is modified. payload_storage: Option<SeqNumberType>, /// Tracks version of *immutable* files inside payload index. /// Should be updated when payload index *schema* of the field is modified. /// /// Generally, we can rely on `Segment::initial_version` to filter immutable files. /// E.g., HNSW index is created when creating immutable segment, and so immutable files /// of HNSW index stays immutable for the whole lifetime of the segment. /// /// However, payload indices can be updated at any moment even for immutable segments. /// E.g., payload index can be created *after* immutable segment is created, and so we have to /// track payload index schema version separately from `Segment::initial_version`. 
payload_index_schema: HashMap<JsonPath, SeqNumberType>, } impl VersionTracker { pub fn get_vector(&self, vector: &str) -> Option<SeqNumberType> { self.vector_storage.get(vector).copied() } pub fn set_vector(&mut self, vector: &str, version: Option<SeqNumberType>) { bump_key(&mut self.vector_storage, vector, version) } pub fn get_payload(&self) -> Option<SeqNumberType> { self.payload_storage } pub fn set_payload(&mut self, version: Option<SeqNumberType>) { self.payload_storage = bump(self.payload_storage, version); } pub fn get_payload_index_schema(&self, field: &JsonPath) -> Option<SeqNumberType> { self.payload_index_schema.get(field).copied() } pub fn set_payload_index_schema(&mut self, field: &JsonPath, version: Option<SeqNumberType>) { bump_key(&mut self.payload_index_schema, field, version) } } fn bump(current: Option<SeqNumberType>, new: Option<SeqNumberType>) -> Option<SeqNumberType> { match (current, new) { (Some(current), Some(new)) => { if current < new { Some(new) } else { None } } (None, Some(new)) => Some(new), (_, None) => None, } } fn bump_key<K, Q>(map: &mut HashMap<K, SeqNumberType>, key: &Q, version: Option<SeqNumberType>) where K: Hash + Eq + Borrow<Q>, Q: Hash + Eq + ToOwned<Owned = K> + ?Sized, { let Some(new) = version else { map.remove(key); return; }; let Some(current) = map.get_mut(key) else { map.insert(key.to_owned(), new); return; }; if *current < new { *current = new; } else { map.remove(key); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/segment_ops.rs
lib/segment/src/segment/segment_ops.rs
use std::cmp::max; use std::collections::HashMap; use std::path::Path; use bitvec::prelude::BitVec; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use fs_err as fs; use io::file_operations::{atomic_save_json, read_json}; use super::{SEGMENT_STATE_FILE, SNAPSHOT_FILES_PATH, SNAPSHOT_PATH, Segment}; use crate::common::operation_error::{ OperationError, OperationResult, SegmentFailedState, get_service_error, }; use crate::common::validate_snapshot_archive::open_snapshot_archive_with_validation; use crate::common::{check_named_vectors, check_vector_name}; use crate::data_types::named_vectors::NamedVectors; use crate::data_types::vectors::VectorInternal; use crate::entry::entry_point::SegmentEntry; use crate::index::{PayloadIndex, VectorIndex}; use crate::types::{ Payload, PayloadFieldSchema, PayloadKeyType, PointIdType, SegmentState, SeqNumberType, SnapshotFormat, VectorName, }; use crate::utils; use crate::vector_storage::{Random, VectorStorage}; impl Segment { /// Replace vectors in-place /// /// This replaces all named vectors for this point with the given set of named vectors. /// /// - new named vectors are inserted /// - existing named vectors are replaced /// - existing named vectors not specified are deleted /// /// This differs with [`Segment::update_vectors`], because this deletes unspecified vectors. /// /// # Warning /// /// Available for appendable segments only. 
pub(super) fn replace_all_vectors( &mut self, internal_id: PointOffsetType, op_num: SeqNumberType, vectors: &NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { debug_assert!(self.is_appendable()); check_named_vectors(vectors, &self.segment_config)?; for (vector_name, vector_data) in self.vector_data.iter_mut() { let vector = vectors.get(vector_name); let mut vector_index = vector_data.vector_index.borrow_mut(); vector_index.update_vector(internal_id, vector, hw_counter)?; self.version_tracker.set_vector(vector_name, Some(op_num)); } Ok(()) } /// Update vectors in-place /// /// This updates all specified named vectors for this point with the given set of named vectors, leaving unspecified vectors untouched. /// /// - new named vectors are inserted /// - existing named vectors are replaced /// - existing named vectors not specified are untouched and kept as-is /// /// This differs with [`Segment::replace_all_vectors`], because this keeps unspecified vectors as-is. /// /// # Warning /// /// Available for appendable segments only. #[allow(clippy::needless_pass_by_ref_mut)] // ensure single access to AtomicRefCell vector_index pub(super) fn update_vectors( &mut self, internal_id: PointOffsetType, op_num: SeqNumberType, vectors: NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { debug_assert!(self.is_appendable()); check_named_vectors(&vectors, &self.segment_config)?; for (vector_name, new_vector) in vectors { let vector_data = &self.vector_data[vector_name.as_ref()]; let mut vector_index = vector_data.vector_index.borrow_mut(); vector_index.update_vector(internal_id, Some(new_vector.as_vec_ref()), hw_counter)?; self.version_tracker.set_vector(&vector_name, Some(op_num)); } Ok(()) } /// Insert new vectors into the segment /// /// # Warning /// /// Available for appendable segments only. 
pub(super) fn insert_new_vectors( &mut self, point_id: PointIdType, op_num: SeqNumberType, vectors: &NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<PointOffsetType> { debug_assert!(self.is_appendable()); check_named_vectors(vectors, &self.segment_config)?; let new_index = self.id_tracker.borrow().total_point_count() as PointOffsetType; for (vector_name, vector_data) in self.vector_data.iter_mut() { let vector_opt = vectors.get(vector_name); let mut vector_index = vector_data.vector_index.borrow_mut(); vector_index.update_vector(new_index, vector_opt, hw_counter)?; self.version_tracker.set_vector(vector_name, Some(op_num)); } self.id_tracker.borrow_mut().set_link(point_id, new_index)?; Ok(new_index) } /// Operation wrapped, which handles previous and new errors in the segment, automatically /// updates versions and skips operations if the segment version is too old /// /// # Arguments /// /// * `op_num` - sequential operation of the current operation /// * `op` - operation to be wrapped. Should return `OperationResult` of bool (which is returned outside) /// and optionally new offset of the changed point. 
///
/// # Result
///
/// Propagates `OperationResult` of bool (which is returned in the `op` closure)
pub(super) fn handle_segment_version_and_failure<F>(
    &mut self,
    op_num: SeqNumberType,
    operation: F,
) -> OperationResult<bool>
where
    F: FnOnce(&mut Segment) -> OperationResult<bool>,
{
    if let Some(SegmentFailedState {
        version: failed_version,
        point_id: _failed_point_id,
        error,
    }) = &self.error_status
    {
        // Failed operations should not be skipped,
        // fail if newer operation is attempted before proper recovery
        if *failed_version < op_num {
            return Err(OperationError::service_error(format!(
                "Not recovered from previous error: {error}"
            )));
        }
        // else: Re-try operation
    }

    let res = self.handle_segment_version(op_num, operation);

    // A service error latches the segment into a failed state; all further
    // updates are rejected above until this exact operation succeeds again.
    if let Some(error) = get_service_error(&res) {
        // ToDo: Recover previous segment state
        log::error!(
            "Segment {:?} operation error: {error}",
            self.current_path.as_path(),
        );
        self.error_status = Some(SegmentFailedState {
            version: op_num,
            point_id: None,
            error,
        });
    }
    res
}

/// Operation wrapped, which handles previous and new errors in the segment, automatically
/// updates versions and skips operations if the point version is too old
///
/// # Arguments
///
/// * `op_num` - sequential operation of the current operation
/// * `op_point_offset` - If point offset is specified, handler will use point version for comparison.
///   Otherwise, it will be applied without version checks.
/// * `op` - operation to be wrapped. Should return `OperationResult` of bool (which is returned outside)
///   and optionally new offset of the changed point.
///
/// # Result
///
/// Propagates `OperationResult` of bool (which is returned in the `op` closure)
pub(super) fn handle_point_version_and_failure<F>(
    &mut self,
    op_num: SeqNumberType,
    op_point_offset: Option<PointOffsetType>,
    operation: F,
) -> OperationResult<bool>
where
    F: FnOnce(&mut Segment) -> OperationResult<(bool, Option<PointOffsetType>)>,
{
    if let Some(SegmentFailedState {
        version: failed_version,
        point_id: _failed_point_id,
        error,
    }) = &self.error_status
    {
        // Failed operations should not be skipped,
        // fail if newer operation is attempted before proper recovery
        if *failed_version < op_num {
            return Err(OperationError::service_error(format!(
                "Not recovered from previous error: {error}"
            )));
        }
        // else: Re-try operation
    }

    let res = self.handle_point_version(op_num, op_point_offset, operation);

    match get_service_error(&res) {
        None => {
            // Recover error state
            match &self.error_status {
                None => {} // all good
                Some(error) => {
                    let point_id = op_point_offset.and_then(|point_offset| {
                        self.id_tracker.borrow().external_id(point_offset)
                    });
                    // Clear the failed state only if the retried operation
                    // targets the same point that originally failed.
                    if error.point_id == point_id {
                        // Fixed
                        log::info!("Recovered from error: {}", error.error);
                        self.error_status = None;
                    }
                }
            }
        }
        Some(error) => {
            // ToDo: Recover previous segment state
            log::error!(
                "Segment {:?} operation error: {error}",
                self.current_path.as_path(),
            );
            let point_id = op_point_offset
                .and_then(|point_offset| self.id_tracker.borrow().external_id(point_offset));
            self.error_status = Some(SegmentFailedState {
                version: op_num,
                point_id,
                error,
            });
        }
    }
    res
}

/// Manage segment version checking, for segment level operations
///
/// If current version is higher than operation version - do not perform the operation
/// Update current version if operation successfully executed
fn handle_segment_version<F>(
    &mut self,
    op_num: SeqNumberType,
    operation: F,
) -> OperationResult<bool>
where
    F: FnOnce(&mut Segment) -> OperationResult<bool>,
{
    // Global version to check if operation has already been applied, then skip without execution
    if self.version.unwrap_or(0) > op_num {
        return Ok(false);
    }
    let applied = operation(self)?;
    self.bump_segment_version(op_num);
    Ok(applied)
}

/// Manage point version checking inside this segment, for point level operations
///
/// If current version is higher than operation version - do not perform the operation
/// Update current version if operation successfully executed
pub(super) fn handle_point_version<F>(
    &mut self,
    op_num: SeqNumberType,
    op_point_offset: Option<PointOffsetType>,
    operation: F,
) -> OperationResult<bool>
where
    F: FnOnce(&mut Segment) -> OperationResult<(bool, Option<PointOffsetType>)>,
{
    // If point does not exist or has lower version, ignore operation
    if let Some(point_offset) = op_point_offset
        && self
            .id_tracker
            .borrow()
            .internal_version(point_offset)
            .is_some_and(|current_version| current_version > op_num)
    {
        return Ok(false);
    }

    let (applied, internal_id) = operation(self)?;

    self.bump_segment_version(op_num);
    // If the operation touched (or created) a point, stamp its version too.
    if let Some(internal_id) = internal_id {
        self.id_tracker
            .borrow_mut()
            .set_internal_version(internal_id, op_num)?;
    }

    Ok(applied)
}

/// Delete a point by internal offset: clears its payload and drops the
/// external <-> internal ID mapping. Vector data is intentionally NOT removed
/// (see the comment below).
pub fn delete_point_internal(
    &mut self,
    internal_id: PointOffsetType,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
    // Mark point as deleted, drop mapping
    self.payload_index
        .borrow_mut()
        .clear_payload(internal_id, hw_counter)?;
    self.id_tracker.borrow_mut().drop_internal(internal_id)?;

    // Before, we propagated point deletions to also delete its vectors. This turns
    // out to be problematic because this sometimes makes us lose vector data
    // because we cannot control the order of segment flushes.
    // Disabled until we properly fix it or find a better way to clean up old
    // vectors.
    //
    // // Propagate point deletion to all its vectors
    // for vector_data in segment.vector_data.values() {
    //     let mut vector_storage = vector_data.vector_storage.borrow_mut();
    //     vector_storage.delete_vector(internal_id)?;
    // }

    Ok(())
}

/// Advance the segment version; it only ever moves forward.
fn bump_segment_version(&mut self, op_num: SeqNumberType) {
    self.version.replace(max(op_num, self.version.unwrap_or(0)));
}

pub fn get_internal_id(&self, point_id: PointIdType) -> Option<PointOffsetType> {
    self.id_tracker.borrow().internal_id(point_id)
}

pub fn get_deleted_points_bitvec(&self) -> BitVec {
    BitVec::from(self.id_tracker.borrow().deleted_point_bitslice())
}

/// Like [`Segment::get_internal_id`], but turns a missing mapping into a
/// `PointIdError` instead of `None`.
pub(super) fn lookup_internal_id(
    &self,
    point_id: PointIdType,
) -> OperationResult<PointOffsetType> {
    let internal_id_opt = self.id_tracker.borrow().internal_id(point_id);
    match internal_id_opt {
        Some(internal_id) => Ok(internal_id),
        None => Err(OperationError::PointIdError {
            missed_point_id: point_id,
        }),
    }
}

/// Snapshot of the persistable segment metadata (versions + config).
pub(super) fn get_state(&self) -> SegmentState {
    SegmentState {
        initial_version: self.initial_version,
        version: self.version,
        config: self.segment_config.clone(),
    }
}

/// Atomically persist `state` as `segment.json` under `current_path`.
pub fn save_state(state: &SegmentState, current_path: &Path) -> OperationResult<()> {
    let state_path = current_path.join(SEGMENT_STATE_FILE);
    Ok(atomic_save_json(&state_path, state)?)
}

/// Load previously saved segment state (`segment.json`) from `current_path`.
pub fn load_state(current_path: &Path) -> OperationResult<SegmentState> {
    let state_path = current_path.join(SEGMENT_STATE_FILE);
    read_json(&state_path).map_err(|err| {
        OperationError::service_error(format!(
            "Failed to read segment state {} error: {}",
            current_path.display(),
            err
        ))
    })
}

/// Retrieve vector by internal ID
///
/// Returns `None` if the vector does not exist or is deleted
#[inline]
pub(super) fn vector_by_offset(
    &self,
    vector_name: &VectorName,
    point_offset: PointOffsetType,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<Option<VectorInternal>> {
    check_vector_name(vector_name, &self.segment_config)?;
    // NOTE(review): the previous `&self.vector_data.get(..)` bound a needless
    // `&&VectorData` (clippy `needless_borrow`); dropped the extra `&` for
    // consistency with `available_vector_count`.
    let vector_data = self
        .vector_data
        .get(vector_name)
        .ok_or_else(|| OperationError::vector_name_not_exists(vector_name))?;
    let vector_storage = vector_data.vector_storage.borrow();
    let is_vector_deleted = vector_storage.is_deleted_vector(point_offset);
    if !is_vector_deleted && !self.id_tracker.borrow().is_deleted_point(point_offset) {
        if vector_storage.total_vector_count() <= point_offset as usize {
            // Storage does not have vector with such offset.
            // This is possible if the storage is inconsistent due to interrupted flush.
            // Assume consistency will be restored with WAL replay.
            // Without this check, the service will panic on the `get_vector` call.
            Err(OperationError::InconsistentStorage {
                description: format!(
                    "Vector storage '{}' is inconsistent, total_vector_count: {}, point_offset: {}",
                    vector_name,
                    vector_storage.total_vector_count(),
                    point_offset
                ),
            })
        } else {
            let vector = vector_storage.get_vector::<Random>(point_offset);
            // Account vector read I/O only when the storage actually hits disk.
            if vector_storage.is_on_disk() {
                hw_counter
                    .vector_io_read()
                    .incr_delta(vector.estimate_size_in_bytes());
            }
            Ok(Some(vector.to_owned()))
        }
    } else {
        Ok(None)
    }
}

/// Collect all named vectors of a point by its internal offset.
/// Deleted or missing named vectors are simply omitted from the result.
pub(super) fn all_vectors_by_offset(
    &self,
    point_offset: PointOffsetType,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<NamedVectors<'_>> {
    let mut vectors = NamedVectors::default();
    for vector_name in self.vector_data.keys() {
        if let Some(vector) = self.vector_by_offset(vector_name, point_offset, hw_counter)? {
            vectors.insert(vector_name.clone(), vector);
        }
    }
    Ok(vectors)
}

/// Retrieve payload by internal ID
#[inline]
pub(super) fn payload_by_offset(
    &self,
    point_offset: PointOffsetType,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
    self.payload_index
        .borrow()
        .get_payload(point_offset, hw_counter)
}

/// Persist the current in-memory segment state to disk.
pub fn save_current_state(&self) -> OperationResult<()> {
    Self::save_state(&self.get_state(), &self.current_path)
}

/// Unpacks and restores the segment snapshot in-place. The original
/// snapshot is destroyed in the process.
///
/// Both of the following calls would result in a directory
/// `foo/bar/segment-id/` with the segment data:
///
/// - `segment.restore_snapshot("foo/bar/segment-id.tar")` (tar archive)
/// - `segment.restore_snapshot("foo/bar/segment-id")` (directory)
pub fn restore_snapshot_in_place(snapshot_path: &Path) -> OperationResult<()> {
    restore_snapshot_in_place(snapshot_path).map_err(|err| {
        OperationError::service_error(format!(
            "Failed to restore snapshot from {snapshot_path:?}: {err}",
        ))
    })
}

/// Check consistency of the segment's data and repair it if possible.
/// Removes partially persisted points.
pub fn check_consistency_and_repair(&mut self) -> OperationResult<()> {
    // Get rid of versionless points.
    let ids_to_clean = self.fix_id_tracker_inconsistencies()?;

    // There are some leftovers to clean from segment.
    // After that we need to set internal version to 0, so that
    // we won't need to clean them again.

    // This is internal operation, no hw measurement needed
    let disposable_hw_counter = HardwareCounterCell::disposable();

    if !ids_to_clean.is_empty() {
        log::debug!("cleaning up {} points without version", ids_to_clean.len());
        for internal_id in ids_to_clean {
            self.delete_point_internal(internal_id, &disposable_hw_counter)?;
        }
        // Persist the repair immediately so a crash does not resurrect the points.
        self.flush(true)?;
        // We do not drop version here, because it is already not loaded into memory.
        // There are no explicit mapping between internal ID and version, so all dangling
        // versions will be ignored automatically.
        // Those versions could be overwritten by new points, but it is not a problem.
        // They will also be deleted by the next optimization.
    }

    Ok(())
}

/// Update all payload/field indices to match `desired_schemas`
///
/// Missing payload indices are created. Incorrectly configured payload indices are recreated.
/// Extra payload indices are NOT deleted.
///
/// This does nothing if the current payload indices state matches `desired_schemas` exactly.
pub fn update_all_field_indices(
    &mut self,
    desired_schemas: &HashMap<PayloadKeyType, PayloadFieldSchema>,
) -> OperationResult<()> {
    let schema_applied = self.payload_index.borrow().indexed_fields();
    let schema_config = desired_schemas;

    // Create or update payload indices if they don't match configuration
    for (key, schema) in schema_config {
        match schema_applied.get(key) {
            // Index exists and already matches the desired schema: nothing to do.
            Some(existing_schema) if existing_schema == schema => continue,
            Some(existing_schema) => log::warn!(
                "Segment has incorrect payload index for {key}, recreating it now (current: {:?}, configured: {:?})",
                existing_schema.name(),
                schema.name(),
            ),
            None => log::warn!(
                "Segment is missing a {} payload index for {key}, creating it now",
                schema.name(),
            ),
        }
        let created = self.create_field_index(
            self.version(),
            key,
            Some(schema),
            &HardwareCounterCell::disposable(), // This function is only used in Segment::load which is unmeasured.
        )?;
        if !created {
            log::warn!("Failed to create payload index for {key} in segment");
        }
    }

    // Do not delete extra payload indices, because collection-level information about
    // the payload indices might be incomplete due to migrations from older versions.

    Ok(())
}

/// Check data consistency of the segment on its own
/// - internal id without external id
/// - external id without internal
/// - internal id without version
/// - internal id without vector
///
/// A shard can still be consistent with an inconsistent segment as points are merged based on their version.
///
/// Returns an error if any inconsistency is found
pub fn check_data_consistency(&self) -> OperationResult<()> {
    let id_tracker = self.id_tracker.borrow();

    // dangling internal ids
    let mut has_dangling_internal_ids = false;
    for internal_id in id_tracker.iter_internal() {
        if id_tracker.external_id(internal_id).is_none() {
            log::error!("Internal id {internal_id} without external id");
            has_dangling_internal_ids = true
        }
    }

    // dangling external ids
    let mut has_dangling_external_ids = false;
    for external_id in id_tracker.iter_external() {
        if id_tracker.internal_id(external_id).is_none() {
            log::error!("External id {external_id} without internal id");
            has_dangling_external_ids = true;
        }
    }

    // checking internal id without version
    let mut has_internal_ids_without_version = false;
    for internal_id in id_tracker.iter_internal() {
        if id_tracker.internal_version(internal_id).is_none() {
            log::error!("Internal id {internal_id} without version");
            has_internal_ids_without_version = true;
        }
    }

    // check that non deleted points exist in vector storage
    let mut has_internal_ids_without_vector = false;
    for internal_id in id_tracker.iter_internal() {
        for (vector_name, vector_data) in &self.vector_data {
            let vector_storage = vector_data.vector_storage.borrow();
            let is_vector_deleted_storage = vector_storage.is_deleted_vector(internal_id);
            let is_vector_deleted_tracker = id_tracker.is_deleted_point(internal_id);
            let vector_stored = vector_storage.get_vector_opt::<Random>(internal_id);
            if !is_vector_deleted_storage && !is_vector_deleted_tracker && vector_stored.is_none()
            {
                let point_id = id_tracker.external_id(internal_id);
                let point_version = id_tracker.internal_version(internal_id);
                // ignoring initial version because the WAL replay can resurrect un-flushed points by assigning them a new initial version
                // those points will be deleted by the next deduplication process
                if point_version != Some(0) {
                    log::error!(
                        "Vector storage '{vector_name}' is missing point {point_id:?} point_offset: {internal_id} version: {point_version:?}",
                    );
                    has_internal_ids_without_vector = true;
                }
            }
        }
    }

    let is_inconsistent = has_dangling_internal_ids
        || has_dangling_external_ids
        || has_internal_ids_without_version
        || has_internal_ids_without_vector;

    if is_inconsistent {
        Err(OperationError::service_error(
            "Inconsistent segment data detected",
        ))
    } else {
        Ok(())
    }
}

pub fn available_vector_count(&self, vector_name: &VectorName) -> OperationResult<usize> {
    check_vector_name(vector_name, &self.segment_config)?;
    Ok(self
        .vector_data
        .get(vector_name)
        .ok_or_else(|| OperationError::vector_name_not_exists(vector_name))?
        .vector_storage
        .borrow()
        .available_vector_count())
}

pub fn total_point_count(&self) -> usize {
    self.id_tracker.borrow().total_point_count()
}

/// Fixes inconsistencies in the ID tracker, if any.
/// Returns list of IDs, which should be removed from segment
pub fn fix_id_tracker_inconsistencies(&mut self) -> OperationResult<Vec<PointOffsetType>> {
    self.id_tracker.borrow_mut().fix_inconsistencies()
}
}

/// Free-function worker behind [`Segment::restore_snapshot_in_place`]:
/// detects the snapshot flavor from the path and unpacks it next to itself.
fn restore_snapshot_in_place(snapshot_path: &Path) -> OperationResult<()> {
    let segments_dir = snapshot_path
        .parent()
        .ok_or_else(|| OperationError::service_error("Cannot extract parent path"))?;
    let file_name = snapshot_path
        .file_name()
        .and_then(|name| name.to_str())
        .ok_or_else(|| {
            OperationError::service_error("Cannot extract segment ID from snapshot path")
        })?;

    let meta = fs::metadata(snapshot_path)?;

    // `<id>.tar` regular file -> tar snapshot; bare `<id>` directory -> streamable snapshot.
    let (segment_id, is_tar) = match file_name.split_once('.') {
        Some((segment_id, "tar")) if meta.is_file() => (segment_id, true),
        None if meta.is_dir() => (file_name, false),
        _ => {
            return Err(OperationError::service_error(
                "Invalid snapshot path, expected either a directory or a .tar file",
            ));
        }
    };

    if !is_tar {
        log::debug!(
            "Extracting segment {} from {:?} snapshot",
            segment_id,
            SnapshotFormat::Streamable
        );
        unpack_snapshot(snapshot_path)?;
    } else {
        let segment_path = segments_dir.join(segment_id);
        open_snapshot_archive_with_validation(snapshot_path)?.unpack(&segment_path)?;

        // Regular-format tars contain a nested `snapshot/` directory;
        // ancient-format tars are a plain archive with files at the top level.
        let inner_path = segment_path.join(SNAPSHOT_PATH);
        if inner_path.is_dir() {
            log::debug!(
                "Extracting segment {} from {:?} snapshot",
                segment_id,
                SnapshotFormat::Regular
            );
            unpack_snapshot(&inner_path)?;
            utils::fs::move_all(&inner_path, &segment_path)?;
            fs::remove_dir(&inner_path)?;
        } else {
            log::debug!(
                "Extracting segment {} from {:?} snapshot",
                segment_id,
                SnapshotFormat::Ancient
            );
            // Do nothing, this format is just a plain archive.
        }
        // The original snapshot archive is consumed by the restore.
        fs::remove_file(snapshot_path)?;
    }

    Ok(())
}

/// Restore embedded database backups (RocksDB builds only) and move the
/// snapshot `files/` payload into the segment directory proper.
fn unpack_snapshot(segment_path: &Path) -> OperationResult<()> {
    #[cfg(feature = "rocksdb")]
    {
        use super::{DB_BACKUP_PATH, PAYLOAD_DB_BACKUP_PATH};
        use crate::index::struct_payload_index::StructPayloadIndex;

        let db_backup_path = segment_path.join(DB_BACKUP_PATH);
        if db_backup_path.is_dir() {
            crate::rocksdb_backup::restore(&db_backup_path, segment_path)?;
            fs::remove_dir_all(&db_backup_path)?;
        }

        let payload_index_db_backup = segment_path.join(PAYLOAD_DB_BACKUP_PATH);
        if payload_index_db_backup.is_dir() {
            StructPayloadIndex::restore_database_snapshot(&payload_index_db_backup, segment_path)?;
            fs::remove_dir_all(&payload_index_db_backup)?;
        }
    }

    let files_path = segment_path.join(SNAPSHOT_FILES_PATH);
    utils::fs::move_all(&files_path, segment_path)?;
    fs::remove_dir(&files_path)?;

    Ok(())
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/facet.rs
lib/segment/src/segment/facet.rs
use std::collections::{BTreeSet, HashMap};
use std::sync::atomic::AtomicBool;

use common::counter::hardware_counter::HardwareCounterCell;
use common::iterator_ext::IteratorExt;
use itertools::{Either, Itertools};

use super::Segment;
use crate::common::operation_error::OperationResult;
use crate::data_types::facets::{FacetHit, FacetParams, FacetValue};
use crate::entry::entry_point::SegmentEntry;
use crate::index::PayloadIndex;
use crate::json_path::JsonPath;
use crate::payload_storage::FilterContext;
use crate::types::Filter;

impl Segment {
    /// Count, per facet value, how many points carry that value —
    /// optionally restricted by `request.filter`. Counts are exact for this
    /// segment; "approximate" refers to the filtered-cardinality heuristic
    /// used to pick the cheaper of two aggregation strategies.
    pub(super) fn approximate_facet(
        &self,
        request: &FacetParams,
        is_stopped: &AtomicBool,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<HashMap<FacetValue, usize>> {
        let payload_index = self.payload_index.borrow();

        // Shortcut if this segment has no points, prevent division by zero later
        let available_points = self.available_point_count();
        if available_points == 0 {
            return Ok(HashMap::new());
        }

        let facet_index = payload_index.get_facet_index(&request.key)?;

        // Declared here so the filter context outlives the borrowing iterator below.
        let context;
        let hits_iter = if let Some(filter) = &request.filter {
            let id_tracker = self.id_tracker.borrow();
            let filter_cardinality = payload_index.estimate_cardinality(filter, hw_counter);

            let percentage_filtered = filter_cardinality.exp as f64 / available_points as f64;

            // TODO(facets): define a better estimate for this decision, the question is:
            // What is more expensive, to hash the same value excessively or to check with filter too many times?
            //
            // For now this is defined from some rudimentary benchmarking two scenarios:
            // - a collection with few keys
            // - a collection with almost a unique key per point
            let use_iterative_approach = percentage_filtered < 0.3;

            let iter = if use_iterative_approach {
                // go over the filtered points and aggregate the values
                // aka. read from other indexes
                let iter = payload_index
                    .iter_filtered_points(
                        filter,
                        &*id_tracker,
                        &filter_cardinality,
                        hw_counter,
                        is_stopped,
                    )
                    .filter(|point_id| !id_tracker.is_deleted_point(*point_id))
                    .fold(HashMap::new(), |mut map, point_id| {
                        // `unique()` so a point with a repeated value counts once.
                        facet_index
                            .get_point_values(point_id)
                            .unique()
                            .for_each(|value| {
                                *map.entry(value).or_insert(0) += 1;
                            });
                        map
                    })
                    .into_iter()
                    .map(|(value, count)| FacetHit { value, count });
                Either::Left(iter)
            } else {
                // go over the values and filter the points
                // aka. read from facet index
                //
                // This is more similar to a full-scan, but we won't be hashing so many times.
                context = payload_index.struct_filtered_context(filter, hw_counter);
                let iter = facet_index
                    .iter_values_map(hw_counter)
                    .stop_if(is_stopped)
                    .filter_map(|(value, iter)| {
                        let count = iter
                            .unique()
                            .filter(|&point_id| context.check(point_id))
                            .count();
                        (count > 0).then_some(FacetHit { value, count })
                    });
                Either::Right(iter)
            };
            Either::Left(iter)
        } else {
            // just count how many points each value has
            let iter = facet_index
                .iter_counts_per_value()
                .stop_if(is_stopped)
                .filter(|hit| hit.count > 0);
            Either::Right(iter)
        };

        // We can't just select top values, because we need to aggregate across segments,
        // which we can't assume to select the same best top.
        //
        // We need all values to be able to aggregate correctly across segments
        let hits: HashMap<_, _> = hits_iter
            .map(|hit| (hit.value.to_owned(), hit.count))
            .collect();

        Ok(hits)
    }

    /// Collect the distinct facet values present under `key`, optionally
    /// restricted to points matching `filter`.
    pub(super) fn facet_values(
        &self,
        key: &JsonPath,
        filter: Option<&Filter>,
        is_stopped: &AtomicBool,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<BTreeSet<FacetValue>> {
        let payload_index = self.payload_index.borrow();

        let facet_index = payload_index.get_facet_index(key)?;

        let values = if let Some(filter) = filter {
            let id_tracker = self.id_tracker.borrow();
            let filter_cardinality = payload_index.estimate_cardinality(filter, hw_counter);
            payload_index
                .iter_filtered_points(
                    filter,
                    &*id_tracker,
                    &filter_cardinality,
                    hw_counter,
                    is_stopped,
                )
                .filter(|point_id| !id_tracker.is_deleted_point(*point_id))
                .fold(BTreeSet::new(), |mut set, point_id| {
                    set.extend(facet_index.get_point_values(point_id));
                    set
                })
                .into_iter()
                .map(|value| value.to_owned())
                .collect()
        } else {
            facet_index
                .iter_values()
                .stop_if(is_stopped)
                .map(|value_ref| value_ref.to_owned())
                .collect()
        };

        Ok(values)
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/mod.rs
lib/segment/src/segment/mod.rs
mod entry;
mod facet;
mod formula_rescore;
mod order_by;
mod sampling;
mod scroll;
mod search;
mod segment_ops;
mod version_tracker;

pub mod snapshot;

#[cfg(test)]
mod tests;

use std::collections::HashMap;
use std::fmt;
use std::path::PathBuf;
use std::sync::Arc;

use atomic_refcell::AtomicRefCell;
use common::is_alive_lock::IsAliveLock;
use io::storage_version::StorageVersion;
use parking_lot::Mutex;
#[cfg(feature = "rocksdb")]
use rocksdb::DB;

use self::version_tracker::VersionTracker;
use crate::common::operation_error::SegmentFailedState;
use crate::id_tracker::IdTrackerSS;
use crate::index::VectorIndexEnum;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
use crate::types::{SegmentConfig, SegmentType, SeqNumberType, VectorNameBuf};
use crate::vector_storage::VectorStorageEnum;
use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors;

pub const SEGMENT_STATE_FILE: &str = "segment.json";

const SNAPSHOT_PATH: &str = "snapshot";

// Sub-directories of `SNAPSHOT_PATH`:
#[cfg(feature = "rocksdb")]
const DB_BACKUP_PATH: &str = "db_backup";
#[cfg(feature = "rocksdb")]
const PAYLOAD_DB_BACKUP_PATH: &str = "payload_index_db_backup";
const SNAPSHOT_FILES_PATH: &str = "files";

/// Marker type tying segment storage to the crate version it was written with.
pub struct SegmentVersion;

impl StorageVersion for SegmentVersion {
    fn current_raw() -> &'static str {
        env!("CARGO_PKG_VERSION")
    }
}

/// Segment - an object which manages an independent group of points.
///
/// - Provides storage, indexing and managing operations for points (vectors + payload)
/// - Keeps track of point versions
/// - Persists data
/// - Keeps track of occurred errors
#[derive(Debug)]
pub struct Segment {
    /// Initial version this segment was created at
    pub initial_version: Option<SeqNumberType>,
    /// Latest update operation number, applied to this segment
    /// If None, there were no updates and segment is empty
    pub version: Option<SeqNumberType>,
    /// Latest persisted version
    /// Locked structure on which we hold the lock during flush to prevent concurrent flushes
    pub persisted_version: Arc<Mutex<Option<SeqNumberType>>>,
    /// Lock to prevent concurrent flushes and used for waiting for ongoing flushes to finish.
    pub is_alive_flush_lock: IsAliveLock,
    /// Path of the storage root
    pub current_path: PathBuf,
    // Per-component (vectors/payload) last-change versions.
    pub version_tracker: VersionTracker,
    /// Component for mapping external ids to internal and also keeping track of point versions
    pub id_tracker: Arc<AtomicRefCell<IdTrackerSS>>,
    // One entry per named vector configured for this segment.
    pub vector_data: HashMap<VectorNameBuf, VectorData>,
    pub payload_index: Arc<AtomicRefCell<StructPayloadIndex>>,
    pub payload_storage: Arc<AtomicRefCell<PayloadStorageEnum>>,
    /// Shows if it is possible to insert more points into this segment
    pub appendable_flag: bool,
    /// Shows what kind of indexes and storages are used in this segment
    pub segment_type: SegmentType,
    pub segment_config: SegmentConfig,
    /// Last unhandled error
    /// If not None, all update operations will be aborted until original operation is performed properly
    pub error_status: Option<SegmentFailedState>,
    #[cfg(feature = "rocksdb")]
    pub database: Option<Arc<parking_lot::RwLock<DB>>>,
}

/// Storage, index and optional quantized data for a single named vector.
pub struct VectorData {
    pub vector_index: Arc<AtomicRefCell<VectorIndexEnum>>,
    pub vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>,
    pub quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>,
}

impl fmt::Debug for VectorData {
    // Inner components have no useful Debug representation of their own.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("VectorData").finish_non_exhaustive()
    }
}

impl Drop for Segment {
    fn drop(&mut self) {
        // Wait for all background flush operations to finish
        self.is_alive_flush_lock.blocking_mark_dead();

        // Try to remove everything from the disk cache, as it might pollute the cache
        if let Err(e) = self.payload_storage.borrow().clear_cache() {
            log::error!("Failed to clear cache of payload_storage: {e}");
        }
        if let Err(e) = self.payload_index.borrow().clear_cache() {
            log::error!("Failed to clear cache of payload_index: {e}");
        }
        for (name, vector_data) in &self.vector_data {
            let VectorData {
                vector_index,
                vector_storage,
                quantized_vectors,
            } = vector_data;
            if let Err(e) = vector_index.borrow().clear_cache() {
                log::error!("Failed to clear cache of vector index {name}: {e}");
            }
            if let Err(e) = vector_storage.borrow().clear_cache() {
                log::error!("Failed to clear cache of vector storage {name}: {e}");
            }
            if let Some(quantized_vectors) = quantized_vectors.borrow().as_ref()
                && let Err(e) = quantized_vectors.clear_cache()
            {
                log::error!("Failed to clear cache of quantized vectors {name}: {e}");
            }
        }
    }
}

/// Irreversibly delete the RocksDB database at `path` (RocksDB builds only).
#[cfg(feature = "rocksdb")]
pub fn destroy_rocksdb(
    path: &std::path::Path,
) -> crate::common::operation_error::OperationResult<()> {
    rocksdb::DB::destroy(&Default::default(), path).map_err(|err| {
        crate::common::operation_error::OperationError::service_error(format!(
            "failed to destroy RocksDB at {}: {err}",
            path.display()
        ))
    })?;
    Ok(())
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/order_by.rs
lib/segment/src/segment/order_by.rs
use std::sync::atomic::AtomicBool;

use common::counter::hardware_counter::HardwareCounterCell;
use common::iterator_ext::IteratorExt;
use itertools::Either;

use super::Segment;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::order_by::{Direction, OrderBy, OrderValue};
use crate::index::PayloadIndex;
use crate::index::field_index::numeric_index::StreamRange;
use crate::spaces::tools::{peek_top_largest_iterable, peek_top_smallest_iterable};
use crate::types::{Filter, PointIdType};

impl Segment {
    /// Read points matching `condition`, ordered by the numeric field of
    /// `order_by`: iterate filter-selected points first, then sort/top-k by
    /// their ordering values. Requires a numeric (range) index on the key.
    pub fn filtered_read_by_index_ordered(
        &self,
        order_by: &OrderBy,
        limit: Option<usize>,
        condition: &Filter,
        is_stopped: &AtomicBool,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Vec<(OrderValue, PointIdType)>> {
        let payload_index = self.payload_index.borrow();
        let id_tracker = self.id_tracker.borrow();

        let numeric_index = payload_index
            .field_indexes
            .get(&order_by.key)
            .and_then(|indexes| indexes.iter().find_map(|index| index.as_numeric()))
            .ok_or_else(|| OperationError::MissingRangeIndexForOrderBy {
                key: order_by.key.to_string(),
            })?;

        let cardinality_estimation = payload_index.estimate_cardinality(condition, hw_counter);

        let start_from = order_by.start_from();

        let values_ids_iterator = payload_index
            .iter_filtered_points(
                condition,
                &*id_tracker,
                &cardinality_estimation,
                hw_counter,
                is_stopped,
            )
            .flat_map(|internal_id| {
                // Repeat a point for as many values as it has
                numeric_index
                    .get_ordering_values(internal_id)
                    // But only those which start from `start_from`
                    .filter(|value| match order_by.direction() {
                        Direction::Asc => value >= &start_from,
                        Direction::Desc => value <= &start_from,
                    })
                    .map(move |ordering_value| (ordering_value, internal_id))
            })
            .filter_map(|(value, internal_id)| {
                // Drop points without an external id mapping.
                id_tracker
                    .external_id(internal_id)
                    .map(|external_id| (value, external_id))
            });

        let page = match order_by.direction() {
            Direction::Asc => {
                // With a limit, keep only the smallest `limit` entries; then
                // sort the surviving page ascending.
                let mut page = match limit {
                    Some(limit) => peek_top_smallest_iterable(values_ids_iterator, limit),
                    None => values_ids_iterator.collect(),
                };
                page.sort_unstable_by(|(value_a, _), (value_b, _)| value_a.cmp(value_b));
                page
            }
            Direction::Desc => {
                let mut page = match limit {
                    Some(limit) => peek_top_largest_iterable(values_ids_iterator, limit),
                    None => values_ids_iterator.collect(),
                };
                page.sort_unstable_by(|(value_a, _), (value_b, _)| value_b.cmp(value_a));
                page
            }
        };

        Ok(page)
    }

    /// Read points ordered by value by streaming the numeric index range
    /// directly (already sorted), applying `filter` per point, and taking up
    /// to `limit` results.
    pub fn filtered_read_by_value_stream(
        &self,
        order_by: &OrderBy,
        limit: Option<usize>,
        filter: Option<&Filter>,
        is_stopped: &AtomicBool,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Vec<(OrderValue, PointIdType)>> {
        let payload_index = self.payload_index.borrow();

        let numeric_index = payload_index
            .field_indexes
            .get(&order_by.key)
            .and_then(|indexes| indexes.iter().find_map(|index| index.as_numeric()))
            .ok_or_else(|| OperationError::MissingRangeIndexForOrderBy {
                key: order_by.key.to_string(),
            })?;

        let range_iter = numeric_index.stream_range(&order_by.as_range());

        // The index streams ascending; reverse for descending order.
        let directed_range_iter = match order_by.direction() {
            Direction::Asc => Either::Left(range_iter),
            Direction::Desc => Either::Right(range_iter.rev()),
        };

        let id_tracker = self.id_tracker.borrow();

        let filtered_iter = match filter {
            None => Either::Left(directed_range_iter),
            Some(filter) => {
                let filter_context = payload_index.filter_context(filter, hw_counter);
                Either::Right(
                    directed_range_iter
                        .filter(move |(_, internal_id)| filter_context.check(*internal_id)),
                )
            }
        };

        let reads = filtered_iter
            .stop_if(is_stopped)
            .filter_map(|(value, internal_id)| {
                id_tracker
                    .external_id(internal_id)
                    .map(|external_id| (value, external_id))
            })
            .take(limit.unwrap_or(usize::MAX))
            .collect();

        Ok(reads)
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/snapshot.rs
lib/segment/src/segment/snapshot.rs
use std::collections::{HashMap, HashSet}; use std::io::{Seek, Write}; use std::ops::Deref as _; use std::path::{Path, PathBuf}; use std::{fmt, thread}; use common::tar_ext; use fs_err as fs; use io::storage_version::VERSION_FILE; use uuid::Uuid; use crate::common::operation_error::{OperationError, OperationResult}; use crate::data_types::manifest::{FileVersion, SegmentManifest, SnapshotManifest}; use crate::entry::SegmentEntry as _; use crate::entry::snapshot_entry::SnapshotEntry; use crate::index::{PayloadIndex, VectorIndex}; use crate::payload_storage::PayloadStorage; use crate::segment::{SEGMENT_STATE_FILE, SNAPSHOT_FILES_PATH, SNAPSHOT_PATH, Segment}; use crate::types::SnapshotFormat; use crate::utils::path::strip_prefix; use crate::vector_storage::VectorStorage; pub const ROCKS_DB_VIRT_FILE: &str = "::ROCKS_DB"; pub const PAYLOAD_INDEX_ROCKS_DB_VIRT_FILE: &str = "::PAYLOAD_INDEX_ROCKS_DB"; impl SnapshotEntry for Segment { fn take_snapshot( &self, temp_path: &Path, tar: &tar_ext::BuilderExt, format: SnapshotFormat, manifest: Option<&SnapshotManifest>, ) -> OperationResult<()> { let segment_id = self.segment_id()?; log::debug!("Taking snapshot of segment {segment_id}"); let include_files = match manifest { None => HashSet::new(), Some(manifest) => { let updated_manifest = self.get_segment_manifest()?; let updated_manifest_json = serde_json::to_vec(&updated_manifest).map_err(|err| { OperationError::service_error(format!( "failed to serialize segment manifest into JSON: {err}" )) })?; let tar = tar.descend(Path::new(&segment_id))?; tar.blocking_append_data( &updated_manifest_json, Path::new("files/segment_manifest.json"), )?; let mut empty_manifest = None; let request_manifest = manifest .get(segment_id) .unwrap_or_else(|| empty_manifest.insert(SegmentManifest::empty(segment_id))); updated_files(request_manifest, &updated_manifest) } }; let include_if = |path: &Path| { if manifest.is_none() { true } else { include_files.contains(path) } }; match format { 
SnapshotFormat::Ancient => { debug_assert!(false, "Unsupported snapshot format: {format:?}"); return Err(OperationError::service_error(format!( "Unsupported snapshot format: {format:?}" ))); } SnapshotFormat::Regular => { tar.blocking_write_fn(Path::new(&format!("{segment_id}.tar")), |writer| { let tar = tar_ext::BuilderExt::new_streaming_borrowed(writer); let tar = tar.descend(Path::new(SNAPSHOT_PATH))?; snapshot_files(self, temp_path, &tar, include_if) })??; } SnapshotFormat::Streamable => { let tar = tar.descend(Path::new(&segment_id))?; snapshot_files(self, temp_path, &tar, include_if)?; } } Ok(()) } fn collect_snapshot_manifest(&self, manifest: &mut SnapshotManifest) -> OperationResult<()> { manifest.add(self.get_segment_manifest()?); Ok(()) } } impl Segment { fn segment_id(&self) -> OperationResult<&str> { let id = self .current_path .file_stem() .and_then(|segment_dir| segment_dir.to_str()) .ok_or_else(|| { OperationError::service_error(format!( "failed to extract segment ID from segment path {}", self.current_path.display(), )) })?; debug_assert!( Uuid::try_parse(id).is_ok(), "segment ID {id} is not a valid UUID", ); Ok(id) } fn get_segment_manifest(&self) -> OperationResult<SegmentManifest> { let segment_id = self.segment_id()?; let segment_version = self.version(); let files = self .files() .into_iter() .map(|path| (path, FileVersion::Unversioned)); let vector_storage_files = self.vector_data .iter() .flat_map(|(vector_name, vector_data)| { let version = self.version_tracker.get_vector(vector_name); vector_data .vector_storage .borrow() .files() .into_iter() .map(move |file| (file, FileVersion::from(version))) }); let payload_storage_files = self .payload_storage .borrow() .files() .into_iter() .map(|file| (file, FileVersion::from(self.version_tracker.get_payload()))); let payload_index_files = self .payload_index .borrow() .immutable_files() .into_iter() .map(|(field, file)| { let version = FileVersion::from( self.version_tracker 
.get_payload_index_schema(&field) .or(self.initial_version) .or(self.version) .unwrap_or(0), ); (file, version) }); let immutable_files = self.immutable_files().into_iter().map(|path| { let version = FileVersion::from(self.initial_version.or(self.version).unwrap_or(0)); (path, version) }); let mut file_versions = HashMap::with_capacity(files.len()); let files = files .chain(vector_storage_files) .chain(payload_storage_files) .chain(immutable_files) .chain(payload_index_files); for (path, version) in files { // All segment files should be contained within segment directory debug_assert!( path.starts_with(&self.current_path), "segment file {} is not contained within segment directory {}", path.display(), self.current_path.display(), ); let path = strip_prefix(&path, &self.current_path)?; let _ = file_versions.insert(path.to_path_buf(), version); } // TODO: Version RocksDB!? 🤯 file_versions.insert(PathBuf::from(ROCKS_DB_VIRT_FILE), FileVersion::Unversioned); file_versions.insert( PathBuf::from(PAYLOAD_INDEX_ROCKS_DB_VIRT_FILE), FileVersion::Unversioned, ); Ok(SegmentManifest { segment_id: segment_id.into(), segment_version, file_versions, }) } fn files(&self) -> Vec<PathBuf> { let mut files = Vec::new(); for vector_data in self.vector_data.values() { files.extend(vector_data.vector_index.borrow().files()); files.extend(vector_data.vector_storage.borrow().files()); if let Some(quantized_vectors) = vector_data.quantized_vectors.borrow().deref() { files.extend(quantized_vectors.files()); } } files.extend(self.payload_index.borrow().files()); files.extend(self.payload_storage.borrow().files()); files.extend(self.id_tracker.borrow().files()); files } fn immutable_files(&self) -> Vec<PathBuf> { let mut files = Vec::new(); for vector_data in self.vector_data.values() { files.extend(vector_data.vector_index.borrow().immutable_files()); files.extend(vector_data.vector_storage.borrow().immutable_files()); if let Some(quantized_vectors) = 
vector_data.quantized_vectors.borrow().deref() { files.extend(quantized_vectors.immutable_files()); } } files.extend(self.payload_storage.borrow().immutable_files()); files.extend(self.id_tracker.borrow().immutable_files()); files } } pub fn snapshot_files( segment: &Segment, temp_path: &Path, tar: &tar_ext::BuilderExt<impl Write + Seek>, include_if: impl Fn(&Path) -> bool, ) -> OperationResult<()> { // use temp_path for intermediary files let temp_path = temp_path.join(format!("segment-{}", Uuid::new_v4())); // TODO: Version RocksDB!? 🤯 #[cfg(feature = "rocksdb")] if include_if(ROCKS_DB_VIRT_FILE.as_ref()) && let Some(db) = &segment.database { let db_backup_path = temp_path.join(super::DB_BACKUP_PATH); let db = db.read(); crate::rocksdb_backup::create(&db, &db_backup_path).map_err(|err| { OperationError::service_error(format!( "failed to create RocksDB backup at {}: {err}", db_backup_path.display() )) })?; } #[cfg(feature = "rocksdb")] if include_if(PAYLOAD_INDEX_ROCKS_DB_VIRT_FILE.as_ref()) { let payload_index_db_backup_path = temp_path.join(crate::segment::PAYLOAD_DB_BACKUP_PATH); segment .payload_index .borrow() .take_database_snapshot(&payload_index_db_backup_path) .map_err(|err| { OperationError::service_error(format!( "failed to create payload index RocksDB backup at {}: {err}", payload_index_db_backup_path.display() )) })?; } if temp_path.exists() { tar.blocking_append_dir_all(&temp_path, Path::new("")) .map_err(|err| { OperationError::service_error(format!( "failed to add RockDB backup {} into snapshot: {err}", temp_path.display() )) })?; // remove tmp directory in background let _ = thread::spawn(move || { let res = fs::remove_dir_all(&temp_path); if let Err(err) = res { log::error!( "failed to remove temporary directory {}: {err}", temp_path.display(), ); } }); } let tar = tar.descend(Path::new(SNAPSHOT_FILES_PATH))?; for vector_data in segment.vector_data.values() { for file in vector_data.vector_index.borrow().files() { let stripped_path = 
strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("vector index file", &file, err))?; } } for file in vector_data.vector_storage.borrow().files() { let stripped_path = strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("vector storage file", &file, err))?; } } if let Some(quantized_vectors) = vector_data.quantized_vectors.borrow().as_ref() { for file in quantized_vectors.files() { let stripped_path = strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("quantized vectors file", &file, err))?; } } } } for file in segment.payload_index.borrow().files() { let stripped_path = strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("payload index file", &file, err))?; } } for file in segment.payload_storage.borrow().files() { let stripped_path = strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("payload storage file", &file, err))?; } } for file in segment.id_tracker.borrow().files() { let stripped_path = strip_prefix(&file, &segment.current_path)?; if include_if(stripped_path) { tar.blocking_append_file(&file, stripped_path) .map_err(|err| failed_to_add("id tracker file", &file, err))?; } } let segment_state_path = segment.current_path.join(SEGMENT_STATE_FILE); tar.blocking_append_file(&segment_state_path, Path::new(SEGMENT_STATE_FILE)) .map_err(|err| failed_to_add("segment state file", &segment_state_path, err))?; let version_file_path = segment.current_path.join(VERSION_FILE); tar.blocking_append_file(&version_file_path, Path::new(VERSION_FILE)) .map_err(|err| 
failed_to_add("segment version file", &version_file_path, err))?; Ok(()) } fn failed_to_add(what: &str, path: &Path, err: impl fmt::Display) -> OperationError { OperationError::service_error(format!( "failed to add {what} {} into snapshot: {err}", path.display(), )) } fn updated_files(old: &SegmentManifest, current: &SegmentManifest) -> HashSet<PathBuf> { // Compare two segment manifests, and return a list of files from `current` manifest, that // should be included into partial snapshot. let mut updated = HashSet::new(); for (path, current_version) in current.file_versions() { // Include file into partial snapshot if: // // 1. `old` manifest does not contain this file let Some(old_version) = old.file_version(path) else { updated.insert(path.to_path_buf()); continue; }; // 2. if `old` manifest contains this file and file/segment in `current` manifest is *newer*: // - if file is `Unversioned` in both manifests, compare segment versions // - if file is versioned in *one* of the manifests only, compare *file* version against // other *segment* version // - if file is versioned in both manifests, compare file versions if old_version < current_version { updated.insert(path.to_path_buf()); continue; } // 3. if `old` manifest contains this file and file/segment versions in both `old` and `current` manifests are 0 // - we can't distinguish between new empty (no operations applied yet) segment (version 0) // - and segment with operation 0 applied (also version 0) // - so if both files/segments are at version 0, we always include the file into snapshot if old_version == 0 && current_version == 0 { updated.insert(path.to_path_buf()); } } updated }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment/entry.rs
lib/segment/src/segment/entry.rs
use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicBool; use common::counter::hardware_counter::HardwareCounterCell; use common::types::TelemetryDetail; use fs_err as fs; use super::Segment; use crate::common::operation_error::{OperationError, OperationResult, SegmentFailedState}; use crate::common::{ Flusher, check_named_vectors, check_query_vectors, check_stopped, check_vector_name, }; use crate::data_types::build_index_result::BuildFieldIndexResult; use crate::data_types::facets::{FacetParams, FacetValue}; use crate::data_types::named_vectors::NamedVectors; use crate::data_types::order_by::{OrderBy, OrderValue}; use crate::data_types::query_context::{ FormulaContext, QueryContext, QueryIdfStats, SegmentQueryContext, }; use crate::data_types::vectors::{QueryVector, VectorInternal}; use crate::entry::entry_point::{SegmentEntry, SegmentFlushOrdering}; use crate::index::field_index::{CardinalityEstimation, FieldIndex}; use crate::index::{BuildIndexResult, PayloadIndex, VectorIndex}; use crate::json_path::JsonPath; use crate::payload_storage::PayloadStorage; use crate::telemetry::SegmentTelemetry; use crate::types::{ Filter, Payload, PayloadFieldSchema, PayloadIndexInfo, PayloadKeyType, PayloadKeyTypeRef, PointIdType, ScoredPoint, SearchParams, SegmentConfig, SegmentInfo, SegmentType, SeqNumberType, VectorDataInfo, VectorName, VectorNameBuf, WithPayload, WithVector, }; use crate::vector_storage::VectorStorage; /// This is a basic implementation of `SegmentEntry`, /// meaning that it implements the _actual_ operations with data and not any kind of proxy or wrapping impl SegmentEntry for Segment { fn version(&self) -> SeqNumberType { self.version.unwrap_or(0) } fn persistent_version(&self) -> SeqNumberType { (*self.persisted_version.lock()).unwrap_or(0) } fn is_proxy(&self) -> bool { false } fn point_version(&self, point_id: PointIdType) -> Option<SeqNumberType> { let id_tracker = self.id_tracker.borrow(); 
id_tracker .internal_id(point_id) .and_then(|internal_id| id_tracker.internal_version(internal_id)) } fn search_batch( &self, vector_name: &VectorName, query_vectors: &[&QueryVector], with_payload: &WithPayload, with_vector: &WithVector, filter: Option<&Filter>, top: usize, params: Option<&SearchParams>, query_context: &SegmentQueryContext, ) -> OperationResult<Vec<Vec<ScoredPoint>>> { check_query_vectors(vector_name, query_vectors, &self.segment_config)?; let vector_data = &self .vector_data .get(vector_name) .ok_or_else(|| OperationError::vector_name_not_exists(vector_name))?; let vector_query_context = query_context.get_vector_context(vector_name); let internal_results = vector_data.vector_index.borrow().search( query_vectors, filter, top, params, &vector_query_context, )?; check_stopped(&vector_query_context.is_stopped())?; let hw_counter = vector_query_context.hardware_counter(); internal_results .into_iter() .map(|internal_result| { self.process_search_result(internal_result, with_payload, with_vector, &hw_counter) }) .collect() } fn rescore_with_formula( &self, ctx: Arc<FormulaContext>, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<ScoredPoint>> { let FormulaContext { formula, prefetches_results, limit, is_stopped, } = &*ctx; let internal_results = self.do_rescore_with_formula( formula, prefetches_results, *limit, is_stopped, hw_counter, )?; self.process_search_result(internal_results, &false.into(), &false.into(), hw_counter) } fn upsert_point( &mut self, op_num: SeqNumberType, point_id: PointIdType, mut vectors: NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { debug_assert!(self.is_appendable()); check_named_vectors(&vectors, &self.segment_config)?; vectors.preprocess(|name| self.config().vector_data.get(name).unwrap()); let stored_internal_point = self.id_tracker.borrow().internal_id(point_id); self.handle_point_version_and_failure(op_num, stored_internal_point, |segment| { if let Some(existing_internal_id) = 
stored_internal_point { segment.replace_all_vectors(existing_internal_id, op_num, &vectors, hw_counter)?; Ok((true, Some(existing_internal_id))) } else { let new_index = segment.insert_new_vectors(point_id, op_num, &vectors, hw_counter)?; Ok((false, Some(new_index))) } }) } fn delete_point( &mut self, op_num: SeqNumberType, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let internal_id = self.id_tracker.borrow().internal_id(point_id); match internal_id { // Point does already not exist anymore None => Ok(false), Some(internal_id) => { self.handle_point_version_and_failure(op_num, Some(internal_id), |segment| { segment.delete_point_internal(internal_id, hw_counter)?; segment.version_tracker.set_payload(Some(op_num)); Ok((true, Some(internal_id))) }) } } } fn update_vectors( &mut self, op_num: SeqNumberType, point_id: PointIdType, mut vectors: NamedVectors, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { check_named_vectors(&vectors, &self.segment_config)?; vectors.preprocess(|name| self.config().vector_data.get(name).unwrap()); let internal_id = self.id_tracker.borrow().internal_id(point_id); match internal_id { None => Err(OperationError::PointIdError { missed_point_id: point_id, }), Some(internal_id) => { self.handle_point_version_and_failure(op_num, Some(internal_id), |segment| { segment.update_vectors(internal_id, op_num, vectors, hw_counter)?; Ok((true, Some(internal_id))) }) } } } fn delete_vector( &mut self, op_num: SeqNumberType, point_id: PointIdType, vector_name: &VectorName, ) -> OperationResult<bool> { check_vector_name(vector_name, &self.segment_config)?; let internal_id = self.id_tracker.borrow().internal_id(point_id); match internal_id { None => Err(OperationError::PointIdError { missed_point_id: point_id, }), Some(internal_id) => { self.handle_point_version_and_failure(op_num, Some(internal_id), |segment| { let vector_data = segment .vector_data .get(vector_name) .ok_or_else(|| 
OperationError::vector_name_not_exists(vector_name))?; let mut vector_storage = vector_data.vector_storage.borrow_mut(); let is_deleted = vector_storage.delete_vector(internal_id)?; if is_deleted { segment .version_tracker .set_vector(vector_name, Some(op_num)); } Ok((is_deleted, Some(internal_id))) }) } } } fn set_full_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, full_payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let internal_id = self.id_tracker.borrow().internal_id(point_id); self.handle_point_version_and_failure(op_num, internal_id, |segment| match internal_id { Some(internal_id) => { segment.payload_index.borrow_mut().overwrite_payload( internal_id, full_payload, hw_counter, )?; segment.version_tracker.set_payload(Some(op_num)); Ok((true, Some(internal_id))) } None => Err(OperationError::PointIdError { missed_point_id: point_id, }), }) } fn set_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, payload: &Payload, key: &Option<JsonPath>, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let internal_id = self.id_tracker.borrow().internal_id(point_id); self.handle_point_version_and_failure(op_num, internal_id, |segment| match internal_id { Some(internal_id) => { segment.payload_index.borrow_mut().set_payload( internal_id, payload, key, hw_counter, )?; segment.version_tracker.set_payload(Some(op_num)); Ok((true, Some(internal_id))) } None => Err(OperationError::PointIdError { missed_point_id: point_id, }), }) } fn delete_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, key: PayloadKeyTypeRef, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let internal_id = self.id_tracker.borrow().internal_id(point_id); self.handle_point_version_and_failure(op_num, internal_id, |segment| match internal_id { Some(internal_id) => { segment .payload_index .borrow_mut() .delete_payload(internal_id, key, hw_counter)?; segment.version_tracker.set_payload(Some(op_num)); 
Ok((true, Some(internal_id))) } None => Err(OperationError::PointIdError { missed_point_id: point_id, }), }) } fn clear_payload( &mut self, op_num: SeqNumberType, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<bool> { let internal_id = self.id_tracker.borrow().internal_id(point_id); self.handle_point_version_and_failure(op_num, internal_id, |segment| match internal_id { Some(internal_id) => { segment .payload_index .borrow_mut() .clear_payload(internal_id, hw_counter)?; segment.version_tracker.set_payload(Some(op_num)); Ok((true, Some(internal_id))) } None => Err(OperationError::PointIdError { missed_point_id: point_id, }), }) } fn vector( &self, vector_name: &VectorName, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Option<VectorInternal>> { let internal_id = self.lookup_internal_id(point_id)?; let vector_opt = self.vector_by_offset(vector_name, internal_id, hw_counter)?; Ok(vector_opt) } fn all_vectors( &self, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<NamedVectors<'_>> { let mut result = NamedVectors::default(); for vector_name in self.vector_data.keys() { if let Some(vec) = self.vector(vector_name, point_id, hw_counter)? { result.insert(vector_name.clone(), vec); } } Ok(result) } fn payload( &self, point_id: PointIdType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload> { let internal_id = self.lookup_internal_id(point_id)?; self.payload_by_offset(internal_id, hw_counter) } fn iter_points(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> { // Sorry for that, but I didn't find any way easier. 
// If you try simply return iterator - it won't work because AtomicRef should exist // If you try to make callback instead - you won't be able to create <dyn SegmentEntry> // Attempt to create return borrowed value along with iterator failed because of insane lifetimes unsafe { self.id_tracker.as_ptr().as_ref().unwrap().iter_external() } } fn read_filtered<'a>( &'a self, offset: Option<PointIdType>, limit: Option<usize>, filter: Option<&'a Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { match filter { None => self.read_by_id_stream(offset, limit), Some(condition) => { if self.should_pre_filter(condition, limit, hw_counter) { self.filtered_read_by_index(offset, limit, condition, is_stopped, hw_counter) } else { self.filtered_read_by_id_stream( offset, limit, condition, is_stopped, hw_counter, ) } } } } fn read_ordered_filtered<'a>( &'a self, limit: Option<usize>, filter: Option<&'a Filter>, order_by: &'a OrderBy, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<(OrderValue, PointIdType)>> { match filter { None => { self.filtered_read_by_value_stream(order_by, limit, None, is_stopped, hw_counter) } Some(filter) => { if self.should_pre_filter(filter, limit, hw_counter) { self.filtered_read_by_index_ordered( order_by, limit, filter, is_stopped, hw_counter, ) } else { self.filtered_read_by_value_stream( order_by, limit, Some(filter), is_stopped, hw_counter, ) } } } } fn read_random_filtered( &self, limit: usize, filter: Option<&Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> Vec<PointIdType> { match filter { None => self.read_by_random_id(limit), Some(condition) => { if self.should_pre_filter(condition, Some(limit), hw_counter) { self.filtered_read_by_index_shuffled(limit, condition, is_stopped, hw_counter) } else { self.filtered_read_by_random_stream(limit, condition, is_stopped, hw_counter) } } } } fn read_range(&self, from: Option<PointIdType>, to: 
Option<PointIdType>) -> Vec<PointIdType> { let id_tracker = self.id_tracker.borrow(); let iterator = id_tracker.iter_from(from).map(|x| x.0); match to { None => iterator.collect(), Some(to_id) => iterator.take_while(|x| *x < to_id).collect(), } } fn has_point(&self, point_id: PointIdType) -> bool { self.id_tracker.borrow().internal_id(point_id).is_some() } fn is_empty(&self) -> bool { self.id_tracker.borrow().total_point_count() == 0 } fn available_point_count(&self) -> usize { self.id_tracker.borrow().available_point_count() } fn deleted_point_count(&self) -> usize { self.id_tracker.borrow().deleted_point_count() } fn available_vectors_size_in_bytes(&self, vector_name: &VectorName) -> OperationResult<usize> { check_vector_name(vector_name, &self.segment_config)?; let vector_data = self .vector_data .get(vector_name) .ok_or_else(|| OperationError::vector_name_not_exists(vector_name))?; let size = vector_data .vector_index .borrow() .size_of_searchable_vectors_in_bytes(); Ok(size) } fn estimate_point_count<'a>( &'a self, filter: Option<&'a Filter>, hw_counter: &HardwareCounterCell, ) -> CardinalityEstimation { match filter { None => { let available = self.available_point_count(); CardinalityEstimation { primary_clauses: vec![], min: available, exp: available, max: available, } } Some(filter) => { let payload_index = self.payload_index.borrow(); payload_index.estimate_cardinality(filter, hw_counter) } } } fn unique_values( &self, key: &JsonPath, filter: Option<&Filter>, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<std::collections::BTreeSet<FacetValue>> { self.facet_values(key, filter, is_stopped, hw_counter) } fn facet( &self, request: &FacetParams, is_stopped: &AtomicBool, hw_counter: &HardwareCounterCell, ) -> OperationResult<HashMap<FacetValue, usize>> { self.approximate_facet(request, is_stopped, hw_counter) } fn segment_type(&self) -> SegmentType { self.segment_type } fn size_info(&self) -> SegmentInfo { let num_vectors = self 
.vector_data
            .values()
            .map(|data| data.vector_storage.borrow().available_vector_count())
            .sum();

        // Per-vector-storage stats; also accumulates the average vector size so the
        // total vectors size can be estimated as `avg_size * num_points` below.
        let mut total_average_vectors_size_bytes: usize = 0;
        let vector_data_info: HashMap<_, _> = self
            .vector_data
            .iter()
            .map(|(key, vector_data)| {
                let vector_storage = vector_data.vector_storage.borrow();
                let num_vectors = vector_storage.available_vector_count();
                let vector_index = vector_data.vector_index.borrow();
                let is_indexed = vector_index.is_index();
                // Guard against division by zero for empty storages.
                let average_vector_size_bytes = if num_vectors > 0 {
                    vector_index.size_of_searchable_vectors_in_bytes() / num_vectors
                } else {
                    0
                };
                total_average_vectors_size_bytes += average_vector_size_bytes;
                let vector_data_info = VectorDataInfo {
                    num_vectors,
                    num_indexed_vectors: if is_indexed {
                        vector_index.indexed_vector_count()
                    } else {
                        0
                    },
                    num_deleted_vectors: vector_storage.deleted_vector_count(),
                };
                (key.clone(), vector_data_info)
            })
            .collect();

        // Only an indexed segment reports indexed vectors; plain segments report 0.
        let num_indexed_vectors = if self.segment_type == SegmentType::Indexed {
            self.vector_data
                .values()
                .map(|data| data.vector_index.borrow().indexed_vector_count())
                .sum()
        } else {
            0
        };

        let num_points = self.available_point_count();
        let vectors_size_bytes = total_average_vectors_size_bytes * num_points;

        // Unwrap and default to 0 here because the RocksDB storage is the only fallible one,
        // and we will remove it eventually.
        let payloads_size_bytes = self
            .payload_storage
            .borrow()
            .get_storage_size_bytes()
            .unwrap_or(0);

        SegmentInfo {
            segment_type: self.segment_type,
            num_vectors,
            num_indexed_vectors,
            num_points: self.available_point_count(),
            num_deleted_vectors: self.deleted_point_count(),
            vectors_size_bytes,  // Considers vector storage, but not indices
            payloads_size_bytes, // Considers payload storage, but not indices
            ram_usage_bytes: 0,  // ToDo: Implement
            disk_usage_bytes: 0, // ToDo: Implement
            is_appendable: self.appendable_flag,
            index_schema: HashMap::new(),
            vector_data: vector_data_info,
        }
    }

    /// Full segment info: `size_info` plus the payload index schema with per-field
    /// indexed point counts.
    fn info(&self) -> SegmentInfo {
        let payload_index = self.payload_index.borrow();
        let schema = payload_index
            .indexed_fields()
            .into_iter()
            .map(|(key, index_schema)| {
                let points_count = payload_index.indexed_points(&key);
                let index_info = PayloadIndexInfo::new(index_schema, points_count);
                (key, index_info)
            })
            .collect();

        let mut info = self.size_info();
        info.index_schema = schema;
        info
    }

    fn config(&self) -> &SegmentConfig {
        &self.segment_config
    }

    fn is_appendable(&self) -> bool {
        self.appendable_flag
    }

    /// Flush ordering class is derived directly from appendability.
    fn flush_ordering(&self) -> SegmentFlushOrdering {
        if self.is_appendable() {
            SegmentFlushOrdering::Appendable
        } else {
            SegmentFlushOrdering::NonAppendable
        }
    }

    /// Build a deferred flush operation, or `None` when nothing needs flushing.
    ///
    /// Returns `None` if the segment is empty (no version yet), or if it is already
    /// persisted at the current version and `force` is not set. All flusher closures
    /// are captured *now*, so the returned closure can run without borrowing `self`.
    fn flusher(&self, force: bool) -> Option<Flusher> {
        let current_persisted_version: Option<SeqNumberType> = *self.persisted_version.lock();
        match (self.version, current_persisted_version) {
            (None, _) => {
                // Segment is empty, nothing to flush
                return None;
            }
            (Some(version), Some(persisted_version)) => {
                if !force && version == persisted_version {
                    log::trace!("not flushing because version == persisted_version");
                    // Segment is already flushed
                    return None;
                }
            }
            (_, _) => {}
        }

        let vector_storage_flushers: Vec<_> = self
            .vector_data
            .values()
            .map(|v| v.vector_storage.borrow().flusher())
            .collect();
        let quantization_flushers: Vec<_> = self
            .vector_data
            .values()
            .filter_map(|v| v.quantized_vectors.borrow().as_ref().map(|q| q.flusher()))
.collect();
        let state = self.get_state();
        let current_path = self.current_path.clone();
        let id_tracker_mapping_flusher = self.id_tracker.borrow().mapping_flusher();
        let payload_index_flusher = self.payload_index.borrow().flusher();
        let id_tracker_versions_flusher = self.id_tracker.borrow().versions_flusher();
        let persisted_version = self.persisted_version.clone();

        // Flush order is important:
        //
        // 1. Flush id mapping. So during recovery the point will be recovered in proper segment.
        // 2. Flush vectors and payloads.
        // 3. Flush id versions last. So presence of version indicates that all other data is up-to-date.
        //
        // Example of recovery from WAL in case of partial flush:
        //
        // In-memory state:
        //
        //      Segment 1                      Segment 2
        //
        //  ID-mapping     vst.1          ID-mapping     vst.2
        //  ext     int
        // ┌───┐   ┌───┐   ┌───┐         ┌───┐   ┌───┐   ┌───┐
        // │100├───┤1  │   │1  │         │300├───┤1  │   │1  │
        // └───┘   └───┘   │2  │         └───┘   └───┘   │2  │
        //                 │   │                         │   │
        // ┌───┐   ┌───┐   │   │         ┌───┐   ┌───┐   │   │
        // │200├───┤2  │   │   │         │400├───┤2  │   │   │
        // └───┘   └───┘   └───┘         └───┘   └───┘   └───┘
        //
        //
        // ext - external id
        // int - internal id
        // vst - vector storage
        //
        // ─────────────────────────────────────────────────
        // After flush, segments could be partially preserved:
        //
        // ┌───┐   ┌───┐   ┌───┐         ┌───┐   ┌───┐   ┌───┐
        // │100├───┤1  │   │ 1 │         │300├───┤1  │   │ * │
        // └───┘   └───┘   │   │         └───┘   └───┘   │ * │
        //                 │ 3 │                         │   │
        //                 ┌───┐   ┌───┐                 │   │
        //                 │400├───┤2  │                 │   │
        //                 └───┘   └───┘                 └───┘
        // WAL:      ▲
        //           │         ┌───┐   ┌───┐
        // 100───────┘  ┌────► │200├───┤3  │
        // │            │      └───┘   └───┘
        // 200──────────┘
        //
        // 300
        //
        // 400

        // Guard against flushing a segment that is being dropped concurrently.
        let is_alive_flush_lock = self.is_alive_flush_lock.handle();

        let flush_op = move || {
            let Some(is_alive_flush_guard) = is_alive_flush_lock.lock_if_alive() else {
                // Segment is removed, skip flush
                return Ok(());
            };
            // Flush mapping first to prevent having orphan internal ids.
            id_tracker_mapping_flusher().map_err(|err| {
                OperationError::service_error(format!("Failed to flush id_tracker mapping: {err}"))
            })?;
            for vector_storage_flusher in vector_storage_flushers {
                vector_storage_flusher().map_err(|err| {
                    OperationError::service_error(format!("Failed to flush vector_storage: {err}"))
                })?;
            }
            for quantization_flusher in quantization_flushers {
                quantization_flusher().map_err(|err| {
                    OperationError::service_error(format!(
                        "Failed to flush quantized vectors: {err}"
                    ))
                })?;
            }
            payload_index_flusher().map_err(|err| {
                OperationError::service_error(format!("Failed to flush payload_index: {err}"))
            })?;
            // Id Tracker contains versions of points. We need to flush it after vector_storage
            // and payload_index flush.
            // This is because vector_storage and payload_index flush are not atomic.
            // If payload or vector flush fails, we will be able to recover data from WAL.
            // If Id Tracker flush fails, we are also able to recover data from WAL
            // by simply overriding data in vector and payload storages.
            // Once versions are saved - points are considered persisted.
            id_tracker_versions_flusher().map_err(|err| {
                OperationError::service_error(format!("Failed to flush id_tracker versions: {err}"))
            })?;

            // `Option<SeqNumberType>` comparison: `None` sorts below any `Some(_)`.
            let mut current_persisted_version_guard = persisted_version.lock();
            let persisted_version_value_opt = *current_persisted_version_guard;
            if persisted_version_value_opt > state.version {
                debug_assert!(
                    persisted_version_value_opt.is_some(),
                    "Persisted version should never be None if it's greater than state.version"
                );
                // Another flush beat us to it
                return Ok(());
            }

            Self::save_state(&state, &current_path).map_err(|err| {
                OperationError::service_error(format!("Failed to flush segment state: {err}"))
            })?;

            *current_persisted_version_guard = state.version;
            debug_assert!(state.version.is_some());

            // Keep the guard till the end of the flush to prevent concurrent drop/flushes
            drop(is_alive_flush_guard);
            Ok(())
        };

        Some(Box::new(flush_op))
    }

    /// Destroy the segment's on-disk data.
    ///
    /// Renames the directory to `*.deleted` first so a crash mid-removal leaves a
    /// clearly-dead directory rather than a half-deleted live one.
    fn drop_data(self) -> OperationResult<()> {
        let current_path = self.current_path.clone();
        drop(self);
        let mut deleted_path = current_path.clone();
        deleted_path.set_extension("deleted");
        fs::rename(&current_path, &deleted_path)?;
        fs::remove_dir_all(&deleted_path).map_err(|err| {
            OperationError::service_error(format!(
                "Can't remove segment data at {}, error: {}",
                deleted_path.to_str().unwrap_or_default(),
                err
            ))
        })
    }

    fn data_path(&self) -> PathBuf {
        self.current_path.clone()
    }

    /// Drop the payload index for `key` and clear its tracked schema version.
    // NOTE(review): `op_num` is `u64` here but `SeqNumberType` in the sibling methods —
    // presumably the same underlying type; worth unifying at the trait definition.
    fn delete_field_index(&mut self, op_num: u64, key: PayloadKeyTypeRef) -> OperationResult<bool> {
        self.handle_segment_version_and_failure(op_num, |segment| {
            segment.payload_index.borrow_mut().drop_index(key)?;
            segment.version_tracker.set_payload_index_schema(key, None);
            Ok(true)
        })
    }

    /// Drop the payload index for `key` only if its schema differs from `field_schema`.
    /// Always returns `Ok(true)` on success, whether or not anything was dropped.
    fn delete_field_index_if_incompatible(
        &mut self,
        op_num: SeqNumberType,
        key: PayloadKeyTypeRef,
        field_schema: &PayloadFieldSchema,
    ) -> OperationResult<bool> {
        self.handle_segment_version_and_failure(op_num, |segment| {
            let is_incompatible = segment
                .payload_index
                .borrow_mut()
                .drop_index_if_incompatible(key, field_schema)?;
            if is_incompatible {
                segment.version_tracker.set_payload_index_schema(key, None);
            }
            Ok(true)
        })
    }

    /// Build (but do not apply) a payload field index for `key`.
    ///
    /// Skips work if this segment already advanced past `op_num`, if the index already
    /// exists, or if an incompatible schema is still in place (the caller is expected
    /// to have removed it first).
    fn build_field_index(
        &self,
        op_num: SeqNumberType,
        key: PayloadKeyTypeRef,
        field_type: &PayloadFieldSchema,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<BuildFieldIndexResult> {
        // Check version without updating it
        if self.version.unwrap_or(0) > op_num {
            return Ok(BuildFieldIndexResult::SkippedByVersion);
        }
        let field_index = match self
            .payload_index
            .borrow()
            .build_index(key, field_type, hw_counter)?
        {
            BuildIndexResult::Built(indexes) => indexes,
            BuildIndexResult::AlreadyBuilt => {
                return Ok(BuildFieldIndexResult::AlreadyExists);
            }
            BuildIndexResult::IncompatibleSchema => {
                // This function expects that incompatible schema is already removed
                return Ok(BuildFieldIndexResult::IncompatibleSchema);
            }
        };

        Ok(BuildFieldIndexResult::Built {
            indexes: field_index,
            schema: field_type.clone(),
        })
    }

    /// Apply a previously built field index and record its schema version.
    fn apply_field_index(
        &mut self,
        op_num: SeqNumberType,
        key: PayloadKeyType,
        schema: PayloadFieldSchema,
        field_index: Vec<FieldIndex>,
    ) -> OperationResult<bool> {
        self.handle_segment_version_and_failure(op_num, |segment| {
            segment
                .payload_index
                .borrow_mut()
                .apply_index(key.clone(), schema, field_index)?;
            segment
                .version_tracker
                .set_payload_index_schema(&key, Some(op_num));
            Ok(true)
        })
    }

    fn get_indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema> {
        self.payload_index.borrow().indexed_fields()
    }

    fn check_error(&self) -> Option<SegmentFailedState> {
        self.error_status.clone()
    }

    fn vector_names(&self) -> HashSet<VectorNameBuf> {
        self.vector_data.keys().cloned().collect()
    }

    // Body truncated in this view.
    fn get_telemetry_data(&self, detail: TelemetryDetail) -> SegmentTelemetry {
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/json_path/parse.rs
lib/segment/src/json_path/parse.rs
use std::str::FromStr; use nom::branch::alt; use nom::bytes::complete::tag; use nom::character::complete::{char, digit1, none_of, satisfy}; use nom::combinator::{all_consuming, map_res, recognize}; use nom::multi::{many0, many1}; use nom::sequence::{delimited, preceded}; use nom::{IResult, Parser}; use super::{JsonPath, JsonPathItem}; impl FromStr for JsonPath { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { match json_path(s) { Ok(("", path)) => Ok(path), _ => Err(()), } } } pub fn key_needs_quoting(s: &str) -> bool { all_consuming(raw_str).parse(s).is_err() } fn json_path(input: &str) -> IResult<&str, JsonPath> { let (input, first_key) = alt((raw_str.map(str::to_string), quoted_str)).parse(input)?; let (input, rest) = many0(alt(( (preceded(char('.'), raw_str).map(|s| JsonPathItem::Key(s.to_string()))), (preceded(char('.'), quoted_str).map(JsonPathItem::Key)), (delimited(char('['), number, char(']')).map(JsonPathItem::Index)), (tag("[]").map(|_| JsonPathItem::WildcardIndex)), ))) .parse(input)?; Ok((input, JsonPath { first_key, rest })) } fn raw_str(input: &str) -> IResult<&str, &str> { recognize(many1( satisfy(|c: char| c.is_alphanumeric() || c == '_' || c == '-').map(|_: char| ()), )) .parse(input) } fn quoted_str(input: &str) -> IResult<&str, String> { let (input, _) = char('"')(input)?; let (input, rest) = many0(none_of("\\\"")).parse(input)?; let (input, _) = char('"')(input)?; Ok((input, rest.iter().collect())) } fn number(input: &str) -> IResult<&str, usize> { map_res(recognize(digit1), str::parse).parse(input) } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse() { assert!("".parse::<JsonPath>().is_err()); assert_eq!( "foo".parse(), Ok(JsonPath { first_key: "foo".to_string(), rest: vec![], }) ); assert_eq!( "foo[1][50].bar-baz[].\"qux[.]quux\"".parse(), Ok(JsonPath { first_key: "foo".to_string(), rest: vec![ JsonPathItem::Index(1), JsonPathItem::Index(50), JsonPathItem::Key("bar-baz".to_string()), JsonPathItem::WildcardIndex, 
JsonPathItem::Key("qux[.]quux".to_string()), ], }) ); } #[test] fn test_key_needs_quoting() { // Key needs no quoting assert!(!key_needs_quoting("f")); assert!(!key_needs_quoting("foo")); assert!(!key_needs_quoting("foo_123-bar")); // Key needs quoting assert!(key_needs_quoting("")); assert!(key_needs_quoting(" foo")); assert!(key_needs_quoting("foo ")); assert!(key_needs_quoting("foo bar")); assert!(key_needs_quoting("foo bar baz")); assert!(key_needs_quoting("foo.bar.baz")); assert!(key_needs_quoting("foo[]")); assert!(key_needs_quoting("foo[0]")); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/json_path/mod.rs
lib/segment/src/json_path/mod.rs
//! The [`JsonPath`] type: a parsed payload-field path (`foo.bar[0].baz[]`) plus
//! operations for getting, setting, removing, and filtering values in JSON maps.

use std::fmt::{Display, Formatter};
use std::hash::Hash;

use data_encoding::BASE32_DNSSEC;
use itertools::Itertools as _;
use schemars::JsonSchema;
use schemars::r#gen::SchemaGenerator;
use schemars::schema::Schema;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use sha2::{Digest as _, Sha256};

use crate::common::anonymize::Anonymize;
use crate::common::utils::{MultiValue, merge_map};

mod parse;

/// A parsed JSON path: a mandatory first object key followed by further items.
#[derive(Debug, Clone, PartialEq, Eq, Anonymize, Ord, Hash, PartialOrd)]
pub struct JsonPath {
    // First component; always an object key (paths cannot start with `[...]`).
    pub first_key: String,
    // Remaining components, in order.
    pub rest: Vec<JsonPathItem>,
}

#[derive(Debug, PartialEq, Clone, Eq, Anonymize, Ord, Hash, PartialOrd)]
pub enum JsonPathItem {
    /// A key in a JSON object, e.g. `.foo`
    Key(String),
    /// An index in a JSON array, e.g. `[3]`
    #[anonymize(false)]
    Index(usize),
    /// All indices in a JSON array, i.e. `[]`
    WildcardIndex,
}

impl JsonPath {
    /// Create a new `JsonPath` from a string. For production code, use `FromStr::parse` instead.
    ///
    /// # Panics
    ///
    /// Panics if the string is not a valid path. Thus, this function should only be used in tests.
    #[cfg(feature = "testing")]
    pub fn new(p: &str) -> Self {
        p.parse().unwrap()
    }

    /// Get values at a given JSON path from a JSON map.
    pub fn value_get<'a>(
        &self,
        json_map: &'a serde_json::Map<String, Value>,
    ) -> MultiValue<&'a Value> {
        let mut result = MultiValue::new();
        if let Some(value) = json_map.get(&self.first_key) {
            value_get(&self.rest, Some(value), &mut result);
        }
        result
    }

    /// Set values at a given JSON path in a JSON map.
    pub fn value_set<'a>(
        path: Option<&Self>,
        dest: &'a mut serde_json::Map<String, Value>,
        src: &'a serde_json::Map<String, Value>,
    ) {
        if let Some(path) = path {
            value_set_map(&path.first_key, &path.rest, dest, src);
        } else {
            // No path: merge `src` into the top level of `dest`.
            merge_map(dest, src);
        }
    }

    /// Remove values at a given JSON path from a JSON map. Returns values that were removed.
    pub fn value_remove(&self, json_map: &mut serde_json::Map<String, Value>) -> MultiValue<Value> {
        let mut result = MultiValue::new();
        if let Some((rest1, restn)) = self.rest.split_first() {
            if let Some(value) = json_map.get_mut(&self.first_key) {
                value_remove(rest1, restn, value, &mut result);
            }
        } else if let Some(value) = json_map.remove(&self.first_key) {
            result.push(value);
        }
        result
    }

    /// Filter values in a JSON map based on a predicate.
    pub fn value_filter(
        json_map: &serde_json::Map<String, Value>,
        filter: impl Fn(&Self, &Value) -> bool,
    ) -> serde_json::Map<String, Value> {
        let mut new_map = serde_json::Map::new();
        // Reuse one path buffer across all keys instead of allocating per entry.
        let mut path = JsonPath {
            first_key: "".to_string(),
            rest: Vec::new(),
        };
        for (key, value) in json_map.iter() {
            path.first_key.clone_from(key);
            if filter(&path, value) {
                let value = run_filter(&mut path, value, &filter);
                new_map.insert(key.clone(), value);
            }
        }
        new_map
    }

    /// Remove the wildcard suffix from the path, if it exists.
    /// E.g. `a.b[]` -> `a.b`.
    pub fn strip_wildcard_suffix(&self) -> Self {
        match self.rest.split_last() {
            Some((JsonPathItem::WildcardIndex, rest)) => JsonPath {
                first_key: self.first_key.clone(),
                rest: rest.to_vec(),
            },
            _ => self.clone(),
        }
    }

    /// If `self` starts with `prefix`, returns a new path with the prefix removed.
    pub fn strip_prefix(&self, prefix: &Self) -> Option<Self> {
        if self.first_key != prefix.first_key {
            return None;
        }
        let mut self_it = self.rest.iter().peekable();
        let mut prefix_it = prefix.rest.iter().peekable();
        loop {
            match (self_it.peek(), prefix_it.peek()) {
                (Some(self_item), Some(prefix_item)) if self_item == prefix_item => {
                    self_it.next();
                    prefix_it.next();
                }
                (Some(_), Some(_)) => return None,
                (Some(JsonPathItem::Key(k)), None) => {
                    // Prefix consumed; the next item becomes the new first key.
                    return Some(JsonPath {
                        first_key: k.clone(),
                        rest: self_it.skip(1).cloned().collect(),
                    });
                }
                (Some(_), None) => {
                    // We don't support json paths starting with `[`. So
                    // `strip_prefix("foo[]", "foo")` is not possible.
                    return None;
                }
                (None, Some(_)) => return None,
                (None, None) => {
                    // Paths are equal. We don't support empty json paths.
                    return None;
                }
            }
        }
    }

    /// Extend the path with another path.
    pub fn extend(&self, other: &Self) -> Self {
        let mut rest = Vec::with_capacity(self.rest.len() + 1 + other.rest.len());
        rest.extend_from_slice(&self.rest);
        rest.push(JsonPathItem::Key(other.first_key.clone()));
        rest.extend_from_slice(&other.rest);
        JsonPath {
            first_key: self.first_key.clone(),
            rest,
        }
    }

    /// Returns a new path with an array key appended to the end.
    /// E.g. `a.b` -> `a.b[]`.
    pub fn array_key(&self) -> Self {
        let mut result = JsonPath {
            first_key: self.first_key.clone(),
            rest: Vec::with_capacity(self.rest.len() + 1),
        };
        result.rest.extend_from_slice(&self.rest);
        // Idempotent: don't double an existing trailing `[]`.
        if result.rest.last() != Some(&JsonPathItem::WildcardIndex) {
            result.rest.push(JsonPathItem::WildcardIndex);
        }
        result
    }

    pub fn has_wildcard_suffix(&self) -> bool {
        self.rest.last() == Some(&JsonPathItem::WildcardIndex)
    }

    /// Check if a path is included in a list of patterns.
    ///
    /// Basically, it checks if either the pattern or path is a prefix of the other.
    pub fn check_include_pattern(&self, pattern: &Self) -> bool {
        self.first_key == pattern.first_key
            && self.rest.iter().zip(&pattern.rest).all(|(a, b)| a == b)
    }

    /// Check if a path should be excluded by a pattern.
    ///
    /// Basically, it checks if pattern is a prefix of path, but not the other way around.
    pub fn check_exclude_pattern(&self, pattern: &Self) -> bool {
        self.first_key == pattern.first_key && pattern.rest.starts_with(&self.rest)
    }

    pub fn extend_or_new(base: Option<&Self>, other: &Self) -> Self {
        base.map_or_else(|| other.clone(), |base| base.extend(other))
    }

    /// Check if a path is a compatible prefix of another path or vice versa.
    pub fn compatible(&self, other: &Self) -> bool {
        if self.first_key != other.first_key {
            return false;
        }
        self.rest
            .iter()
            .zip(&other.rest)
            .all(|(a, b)| match (a, b) {
                (JsonPathItem::Key(a), JsonPathItem::Key(b)) => a == b,
                (JsonPathItem::Index(a), JsonPathItem::Index(b)) => a == b,
                (JsonPathItem::WildcardIndex, JsonPathItem::WildcardIndex) => true,
                // A concrete index is compatible with a wildcard in either direction.
                (JsonPathItem::Index(_), JsonPathItem::WildcardIndex) => true,
                (JsonPathItem::WildcardIndex, JsonPathItem::Index(_)) => true,
                _ => false,
            })
    }

    /// Check if the path will be affected by a call to `path_to_remove.value_remove(_)`.
    pub fn is_affected_by_value_remove(&self, path_to_remove: &JsonPath) -> bool {
        // If we have, e.g., indexed field "a.b", then it is not safe to delete any of "a",
        // "a.b", or "a.b.c".
        path_to_remove.compatible(self)
    }

    /// Check if the path will be affected by a call to `path_to_set.value_set(_, payload)`.
    pub fn is_affected_by_value_set(
        &self,
        payload: &serde_json::Map<String, Value>,
        path_to_set: Option<&JsonPath>,
    ) -> bool {
        // Suppose we have a `path_to_set=a.b.c` and a `payload={"x": 1, "y": 2, "z": {"q": 0}}`.
        // It's safe to set the payload if the indexed fields doesn't intersect[^1] with the
        // following paths:
        // - `a.b.c.x`
        // - `a.b.c.y`
        // - `a.b.c.z`
        // Note that only top-level keys of the payload are considered.
        //
        // [^1]: In simple cases, we consider two paths to intersect if one of them is a prefix of
        // the other. For example, `a.b` and `a.b.c` intersect, but `a.b` and `a.c` don't. More
        // nuanced cases include wildcard indexes, e.g., `a[0].b` and `a[].b` intersect.
        // Additionally, we consider path with incompatible types (e.g. `a[0]` and `a.b`) to
        // intersect because `value_set` could override the subtree by replacing an array with an
        // object (or vice versa), deleting indexed fields.
        let Some(path_to_set) = path_to_set else {
            return payload.contains_key(&self.first_key);
        };
        if self.first_key != path_to_set.first_key {
            return false;
        }
        let mut it_a = self.rest.iter();
        let mut it_b = path_to_set.rest.iter();
        loop {
            let (a, b) = match (it_a.next(), it_b.next()) {
                (Some(a), Some(b)) => (a, b),
                (None, _) => return true, // indexed_path is a compatible prefix of path_to_set
                (Some(JsonPathItem::Key(a)), None) => return payload.contains_key(a),
                (Some(JsonPathItem::Index(_)), None) => return true,
                (Some(JsonPathItem::WildcardIndex), None) => return true,
            };
            match (a, b) {
                // Paths items match each other => continue.
                (JsonPathItem::Key(a), JsonPathItem::Key(b)) if a == b => (),
                (JsonPathItem::Index(a), JsonPathItem::Index(b)) if a == b => (),
                (JsonPathItem::WildcardIndex, JsonPathItem::WildcardIndex) => (),
                (JsonPathItem::Index(_), JsonPathItem::WildcardIndex) => (),
                (JsonPathItem::WildcardIndex, JsonPathItem::Index(_)) => (),
                // Paths diverge, but their types are compatible, e.g. `a.b` and `a.c`, or `a[0]`
                // and `a[1]`. This means that payload and indexed fields point to different
                // subtrees, so it's safe to set the payload.
                (JsonPathItem::Key(_), JsonPathItem::Key(_)) => return false,
                (JsonPathItem::Index(_), JsonPathItem::Index(_)) => return false,
                // Types are not compatible. This means that `value_set` could override the
                // subtree, deleting indexed fields.
                (JsonPathItem::Key(_), JsonPathItem::Index(_) | JsonPathItem::WildcardIndex) => {
                    return true;
                }
                (JsonPathItem::Index(_) | JsonPathItem::WildcardIndex, JsonPathItem::Key(_)) => {
                    return true;
                }
            }
        }
    }

    /// Convert the path into a string suitable for use as a filename by adhering to the following
    /// restrictions: max length, limited character set, but still being relatively unique.
    pub fn filename(&self) -> String {
        const MAX_LENGTH: usize = 33;
        const HASH_LENGTH: usize = 24; // In base32 characters, i.e. 5 bits per character.
        let text = self.to_string();
        let mut result = String::with_capacity(MAX_LENGTH);
        // Hash prefix guarantees (near-)uniqueness even after the lossy sanitization below.
        BASE32_DNSSEC.encode_append(
            &Sha256::digest(text.as_bytes())[0..(HASH_LENGTH * 5).div_ceil(8)],
            &mut result,
        );
        debug_assert_eq!(result.len(), HASH_LENGTH);
        result.push('-');
        // Human-readable suffix: lowercase, non-filename chars collapsed to single `_`.
        text.chars()
            .map(|c| match c {
                'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_' => c.to_ascii_lowercase(),
                _ => '_',
            })
            .dedup_by(|&a, &b| a == '_' && b == '_')
            .take(MAX_LENGTH - result.len())
            .for_each(|c| result.push(c));
        debug_assert!(result.len() <= MAX_LENGTH);
        result
    }
}

// Recursive worker for `JsonPath::value_get`: walks `path` through `value`,
// pushing every match (wildcards can yield several) into `result`.
fn value_get<'a>(
    path: &[JsonPathItem],
    value: Option<&'a Value>,
    result: &mut MultiValue<&'a Value>,
) {
    if let Some((head, tail)) = path.split_first() {
        match (head, value) {
            (JsonPathItem::Key(key), Some(Value::Object(map))) => {
                value_get(tail, map.get(key), result)
            }
            (JsonPathItem::Index(index), Some(Value::Array(array))) => {
                if let Some(value) = array.get(*index) {
                    value_get(tail, Some(value), result);
                }
            }
            (JsonPathItem::WildcardIndex, Some(Value::Array(array))) => array
                .iter()
                .for_each(|value| value_get(tail, Some(value), result)),
            _ => (),
        }
    } else if let Some(value) = value {
        result.push(value);
    }
}

// Recursive worker for `JsonPath::value_set`: descends `path` into `dest`,
// replacing non-matching intermediate values with empty objects/arrays as needed,
// and finally merges `src` into the target object.
fn value_set(path: &[JsonPathItem], dest: &mut Value, src: &serde_json::Map<String, Value>) {
    if let Some((head, rest)) = path.split_first() {
        match head {
            JsonPathItem::Key(key) => {
                if !dest.is_object() {
                    *dest = Value::Object(serde_json::Map::new());
                }
                let map = dest.as_object_mut().unwrap();
                value_set_map(key, rest, map, src);
            }
            &JsonPathItem::Index(i) => {
                if !dest.is_array() {
                    *dest = Value::Array(Vec::new());
                }
                let array = dest.as_array_mut().unwrap();
                // Out-of-range indices are ignored (the array is not grown).
                if let Some(v) = array.get_mut(i) {
                    value_set(rest, v, src);
                }
            }
            JsonPathItem::WildcardIndex => {
                if dest.is_array() {
                    for value in dest.as_array_mut().unwrap() {
                        value_set(rest, value, src);
                    }
                } else {
                    *dest = Value::Array(Vec::new());
                }
            }
        }
    } else {
        if !dest.is_object() {
            *dest = Value::Object(serde_json::Map::new());
        }
        let map = dest.as_object_mut().unwrap();
        merge_map(map, src);
    }
}

// Like `value_set`, but starting from a named key in an object map; inserts the
// key (building the value from `Value::Null`) if it is absent.
fn value_set_map(
    key: &str,
    path: &[JsonPathItem],
    dest_map: &mut serde_json::Map<String, Value>,
    src: &serde_json::Map<String, Value>,
) {
    if let Some(value) = dest_map.get_mut(key) {
        value_set(path, value, src);
    } else {
        let mut value = Value::Null;
        value_set(path, &mut value, src);
        dest_map.insert(key.to_string(), value);
    }
}

// Recursive worker for `JsonPath::value_remove`: `head` is the current path item,
// `rest` the remaining items; removed values are pushed into `result`.
fn value_remove(
    head: &JsonPathItem,
    rest: &[JsonPathItem],
    value: &mut Value,
    result: &mut MultiValue<Value>,
) {
    if let Some((rest1, restn)) = rest.split_first() {
        match (head, value) {
            (JsonPathItem::Key(k), Value::Object(map)) => {
                if let Some(value) = map.get_mut(k) {
                    value_remove(rest1, restn, value, result);
                }
            }
            (JsonPathItem::Index(i), Value::Array(array)) => {
                if let Some(value) = array.get_mut(*i) {
                    value_remove(rest1, restn, value, result);
                }
            }
            (JsonPathItem::WildcardIndex, Value::Array(array)) => {
                for value in array {
                    value_remove(rest1, restn, value, result);
                }
            }
            _ => (),
        }
    } else {
        match (head, value) {
            (JsonPathItem::Key(k), Value::Object(map)) => {
                if let Some(v) = map.remove(k) {
                    result.push(v);
                }
            }
            (JsonPathItem::Index(_), Value::Array(_)) => {
                // Deleting array indices is not idempotent, so we don't support it.
} (JsonPathItem::WildcardIndex, Value::Array(array)) => { result.push(Value::Array(std::mem::take(array))); } _ => (), } } } fn run_filter<'a>( path: &mut JsonPath, value: &'a Value, filter: &dyn Fn(&JsonPath, &Value) -> bool, ) -> Value { match &value { Value::Null => value.clone(), Value::Bool(_) => value.clone(), Value::Number(_) => value.clone(), Value::String(_) => value.clone(), Value::Array(array) => { let mut new_array = Vec::new(); path.rest.push(JsonPathItem::WildcardIndex); for value in array.iter() { if filter(path, value) { let value = run_filter(path, value, filter); new_array.push(value); } } path.rest.pop(); Value::Array(new_array) } Value::Object(object) => { let mut new_object = serde_json::Map::new(); for (key, value) in object.iter() { path.rest.push(JsonPathItem::Key(key.clone())); if filter(path, value) { let value = run_filter(path, value, filter); new_object.insert(key.clone(), value); } path.rest.pop(); } Value::Object(new_object) } } } impl Display for JsonPath { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let write_key = |f: &mut Formatter<'_>, key: &str| { if parse::key_needs_quoting(key) { write!(f, "\"{key}\"") } else { f.write_str(key) } }; write_key(f, &self.first_key)?; for item in &self.rest { match item { JsonPathItem::Key(key) => { f.write_str(".")?; write_key(f, key)?; } JsonPathItem::Index(index) => write!(f, "[{index}]")?, JsonPathItem::WildcardIndex => f.write_str("[]")?, } } Ok(()) } } impl TryFrom<&str> for JsonPath { type Error = (); fn try_from(value: &str) -> Result<Self, Self::Error> { value.parse() } } impl Serialize for JsonPath { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { serializer.serialize_str(&self.to_string()) } } impl<'de> Deserialize<'de> for JsonPath { fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { let string = String::deserialize(deserializer)?; string .parse() .map_err(|_| serde::de::Error::custom(format!("Invalid json 
path: \'{string}\'"))) } } impl JsonSchema for JsonPath { fn is_referenceable() -> bool { false } fn schema_name() -> String { "JsonPath".to_string() } fn json_schema(generator: &mut SchemaGenerator) -> Schema { String::json_schema(generator) } } #[cfg(test)] mod tests { use super::*; use crate::common::utils::check_is_empty; fn json(str: &str) -> serde_json::Map<String, Value> { serde_json::from_str(str).unwrap() } #[test] fn test_is_affected_by_value_set() { assert!(!JsonPath::new("a").is_affected_by_value_set(&json(r#"{"b": 1, "c": 1}"#), None)); assert!(JsonPath::new("a").is_affected_by_value_set(&json(r#"{"a": 1, "b": 1}"#), None)); assert!(JsonPath::new("a.x").is_affected_by_value_set(&json(r#"{"a": {"y": 1}}"#), None)); assert!(!JsonPath::new("a.x").is_affected_by_value_set(&json(r#"{"b": {"x": 1}}"#), None)); } #[test] fn test_is_affected_by_value_remove() { assert!(JsonPath::new("a").is_affected_by_value_remove(&JsonPath::new("a"))); assert!(!JsonPath::new("a").is_affected_by_value_remove(&JsonPath::new("b"))); assert!(JsonPath::new("a.b").is_affected_by_value_remove(&JsonPath::new("a"))); assert!(JsonPath::new("a.b").is_affected_by_value_remove(&JsonPath::new("a.b"))); assert!(JsonPath::new("a.b").is_affected_by_value_remove(&JsonPath::new("a.b.c"))); } /// This test checks that `is_affected_by_value_set` and `is_affected_by_value_remove` don't /// produce false negatives. /// The penalty for a false positive is just degraded performance, but the penalty for a false /// negative is inconsistency in the indexed fields. 
#[test] fn test_no_false_negatives() { let paths: Vec<JsonPath> = ["a", "a.a", "a[]", "a[0]", "a[0].a", "a[0].a[]"] .iter() .map(|s| s.parse().unwrap()) .collect(); let payloads = vec![ json(r#"{"b": 1}"#), json(r#"{"a": 1, "b": 2}"#), json(r#"{"a": [], "b": 1}"#), json(r#"{"a": [1], "b": 2}"#), json(r#"{"a": {}, "b": 1}"#), json(r#"{"a": {"a": 1, "b": 2}, "b": 3}"#), json(r#"{"a": [{"a": 1, "b": 2}, {"a": 3, "b": 4}], "b": 5}"#), json(r#"{"a": [{"a": [1], "b": 2}, {"a": [3], "b": 4}], "b": 5}"#), ]; for init_payload in &payloads { for indexed_path in &paths { for value_key in &["a", "b"] { check_set(init_payload, indexed_path, None, value_key); for path_to_set in &paths { check_set(init_payload, indexed_path, Some(path_to_set), value_key); } } for path_to_remove in &paths { check_remove(init_payload, indexed_path, path_to_remove); } } } } fn check_set( init_payload: &serde_json::Map<String, serde_json::Value>, indexed_path: &JsonPath, path_to_set: Option<&JsonPath>, value_key: &str, ) { let mut new_payload = init_payload.clone(); let init_values = indexed_path.value_get(init_payload); JsonPath::value_set( path_to_set, &mut new_payload, &json(r#"{"value_key": 100}"#), ); let new_values = indexed_path.value_get(&new_payload); // Ground truth let indexed_value_changed = init_values != new_values; // Our prediction let is_affected = indexed_path.is_affected_by_value_set(&json(r#"{"value_key": 100}"#), path_to_set); assert!( is_affected || !indexed_value_changed, "init_payload: {:?}\nnew_payload: {:?}\nindex_path: {:?}\npath_to_set: {:?}\nvalue_key: {:?}", init_payload, new_payload, indexed_path.to_string(), path_to_set.map(|p| p.to_string()), value_key, ); } fn check_remove( init_payload: &serde_json::Map<String, serde_json::Value>, indexed_path: &JsonPath, path_to_remove: &JsonPath, ) { let mut new_payload = init_payload.clone(); let init_values = indexed_path.value_get(init_payload); path_to_remove.value_remove(&mut new_payload); let new_values = 
indexed_path.value_get(&new_payload); // Ground truth let indexed_value_changed = init_values != new_values; // Our prediction let is_affected = indexed_path.is_affected_by_value_remove(path_to_remove); assert!( is_affected || !indexed_value_changed, "init_payload: {:?}\nnew_payload: {:?}\nindex_path: {:?}\npath_to_remove: {:?}", init_payload, new_payload, indexed_path.to_string(), path_to_remove.to_string(), ); } #[test] fn test_get_nested_value_from_json_map() { let map = json( r#" { "a": {"b": {"c": 1}}, "d": 2 } "#, ); assert_eq!( JsonPath::new("a.b").value_get(&map).into_vec(), vec![&Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(1.into()) )]))] ); // going deeper assert_eq!( JsonPath::new("a.b.c").value_get(&map).into_vec(), vec![&Value::Number(1.into())] ); // missing path assert!(check_is_empty( JsonPath::new("a.b.c.d").value_get(&map).iter().copied() )); } #[test] fn test_is_empty() { let map = json( r#" { "a": [ { "b": 1 }, { "b": 2 }, { "b": null }, { "d": [] }, { "d": [] }, { "f": null } ] } "#, ); let multivalue = JsonPath::new("a[].b").value_get(&map); let is_empty = check_is_empty(multivalue.iter().copied()); assert!(!is_empty, "a[].b is not empty"); let multivalue = JsonPath::new("a[].c").value_get(&map); let is_empty = check_is_empty(multivalue.iter().copied()); assert!(is_empty, "a[].c is empty"); let multivalue = JsonPath::new("a[].d").value_get(&map); let is_empty = check_is_empty(multivalue.iter().copied()); assert!(is_empty, "a[].d is empty"); let multivalue = JsonPath::new("a[].f").value_get(&map); let is_empty = check_is_empty(multivalue.iter().copied()); assert!(is_empty, "a[].f is empty"); } #[test] fn test_get_nested_array_value_from_json_map() { let map = json( r#" { "a": { "b": [ { "c": 1 }, { "c": 2 }, { "d": { "e": 3 } } ] }, "f": 3, "g": ["g0", "g1", "g2"] } "#, ); // get JSON array assert_eq!( JsonPath::new("a.b").value_get(&map).into_vec(), vec![&Value::Array(vec![ 
Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(1.into()) )])), Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(2.into()) )])), Value::Object(serde_json::Map::from_iter(vec![( "d".to_string(), Value::Object(serde_json::Map::from_iter(vec![( "e".to_string(), Value::Number(3.into()) )])) )])) ])] ); // a.b[] extract all elements from array assert_eq!( JsonPath::new("a.b[]").value_get(&map).into_vec(), vec![ &Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(1.into()) )])), &Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(2.into()) )])), &Value::Object(serde_json::Map::from_iter(vec![( "d".to_string(), Value::Object(serde_json::Map::from_iter(vec![( "e".to_string(), Value::Number(3.into()) )])) )])) ] ); // project scalar field through array assert_eq!( JsonPath::new("a.b[].c").value_get(&map).into_vec(), vec![&Value::Number(1.into()), &Value::Number(2.into())] ); // project object field through array assert_eq!( JsonPath::new("a.b[].d").value_get(&map).into_vec(), vec![&Value::Object(serde_json::Map::from_iter(vec![( "e".to_string(), Value::Number(3.into()) )]))] ); // select scalar element from array assert_eq!( JsonPath::new("a.b[0]").value_get(&map).into_vec(), vec![&Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(1.into()) )]))] ); // select scalar object from array different index assert_eq!( JsonPath::new("a.b[1]").value_get(&map).into_vec(), vec![&Value::Object(serde_json::Map::from_iter(vec![( "c".to_string(), Value::Number(2.into()) )]))] ); // select field element from array different index assert_eq!( JsonPath::new("a.b[1].c").value_get(&map).into_vec(), vec![&Value::Number(2.into())] ); // select scalar element from array different index assert_eq!( JsonPath::new("g[2]").value_get(&map).into_vec(), vec![&Value::String("g2".to_string())] ); // select object element from array assert_eq!( 
JsonPath::new("a.b[2]").value_get(&map).into_vec(), vec![&Value::Object(serde_json::Map::from_iter(vec![( "d".to_string(), Value::Object(serde_json::Map::from_iter(vec![( "e".to_string(), Value::Number(3.into()) )])) )]))] ); // select out of bound index from array assert!(check_is_empty( JsonPath::new("a.b[3]").value_get(&map).iter().copied() )); } #[test] fn test_get_deeply_nested_array_value_from_json_map() { let map = json( r#" { "arr1": [ { "arr2": [ {"a": 1, "b": 2} ] }, { "arr2": [ {"a": 3, "b": 4}, {"a": 5, "b": 6} ] } ] } "#, ); // extract and flatten all elements from arrays assert_eq!( JsonPath::new("arr1[].arr2[].a").value_get(&map).into_vec(), vec![ &Value::Number(1.into()), &Value::Number(3.into()), &Value::Number(5.into()), ] ); } #[test] fn test_no_flatten_array_value_from_json_map() { let map = json( r#" { "arr": [ { "a": [1, 2, 3] }, { "a": 4 }, { "b": 5 } ] } "#, ); // extract and retain structure for arrays arrays assert_eq!( JsonPath::new("arr[].a").value_get(&map).into_vec(), vec![ &Value::Array(vec![ Value::Number(1.into()), Value::Number(2.into()), Value::Number(3.into()), ]), &Value::Number(4.into()), ] ); // expect an array as leaf, ignore non arrays assert_eq!( JsonPath::new("arr[].a[]").value_get(&map).into_vec(), vec![ &Value::Number(1.into()), &Value::Number(2.into()), &Value::Number(3.into()), ] ); } #[test] fn test_get_null_and_absent_values() { let map = json( r#" { "a": null, "b": [null, null], "c": [] } "#, ); assert_eq!( JsonPath::new("a").value_get(&map).as_slice(), &[&Value::Null], ); assert!(JsonPath::new("a[]").value_get(&map).is_empty()); assert_eq!( JsonPath::new("b").value_get(&map).as_slice(), &[&Value::Array(vec![Value::Null, Value::Null])], ); assert_eq!( JsonPath::new("b[]").value_get(&map).as_slice(), &[&Value::Null, &Value::Null], ); assert_eq!(
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/condition_checker.rs
lib/segment/src/payload_storage/condition_checker.rs
//! Contains functions for interpreting filter queries and defining if given points pass the conditions use std::str::FromStr; use ordered_float::OrderedFloat; use serde_json::Value; use crate::types::{ AnyVariants, DateTimePayloadType, FieldCondition, FloatPayloadType, GeoBoundingBox, GeoPoint, GeoPolygon, GeoRadius, Match, MatchAny, MatchExcept, MatchPhrase, MatchText, MatchTextAny, MatchValue, Range, RangeInterface, ValueVariants, ValuesCount, }; /// Threshold representing the point to which iterating through an IndexSet is more efficient than using hashing. /// /// For sets smaller than this threshold iterating outperforms hashing. /// For more information see <https://github.com/qdrant/qdrant/pull/3525>. pub const INDEXSET_ITER_THRESHOLD: usize = 13; pub trait ValueChecker { fn check_match(&self, payload: &Value) -> bool; #[inline] fn _check(&self, payload: &Value) -> bool { match payload { Value::Array(values) => values.iter().any(|x| self.check_match(x)), _ => self.check_match(payload), } } fn check(&self, payload: &Value) -> bool { self._check(payload) } /// Check condition in case of empty payload value fn check_empty(&self) -> bool { false } } fn check_is_empty(is_empty: bool, payload_value: &Value) -> bool { match payload_value { Value::Null => is_empty, Value::Bool(_) => !is_empty, Value::Number(_) => !is_empty, Value::String(_) => !is_empty, Value::Array(array) => array.is_empty() == is_empty, Value::Object(_) => !is_empty, } } fn check_is_null(is_null: bool, payload_value: &Value) -> bool { match payload_value { Value::Null => is_null, Value::Bool(_) => !is_null, Value::Number(_) => !is_null, Value::String(_) => !is_null, Value::Array(array) => array.iter().any(|x| x.is_null()) == is_null, Value::Object(_) => !is_null, } } impl ValueChecker for FieldCondition { fn check_match(&self, payload: &Value) -> bool { // Destructuring so compiler can check that we don't forget a condition let FieldCondition { r#match, range, geo_radius, geo_bounding_box, 
geo_polygon, values_count, key: _, is_empty, is_null, } = self; r#match .as_ref() .is_some_and(|condition| condition.check_match(payload)) || range .as_ref() .is_some_and(|range_interface| match range_interface { RangeInterface::Float(condition) => condition.check_match(payload), RangeInterface::DateTime(condition) => condition.check_match(payload), }) || geo_radius .as_ref() .is_some_and(|condition| condition.check_match(payload)) || geo_bounding_box .as_ref() .is_some_and(|condition| condition.check_match(payload)) || geo_polygon .as_ref() .is_some_and(|condition| condition.check_match(payload)) || values_count .as_ref() .is_some_and(|condition| condition.check_match(payload)) || is_empty.is_some_and(|is_empty| check_is_empty(is_empty, payload)) || is_null.is_some_and(|is_null| check_is_null(is_null, payload)) } fn check(&self, payload: &Value) -> bool { let FieldCondition { r#match: _, range: _, geo_radius: _, geo_bounding_box: _, geo_polygon: _, values_count, key: _, is_empty, is_null, } = self; if values_count.is_some() { self.values_count .as_ref() .unwrap() .check_count_from(payload) } else if is_empty.is_some() { check_is_empty(is_empty.unwrap(), payload) } else if is_null.is_some() { check_is_null(is_null.unwrap(), payload) } else { self._check(payload) } } fn check_empty(&self) -> bool { let FieldCondition { r#match: _, range: _, geo_radius: _, geo_bounding_box: _, geo_polygon: _, values_count: _, key: _, is_empty, is_null, } = self; if let Some(is_empty) = is_empty { return *is_empty; } if let Some(is_null) = is_null { return !*is_null; } false } } impl ValueChecker for Match { fn check_match(&self, payload: &Value) -> bool { match self { Match::Value(MatchValue { value }) => match (payload, value) { (Value::Bool(stored), ValueVariants::Bool(val)) => stored == val, (Value::String(stored), ValueVariants::String(val)) => stored == val, (Value::Number(stored), ValueVariants::Integer(val)) => { stored.as_i64().map(|num| num == *val).unwrap_or(false) } _ => 
false, }, Match::Text(MatchText { text }) | Match::Phrase(MatchPhrase { phrase: text }) => { match payload { Value::String(stored) => stored.contains(text), _ => false, } } Match::TextAny(MatchTextAny { text_any }) => match payload { Value::String(stored) => text_any .split_whitespace() .any(|token| stored.contains(token)), _ => false, }, Match::Any(MatchAny { any }) => match (payload, any) { (Value::String(stored), AnyVariants::Strings(list)) => { if list.len() < INDEXSET_ITER_THRESHOLD { list.iter().any(|i| i.as_str() == stored.as_str()) } else { list.contains(stored.as_str()) } } (Value::Number(stored), AnyVariants::Integers(list)) => stored .as_i64() .map(|num| { if list.len() < INDEXSET_ITER_THRESHOLD { list.iter().any(|i| *i == num) } else { list.contains(&num) } }) .unwrap_or(false), _ => false, }, Match::Except(MatchExcept { except }) => match (payload, except) { (Value::String(stored), AnyVariants::Strings(list)) => { if list.len() < INDEXSET_ITER_THRESHOLD { !list.iter().any(|i| i.as_str() == stored.as_str()) } else { !list.contains(stored.as_str()) } } (Value::Number(stored), AnyVariants::Integers(list)) => stored .as_i64() .map(|num| { if list.len() < INDEXSET_ITER_THRESHOLD { !list.iter().any(|i| *i == num) } else { !list.contains(&num) } }) .unwrap_or(true), (Value::Null, _) => false, (Value::Bool(_), _) => true, (Value::Array(_), _) => true, // Array inside array is not flattened (Value::Object(_), _) => true, (Value::Number(_), _) => true, (Value::String(_), _) => true, }, } } } impl ValueChecker for Range<OrderedFloat<FloatPayloadType>> { fn check_match(&self, payload: &Value) -> bool { match payload { Value::Number(num) => num .as_f64() .map(|number| self.check_range(OrderedFloat(number))) .unwrap_or(false), _ => false, } } } impl ValueChecker for Range<DateTimePayloadType> { fn check_match(&self, payload: &Value) -> bool { payload .as_str() .and_then(|s| DateTimePayloadType::from_str(s).ok()) .is_some_and(|x| self.check_range(x)) } } impl 
ValueChecker for GeoBoundingBox { fn check_match(&self, payload: &Value) -> bool { match payload { Value::Object(obj) => { let lon_op = obj.get("lon").and_then(|x| x.as_f64()); let lat_op = obj.get("lat").and_then(|x| x.as_f64()); if let (Some(lon), Some(lat)) = (lon_op, lat_op) { return self.check_point(&GeoPoint::new_unchecked(lon, lat)); } false } _ => false, } } } impl ValueChecker for GeoRadius { fn check_match(&self, payload: &Value) -> bool { match payload { Value::Object(obj) => { let lon_op = obj.get("lon").and_then(|x| x.as_f64()); let lat_op = obj.get("lat").and_then(|x| x.as_f64()); if let (Some(lon), Some(lat)) = (lon_op, lat_op) { return self.check_point(&GeoPoint::new_unchecked(lon, lat)); } false } _ => false, } } } impl ValueChecker for GeoPolygon { fn check_match(&self, payload: &Value) -> bool { match payload { Value::Object(obj) => { let lon_op = obj.get("lon").and_then(|x| x.as_f64()); let lat_op = obj.get("lat").and_then(|x| x.as_f64()); if let (Some(lon), Some(lat)) = (lon_op, lat_op) { return self .convert() .check_point(&GeoPoint::new_unchecked(lon, lat)); } false } _ => false, } } } impl ValueChecker for ValuesCount { fn check_match(&self, payload: &Value) -> bool { self.check_count_from(payload) } fn check(&self, payload: &Value) -> bool { self.check_count_from(payload) } fn check_empty(&self) -> bool { self.check_count(0) } } #[cfg(test)] mod tests { use serde_json::json; use super::*; use crate::json_path::JsonPath; use crate::types::GeoPoint; #[test] fn test_geo_matching() { let berlin_and_moscow = json!([ { "lat": 52.52197645, "lon": 13.413637435864272 }, { "lat": 55.7536283, "lon": 37.62137960067377, } ]); let near_berlin_query = GeoRadius { center: GeoPoint::new_unchecked(13.413637, 52.521976), radius: OrderedFloat(2000.0), }; let miss_geo_query = GeoRadius { center: GeoPoint::new_unchecked(20.423637, 52.511), radius: OrderedFloat(2000.0), }; assert!(near_berlin_query.check(&berlin_and_moscow)); 
assert!(!miss_geo_query.check(&berlin_and_moscow)); } #[test] fn test_value_count() { let countries = json!([ { "country": "Germany", }, { "country": "France", } ]); let gt_one_country_query = ValuesCount { lt: None, gt: Some(1), gte: None, lte: None, }; assert!(gt_one_country_query.check(&countries)); let gt_two_countries_query = ValuesCount { lt: None, gt: Some(2), gte: None, lte: None, }; assert!(!gt_two_countries_query.check(&countries)); let gte_two_countries_query = ValuesCount { lt: None, gt: None, gte: Some(2), lte: None, }; assert!(gte_two_countries_query.check(&countries)); } #[test] fn test_value_checker_for_null_or_empty() { let array = json!([]); let array_with_null = json!([null]); let array_with_something = json!([true]); let array_with_null_and_something = json!([true, null]); let object = json!({}); let string = json!("string"); let number = json!(1); let bool = json!(true); let key = JsonPath::new("key"); let is_empty = FieldCondition { r#match: None, range: None, geo_radius: None, geo_bounding_box: None, geo_polygon: None, values_count: None, key: key.clone(), is_empty: Some(true), is_null: None, }; let is_not_empty = FieldCondition { r#match: None, range: None, geo_radius: None, geo_bounding_box: None, geo_polygon: None, values_count: None, key: key.clone(), is_empty: Some(false), is_null: None, }; let is_null = FieldCondition { r#match: None, range: None, geo_radius: None, geo_bounding_box: None, geo_polygon: None, values_count: None, key: key.clone(), is_empty: None, is_null: Some(true), }; let is_not_null = FieldCondition { r#match: None, range: None, geo_radius: None, geo_bounding_box: None, geo_polygon: None, values_count: None, key: key.clone(), is_empty: None, is_null: Some(false), }; assert!(is_empty.check(&array)); assert!(!is_empty.check(&array_with_null)); assert!(!is_empty.check(&array_with_something)); assert!(!is_empty.check(&array_with_null_and_something)); assert!(!is_empty.check(&object)); assert!(!is_empty.check(&string)); 
assert!(!is_empty.check(&number)); assert!(!is_empty.check(&bool)); assert!(!is_not_empty.check(&array)); assert!(is_not_empty.check(&array_with_null)); assert!(is_not_empty.check(&array_with_something)); assert!(is_not_empty.check(&array_with_null_and_something)); assert!(is_not_empty.check(&object)); assert!(is_not_empty.check(&string)); assert!(is_not_empty.check(&number)); assert!(is_not_empty.check(&bool)); assert!(!is_null.check(&array)); assert!(is_null.check(&array_with_null)); assert!(!is_null.check(&array_with_something)); assert!(is_null.check(&array_with_null_and_something)); assert!(!is_null.check(&object)); assert!(!is_null.check(&string)); assert!(!is_null.check(&number)); assert!(!is_null.check(&bool)); assert!(is_not_null.check(&array)); assert!(!is_not_null.check(&array_with_null)); assert!(is_not_null.check(&array_with_something)); assert!(!is_not_null.check(&array_with_null_and_something)); assert!(is_not_null.check(&object)); assert!(is_not_null.check(&string)); assert!(is_not_null.check(&number)); assert!(is_not_null.check(&bool)); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/tests.rs
lib/segment/src/payload_storage/tests.rs
use std::path::Path; use common::counter::hardware_counter::HardwareCounterCell; use rstest::rstest; use super::PayloadStorage; use super::mmap_payload_storage::MmapPayloadStorage; #[cfg(feature = "rocksdb")] use super::on_disk_payload_storage::OnDiskPayloadStorage; #[cfg(feature = "rocksdb")] use super::simple_payload_storage::SimplePayloadStorage; #[cfg(feature = "rocksdb")] use crate::common::rocksdb_wrapper::open_db; use crate::payload_json; fn test_trait_impl<S: PayloadStorage>(open: impl Fn(&Path) -> S) { let dir = tempfile::tempdir().unwrap(); let mut storage = open(dir.path()); assert_eq!(storage.get_storage_size_bytes().unwrap(), 0); let payload = payload_json! { "a": "some text", }; let hw_counter = HardwareCounterCell::new(); // set storage.set(0, &payload, &hw_counter).unwrap(); assert_eq!(storage.get(0, &hw_counter).unwrap(), payload); // set on existing let payload_to_merge = payload_json! { "zzz": "some other text", }; storage.set(0, &payload_to_merge, &hw_counter).unwrap(); let stored = storage.get(0, &hw_counter).unwrap(); assert_eq!( stored, payload_json! { "a": "some text", "zzz": "some other text", }, ); // set_by_key let nested_payload = payload_json! { "layer2": true, }; storage .set_by_key( 0, &nested_payload, &"layer1".try_into().unwrap(), &hw_counter, ) .unwrap(); let stored = storage.get(0, &hw_counter).unwrap(); assert_eq!( stored, payload_json! { "a": "some text", "zzz": "some other text", "layer1": { "layer2": true, } }, ); // delete key storage .delete(0, &"layer1".try_into().unwrap(), &hw_counter) .unwrap(); let stored = storage.get(0, &hw_counter).unwrap(); assert_eq!( stored, payload_json! { "a": "some text", "zzz": "some other text", }, ); // overwrite let new_payload = payload_json! 
{ "new": "new text", "other_new": "other new text", }; storage.overwrite(0, &new_payload, &hw_counter).unwrap(); let stored = storage.get(0, &hw_counter).unwrap(); assert_eq!(stored, new_payload); storage.clear(0, &hw_counter).unwrap(); assert_eq!(storage.get(0, &hw_counter).unwrap(), payload_json! {}); for i in 1..10 { storage.set(i, &payload, &hw_counter).unwrap(); } let assert_payloads = |storage: &S| { storage .iter( |key, value| { if key == 0 { assert_eq!(value, &payload_json! {}); return Ok(true); } assert_eq!(value, &payload); Ok(true) }, &hw_counter, ) .unwrap(); }; assert_payloads(&storage); eprintln!("storage is correct before drop"); // flush, drop, and reopen storage.flusher()().unwrap(); drop(storage); let storage = open(dir.path()); // check if the data is still there assert_payloads(&storage); eprintln!("storage is correct after drop"); assert!(storage.get_storage_size_bytes().unwrap() > 0); } #[test] #[cfg(feature = "rocksdb")] fn test_in_memory_storage() { test_trait_impl(|path| { let db = open_db(path, &[""]).unwrap(); SimplePayloadStorage::open(db).unwrap() }); } #[rstest] fn test_mmap_storage(#[values(false, true)] populate: bool) { test_trait_impl(|path| { MmapPayloadStorage::open_or_create(path.to_path_buf(), populate).unwrap() }); } #[test] #[cfg(feature = "rocksdb")] fn test_on_disk_storage() { test_trait_impl(|path| { let db = open_db(path, &[""]).unwrap(); OnDiskPayloadStorage::open(db).unwrap() }); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/payload_storage_base.rs
lib/segment/src/payload_storage/payload_storage_base.rs
use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use serde_json::Value; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::json_path::JsonPath; use crate::types::{Filter, Payload}; /// Trait for payload data storage. Should allow filter checks pub trait PayloadStorage { /// Overwrite payload for point_id. If payload already exists, replace it fn overwrite( &mut self, point_id: PointOffsetType, payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Set payload for point_id. If payload already exists, merge it with existing fn set( &mut self, point_id: PointOffsetType, payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; /// Set payload to a point_id by key. If payload already exists, merge it with existing fn set_by_key( &mut self, point_id: PointOffsetType, payload: &Payload, key: &JsonPath, hw_counter: &HardwareCounterCell, ) -> OperationResult<()>; fn get( &self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload>; fn get_sequential( &self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload>; /// Delete payload by point_id and key fn delete( &mut self, point_id: PointOffsetType, key: &JsonPath, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<Value>>; /// Clear all payload of the point fn clear( &mut self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Option<Payload>>; /// Completely delete payload storage, without keeping allocated memory. Pufff! #[cfg(test)] fn clear_all(&mut self, hw_counter: &HardwareCounterCell) -> OperationResult<()>; /// Return function that forces persistence of current storage state. fn flusher(&self) -> Flusher; /// Iterate over all stored payload and apply the provided callback. /// Stop iteration if callback returns false or error. 
/// /// Required for building payload index. fn iter<F>(&self, callback: F, hw_counter: &HardwareCounterCell) -> OperationResult<()> where F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>; /// Return all files that are used by storage to include in snapshots. /// RocksDB storages are captured outside of this trait. fn files(&self) -> Vec<PathBuf>; /// Returns a list of files, that are immutable, to exclude from partial snapshots. fn immutable_files(&self) -> Vec<PathBuf> { Vec::new() } /// Return storage size in bytes fn get_storage_size_bytes(&self) -> OperationResult<usize>; /// Whether this storage is on-disk or in-memory. fn is_on_disk(&self) -> bool; } pub trait ConditionChecker { /// Check if point satisfies filter condition. Return true if satisfies fn check(&self, point_id: PointOffsetType, query: &Filter) -> bool; } pub trait FilterContext { /// Check if point satisfies filter condition. Return true if satisfies fn check(&self, point_id: PointOffsetType) -> bool; } pub type PayloadStorageSS = dyn PayloadStorage + Sync + Send; pub type ConditionCheckerSS = dyn ConditionChecker + Sync + Send;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/simple_payload_storage_impl.rs
lib/segment/src/payload_storage/simple_payload_storage_impl.rs
use std::path::PathBuf; use common::counter::hardware_counter::HardwareCounterCell; use common::types::PointOffsetType; use serde_json::Value; use crate::common::Flusher; use crate::common::operation_error::OperationResult; use crate::json_path::JsonPath; use crate::payload_storage::PayloadStorage; use crate::payload_storage::simple_payload_storage::SimplePayloadStorage; use crate::types::{Payload, PayloadKeyTypeRef}; impl PayloadStorage for SimplePayloadStorage { fn overwrite( &mut self, point_id: PointOffsetType, payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { self.payload.insert(point_id, payload.to_owned()); self.update_storage(point_id, hw_counter)?; Ok(()) } fn set( &mut self, point_id: PointOffsetType, payload: &Payload, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self.payload.get_mut(&point_id) { Some(point_payload) => point_payload.merge(payload), None => { self.payload.insert(point_id, payload.to_owned()); } } self.update_storage(point_id, hw_counter)?; Ok(()) } fn set_by_key( &mut self, point_id: PointOffsetType, payload: &Payload, key: &JsonPath, hw_counter: &HardwareCounterCell, ) -> OperationResult<()> { match self.payload.get_mut(&point_id) { Some(point_payload) => point_payload.merge_by_key(payload, key), None => { let mut dest_payload = Payload::default(); dest_payload.merge_by_key(payload, key); self.payload.insert(point_id, dest_payload); } } self.update_storage(point_id, hw_counter)?; Ok(()) } fn get(&self, point_id: PointOffsetType, _: &HardwareCounterCell) -> OperationResult<Payload> { match self.payload.get(&point_id) { Some(payload) => Ok(payload.to_owned()), None => Ok(Default::default()), } } fn get_sequential( &self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Payload> { // No sequential access optimizations for simple payload storage. 
self.get(point_id, hw_counter) } fn delete( &mut self, point_id: PointOffsetType, key: PayloadKeyTypeRef, hw_counter: &HardwareCounterCell, ) -> OperationResult<Vec<Value>> { match self.payload.get_mut(&point_id) { Some(payload) => { let res = payload.remove(key); if !res.is_empty() { self.update_storage(point_id, hw_counter)?; } Ok(res) } None => Ok(vec![]), } } fn clear( &mut self, point_id: PointOffsetType, hw_counter: &HardwareCounterCell, ) -> OperationResult<Option<Payload>> { let res = self.payload.remove(&point_id); self.update_storage(point_id, hw_counter)?; Ok(res) } #[cfg(test)] fn clear_all(&mut self, _: &HardwareCounterCell) -> OperationResult<()> { self.payload = ahash::AHashMap::new(); self.db_wrapper.recreate_column_family() } fn flusher(&self) -> Flusher { self.db_wrapper.flusher() } fn iter<F>(&self, mut callback: F, _hw_counter: &HardwareCounterCell) -> OperationResult<()> where F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>, { for (key, val) in self.payload.iter() { let do_continue = callback(*key, val)?; if !do_continue { return Ok(()); } } Ok(()) } fn files(&self) -> Vec<PathBuf> { vec![] } fn get_storage_size_bytes(&self) -> OperationResult<usize> { self.db_wrapper.get_storage_size_bytes() } fn is_on_disk(&self) -> bool { false } } #[cfg(test)] mod tests { use std::str::FromStr; use tempfile::Builder; use super::*; use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db}; #[test] fn test_wipe() { let dir = Builder::new().prefix("db_dir").tempdir().unwrap(); let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap(); let hw_counter = HardwareCounterCell::new(); let mut storage = SimplePayloadStorage::open(db).unwrap(); let payload: Payload = serde_json::from_str(r#"{"name": "John Doe"}"#).unwrap(); storage.set(100, &payload, &hw_counter).unwrap(); storage.clear_all(&hw_counter).unwrap(); storage.set(100, &payload, &hw_counter).unwrap(); storage.clear_all(&hw_counter).unwrap(); storage.set(100, &payload, &hw_counter).unwrap(); 
assert!(!storage.get(100, &hw_counter).unwrap().is_empty()); storage.clear_all(&hw_counter).unwrap(); assert_eq!(storage.get(100, &hw_counter).unwrap(), Default::default()); } #[test] fn test_set_by_key_consistency() { let dir = Builder::new().prefix("db_dir").tempdir().unwrap(); let expected_payload: Payload = serde_json::from_str(r#"{"name": {"name": "Dohn Joe"}}"#).unwrap(); let hw_counter = HardwareCounterCell::new(); { let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap(); let mut storage = SimplePayloadStorage::open(db).unwrap(); let payload: Payload = serde_json::from_str(r#"{"name": "John Doe"}"#).unwrap(); storage.set(100, &payload, &hw_counter).unwrap(); let new_payload: Payload = serde_json::from_str(r#"{"name": "Dohn Joe"}"#).unwrap(); storage .set_by_key( 100, &new_payload, &JsonPath::from_str("name").unwrap(), &hw_counter, ) .unwrap(); // Here it's `expected_payload` assert_eq!(storage.get(100, &hw_counter).unwrap(), expected_payload); } let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap(); let storage = SimplePayloadStorage::open(db).unwrap(); assert_eq!(storage.get(100, &hw_counter).unwrap(), expected_payload); // Here must be `expected_payload` as well } #[test] fn test_assign_payload_from_serde_json() { let data = r#" { "name": "John Doe", "age": 43, "boolean": "true", "floating": 30.5, "string_array": ["hello", "world"], "boolean_array": ["true", "false"], "float_array": [1.0, 2.0], "integer_array": [1, 2], "geo_data": {"type": "geo", "value": {"lon": 1.0, "lat": 1.0}}, "metadata": { "height": 50, "width": 60, "temperature": 60.5, "nested": { "feature": 30.5 }, "integer_array": [1, 2] } }"#; let hw_counter = HardwareCounterCell::new(); let payload: Payload = serde_json::from_str(data).unwrap(); let dir = Builder::new().prefix("storage_dir").tempdir().unwrap(); let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap(); let mut storage = SimplePayloadStorage::open(db).unwrap(); storage.set(100, &payload, &hw_counter).unwrap(); let pload = 
storage.get(100, &hw_counter).unwrap(); assert_eq!(pload, payload); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false