repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard_holder/mod.rs
lib/collection/src/shards/shard_holder/mod.rs
mod resharding; pub(crate) mod shard_mapping; use std::collections::{HashMap, HashSet}; use std::ops::Deref as _; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use ahash::AHashMap; use api::rest::ShardKeyWithFallback; use common::budget::ResourceBudget; use common::save_on_disk::SaveOnDisk; use common::tar_ext::BuilderExt; use fs_err as fs; use fs_err::{File, tokio as tokio_fs}; use futures::{Future, StreamExt, TryStreamExt as _, stream}; use itertools::Itertools; use segment::common::validate_snapshot_archive::{ open_snapshot_archive, validate_snapshot_archive, }; use segment::data_types::manifest::SnapshotManifest; use segment::json_path::JsonPath; use segment::types::{PayloadFieldSchema, ShardKey, SnapshotFormat}; use shard_mapping::ShardKeyMapping; use tokio::runtime::Handle; use tokio::sync::{OwnedRwLockReadGuard, RwLock, broadcast}; use tokio_util::codec::{BytesCodec, FramedRead}; use tokio_util::io::SyncIoBridge; use super::replica_set::snapshots::RecoveryType; use super::replica_set::{AbortShardTransfer, ChangePeerFromState}; use super::resharding::{ReshardStage, ReshardState}; use super::transfer::transfer_tasks_pool::TransferTasksPool; use crate::collection::payload_index_schema::PayloadIndexSchema; use crate::common::collection_size_stats::CollectionSizeStats; use crate::common::snapshot_stream::SnapshotStream; use crate::config::{CollectionConfigInternal, ShardingMethod}; use crate::hash_ring::HashRingRouter; use crate::operations::cluster_ops::ReshardingDirection; use crate::operations::shard_selector_internal::ShardSelectorInternal; use crate::operations::shared_storage_config::SharedStorageConfig; use crate::operations::snapshot_ops::SnapshotDescription; use crate::operations::types::{ CollectionError, CollectionResult, ReshardingInfo, ShardTransferInfo, }; use crate::operations::{OperationToShard, SplitByShard}; use crate::optimizers_builder::OptimizersConfig; use crate::shards::channel_service::ChannelService; use 
crate::shards::replica_set::ShardReplicaSet; use crate::shards::replica_set::replica_set_state::ReplicaState; use crate::shards::shard::{PeerId, ShardId}; use crate::shards::shard_config::ShardConfig; use crate::shards::transfer::{ShardTransfer, ShardTransferKey}; use crate::shards::{CollectionId, check_shard_path, shard_initializing_flag_path}; const SHARD_TRANSFERS_FILE: &str = "shard_transfers"; const RESHARDING_STATE_FILE: &str = "resharding_state.json"; pub const SHARD_KEY_MAPPING_FILE: &str = "shard_key_mapping.json"; pub struct ShardHolder { shards: AHashMap<ShardId, ShardReplicaSet>, pub(crate) shard_transfers: SaveOnDisk<HashSet<ShardTransfer>>, pub(crate) shard_transfer_changes: broadcast::Sender<ShardTransferChange>, pub(crate) resharding_state: SaveOnDisk<Option<ReshardState>>, /// Hash rings per shard key /// /// In case of auto sharding, this only hash a `None` hash ring. In case of custom sharding, /// this only has hash rings for defined shard keys excluding `None`. pub(crate) rings: HashMap<Option<ShardKey>, HashRingRouter>, key_mapping: SaveOnDisk<ShardKeyMapping>, // Duplicates the information from `key_mapping` for faster access, does not use locking shard_id_to_key_mapping: AHashMap<ShardId, ShardKey>, sharding_method: ShardingMethod, } pub type LockedShardHolder = RwLock<ShardHolder>; impl ShardHolder { pub async fn trigger_optimizers(&self) { for shard in self.shards.values() { shard.trigger_optimizers().await; } } pub fn new(collection_path: &Path, sharding_method: ShardingMethod) -> CollectionResult<Self> { let shard_transfers = SaveOnDisk::load_or_init_default(collection_path.join(SHARD_TRANSFERS_FILE))?; let resharding_state: SaveOnDisk<Option<ReshardState>> = SaveOnDisk::load_or_init_default(collection_path.join(RESHARDING_STATE_FILE))?; let key_mapping: SaveOnDisk<ShardKeyMapping> = SaveOnDisk::load_or_init_default(collection_path.join(SHARD_KEY_MAPPING_FILE))?; // TODO(1.17.0): Remove once the old shardkey format has been removed 
entirely. Self::migrate_shard_key_if_needed(&key_mapping)?; let mut shard_id_to_key_mapping = AHashMap::new(); for (shard_key, shard_ids) in key_mapping.read().iter() { for shard_id in shard_ids { shard_id_to_key_mapping.insert(*shard_id, shard_key.clone()); } } let rings = match sharding_method { ShardingMethod::Auto => HashMap::from([(None, HashRingRouter::single())]), ShardingMethod::Custom => HashMap::new(), }; let (shard_transfer_changes, _) = broadcast::channel(64); Ok(Self { shards: AHashMap::new(), shard_transfers, shard_transfer_changes, resharding_state, rings, key_mapping, shard_id_to_key_mapping, sharding_method, }) } pub async fn stop_gracefully(&mut self) { let futures = self .shards .drain() .map(|(_, shard)| shard.stop_gracefully()); futures::future::join_all(futures).await; } #[cfg(feature = "testing")] pub async fn stop_gracefully_owned(mut self) { self.stop_gracefully().await; } pub async fn save_key_mapping_to_tar( &self, tar: &common::tar_ext::BuilderExt, ) -> CollectionResult<()> { self.key_mapping .save_to_tar(tar, Path::new(SHARD_KEY_MAPPING_FILE)) .await?; Ok(()) } pub fn get_shard_id_to_key_mapping(&self) -> &AHashMap<ShardId, ShardKey> { &self.shard_id_to_key_mapping } pub fn get_shard_key_to_ids_mapping(&self) -> ShardKeyMapping { self.key_mapping.read().clone() } /// Set the shard key mappings /// /// # Warning /// /// This does not update the shard key inside replica sets. If the shard key mapping changes /// and we have existing replica sets, they must be updated as well to reflect the changed /// mappings. 
pub fn set_shard_key_mappings( &mut self, shard_key_mapping: ShardKeyMapping, ) -> CollectionResult<()> { let shard_id_to_key_mapping = shard_key_mapping.shard_id_to_shard_key(); self.key_mapping .write_optional(move |_| Some(shard_key_mapping))?; self.shard_id_to_key_mapping = shard_id_to_key_mapping; Ok(()) } pub async fn drop_and_remove_shard(&mut self, shard_id: ShardId) -> CollectionResult<()> { if let Some(replica_set) = self.shards.remove(&shard_id) { let shard_path = replica_set.shard_path.clone(); replica_set.stop_gracefully().await; // Explicitly drop shard config file first // If removing all shard files at once, it may be possible for the shard configuration // file to be left behind if the process is killed in the middle. We must avoid this so // we don't attempt to load this shard anymore on restart. let shard_config_path = ShardConfig::get_config_path(&shard_path); if let Err(err) = tokio_fs::remove_file(shard_config_path).await { log::error!( "Failed to remove shard config file before removing the rest of the files: {err}", ); } tokio_fs::remove_dir_all(shard_path).await?; } Ok(()) } pub fn remove_shard_from_key_mapping( &mut self, shard_id: ShardId, shard_key: &ShardKey, ) -> CollectionResult<()> { self.key_mapping.write_optional(|key_mapping| { if !key_mapping.contains_key(shard_key) { return None; } let mut key_mapping = key_mapping.clone(); key_mapping.get_mut(shard_key).unwrap().remove(&shard_id); Some(key_mapping) })?; self.shard_id_to_key_mapping.remove(&shard_id); Ok(()) } /// ## Cancel safety /// /// This function is **not** cancel safe. 
pub async fn add_shard( &mut self, shard_id: ShardId, shard: ShardReplicaSet, shard_key: Option<ShardKey>, ) -> CollectionResult<()> { let evicted = self.shards.insert(shard_id, shard); if let Some(evicted) = evicted { debug_assert!(false, "Overwriting existing shard id {shard_id}"); evicted.stop_gracefully().await; } self.rings .entry(shard_key.clone()) .or_insert_with(HashRingRouter::single) .add(shard_id); if let Some(shard_key) = shard_key { self.key_mapping.write_optional(|key_mapping| { let has_id = key_mapping .get(&shard_key) .map(|shard_ids| shard_ids.contains(&shard_id)) .unwrap_or(false); if has_id { return None; } let mut copy_of_mapping = key_mapping.clone(); let shard_ids = copy_of_mapping.entry(shard_key.clone()).or_default(); shard_ids.insert(shard_id); Some(copy_of_mapping) })?; self.shard_id_to_key_mapping.insert(shard_id, shard_key); } Ok(()) } pub async fn remove_shard_key(&mut self, shard_key: &ShardKey) -> CollectionResult<()> { let mut remove_shard_ids = Vec::new(); self.key_mapping.write_optional(|key_mapping| { if key_mapping.contains_key(shard_key) { let mut new_key_mapping = key_mapping.clone(); if let Some(shard_ids) = new_key_mapping.remove(shard_key) { for shard_id in shard_ids { remove_shard_ids.push(shard_id); } } Some(new_key_mapping) } else { None } })?; self.rings.remove(&shard_key.clone().into()); for shard_id in remove_shard_ids { self.drop_and_remove_shard(shard_id).await?; self.shard_id_to_key_mapping.remove(&shard_id); } Ok(()) } fn rebuild_rings(&mut self) { let mut rings = match self.sharding_method { // With auto sharding, we have a single hash ring ShardingMethod::Auto => HashMap::from([(None, HashRingRouter::single())]), // With custom sharding, we have a hash ring per shard key ShardingMethod::Custom => HashMap::new(), }; // Add shards and shard keys let ids_to_key = self.get_shard_id_to_key_mapping(); for shard_id in self.shards.keys() { let shard_key = ids_to_key.get(shard_id).cloned(); debug_assert!( matches!( 
(self.sharding_method, &shard_key), (ShardingMethod::Auto, None) | (ShardingMethod::Custom, Some(_)), ), "auto sharding cannot have shard key, custom sharding must have shard key ({:?}, {shard_key:?})", self.sharding_method, ); rings .entry(shard_key) .or_insert_with(HashRingRouter::single) .add(*shard_id); } // Restore resharding hash ring if resharding is active and haven't reached // `WriteHashRingCommitted` stage yet if let Some(state) = self.resharding_state.read().deref() { let ring = rings .get_mut(&state.shard_key) .expect("must have hash ring for current resharding shard key"); ring.start_resharding(state.shard_id, state.direction); if state.stage >= ReshardStage::WriteHashRingCommitted { ring.commit_resharding(); } } self.rings = rings; } pub async fn apply_shards_state( &mut self, shard_ids: HashSet<ShardId>, shard_key_mapping: ShardKeyMapping, extra_shards: AHashMap<ShardId, ShardReplicaSet>, ) -> CollectionResult<()> { for (extra_shard_id, extra_shard) in extra_shards { let evicted = self.shards.insert(extra_shard_id, extra_shard); if let Some(evicted) = evicted { evicted.stop_gracefully().await; } } let all_shard_ids = self.shards.keys().cloned().collect::<HashSet<_>>(); self.set_shard_key_mappings(shard_key_mapping)?; for shard_id in all_shard_ids { if !shard_ids.contains(&shard_id) { self.drop_and_remove_shard(shard_id).await?; } } self.rebuild_rings(); Ok(()) } pub fn contains_shard(&self, shard_id: ShardId) -> bool { self.shards.contains_key(&shard_id) } pub fn get_shard(&self, shard_id: ShardId) -> Option<&ShardReplicaSet> { self.shards.get(&shard_id) } pub fn get_shard_mut(&mut self, shard_id: ShardId) -> Option<&mut ShardReplicaSet> { self.shards.get_mut(&shard_id) } pub fn get_shards(&self) -> impl Iterator<Item = (ShardId, &ShardReplicaSet)> { self.shards.iter().map(|(id, shard)| (*id, shard)) } pub fn all_shards(&self) -> impl Iterator<Item = &ShardReplicaSet> { self.shards.values() } pub fn all_shards_mut(&mut self) -> impl Iterator<Item = 
&mut ShardReplicaSet> { self.shards.values_mut() } pub fn split_by_shard<O: SplitByShard + Clone>( &self, operation: O, shard_keys_selection: &Option<ShardKey>, ) -> CollectionResult<Vec<(&ShardReplicaSet, O)>> { let Some(hashring) = self.rings.get(&shard_keys_selection.clone()) else { return if let Some(shard_key) = shard_keys_selection { Err(CollectionError::bad_input(format!( "Shard key {shard_key} not found" ))) } else { Err(CollectionError::bad_input( "Shard key not specified".to_string(), )) }; }; if hashring.is_empty() { return Err(CollectionError::bad_input( "No shards found for shard key".to_string(), )); } let operation_to_shard = operation.split_by_shard(hashring); let shard_ops: Vec<_> = match operation_to_shard { OperationToShard::ByShard(by_shard) => by_shard .into_iter() .map(|(shard_id, operation)| (self.shards.get(&shard_id).unwrap(), operation)) .collect(), OperationToShard::ToAll(operation) => { if let Some(shard_key) = shard_keys_selection { let shard_ids = self .key_mapping .read() .get(shard_key) .cloned() .unwrap_or_default(); shard_ids .into_iter() .map(|shard_id| (self.shards.get(&shard_id).unwrap(), operation.clone())) .collect() } else { self.all_shards() .map(|shard| (shard, operation.clone())) .collect() } } }; Ok(shard_ops) } pub fn register_start_shard_transfer(&self, transfer: ShardTransfer) -> CollectionResult<bool> { let changed = self .shard_transfers .write(|transfers| transfers.insert(transfer.clone()))?; let _ = self .shard_transfer_changes .send(ShardTransferChange::Start(transfer)); Ok(changed) } pub fn register_finish_transfer(&self, key: &ShardTransferKey) -> CollectionResult<bool> { let any_removed = self .shard_transfers .write(|transfers| transfers.extract_if(|transfer| key.check(transfer)).count() > 0)?; let _ = self .shard_transfer_changes .send(ShardTransferChange::Finish(*key)); Ok(any_removed) } pub fn register_abort_transfer(&self, key: &ShardTransferKey) -> CollectionResult<bool> { let any_removed = self 
.shard_transfers .write(|transfers| transfers.extract_if(|transfer| key.check(transfer)).count() > 0)?; let _ = self .shard_transfer_changes .send(ShardTransferChange::Abort(*key)); Ok(any_removed) } /// Await for a given shard transfer to complete. /// /// The returned inner result defines whether it successfully finished or whether it was /// aborted/cancelled. pub fn await_shard_transfer_end( &self, transfer: ShardTransferKey, timeout: Duration, ) -> impl Future<Output = CollectionResult<Result<(), ()>>> { let mut subscriber = self.shard_transfer_changes.subscribe(); let receiver = async move { loop { match subscriber.recv().await { Err(tokio::sync::broadcast::error::RecvError::Closed) => { return Err(CollectionError::service_error( "Failed to await shard transfer end: failed to listen for shard transfer changes, channel closed", )); } Err(err @ tokio::sync::broadcast::error::RecvError::Lagged(_)) => { return Err(CollectionError::service_error(format!( "Failed to await shard transfer end: failed to listen for shard transfer changes, channel lagged behind: {err}", ))); } Ok(ShardTransferChange::Finish(key)) if key == transfer => return Ok(Ok(())), Ok(ShardTransferChange::Abort(key)) if key == transfer => return Ok(Err(())), Ok(_) => {} } } }; async move { match tokio::time::timeout(timeout, receiver).await { Ok(operation) => Ok(operation?), // Timeout Err(err) => Err(CollectionError::service_error(format!( "Awaiting for shard transfer end timed out: {err}" ))), } } } /// The count of incoming and outgoing shard transfers on the given peer /// /// This only includes shard transfers that are in consensus for the current collection. A /// shard transfer that has just been proposed may not be included yet. 
pub fn count_shard_transfer_io(&self, peer_id: PeerId) -> (usize, usize) { let (mut incoming, mut outgoing) = (0, 0); for transfer in self.shard_transfers.read().iter() { incoming += usize::from(transfer.to == peer_id); outgoing += usize::from(transfer.from == peer_id); } (incoming, outgoing) } pub fn get_shard_transfer_info( &self, tasks_pool: &TransferTasksPool, ) -> Vec<ShardTransferInfo> { let mut shard_transfers = vec![]; for shard_transfer in self.shard_transfers.read().iter() { let shard_id = shard_transfer.shard_id; let to_shard_id = shard_transfer.to_shard_id; let to = shard_transfer.to; let from = shard_transfer.from; let sync = shard_transfer.sync; let method = shard_transfer.method; let status = tasks_pool.get_task_status(&shard_transfer.key()); shard_transfers.push(ShardTransferInfo { shard_id, to_shard_id, from, to, sync, method, comment: status.map(|p| p.comment), }) } shard_transfers.sort_by_key(|k| k.shard_id); shard_transfers } pub fn get_resharding_operations_info(&self) -> Option<Vec<ReshardingInfo>> { let mut resharding_operations = vec![]; // We eventually expect to extend this to multiple concurrent operations, which is why // we're using a list here let Some(resharding_state) = &*self.resharding_state.read() else { return None; }; resharding_operations.push(ReshardingInfo { uuid: resharding_state.uuid, shard_id: resharding_state.shard_id, peer_id: resharding_state.peer_id, direction: resharding_state.direction, shard_key: resharding_state.shard_key.clone(), }); resharding_operations.sort_by_key(|k| k.shard_id); Some(resharding_operations) } /// Get all transfers related to the given peer and shard ID pair pub fn get_related_transfers(&self, peer_id: PeerId, shard_id: ShardId) -> Vec<ShardTransfer> { self.get_transfers(|transfer| transfer.is_source_or_target(peer_id, shard_id)) } pub fn get_shard_ids_by_key(&self, shard_key: &ShardKey) -> CollectionResult<HashSet<ShardId>> { match self.key_mapping.read().get(shard_key).cloned() { None => 
Err(CollectionError::bad_request(format!( "Shard key {shard_key} not found" ))), Some(ids) => Ok(ids), } } pub fn select_shards<'a>( &'a self, shard_selector: &'a ShardSelectorInternal, ) -> CollectionResult<Vec<(&'a ShardReplicaSet, Option<&'a ShardKey>)>> { let mut res = Vec::new(); match shard_selector { ShardSelectorInternal::Empty => { debug_assert!(false, "Do not expect empty shard selector") } ShardSelectorInternal::All => { let is_custom_sharding = match self.sharding_method { ShardingMethod::Auto => false, ShardingMethod::Custom => true, }; for (&shard_id, shard) in self.shards.iter() { // Ignore a new resharding shard until it completed point migration // The shard will be marked as active at the end of the migration stage let resharding_migrating_up = self.resharding_state.read().clone().is_some_and(|state| { state.direction == ReshardingDirection::Up && state.shard_id == shard_id && state.stage < ReshardStage::ReadHashRingCommitted }); if resharding_migrating_up { continue; } // Technically, we could skip inactive shards regardless of sharding method, // as we do not expect that shard id can even become inactive on all replicas. // (if it happens, means there is a bug) // But for earlier detection of such issues, we only do this check for custom sharding, // where this situation is expected for the case of tenant promotion. if is_custom_sharding && !shard.shard_is_active() { continue; } let shard_key = self.shard_id_to_key_mapping.get(&shard_id); res.push((shard, shard_key)); } } ShardSelectorInternal::ShardKey(shard_key) => { for shard_id in self.get_shard_ids_by_key(shard_key)? { if let Some(replica_set) = self.shards.get(&shard_id) { res.push((replica_set, Some(shard_key))); } else { debug_assert!(false, "Shard id {shard_id} not found") } } } ShardSelectorInternal::ShardKeys(shard_keys) => { for shard_key in shard_keys { for shard_id in self.get_shard_ids_by_key(shard_key)? 
{ if let Some(replica_set) = self.shards.get(&shard_id) { res.push((replica_set, Some(shard_key))); } else { debug_assert!(false, "Shard id {shard_id} not found") } } } } ShardSelectorInternal::ShardKeyWithFallback(key) => { let (shard_ids_to_query, used_shard_key) = self.route_with_fallback_for_read(key)?; log::trace!("Search routing with fallback: {used_shard_key:?}"); for shard_id in shard_ids_to_query { if let Some(replica_set) = self.shards.get(&shard_id) { res.push((replica_set, Some(used_shard_key))); } else { debug_assert!(false, "Shard id {shard_id} not found") } } } ShardSelectorInternal::ShardId(shard_id) => { if let Some(replica_set) = self.shards.get(shard_id) { res.push((replica_set, self.shard_id_to_key_mapping.get(shard_id))); } else { return Err(shard_not_found_error(*shard_id)); } } } Ok(res) } /// Common routing logic for reads when using ShardKeyWithFallback /// /// Example routing: /// /// request: {"target": "key1", "fallback": "default"} /// /// Situation 1: /// /// - key1 -> shard_ids {1, 2} (both active) /// Request is routed to shard_ids {1, 2} of target key1 /// /// Situation 2: /// /// - key1 -> no shards found /// Request is routed to shard_ids of fallback key "default" /// /// Situation 3: /// /// - key1 -> shard_ids {1} and it is in Partial state (no active replicas) /// Request is routed to shard_ids of fallback key "default" /// /// Situation 4: /// /// - key1 -> shard_ids {1, 2} (shard 1 active, shard 2 partial) /// Request is routed to shard_ids of fallback key "default" /// /// /// If at least one of target shards is Active, use target shard. 
If not, redirect to fallback shard pub fn route_with_fallback_for_read<'a>( &self, key: &'a ShardKeyWithFallback, ) -> CollectionResult<(HashSet<ShardId>, &'a ShardKey)> { let mut shard_key_to_ids_mapping = self.get_shard_key_to_ids_mapping(); let target_shard_ids = shard_key_to_ids_mapping.remove(&key.target); let fallback_shard_ids = shard_key_to_ids_mapping.remove(&key.fallback); if let Some(target_shard_ids) = target_shard_ids { let target_replicas = target_shard_ids .iter() .filter_map(|shard_id| self.shards.get(shard_id)) .collect::<Vec<_>>(); let target_shards_active = target_replicas .iter() .all(|replica_set| !replica_set.readable_shards().is_empty()); if !target_replicas.is_empty() && target_shards_active { // 1st condition is required to handle empty shard keys (2nd one returns true) Ok((target_shard_ids, &key.target)) } else if let Some(fallback_shard_ids) = fallback_shard_ids { Ok((fallback_shard_ids, &key.fallback)) } else { Err(CollectionError::shard_unavailable(format!( "Neither target shard key {} nor fallback shard key {} have active replicas", key.target, key.fallback ))) } } else if let Some(fallback_shard_ids) = fallback_shard_ids { Ok((fallback_shard_ids, &key.fallback)) } else { Err(CollectionError::not_found(format!( "Neither target shard key {} nor fallback shard key {} exist", key.target, key.fallback ))) } } /// Common routing logic for writes when using ShardKeyWithFallback /// /// Similar to read routing, but in case if target shard exists, but is in Partial state, we still want /// to route to both target and fallback shards to ensure data consistency. 
pub fn route_with_fallback_for_write( &self, key: ShardKeyWithFallback, ) -> CollectionResult<Vec<(HashSet<ShardId>, ShardKey)>> { let ShardKeyWithFallback { target, fallback } = key; let mut shard_key_to_ids_mapping = self.get_shard_key_to_ids_mapping(); let target_shard_ids = shard_key_to_ids_mapping.remove(&target); let fallback_shard_ids = shard_key_to_ids_mapping.remove(&fallback); if let Some(target_shard_ids) = target_shard_ids { let target_replicas = target_shard_ids .iter() .filter_map(|shard_id| self.shards.get(shard_id)) .collect::<Vec<_>>(); // Check that at least one active replica per shard exists let target_shards_active = target_replicas .iter() .all(|replica_set| !replica_set.active_shards(false).is_empty()); if target_replicas.is_empty() { return if let Some(fallback_shard_ids) = fallback_shard_ids { Ok(vec![(fallback_shard_ids, fallback)]) } else { Err(CollectionError::not_found(format!( "Neither target shard key {target} nor fallback shard key {fallback} exist", ))) }; } if target_shards_active { // 1st condition is required to handle empty shard keys (2nd one returns true) Ok(vec![(target_shard_ids, target)]) } else if let Some(fallback_shard_ids) = fallback_shard_ids { // target is not active, but it can be in Partial state, so we need extra check // Target: // Shard_id 1 -> replicas: A (Partial) // Shard_id 2 -> replicas: B (Active) // In this case target is still receiving updates. We need to fallback. // Target: // Shard_id 1 -> replicas: A (ActiveRead) // Shard_id 2 -> replicas: B (Active) // We need to send to both target and fallback to ensure consistency. 
// Target: // Shard_id 1 -> replicas: A (Partial) B (Active) // This is not possible, as we checked for active shards above // Target: // Shard_id 1 -> replicas: A (Partial) B (Dead) // This is not possible, as we never deactivate last active replica // Target: // Shard_id 1 -> replicas: A (ActiveRead) B (Dead) // This is not possible, as we never deactivate last active replica // Target: // Shard_id 1 -> replicas: A (ActiveRead) // We need to send to both target and fallback to ensure consistency. // Target: // Shard_id 1 -> replicas: A (Dead) // Can be, if transfer failed. We just fallback. let is_all_replicas_in_read_active = target_replicas.iter().any(|replica_set| { replica_set.check_peers_state_all(|state| state == ReplicaState::ActiveRead) }); if is_all_replicas_in_read_active { Ok(vec![ (target_shard_ids, target), (fallback_shard_ids, fallback), ]) } else { Ok(vec![(fallback_shard_ids, fallback)]) } } else { Err(CollectionError::shard_unavailable(format!( "Neither target shard key {target} nor fallback shard key {fallback} have active replicas", ))) } } else if let Some(fallback_shard_ids) = fallback_shard_ids { Ok(vec![(fallback_shard_ids, fallback)]) } else { Err(CollectionError::not_found(format!( "Neither target shard key {target} nor fallback shard key {fallback} exist", ))) } } pub fn len(&self) -> usize { self.shards.len() } pub fn is_empty(&self) -> bool {
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard_holder/resharding.rs
lib/collection/src/shards/shard_holder/resharding.rs
use std::collections::{HashMap, HashSet}; use std::fmt; use std::ops::Deref as _; use std::sync::Arc; use common::counter::hardware_accumulator::HwMeasurementAcc; use segment::types::{Condition, CustomIdCheckerCondition as _, Filter, ShardKey}; use super::ShardHolder; use crate::config::ShardingMethod; use crate::hash_ring::{self, HashRingRouter}; use crate::operations::CollectionUpdateOperations; use crate::operations::cluster_ops::ReshardingDirection; use crate::operations::point_ops::{ConditionalInsertOperationInternal, PointOperations}; use crate::operations::types::{CollectionError, CollectionResult}; use crate::shards::replica_set::ShardReplicaSet; use crate::shards::replica_set::replica_set_state::ReplicaState; use crate::shards::resharding::{ReshardKey, ReshardStage, ReshardState}; use crate::shards::shard::ShardId; impl ShardHolder { pub fn resharding_state(&self) -> Option<ReshardState> { self.resharding_state.read().clone() } pub fn check_start_resharding(&mut self, resharding_key: &ReshardKey) -> CollectionResult<()> { let ReshardKey { uuid: _, direction, peer_id: _, shard_id, shard_key, } = resharding_key; // Additional shard key check // For auto sharding no shard key must be provided, with custom sharding it must be provided match self.sharding_method { ShardingMethod::Auto => { if shard_key.is_some() { return Err(CollectionError::bad_request(format!( "cannot specify shard key {} on collection with auto sharding", shard_key_fmt(shard_key), ))); } } ShardingMethod::Custom => { if shard_key.is_none() { return Err(CollectionError::bad_request( "must specify shard key on collection with custom sharding", )); } } } let ring = get_ring(&mut self.rings, shard_key)?; { let state = self.resharding_state.read(); assert_resharding_state_consistency(&state, ring, shard_key); if let Some(state) = state.deref() { return if state.matches(resharding_key) { Err(CollectionError::bad_request(format!( "resharding {resharding_key} is already in progress:\n{state:#?}" ))) 
} else { Err(CollectionError::bad_request(format!( "another resharding is in progress:\n{state:#?}" ))) }; } } // Don't remove the last shard if resharding down if matches!(direction, ReshardingDirection::Down) { let shard_count = match shard_key { Some(shard_key) => self .get_shard_key_to_ids_mapping() .get(shard_key) .map_or(0, |shards| shards.len()), None => self.shards.len(), }; if shard_count <= 1 { return Err(CollectionError::bad_request(format!( "cannot remove shard {shard_id} by resharding down, it is the last shard", ))); } } let has_shard = self.shards.contains_key(shard_id); match resharding_key.direction { ReshardingDirection::Up => { if has_shard { return Err(CollectionError::bad_request(format!( "shard holder already contains shard {shard_id} replica set", ))); } } ReshardingDirection::Down => { if !has_shard { return Err(CollectionError::bad_request(format!( "shard holder does not contain shard {shard_id} replica set", ))); } } } // TODO(resharding): Check that peer exists!? Ok(()) } // TODO: do not leave broken intermediate state if this fails midway? /// ## Cancel safety /// /// This function is **not** cancel safe. pub async fn start_resharding_unchecked( &mut self, resharding_key: ReshardKey, new_shard: Option<ShardReplicaSet>, ) -> CollectionResult<()> { let ReshardKey { uuid, direction, peer_id, shard_id, shard_key, } = resharding_key; // TODO(resharding): Delete shard on error!? 
let ring_res = get_ring(&mut self.rings, &shard_key); let ring = match ring_res { Ok(ring) => ring, Err(err) => { if let Some(new_shard) = new_shard { new_shard.stop_gracefully().await; } return Err(err); } }; ring.start_resharding(shard_id, direction); // Add new shard if resharding up if let Some(new_shard) = new_shard { debug_assert_eq!(direction, ReshardingDirection::Up); self.add_shard(shard_id, new_shard, shard_key.clone()) .await?; } self.resharding_state.write(|state| { debug_assert!( state.is_none(), "resharding is already in progress:\n{state:#?}", ); *state = Some(ReshardState::new( uuid, direction, peer_id, shard_id, shard_key, )); })?; Ok(()) } pub fn commit_read_hashring(&mut self, resharding_key: &ReshardKey) -> CollectionResult<()> { self.check_resharding(resharding_key, check_stage(ReshardStage::MigratingPoints))?; self.resharding_state.write(|state| { let Some(state) = state else { unreachable!(); }; state.stage = ReshardStage::ReadHashRingCommitted; })?; Ok(()) } pub fn commit_write_hashring(&mut self, resharding_key: &ReshardKey) -> CollectionResult<()> { self.check_resharding( resharding_key, check_stage(ReshardStage::ReadHashRingCommitted), )?; let ring = get_ring(&mut self.rings, &resharding_key.shard_key)?; ring.commit_resharding(); self.resharding_state.write(|state| { let Some(state) = state else { unreachable!(); }; state.stage = ReshardStage::WriteHashRingCommitted; })?; Ok(()) } pub fn check_finish_resharding(&mut self, resharding_key: &ReshardKey) -> CollectionResult<()> { self.check_resharding( resharding_key, check_stage(ReshardStage::WriteHashRingCommitted), )?; Ok(()) } pub fn finish_resharding_unchecked(&mut self, _: &ReshardKey) -> CollectionResult<()> { self.resharding_state.write(|state| { debug_assert!(state.is_some(), "resharding is not in progress"); *state = None; })?; Ok(()) } fn check_resharding( &mut self, resharding_key: &ReshardKey, check_state: impl Fn(&ReshardState) -> CollectionResult<()>, ) -> CollectionResult<()> 
{ let ReshardKey { shard_id, shard_key, .. } = resharding_key; let ring = get_ring(&mut self.rings, shard_key)?; let state = self.resharding_state.read(); assert_resharding_state_consistency(&state, ring, &resharding_key.shard_key); match state.deref() { Some(state) if state.matches(resharding_key) => { check_state(state)?; } Some(state) => { return Err(CollectionError::bad_request(format!( "another resharding is in progress:\n{state:#?}" ))); } None => { return Err(CollectionError::bad_request( "resharding is not in progress", )); } } debug_assert!( self.shards.contains_key(shard_id), "shard holder does not contain shard {shard_id} replica set" ); // TODO(resharding): Assert that peer exists!? Ok(()) } pub fn check_abort_resharding(&self, resharding_key: &ReshardKey) -> CollectionResult<()> { let state = self.resharding_state.read(); // - do not abort if no resharding operation is ongoing let Some(state) = state.deref() else { return Err(CollectionError::bad_request(format!( "can't abort resharding {resharding_key}, no resharding operation in progress", ))); }; // - do not abort if there is no active resharding operation with that key if !state.matches(resharding_key) { return Err(CollectionError::bad_request(format!( "can't abort resharding {resharding_key}, \ resharding operation in progress has key {}", state.key(), ))); } // - it's safe to run, if read hash ring was not committed yet if state.stage < ReshardStage::ReadHashRingCommitted { return Ok(()); } // - but resharding can't be aborted, after read hash ring has been committed Err(CollectionError::bad_request(format!( "can't abort resharding {resharding_key}, \ because read hash ring has been committed already, \ resharding must be completed", ))) } pub async fn abort_resharding( &mut self, resharding_key: ReshardKey, force: bool, ) -> CollectionResult<()> { log::warn!("Aborting resharding {resharding_key} (force: {force})"); let ReshardKey { uuid: _, direction, peer_id: _, shard_id, ref shard_key, } = 
resharding_key; // Cleanup existing shards if resharding down if direction == ReshardingDirection::Down { for (&id, shard) in self.shards.iter() { // Skip shards that does not belong to resharding shard key if self.shard_id_to_key_mapping.get(&id) != shard_key.as_ref() { continue; } // Skip target shard if id == shard_id { continue; } // Revert replicas in `Resharding` state back into `Active` state for (peer, state) in shard.peers() { if state.is_resharding() { shard.set_replica_state(peer, ReplicaState::Active).await?; } } // We only cleanup local shards if !shard.is_local().await { continue; } // Remove any points that might have been transferred from target shard // Replica may be dead, so we force the delete operation let filter = self.hash_ring_filter(id).expect("hash ring filter"); let filter = Filter::new_must_not(Condition::new_custom(Arc::new(filter))); shard .delete_local_points(filter, HwMeasurementAcc::disposable(), true) // Internal operation, no performance tracking needed .await?; } } if let Some(ring) = self.rings.get_mut(shard_key) { log::debug!("reverting resharding hashring for shard {shard_id}"); ring.abort_resharding(shard_id, direction); } else { log::warn!( "aborting resharding {resharding_key}, \ but {shard_key:?} hashring does not exist" ); } // Remove new shard if resharding up if direction == ReshardingDirection::Up { if let Some(shard) = self.get_shard(shard_id) { // Remove all replicas from shard for (peer_id, replica_state) in shard.peers() { log::debug!( "removing peer {peer_id} with state {replica_state:?} from replica set {shard_id}", ); shard.remove_peer(peer_id).await?; } debug_assert!( shard.peers().is_empty(), "replica set {shard_id} must be empty after removing all peers", ); log::debug!("removing replica set {shard_id}"); // Drop the shard if let Some(shard_key) = shard_key { self.key_mapping.write_optional(|key_mapping| { if !key_mapping.contains_key(shard_key) { return None; } let mut key_mapping = key_mapping.clone(); 
key_mapping.get_mut(shard_key).unwrap().remove(&shard_id); Some(key_mapping) })?; } self.drop_and_remove_shard(shard_id).await?; self.shard_id_to_key_mapping.remove(&shard_id); } else { log::warn!( "aborting resharding {resharding_key}, \ but shard holder does not contain {shard_id} replica set", ); } } self.resharding_state.write(|state| { debug_assert!( state .as_ref() .is_some_and(|state| state.matches(&resharding_key)), "resharding {resharding_key} is not in progress:\n{state:#?}" ); state.take(); })?; Ok(()) } /// Split collection update operation by "update mode": /// - update all: /// - "regular" operation /// - `upsert` inserts new points and updates existing ones /// - other update operations return error, if a point does not exist in collection /// - update existing: /// - `upsert` does *not* insert new points, only updates existing ones /// - other update operations ignore points that do not exist in collection /// /// Depends on the current resharding state. If resharding is not active operations are not split. pub fn split_by_mode( &self, shard_id: ShardId, operation: CollectionUpdateOperations, ) -> OperationsByMode { let Some(state) = self.resharding_state() else { return OperationsByMode::from(operation); }; // Resharding *UP* // ┌────────────┐ ┌──────────┐ // │ │ │ │ // │ Shard 1 │ │ Shard 2 │ // │ Non-Target ├──►│ Target │ // │ Sender │ │ Receiver │ // │ │ │ │ // └────────────┘ └──────────┘ // // Resharding *DOWN* // ┌────────────┐ ┌──────────┐ // │ │ │ │ // │ Shard 1 │ │ Shard 2 │ // │ Non-Target │◄──┤ Target │ // │ Receiver │ │ Sender │ // │ │ │ │ // └────────────┘ └──────────┘ // Target shard of the resharding operation. 
This is the shard that: // // - *created* during resharding *up* // - *deleted* during resharding *down* let is_target_shard = shard_id == state.shard_id; // Shard that will be *receiving* migrated points during resharding: // // - *target* shard during resharding *up* // - *non* target shards during resharding *down* let is_receiver_shard = match state.direction { ReshardingDirection::Up => is_target_shard, ReshardingDirection::Down => !is_target_shard, }; // Shard that will be *sending* migrated points during resharding: // // - *non* target shards during resharding *up* // - *target* shard during resharding *down* let is_sender_shard = !is_receiver_shard; // We split update operations: // // - on *receiver* shards during `MigratingPoints` stage (for all operations except `upsert`) // - and on *sender* shards during `ReadHashRingCommitted` stage when resharding *up* let should_split_receiver = is_receiver_shard && state.stage == ReshardStage::MigratingPoints && !operation.is_upsert_points(); let should_split_sender = is_sender_shard && state.stage >= ReshardStage::ReadHashRingCommitted && state.direction == ReshardingDirection::Up; if !should_split_receiver && !should_split_sender { return OperationsByMode::from(operation); } // There's no point splitting delete operations if operation.is_delete_points() { return OperationsByMode::from(operation); } let Some(filter) = self.resharding_filter() else { return OperationsByMode::from(operation); }; let point_ids = match operation.point_ids() { Some(ids) if !ids.is_empty() => ids, Some(_) | None => return OperationsByMode::from(operation), }; let target_point_ids: HashSet<_> = point_ids .iter() .copied() .filter(|&point_id| filter.check(point_id)) .collect(); if target_point_ids.is_empty() { OperationsByMode::from(operation) } else if target_point_ids.len() == point_ids.len() { OperationsByMode::default().with_update_only_existing(operation) } else { let mut update_all = operation.clone(); 
update_all.retain_point_ids(|point_id| !target_point_ids.contains(point_id)); let mut update_only_existing = operation; update_only_existing.retain_point_ids(|point_id| target_point_ids.contains(point_id)); OperationsByMode::from(update_all).with_update_only_existing(update_only_existing) } } pub fn resharding_filter(&self) -> Option<hash_ring::HashRingFilter> { let shard_id = self.resharding_state.read().as_ref()?.shard_id; self.hash_ring_filter(shard_id) } pub fn hash_ring_router(&self, shard_id: ShardId) -> Option<&HashRingRouter> { if !self.contains_shard(shard_id) { return None; } let shard_key = self.shard_id_to_key_mapping.get(&shard_id).cloned(); let router = self.rings.get(&shard_key).expect("hashring exists"); Some(router) } pub fn hash_ring_filter(&self, shard_id: ShardId) -> Option<hash_ring::HashRingFilter> { let router = self.hash_ring_router(shard_id)?; let ring = match router { HashRingRouter::Single(ring) => ring, HashRingRouter::Resharding { old, new } => { if new.len() > old.len() { new } else { old } } }; Some(hash_ring::HashRingFilter::new(ring.clone(), shard_id)) } } #[derive(Clone, Debug, Default)] pub struct OperationsByMode { pub update_all: Vec<CollectionUpdateOperations>, pub update_only_existing: Vec<CollectionUpdateOperations>, } impl OperationsByMode { pub fn with_update_only_existing(mut self, operation: CollectionUpdateOperations) -> Self { self.update_only_existing = match operation { CollectionUpdateOperations::PointOperation(point_operation) => match point_operation { PointOperations::UpsertPoints(operation) => operation.into_update_only(None), PointOperations::UpsertPointsConditional(operation) => { let ConditionalInsertOperationInternal { points_op, condition, } = operation; points_op.into_update_only(Some(condition)) } PointOperations::DeletePoints { ids } => { vec![CollectionUpdateOperations::PointOperation( PointOperations::DeletePoints { ids }, )] } PointOperations::DeletePointsByFilter(op) => { 
vec![CollectionUpdateOperations::PointOperation( PointOperations::DeletePointsByFilter(op), )] } PointOperations::SyncPoints(op) => { vec![CollectionUpdateOperations::PointOperation( PointOperations::SyncPoints(op), )] } #[cfg(feature = "staging")] PointOperations::TestDelay(op) => { vec![CollectionUpdateOperations::PointOperation( PointOperations::TestDelay(op), )] } }, CollectionUpdateOperations::VectorOperation(_) | CollectionUpdateOperations::PayloadOperation(_) | CollectionUpdateOperations::FieldIndexOperation(_) => { vec![operation] } }; self } } impl From<CollectionUpdateOperations> for OperationsByMode { fn from(operation: CollectionUpdateOperations) -> Self { Self { update_all: vec![operation], update_only_existing: Vec::new(), } } } fn get_ring<'a>( rings: &'a mut HashMap<Option<ShardKey>, HashRingRouter>, shard_key: &'_ Option<ShardKey>, ) -> CollectionResult<&'a mut HashRingRouter> { rings.get_mut(shard_key).ok_or_else(|| { CollectionError::bad_request(format!( "{} hashring does not exist", shard_key_fmt(shard_key) )) }) } fn assert_resharding_state_consistency( state: &Option<ReshardState>, ring: &HashRingRouter, shard_key: &Option<ShardKey>, ) { match state.as_ref().map(|state| state.stage) { Some(ReshardStage::MigratingPoints | ReshardStage::ReadHashRingCommitted) => { debug_assert!( ring.is_resharding(), "resharding is in progress, \ but {shard_key:?} hashring is not a resharding hashring:\n\ {state:#?}" ); } Some(ReshardStage::WriteHashRingCommitted) => { debug_assert!( !ring.is_resharding(), "resharding is in progress, \ and write hashring has already been committed, \ but {shard_key:?} hashring is a resharding hashring:\n\ {state:#?}" ); } None => { debug_assert!( !ring.is_resharding(), "resharding is not in progress, \ but {shard_key:?} hashring is a resharding hashring" ); } } } fn check_stage(stage: ReshardStage) -> impl Fn(&ReshardState) -> CollectionResult<()> { move |state| { if state.stage == stage { Ok(()) } else { 
Err(CollectionError::bad_request(format!( "expected resharding stage {stage:?}, but resharding is at stage {:?}", state.stage, ))) } } } fn shard_key_fmt(key: &Option<ShardKey>) -> &dyn fmt::Display { match key { Some(key) => key, None => &"default", } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/update.rs
lib/collection/src/shards/replica_set/update.rs
use std::ops::Deref as _; use std::time::Duration; use common::counter::hardware_accumulator::HwMeasurementAcc; use futures::stream::FuturesUnordered; use futures::{FutureExt as _, StreamExt as _}; use itertools::Itertools as _; use tokio_util::task::AbortOnDropHandle; use super::{ShardReplicaSet, clock_set}; use crate::operations::point_ops::WriteOrdering; use crate::operations::types::{CollectionError, CollectionResult, UpdateResult, UpdateStatus}; use crate::operations::{ClockTag, CollectionUpdateOperations, OperationWithClockTag}; use crate::shards::replica_set::replica_set_state::{ReplicaSetState, ReplicaState}; use crate::shards::shard::{PeerId, Shard}; use crate::shards::shard_trait::ShardOperation as _; /// Maximum number of attempts for applying an update with a new clock. /// /// If an update is rejected because of an old clock, we will try again with a new clock. This /// describes the maximum number of times we try the update. const UPDATE_MAX_CLOCK_REJECTED_RETRIES: usize = 3; const DEFAULT_SHARD_DEACTIVATION_TIMEOUT: Duration = Duration::from_secs(30); impl ShardReplicaSet { /// Update local shard if any without forwarding to remote shards /// /// If `force` is true, the operation will be applied unconditionally no matter the replica /// state. Must only be used internally. /// /// # Cancel safety /// /// This method is *not* cancel safe. pub async fn update_local( &self, operation: OperationWithClockTag, wait: bool, mut hw_measurement: HwMeasurementAcc, force: bool, ) -> CollectionResult<Option<UpdateResult>> { // `ShardOperations::update` is not guaranteed to be cancel safe, so this method is not // cancel safe. 
let local = self.local.read().await; let Some(local) = local.deref() else { return Ok(None); }; let Some(state) = self.peer_state(self.this_peer_id()) else { return Ok(None); }; // Don't measure hw when resharding if state.is_resharding() && !hw_measurement.is_disposable() { hw_measurement = HwMeasurementAcc::disposable(); } let result = match state { ReplicaState::Active => { // Rate limit update operations on Active replica self.check_operation_write_rate_limiter(&hw_measurement, local, &operation) .await?; local.get().update(operation, wait, hw_measurement).await } // Force apply the operation no matter the state _ if force => local.get().update(operation, wait, hw_measurement).await, ReplicaState::Partial | ReplicaState::Initializing | ReplicaState::Resharding | ReplicaState::ReshardingScaleDown | ReplicaState::ActiveRead => local.get().update(operation, wait, hw_measurement).await, ReplicaState::Listener => local.get().update(operation, false, hw_measurement).await, ReplicaState::PartialSnapshot | ReplicaState::Recovery if operation.clock_tag.is_some_and(|tag| tag.force) => { local.get().update(operation, wait, hw_measurement).await } ReplicaState::PartialSnapshot | ReplicaState::Recovery => { if log::log_enabled!(log::Level::Debug) { if let Some(ids) = operation.operation.point_ids() { log::debug!( "Operation affecting point IDs {ids:?} rejected on this peer, force flag required in recovery state", ); } else { log::debug!( "Operation {operation:?} rejected on this peer, force flag required in recovery state", ); } } return Ok(None); } ReplicaState::Dead => { return Ok(None); } }; result.map(Some) } /// # Cancel safety /// /// This method is *not* cancel safe. 
pub async fn update_with_consistency( &self, operation: CollectionUpdateOperations, wait: bool, ordering: WriteOrdering, update_only_existing: bool, mut hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<UpdateResult> { // `ShardReplicaSet::update` is not cancel safe, so this method is not cancel safe. let Some(leader_peer) = self.leader_peer_for_update(ordering) else { return Err(CollectionError::service_error(format!( "Cannot update shard {}:{} with {ordering:?} ordering because no leader could be selected", self.collection_id, self.shard_id ))); }; // Don't measure hw when resharding let peer_state = self.peer_state(leader_peer); if peer_state.is_some_and(|state| state.is_resharding()) { hw_measurement_acc = HwMeasurementAcc::disposable(); } // If we are the leader, run the update from this replica set if leader_peer == self.this_peer_id() { // Lock updates if ordering is strong or medium let _write_ordering_lock = match ordering { WriteOrdering::Strong | WriteOrdering::Medium => { Some(self.write_ordering_lock.lock().await) } WriteOrdering::Weak => None, }; self.update(operation, wait, update_only_existing, hw_measurement_acc) .await } else { // Forward the update to the designated leader self.forward_update(leader_peer, operation, wait, ordering, hw_measurement_acc) .await .map_err(|err| { if err.is_transient() { // Deactivate the peer if forwarding failed with transient error let replica_state = self.replica_state.read(); let from_state = replica_state.get_peer_state(leader_peer); self.add_locally_disabled(Some(&replica_state), leader_peer, from_state); // Return service error CollectionError::service_error(format!( "Failed to apply update with {ordering:?} ordering via leader peer {leader_peer}: {err}" )) } else { err } }) } } /// Designated a leader replica for the update based on the WriteOrdering fn leader_peer_for_update(&self, ordering: WriteOrdering) -> Option<PeerId> { match ordering { WriteOrdering::Weak => Some(self.this_peer_id()), // no 
requirement for consistency WriteOrdering::Medium => self.highest_alive_replica_peer_id(), // consistency with highest alive replica WriteOrdering::Strong => self.highest_replica_peer_id(), // consistency with highest replica } } fn highest_alive_replica_peer_id(&self) -> Option<PeerId> { let read_lock = self.replica_state.read(); let peer_ids = read_lock.peers().keys().cloned().collect::<Vec<_>>(); drop(read_lock); peer_ids .into_iter() .filter(|&peer_id| self.peer_can_be_source_of_truth(peer_id)) // re-acquire replica_state read lock .max() } fn highest_replica_peer_id(&self) -> Option<PeerId> { self.replica_state.read().peers().keys().max().cloned() } /// # Cancel safety /// /// This method is *not* cancel safe. async fn update( &self, operation: CollectionUpdateOperations, wait: bool, update_only_existing: bool, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<UpdateResult> { // `ShardRepilcaSet::update_impl` is not cancel safe, so this method is not cancel safe. // TODO: Optimize `remotes`/`local`/`clock` locking for the "happy path"? // // E.g., refactor `update`/`update_impl`, so that it would be possible to: // - lock `remotes`, `local`, `clock` (in specified order!) on the *first* iteration of the loop // - then release and lock `remotes` and `local` *only* for all next iterations // - but keep initial `clock` for the whole duration of `update` let mut clock = self.clock_set.lock().await.get_clock(); for attempt in 1..=UPDATE_MAX_CLOCK_REJECTED_RETRIES { let is_non_zero_tick = clock.current_tick().is_some(); let res = self .update_impl( operation.clone(), wait, &mut clock, update_only_existing, hw_measurement_acc.clone(), ) .await?; if let Some(res) = res { return Ok(res); } // Log a warning, if operation was rejected... but only if operation had a non-0 tick, // because operations with tick 0 should *always* be rejected and rejection is *expected*. 
if is_non_zero_tick && log::log_enabled!(log::Level::Warn) { if let Some(ids) = operation.point_ids() { log::warn!( "Operation affecting point IDs {ids:?} was rejected by some node(s), retrying... \ (attempt {attempt}/{UPDATE_MAX_CLOCK_REJECTED_RETRIES})" ); } else { log::warn!( "Operation {operation:?} was rejected by some node(s), retrying... \ (attempt {attempt}/{UPDATE_MAX_CLOCK_REJECTED_RETRIES})" ); } } } Err(CollectionError::service_error(format!( "Failed to apply operation {operation:?} \ after {UPDATE_MAX_CLOCK_REJECTED_RETRIES} attempts, \ all attempts were rejected", ))) } /// # Cancel safety /// /// This method is *not* cancel safe. async fn update_impl( &self, operation: CollectionUpdateOperations, wait: bool, clock: &mut clock_set::ClockGuard, update_only_existing: bool, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Option<UpdateResult>> { // `LocalShard::update` is not guaranteed to be cancel safe and it's impossible to cancel // multiple parallel updates in a way that is *guaranteed* not to introduce inconsistencies // between nodes, so this method is not cancel safe. 
let remotes = self.remotes.read().await; let local = self.local.read().await; let replica_count = usize::from(local.is_some()) + remotes.len(); let this_peer_id = self.this_peer_id(); // Target all remote peers that can receive updates let updatable_remote_shards: Vec<_> = remotes .iter() .filter(|rs| self.is_peer_updatable(rs.peer_id)) .collect(); // Local is defined and can receive updates let local_is_updatable = local.is_some() && self.is_peer_updatable(this_peer_id); if updatable_remote_shards.is_empty() && !local_is_updatable { return Err(CollectionError::service_error(format!( "The replica set for shard {} on peer {this_peer_id} has no active replica", self.shard_id, ))); } let current_clock_tick = clock.tick_once(); let clock_tag = ClockTag::new(this_peer_id, clock.id() as _, current_clock_tick); let operation = OperationWithClockTag::new(operation, Some(clock_tag)); let mut update_futures = Vec::with_capacity(updatable_remote_shards.len() + 1); if let Some(local) = local.deref() && self.is_peer_updatable(this_peer_id) { let local_wait = if self.peer_state(this_peer_id) == Some(ReplicaState::Listener) { false } else { wait }; if self.peer_is_active(this_peer_id) { // Check write rate limiter before proceeding if replica active self.check_operation_write_rate_limiter(&hw_measurement_acc, local, &operation) .await?; } let operation = operation.clone(); let hw_acc = hw_measurement_acc.clone(); let local_update = async move { local .get() .update(operation, local_wait, hw_acc) .await .map(|ok| (this_peer_id, ok)) .map_err(|err| (this_peer_id, err)) }; update_futures.push(local_update.left_future()); } for remote in updatable_remote_shards { let operation = operation.clone(); let hw_acc = hw_measurement_acc.clone(); let remote_update = async move { remote .update(operation, wait, hw_acc) .await .map(|ok| (remote.peer_id, ok)) .map_err(|err| (remote.peer_id, err)) }; update_futures.push(remote_update.right_future()); } let all_res: Vec<Result<_, _>> = match 
self.shared_storage_config.update_concurrency { Some(concurrency) => { futures::stream::iter(update_futures) .buffer_unordered(concurrency.get()) .collect() .await } None => FuturesUnordered::from_iter(update_futures).collect().await, }; drop(local); drop(remotes); let write_consistency_factor = self .collection_config .read() .await .params .write_consistency_factor .get() as usize; let minimal_success_count = write_consistency_factor.min(replica_count); let (successes, failures): (Vec<_>, Vec<_>) = all_res.into_iter().partition_result(); // Advance clock if some replica echoed *newer* tick let new_clock_tick = successes .iter() .filter_map(|(_, result)| { let echo_tag = result.clock_tag?; if echo_tag.peer_id != clock_tag.peer_id { debug_assert!( false, "Echoed clock tag peer_id does not match the original", ); return None; } if echo_tag.clock_id != clock_tag.clock_id { debug_assert!( false, "Echoed clock tag clock_id does not match the original", ); return None; } Some(echo_tag.clock_tick) }) .max(); if let Some(new_clock_tick) = new_clock_tick { clock.advance_to(new_clock_tick); } // Notify consensus about replica failures if: // 1. there are some failures, but enough successes for the operation to be accepted // 2. a resharding replica failed, and there are not enough successes for the operation to be accepted // // Notify user about potential consistency problems if: // 1. there are some failures and enough successes, but we fail to deactivate the failed replicas // 2. successes were not applied to any Active or Resharding replica // // Notify user with operation error if: // 1. 
there are not enough successes for the operation to be accepted let failure_error = if let Some((peer_id, collection_error)) = failures.first() { format!("Failed peer: {peer_id}, error: {collection_error}") } else { String::new() }; if !failures.is_empty() { for (peer_id, err) in &failures { log::warn!( "Failed to update shard {}:{} on peer {peer_id}, error: {err}", self.collection_id, self.shard_id, ); } // If there is at least one full-complete operation, we can't ignore non-transient errors (4xx) // And we must deactivate failed replicas to ensure consistency let has_full_completed_updates = successes.iter().any(|(_, res)| match res.status { UpdateStatus::Completed => true, UpdateStatus::Acknowledged => false, UpdateStatus::ClockRejected => false, }); if successes.len() >= minimal_success_count { // If there are enough successes, deactivate failed replicas // Failed replicas will automatically recover from another replica ensuring consistency let failures_to_handle: Vec<_> = if !has_full_completed_updates { // We can only deactivate transient errors failures .into_iter() .filter(|(_, err)| err.is_transient()) .collect() } else { failures }; let wait_for_deactivation = self.handle_failed_replicas( &failures_to_handle, &self.replica_state.read(), update_only_existing, ); // Wait for replica failures to be accepted, otherwise return consistency error if wait && wait_for_deactivation { // ToDo: allow timeout configuration in API let timeout = DEFAULT_SHARD_DEACTIVATION_TIMEOUT; let replica_state = self.replica_state.clone(); let peer_ids: Vec<_> = failures_to_handle .iter() .map(|(peer_id, _)| *peer_id) .collect(); let shards_disabled = AbortOnDropHandle::new(tokio::task::spawn_blocking(move || { replica_state.wait_for( |state| { peer_ids.iter().all(|peer_id| { // Not found means that peer is dead // Wait for replica deactivation. 
let is_active = state .peers() .get(peer_id) .map(|state| state.can_be_source_of_truth()) .unwrap_or(false); !is_active }) }, timeout, ) })) .await?; if !shards_disabled { return Err(CollectionError::service_error(format!( "Some replica of shard {} failed to apply operation and deactivation \ timed out after {timeout:?}. Consistency of this update is not guaranteed. Please retry. {failure_error}", self.shard_id, ))); } } } else { // If there aren't enough successes, report error to user // TODO(resharding): reconsider how we count/deactivate resharding replicas. self.handle_failed_replicas( failures .iter() .filter(|(peer_id, _)| self.peer_is_resharding(*peer_id)), &self.replica_state.read(), update_only_existing, ); let (_peer_id, err) = failures.into_iter().next().unwrap(); return Err(err); } } // Successes must have applied to at least one active replica if !successes .iter() .any(|&(peer_id, _)| self.peer_can_be_source_of_truth(peer_id)) { return Err(CollectionError::service_error(format!( "Failed to apply operation to at least one `Active` replica. \ Consistency of this update is not guaranteed. Please retry. {failure_error}", ))); } let is_any_operation_rejected = successes .iter() .any(|(_, res)| matches!(res.status, UpdateStatus::ClockRejected)); if is_any_operation_rejected { return Ok(None); } // There are enough successes, return the first one let (_, res) = successes .into_iter() .next() .expect("successes is not empty"); Ok(Some(res)) } /// Check write rate limiter for the operation /// /// Lazily compute the cost of the operation and check against the write rate limiter async fn check_operation_write_rate_limiter( &self, hw_measurement: &HwMeasurementAcc, local: &Shard, operation: &OperationWithClockTag, ) -> CollectionResult<()> { self.check_write_rate_limiter(hw_measurement, || async { let mut ratelimiter_cost = 1; // Estimate the cost based on affected points if filter is available. 
match local .estimate_request_cardinality(&operation.operation, hw_measurement) .await { Ok(est) => ratelimiter_cost = 1.max(est.exp), Err(err) => log::error!("Estimating cardinality: {err:?}"), } ratelimiter_cost }) .await?; Ok(()) } /// Whether to send updates to the given peer /// /// A peer in dead state, or a locally disabled peer, will not accept updates. fn is_peer_updatable(&self, peer_id: PeerId) -> bool { let Some(state) = self.peer_state(peer_id) else { return false; }; state.is_updatable() && !self.is_locally_disabled(peer_id) } fn peer_is_resharding(&self, peer_id: PeerId) -> bool { let is_resharding = matches!( self.peer_state(peer_id), Some(ReplicaState::Resharding | ReplicaState::ReshardingScaleDown) ); is_resharding && !self.is_locally_disabled(peer_id) } fn handle_failed_replicas<'a>( &self, failures: impl IntoIterator<Item = &'a (PeerId, CollectionError)>, state: &ReplicaSetState, update_only_existing: bool, ) -> bool { let mut wait_for_deactivation = false; for (peer_id, err) in failures { let Some(peer_state) = state.get_peer_state(*peer_id) else { continue; }; // Ignore errors entirely for dead and listener replicas match peer_state { ReplicaState::Dead | ReplicaState::Listener => continue, ReplicaState::Active | ReplicaState::Initializing | ReplicaState::Partial | ReplicaState::Recovery | ReplicaState::PartialSnapshot | ReplicaState::Resharding | ReplicaState::ReshardingScaleDown | ReplicaState::ActiveRead => (), } // Handle a special case where transfer receiver is not in the expected replica state yet. // Data consistency will be handled by the shard transfer and the associated proxies. 
if peer_state.is_partial_or_recovery() && err.is_pre_condition_failed() { continue; } // Ignore missing point errors if replica is in partial or recovery state // Partial or recovery state indicates that the replica is receiving a shard transfer, // it might not have received all the points yet // See: <https://github.com/qdrant/qdrant/pull/5991> if peer_state.is_partial_or_recovery() && err.is_missing_point() { continue; } if update_only_existing && err.is_missing_point() { continue; } if err.is_transient() || peer_state == ReplicaState::Initializing { // If the error is transient, we should not deactivate the peer // before allowing other operations to continue. // Otherwise, the failed node can become responsive again, before // the other nodes deactivate it, so the storage might be inconsistent. wait_for_deactivation = true; } log::debug!( "Deactivating peer {peer_id} because of failed update of shard {}:{}", self.collection_id, self.shard_id, ); // Deactivate replica in consensus if it matches the state we expect // Always deactivate the replica if its in a shard transfer related state let from_state = Some(peer_state).filter(|state| !state.is_partial_or_recovery()); self.add_locally_disabled(Some(state), *peer_id, from_state); } wait_for_deactivation } /// Forward update to the leader replica /// /// # Cancel safety /// /// This method is cancel safe. async fn forward_update( &self, leader_peer: PeerId, operation: CollectionUpdateOperations, wait: bool, ordering: WriteOrdering, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<UpdateResult> { // `RemoteShard::forward_update` is cancel safe, so this method is cancel safe. 
let remotes_guard = self.remotes.read().await; let Some(remote_leader) = remotes_guard.iter().find(|r| r.peer_id == leader_peer) else { return Err(CollectionError::service_error(format!( "Cannot forward update to shard {} because was removed from the replica set", self.shard_id ))); }; remote_leader .forward_update( OperationWithClockTag::from(operation), wait, ordering, hw_measurement_acc, ) // `clock_tag` *has to* be `None`! .await } } #[cfg(test)] mod tests { use std::collections::HashSet; use std::num::NonZeroU32; use std::sync::Arc; use common::budget::ResourceBudget; use common::save_on_disk::SaveOnDisk; use segment::types::Distance; use tempfile::{Builder, TempDir}; use tokio::runtime::Handle; use tokio::sync::RwLock; use super::*; use crate::config::*; use crate::operations::types::VectorsConfig; use crate::operations::vector_params_builder::VectorParamsBuilder; use crate::optimizers_builder::OptimizersConfig; use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState}; #[tokio::test] async fn test_highest_replica_peer_id() { let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap(); let rs = new_shard_replica_set(&collection_dir).await; assert_eq!(rs.highest_replica_peer_id(), Some(5)); // at build time the replicas are all dead, they need to be activated assert_eq!(rs.highest_alive_replica_peer_id(), None); rs.set_replica_state(1, ReplicaState::Active).await.unwrap(); rs.set_replica_state(3, ReplicaState::Active).await.unwrap(); rs.set_replica_state(4, ReplicaState::Active).await.unwrap(); rs.set_replica_state(5, ReplicaState::Partial) .await .unwrap(); assert_eq!(rs.highest_replica_peer_id(), Some(5)); assert_eq!(rs.highest_alive_replica_peer_id(), Some(4)); } const TEST_OPTIMIZERS_CONFIG: OptimizersConfig = OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 2, max_segment_size: None, #[expect(deprecated)] memmap_threshold: None, indexing_threshold: Some(50_000), 
flush_interval_sec: 30, max_optimization_threads: Some(2), }; async fn new_shard_replica_set(collection_dir: &TempDir) -> ShardReplicaSet { let update_runtime = Handle::current(); let search_runtime = Handle::current(); let wal_config = WalConfig { wal_capacity_mb: 1, wal_segments_ahead: 0, wal_retain_closed: 1, }; let collection_params = CollectionParams { vectors: VectorsConfig::Single(VectorParamsBuilder::new(4, Distance::Dot).build()), shard_number: NonZeroU32::new(4).unwrap(), replication_factor: NonZeroU32::new(3).unwrap(), write_consistency_factor: NonZeroU32::new(2).unwrap(), ..CollectionParams::empty() }; let config = CollectionConfigInternal { params: collection_params, optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(), wal_config, hnsw_config: Default::default(), quantization_config: None, strict_mode_config: None, uuid: None, metadata: None, }; let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap(); let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json"); let payload_index_schema = Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap()); let shared_config = Arc::new(RwLock::new(config.clone())); let remotes = HashSet::from([2, 3, 4, 5]); ShardReplicaSet::build( 1, None, "test_collection".to_string(), 1, false, remotes, dummy_on_replica_failure(), dummy_abort_shard_transfer(), collection_dir.path(), shared_config, config.optimizer_config.clone(), Default::default(), payload_index_schema, Default::default(), update_runtime, search_runtime, ResourceBudget::default(), None, ) .await .unwrap() } fn dummy_on_replica_failure() -> ChangePeerFromState { Arc::new(move |_peer_id, _shard_id, _from_state| {}) } fn dummy_abort_shard_transfer() -> AbortShardTransfer { Arc::new(|_shard_transfer, _reason| {}) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/telemetry.rs
lib/collection/src/shards/replica_set/telemetry.rs
use std::ops::Deref as _; use std::time::Duration; use common::types::TelemetryDetail; use segment::types::SizeStats; use crate::operations::types::{CollectionResult, OptimizersStatus}; use crate::shards::replica_set::ShardReplicaSet; use crate::shards::telemetry::{PartialSnapshotTelemetry, ReplicaSetTelemetry}; impl ShardReplicaSet { pub(crate) async fn get_telemetry_data( &self, detail: TelemetryDetail, timeout: Duration, ) -> CollectionResult<ReplicaSetTelemetry> { let local_shard = self.local.read().await; let local = local_shard.as_ref(); let local_telemetry = match local { Some(local_shard) => Some(local_shard.get_telemetry_data(detail, timeout).await?), None => None, }; Ok(ReplicaSetTelemetry { id: self.shard_id, key: self.shard_key.clone(), local: local_telemetry, remote: self .remotes .read() .await .iter() .map(|remote| remote.get_telemetry_data(detail)) .collect(), replicate_states: self.replica_state.read().peers().clone(), partial_snapshot: Some(PartialSnapshotTelemetry { ongoing_create_snapshot_requests: self .partial_snapshot_meta .ongoing_create_snapshot_requests(), is_recovering: self.partial_snapshot_meta.is_recovery_lock_taken(), recovery_timestamp: self.partial_snapshot_meta.recovery_timestamp(), }), }) } pub(crate) async fn get_optimization_status( &self, timeout: Duration, ) -> Option<CollectionResult<OptimizersStatus>> { let local_shard = self.local.read().await; let Some(local) = local_shard.deref() else { return None; }; Some(local.get_optimization_status(timeout).await) } pub(crate) async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> { let local_shard = self.local.read().await; let Some(local) = local_shard.deref() else { return Ok(SizeStats::default()); }; local.get_size_stats(timeout).await } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/clock_set.rs
lib/collection/src/shards/replica_set/clock_set.rs
use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; #[derive(Clone, Debug, Default)] pub struct ClockSet { clocks: Vec<Arc<Clock>>, } impl ClockSet { pub fn new() -> Self { Self::default() } /// Get the first available clock from the set, or create a new one. pub fn get_clock(&mut self) -> ClockGuard { for (id, clock) in self.clocks.iter().enumerate() { if clock.try_lock() { return ClockGuard::new(id as u32, clock.clone()); } } let id = self.clocks.len() as u32; let clock = Arc::new(Clock::new_locked()); self.clocks.push(clock.clone()); ClockGuard::new(id, clock) } } #[derive(Debug)] pub struct ClockGuard { id: u32, clock: Arc<Clock>, } impl ClockGuard { fn new(id: u32, clock: Arc<Clock>) -> Self { Self { id, clock } } pub fn id(&self) -> u32 { self.id } /// Return current clock tick. pub fn current_tick(&self) -> Option<u64> { self.clock.current_tick() } /// Advance clock by a single tick and return current tick. #[must_use = "new clock value must be used"] pub fn tick_once(&mut self) -> u64 { self.clock.tick_once() } /// Advance clock to `new_tick`, if `new_tick` is newer than current tick. pub fn advance_to(&mut self, new_tick: u64) { self.clock.advance_to(new_tick) } } impl Drop for ClockGuard { fn drop(&mut self) { self.clock.release(); } } #[derive(Debug)] struct Clock { /// Tracks the *next* clock tick next_tick: AtomicU64, available: AtomicBool, } impl Clock { fn new_locked() -> Self { Self { next_tick: 0.into(), available: false.into(), } } /// Return current clock tick. fn current_tick(&self) -> Option<u64> { let next_tick = self.next_tick.load(Ordering::Relaxed); next_tick.checked_sub(1) } /// Advance clock by a single tick and return current tick. /// /// # Thread safety /// /// Clock *has to* be locked (using [`Clock::try_lock`]) before calling [`Clock::tick_once`]! 
#[must_use = "new clock tick value must be used"] fn tick_once(&self) -> u64 { // `Clock` tracks *next* tick, so we increment `next_tick` by 1 and return *previous* value // of `next_tick` (which is *exactly* what `fetch_add(1)` does) let current_tick = self.next_tick.fetch_add(1, Ordering::Relaxed); // If `current_tick` is `0`, then "revert" `next_tick` back to `0`. // We expect that `advance_to` would be used to advance "past" the initial `0`. // // Executing multiple atomic operations sequentially is not strictly thread-safe, // but we expect that `Clock` would be "locked" before calling `tick_once`. if current_tick == 0 { self.next_tick.store(0, Ordering::Relaxed); } current_tick } /// Advance clock to `new_tick`, if `new_tick` is newer than current tick. fn advance_to(&self, new_tick: u64) { // `Clock` tracks *next* tick, so if we want to advance *current* tick to `new_tick`, // we have to advance `next_tick` to `new_tick + 1` self.next_tick.fetch_max(new_tick + 1, Ordering::Relaxed); } /// Try to acquire exclusive lock over this clock. /// /// Returns `true` if the lock was successfully acquired, or `false` if the clock is already /// locked. fn try_lock(&self) -> bool { self.available.swap(false, Ordering::Relaxed) } /// Release the exclusive lock over this clock. /// /// No-op if the clock is not locked. fn release(&self) { self.available.store(true, Ordering::Relaxed); } } #[cfg(test)] mod tests { use std::iter; use rand::prelude::*; use super::*; /// Tick a single clock, it should increment after we advance it from 0 (or higher). 
#[test] fn test_clock_set_single_tick() { let mut clock_set = ClockSet::new(); // Don't tick from 0 unless we explicitly advance assert_eq!(clock_set.get_clock().tick_once(), 0); assert_eq!(clock_set.get_clock().tick_once(), 0); assert_eq!(clock_set.get_clock().tick_once(), 0); clock_set.get_clock().advance_to(0); // Following ticks should increment assert_eq!(clock_set.get_clock().tick_once(), 1); assert_eq!(clock_set.get_clock().tick_once(), 2); assert_eq!(clock_set.get_clock().tick_once(), 3); assert_eq!(clock_set.get_clock().tick_once(), 4); } /// Test a single clock, but tick it multiple times on the same guard. #[test] fn test_clock_set_single_tick_twice() { let mut clock_set = ClockSet::new(); // Don't tick from 0 unless we explicitly advance { let mut clock = clock_set.get_clock(); assert_eq!(clock.tick_once(), 0); assert_eq!(clock.tick_once(), 0); clock.advance_to(0); } // Following ticks should increment { let mut clock = clock_set.get_clock(); assert_eq!(clock.tick_once(), 1); assert_eq!(clock.tick_once(), 2); assert_eq!(clock.tick_once(), 3); assert_eq!(clock.tick_once(), 4); } } /// Advance a clock to a higher number, which should increase it. #[test] fn test_clock_set_single_advance_high() { let mut clock_set = ClockSet::new(); // Bring the clock up to 4 assert_eq!(clock_set.get_clock().tick_once(), 0); clock_set.get_clock().advance_to(0); assert_eq!(clock_set.get_clock().tick_once(), 1); assert_eq!(clock_set.get_clock().tick_once(), 2); assert_eq!(clock_set.get_clock().tick_once(), 3); assert_eq!(clock_set.get_clock().tick_once(), 4); // If we advance to 100, we should continue from 101 clock_set.get_clock().advance_to(100); assert_eq!(clock_set.get_clock().tick_once(), 101); } /// Advance a clock to a lower number, which should not do anything. 
#[test] fn test_clock_set_single_advance_low() { let mut clock_set = ClockSet::new(); // Bring the clock up to 4 assert_eq!(clock_set.get_clock().tick_once(), 0); clock_set.get_clock().advance_to(0); assert_eq!(clock_set.get_clock().tick_once(), 1); assert_eq!(clock_set.get_clock().tick_once(), 2); assert_eq!(clock_set.get_clock().tick_once(), 3); assert_eq!(clock_set.get_clock().tick_once(), 4); // If we advance to a low number, just continue clock_set.get_clock().advance_to(0); assert_eq!(clock_set.get_clock().tick_once(), 5); clock_set.get_clock().advance_to(1); assert_eq!(clock_set.get_clock().tick_once(), 6); } /// Test multiple clocks in various configurations. #[test] fn test_clock_multi_tick() { let mut clock_set = ClockSet::new(); // 2 parallel operations, that fails and doesn't advance { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 0); assert_eq!(clock2.tick_once(), 0); } // 2 parallel operations { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 0); assert_eq!(clock2.tick_once(), 0); clock1.advance_to(0); clock2.advance_to(0); } // 1 operation { let mut clock1 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 1); clock1.advance_to(1); } // 1 operation, without advancing should still tick { let mut clock1 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 2); } // 2 parallel operations { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 3); assert_eq!(clock2.tick_once(), 1); clock1.advance_to(3); clock2.advance_to(1); } // 3 parallel operations, but clock 2 was much newer on some node { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 4); assert_eq!(clock2.tick_once(), 2); assert_eq!(clock3.tick_once(), 0); clock1.advance_to(4); clock2.advance_to(10); 
clock3.advance_to(0); } // 3 parallel operations, advancing in a different order should not matter { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 5); assert_eq!(clock2.tick_once(), 11); assert_eq!(clock3.tick_once(), 1); clock3.advance_to(1); clock2.advance_to(11); clock1.advance_to(5); } // 3 parallel operations, advancing just some should still tick all { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 6); assert_eq!(clock2.tick_once(), 12); assert_eq!(clock3.tick_once(), 2); clock2.advance_to(12); } // 1 operation { let mut clock1 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 7); } // Test final state of all clocks { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); let mut clock4 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 8); assert_eq!(clock2.tick_once(), 13); assert_eq!(clock3.tick_once(), 3); assert_eq!(clock4.tick_once(), 0); } } /// Test a number of parallel operations with some running for a long time. Clocks are resolved /// unordered. 
#[test] fn test_clock_set_long_running_unordered() { let mut clock_set = ClockSet::new(); // Clock 1 runs for a long while let mut clock1 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 0); // 2 quick parallel operations { let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); assert_eq!(clock2.tick_once(), 0); assert_eq!(clock3.tick_once(), 0); clock2.advance_to(0); clock3.advance_to(0); } // Clock 2 runs for a long while let clock2 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 0); // 2 quick parallel operations { let mut clock3 = clock_set.get_clock(); let mut clock4 = clock_set.get_clock(); assert_eq!(clock3.tick_once(), 1); assert_eq!(clock4.tick_once(), 0); clock4.advance_to(0); } // Clock 1 finally resolves clock1.advance_to(0); drop(clock1); // 3 quick parallel operations { let mut clock1 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); let mut clock4 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 1); assert_eq!(clock3.tick_once(), 2); assert_eq!(clock4.tick_once(), 1); } // Clock 2 finally resolves drop(clock2); // Test final state of all clocks { let mut clock1 = clock_set.get_clock(); let mut clock2 = clock_set.get_clock(); let mut clock3 = clock_set.get_clock(); let mut clock4 = clock_set.get_clock(); let mut clock5 = clock_set.get_clock(); assert_eq!(clock1.tick_once(), 2); assert_eq!(clock2.tick_once(), 1); assert_eq!(clock3.tick_once(), 3); assert_eq!(clock4.tick_once(), 2); assert_eq!(clock5.tick_once(), 0); } } /// Test and increment a lot of clocks, but at some point 10% of the clocks gets stuck. 
#[test] fn test_clock_set_many() { const N: usize = 5000; let mut clock_set = ClockSet::new(); // Tick all clocks past 0 { let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N) .collect::<Vec<_>>(); clocks.shuffle(&mut rand::rng()); for clock in &mut clocks { assert_eq!(clock.tick_once(), 0); clock.advance_to(0); } } // Tick all clocks 10 times { for tick in 0..10 { let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N) .collect::<Vec<_>>(); clocks.shuffle(&mut rand::rng()); for clock in &mut clocks { assert_eq!(clock.tick_once(), 1 + tick); } } } // Now the first 10% gets stuck let mut stuck_clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N / 10) .collect::<Vec<_>>(); for clock in stuck_clocks.iter_mut() { assert_eq!(clock.tick_once(), 11); } // Tick all other clocks 10 times { for tick in 0..10 { let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N - (N / 10)) .collect::<Vec<_>>(); clocks.shuffle(&mut rand::rng()); for clock in clocks.iter_mut() { assert_eq!(clock.tick_once(), 11 + tick); } } } // All stuck clocks resolve drop(stuck_clocks); // Test all clocks { let mut stuck_clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N / 10) .collect::<Vec<_>>(); let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N - (N / 10)) .collect::<Vec<_>>(); for clock in stuck_clocks.iter_mut() { assert_eq!(clock.tick_once(), 12); } for clock in clocks.iter_mut() { assert_eq!(clock.tick_once(), 21); } } } /// Test and increment a lot of clocks, but each iteration, we get on more clock stuck. 
#[test] fn test_clock_set_many_staggered_stuck() { const N: usize = 500; let mut clock_set = ClockSet::new(); let mut stuck_clocks = Vec::new(); for i in 0..N { let mut clock_to_stuck = clock_set.get_clock(); let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N - i - 1) .collect::<Vec<_>>(); if clock_to_stuck.tick_once() == 0 { clock_to_stuck.advance_to(0); } for clock in clocks.iter_mut() { if clock.tick_once() == 0 { clock.advance_to(0); } } stuck_clocks.push(clock_to_stuck); } // All stuck clocks resolve now drop(stuck_clocks); // Test all clocks { let mut clocks = iter::repeat_with(|| clock_set.get_clock()) .take(N) .enumerate() .collect::<Vec<_>>(); for (i, clock) in clocks.iter_mut() { assert_eq!(clock.tick_once(), *i as u64 + 1); } } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/locally_disabled_peers.rs
lib/collection/src/shards/replica_set/locally_disabled_peers.rs
use std::cmp; use std::collections::HashMap; use std::time::{Duration, Instant}; use crate::shards::replica_set::replica_set_state::ReplicaState; use crate::shards::shard::PeerId; #[derive(Clone, Debug, Default)] pub struct Registry { /// List of disabled peer IDs and a backoff to prevent spamming consensus. /// /// Each peer optionally specifies what state it was in when it was disabled. They're send /// along with the consensus proposal and prevents accidentally killing replicas if the current /// peer is slow to catch up with consensus. /// See: <https://github.com/qdrant/qdrant/pull/5343> locally_disabled_peers: HashMap<PeerId, (Backoff, Option<ReplicaState>)>, } impl Registry { pub fn is_disabled(&self, peer_id: PeerId) -> bool { self.locally_disabled_peers.contains_key(&peer_id) } pub fn is_all_disabled(&self, peer_ids: impl IntoIterator<Item = PeerId>) -> bool { peer_ids .into_iter() .all(|peer_id| self.is_disabled(peer_id)) } pub fn disable_peer(&mut self, peer_id: PeerId) { self.locally_disabled_peers.entry(peer_id).or_default(); } pub fn disable_peer_and_notify_if_elapsed( &mut self, peer_id: PeerId, from_state: Option<ReplicaState>, ) -> bool { let (backoff, _from_state) = self .locally_disabled_peers .entry(peer_id) // Update from state if changed on already disabled peers .and_modify(|(_backoff, value_from_state)| { *value_from_state = from_state; }) .or_insert_with(|| (Backoff::default(), from_state)); backoff.retry_if_elapsed() } pub fn enable_peer(&mut self, peer_id: PeerId) { let _ = self.locally_disabled_peers.remove(&peer_id); } pub fn clear(&mut self) { self.locally_disabled_peers.clear(); } pub fn notify_elapsed(&mut self) -> impl Iterator<Item = (PeerId, Option<ReplicaState>)> + '_ { self.locally_disabled_peers .iter_mut() .filter_map(|(&peer_id, (backoff, from_state))| { backoff.retry_if_elapsed().then_some((peer_id, *from_state)) }) } } #[derive(Copy, Clone, Debug)] struct Backoff { last_attempt: Instant, delay: Duration, } impl Default for 
Backoff { fn default() -> Self { Self { last_attempt: Instant::now(), delay: Duration::ZERO, } } } impl Backoff { const MAX_DELAY: Duration = Duration::from_secs(10); pub fn retry_if_elapsed(&mut self) -> bool { let is_elapsed = self.is_elapsed(); if is_elapsed { self.retry(); } is_elapsed } fn is_elapsed(&self) -> bool { self.last_attempt.elapsed() >= self.delay } fn retry(&mut self) { self.last_attempt = Instant::now(); self.delay = if self.delay.is_zero() { Duration::from_secs(1) } else { cmp::min(self.delay * 2, Self::MAX_DELAY) } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/partial_snapshot_meta.rs
lib/collection/src/shards/replica_set/partial_snapshot_meta.rs
use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Duration, SystemTime}; use common::scope_tracker::{ScopeTracker, ScopeTrackerGuard}; use crate::operations::types::{CollectionError, CollectionResult}; /// API Flow: /// /// ┌─────────────────┐ /// │ recover_from API│ /// └───────┬─────────┘ /// │◄────────────── /// │ recovery_lock.lock() /// ┌───────▼─────────┐ (accept reads, /// │ Download snap │ but decline new recover requests) /// └───────┬─────────┘ /// │◄─────────────── /// │ local_shard.write() /// ┌───────▼─────────┐ (reject reads, /// │ Recover snapshot│ if both local_shard and recovery_lock are locked) /// └───────┬─────────┘ /// │ /// │ /// ┌───────▼─────────┐ /// │ Swap shard │ /// └─────────────────┘ /// #[derive(Debug, Default)] pub struct PartialSnapshotMeta { /// Tracks ongoing *create* partial snapshot requests. There might be multiple parallel /// create partial snapshot requests, so we track them with a counter. ongoing_create_snapshot_requests_tracker: ScopeTracker, /// Limits parallel *recover* partial snapshot requests. We are using `RwLock`, so that multiple /// read requests can check if recovery is in progress (by doing `try_read`) without blocking /// each other. recovery_lock: Arc<tokio::sync::RwLock<()>>, /// Rejects read requests when partial snapshot recovery is in proggress. search_lock: tokio::sync::RwLock<()>, /// Timestamp of the last successful snapshot recovery. 
recovery_timestamp: AtomicU64, } impl PartialSnapshotMeta { pub fn ongoing_create_snapshot_requests(&self) -> usize { self.ongoing_create_snapshot_requests_tracker .get(Ordering::Relaxed) } #[must_use] pub fn track_create_snapshot_request(&self) -> ScopeTrackerGuard { self.ongoing_create_snapshot_requests_tracker .measure_scope() } pub fn try_take_recovery_lock( &self, ) -> CollectionResult<tokio::sync::OwnedRwLockWriteGuard<()>> { self.recovery_lock .clone() .try_write_owned() .map_err(|_| recovery_in_progress()) } pub fn is_recovery_lock_taken(&self) -> bool { self.recovery_lock.try_read().is_err() } pub async fn take_search_write_lock(&self) -> tokio::sync::RwLockWriteGuard<'_, ()> { self.search_lock.write().await } pub fn try_take_search_read_lock( &self, ) -> CollectionResult<tokio::sync::RwLockReadGuard<'_, ()>> { self.search_lock .try_read() .map_err(|_| recovery_in_progress()) } pub fn recovery_timestamp(&self) -> u64 { self.recovery_timestamp.load(Ordering::Relaxed) } pub fn snapshot_recovered(&self) { let timestamp = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap_or(Duration::ZERO) .as_secs(); self.recovery_timestamp.store(timestamp, Ordering::Relaxed); } } fn recovery_in_progress() -> CollectionError { CollectionError::shard_unavailable("partial snapshot recovery is in progress") }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/mod.rs
lib/collection/src/shards/replica_set/mod.rs
pub mod clock_set; mod execute_read_operation; mod locally_disabled_peers; mod partial_snapshot_meta; mod read_ops; pub mod replica_set_state; mod shard_transfer; pub mod snapshots; mod telemetry; mod update; use std::collections::{HashMap, HashSet}; use std::ops::Deref as _; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use common::budget::ResourceBudget; use common::counter::hardware_accumulator::HwMeasurementAcc; use common::rate_limiting::RateLimiter; use common::save_on_disk::SaveOnDisk; use parking_lot::Mutex as ParkingMutex; use replica_set_state::{ReplicaSetState, ReplicaState}; use segment::types::{ExtendedPointId, Filter, ShardKey}; use serde::{Deserialize, Serialize}; use tokio::runtime::Handle; use tokio::sync::{Mutex, RwLock}; use tokio_util::task::AbortOnDropHandle; use self::partial_snapshot_meta::PartialSnapshotMeta; use super::CollectionId; use super::local_shard::LocalShard; use super::local_shard::clock_map::RecoveryPoint; use super::remote_shard::RemoteShard; use super::transfer::ShardTransfer; use crate::collection::payload_index_schema::PayloadIndexSchema; use crate::collection_manager::optimizers::TrackerLog; use crate::common::collection_size_stats::CollectionSizeStats; use crate::common::snapshots_manager::SnapshotStorageManager; use crate::config::CollectionConfigInternal; use crate::operations::shared_storage_config::SharedStorageConfig; use crate::operations::types::{CollectionError, CollectionResult, UpdateResult, UpdateStatus}; use crate::operations::{CollectionUpdateOperations, point_ops}; use crate::optimizers_builder::OptimizersConfig; use crate::shards::channel_service::ChannelService; use crate::shards::dummy_shard::DummyShard; use crate::shards::replica_set::clock_set::ClockSet; use crate::shards::shard::{PeerId, Shard, ShardId}; use crate::shards::shard_config::ShardConfig; // │ Collection Created // │ // ▼ // ┌──────────────┐ // │ │ // │ Initializing │ // │ │ // └──────┬───────┘ // │ Report 
created ┌───────────┐ // └────────────────────► │ // Activate │ Consensus │ // ┌─────────────────────┤ │ // │ └───────────┘ // ┌─────▼───────┐ User Promote ┌──────────┐ // │ ◄──────────────────────────► │ // │ Active │ │ Listener │ // │ ◄───────────┐ │ │ // └──┬──────────┘ │Transfer └──┬───────┘ // │ │Finished │ // │ ┌──────┴────────┐ │Update // │Update │ │ │Failure // │Failure │ Partial ├───┐ │ // │ │ │ │ │ // │ └───────▲───────┘ │ │ // │ │ │ │ // ┌──▼──────────┐ Transfer │ │ │ // │ │ Started │ │ │ // │ Dead ├────────────┘ │ │ // │ │ │ │ // └─▲───────▲───┘ Transfer │ │ // │ │ Failed/Cancelled│ │ // │ └────────────────────────────┘ │ // │ │ // └─────────────────────────────────────────┘ // /// A set of shard replicas. /// /// Handles operations so that the state is consistent across all the replicas of the shard. /// Prefers local shard for read-only operations. /// Perform updates on all replicas and report error if there is at least one failure. /// pub struct ShardReplicaSet { local: RwLock<Option<Shard>>, // Abstract Shard to be able to use a Proxy during replication remotes: RwLock<Vec<RemoteShard>>, replica_state: Arc<SaveOnDisk<ReplicaSetState>>, /// List of peers that are marked as dead locally, but are not yet submitted to the consensus. /// List is checked on each consensus round and submitted to the consensus. /// If the state of the peer is changed in the consensus, it is removed from the list. /// Update and read operations are not performed on the peers marked as dead. 
locally_disabled_peers: parking_lot::RwLock<locally_disabled_peers::Registry>, pub(crate) shard_path: PathBuf, pub(crate) shard_id: ShardId, shard_key: Option<ShardKey>, notify_peer_failure_cb: ChangePeerFromState, abort_shard_transfer_cb: AbortShardTransfer, channel_service: ChannelService, collection_id: CollectionId, collection_config: Arc<RwLock<CollectionConfigInternal>>, optimizers_config: OptimizersConfig, pub(crate) shared_storage_config: Arc<SharedStorageConfig>, payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>, update_runtime: Handle, search_runtime: Handle, optimizer_resource_budget: ResourceBudget, /// Lock to serialized write operations on the replicaset when a write ordering is used. write_ordering_lock: Mutex<()>, /// Local clock set, used to tag new operations on this shard. clock_set: Mutex<ClockSet>, write_rate_limiter: Option<parking_lot::Mutex<RateLimiter>>, pub partial_snapshot_meta: PartialSnapshotMeta, } pub type AbortShardTransfer = Arc<dyn Fn(ShardTransfer, &str) + Send + Sync>; pub type ChangePeerState = Arc<dyn Fn(PeerId, ShardId) + Send + Sync>; pub type ChangePeerFromState = Arc<dyn Fn(PeerId, ShardId, Option<ReplicaState>) + Send + Sync>; const REPLICA_STATE_FILE: &str = "replica_state.json"; impl ShardReplicaSet { /// Create a new fresh replica set, no previous state is expected. 
#[allow(clippy::too_many_arguments)] pub async fn build( shard_id: ShardId, shard_key: Option<ShardKey>, collection_id: CollectionId, this_peer_id: PeerId, local: bool, remotes: HashSet<PeerId>, on_peer_failure: ChangePeerFromState, abort_shard_transfer: AbortShardTransfer, collection_path: &Path, collection_config: Arc<RwLock<CollectionConfigInternal>>, effective_optimizers_config: OptimizersConfig, shared_storage_config: Arc<SharedStorageConfig>, payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>, channel_service: ChannelService, update_runtime: Handle, search_runtime: Handle, optimizer_resource_budget: ResourceBudget, init_state: Option<ReplicaState>, ) -> CollectionResult<Self> { let shard_path = super::create_shard_dir(collection_path, shard_id).await?; let local = if local { let shard = LocalShard::build( shard_id, collection_id.clone(), &shard_path, collection_config.clone(), shared_storage_config.clone(), payload_index_schema.clone(), update_runtime.clone(), search_runtime.clone(), optimizer_resource_budget.clone(), effective_optimizers_config.clone(), ) .await?; Some(Shard::Local(shard)) } else { None }; let replica_state: SaveOnDisk<ReplicaSetState> = SaveOnDisk::load_or_init_default(shard_path.join(REPLICA_STATE_FILE))?; let init_replica_state = init_state.unwrap_or(ReplicaState::Initializing); replica_state.write(|rs| { rs.this_peer_id = this_peer_id; if local.is_some() { rs.is_local = true; rs.set_peer_state(this_peer_id, init_replica_state); } for peer in remotes { rs.set_peer_state(peer, init_replica_state); } })?; let remote_shards = Self::init_remote_shards( shard_id, collection_id.clone(), &replica_state.read(), &channel_service, ); // Save shard config as the last step, to ensure that the file state is consistent // Presence of shard config indicates that the shard is ready to be used let replica_set_shard_config = ShardConfig::new_replica_set(); replica_set_shard_config.save(&shard_path)?; // Initialize the write rate limiter let config = 
collection_config.read().await; let write_rate_limiter = config.strict_mode_config.as_ref().and_then(|strict_mode| { strict_mode .write_rate_limit .map(RateLimiter::new_per_minute) .map(parking_lot::Mutex::new) }); drop(config); Ok(Self { shard_id, shard_key, local: RwLock::new(local), remotes: RwLock::new(remote_shards), replica_state: replica_state.into(), locally_disabled_peers: Default::default(), shard_path, abort_shard_transfer_cb: abort_shard_transfer, notify_peer_failure_cb: on_peer_failure, channel_service, collection_id, collection_config, optimizers_config: effective_optimizers_config, shared_storage_config, payload_index_schema, update_runtime, search_runtime, optimizer_resource_budget, write_ordering_lock: Mutex::new(()), clock_set: Default::default(), write_rate_limiter, partial_snapshot_meta: PartialSnapshotMeta::default(), }) } /// Recovers shard from disk. /// /// WARN: This method intended to be used only on the initial start of the node. /// It does not implement any logic to recover from a failure. /// Will panic or load partial state if there is a failure. 
#[allow(clippy::too_many_arguments)]
pub async fn load(
    shard_id: ShardId,
    shard_key: Option<ShardKey>,
    collection_id: CollectionId,
    shard_path: &Path,
    is_dirty_shard: bool,
    collection_config: Arc<RwLock<CollectionConfigInternal>>,
    effective_optimizers_config: OptimizersConfig,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    channel_service: ChannelService,
    on_peer_failure: ChangePeerFromState,
    abort_shard_transfer: AbortShardTransfer,
    this_peer_id: PeerId,
    update_runtime: Handle,
    search_runtime: Handle,
    optimizer_resource_budget: ResourceBudget,
) -> Self {
    // Panics on failure by design; see the WARN in this method's doc comment
    let replica_state: SaveOnDisk<ReplicaSetState> =
        SaveOnDisk::load_or_init_default(shard_path.join(REPLICA_STATE_FILE)).unwrap();
    if replica_state.read().this_peer_id != this_peer_id {
        replica_state
            .write(|rs| {
                // NOTE(review): this `let` shadows the outer `this_peer_id` parameter
                // with the *stored* (old) peer ID, so the remove/re-insert below and
                // the final assignment operate on the old ID and are effectively
                // no-ops. The enclosing `if` suggests the intent was to migrate the
                // old peer's state to the new `this_peer_id` — confirm upstream.
                let this_peer_id = rs.this_peer_id;
                let local_state = rs.remove_peer_state(this_peer_id);
                if let Some(state) = local_state {
                    rs.set_peer_state(this_peer_id, state);
                }
                rs.this_peer_id = this_peer_id;
            })
            .map_err(|e| {
                panic!("Failed to update replica state in {shard_path:?}: {e}");
            })
            .unwrap();
    }
    let remote_shards: Vec<_> = Self::init_remote_shards(
        shard_id,
        collection_id.clone(),
        &replica_state.read(),
        &channel_service,
    );
    // Set when the local shard fails to load and is replaced by a dummy
    let mut local_load_failure = false;
    let local = if replica_state.read().is_local {
        let shard = if let Some(recovery_reason) = &shared_storage_config.recovery_mode {
            // Recovery mode: never load real data, always serve a dummy
            Shard::Dummy(DummyShard::new(recovery_reason))
        } else if is_dirty_shard {
            log::error!(
                "Shard {collection_id}:{shard_id} is not fully initialized - loading as dummy shard"
            );
            // This dummy shard will be replaced only when it rejects an update (marked as dead so recovery process kicks in)
            Shard::Dummy(DummyShard::new(
                "Dirty shard - shard is not fully initialized",
            ))
        } else {
            let res = LocalShard::load(
                shard_id,
                collection_id.clone(),
                shard_path,
                collection_config.clone(),
                effective_optimizers_config.clone(),
                shared_storage_config.clone(),
                payload_index_schema.clone(),
                true,
                update_runtime.clone(),
                search_runtime.clone(),
                optimizer_resource_budget.clone(),
            )
            .await;
            match res {
                Ok(shard) => Shard::Local(shard),
                Err(err) => {
                    // Without the opt-in error handling flag a broken shard is fatal
                    if !shared_storage_config.handle_collection_load_errors {
                        panic!("Failed to load local shard {shard_path:?}: {err}")
                    }
                    local_load_failure = true;
                    log::error!(
                        "Failed to load local shard {shard_path:?}, \
                         initializing \"dummy\" shard instead: \
                         {err}"
                    );
                    Shard::Dummy(DummyShard::new(format!(
                        "Failed to load local shard {shard_path:?}: {err}"
                    )))
                }
            }
        };
        Some(shard)
    } else {
        None
    };
    // Initialize the write rate limiter
    let config = collection_config.read().await;
    let write_rate_limiter = config.strict_mode_config.as_ref().and_then(|strict_mode| {
        strict_mode
            .write_rate_limit
            .map(RateLimiter::new_per_minute)
            .map(parking_lot::Mutex::new)
    });
    drop(config);
    let replica_set = Self {
        shard_id,
        shard_key,
        local: RwLock::new(local),
        remotes: RwLock::new(remote_shards),
        replica_state: replica_state.into(),
        // TODO: move to collection config
        locally_disabled_peers: Default::default(),
        shard_path: shard_path.to_path_buf(),
        notify_peer_failure_cb: on_peer_failure,
        abort_shard_transfer_cb: abort_shard_transfer,
        channel_service,
        collection_id,
        collection_config,
        optimizers_config: effective_optimizers_config,
        shared_storage_config,
        payload_index_schema,
        update_runtime,
        search_runtime,
        optimizer_resource_budget,
        write_ordering_lock: Mutex::new(()),
        clock_set: Default::default(),
        write_rate_limiter,
        partial_snapshot_meta: PartialSnapshotMeta::default(),
    };
    // If the local shard failed to load and no other peer can serve this shard,
    // disable this peer locally so the recovery process can kick in.
    // `active_remote_shards` includes `Active` and `ReshardingScaleDown` replicas!
    if local_load_failure && replica_set.active_shards(true).is_empty() {
        replica_set
            .locally_disabled_peers
            .write()
            .disable_peer(this_peer_id);
    }
    replica_set
}

/// Take the local shard (if any) and shut it down gracefully, consuming `self`.
pub async fn stop_gracefully(self) {
    if let Some(local) = self.local.write().await.take() {
        local.stop_gracefully().await;
    }
}

/// Shard key this replica set belongs to, if any.
pub fn shard_key(&self) -> Option<&ShardKey> {
    self.shard_key.as_ref()
}

/// ID of this peer, as recorded in the persisted replica state.
pub fn this_peer_id(&self) -> PeerId {
    self.replica_state.read().this_peer_id
}

pub async fn has_remote_shard(&self) -> bool {
    !self.remotes.read().await.is_empty()
}

pub async fn has_local_shard(&self) -> bool {
    self.local.read().await.is_some()
}

/// Checks if the shard exists locally and not a proxy.
pub async fn is_local(&self) -> bool {
    let local_read = self.local.read().await;
    matches!(*local_read, Some(Shard::Local(_) | Shard::Dummy(_)))
}

/// Checks if the local shard is any proxy variant (transfer in progress).
pub async fn is_proxy(&self) -> bool {
    let local_read = self.local.read().await;
    match *local_read {
        None => false,
        Some(Shard::Local(_)) => false,
        Some(Shard::Proxy(_)) => true,
        Some(Shard::ForwardProxy(_)) => true,
        Some(Shard::QueueProxy(_)) => true,
        Some(Shard::Dummy(_)) => false,
    }
}

pub async fn is_queue_proxy(&self) -> bool {
    let local_read = self.local.read().await;
    matches!(*local_read, Some(Shard::QueueProxy(_)))
}

pub async fn is_dummy(&self) -> bool {
    let local_read = self.local.read().await;
    matches!(*local_read, Some(Shard::Dummy(_)))
}

/// Snapshot of all peers and their replica states.
pub fn peers(&self) -> HashMap<PeerId, ReplicaState> {
    self.replica_state.read().peers().clone()
}

/// Checks if the current replica contains a unique source of truth and should never
/// be deactivated or removed.
/// If current replica is the only "alive" replica, it is considered the last source of truth.
///
/// If our replica is `Initializing`, we consider it to be the last source of truth if there is
/// no other active replicas. If we would deactivate it, it will be impossible to recover the
/// replica later. This may happen if we got killed or crashed during collection creation.
///
/// Same logic applies to `Listener` replicas, as they are not recoverable if there are no
/// other active replicas.
///
/// Examples:
/// Active(this), Initializing(other), Initializing(other) -> true
/// Active(this), Active(other) -> false
/// Initializing(this) -> true
/// Initializing(this), Initializing(other) -> true
/// Initializing(this), Dead(other) -> true
/// Initializing(this), Active(other) -> false
/// Active(this), Initializing(other) -> true
///
pub fn is_last_source_of_truth_replica(&self, peer_id: PeerId) -> bool {
    // This includes `Active` and `ReshardingScaleDown` replicas!
    let active_peers = self.replica_state.read().active_peers();
    if active_peers.is_empty()
        && let Some(peer_state) = self.peer_state(peer_id)
    {
        // If there are no other active peers, deactivating those replicas
        // is not recoverable, so it is considered the last source of truth,
        // even though it is not technically active.
        return matches!(
            peer_state,
            ReplicaState::Initializing | ReplicaState::Listener
        );
    }
    active_peers.len() == 1 && active_peers.contains(&peer_id)
}

/// Replica state of the given peer, if it is part of this replica set.
pub fn peer_state(&self, peer_id: PeerId) -> Option<ReplicaState> {
    self.replica_state.read().get_peer_state(peer_id)
}

/// Returns `true` if `check` holds for every peer in the replica set.
pub fn check_peers_state_all(&self, check: impl Fn(ReplicaState) -> bool) -> bool {
    self.replica_state.read().check_peers_state_all(check)
}

/// List the peer IDs on which this shard is active
/// - `remote_only`: if true, excludes the local peer ID from the result
pub fn active_shards(&self, remote_only: bool) -> Vec<PeerId> {
    let replica_state = self.replica_state.read();
    replica_state
        .active_peers() // This includes `Active` and `ReshardingScaleDown` replicas!
        .into_iter()
        .filter(|&peer_id| {
            !self.is_locally_disabled(peer_id)
                && (!remote_only || peer_id != replica_state.this_peer_id)
        })
        .collect()
}

/// List the peer IDs whose replicas can serve reads, excluding locally disabled peers.
pub fn readable_shards(&self) -> Vec<PeerId> {
    let replica_state = self.replica_state.read();
    replica_state
        .readable_peers() // This includes `ActiveRead`, `Active`, and `ReshardingScaleDown` replicas!
        .into_iter()
        .filter(|&peer_id| !self.is_locally_disabled(peer_id))
        .collect()
}

/// Wait for a local shard to be initialized.
///
/// Uses a blocking thread internally.
pub async fn wait_for_local(&self, timeout: Duration) -> CollectionResult<()> {
    self.wait_for(|replica_set_state| replica_set_state.is_local, timeout)
        .await
}

/// Synchronously block the current thread until `check` holds or `timeout` expires.
/// Returns whether the condition was met.
pub fn wait_for_state_condition_sync<F>(&self, check: F, timeout: Duration) -> bool
where
    F: Fn(&ReplicaSetState) -> bool,
{
    let replica_state = self.replica_state.clone();
    replica_state.wait_for(check, timeout)
}

/// Wait for a local shard to get into `state`
///
/// Uses a blocking thread internally.
pub async fn wait_for_local_state(
    &self,
    state: ReplicaState,
    timeout: Duration,
) -> CollectionResult<()> {
    self.wait_for(
        move |replica_set_state| {
            replica_set_state.get_peer_state(replica_set_state.this_peer_id) == Some(state)
        },
        timeout,
    )
    .await
}

/// Wait for a peer shard to get into `state`
///
/// Uses a blocking thread internally.
///
/// # Cancel safety
///
/// This method is cancel safe.
pub fn wait_for_state(
    &self,
    peer_id: PeerId,
    state: ReplicaState,
    timeout: Duration,
) -> impl Future<Output = CollectionResult<()>> + 'static {
    self.wait_for(
        move |replica_set_state| replica_set_state.get_peer_state(peer_id) == Some(state),
        timeout,
    )
}

/// Wait for a replica set state condition to be true.
///
/// Uses a blocking thread internally.
///
/// # Cancel safety
///
/// This method is cancel safe.
pub fn wait_for<F>( &self, check: F, timeout: Duration, ) -> impl Future<Output = CollectionResult<()>> + 'static where F: Fn(&ReplicaSetState) -> bool + Send + 'static, { // TODO: Propagate cancellation into `spawn_blocking` task!? let replica_state = self.replica_state.clone(); let task = AbortOnDropHandle::new(tokio::task::spawn_blocking(move || { replica_state.wait_for(check, timeout) })); async move { let status = task.await.map_err(|err| { CollectionError::service_error(format!( "Failed to wait for replica set state: {err}" )) })?; if status { Ok(()) } else { Err(CollectionError::timeout( timeout, "wait for replica set state", )) } } } /// Clears the local shard data and loads an empty local shard pub async fn init_empty_local_shard(&self) -> CollectionResult<()> { let mut local = self.local.write().await; let current_shard = local.take(); if let Some(current_shard) = current_shard { current_shard.stop_gracefully().await; } LocalShard::clear(&self.shard_path).await?; let local_shard_res = LocalShard::build( self.shard_id, self.collection_id.clone(), &self.shard_path, self.collection_config.clone(), self.shared_storage_config.clone(), self.payload_index_schema.clone(), self.update_runtime.clone(), self.search_runtime.clone(), self.optimizer_resource_budget.clone(), self.optimizers_config.clone(), ) .await; match local_shard_res { Ok(local_shard) => { *local = Some(Shard::Local(local_shard)); Ok(()) } Err(err) => { let error = format!( "Failed to initialize local shard at {:?}: {err}", self.shard_path ); log::error!("{error}"); *local = Some(Shard::Dummy(DummyShard::new(error))); Err(err) } } } /// Replaces the local shard with the given one. 
pub async fn set_local( &self, local: LocalShard, state: Option<ReplicaState>, ) -> CollectionResult<Option<Shard>> { let old_shard = self.local.write().await.replace(Shard::Local(local)); if !self.replica_state.read().is_local || state.is_some() { self.replica_state.write(|rs| { rs.is_local = true; if let Some(state) = state { rs.set_peer_state(self.this_peer_id(), state); } })?; self.on_local_state_updated(state.unwrap_or(ReplicaState::Dead)) .await?; } self.update_locally_disabled(self.this_peer_id()); Ok(old_shard) } pub async fn remove_local(&self) -> CollectionResult<()> { // TODO: Ensure cancel safety! self.replica_state.write(|rs| { rs.is_local = false; let this_peer_id = rs.this_peer_id; rs.remove_peer_state(this_peer_id); })?; self.update_locally_disabled(self.this_peer_id()); let removing_local = { let mut local = self.local.write().await; local.take() }; if let Some(removing_local) = removing_local { // stop ongoing tasks and delete data removing_local.stop_gracefully().await; LocalShard::clear(&self.shard_path).await?; } Ok(()) } pub async fn add_remote(&self, peer_id: PeerId, state: ReplicaState) -> CollectionResult<()> { debug_assert_ne!(peer_id, self.this_peer_id()); self.replica_state .write(|rs| rs.set_peer_state(peer_id, state))?; if self.this_peer_id() == peer_id { self.on_local_state_updated(state).await?; } self.update_locally_disabled(peer_id); let mut remotes = self.remotes.write().await; // check remote already exists if remotes.iter().any(|remote| remote.peer_id == peer_id) { return Ok(()); } remotes.push(RemoteShard::new( self.shard_id, self.collection_id.clone(), peer_id, self.channel_service.clone(), )); Ok(()) } pub async fn remove_remote(&self, peer_id: PeerId) -> CollectionResult<()> { self.replica_state.write(|rs| { rs.remove_peer_state(peer_id); })?; self.update_locally_disabled(peer_id); let mut remotes = self.remotes.write().await; remotes.retain(|remote| remote.peer_id != peer_id); Ok(()) } /// Change state of the replica to the 
given. /// Ensure that remote shard is initialized. pub async fn ensure_replica_with_state( &self, peer_id: PeerId, state: ReplicaState, ) -> CollectionResult<()> { if peer_id == self.this_peer_id() { self.set_replica_state(peer_id, state).await?; } else { // Create remote shard if necessary self.add_remote(peer_id, state).await?; } Ok(()) } pub async fn set_replica_state( &self, peer_id: PeerId, state: ReplicaState, ) -> CollectionResult<()> { log::debug!( "Changing local shard {}:{} state from {:?} to {state:?}", self.collection_id, self.shard_id, self.replica_state.read().get_peer_state(peer_id), ); self.replica_state.write(|rs| { if rs.this_peer_id == peer_id { rs.is_local = true; } rs.set_peer_state(peer_id, state); })?; if self.this_peer_id() == peer_id { self.on_local_state_updated(state).await?; } self.update_locally_disabled(peer_id); Ok(()) } /// Called when the local replica state is updated /// /// Not called if: /// - there is no local shard /// - the local shard is removed async fn on_local_state_updated(&self, new_state: ReplicaState) -> CollectionResult<()> { // Update newest clocks snapshot on each state change if let Some(local_shard) = self.local.read().await.as_ref() { if new_state.is_active() { local_shard.clear_newest_clocks_snapshot().await?; } else { local_shard.take_newest_clocks_snapshot().await?; } } Ok(()) } pub async fn remove_peer(&self, peer_id: PeerId) -> CollectionResult<()> { if self.this_peer_id() == peer_id { self.remove_local().await?; } else { self.remove_remote(peer_id).await?; } Ok(()) } pub async fn apply_state( &mut self, replicas: HashMap<PeerId, ReplicaState>, shard_key: Option<ShardKey>, ) -> CollectionResult<()> { let old_peers = self.replica_state.read().peers().clone(); self.replica_state.write(|state| { state.set_peers(replicas.clone()); })?; if let Some(&state) = replicas.get(&self.this_peer_id()) { self.on_local_state_updated(state).await?; } self.locally_disabled_peers.write().clear(); let removed_peers = 
old_peers .keys() .filter(|peer_id| !replicas.contains_key(peer_id)) .copied() .collect::<Vec<_>>(); for peer_id in removed_peers { self.remove_peer(peer_id).await?; } for (peer_id, state) in replicas { let peer_already_exists = old_peers.contains_key(&peer_id); if peer_already_exists { // do nothing // We only need to change state and it is already saved continue; } if peer_id == self.this_peer_id() { // Consensus wants a local replica on this peer let local_shard = LocalShard::build( self.shard_id, self.collection_id.clone(), &self.shard_path, self.collection_config.clone(), self.shared_storage_config.clone(), self.payload_index_schema.clone(), self.update_runtime.clone(), self.search_runtime.clone(), self.optimizer_resource_budget.clone(), self.optimizers_config.clone(), ) .await?; match state { ReplicaState::Active | ReplicaState::Listener | ReplicaState::ReshardingScaleDown => { // No way we can provide up-to-date replica right away at this point, // so we report a failure to consensus let existing = self.set_local(local_shard, Some(state)).await?; if let Some(existing) = existing { existing.stop_gracefully().await; } self.notify_peer_failure(peer_id, Some(state)); } ReplicaState::Dead | ReplicaState::Partial | ReplicaState::Initializing | ReplicaState::PartialSnapshot | ReplicaState::Recovery | ReplicaState::Resharding | ReplicaState::ActiveRead => { let existing = self.set_local(local_shard, Some(state)).await?;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/snapshots.rs
lib/collection/src/shards/replica_set/snapshots.rs
use std::collections::HashSet;
use std::io;
use std::path::Path;

use common::save_on_disk::SaveOnDisk;
use common::tar_ext;
use fs_err as fs;
use fs_err::{File, tokio as tokio_fs};
use segment::data_types::manifest::{SegmentManifest, SnapshotManifest};
use segment::types::SnapshotFormat;

use super::{REPLICA_STATE_FILE, ShardReplicaSet};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::dummy_shard::DummyShard;
use crate::shards::local_shard::LocalShard;
use crate::shards::replica_set::replica_set_state::ReplicaSetState;
use crate::shards::shard::{PeerId, Shard};
use crate::shards::shard_config::ShardConfig;
use crate::shards::shard_initializing_flag_path;

/// Kind of snapshot recovery being performed: a full shard snapshot, or a
/// partial (manifest-based, incremental) snapshot.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RecoveryType {
    Full,
    Partial,
}

impl RecoveryType {
    pub fn is_full(self) -> bool {
        matches!(self, Self::Full)
    }

    pub fn is_partial(self) -> bool {
        matches!(self, Self::Partial)
    }
}

impl ShardReplicaSet {
    /// Write a snapshot of this replica set into `tar`: the local shard data
    /// (if any), the replica state file, and a fresh replica-set shard config.
    /// A `Some(manifest)` indicates a partial snapshot.
    pub async fn create_snapshot(
        &self,
        temp_path: &Path,
        tar: &tar_ext::BuilderExt,
        format: SnapshotFormat,
        manifest: Option<SnapshotManifest>,
        save_wal: bool,
    ) -> CollectionResult<()> {
        let local_read = self.local.read().await;
        // Track concurrent `create_partial_snapshot` requests, so that cluster manager can load-balance them
        let _partial_snapshot_create_request_guard = if manifest.is_some() {
            Some(self.partial_snapshot_meta.track_create_snapshot_request())
        } else {
            None
        };
        if let Some(local) = &*local_read {
            local
                .create_snapshot(temp_path, tar, format, manifest, save_wal)
                .await?
        }
        self.replica_state
            .save_to_tar(tar, REPLICA_STATE_FILE)
            .await?;
        let shard_config = ShardConfig::new_replica_set();
        shard_config.save_to_tar(tar).await?;
        Ok(())
    }

    pub fn try_take_partial_snapshot_recovery_lock(
        &self,
    ) -> CollectionResult<tokio::sync::OwnedRwLockWriteGuard<()>> {
        self.partial_snapshot_meta.try_take_recovery_lock()
    }

    /// Rewrite the replica state of an extracted snapshot so it can be used on
    /// this peer: switch the recorded peer ID, and in single-node mode force
    /// the local replica active. Then restore the local shard data, if any.
    pub fn restore_snapshot(
        snapshot_path: &Path,
        this_peer_id: PeerId,
        is_distributed: bool,
    ) -> CollectionResult<()> {
        let replica_state: SaveOnDisk<ReplicaSetState> =
            SaveOnDisk::load_or_init_default(snapshot_path.join(REPLICA_STATE_FILE))?;
        // If this shard have local data
        let is_snapshot_local = replica_state.read().is_local;
        if !is_distributed && !is_snapshot_local {
            return Err(CollectionError::service_error(format!(
                "Can't restore snapshot in local mode with missing data at shard: {}",
                snapshot_path.display()
            )));
        }
        replica_state.write(|state| {
            state.switch_peer_id(this_peer_id);
            if !is_distributed {
                state.force_local_active()
            }
        })?;
        if replica_state.read().is_local {
            LocalShard::restore_snapshot(snapshot_path)?;
        }
        Ok(())
    }

    /// # Cancel safety
    ///
    /// This method is *not* cancel safe.
    pub async fn restore_local_replica_from(
        &self,
        replica_path: &Path,
        recovery_type: RecoveryType,
        collection_path: &Path,
        cancel: cancel::CancellationToken,
    ) -> CollectionResult<bool> {
        // `local.take()` call and `restore` task have to be executed as a single transaction
        if !LocalShard::check_data(replica_path) {
            return Ok(false);
        }
        let segments_path = LocalShard::segments_path(replica_path);
        // Collected from the extracted snapshot: segment IDs seen, and the
        // combined manifest (partial snapshots only)
        let mut snapshot_segments = HashSet::new();
        let mut snapshot_manifest = SnapshotManifest::default();
        for segment_entry in fs::read_dir(segments_path)?
{
            let segment_path = segment_entry?.path();
            if !segment_path.is_dir() {
                log::warn!(
                    "segment path {} in extracted snapshot {} is not a directory",
                    segment_path.display(),
                    replica_path.display(),
                );
                continue;
            }
            let segment_id = segment_path
                .file_name()
                .and_then(|segment_id| segment_id.to_str())
                .expect("segment path ends with a valid segment id");
            let added = snapshot_segments.insert(segment_id.to_string());
            debug_assert!(added);
            // Presence of a per-segment manifest distinguishes partial snapshots
            // from full shard snapshots; reject mismatched endpoint usage
            let manifest_path = segment_path.join("segment_manifest.json");
            if recovery_type.is_full() {
                if manifest_path.exists() {
                    return Err(CollectionError::bad_request(format!(
                        "invalid shard snapshot: \
                         segment {segment_id} contains segment manifest; \
                         ensure you are not recovering partial snapshot on shard snapshot endpoint",
                    )));
                }
                continue;
            }
            if !manifest_path.exists() {
                return Err(CollectionError::bad_request(format!(
                    "invalid partial snapshot: \
                     segment {segment_id} does not contain segment manifest; \
                     ensure you are not recovering shard snapshot on partial snapshot endpoint",
                )));
            }
            let manifest = File::open(&manifest_path).map_err(|err| {
                CollectionError::service_error(format!(
                    "failed to open segment {segment_id} manifest: {err}",
                ))
            })?;
            let manifest = io::BufReader::new(manifest);
            let manifest: SegmentManifest = serde_json::from_reader(manifest).map_err(|err| {
                CollectionError::bad_request(format!(
                    "failed to deserialize segment {segment_id} manifest: {err}",
                ))
            })?;
            // Manifest must describe the directory it lives in
            if segment_id != manifest.segment_id {
                return Err(CollectionError::bad_request(format!(
                    "invalid partial snapshot: \
                     segment {segment_id} contains segment manifest with segment ID {}",
                    manifest.segment_id,
                )));
            }
            let added = snapshot_manifest.add(manifest);
            debug_assert!(added);
        }
        snapshot_manifest.validate().map_err(|err| {
            CollectionError::bad_request(format!("invalid partial snapshot: {err}"))
        })?;
        // TODO:
        // Check that shard snapshot is compatible with the collection
        // (see `VectorsConfig::check_compatible_with_segment_config`)
        let _partial_snapshot_search_lock = match recovery_type {
            RecoveryType::Full => None,
            RecoveryType::Partial => {
                Some(self.partial_snapshot_meta.take_search_write_lock().await)
            }
        };
        let mut local =
            cancel::future::cancel_on_token(cancel.clone(), self.local.write()).await?;
        // set shard_id initialization flag
        // the file is removed after full recovery to indicate a well-formed shard
        // for example: some of the files may go missing if node gets killed during shard directory move/replace
        let shard_flag = shard_initializing_flag_path(collection_path, self.shard_id);
        let flag_file = tokio_fs::File::create(&shard_flag).await?;
        flag_file.sync_all().await?;
        // Check `cancel` token one last time before starting non-cancellable section
        if cancel.is_cancelled() {
            return Err(cancel::Error::Cancelled.into());
        }
        // Take the current local shard out of the replica set; for partial
        // snapshots also collect its manifest (needed for incremental cleanup)
        let local_manifest = match local.take() {
            Some(shard) if snapshot_manifest.is_empty() => {
                // Shard is no longer needed and can be dropped
                shard.stop_gracefully().await;
                None
            }
            None if snapshot_manifest.is_empty() => None,
            Some(shard) => {
                let local_manifest = shard.snapshot_manifest().await;
                // If local shard produces a valid manifest, it can be replaced and no longer needed
                // If it fails, we return it back.
                match local_manifest {
                    Ok(local_manifest) => {
                        local_manifest.validate().map_err(|err| {
                            CollectionError::service_error(format!(
                                "failed to restore partial shard snapshot for shard {}:{}: \
                                 local shard produces invalid snapshot manifest: \
                                 {err}",
                                self.collection_id, self.shard_id,
                            ))
                        })?;
                        // Shard is no longer needed and can be dropped
                        shard.stop_gracefully().await;
                        Some(local_manifest)
                    }
                    Err(err) => {
                        let _ = local.insert(shard);
                        return Err(CollectionError::service_error(format!(
                            "failed to restore partial shard snapshot for shard {}:{}: \
                             failed to collect snapshot manifest: \
                             {err}",
                            self.collection_id, self.shard_id,
                        )));
                    }
                }
            }
            None => {
                return Err(CollectionError::bad_request(format!(
                    "failed to restore partial shard snapshot for shard {}:{}: \
                     local shard does not exist on peer {}",
                    self.collection_id, self.shard_id, self.this_peer_id(),
                )));
            }
        };
        // Try to restore local replica from specified shard snapshot directory
        let restore = async {
            if let Some(local_manifest) = local_manifest {
                let segments_path = LocalShard::segments_path(&self.shard_path);
                for (segment_id, local_manifest) in local_manifest.iter() {
                    let segment_path = segments_path.join(segment_id);
                    log::debug!("Cleaning up segment {}", segment_path.display());
                    // Delete local segment, if it's not present in partial snapshot
                    let Some(snapshot_manifest) = snapshot_manifest.get(segment_id) else {
                        log::debug!("Removing outdated segment {}", segment_path.display());
                        tokio_fs::remove_dir_all(&segment_path)
                            .await
                            .map_err(|err| {
                                CollectionError::service_error(format!(
                                    "failed to remove outdated segment {}: {err}",
                                    segment_path.display(),
                                ))
                            })?;
                        continue;
                    };
                    for (file, local_version) in local_manifest.file_versions() {
                        let snapshot_version = snapshot_manifest.file_version(file);
                        let is_removed = snapshot_version.is_none();
                        let is_outdated = snapshot_version.is_none_or(|snapshot_version| {
                            let is_outdated = local_version < snapshot_version;
                            let is_zero = local_version == 0 && snapshot_version == 0;
is_outdated || is_zero
                        });
                        // RocksDB state is represented by "virtual" files in the
                        // manifest; they need dedicated handling below
                        #[cfg(feature = "rocksdb")]
                        let (is_rocksdb, is_payload_index_rocksdb) = (
                            file == Path::new(segment::segment::snapshot::ROCKS_DB_VIRT_FILE),
                            file == Path::new(
                                segment::segment::snapshot::PAYLOAD_INDEX_ROCKS_DB_VIRT_FILE,
                            ),
                        );
                        if is_removed {
                            // If `file` is a regular file, delete it from disk, if it was
                            // *removed* from the snapshot
                            #[cfg(feature = "rocksdb")]
                            let delete_regular_file = !is_rocksdb && !is_payload_index_rocksdb;
                            #[cfg(not(feature = "rocksdb"))]
                            let delete_regular_file = true;
                            if delete_regular_file {
                                let path = segment_path.join(file);
                                log::debug!("Removing outdated segment file {}", path.display());
                                tokio_fs::remove_file(&path).await.map_err(|err| {
                                    CollectionError::service_error(format!(
                                        "failed to remove outdated segment file {}: {err}",
                                        path.display(),
                                    ))
                                })?;
                            }
                        } else if is_outdated {
                            // If `file` is a RocksDB "virtual" file, remove RocksDB from disk,
                            // if it was *updated* in or *removed* from the snapshot
                            #[cfg(feature = "rocksdb")]
                            {
                                use segment::segment::destroy_rocksdb;
                                use segment::segment_constructor::PAYLOAD_INDEX_PATH;
                                if is_rocksdb {
                                    log::debug!(
                                        "Destroying outdated RocksDB at {}",
                                        segment_path.display(),
                                    );
                                    destroy_rocksdb(&segment_path)?;
                                } else if is_payload_index_rocksdb {
                                    log::debug!(
                                        "Destroying outdated payload index RocksDB at {}/{}",
                                        segment_path.display(),
                                        PAYLOAD_INDEX_PATH,
                                    );
                                    destroy_rocksdb(&segment_path.join(PAYLOAD_INDEX_PATH))?;
                                }
                            }
                        }
                    }
                }
                let wal_path = LocalShard::wal_path(&self.shard_path);
                if wal_path.is_dir() {
                    log::debug!("Removing WAL {}", wal_path.display());
                    tokio_fs::remove_dir_all(&wal_path).await.map_err(|err| {
                        CollectionError::service_error(format!(
                            "failed to remove WAL {}: {err}",
                            wal_path.display(),
                        ))
                    })?;
                }
            } else {
                // Remove shard data but not configuration files
                LocalShard::clear(&self.shard_path).await?;
            }
            LocalShard::move_data(replica_path, &self.shard_path).await?;
            LocalShard::load(
                self.shard_id,
                self.collection_id.clone(),
                &self.shard_path,
                self.collection_config.clone(),
                self.optimizers_config.clone(),
                self.shared_storage_config.clone(),
                self.payload_index_schema.clone(),
                recovery_type.is_full(),
                self.update_runtime.clone(),
                self.search_runtime.clone(),
                self.optimizer_resource_budget.clone(),
            )
            .await
        };
        match restore.await {
            Ok(new_local) => {
                local.replace(Shard::Local(new_local));
                // remove shard_id initialization flag because shard is fully recovered
                tokio_fs::remove_file(&shard_flag).await?;
                if recovery_type.is_partial() {
                    self.partial_snapshot_meta.snapshot_recovered();
                }
                Ok(true)
            }
            Err(restore_err) => {
                // Initialize "dummy" replica
                local.replace(Shard::Dummy(DummyShard::new(
                    "Failed to restore local replica",
                )));
                // Mark local replica as Dead since it's dummy and dirty
                self.add_locally_disabled(None, self.this_peer_id(), None);
                // Remove inner shard data but keep the shard folder with its configuration files.
                // This way the shard can be read on startup and the user can decide what to do next.
                match LocalShard::clear(&self.shard_path).await {
                    Ok(()) => Err(restore_err),
                    Err(cleanup_err) => {
                        log::error!(
                            "Failed to cleanup shard {} directory ({}) after restore failed: \
                             {cleanup_err}",
                            self.shard_id,
                            self.shard_path.display(),
                        );
                        // TODO: Contextualize `restore_err` with `cleanup_err` details!?
                        Err(restore_err)
                    }
                }
            }
        }
    }

    /// Collect the snapshot manifest of the local shard, or a bad-request
    /// error if there is no local shard on this peer.
    pub async fn get_partial_snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
        self.local
            .read()
            .await
            .as_ref()
            .ok_or_else(|| {
                CollectionError::bad_request(format!(
                    "local shard {}:{} does not exist on peer {}",
                    self.collection_id, self.shard_id, self.this_peer_id(),
                ))
            })?
            .snapshot_manifest()
            .await
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/replica_set_state.rs
lib/collection/src/shards/replica_set/replica_set_state.rs
use std::collections::HashMap; use schemars::JsonSchema; use segment::common::anonymize::Anonymize; use serde::{Deserialize, Serialize}; use crate::shards::shard::PeerId; /// Represents a replica set state #[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Clone)] pub struct ReplicaSetState { pub is_local: bool, pub this_peer_id: PeerId, peers: HashMap<PeerId, ReplicaState>, } impl ReplicaSetState { pub fn get_peer_state(&self, peer_id: PeerId) -> Option<ReplicaState> { self.peers.get(&peer_id).copied() } /// Returns previous state if any pub fn set_peer_state(&mut self, peer_id: PeerId, state: ReplicaState) -> Option<ReplicaState> { self.peers.insert(peer_id, state) } pub fn remove_peer_state(&mut self, peer_id: PeerId) -> Option<ReplicaState> { self.peers.remove(&peer_id) } pub fn peers(&self) -> &HashMap<PeerId, ReplicaState> { &self.peers } pub fn check_peers_state_all<F>(&self, check: F) -> bool where F: Fn(ReplicaState) -> bool, { self.peers.values().all(|state| check(*state)) } pub fn active_peers(&self) -> Vec<PeerId> { self.peers .iter() .filter_map(|(peer_id, state)| { // We consider `ReshardingScaleDown` to be `Active`! state.is_active().then_some(*peer_id) }) .collect() } pub fn readable_peers(&self) -> Vec<PeerId> { self.peers .iter() .filter_map(|(peer_id, state)| state.is_readable().then_some(*peer_id)) .collect() } pub fn active_or_resharding_peers(&self) -> impl Iterator<Item = PeerId> + '_ { self.peers.iter().filter_map(|(peer_id, state)| { matches!( state, ReplicaState::Active | ReplicaState::Resharding | ReplicaState::ReshardingScaleDown ) .then_some(*peer_id) }) } pub fn set_peers(&mut self, peers: HashMap<PeerId, ReplicaState>) { self.peers = peers; } /// Change current `this_peer_id` to a new one. 
pub fn switch_peer_id(&mut self, new_peer_id: PeerId) { let old_peer_id = self.this_peer_id; self.this_peer_id = new_peer_id; self.peers .remove(&old_peer_id) .and_then(|replica_state| self.peers.insert(new_peer_id, replica_state)); } /// Remove all remote peers from the replica set state and activate local peer. pub fn force_local_active(&mut self) { self.peers.clear(); self.peers.insert(self.this_peer_id, ReplicaState::Active); } } /// State of the single shard within a replica set. #[derive( Debug, Deserialize, Serialize, JsonSchema, Default, PartialEq, Eq, Hash, Clone, Copy, Anonymize, )] pub enum ReplicaState { // Active and sound #[default] Active, // Failed for some reason Dead, // The shard is partially loaded and is currently receiving data from other shards Partial, // Collection is being created Initializing, // A shard which receives data, but is not used for search // Useful for backup shards Listener, // Deprecated since Qdrant 1.9.0, used in Qdrant 1.7.0 and 1.8.0 // // Snapshot shard transfer is in progress, updates aren't sent to the shard // Normally rejects updates. Since 1.8 it allows updates if force is true. PartialSnapshot, // Shard is undergoing recovery by an external node // Normally rejects updates, accepts updates if force is true Recovery, // Points are being migrated to this shard as part of resharding up Resharding, // Points are being migrated to this shard as part of resharding down ReshardingScaleDown, // Active for readers, Partial for writers ActiveRead, } impl ReplicaState { /// Check if replica state is active /// Used to define if this replica can be used as a source of truth. 
pub fn is_active(self) -> bool { match self { ReplicaState::Active => true, ReplicaState::ReshardingScaleDown => true, ReplicaState::Dead | ReplicaState::Partial | ReplicaState::Initializing | ReplicaState::Listener | ReplicaState::PartialSnapshot | ReplicaState::Recovery | ReplicaState::Resharding | ReplicaState::ActiveRead => false, } } /// Check that replica has full dataset, so it can be used for read operations. pub fn is_readable(self) -> bool { match self { ReplicaState::Active => true, ReplicaState::ReshardingScaleDown => true, ReplicaState::ActiveRead => true, // False from here on ReplicaState::Dead => false, ReplicaState::Partial => false, ReplicaState::Initializing => false, ReplicaState::Listener => false, ReplicaState::PartialSnapshot => false, ReplicaState::Recovery => false, ReplicaState::Resharding => false, } } pub fn is_updatable(self) -> bool { match self { ReplicaState::Active => true, ReplicaState::Partial => true, ReplicaState::Initializing => true, ReplicaState::Listener => true, ReplicaState::Recovery | ReplicaState::PartialSnapshot => false, ReplicaState::Resharding | ReplicaState::ReshardingScaleDown => true, ReplicaState::Dead => false, ReplicaState::ActiveRead => true, } } /// Check if this peer can be used as a source of truth within a shard_id. 
/// For instance: /// - It can be the only receiver of updates /// - It can be a primary replica for ordered writes pub fn can_be_source_of_truth(self) -> bool { match self { ReplicaState::Active => true, ReplicaState::ActiveRead => true, // Can be only one replica per shard_id ReplicaState::Resharding => true, // Can be only one replica per shard_id ReplicaState::ReshardingScaleDown => true, // Acts like Active, until resharding is committed // false from here on ReplicaState::Partial => false, ReplicaState::Initializing => false, ReplicaState::Listener => false, ReplicaState::PartialSnapshot => false, ReplicaState::Recovery => false, ReplicaState::Dead => false, } } /// Check whether the replica state is active or listener or resharding. /// Healthy state means that replica does not require **automatic** recovery. pub fn is_healthy(self) -> bool { match self { ReplicaState::Active | ReplicaState::Listener | ReplicaState::Resharding | ReplicaState::ReshardingScaleDown => true, ReplicaState::Dead | ReplicaState::Initializing | ReplicaState::Partial | ReplicaState::PartialSnapshot | ReplicaState::Recovery | ReplicaState::ActiveRead => false, } } /// Check whether the replica state is partial or partial-like. /// /// In other words: is the state related to shard transfers? // // TODO(resharding): What's the best way to handle `ReshardingScaleDown` properly!? pub fn is_partial_or_recovery(self) -> bool { match self { ReplicaState::Partial | ReplicaState::PartialSnapshot | ReplicaState::Recovery | ReplicaState::Resharding | ReplicaState::ReshardingScaleDown | ReplicaState::ActiveRead => true, ReplicaState::Active | ReplicaState::Dead | ReplicaState::Initializing | ReplicaState::Listener => false, } } /// Returns `true` if the replica state is resharding, either up or down. 
pub fn is_resharding(&self) -> bool { match self { ReplicaState::Resharding | ReplicaState::ReshardingScaleDown => true, ReplicaState::Partial | ReplicaState::PartialSnapshot | ReplicaState::Recovery | ReplicaState::Active | ReplicaState::Dead | ReplicaState::Initializing | ReplicaState::Listener | ReplicaState::ActiveRead => false, } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/execute_read_operation.rs
lib/collection/src/shards/replica_set/execute_read_operation.rs
use std::cmp;
use std::fmt::Write as _;
use std::ops::Deref as _;

use futures::future::{self, BoxFuture};
use futures::stream::FuturesUnordered;
use futures::{FutureExt as _, StreamExt as _};
use rand::seq::SliceRandom as _;

use super::ShardReplicaSet;
use crate::operations::consistency_params::{ReadConsistency, ReadConsistencyType};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::remote_shard::RemoteShard;
use crate::shards::resolve::{Resolve, ResolveCondition};
use crate::shards::shard_trait::ShardOperation;

impl ShardReplicaSet {
    /// Execute read op. on replica set:
    /// 1 - Prefer local replica
    /// 2 - Otherwise uses `read_fan_out_ratio` to compute list of active remote shards.
    /// 3 - Fallbacks to all remaining shards if the optimisations fails.
    /// It does not report failing peer_ids to the consensus.
    pub async fn execute_read_operation<Res, F>(
        &self,
        read_operation: F,
        local_only: bool,
    ) -> CollectionResult<Res>
    where
        F: Fn(&(dyn ShardOperation + Send + Sync)) -> BoxFuture<'_, CollectionResult<Res>>,
    {
        if local_only {
            return self.execute_local_read_operation(read_operation).await;
        }

        // A single successful response is enough here; no consistency resolution
        let mut responses = self
            .execute_cluster_read_operation(read_operation, 1, None)
            .await?;

        // On `Ok`, the cluster read returns at least `required_successful_results`
        // (here: 1) responses, so `pop()` cannot fail
        Ok(responses.pop().unwrap())
    }

    /// Execute a read operation on enough replicas to satisfy `read_consistency`,
    /// then resolve the collected responses into a single result via `Res::resolve`.
    ///
    /// When there are not enough readable replicas, `Initializing` replicas are
    /// counted towards the requirement (with the requirement relaxed accordingly).
    pub async fn execute_and_resolve_read_operation<Res, F>(
        &self,
        read_operation: F,
        read_consistency: Option<ReadConsistency>,
        local_only: bool,
    ) -> CollectionResult<Res>
    where
        F: Fn(&(dyn ShardOperation + Send + Sync)) -> BoxFuture<'_, CollectionResult<Res>>,
        Res: Resolve,
    {
        if local_only {
            return self.execute_local_read_operation(read_operation).await;
        }

        let read_consistency = read_consistency.unwrap_or_default();

        // Local replica contributes 0 or 1 to each of the counters below
        let local_count = usize::from(self.peer_state(self.this_peer_id()).is_some());
        let active_local_count = usize::from(self.peer_is_readable(self.this_peer_id()));
        let initializing_local_count = usize::from(self.peer_is_initializing(self.this_peer_id()));

        let remotes = self.remotes.read().await;

        let remotes_count = remotes.len();

        // TODO(resharding): Handle resharded shard?
        let active_remotes_count = remotes
            .iter()
            .filter(|remote| self.peer_is_readable(remote.peer_id))
            .count();

        let initializing_remotes_count = remotes
            .iter()
            .filter(|remote| self.peer_is_initializing(remote.peer_id))
            .count();

        let total_count = local_count + remotes_count;
        let active_count = active_local_count + active_remotes_count;
        let initializing_count = initializing_local_count + initializing_remotes_count;

        // Derive how many successful responses are required and how to merge them
        let (mut required_successful_results, condition) = match read_consistency {
            ReadConsistency::Type(ReadConsistencyType::All) => (total_count, ResolveCondition::All),

            ReadConsistency::Type(ReadConsistencyType::Majority) => {
                (total_count, ResolveCondition::Majority)
            }

            ReadConsistency::Type(ReadConsistencyType::Quorum) => {
                (total_count / 2 + 1, ResolveCondition::All)
            }

            ReadConsistency::Factor(factor) => {
                (factor.clamp(1, total_count), ResolveCondition::All)
            }
        };

        // Even counting initializing replicas the requirement is unreachable: fail fast
        if active_count + initializing_count < required_successful_results {
            return Err(CollectionError::service_error(format!(
                "The replica set for shard {} on peer {} does not have enough active replicas",
                self.shard_id,
                self.this_peer_id(),
            )));
        }

        // Not enough *readable* replicas: relax the requirement by the number of
        // initializing replicas, but never below `active_count`
        if active_count < required_successful_results {
            required_successful_results = cmp::max(
                required_successful_results.saturating_sub(initializing_count),
                active_count,
            );
        }

        let mut responses = self
            .execute_cluster_read_operation(
                read_operation,
                required_successful_results,
                Some(remotes),
            )
            .await?;

        if responses.is_empty() {
            Ok(Res::default())
        } else if responses.len() == 1 {
            // Single response: nothing to resolve
            Ok(responses.pop().unwrap())
        } else {
            Ok(Res::resolve(responses, condition))
        }
    }

    /// Execute the read operation against the local shard only; errors if this
    /// replica set has no local shard.
    async fn execute_local_read_operation<Res, F>(&self, read_operation: F) -> CollectionResult<Res>
    where
        F: Fn(&(dyn ShardOperation + Send + Sync)) -> BoxFuture<'_, CollectionResult<Res>>,
    {
        let _partial_snapshot_search_lock =
            self.partial_snapshot_meta.try_take_search_read_lock()?;

        let local = self.local.read().await;

        let Some(local) = local.deref() else {
            return Err(CollectionError::service_error(format!(
                "Local shard {} not found",
                self.shard_id
            )));
        };

        read_operation(local.get()).await
    }

    /// Fan the read operation out over the local shard and readable remotes until
    /// `required_successful_results` responses are collected.
    ///
    /// Transient errors cause one more replica to be tried; non-transient errors
    /// abort immediately. On `Ok`, at least `required_successful_results` responses
    /// are returned.
    async fn execute_cluster_read_operation<Res, F>(
        &self,
        read_operation: F,
        required_successful_results: usize,
        remotes: Option<tokio::sync::RwLockReadGuard<'_, Vec<RemoteShard>>>,
    ) -> CollectionResult<Vec<Res>>
    where
        F: Fn(&(dyn ShardOperation + Send + Sync)) -> BoxFuture<'_, CollectionResult<Res>>,
    {
        // Reuse the caller's read guard if one was passed in, otherwise acquire our own
        let remotes = match remotes {
            Some(remotes) => remotes,
            None => self.remotes.read().await,
        };

        // We don't need to explicitly check partial snapshot recovery lock, because
        // - partial snapshot recovery *write-locks* `local` shard when applying partial snapshot
        // - this method *tries* to read-lock `local` shard, and if it's unavailable, fan-out
        //   request to other replicas
        let local_read = self.local.try_read().ok();
        let local_read = local_read.as_ref().and_then(|local| local.as_ref());

        let (local, is_local_ready, update_watcher) = match local_read {
            Some(local) => {
                let update_watcher = local.watch_for_update();
                let is_local_ready = !local.is_update_in_progress();
                (Some(local), is_local_ready, Some(update_watcher))
            }

            None => (None, false, None),
        };

        let local_is_readable = self.peer_is_readable(self.this_peer_id());

        // Local operation is tagged `true` so it can be told apart from remote ones below
        let local_operation = if local_is_readable {
            let local_operation = async {
                let Some(local) = local else {
                    return Err(CollectionError::service_error(format!(
                        "Local shard {} not found",
                        self.shard_id,
                    )));
                };

                read_operation(local.get()).await
            };

            Some(local_operation.map(|result| (result, true)).left_future())
        } else {
            None
        };

        // TODO(resharding): Handle resharded shard?
        let mut readable_remotes: Vec<_> = remotes
            .iter()
            .filter(|remote| self.peer_is_readable(remote.peer_id))
            .collect();

        // Randomize replica order to spread read load across peers
        readable_remotes.shuffle(&mut rand::rng());

        let remote_operations = readable_remotes.into_iter().map(|remote| {
            read_operation(remote)
                .map(|result| (result, false))
                .right_future()
        });

        // Lazy iterator: an operation only starts once pulled via `operations.next()`
        let mut operations = local_operation.into_iter().chain(remote_operations);

        // Possible scenarios:
        //
        // - Local is available: default fan-out is 0 (no fan-out, unless explicitly requested)
        // - Local is not available: default fan-out is 1
        // - There is no local: default fan-out is 1
        let default_fan_out = if is_local_ready && local_is_readable { 0 } else { 1 };

        let read_fan_out_factor: usize = self
            .collection_config
            .read()
            .await
            .params
            .read_fan_out_factor
            .unwrap_or(default_fan_out)
            .try_into()
            .expect("u32 can be converted into usize");

        let initial_concurrent_operations = required_successful_results + read_fan_out_factor;

        let mut pending_operations: FuturesUnordered<_> = operations
            .by_ref()
            .take(initial_concurrent_operations)
            .collect();

        let mut responses = Vec::new();
        let mut errors = Vec::new();

        let mut is_local_operation_resolved = false;

        // Watcher future from `watch_for_update` on the local shard; pends forever
        // when there is no local watcher so the `select!` branch never fires
        let update_watcher = async move {
            match update_watcher {
                Some(update_watcher) => update_watcher.await,
                None => future::pending().await,
            }
        };

        let update_watcher = update_watcher.fuse();
        tokio::pin!(update_watcher);

        loop {
            let result;

            tokio::select! {
                operation_result = pending_operations.next() => {
                    // All started operations resolved and no more are pending: give up
                    let Some(operation_result) = operation_result else {
                        break;
                    };

                    let (operation_result, is_local_operation) = operation_result;
                    result = operation_result;

                    if is_local_operation {
                        is_local_operation_resolved = true;
                    }
                }

                // The local update watcher fired while the local read is still pending:
                // proactively start one more remote read instead of only waiting on local
                _ = &mut update_watcher, if local_is_readable && !is_local_operation_resolved => {
                    pending_operations.extend(operations.next());
                    continue;
                }
            }

            match result {
                Ok(response) => {
                    responses.push(response);

                    if responses.len() >= required_successful_results {
                        break;
                    }
                }

                Err(error) => {
                    if error.is_transient() {
                        log::debug!("Read operation failed: {error}");
                        errors.push(error);
                    } else {
                        // Non-transient error: retrying on another replica won't help
                        return Err(error);
                    }

                    // Replace the failed operation with the next untried replica (if any)
                    pending_operations.extend(operations.next());

                    // Not enough replicas left to possibly satisfy the requirement
                    if responses.len() + pending_operations.len() < required_successful_results {
                        break;
                    }
                }
            }
        }

        if responses.len() >= required_successful_results {
            Ok(responses)
        } else {
            let errors_count = errors.len();
            let operations_count = responses.len() + errors.len();
            let errors_separator = if !errors.is_empty() { ":" } else { "" };

            let mut message = format!(
                "{errors_count} of {operations_count} read operations failed{errors_separator}"
            );

            for error in errors {
                write!(&mut message, "\n {error}").expect("writing into String always succeeds");
            }

            Err(CollectionError::service_error(message))
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/shard_transfer.rs
lib/collection/src/shards/replica_set/shard_transfer.rs
use std::ops::Deref as _;
use std::sync::Arc;

use parking_lot::Mutex;
use segment::types::{Filter, PointIdType};

use super::ShardReplicaSet;
use crate::hash_ring::HashRingRouter;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::forward_proxy_shard::ForwardProxyShard;
use crate::shards::local_shard::clock_map::RecoveryPoint;
use crate::shards::queue_proxy_shard::QueueProxyShard;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::shard::Shard;
use crate::shards::transfer::transfer_tasks_pool::TransferTaskProgress;

impl ShardReplicaSet {
    /// Convert `Local` shard into `ForwardProxy`.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn proxify_local(
        &self,
        remote_shard: RemoteShard,
        resharding_hash_ring: Option<HashRingRouter>,
        filter: Option<Filter>,
    ) -> CollectionResult<()> {
        let mut local = self.local.write().await;

        match local.deref() {
            // Expected state, continue
            Some(Shard::Local(_)) => {}

            // If a forward proxy to same remote, return early
            Some(Shard::ForwardProxy(proxy))
                if proxy.remote_shard.peer_id == remote_shard.peer_id =>
            {
                return Ok(());
            }

            // Unexpected states, error
            Some(Shard::ForwardProxy(proxy)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot proxify local shard {} to peer {} because it is already proxified to peer {}",
                    self.shard_id, remote_shard.peer_id, proxy.remote_shard.peer_id
                )));
            }
            Some(Shard::QueueProxy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot proxify local shard {} to peer {} because it is already queue proxified",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            // NOTE(review): this message says "queue proxify" but we are in
            // `proxify_local` — looks like a copy-paste from `queue_proxify_local`;
            // confirm before changing the message
            Some(Shard::Proxy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot queue proxify local shard {} to peer {} because it already is a proxy",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            Some(Shard::Dummy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot proxify local dummy shard {} to peer {}",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            None => {
                return Err(CollectionError::service_error(format!(
                    "Cannot proxify local shard {} on peer {} because it is not active",
                    self.shard_id,
                    self.this_peer_id()
                )));
            }
        };

        // Explicit `match` instead of `if-let` to catch `unreachable` condition if top `match` is
        // changed
        let Some(Shard::Local(local_shard)) = local.take() else {
            unreachable!()
        };

        let proxy_shard_res = ForwardProxyShard::new(
            self.shard_id,
            local_shard,
            remote_shard,
            resharding_hash_ring,
            filter,
        );

        // On failure the constructor hands the local shard back, so we can restore it
        match proxy_shard_res {
            Ok(proxy_shard) => {
                let _ = local.insert(Shard::ForwardProxy(proxy_shard));
                Ok(())
            }
            Err((err, local_shard)) => {
                log::warn!("Failed to proxify shard, reverting to local shard: {err}");
                let _ = local.insert(Shard::Local(local_shard));
                Err(err)
            }
        }
    }

    /// Queue proxy our local shard, pointing to the remote shard.
    ///
    /// A `from_version` may be provided to start queueing the WAL from a specific version. The
    /// point may be in the past, but can never be outside the range of what we currently have in
    /// WAL. If `None` is provided, it'll queue from the latest available WAL version at this time.
    ///
    /// For snapshot transfer we queue from the latest version, so we can send all new updates once
    /// the remote shard has been recovered. For WAL delta transfer we queue from a specific
    /// version based on our recovery point.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn queue_proxify_local(
        &self,
        remote_shard: RemoteShard,
        from_version: Option<u64>,
        progress: Arc<Mutex<TransferTaskProgress>>,
    ) -> CollectionResult<()> {
        let mut local = self.local.write().await;

        match local.deref() {
            // Expected state, continue
            Some(Shard::Local(_)) => {}

            // If a forward proxy to same remote, continue and change into queue proxy
            Some(Shard::ForwardProxy(proxy))
                if proxy.remote_shard.peer_id == remote_shard.peer_id => {}

            // Unexpected states, error
            Some(Shard::QueueProxy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot queue proxify local shard {} to peer {} because it is already queue proxified",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            Some(Shard::ForwardProxy(proxy)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot queue proxify local shard {} to peer {} because it is already proxified to peer {}",
                    self.shard_id, remote_shard.peer_id, proxy.remote_shard.peer_id
                )));
            }
            Some(Shard::Proxy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot queue proxify local shard {} to peer {} because it already is a proxy",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            Some(Shard::Dummy(_)) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot proxify local dummy shard {} to peer {}",
                    self.shard_id, remote_shard.peer_id,
                )));
            }
            None => {
                return Err(CollectionError::service_error(format!(
                    "Cannot queue proxify local shard {} on peer {} because it is not active",
                    self.shard_id,
                    self.this_peer_id()
                )));
            }
        };

        // Get `max_ack_version` without "taking" local shard (to maintain cancel safety)
        let local_shard = match local.deref() {
            Some(Shard::Local(local)) => local,
            Some(Shard::ForwardProxy(proxy)) => &proxy.wrapped_shard,
            _ => unreachable!(),
        };
        let wal_keep_from = local_shard
            .update_handler
            .lock()
            .await
            .wal_keep_from
            .clone();

        // Proxify local shard
        //
        // Making `await` calls between `local.take()` and `local.insert(...)` is *not* cancel safe!
        let local_shard = match local.take() {
            Some(Shard::Local(local)) => local,
            Some(Shard::ForwardProxy(proxy)) => proxy.wrapped_shard,
            _ => unreachable!(),
        };

        // Try to queue proxify with or without version
        let proxy_shard = match from_version {
            None => {
                Ok(QueueProxyShard::new(local_shard, remote_shard, wal_keep_from, progress).await)
            }
            Some(from_version) => {
                QueueProxyShard::new_from_version(
                    local_shard,
                    remote_shard,
                    wal_keep_from,
                    from_version,
                    progress,
                )
                .await
            }
        };

        // Insert queue proxy shard on success or revert to local shard on failure
        match proxy_shard {
            // All good, insert queue proxy shard
            Ok(proxy_shard) => {
                let _ = local.insert(Shard::QueueProxy(proxy_shard));
                Ok(())
            }
            Err((local_shard, err)) => {
                log::warn!("Failed to queue proxify shard, reverting to local shard: {err}");
                let _ = local.insert(Shard::Local(local_shard));
                Err(err)
            }
        }
    }

    /// Un-proxify local shard wrapped as `ForwardProxy` or `QueueProxy`.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn un_proxify_local(&self) -> CollectionResult<()> {
        let mut local = self.local.write().await;

        match local.deref() {
            // Expected states, continue
            Some(Shard::Local(_)) => return Ok(()),
            Some(Shard::ForwardProxy(_) | Shard::QueueProxy(_)) => {}

            // Unexpected states, error
            Some(shard @ (Shard::Proxy(_) | Shard::Dummy(_))) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot un-proxify local shard {} because it has unexpected type - {}",
                    self.shard_id,
                    shard.variant_name(),
                )));
            }
            None => {
                return Err(CollectionError::service_error(format!(
                    "Cannot un-proxify local shard {} on peer {} because it is not active",
                    self.shard_id,
                    self.this_peer_id(),
                )));
            }
        };

        // Perform async finalization without "taking" local shard (to maintain cancel safety)
        //
        // Explicit `match` instead of `if-let` on `Shard::QueueProxy` to catch `unreachable`
        // condition if top `match` is changed
        let result = match local.deref() {
            // Forward proxy needs no finalization before being unwrapped
            Some(Shard::ForwardProxy(_)) => Ok(()),
            Some(Shard::QueueProxy(proxy)) => {
                // We should not unproxify a queue proxy shard directly because it can fail if it
                // fails to send all updates to the remote shard.
                // Instead we should transform it into a forward proxy shard before unproxify is
                // called to handle errors at an earlier time.
                // Also, we're holding a write lock here which could block other accessors for a
                // long time if transferring updates takes a long time.
                // See `Self::queue_proxy_into_forward_proxy()` for more details.
                log::warn!(
                    "Directly unproxifying queue proxy shard, this should not happen normally"
                );
                let result = proxy.transfer_all_missed_updates().await;
                if let Err(err) = &result {
                    log::error!(
                        "Failed to un-proxify local shard because transferring remaining queue \
                         items to remote failed: {err}"
                    );
                }
                result
            }
            _ => unreachable!(),
        };

        // Un-proxify local shard
        //
        // Making `await` calls between `local.take()` and `local.insert(...)` is *not* cancel safe!
        let local_shard = match local.take() {
            Some(Shard::ForwardProxy(proxy)) => proxy.wrapped_shard,
            Some(Shard::QueueProxy(proxy)) => {
                let (local_shard, _) = proxy.forget_updates_and_finalize();
                local_shard
            }
            _ => unreachable!(),
        };

        let _ = local.insert(Shard::Local(local_shard));

        result
    }

    /// Revert usage of a `QueueProxy` shard and forget all updates, then un-proxify to local
    ///
    /// This can be used to intentionally forget all updates that are collected by the queue proxy
    /// shard and revert back to a local shard. This is useful if a shard transfer operation using
    /// a queue proxy must be aborted.
    ///
    /// Does nothing if the local shard is not a queue proxy shard.
    /// This method cannot fail.
    ///
    /// # Warning
    ///
    /// This intentionally forgets and drops updates pending to be transferred to the remote shard.
    /// The remote shard may therefore be left in an inconsistent state, which should be
    /// resolved separately.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    ///
    /// If cancelled - the queue proxy may not be reverted to a local proxy.
    pub async fn revert_queue_proxy_local(&self) {
        let mut local = self.local.write().await;

        // Take out queue proxy shard or return
        if !matches!(local.deref(), Some(Shard::QueueProxy(_))) {
            return;
        };

        log::debug!("Forgetting queue proxy updates and reverting to local shard");

        // Making `await` calls between `local.take()` and `local.insert(...)` is *not* cancel safe!
        let Some(Shard::QueueProxy(queue_proxy)) = local.take() else {
            unreachable!();
        };
        let (local_shard, _) = queue_proxy.forget_updates_and_finalize();

        let _ = local.insert(Shard::Local(local_shard));
    }

    /// Custom operation for transferring data from one shard to another during transfer
    ///
    /// Returns new point offset and transferred count
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn transfer_batch(
        &self,
        offset: Option<PointIdType>,
        batch_size: usize,
        hashring_filter: Option<&HashRingRouter>,
        merge_points: bool,
    ) -> CollectionResult<(Option<PointIdType>, usize)> {
        let local = self.local.read().await;

        // Batch transfer is only valid while the shard is a forward proxy
        let Some(Shard::ForwardProxy(proxy)) = local.deref() else {
            return Err(CollectionError::service_error(format!(
                "Cannot transfer batch from shard {} because it is not proxified",
                self.shard_id
            )));
        };

        proxy
            .transfer_batch(
                offset,
                batch_size,
                hashring_filter,
                merge_points,
                &self.search_runtime,
            )
            .await
    }

    /// Custom operation for transferring indexes from one shard to another during transfer
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn transfer_indexes(&self) -> CollectionResult<()> {
        let local = self.local.read().await;

        let Some(Shard::ForwardProxy(proxy)) = local.deref() else {
            return Err(CollectionError::service_error(format!(
                "Cannot transfer indexes from shard {} because it is not proxified",
                self.shard_id,
            )));
        };

        log::trace!(
            "Transferring indexes to shard {}",
            proxy.remote_shard.peer_id,
        );

        proxy.transfer_indexes().await
    }

    /// Send all queue proxy updates to remote
    ///
    /// This method allows to transfer queued updates at any point, before the shard is
    /// unproxified for example. This allows for proper error handling at the time this method is
    /// called. Because the shard is transformed into a forward proxy after this operation it will
    /// not error again when the shard is eventually unproxified again.
    ///
    /// Does nothing if the local shard is not a queue proxy.
    ///
    /// # Errors
    ///
    /// Returns an error if transferring all updates to the remote failed.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe.
    ///
    /// If cancelled - transforming the queue proxy into a forward proxy may not actually complete.
    /// None, some or all queued operations may be transmitted to the remote.
    pub async fn queue_proxy_flush(&self) -> CollectionResult<()> {
        // Shared read lock is enough: we only flush, the shard type is not changed here
        let local = self.local.read().await;

        let Some(Shard::QueueProxy(proxy)) = local.deref() else {
            return Ok(());
        };

        proxy.transfer_all_missed_updates().await?;

        Ok(())
    }

    /// Send all queue proxy updates to remote and transform into forward proxy
    ///
    /// When a queue or forward proxy shard needs to be unproxified into a local shard again we
    /// typically don't have room to handle errors. A queue proxy shard may error if it fails to
    /// send updates to the remote shard, while a forward proxy does not fail at all when
    /// transforming.
    ///
    /// This method allows to transfer queued updates before the shard is unproxified. This allows
    /// for proper error handling at the time this method is called. Because the shard is
    /// transformed into a forward proxy after this operation it will not error again when the
    /// shard is eventually unproxified again.
    ///
    /// If the local shard is a queue proxy:
    /// - Transfers all missed updates to remote
    /// - Transforms queue proxy into forward proxy
    ///
    /// Does nothing if the local shard is not a queue proxy.
    ///
    /// # Errors
    ///
    /// Returns an error if transferring all updates to the remote failed.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe.
    ///
    /// If cancelled - transforming the queue proxy into a forward proxy may not actually complete.
    /// None, some or all queued operations may be transmitted to the remote.
    pub async fn queue_proxy_into_forward_proxy(&self) -> CollectionResult<()> {
        // First pass: transfer all missed updates with shared read lock
        self.queue_proxy_flush().await?;

        // Second pass: transfer new updates
        let mut local = self.local.write().await;
        let Some(Shard::QueueProxy(proxy)) = local.deref() else {
            return Ok(());
        };
        proxy.transfer_all_missed_updates().await?;

        // Transform `QueueProxyShard` into `ForwardProxyShard`
        log::trace!("Transferred all queue proxy operations, transforming into forward proxy now");

        // Making `await` calls between `local.take()` and `local.insert(...)` is *not* cancel safe!
        let Some(Shard::QueueProxy(queue_proxy)) = local.take() else {
            unreachable!();
        };
        let (local_shard, remote_shard) = queue_proxy.forget_updates_and_finalize();
        let forward_proxy_res =
            ForwardProxyShard::new(self.shard_id, local_shard, remote_shard, None, None);

        match forward_proxy_res {
            Ok(forward_proxy) => {
                let _ = local.insert(Shard::ForwardProxy(forward_proxy));
                Ok(())
            }
            Err((err, local_shard)) => {
                log::warn!(
                    "Failed to transform queue proxy shard into forward proxy, reverting to local shard: {err}"
                );
                let _ = local.insert(Shard::Local(local_shard));
                Err(err)
            }
        }
    }

    /// Resolve which WAL version a delta transfer should start from for the given
    /// recovery point, by delegating to the local shard.
    ///
    /// Errors if this replica set has no local shard.
    pub async fn resolve_wal_delta(
        &self,
        recovery_point: RecoveryPoint,
    ) -> CollectionResult<Option<u64>> {
        let local_shard_read = self.local.read().await;

        let Some(local_shard) = local_shard_read.deref() else {
            return Err(CollectionError::service_error(
                "Cannot resolve WAL delta, shard replica set does not have local shard",
            ));
        };

        local_shard.resolve_wal_delta(recovery_point).await
    }

    /// Get the WAL version of the local shard, by delegating to it.
    ///
    /// Errors if this replica set has no local shard.
    pub async fn wal_version(&self) -> CollectionResult<Option<u64>> {
        let local_shard_read = self.local.read().await;

        let Some(local_shard) = local_shard_read.deref() else {
            return Err(CollectionError::service_error(
                "Cannot get WAL version, shard replica set does not have local shard",
            ));
        };

        local_shard.wal_version().await
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/replica_set/read_ops.rs
lib/collection/src/shards/replica_set/read_ops.rs
use std::sync::Arc; use std::time::Duration; use common::counter::hardware_accumulator::HwMeasurementAcc; use futures::FutureExt as _; use segment::data_types::facets::{FacetParams, FacetResponse}; use segment::types::*; use shard::retrieve::record_internal::RecordInternal; use shard::search::CoreSearchRequestBatch; use super::ShardReplicaSet; use crate::operations::consistency_params::ReadConsistency; use crate::operations::types::*; use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse}; impl ShardReplicaSet { #[allow(clippy::too_many_arguments)] pub async fn scroll_by( &self, request: Arc<ScrollRequestInternal>, read_consistency: Option<ReadConsistency>, local_only: bool, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<RecordInternal>> { self.execute_and_resolve_read_operation( |shard| { let request = request.clone(); let search_runtime = self.search_runtime.clone(); let hw_acc = hw_measurement_acc.clone(); async move { shard .scroll_by(request, &search_runtime, timeout, hw_acc) .await } .boxed() }, read_consistency, local_only, ) .await } #[allow(clippy::too_many_arguments)] pub async fn local_scroll_by_id( &self, offset: Option<ExtendedPointId>, limit: usize, with_payload_interface: &WithPayloadInterface, with_vector: &WithVector, filter: Option<&Filter>, read_consistency: Option<ReadConsistency>, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<RecordInternal>> { let with_payload_interface = Arc::new(with_payload_interface.clone()); let with_vector = Arc::new(with_vector.clone()); let filter = filter.map(|filter| Arc::new(filter.clone())); self.execute_and_resolve_read_operation( |shard| { let with_payload_interface = with_payload_interface.clone(); let with_vector = with_vector.clone(); let filter = filter.clone(); let search_runtime = self.search_runtime.clone(); let hw_acc = hw_measurement_acc.clone(); async move { shard 
.local_scroll_by_id( offset, limit, &with_payload_interface, &with_vector, filter.as_deref(), &search_runtime, timeout, hw_acc, ) .await } .boxed() }, read_consistency, true, ) .await } pub async fn core_search( &self, request: Arc<CoreSearchRequestBatch>, read_consistency: Option<ReadConsistency>, local_only: bool, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<Vec<ScoredPoint>>> { self.execute_and_resolve_read_operation( |shard| { let request = Arc::clone(&request); let search_runtime = self.search_runtime.clone(); let hw_measurement_acc_clone = hw_measurement_acc.clone(); async move { shard .core_search(request, &search_runtime, timeout, hw_measurement_acc_clone) .await } .boxed() }, read_consistency, local_only, ) .await } pub async fn count( &self, request: Arc<CountRequestInternal>, read_consistency: Option<ReadConsistency>, timeout: Option<Duration>, local_only: bool, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<CountResult> { self.execute_and_resolve_read_operation( |shard| { let request = request.clone(); let search_runtime = self.search_runtime.clone(); let hw_measurement_acc_clone = hw_measurement_acc.clone(); async move { shard .count(request, &search_runtime, timeout, hw_measurement_acc_clone) .await } .boxed() }, read_consistency, local_only, ) .await } #[allow(clippy::too_many_arguments)] pub async fn retrieve( &self, request: Arc<PointRequestInternal>, with_payload: &WithPayload, with_vector: &WithVector, read_consistency: Option<ReadConsistency>, timeout: Option<Duration>, local_only: bool, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<RecordInternal>> { let with_payload = Arc::new(with_payload.clone()); let with_vector = Arc::new(with_vector.clone()); self.execute_and_resolve_read_operation( |shard| { let request = request.clone(); let with_payload = with_payload.clone(); let with_vector = with_vector.clone(); let search_runtime = self.search_runtime.clone(); let hw_acc = 
hw_measurement_acc.clone(); async move { shard .retrieve( request, &with_payload, &with_vector, &search_runtime, timeout, hw_acc, ) .await } .boxed() }, read_consistency, local_only, ) .await } pub async fn info(&self, local_only: bool) -> CollectionResult<CollectionInfo> { self.execute_read_operation( |shard| async move { shard.info().await }.boxed(), local_only, ) .await } pub async fn count_local( &self, request: Arc<CountRequestInternal>, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Option<CountResult>> { let local = self.local.read().await; match &*local { None => Ok(None), Some(shard) => { let search_runtime = self.search_runtime.clone(); Ok(Some( shard .get() .count(request, &search_runtime, timeout, hw_measurement_acc) .await?, )) } } } pub async fn query_batch( &self, requests: Arc<Vec<ShardQueryRequest>>, read_consistency: Option<ReadConsistency>, local_only: bool, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<ShardQueryResponse>> { self.execute_and_resolve_read_operation( |shard| { let requests = Arc::clone(&requests); let search_runtime = self.search_runtime.clone(); let hw_measurement_acc_clone = hw_measurement_acc.clone(); async move { shard .query_batch(requests, &search_runtime, timeout, hw_measurement_acc_clone) .await } .boxed() }, read_consistency, local_only, ) .await } pub async fn facet( &self, request: Arc<FacetParams>, read_consistency: Option<ReadConsistency>, local_only: bool, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<FacetResponse> { self.execute_and_resolve_read_operation( |shard| { let request = request.clone(); let search_runtime = self.search_runtime.clone(); let hw_acc = hw_measurement_acc.clone(); async move { shard.facet(request, &search_runtime, timeout, hw_acc).await }.boxed() }, read_consistency, local_only, ) .await } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/profiling/interface.rs
lib/collection/src/profiling/interface.rs
use tokio::runtime::Handle; use tokio::sync::OnceCell; use crate::operations::loggable::Loggable; use crate::profiling::slow_requests_collector::{MIN_SLOW_REQUEST_DURATION, RequestProfileMessage}; use crate::profiling::slow_requests_log::LogEntry; static REQUESTS_COLLECTOR: OnceCell<crate::profiling::slow_requests_collector::RequestsCollector> = OnceCell::const_new(); /// This function should be used to log request profiles into the shared log structure. /// This structure is later can be read via API. pub fn log_request_to_collector<F, L>( collection_name: impl Into<String>, duration: std::time::Duration, get_request: F, ) where F: FnOnce() -> L, L: Loggable + Sync + Send + 'static, { if duration < MIN_SLOW_REQUEST_DURATION { return; } if let Some(listener) = REQUESTS_COLLECTOR.get() { let message = RequestProfileMessage::new(Box::new(get_request()), duration, collection_name.into()); listener.send_if_available(message); } else { log::warn!("SlowRequestsListener is not initialized"); } } /// This function initializes a global listener for slow requests channel /// /// It should be called once during the application startup with a valid Tokio runtime handle /// to spawn the listener task. pub fn init_requests_profile_collector(runtime: Handle) { runtime.spawn(async move { REQUESTS_COLLECTOR .get_or_init(async || { let (listener, receiver) = crate::profiling::slow_requests_collector::RequestsCollector::new(); let log = listener.get_log(); tokio::spawn( crate::profiling::slow_requests_collector::RequestsCollector::run( log, receiver, ), ); listener }) .await; }); } /// Read current log of slow requests with associated data. pub async fn get_requests_profile_log( limit: usize, method_name_substr: Option<&str>, ) -> Vec<LogEntry> { let listener = REQUESTS_COLLECTOR.get(); if let Some(listener) = listener { listener .get_log() .read() .await .get_log_entries(limit, method_name_substr) } else { log::warn!("RequestsCollector is not initialized"); vec![] } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/profiling/slow_requests_collector.rs
lib/collection/src/profiling/slow_requests_collector.rs
use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; use chrono::{DateTime, Utc}; use tokio::sync::RwLock; use crate::operations::loggable::Loggable; use crate::profiling::slow_requests_log::SlowRequestsLog; /// Logger should ignore everything below this threshold pub const MIN_SLOW_REQUEST_DURATION: std::time::Duration = std::time::Duration::from_millis(50); /// Message, used to communicate between main application and profile listener. /// This is not supposed to be exposed to the users directly, use helper functions instead. pub struct RequestProfileMessage { request: Box<dyn Loggable + Send + Sync>, duration: std::time::Duration, collection_name: String, datetime: DateTime<Utc>, } impl RequestProfileMessage { pub fn new( request: Box<dyn Loggable + Send + Sync>, duration: std::time::Duration, collection_name: String, ) -> Self { RequestProfileMessage { request, duration, collection_name, datetime: Utc::now(), } } } /// This structure is responsible for listening to slow requests and logging them, if needed. /// It is supposed to be a singleton in the application and run in a separate future. 
pub struct RequestsCollector { log: Arc<RwLock<SlowRequestsLog>>, sender: tokio::sync::mpsc::Sender<RequestProfileMessage>, } /// Number of top slow requests to keep in the log /// per request type (method name) const MAX_REQUESTS_LOGGED: usize = 32; const QUEUE_CAPACITY: usize = 64; /// Rate-limit interval for warning logs (seconds) const WARN_INTERVAL_SECS: u64 = 10; /// Last time a send warning was emitted (unix seconds) static LAST_SEND_WARN: AtomicU64 = AtomicU64::new(0); impl RequestsCollector { pub fn new() -> (Self, tokio::sync::mpsc::Receiver<RequestProfileMessage>) { let log = SlowRequestsLog::new(MAX_REQUESTS_LOGGED); let (sender, receiver) = tokio::sync::mpsc::channel(QUEUE_CAPACITY); ( RequestsCollector { log: Arc::new(RwLock::new(log)), sender, }, receiver, ) } pub fn get_log(&self) -> Arc<RwLock<SlowRequestsLog>> { self.log.clone() } pub fn send_if_available(&self, message: RequestProfileMessage) { self.sender.try_send(message).unwrap_or_else(|err| { let now = SystemTime::now() .duration_since(UNIX_EPOCH) .map(|d| d.as_secs()) .unwrap_or_else(|_| 0); // Atomically update if enough time has passed let updated = LAST_SEND_WARN.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |prev| { if now.saturating_sub(prev) >= WARN_INTERVAL_SECS { Some(now) } else { None } }); if updated.is_err() { return; } log::warn!("Failed to send message: {err}"); }) } pub async fn run( log: Arc<RwLock<SlowRequestsLog>>, receiver: tokio::sync::mpsc::Receiver<RequestProfileMessage>, ) { let mut receiver = receiver; while let Some(message) = receiver.recv().await { let RequestProfileMessage { request, duration, collection_name, datetime, } = message; log.write() .await .log_request(&collection_name, duration, datetime, request.as_ref()); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/profiling/slow_requests_log.rs
lib/collection/src/profiling/slow_requests_log.rs
use std::hash::{Hash, Hasher}; use std::time::Duration; use ahash::AHashMap; use chrono::{DateTime, Utc}; use common::fixed_length_priority_queue::FixedLengthPriorityQueue; use count_min_sketch::CountMinSketch64; use itertools::Itertools; use schemars::JsonSchema; use serde::Serialize; use crate::operations::loggable::Loggable; #[derive(Serialize, PartialEq, Eq, Clone, JsonSchema)] pub struct LogEntry { collection_name: String, #[serde(serialize_with = "duration_as_seconds")] duration: Duration, datetime: DateTime<Utc>, request_name: &'static str, approx_count: usize, request_body: serde_json::Value, /// Used for fast comparison and lookup #[serde(skip)] content_hash: u64, } impl LogEntry { pub fn new( collection_name: String, duration: Duration, datetime: DateTime<Utc>, request_name: &'static str, request_body: serde_json::Value, content_hash: u64, // Pre-computed content hash ) -> Self { LogEntry { collection_name, duration, datetime, request_name, approx_count: 1, request_body, content_hash, } } pub fn upd_counter(&mut self, count: usize) { self.approx_count = count; } } fn duration_as_seconds<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { serializer.serialize_f64(duration.as_millis() as f64 / 1000.0) } impl PartialOrd for LogEntry { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for LogEntry { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.duration.cmp(&other.duration) } } pub struct SlowRequestsLog { log_priority_queue: AHashMap<&'static str, FixedLengthPriorityQueue<LogEntry>>, counters: CountMinSketch64<u64>, max_entries: usize, } impl SlowRequestsLog { pub fn new(max_entries: usize) -> Self { SlowRequestsLog { log_priority_queue: Default::default(), counters: CountMinSketch64::new(1024, 0.95, 0.1).unwrap(), // 95% probability, 10% tolerance max_entries, } } /// Try insert an entry into the log in the way, that it is not duplicated by content. 
fn try_insert_dedup(&mut self, entry: LogEntry) -> Option<LogEntry> { let queue = self .log_priority_queue .entry(entry.request_name) .or_insert_with(|| FixedLengthPriorityQueue::new(self.max_entries)); let duplicate = queue.iter_unsorted().find(|e| { e.content_hash == entry.content_hash // Fast check }); if let Some(duplicate) = duplicate { if duplicate.duration >= entry.duration { // Existing record took longer, keep it None } else { // New record took longer, replace existing record queue.retain(|e| e.content_hash != entry.content_hash); queue.push(entry) } } else { // just insert queue.push(entry) } } fn inc_counter(&mut self, content_hash: u64) { self.counters.increment(&content_hash); } fn content_hash(request_hash: u64, collection_name: &str) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); request_hash.hash(&mut hasher); collection_name.hash(&mut hasher); hasher.finish() } /// Try to log a request if the log. /// If proposed log is slower than the fastest logged request, it will be kept in the log. /// Otherwise, it will be ignored. /// /// Returns the log entry that was removed from the log, if any. 
pub fn log_request( &mut self, collection_name: &str, duration: Duration, datetime: DateTime<Utc>, request: &dyn Loggable, ) -> Option<LogEntry> { let content_hash = Self::content_hash(request.request_hash(), collection_name); self.inc_counter(content_hash); let queue = self .log_priority_queue .entry(request.request_name()) .or_insert_with(|| FixedLengthPriorityQueue::new(self.max_entries)); if !queue.is_full() { let entry = LogEntry::new( collection_name.to_string(), duration, datetime, request.request_name(), request.to_log_value(), content_hash, ); return self.try_insert_dedup(entry); } // Check if we can insert into the queue before actually serializing the request // Safety: unwrap is safe because we checked that the queue is full let fastest_logged = queue.top().unwrap(); if duration <= fastest_logged.duration { // Our queue is already slower than this request return None; } let entry = LogEntry::new( collection_name.to_string(), duration, datetime, request.request_name(), request.to_log_value(), content_hash, ); self.try_insert_dedup(entry) } pub fn get_log_entries(&self, limit: usize, method_name_substr: Option<&str>) -> Vec<LogEntry> { self.log_priority_queue .iter() .filter(|(key, _value)| { if let Some(substr) = &method_name_substr { key.contains(substr) } else { true } }) .flat_map(|(_key, queue)| queue.iter_unsorted()) .sorted_by(|a, b| b.cmp(a)) .take(limit) .cloned() .map(|mut entry| { let approx_count = self.counters.estimate(&entry.content_hash); entry.upd_counter(approx_count as usize); entry }) .collect() } } #[cfg(test)] mod tests { use std::time::Duration; use serde_json::{Value, json}; use super::*; struct DummyLoggable; impl Loggable for DummyLoggable { fn to_log_value(&self) -> Value { json!({"dummy": true}) } fn request_name(&self) -> &'static str { "dummy" } fn request_hash(&self) -> u64 { 42 } } #[test] fn test_get_slow_requests_returns_all_logged() { let mut log = SlowRequestsLog::new(3); let request = DummyLoggable; 
log.log_request("col1", Duration::from_secs(1), Utc::now(), &request); log.log_request("col2", Duration::from_secs(2), Utc::now(), &request); log.log_request("col3", Duration::from_secs(3), Utc::now(), &request); let entries = log.get_log_entries(10, None); assert_eq!(entries.len(), 3); let evicted = log.log_request("col4", Duration::from_secs(4), Utc::now(), &request); assert!(evicted.is_some()); let evicted = evicted.unwrap(); assert_eq!(evicted.collection_name, "col1"); let entries = log.get_log_entries(10, None); assert_eq!(entries.len(), 3); let evicted = log.log_request("col5", Duration::from_secs(1), Utc::now(), &request); assert!(evicted.is_none()); let entries = log.get_log_entries(10, None); assert_eq!(entries.len(), 3); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/profiling/mod.rs
lib/collection/src/profiling/mod.rs
pub mod interface; mod slow_requests_collector; pub mod slow_requests_log;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/update_workers/optimization_worker.rs
lib/collection/src/update_workers/optimization_worker.rs
use std::collections::HashSet; use std::panic::AssertUnwindSafe; use std::path::Path; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::time::Duration; use common::budget::ResourceBudget; use common::counter::hardware_counter::HardwareCounterCell; use common::panic; use common::save_on_disk::SaveOnDisk; use parking_lot::Mutex; use segment::common::operation_error::{OperationError, OperationResult}; use segment::index::hnsw_index::num_rayon_threads; use segment::types::QuantizationConfig; use shard::payload_index_schema::PayloadIndexSchema; use shard::segment_holder::LockedSegmentHolder; use tokio::sync::Mutex as TokioMutex; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::task; use tokio::task::JoinHandle; use tokio::time::error::Elapsed; use tokio::time::timeout; use crate::collection_manager::collection_updater::CollectionUpdater; use crate::collection_manager::optimizers::segment_optimizer::OptimizerThresholds; use crate::collection_manager::optimizers::{Tracker, TrackerLog, TrackerStatus}; use crate::common::stoppable_task::{StoppableTaskHandle, spawn_stoppable}; use crate::config::CollectionParams; use crate::operations::types::{CollectionError, CollectionResult}; use crate::shards::update_tracker::UpdateTracker; use crate::update_handler::{Optimizer, OptimizerSignal}; use crate::update_workers::UpdateWorkers; use crate::wal_delta::LockedWal; /// Interval at which the optimizer worker cleans up old optimization handles /// /// The longer the duration, the longer it takes for panicked tasks to be reported. 
const OPTIMIZER_CLEANUP_INTERVAL: Duration = Duration::from_secs(5); impl UpdateWorkers { #[allow(clippy::too_many_arguments)] pub async fn optimization_worker_fn( optimizers: Arc<Vec<Arc<Optimizer>>>, sender: Sender<OptimizerSignal>, mut receiver: Receiver<OptimizerSignal>, segments: LockedSegmentHolder, wal: LockedWal, optimization_handles: Arc<TokioMutex<Vec<StoppableTaskHandle<bool>>>>, optimizers_log: Arc<Mutex<TrackerLog>>, total_optimized_points: Arc<AtomicUsize>, optimizer_resource_budget: ResourceBudget, max_handles: Option<usize>, has_triggered_optimizers: Arc<AtomicBool>, payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>, update_operation_lock: Arc<tokio::sync::RwLock<()>>, update_tracker: UpdateTracker, ) { let max_handles = max_handles.unwrap_or(usize::MAX); let max_indexing_threads = optimizers .first() .map(|optimizer| optimizer.hnsw_config().max_indexing_threads) .unwrap_or_default(); // Asynchronous task to trigger optimizers once CPU budget is available again let mut resource_available_trigger: Option<JoinHandle<()>> = None; loop { let result = timeout(OPTIMIZER_CLEANUP_INTERVAL, receiver.recv()).await; let cleaned_any = Self::cleanup_optimization_handles(optimization_handles.clone()).await; // Either continue below here with the worker, or reloop/break // Decision logic doing one of three things: // 1. run optimizers // 2. reloop and wait for next signal // 3. break here and stop the optimization worker let ignore_max_handles = match result { // Regular optimizer signal: run optimizers: do 1 Ok(Some(OptimizerSignal::Operation(_))) => false, // Optimizer signal ignoring max handles: do 1 Ok(Some(OptimizerSignal::Nop)) => true, // Hit optimizer cleanup interval, did clean up a task: do 1 Err(Elapsed { .. }) if cleaned_any => { // This branch prevents a race condition where optimizers would get stuck // If the optimizer cleanup interval was triggered and we did clean any task we // must run optimizers now. 
If we don't there may not be any other ongoing // tasks that'll trigger this for us. If we don't run optimizers here we might // get stuck into yellow state until a new update operation is received. // See: <https://github.com/qdrant/qdrant/pull/5111> log::warn!( "Cleaned an optimization handle after timeout, explicitly triggering optimizers", ); true } // Hit optimizer cleanup interval, did not clean up a task: do 2 Err(Elapsed { .. }) => continue, // Channel closed or received stop signal: do 3 Ok(None | Some(OptimizerSignal::Stop)) => break, }; has_triggered_optimizers.store(true, Ordering::Relaxed); // Ensure we have at least one appendable segment with enough capacity // Source required parameters from first optimizer if let Some(optimizer) = optimizers.first() { let result = Self::ensure_appendable_segment_with_capacity( &segments, optimizer.segments_path(), &optimizer.collection_params(), optimizer.quantization_config().as_ref(), optimizer.threshold_config(), payload_index_schema.clone(), ); if let Err(err) = result { log::error!( "Failed to ensure there are appendable segments with capacity: {err}" ); panic!("Failed to ensure there are appendable segments with capacity: {err}"); } } // If not forcing, wait on next signal if we have too many handles if !ignore_max_handles && optimization_handles.lock().await.len() >= max_handles { continue; } if Self::try_recover( segments.clone(), wal.clone(), update_operation_lock.clone(), update_tracker.clone(), ) .await .is_err() { continue; } // Continue if we have enough resource budget available to start an optimization // Otherwise skip now and start a task to trigger the optimizer again once resource // budget becomes available let desired_cpus = 0; let desired_io = num_rayon_threads(max_indexing_threads); if !optimizer_resource_budget.has_budget(desired_cpus, desired_io) { let trigger_active = resource_available_trigger .as_ref() .is_some_and(|t| !t.is_finished()); if !trigger_active { 
resource_available_trigger.replace( Self::trigger_optimizers_on_resource_budget( optimizer_resource_budget.clone(), desired_cpus, desired_io, sender.clone(), ), ); } continue; } // Determine optimization handle limit based on max handles we allow // Not related to the CPU budget, but a different limit for the maximum number // of concurrent concrete optimizations per shard as configured by the user in // the Qdrant configuration. // Skip if we reached limit, an ongoing optimization that finishes will trigger this loop again let limit = max_handles.saturating_sub(optimization_handles.lock().await.len()); if limit == 0 { log::trace!("Skipping optimization check, we reached optimization thread limit"); continue; } Self::process_optimization( optimizers.clone(), segments.clone(), optimization_handles.clone(), optimizers_log.clone(), total_optimized_points.clone(), &optimizer_resource_budget, sender.clone(), limit, ) .await; } } /// Cleanup finalized optimization task handles /// /// This finds and removes completed tasks from our list of optimization handles. /// It also propagates any panics (and unknown errors) so we properly handle them if desired. /// /// It is essential to call this every once in a while for handling panics in time. /// /// Returns true if any optimization handle was finished, joined and removed. 
async fn cleanup_optimization_handles( optimization_handles: Arc<TokioMutex<Vec<StoppableTaskHandle<bool>>>>, ) -> bool { // Remove finished handles let finished_handles: Vec<_> = { let mut handles = optimization_handles.lock().await; (0..handles.len()) .filter(|i| handles[*i].is_finished()) .collect::<Vec<_>>() .into_iter() .rev() .map(|i| handles.swap_remove(i)) .collect() }; let finished_any = !finished_handles.is_empty(); for handle in finished_handles { handle.join().await; } finished_any } #[allow(clippy::too_many_arguments)] pub(crate) async fn process_optimization( optimizers: Arc<Vec<Arc<Optimizer>>>, segments: LockedSegmentHolder, optimization_handles: Arc<TokioMutex<Vec<StoppableTaskHandle<bool>>>>, optimizers_log: Arc<Mutex<TrackerLog>>, total_optimized_points: Arc<AtomicUsize>, optimizer_resource_budget: &ResourceBudget, sender: Sender<OptimizerSignal>, limit: usize, ) { let mut new_handles = Self::launch_optimization( optimizers.clone(), optimizers_log, total_optimized_points, optimizer_resource_budget, segments.clone(), move || { // After optimization is finished, we still need to check if there are // some further optimizations possible. // If receiver is already dead - we do not care. 
// If channel is full - optimization will be triggered by some other signal let _ = sender.try_send(OptimizerSignal::Nop); }, Some(limit), ); let mut handles = optimization_handles.lock().await; handles.append(&mut new_handles); } /// Checks conditions for all optimizers until there is no suggested segment /// Starts a task for each optimization /// Returns handles for started tasks pub(crate) fn launch_optimization<F>( optimizers: Arc<Vec<Arc<Optimizer>>>, optimizers_log: Arc<Mutex<TrackerLog>>, total_optimized_points: Arc<AtomicUsize>, optimizer_resource_budget: &ResourceBudget, segments: LockedSegmentHolder, callback: F, limit: Option<usize>, ) -> Vec<StoppableTaskHandle<bool>> where F: Fn() + Send + Clone + Sync + 'static, { let mut scheduled_segment_ids = HashSet::<_>::default(); let mut handles = vec![]; let is_optimization_failed = Arc::new(AtomicBool::new(false)); 'outer: for optimizer in optimizers.iter() { // Loop until there are no more segments to with given optimizer loop { // Return early if we reached the optimization job limit if limit.map(|extra| handles.len() >= extra).unwrap_or(false) { log::trace!("Reached optimization job limit, postponing other optimizations"); break 'outer; } // If optimization failed, we should not endlessly try to optimize same segments if is_optimization_failed.load(Ordering::Relaxed) { log::debug!("Skipping further optimizations due to previous failure"); break 'outer; } let nonoptimal_segment_ids = optimizer.check_condition(segments.clone(), &scheduled_segment_ids); if nonoptimal_segment_ids.is_empty() { break; } log::debug!( "Optimizer '{}' running on segments: {:?}", optimizer.name(), &nonoptimal_segment_ids ); // Determine how many Resources we prefer for optimization task, acquire permit for it // And use same amount of IO threads as CPUs let max_indexing_threads = optimizer.hnsw_config().max_indexing_threads; let desired_io = num_rayon_threads(max_indexing_threads); let Some(mut permit) = 
optimizer_resource_budget.try_acquire(0, desired_io) else { // If there is no Resource budget, break outer loop and return early // If we have no handles (no optimizations) trigger callback so that we wake up // our optimization worker to try again later, otherwise it could get stuck log::trace!( "No available IO permit for {} optimizer, postponing", optimizer.name(), ); if handles.is_empty() { callback(); } break 'outer; }; log::trace!( "Acquired {} IO permit for {} optimizer", permit.num_io, optimizer.name(), ); let permit_callback = callback.clone(); permit.set_on_manual_release(move || { // Notify scheduler that resource budget is explicitly changed permit_callback(); }); let callback = callback.clone(); let optimizer = optimizer.clone(); let optimizers_log = optimizers_log.clone(); let total_optimized_points = total_optimized_points.clone(); let segments = segments.clone(); let nsi = nonoptimal_segment_ids.clone(); scheduled_segment_ids.extend(&nsi); let is_optimization_failed = is_optimization_failed.clone(); let resource_budget = optimizer_resource_budget.clone(); // Track optimizer status let (tracker, progress) = Tracker::start(optimizer.as_ref().name(), nsi.clone()); let tracker_handle = tracker.handle(); let handle = spawn_stoppable(move |stopped| { let result = std::panic::catch_unwind(AssertUnwindSafe(|| { optimizer.as_ref().optimize( segments.clone(), nsi, permit, resource_budget, stopped, progress, Box::new(move || { // Do not clutter the log with early cancelled optimizations, // wait for `on_successful_start` instead. 
optimizers_log.lock().register(tracker); }), ) })); let is_optimized; let status; let reported_error; match result { // Success Ok(Ok(optimized_points)) => { is_optimized = optimized_points > 0; status = TrackerStatus::Done; reported_error = None; total_optimized_points.fetch_add(optimized_points, Ordering::Relaxed); callback(); } // Cancelled Ok(Err(CollectionError::Cancelled { description })) => { is_optimized = false; log::debug!("Optimization cancelled - {description}"); status = TrackerStatus::Cancelled(description); reported_error = None; } // `optimize()` returned Result::Err Ok(Err(error)) => { is_optimized = false; status = TrackerStatus::Error(error.to_string()); log::error!("Optimization error: {error}"); reported_error = Some(error); } // `optimize()` panicked Err(panic_payload) => { let message = panic::downcast_str(&panic_payload).unwrap_or(""); let separator = if !message.is_empty() { ": " } else { "" }; let status_msg = format!("Optimization task panicked{separator}{message}"); is_optimized = false; status = TrackerStatus::Error(status_msg.clone()); reported_error = Some(CollectionError::service_error(status_msg)); log::warn!( "Optimization task panicked, collection may be in unstable state\ {separator}{message}" ); } } tracker_handle.update(status); if let Some(reported_error) = reported_error { segments.write().report_optimizer_error(reported_error); is_optimization_failed.store(true, Ordering::Relaxed); } is_optimized }); handles.push(handle); } } handles } /// Ensure there is at least one appendable segment with enough capacity /// /// If there is no appendable segment, or all are at or over capacity, a new empty one is /// created. /// /// Capacity is determined based on `optimizers.max_segment_size_kb`. 
pub fn ensure_appendable_segment_with_capacity( segments: &LockedSegmentHolder, segments_path: &Path, collection_params: &CollectionParams, collection_quantization: Option<&QuantizationConfig>, thresholds_config: &OptimizerThresholds, payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>, ) -> OperationResult<()> { let no_segment_with_capacity = { let segments_read = segments.read(); segments_read .appendable_segments_ids() .into_iter() .filter_map(|segment_id| segments_read.get(segment_id)) .all(|segment| { let max_vector_size_bytes = segment .get() .read() .max_available_vectors_size_in_bytes() .unwrap_or_default(); let max_segment_size_bytes = thresholds_config .max_segment_size_kb .saturating_mul(segment::common::BYTES_IN_KB); max_vector_size_bytes >= max_segment_size_bytes }) }; if no_segment_with_capacity { log::debug!("Creating new appendable segment, all existing segments are over capacity"); let segment_config = collection_params .to_base_segment_config(collection_quantization) .map_err(|err| OperationError::service_error(err.to_string()))?; segments.write().create_appendable_segment( segments_path, segment_config, payload_index_schema, )?; } Ok(()) } /// Trigger optimizers when CPU budget is available fn trigger_optimizers_on_resource_budget( optimizer_resource_budget: ResourceBudget, desired_cpus: usize, desired_io: usize, sender: Sender<OptimizerSignal>, ) -> JoinHandle<()> { task::spawn(async move { log::trace!("Skipping optimization checks, waiting for CPU budget to be available"); optimizer_resource_budget .notify_on_budget_available(desired_cpus, desired_io) .await; log::trace!("Continue optimization checks, new CPU budget available"); // Trigger optimizers with Nop operation sender.send(OptimizerSignal::Nop).await.unwrap_or_else(|_| { log::info!("Can't notify optimizers, assume process is dead. Restart is required") }); }) } /// Checks if there are any failed operations. /// If so - attempts to re-apply all failed operations. 
async fn try_recover( segments: LockedSegmentHolder, wal: LockedWal, update_operation_lock: Arc<tokio::sync::RwLock<()>>, update_tracker: UpdateTracker, ) -> CollectionResult<usize> { // Try to re-apply everything starting from the first failed operation let first_failed_operation_option = segments.read().failed_operation.iter().cloned().min(); match first_failed_operation_option { None => {} Some(first_failed_op) => { let wal_lock = wal.lock().await; for (op_num, operation) in wal_lock.read(first_failed_op) { CollectionUpdater::update( &segments, op_num, operation.operation, update_operation_lock.clone(), update_tracker.clone(), &HardwareCounterCell::disposable(), // Internal operation, no measurement needed )?; } } }; Ok(0) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/update_workers/flush_workers.rs
lib/collection/src/update_workers/flush_workers.rs
use std::cmp::min; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicU64; use std::time::Duration; use common::panic; use segment::common::operation_error::OperationResult; use segment::types::SeqNumberType; use shard::segment_holder::LockedSegmentHolder; use shard::wal::WalError; use tokio::sync::oneshot; use crate::shards::local_shard::LocalShardClocks; use crate::update_workers::UpdateWorkers; use crate::wal_delta::LockedWal; impl UpdateWorkers { /// Returns confirmed version after flush of all segments /// /// # Errors /// Returns an error on flush failure fn flush_segments(segments: LockedSegmentHolder) -> OperationResult<SeqNumberType> { let read_segments = segments.read(); let flushed_version = read_segments.flush_all(false, false)?; Ok(match read_segments.failed_operation.iter().cloned().min() { None => flushed_version, Some(failed_operation) => min(failed_operation, flushed_version), }) } #[allow(clippy::too_many_arguments)] fn flush_worker_internal( segments: LockedSegmentHolder, wal: LockedWal, wal_keep_from: Arc<AtomicU64>, clocks: LocalShardClocks, shard_path: PathBuf, ) { log::trace!("Attempting flushing"); let wal_flush_job = wal.blocking_lock().flush_async(); let wal_flush_res = match wal_flush_job.join() { Ok(Ok(())) => Ok(()), Ok(Err(err)) => Err(WalError::WriteWalError(format!( "failed to flush WAL: {err}" ))), Err(panic) => { let message = panic::downcast_str(&panic).unwrap_or(""); let separator = if !message.is_empty() { ": " } else { "" }; Err(WalError::WriteWalError(format!( "failed to flush WAL: flush task panicked{separator}{message}" ))) } }; if let Err(err) = wal_flush_res { log::error!("{err}"); segments.write().report_optimizer_error(err); return; } let confirmed_version = Self::flush_segments(segments.clone()); let confirmed_version = match confirmed_version { Ok(version) => version, Err(err) => { log::error!("Failed to flush: {err}"); segments.write().report_optimizer_error(err); return; } }; // Acknowledge 
confirmed version in WAL, but don't acknowledge the specified // `keep_from` index or higher. // This is to prevent truncating WAL entries that other bits of code still depend on // such as the queue proxy shard. // Default keep_from is `u64::MAX` to allow acknowledging all confirmed. let keep_from = wal_keep_from.load(std::sync::atomic::Ordering::Relaxed); // If we should keep the first message, do not acknowledge at all if keep_from == 0 { return; } let ack = confirmed_version.min(keep_from.saturating_sub(1)); if let Err(err) = clocks.store_if_changed(&shard_path) { log::warn!("Failed to store clock maps to disk: {err}"); segments.write().report_optimizer_error(err); } if let Err(err) = wal.blocking_lock().ack(ack) { log::warn!("Failed to acknowledge WAL version: {err}"); segments.write().report_optimizer_error(err); } } #[allow(clippy::too_many_arguments)] pub async fn flush_worker_fn( segments: LockedSegmentHolder, wal: LockedWal, wal_keep_from: Arc<AtomicU64>, clocks: LocalShardClocks, flush_interval_sec: u64, mut stop_receiver: oneshot::Receiver<()>, shard_path: PathBuf, ) { loop { tokio::select! { biased; // Stop flush worker on signal or if sender was dropped _ = &mut stop_receiver => { log::debug!("Stopping flush worker for shard {}", shard_path.display()); return; }, // Flush at the configured flush interval _ = tokio::time::sleep(Duration::from_secs(flush_interval_sec)) => {}, }; let segments_clone = segments.clone(); let wal_clone = wal.clone(); let wal_keep_from_clone = wal_keep_from.clone(); let clocks_clone = clocks.clone(); let shard_path_clone = shard_path.clone(); tokio::task::spawn_blocking(move || { Self::flush_worker_internal( segments_clone, wal_clone, wal_keep_from_clone, clocks_clone, shard_path_clone, ) }) .await .unwrap_or_else(|error| { log::error!("Flush worker failed: {error}",); }); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/update_workers/mod.rs
lib/collection/src/update_workers/mod.rs
pub mod flush_workers; mod optimization_worker; mod update_worker; pub struct UpdateWorkers {}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/update_workers/update_worker.rs
lib/collection/src/update_workers/update_worker.rs
use std::sync::Arc; use std::time::Instant; use common::counter::hardware_accumulator::HwMeasurementAcc; use segment::types::SeqNumberType; use shard::operations::CollectionUpdateOperations; use shard::segment_holder::LockedSegmentHolder; use tokio::sync::mpsc::{Receiver, Sender}; use crate::collection_manager::collection_updater::CollectionUpdater; use crate::operations::generalizer::Generalizer; use crate::operations::types::{CollectionError, CollectionResult}; use crate::profiling::interface::log_request_to_collector; use crate::shards::CollectionId; use crate::shards::update_tracker::UpdateTracker; use crate::update_handler::{OperationData, OptimizerSignal, UpdateSignal}; use crate::update_workers::UpdateWorkers; use crate::wal_delta::LockedWal; impl UpdateWorkers { #[allow(clippy::too_many_arguments)] pub async fn update_worker_fn( collection_name: CollectionId, mut receiver: Receiver<UpdateSignal>, optimize_sender: Sender<OptimizerSignal>, wal: LockedWal, segments: LockedSegmentHolder, update_operation_lock: Arc<tokio::sync::RwLock<()>>, update_tracker: UpdateTracker, ) { while let Some(signal) = receiver.recv().await { match signal { UpdateSignal::Operation(OperationData { op_num, operation, sender, wait, hw_measurements, }) => { let collection_name_clone = collection_name.clone(); let wal_clone = wal.clone(); let segments_clone = segments.clone(); let update_operation_lock_clone = update_operation_lock.clone(); let update_tracker_clone = update_tracker.clone(); let operation_result = tokio::task::spawn_blocking(move || { Self::update_worker_internal( collection_name_clone, operation, op_num, wait, wal_clone, segments_clone, update_operation_lock_clone, update_tracker_clone, hw_measurements, ) }) .await; let res = match operation_result { Ok(Ok(update_res)) => optimize_sender .send(OptimizerSignal::Operation(op_num)) .await .and(Ok(update_res)) .map_err(|send_err| send_err.into()), Ok(Err(err)) => Err(err), Err(err) => Err(CollectionError::from(err)), }; if 
let Some(feedback) = sender { feedback.send(res).unwrap_or_else(|_| { log::debug!("Can't report operation {op_num} result. Assume already not required"); }); }; } UpdateSignal::Stop => { optimize_sender .send(OptimizerSignal::Stop) .await .unwrap_or_else(|_| log::debug!("Optimizer already stopped")); break; } UpdateSignal::Nop => optimize_sender .send(OptimizerSignal::Nop) .await .unwrap_or_else(|_| { log::info!( "Can't notify optimizers, assume process is dead. Restart is required" ); }), UpdateSignal::Plunger(callback_sender) => { callback_sender.send(()).unwrap_or_else(|_| { log::debug!("Can't notify sender, assume nobody is waiting anymore"); }); } } } // Transmitter was destroyed optimize_sender .send(OptimizerSignal::Stop) .await .unwrap_or_else(|_| log::debug!("Optimizer already stopped")); } #[allow(clippy::too_many_arguments)] fn update_worker_internal( collection_name: CollectionId, operation: CollectionUpdateOperations, op_num: SeqNumberType, wait: bool, wal: LockedWal, segments: LockedSegmentHolder, update_operation_lock: Arc<tokio::sync::RwLock<()>>, update_tracker: UpdateTracker, hw_measurements: HwMeasurementAcc, ) -> CollectionResult<usize> { // If wait flag is set, explicitly flush WAL first if wait { wal.blocking_lock().flush().map_err(|err| { CollectionError::service_error(format!( "Can't flush WAL before operation {op_num} - {err}" )) })?; } let start_time = Instant::now(); // This represents the operation without vectors and payloads for logging purposes // Do not use for anything else let loggable_operation = operation.remove_details(); let result = CollectionUpdater::update( &segments, op_num, operation, update_operation_lock.clone(), update_tracker.clone(), &hw_measurements.get_counter_cell(), ); let duration = start_time.elapsed(); log_request_to_collector(&collection_name, duration, move || loggable_operation); result } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/problems/unindexed_field.rs
lib/collection/src/problems/unindexed_field.rs
use std::any::TypeId; use std::collections::{HashMap, HashSet}; use std::sync::OnceLock; use std::time::Duration; use http::header::CONTENT_TYPE; use http::{HeaderMap, HeaderValue, Method, Uri}; use issues::{Action, Code, ImmediateSolution, Issue, Solution}; use itertools::Itertools; use segment::common::operation_error::OperationError; use segment::data_types::index::{TextIndexParams, TextIndexType}; use segment::index::query_optimization::rescore_formula::parsed_formula::VariableId; use segment::json_path::JsonPath; use segment::types::{ AnyVariants, Condition, FieldCondition, Filter, Match, MatchValue, PayloadFieldSchema, PayloadKeyType, PayloadSchemaParams, PayloadSchemaType, RangeInterface, UuidPayloadType, }; use strum::{EnumIter, IntoEnumIterator as _}; use crate::operations::universal_query::formula::ExpressionInternal; #[derive(Debug)] pub struct UnindexedField { field_name: JsonPath, field_schemas: HashSet<PayloadFieldSchema>, collection_name: String, endpoint: Uri, instance_id: String, } /// Don't use this directly, use `UnindexedField::slow_query_threshold()` instead pub static SLOW_QUERY_THRESHOLD: OnceLock<Duration> = OnceLock::new(); impl UnindexedField { const DEFAULT_SLOW_QUERY_SECS: f32 = 1.2; pub fn slow_query_threshold() -> Duration { *SLOW_QUERY_THRESHOLD.get_or_init(|| Duration::from_secs_f32(Self::DEFAULT_SLOW_QUERY_SECS)) } pub fn get_instance_id(collection_name: &str, field_name: &JsonPath) -> String { format!("{collection_name}/{field_name}") } pub fn get_collection_name(code: &Code) -> &str { debug_assert!(code.issue_type == TypeId::of::<Self>()); code.instance_id.split('/').next().unwrap_or("") // Code format is always the same } /// Try to form an issue from a field condition and a collection name /// /// # Errors /// /// Will fail if the field condition cannot be used for inferring an appropriate schema. /// For example, when there is no index that can be built to improve performance. 
pub fn try_new( field_name: JsonPath, field_schemas: HashSet<PayloadFieldSchema>, collection_name: String, ) -> Result<Self, OperationError> { if field_schemas.is_empty() { return Err(OperationError::ValidationError { description: "Cannot create issue which won't have a solution".to_string(), }); } let encoded_collection_name = urlencoding::encode(&collection_name); let endpoint = match Uri::builder() .path_and_query(format!("/collections/{encoded_collection_name}/index").as_str()) .build() { Ok(uri) => uri, Err(e) => { log::trace!("Failed to build uri: {e}"); return Err(OperationError::ValidationError { description: "Bad collection name".to_string(), }); } }; let instance_id = Self::get_instance_id(&collection_name, &field_name); Ok(Self { field_name, field_schemas, collection_name, endpoint, instance_id, }) } pub fn submit_possible_suspects( filter: &Filter, payload_schema: &HashMap<PayloadKeyType, PayloadFieldSchema>, collection_name: String, ) { let unindexed_issues = IssueExtractor::new(filter, payload_schema, collection_name).into_issues(); log::trace!("Found unindexed issues: {unindexed_issues:#?}"); for issue in unindexed_issues { issue.submit(); } } } impl Issue for UnindexedField { fn instance_id(&self) -> &str { &self.instance_id } fn name() -> &'static str { "UNINDEXED_FIELD" } fn related_collection(&self) -> Option<String> { Some(self.collection_name.clone()) } fn description(&self) -> String { format!( "Unindexed field '{}' might be slowing queries down in collection '{}'", self.field_name, self.collection_name ) } fn solution(&self) -> Solution { let mut solutions = self.field_schemas.iter().map(|field_schema| { let request_body = serde_json::json!({ "field_name": self.field_name, "field_schema": field_schema, }) .as_object() .unwrap() .clone(); let headers = HeaderMap::from_iter([ (CONTENT_TYPE, HeaderValue::from_static("application/json")), ]); ImmediateSolution { message: format!( "Create an index on field '{}' of schema {} in collection '{}'. 
Check the documentation for more details: https://qdrant.tech/documentation/concepts/indexing/#payload-index", self.field_name, serde_json::to_string(&field_schema).unwrap(), self.collection_name ), action: Action { method: Method::PUT, uri: self.endpoint.clone(), headers, body: Some(request_body), }, } }).collect_vec(); match solutions.len() { 0 => unreachable!( "Cannot create a solution without a field schema, protected by try_new()" ), 1 => Solution::Immediate(Box::new(solutions.pop().unwrap())), _ => Solution::ImmediateChoice(solutions), } } } /// Suggest any index, let user choose depending on their data type fn all_indexes() -> impl Iterator<Item = FieldIndexType> { FieldIndexType::iter() } fn infer_index_from_match_value(value: &MatchValue) -> Vec<FieldIndexType> { match &value.value { segment::types::ValueVariants::String(string) => { let mut inferred = Vec::new(); if UuidPayloadType::parse_str(string).is_ok() { inferred.push(FieldIndexType::UuidMatch) } inferred.push(FieldIndexType::KeywordMatch); inferred } segment::types::ValueVariants::Integer(_integer) => { vec![FieldIndexType::IntMatch] } segment::types::ValueVariants::Bool(_boolean) => { vec![FieldIndexType::BoolMatch] } } } fn infer_index_from_any_variants(value: &AnyVariants) -> Vec<FieldIndexType> { match value { AnyVariants::Strings(strings) => { let mut inferred = Vec::new(); if strings .iter() .all(|s| UuidPayloadType::parse_str(s).is_ok()) { inferred.push(FieldIndexType::UuidMatch) } inferred.push(FieldIndexType::KeywordMatch); inferred } AnyVariants::Integers(_integers) => { vec![FieldIndexType::IntMatch] } } } fn infer_index_from_field_condition(field_condition: &FieldCondition) -> Vec<FieldIndexType> { let FieldCondition { key: _key, r#match, range, geo_bounding_box, geo_radius, geo_polygon, values_count, is_empty, is_null, } = field_condition; let mut required_indexes = Vec::new(); if let Some(r#match) = r#match { required_indexes.extend(match r#match { Match::Value(match_value) => 
infer_index_from_match_value(match_value), Match::Text(_match_text) => vec![FieldIndexType::Text], Match::Phrase(_match_text) => vec![FieldIndexType::TextPhrase], Match::Any(match_any) => infer_index_from_any_variants(&match_any.any), Match::Except(match_except) => infer_index_from_any_variants(&match_except.except), Match::TextAny(_match_text_any) => vec![FieldIndexType::Text], }) } if let Some(range_interface) = range { match range_interface { RangeInterface::DateTime(_) => { required_indexes.push(FieldIndexType::DatetimeRange); } RangeInterface::Float(_) => { required_indexes.push(FieldIndexType::FloatRange); required_indexes.push(FieldIndexType::IntRange); } } } if geo_bounding_box.is_some() || geo_radius.is_some() || geo_polygon.is_some() { required_indexes.push(FieldIndexType::Geo); } if values_count.is_some() || is_empty.is_some() || is_null.is_some() { // Any index will do, let user choose depending on their data type required_indexes.extend(all_indexes()); } required_indexes } pub struct IssueExtractor<'a> { extractor: Extractor<'a>, collection_name: String, } impl<'a> IssueExtractor<'a> { pub fn new( filter: &Filter, payload_schema: &'a HashMap<PayloadKeyType, PayloadFieldSchema>, collection_name: String, ) -> Self { let extractor = Extractor::new_eager(filter, payload_schema); Self { extractor, collection_name, } } fn into_issues(self) -> Vec<UnindexedField> { self.extractor .unindexed_schema .into_iter() .filter_map(|(key, field_schemas)| { let field_schemas: HashSet<_> = field_schemas .iter() .map(PayloadFieldSchema::kind) .filter(|kind| { let is_advanced = matches!(kind, PayloadSchemaType::Uuid); !is_advanced }) .map(PayloadFieldSchema::from) .collect(); UnindexedField::try_new(key, field_schemas, self.collection_name.clone()).ok() }) .collect() } } pub struct Extractor<'a> { payload_schema: &'a HashMap<PayloadKeyType, PayloadFieldSchema>, unindexed_schema: HashMap<PayloadKeyType, Vec<PayloadFieldSchema>>, } impl<'a> Extractor<'a> { /// Creates an 
extractor and eagerly extracts all unindexed fields from the provided filter. fn new_eager( filter: &Filter, payload_schema: &'a HashMap<PayloadKeyType, PayloadFieldSchema>, ) -> Self { let mut extractor = Self { payload_schema, unindexed_schema: HashMap::new(), }; extractor.update_from_filter(None, filter); extractor } /// Creates a new lazy 'Extractor'. It needs to call some update method to extract unindexed fields. pub fn new(payload_schema: &'a HashMap<PayloadKeyType, PayloadFieldSchema>) -> Self { Self { payload_schema, unindexed_schema: HashMap::new(), } } /// Current unindexed schema. pub fn unindexed_schema(&self) -> &HashMap<PayloadKeyType, Vec<PayloadFieldSchema>> { &self.unindexed_schema } /// Checks the filter for unindexed fields. fn update_from_filter(&mut self, nested_prefix: Option<&JsonPath>, filter: &Filter) { for condition in filter.iter_conditions() { self.update_from_condition(nested_prefix, condition); } } /// Checks the filter for an unindexed field, stops at the first one found. 
pub fn update_from_filter_once(&mut self, nested_prefix: Option<&JsonPath>, filter: &Filter) { for condition in filter.iter_conditions() { self.update_from_condition(nested_prefix, condition); if !self.unindexed_schema.is_empty() { break; } } } fn update_from_condition(&mut self, nested_prefix: Option<&JsonPath>, condition: &Condition) { let key; let required_index; match condition { Condition::Field(field_condition) => { key = &field_condition.key; required_index = infer_index_from_field_condition(field_condition); } Condition::Filter(filter) => { self.update_from_filter(nested_prefix, filter); return; } Condition::Nested(nested) => { self.update_from_filter( Some(&JsonPath::extend_or_new( nested_prefix, &nested.raw_key().array_key(), )), nested.filter(), ); return; } // Any index will suffice to get the satellite null index Condition::IsEmpty(is_empty) => { key = &is_empty.is_empty.key; required_index = all_indexes().collect(); } Condition::IsNull(is_null) => { key = &is_null.is_null.key; required_index = all_indexes().collect(); } // No index needed Condition::HasId(_) => return, Condition::CustomIdChecker(_) => return, Condition::HasVector(_) => return, }; let full_key = JsonPath::extend_or_new(nested_prefix, key); if self.needs_index(&full_key, &required_index) { let schemas = required_index.into_iter().map(PayloadFieldSchema::from); self.unindexed_schema .entry(full_key) .or_default() .extend(schemas); } } fn needs_index(&self, key: &JsonPath, required_indexes: &[FieldIndexType]) -> bool { match self.payload_schema.get(key) { Some(index_info) => { // check if the index present has the right capabilities let index_field_types = schema_capabilities(index_info); let already_indexed = required_indexes .iter() .any(|required| index_field_types.contains(required)); !already_indexed } None => true, } } pub fn update_from_expression(&mut self, expression: &ExpressionInternal) { let key; let required_index; match expression { ExpressionInternal::Constant(_) => return, 
ExpressionInternal::Variable(variable) => { // check if it is indexed with a numeric index let Ok(var) = variable.parse::<VariableId>() else { // If it fails here, it will also fail when parsing. return; }; match var { VariableId::Score(_) => return, VariableId::Payload(json_path) => { key = json_path; required_index = vec![ FieldIndexType::IntMatch, FieldIndexType::IntRange, FieldIndexType::FloatRange, ]; } VariableId::Condition(_) => return, } } ExpressionInternal::Condition(condition) => { self.update_from_condition(None, condition); return; } ExpressionInternal::GeoDistance { origin: _, to } => { key = to.clone(); required_index = vec![FieldIndexType::Geo]; } ExpressionInternal::Datetime(_) => return, ExpressionInternal::DatetimeKey(variable) => { key = variable.clone(); required_index = vec![FieldIndexType::DatetimeRange]; } ExpressionInternal::Mult(expression_internals) => { for expr in expression_internals { self.update_from_expression(expr); } return; } ExpressionInternal::Sum(expression_internals) => { for expr in expression_internals { self.update_from_expression(expr); } return; } ExpressionInternal::Neg(expression_internal) => { self.update_from_expression(expression_internal); return; } ExpressionInternal::Div { left, right, by_zero_default: _, } => { self.update_from_expression(left); self.update_from_expression(right); return; } ExpressionInternal::Sqrt(expression_internal) => { self.update_from_expression(expression_internal); return; } ExpressionInternal::Pow { base, exponent } => { self.update_from_expression(base); self.update_from_expression(exponent); return; } ExpressionInternal::Exp(expression_internal) => { self.update_from_expression(expression_internal); return; } ExpressionInternal::Log10(expression_internal) => { self.update_from_expression(expression_internal); return; } ExpressionInternal::Ln(expression_internal) => { self.update_from_expression(expression_internal); return; } ExpressionInternal::Abs(expression_internal) => { 
self.update_from_expression(expression_internal); return; } ExpressionInternal::Decay { kind: _, x, target, midpoint: _, scale: _, } => { self.update_from_expression(x); if let Some(t) = target.as_ref() { self.update_from_expression(t) }; return; } } if self.needs_index(&key, &required_index) { let schemas = required_index.into_iter().map(PayloadFieldSchema::from); self.unindexed_schema .entry(key) .or_default() .extend(schemas); } } } /// All types of internal indexes #[derive(Debug, Eq, PartialEq, EnumIter, Hash)] enum FieldIndexType { IntMatch, IntRange, KeywordMatch, FloatRange, Text, TextPhrase, BoolMatch, UuidMatch, UuidRange, DatetimeRange, Geo, } fn schema_capabilities(value: &PayloadFieldSchema) -> HashSet<FieldIndexType> { let mut index_types = HashSet::new(); match value { PayloadFieldSchema::FieldType(payload_schema_type) => match payload_schema_type { PayloadSchemaType::Keyword => index_types.insert(FieldIndexType::KeywordMatch), PayloadSchemaType::Integer => { index_types.insert(FieldIndexType::IntMatch); index_types.insert(FieldIndexType::IntRange) } PayloadSchemaType::Uuid => { index_types.insert(FieldIndexType::UuidMatch); index_types.insert(FieldIndexType::UuidRange) } PayloadSchemaType::Bool => index_types.insert(FieldIndexType::BoolMatch), PayloadSchemaType::Float => index_types.insert(FieldIndexType::FloatRange), PayloadSchemaType::Geo => index_types.insert(FieldIndexType::Geo), PayloadSchemaType::Text => index_types.insert(FieldIndexType::Text), PayloadSchemaType::Datetime => index_types.insert(FieldIndexType::DatetimeRange), }, PayloadFieldSchema::FieldParams(payload_schema_params) => match payload_schema_params { PayloadSchemaParams::Keyword(_) => index_types.insert(FieldIndexType::KeywordMatch), PayloadSchemaParams::Integer(integer_index_params) => { if integer_index_params.lookup.unwrap_or(true) { index_types.insert(FieldIndexType::IntMatch); } if integer_index_params.range.unwrap_or(true) { index_types.insert(FieldIndexType::IntRange); } 
debug_assert!( !index_types.is_empty(), "lookup or range must be true for Integer payload index", ); // unifying match arm types true } PayloadSchemaParams::Uuid(_) => { index_types.insert(FieldIndexType::UuidMatch); index_types.insert(FieldIndexType::UuidRange) } PayloadSchemaParams::Bool(_) => index_types.insert(FieldIndexType::BoolMatch), PayloadSchemaParams::Float(_) => index_types.insert(FieldIndexType::FloatRange), PayloadSchemaParams::Geo(_) => index_types.insert(FieldIndexType::Geo), PayloadSchemaParams::Text(TextIndexParams { phrase_matching, .. }) => { if phrase_matching.unwrap_or_default() { index_types.insert(FieldIndexType::TextPhrase); } index_types.insert(FieldIndexType::Text) } PayloadSchemaParams::Datetime(_) => index_types.insert(FieldIndexType::DatetimeRange), }, }; index_types } impl From<FieldIndexType> for PayloadFieldSchema { fn from(val: FieldIndexType) -> Self { match val { FieldIndexType::IntMatch => PayloadFieldSchema::FieldType(PayloadSchemaType::Integer), FieldIndexType::IntRange => PayloadFieldSchema::FieldType(PayloadSchemaType::Integer), FieldIndexType::KeywordMatch => { PayloadFieldSchema::FieldType(PayloadSchemaType::Keyword) } FieldIndexType::FloatRange => PayloadFieldSchema::FieldType(PayloadSchemaType::Float), FieldIndexType::Text => PayloadFieldSchema::FieldType(PayloadSchemaType::Text), FieldIndexType::TextPhrase => { PayloadFieldSchema::FieldParams(PayloadSchemaParams::Text(TextIndexParams { r#type: TextIndexType::Text, phrase_matching: Some(true), ..Default::default() })) } FieldIndexType::BoolMatch => PayloadFieldSchema::FieldType(PayloadSchemaType::Bool), FieldIndexType::UuidMatch => PayloadFieldSchema::FieldType(PayloadSchemaType::Uuid), FieldIndexType::UuidRange => PayloadFieldSchema::FieldType(PayloadSchemaType::Uuid), FieldIndexType::DatetimeRange => { PayloadFieldSchema::FieldType(PayloadSchemaType::Datetime) } FieldIndexType::Geo => PayloadFieldSchema::FieldType(PayloadSchemaType::Geo), } } } #[cfg(test)] mod tests { 
use segment::data_types::index::IntegerIndexParams; use super::*; #[test] fn integer_index_capacities() { let params = PayloadSchemaParams::Integer(IntegerIndexParams { lookup: Some(true), range: Some(true), ..Default::default() }); let schema = PayloadFieldSchema::FieldParams(params); let index_types = schema_capabilities(&schema); assert!(index_types.contains(&FieldIndexType::IntMatch)); assert!(index_types.contains(&FieldIndexType::IntRange)); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/problems/mod.rs
lib/collection/src/problems/mod.rs
pub mod unindexed_field; pub use unindexed_field::UnindexedField;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/grouping/builder.rs
lib/collection/src/grouping/builder.rs
use std::time::Duration; use common::counter::hardware_accumulator::HwMeasurementAcc; use futures::Future; use itertools::Itertools; use tokio::sync::RwLockReadGuard; use super::group_by::{GroupRequest, group_by}; use crate::collection::Collection; use crate::lookup::lookup_ids; use crate::lookup::types::PseudoId; use crate::operations::consistency_params::ReadConsistency; use crate::operations::shard_selector_internal::ShardSelectorInternal; use crate::operations::types::{CollectionError, CollectionResult, PointGroup}; /// Builds on top of the group_by function to add lookup and possibly other features pub struct GroupBy<'a, F, Fut> where F: Fn(String) -> Fut + Clone, Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>, { group_by: GroupRequest, collection: &'a Collection, /// `Fn` to get a collection having its name. Obligatory for recommend and lookup collection_by_name: F, read_consistency: Option<ReadConsistency>, shard_selection: ShardSelectorInternal, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, } impl<'a, F, Fut> GroupBy<'a, F, Fut> where F: Fn(String) -> Fut + Clone, Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>, { /// Creates a basic GroupBy builder pub fn new( group_by: GroupRequest, collection: &'a Collection, collection_by_name: F, hw_measurement_acc: HwMeasurementAcc, ) -> Self { Self { group_by, collection, collection_by_name, read_consistency: None, shard_selection: ShardSelectorInternal::All, timeout: None, hw_measurement_acc, } } pub fn set_read_consistency(mut self, read_consistency: Option<ReadConsistency>) -> Self { self.read_consistency = read_consistency; self } pub fn set_shard_selection(mut self, shard_selection: ShardSelectorInternal) -> Self { self.shard_selection = shard_selection; self } pub fn set_timeout(mut self, timeout: Option<Duration>) -> Self { self.timeout = timeout; self } /// Runs the group by operation, optionally with a timeout. 
pub async fn execute(self) -> CollectionResult<Vec<PointGroup>> { if let Some(timeout) = self.timeout { tokio::time::timeout(timeout, self.run()) .await .map_err(|_| { log::debug!("GroupBy timeout reached: {timeout:?}"); CollectionError::timeout(timeout, "GroupBy") })? } else { self.run().await } } /// Does the actual grouping async fn run(self) -> CollectionResult<Vec<PointGroup>> { let start = std::time::Instant::now(); let with_lookup = self.group_by.with_lookup.clone(); let core_group_by = self .group_by .into_query_group_request( self.collection, self.collection_by_name.clone(), self.read_consistency, self.shard_selection.clone(), self.timeout, self.hw_measurement_acc.clone(), ) .await?; let mut groups = group_by( core_group_by, self.collection, self.read_consistency, self.shard_selection.clone(), self.timeout, self.hw_measurement_acc.clone(), ) .await?; if let Some(lookup) = with_lookup { // update timeout let timeout = self .timeout .map(|timeout| timeout.saturating_sub(start.elapsed())); let mut lookups = { let pseudo_ids = groups .iter() .map(|group| group.id.clone()) .map_into() .collect(); lookup_ids( lookup, pseudo_ids, self.collection_by_name, self.read_consistency, &self.shard_selection, timeout, self.hw_measurement_acc.clone(), ) .await? }; // Put the lookups in their respective groups groups.iter_mut().for_each(|group| { group.lookup = lookups .remove(&PseudoId::from(group.id.clone())) .map(api::rest::Record::from); }); } Ok(groups) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/grouping/types.rs
lib/collection/src/grouping/types.rs
use ahash::AHashMap; use segment::data_types::groups::GroupId; use segment::json_path::JsonPath; use segment::types::{PointIdType, ScoredPoint}; use crate::operations::types::PointGroup; use crate::operations::universal_query::shard_query::ShardQueryRequest; #[derive(PartialEq, Debug)] pub(super) enum AggregatorError { BadKeyType, KeyNotFound, } #[derive(Debug, Clone)] pub(super) struct Group { pub hits: Vec<ScoredPoint>, pub key: GroupId, } impl Group { pub(super) fn hydrate_from(&mut self, map: &AHashMap<PointIdType, ScoredPoint>) { self.hits.iter_mut().for_each(|hit| { if let Some(point) = map.get(&hit.id) { hit.payload.clone_from(&point.payload); hit.vector.clone_from(&point.vector); } }); } } impl From<Group> for PointGroup { fn from(group: Group) -> Self { Self { hits: group .hits .into_iter() .map(api::rest::ScoredPoint::from) .collect(), id: group.key, lookup: None, } } } #[derive(Clone)] pub struct QueryGroupRequest { /// Query request to use pub source: ShardQueryRequest, /// Path to the field to group by pub group_by: JsonPath, /// Limit of points to return per group pub group_size: usize, /// Limit of groups to return pub groups: usize, } #[cfg(test)] mod test { use segment::data_types::groups::GroupId; #[test] fn group_key_from_values() { use std::convert::TryFrom; use serde_json::json; let string = GroupId::try_from(&json!("string")).unwrap(); let int = GroupId::try_from(&json!(1)).unwrap(); assert!(GroupId::try_from(&json!(2.42)).is_err()); assert!(GroupId::try_from(&json!([5, 6, 7])).is_err()); assert!(GroupId::try_from(&json!(["a", "b", "c"])).is_err()); assert_eq!(string, GroupId::String("string".to_string())); assert_eq!(int.as_u64().unwrap(), 1); let bad_key = GroupId::try_from(&json!(true)); assert!(bad_key.is_err()); let empty_array = GroupId::try_from(&json!([])); assert!(empty_array.is_err()); let empty_object = GroupId::try_from(&json!({})); assert!(empty_object.is_err()); let null = GroupId::try_from(&serde_json::Value::Null); 
assert!(null.is_err()); let nested_array = GroupId::try_from(&json!([[1, 2, 3], [4, 5, 6]])); assert!(nested_array.is_err()); let nested_object = GroupId::try_from(&json!({"a": 1, "b": 2})); assert!(nested_object.is_err()); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/grouping/mod.rs
lib/collection/src/grouping/mod.rs
mod aggregator; mod builder; pub mod group_by; mod types; pub use builder::GroupBy;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/grouping/group_by.rs
lib/collection/src/grouping/group_by.rs
use std::future::Future; use std::time::Duration; use ahash::AHashMap; use api::rest::{BaseGroupRequest, SearchGroupsRequestInternal, SearchRequestInternal}; use common::counter::hardware_accumulator::HwMeasurementAcc; use fnv::FnvBuildHasher; use indexmap::IndexSet; use segment::json_path::JsonPath; use segment::types::{ AnyVariants, Condition, FieldCondition, Filter, Match, ScoredPoint, WithPayloadInterface, WithVector, }; use serde_json::Value; use tokio::sync::RwLockReadGuard; use super::aggregator::GroupsAggregator; use super::types::QueryGroupRequest; use crate::collection::Collection; use crate::common::fetch_vectors; use crate::common::fetch_vectors::build_vector_resolver_query; use crate::lookup::WithLookup; use crate::operations::consistency_params::ReadConsistency; use crate::operations::shard_selector_internal::ShardSelectorInternal; use crate::operations::types::{ CollectionResult, PointGroup, RecommendGroupsRequestInternal, RecommendRequestInternal, }; use crate::operations::universal_query::collection_query::{ CollectionQueryGroupsRequest, CollectionQueryRequest, }; use crate::operations::universal_query::shard_query::{self, ShardPrefetch, ShardQueryRequest}; use crate::recommendations::recommend_into_core_search; const MAX_GET_GROUPS_REQUESTS: usize = 5; const MAX_GROUP_FILLING_REQUESTS: usize = 5; #[derive(Clone, Debug, PartialEq)] pub enum SourceRequest { Search(SearchRequestInternal), Recommend(RecommendRequestInternal), Query(CollectionQueryRequest), } #[derive(Clone, Debug, PartialEq)] pub struct GroupRequest { /// Request to use (search or recommend) pub source: SourceRequest, /// Path to the field to group by pub group_by: JsonPath, /// Limit of points to return per group pub group_size: usize, /// Limit of groups to return pub limit: usize, /// Options for specifying how to use the group id to lookup points in another collection pub with_lookup: Option<WithLookup>, } impl GroupRequest { pub fn with_limit_from_request( source: SourceRequest, 
group_by: JsonPath, group_size: usize, ) -> Self { let limit = match &source { SourceRequest::Search(request) => request.limit, SourceRequest::Recommend(request) => request.limit, SourceRequest::Query(request) => request.limit, }; Self { source, group_by, group_size, limit, with_lookup: None, } } pub async fn into_query_group_request<'a, F, Fut>( self, collection: &Collection, collection_by_name: F, read_consistency: Option<ReadConsistency>, shard_selection: ShardSelectorInternal, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<QueryGroupRequest> where F: Fn(String) -> Fut, Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>, { let query_search = match self.source { SourceRequest::Search(search_req) => ShardQueryRequest::from(search_req), SourceRequest::Recommend(recommend_req) => { let referenced_vectors = fetch_vectors::resolve_referenced_vectors_batch( &[(recommend_req.clone(), shard_selection)], collection, collection_by_name, read_consistency, timeout, hw_measurement_acc.clone(), ) .await?; let core_search = recommend_into_core_search(&collection.id, recommend_req, &referenced_vectors)?; ShardQueryRequest::from(core_search) } SourceRequest::Query(query_req) => { // Lift nested prefetches to root queries for vector resolution let resolver_requests = build_vector_resolver_query(&query_req, &shard_selection); let referenced_vectors = fetch_vectors::resolve_referenced_vectors_batch( &resolver_requests, collection, collection_by_name, read_consistency, timeout, hw_measurement_acc.clone(), ) .await?; query_req.try_into_shard_request(&collection.id, &referenced_vectors)? } }; Ok(QueryGroupRequest { source: query_search, group_by: self.group_by, group_size: self.group_size, groups: self.limit, }) } } impl QueryGroupRequest { /// Make `group_by` field selector work with as `with_payload`. 
fn group_by_to_payload_selector(group_by: &JsonPath) -> WithPayloadInterface { WithPayloadInterface::Fields(vec![group_by.strip_wildcard_suffix()]) } async fn r#do( &self, collection: &Collection, read_consistency: Option<ReadConsistency>, shard_selection: ShardSelectorInternal, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<ScoredPoint>> { let mut request = self.source.clone(); // Adjust limit to fetch enough points to fill groups request.limit = self.groups * self.group_size; request.prefetches.iter_mut().for_each(|prefetch| { increase_limit_for_group(prefetch, self.group_size); }); let key_not_empty = Filter::new_must_not(Condition::IsEmpty(self.group_by.clone().into())); request.filter = Some(request.filter.unwrap_or_default().merge(&key_not_empty)); let with_group_by_payload = Self::group_by_to_payload_selector(&self.group_by); // We're enriching the final results at the end, so we'll keep this minimal request.with_payload = with_group_by_payload; request.with_vector = WithVector::Bool(false); collection .query( request, read_consistency, shard_selection, timeout, hw_measurement_acc, ) .await } } impl From<SearchGroupsRequestInternal> for GroupRequest { fn from(request: SearchGroupsRequestInternal) -> Self { let SearchGroupsRequestInternal { vector, filter, params, with_payload, with_vector, score_threshold, group_request: BaseGroupRequest { group_by, group_size, limit, with_lookup: with_lookup_interface, }, } = request; let search = SearchRequestInternal { vector, filter, params, limit: 0, offset: Some(0), with_payload, with_vector, score_threshold, }; GroupRequest { source: SourceRequest::Search(search), group_by, group_size: group_size as usize, limit: limit as usize, with_lookup: with_lookup_interface.map(Into::into), } } } impl From<RecommendGroupsRequestInternal> for GroupRequest { fn from(request: RecommendGroupsRequestInternal) -> Self { let RecommendGroupsRequestInternal { positive, negative, strategy, 
filter, params, with_payload, with_vector, score_threshold, using, lookup_from, group_request: BaseGroupRequest { group_by, group_size, limit, with_lookup: with_lookup_interface, }, } = request; let recommend = RecommendRequestInternal { positive, negative, strategy, filter, params, limit: 0, offset: None, with_payload, with_vector, score_threshold, using, lookup_from, }; GroupRequest { source: SourceRequest::Recommend(recommend), group_by, group_size: group_size as usize, limit: limit as usize, with_lookup: with_lookup_interface.map(Into::into), } } } impl From<CollectionQueryGroupsRequest> for GroupRequest { fn from(request: CollectionQueryGroupsRequest) -> Self { let CollectionQueryGroupsRequest { prefetch, query, using, filter, params, score_threshold, with_vector, with_payload, lookup_from, group_by, group_size, limit, with_lookup: with_lookup_interface, } = request; let collection_query_request = CollectionQueryRequest { prefetch: prefetch.into_iter().collect(), query, using, filter, score_threshold, limit, offset: 0, params, with_vector, with_payload, lookup_from, }; GroupRequest { source: SourceRequest::Query(collection_query_request), group_by, group_size, limit, with_lookup: with_lookup_interface, } } } /// Uses the request to fill up groups of points. 
pub async fn group_by( request: QueryGroupRequest, collection: &Collection, read_consistency: Option<ReadConsistency>, shard_selection: ShardSelectorInternal, timeout: Option<Duration>, hw_measurement_acc: HwMeasurementAcc, ) -> CollectionResult<Vec<PointGroup>> { let start = std::time::Instant::now(); let collection_params = collection.collection_config.read().await.params.clone(); let score_ordering = shard_query::query_result_order(request.source.query.as_ref(), &collection_params)?; let mut aggregator = GroupsAggregator::new( request.groups, request.group_size, request.group_by.clone(), score_ordering, ); // Try to complete amount of groups let mut needs_filling = true; for _ in 0..MAX_GET_GROUPS_REQUESTS { // update timeout let timeout = timeout.map(|t| t.saturating_sub(start.elapsed())); let mut request = request.clone(); let source = &mut request.source; // Construct filter to exclude already found groups let full_groups = aggregator.keys_of_filled_groups(); if !full_groups.is_empty() { let except_any = except_on(&request.group_by, &full_groups); if !except_any.is_empty() { let exclude_groups = Filter { must: Some(except_any), ..Default::default() }; source.filter = Some( source .filter .as_ref() .map(|filter| filter.merge(&exclude_groups)) .unwrap_or(exclude_groups), ); } } // Exclude already aggregated points let ids = aggregator.ids().clone(); if !ids.is_empty() { let exclude_ids = Filter::new_must_not(Condition::HasId(ids.into())); source.filter = Some( source .filter .as_ref() .map(|filter| filter.merge(&exclude_ids)) .unwrap_or(exclude_ids), ); } // Make request let points = request .r#do( collection, read_consistency, shard_selection.clone(), timeout, hw_measurement_acc.clone(), ) .await?; if points.is_empty() { break; } aggregator.add_points(&points); // TODO: should we break early if we have some amount of "enough" groups? 
if aggregator.len_of_filled_best_groups() >= request.groups { needs_filling = false; break; } } // Try to fill up groups if needs_filling { for _ in 0..MAX_GROUP_FILLING_REQUESTS { // update timeout let timeout = timeout.map(|t| t.saturating_sub(start.elapsed())); let mut request = request.clone(); let source = &mut request.source; // Construct filter to only include unsatisfied groups let unsatisfied_groups = aggregator.keys_of_unfilled_best_groups(); let match_any = match_on(&request.group_by, &unsatisfied_groups); if !match_any.is_empty() { let include_groups = Filter { must: Some(match_any), ..Default::default() }; source.filter = Some( source .filter .as_ref() .map(|filter| filter.merge(&include_groups)) .unwrap_or(include_groups), ); } // Exclude already aggregated points let ids = aggregator.ids().clone(); if !ids.is_empty() { let exclude_ids = Filter::new_must_not(Condition::HasId(ids.into())); source.filter = Some( source .filter .as_ref() .map(|filter| filter.merge(&exclude_ids)) .unwrap_or(exclude_ids), ); } // Make request let points = request .r#do( collection, read_consistency, shard_selection.clone(), timeout, hw_measurement_acc.clone(), ) .await?; if points.is_empty() { break; } aggregator.add_points(&points); if aggregator.len_of_filled_best_groups() >= request.groups { break; } } } // extract best results let mut groups = aggregator.distill(); // flatten results let bare_points = groups .iter() .cloned() .flat_map(|group| group.hits) .collect(); // update timeout let timeout = timeout.map(|t| t.saturating_sub(start.elapsed())); // enrich with payload and vector let enriched_points: AHashMap<_, _> = collection .fill_search_result_with_payload( bare_points, Some(request.source.with_payload), request.source.with_vector, read_consistency, &shard_selection, timeout, hw_measurement_acc.clone(), ) .await? 
.into_iter() .map(|point| (point.id, point)) .collect(); // hydrate groups with enriched points groups .iter_mut() .for_each(|group| group.hydrate_from(&enriched_points)); // turn into output form let groups = groups.into_iter().map(PointGroup::from).collect(); Ok(groups) } /// Uses the set of values to create Match::Except's, if possible fn except_on(path: &JsonPath, values: &[Value]) -> Vec<Condition> { values_to_any_variants(values) .into_iter() .map(|v| { Condition::Field(FieldCondition::new_match( path.clone(), Match::new_except(v), )) }) .collect() } /// Uses the set of values to create Match::Any's, if possible fn match_on(path: &JsonPath, values: &[Value]) -> Vec<Condition> { values_to_any_variants(values) .into_iter() .map(|any_variants| { Condition::Field(FieldCondition::new_match( path.clone(), Match::new_any(any_variants), )) }) .collect() } fn values_to_any_variants(values: &[Value]) -> Vec<AnyVariants> { let mut any_variants = Vec::new(); // gather int values let ints: IndexSet<_, FnvBuildHasher> = values.iter().filter_map(|v| v.as_i64()).collect(); if !ints.is_empty() { any_variants.push(AnyVariants::Integers(ints)); } // gather string values let strs: IndexSet<_, FnvBuildHasher> = values .iter() .filter_map(|v| v.as_str().map(|s| s.into())) .collect(); if !strs.is_empty() { any_variants.push(AnyVariants::Strings(strs)); } any_variants } fn increase_limit_for_group(shard_prefetch: &mut ShardPrefetch, group_size: usize) { shard_prefetch.limit *= group_size; shard_prefetch.prefetches.iter_mut().for_each(|prefetch| { increase_limit_for_group(prefetch, group_size); }); } #[cfg(test)] mod tests { use ahash::AHashMap; use segment::data_types::groups::GroupId; use segment::payload_json; use segment::types::{Payload, ScoredPoint}; use crate::grouping::types::Group; fn make_scored_point(id: u64, score: f32, payload: Option<Payload>) -> ScoredPoint { ScoredPoint { id: id.into(), version: 0, score, payload, vector: None, shard_key: None, order_value: None, } } 
#[test] fn test_hydrated_from() { // arrange let mut groups: Vec<Group> = Vec::new(); [ ( "a", [ make_scored_point(1, 1.0, None), make_scored_point(2, 1.0, None), ], ), ( "b", [ make_scored_point(3, 1.0, None), make_scored_point(4, 1.0, None), ], ), ] .into_iter() .for_each(|(key, points)| { let group = Group { key: GroupId::from(key), hits: points.into_iter().collect(), }; groups.push(group); }); let payload_a = payload_json! {"some_key": "some value a"}; let payload_b = payload_json! {"some_key": "some value b"}; let hydrated = vec![ make_scored_point(1, 1.0, Some(payload_a.clone())), make_scored_point(2, 1.0, Some(payload_a.clone())), make_scored_point(3, 1.0, Some(payload_b.clone())), make_scored_point(4, 1.0, Some(payload_b.clone())), ]; let set: AHashMap<_, _> = hydrated.into_iter().map(|p| (p.id, p)).collect(); // act groups.iter_mut().for_each(|group| group.hydrate_from(&set)); // assert assert_eq!(groups.len(), 2); assert_eq!(groups.first().unwrap().hits.len(), 2); assert_eq!(groups.get(1).unwrap().hits.len(), 2); let a = groups.first().unwrap(); let b = groups.get(1).unwrap(); assert!( a.hits .iter() .all(|x| x.payload.as_ref() == Some(&payload_a)), ); assert!( b.hits .iter() .all(|x| x.payload.as_ref() == Some(&payload_b)), ); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/grouping/aggregator.rs
lib/collection/src/grouping/aggregator.rs
use std::cmp::Ordering; use std::collections::hash_map::Entry; use ahash::{AHashMap, AHashSet}; use itertools::Itertools; use segment::data_types::groups::GroupId; use segment::json_path::JsonPath; use segment::spaces::tools::{peek_top_largest_iterable, peek_top_smallest_iterable}; use segment::types::{ExtendedPointId, Order, PayloadContainer, PointIdType, ScoredPoint}; use serde_json::Value; use super::types::{AggregatorError, Group}; type Hits = AHashMap<PointIdType, ScoredPoint>; pub(super) struct GroupsAggregator { groups: AHashMap<GroupId, Hits>, max_group_size: usize, grouped_by: JsonPath, max_groups: usize, full_groups: AHashSet<GroupId>, group_best_scores: AHashMap<GroupId, ScoredPoint>, all_ids: AHashSet<ExtendedPointId>, order: Option<Order>, } impl GroupsAggregator { pub(super) fn new( groups: usize, group_size: usize, grouped_by: JsonPath, order: Option<Order>, ) -> Self { Self { groups: AHashMap::with_capacity(groups), max_group_size: group_size, grouped_by, max_groups: groups, full_groups: AHashSet::with_capacity(groups), group_best_scores: AHashMap::with_capacity(groups), all_ids: AHashSet::with_capacity(groups * group_size), order, } } /// Adds a point to the group that corresponds based on the group_by field, assumes that the point has the group_by field fn add_point(&mut self, point: &ScoredPoint) -> Result<(), AggregatorError> { // extract all values from the group_by field let payload_values: Vec<_> = point .payload .as_ref() .map(|p| { p.get_value(&self.grouped_by) .into_iter() .flat_map(|v| match v { Value::Array(arr) => arr.iter().collect(), _ => vec![v], }) .collect() }) .ok_or(AggregatorError::KeyNotFound)?; let group_keys = payload_values .into_iter() .map(GroupId::try_from) .collect::<Result<Vec<GroupId>, ()>>() .map_err(|_| AggregatorError::BadKeyType)?; let unique_group_keys: Vec<_> = group_keys.into_iter().unique().collect(); for group_key in unique_group_keys { let group = self .groups .entry(group_key.clone()) .or_insert_with(|| 
AHashMap::with_capacity(self.max_group_size)); let entry = group.entry(point.id); // if the point is already in the group, check if it has newer version match entry { Entry::Occupied(mut o) => { if o.get().version < point.version { o.insert(point.clone()); } } Entry::Vacant(v) => { v.insert(point.clone()); self.all_ids.insert(point.id); } } if group.len() == self.max_group_size { self.full_groups.insert(group_key.clone()); } // Insert score if better than the group best score self.group_best_scores .entry(group_key.clone()) .and_modify(|other_score| { let ordering = match self.order { Some(Order::LargeBetter) => point.cmp(other_score), Some(Order::SmallBetter) => (*other_score).cmp(point), None => Ordering::Equal, // No order can mean random sampling. }; if ordering == Ordering::Greater { *other_score = point.clone(); } }) .or_insert(point.clone()); } Ok(()) } /// Adds multiple points to the group that they correspond to based on the group_by field, assumes that the points always have the grouped_by field, else it just ignores them pub(super) fn add_points(&mut self, points: &[ScoredPoint]) { for point in points { match self.add_point(point) { Ok(()) | Err(AggregatorError::KeyNotFound | AggregatorError::BadKeyType) => { // ignore points that don't have the group_by field } } } } #[cfg(test)] pub(super) fn len(&self) -> usize { self.groups.len() } /// Return `max_groups` number of keys of the groups with the best score fn best_group_keys(&self) -> Vec<GroupId> { let mut pairs: Vec<_> = self.group_best_scores.iter().collect(); pairs.sort_unstable_by(|(_, score1), (_, score2)| match self.order { Some(Order::LargeBetter) => score2.cmp(score1), Some(Order::SmallBetter) => score1.cmp(score2), None => Ordering::Equal, }); pairs .iter() .take(self.max_groups) .map(|(k, _)| (*k).clone()) .collect() } /// Gets the keys of the groups that have less than the max group size pub(super) fn keys_of_unfilled_best_groups(&self) -> Vec<Value> { let best_group_keys: AHashSet<_> = 
self.best_group_keys().into_iter().collect(); best_group_keys .difference(&self.full_groups) .cloned() .map_into() .collect() } /// Gets the keys of the groups that have reached the max group size pub(super) fn keys_of_filled_groups(&self) -> Vec<Value> { self.full_groups.iter().cloned().map_into().collect() } /// Gets the amount of best groups that have reached the max group size pub(super) fn len_of_filled_best_groups(&self) -> usize { let best_group_keys: AHashSet<_> = self.best_group_keys().into_iter().collect(); best_group_keys.intersection(&self.full_groups).count() } /// Gets the ids of the already present points across all the groups pub(super) fn ids(&self) -> &AHashSet<ExtendedPointId> { &self.all_ids } /// Returns the best groups sorted by their best hit. The hits are sorted too. pub(super) fn distill(mut self) -> Vec<Group> { let best_groups = self.best_group_keys(); let mut groups = Vec::with_capacity(best_groups.len()); for group_key in best_groups { let mut group = self.groups.remove(&group_key).unwrap(); let scored_points_iter = group.drain().map(|(_, hit)| hit); let hits = match self.order { Some(Order::LargeBetter) => { peek_top_largest_iterable(scored_points_iter, self.max_group_size) } Some(Order::SmallBetter) => { peek_top_smallest_iterable(scored_points_iter, self.max_group_size) } None => scored_points_iter.take(self.max_group_size).collect(), }; groups.push(Group { hits, key: group_key, }); } groups } } #[cfg(test)] mod unit_tests { use common::types::ScoreType; use segment::payload_json; use serde_json::json; use super::*; fn point(idx: u64, score: ScoreType, payloads: Value) -> ScoredPoint { ScoredPoint { id: idx.into(), version: 0, score, payload: Some(payload_json! 
{ "docId": payloads }), vector: None, shard_key: None, order_value: None, } } fn empty_point(idx: u64, score: ScoreType) -> ScoredPoint { ScoredPoint { id: idx.into(), version: 0, score, payload: None, vector: None, shard_key: None, order_value: None, } } #[test] fn test_group_with_multiple_payload_values() { let scored_points = vec![ point(1, 0.99, json!(["a", "a"])), point(2, 0.85, json!(["a", "b"])), point(3, 0.75, json!("b")), ]; let mut aggregator = GroupsAggregator::new(3, 2, "docId".parse().unwrap(), Some(Order::LargeBetter)); for point in &scored_points { aggregator.add_point(point).unwrap(); } let result = aggregator.distill(); assert_eq!(result.len(), 2); assert_eq!(result[0].hits.len(), 2); assert_eq!(result[0].hits[0].id, 1.into()); assert_eq!(result[0].hits[1].id, 2.into()); assert_eq!(result[1].hits.len(), 2); assert_eq!(result[1].hits[0].id, 2.into()); assert_eq!(result[1].hits[1].id, 3.into()); } struct Case { point: ScoredPoint, key: Value, group_size: usize, groups_count: usize, expected_result: Result<(), AggregatorError>, } impl Case { fn new( key: Value, group_size: usize, groups_count: usize, expected_result: Result<(), AggregatorError>, point: ScoredPoint, ) -> Self { Self { point, key, group_size, groups_count, expected_result, } } } #[test] fn it_adds_single_points() { let mut aggregator = GroupsAggregator::new(4, 3, "docId".parse().unwrap(), Some(Order::LargeBetter)); // cases #[rustfmt::skip] [ Case::new(json!("a"), 1, 1, Ok(()), point(1, 0.99, json!("a"))), Case::new(json!("a"), 1, 1, Ok(()), point(1, 0.97, json!("a"))), // should not add it because it already has a point with the same id Case::new(json!("a"), 2, 2, Ok(()), point(2, 0.81, json!(["a", "b"]))), // to both groups Case::new(json!("b"), 2, 2, Ok(()), point(3, 0.84, json!("b"))), // check that `b` of size 2 Case::new(json!("a"), 3, 2, Ok(()), point(4, 0.9, json!("a"))), // grow beyond the max groups, as we sort later Case::new(json!(3), 1, 3, Ok(()), point(5, 0.4, json!(3))), 
// check that `3` of size 2 Case::new(json!("d"), 1, 4, Ok(()), point(6, 0.3, json!("d"))), Case::new(json!("a"), 4, 4, Ok(()), point(100, 0.31, json!("a"))), // small score 'a' Case::new(json!("a"), 5, 4, Ok(()), point(101, 0.32, json!("a"))), // small score 'a' Case::new(json!("a"), 6, 4, Ok(()), point(102, 0.33, json!("a"))), // small score 'a' Case::new(json!("a"), 7, 4, Ok(()), point(103, 0.34, json!("a"))), // small score 'a' Case::new(json!("a"), 8, 4, Ok(()), point(104, 0.35, json!("a"))), // small score 'a' Case::new(json!("a"), 9, 4, Ok(()), point(105, 0.36, json!("a"))), // small score 'a' Case::new(json!("b"), 3, 4, Ok(()), point(7, 1.0, json!("b"))), Case::new(json!("false"), 0, 4, Err(AggregatorError::BadKeyType), point(8, 1.0, json!(false))), Case::new(json!("none"), 0, 4, Err(AggregatorError::KeyNotFound), empty_point(9, 1.0)), Case::new(json!(3), 2, 4, Ok(()), point(10, 0.6, json!(3))), Case::new(json!(3), 3, 4, Ok(()), point(11, 0.1, json!(3))), ] .into_iter() .enumerate() .for_each(|(case_idx, case)| { let result = aggregator.add_point(&case.point); assert_eq!(result, case.expected_result, "case {case_idx}"); assert_eq!(aggregator.len(), case.groups_count, "case {case_idx}"); let key = &GroupId::try_from(&case.key).unwrap(); if case.group_size > 0 { assert_eq!( aggregator.groups.get(key).unwrap().len(), case.group_size, "case {case_idx}" ); } else { assert!(!aggregator.groups.contains_key(key), "case {case_idx}"); } }); // assert final groups assert_eq!(aggregator.full_groups.len(), 3); assert_eq!(aggregator.keys_of_unfilled_best_groups(), vec![json!("d")]); assert_eq!(aggregator.len_of_filled_best_groups(), 3); let groups = aggregator.distill(); #[rustfmt::skip] let expected_groups = vec![ ( GroupId::from("b"), vec![ empty_point(7, 1.0), empty_point(3, 0.84), empty_point(2, 0.81), ], ), ( GroupId::from("a"), vec![ empty_point(1, 0.99), empty_point(4, 0.9), empty_point(2, 0.81) ], ), ( GroupId::try_from(&json!(3)).unwrap(), vec![ empty_point(10, 
0.6), empty_point(5, 0.4), empty_point(11, 0.1), ], ), ( GroupId::from("d"), vec![ empty_point(6, 0.3), ], ), ]; for ((expected_key, expected_group_points), group) in expected_groups.into_iter().zip(groups.into_iter()) { assert_eq!(expected_key, group.key); let expected_id_score: Vec<_> = expected_group_points .into_iter() .map(|x| (x.id, x.score)) .collect(); let group_id_score: Vec<_> = group.hits.into_iter().map(|x| (x.id, x.score)).collect(); assert_eq!(expected_id_score, group_id_score); } } #[test] fn test_aggregate_less_groups() { let mut aggregator = GroupsAggregator::new(3, 2, "docId".parse().unwrap(), Some(Order::LargeBetter)); // cases [ point(1, 0.99, json!("a")), point(1, 0.97, json!("a")), // should not add it because it already has a point with the same id point(2, 0.81, json!(["a", "b"])), // to both groups point(3, 0.84, json!("b")), // check that `b` of size 2 point(4, 0.9, json!("a")), // grow beyond the max groups, as we sort later point(5, 0.4, json!(3)), // check that `3` of size 2 point(6, 0.3, json!("d")), point(100, 0.31, json!("a")), // small score 'a' point(101, 0.32, json!("a")), // small score 'a' point(102, 0.33, json!("a")), // small score 'a' point(103, 0.34, json!("a")), // small score 'a' point(104, 0.35, json!("a")), // small score 'a' point(105, 0.36, json!("a")), // small score 'a' point(7, 1.0, json!("b")), point(10, 0.6, json!(3)), point(11, 0.1, json!(3)), ] .iter() .for_each(|point| { aggregator.add_point(point).unwrap(); }); let groups = aggregator.distill(); #[rustfmt::skip] let expected_groups = vec![ ( GroupId::from("b"), vec![ empty_point(7, 1.0), empty_point(3, 0.84), ], ), ( GroupId::from("a"), vec![ empty_point(1, 0.99), empty_point(4, 0.9), ], ), ( GroupId::try_from(&json!(3)).unwrap(), vec![ empty_point(10, 0.6), empty_point(5, 0.4), ], ), ]; for ((key, expected_group_points), group) in expected_groups.into_iter().zip(groups.into_iter()) { assert_eq!(key, group.key); let expected_id_score: Vec<_> = 
expected_group_points .into_iter() .map(|x| (x.id, x.score)) .collect(); let group_id_score: Vec<_> = group.hits.into_iter().map(|x| (x.id, x.score)).collect(); assert_eq!(expected_id_score, group_id_score); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/pagination_test.rs
lib/collection/tests/integration/pagination_test.rs
//! Integration test: paginated search (`limit` + `offset`) must return the same
//! points, in the same order, as slicing one large un-paginated search result.

use api::rest::SearchRequestInternal;
use collection::operations::CollectionUpdateOperations;
use collection::operations::point_ops::{
    PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
    WriteOrdering,
};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::WithPayloadInterface;
use tempfile::Builder;

use crate::common::{N_SHARDS, simple_collection_fixture};

// Run the scenario on a single-shard and on a multi-shard collection so that
// cross-shard result merging is exercised as well.
#[tokio::test(flavor = "multi_thread")]
async fn test_collection_paginated_search() {
    test_collection_paginated_search_with_shards(1).await;
    test_collection_paginated_search_with_shards(N_SHARDS).await;
}

// Core scenario: upload 1000 points whose dot-product score grows with the id,
// fetch the top-100 as a reference, then verify that individual pages
// (offset + limit) match the corresponding slices of the reference result.
async fn test_collection_paginated_search_with_shards(shard_number: u32) {
    let collection_dir = Builder::new()
        .prefix("test_collection_paginated_search")
        .tempdir()
        .unwrap();

    let collection = simple_collection_fixture(collection_dir.path(), shard_number).await;

    // Upload 1000 random vectors to the collection
    // (vector = [i, 0, 0, 0], so with Dot distance the score is proportional
    // to the point id and the expected top hit is id 999).
    let mut points = Vec::new();
    for i in 0..1000 {
        points.push(PointStructPersisted {
            id: i.into(),
            vector: VectorStructPersisted::Single(vec![i as f32, 0.0, 0.0, 0.0]),
            payload: Some(serde_json::from_str(r#"{"number": "John Doe"}"#).unwrap()),
        });
    }
    let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(
        PointInsertOperationsInternal::PointsList(points),
    ));
    let hw_counter = HwMeasurementAcc::new();
    collection
        .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter)
        .await
        .unwrap();

    let query_vector = vec![1.0, 0.0, 0.0, 0.0];

    // Reference: a single search covering the first 100 hits (offset 0).
    let full_search_request = SearchRequestInternal {
        vector: query_vector.clone().into(),
        filter: None,
        limit: 100,
        offset: Some(0),
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: None,
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let reference_result = collection
        .search(
            full_search_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await
        .unwrap();

    assert_eq!(reference_result.len(), 100);
    // Highest id has the highest dot-product score against the query.
    assert_eq!(reference_result[0].id, 999.into());

    let page_size = 10;

    // Page 1: offset = page_size, i.e. hits 10..20 of the reference.
    let page_1_request = SearchRequestInternal {
        vector: query_vector.clone().into(),
        filter: None,
        limit: 10,
        offset: Some(page_size),
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: None,
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let page_1_result = collection
        .search(
            page_1_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await
        .unwrap();

    // Check that the first page is the same as the reference result
    assert_eq!(page_1_result.len(), 10);
    for i in 0..10 {
        assert_eq!(page_1_result[i], reference_result[page_size + i]);
    }

    // Page 9: the deepest page still covered by the 100-hit reference.
    let page_9_request = SearchRequestInternal {
        vector: query_vector.into(),
        filter: None,
        limit: 10,
        offset: Some(page_size * 9),
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: None,
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let page_9_result = collection
        .search(
            page_9_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await
        .unwrap();

    // Check that the 9th page is the same as the reference result
    assert_eq!(page_9_result.len(), 10);
    for i in 0..10 {
        assert_eq!(page_9_result[i], reference_result[page_size * 9 + i]);
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/distance_matrix_test.rs
lib/collection/tests/integration/distance_matrix_test.rs
//! Integration tests for the distance-matrix API (`search_points_matrix`):
//! behavior on an empty collection, and sampling/ordering invariants on a
//! populated collection with an anonymous (default-named) vector.

use collection::collection::distance_matrix::CollectionSearchMatrixRequest;
use collection::operations::point_ops::{
    BatchPersisted, BatchVectorStructPersisted, WriteOrdering,
};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use itertools::Itertools;
use rand::prelude::SmallRng;
use rand::{Rng, SeedableRng};
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use tempfile::Builder;

use crate::common::simple_collection_fixture;

// Fixed seed so the sampled vectors (and thus the test) are reproducible.
const SEED: u64 = 42;

// A matrix request against an empty collection must yield empty samples and
// empty neighbor lists rather than an error.
#[tokio::test(flavor = "multi_thread")]
async fn distance_matrix_empty() {
    let collection_dir = Builder::new().prefix("storage").tempdir().unwrap();
    // empty collection
    let collection = simple_collection_fixture(collection_dir.path(), 1).await;
    let hw_acc = HwMeasurementAcc::new();
    let sample_size = 100;
    let limit_per_sample = 10;
    let request = CollectionSearchMatrixRequest {
        sample_size,
        limit_per_sample,
        filter: None,
        using: DEFAULT_VECTOR_NAME.to_owned(),
    };
    let matrix = collection
        .search_points_matrix(request, ShardSelectorInternal::All, None, None, hw_acc)
        .await
        .unwrap();
    // assert all empty
    assert!(matrix.sample_ids.is_empty());
    assert!(matrix.nearests.is_empty());
}

// Populated collection: the matrix must contain exactly `sample_size` distinct
// sample ids, and each per-sample neighbor row must have `limit_per_sample`
// entries sorted by descending score.
#[tokio::test(flavor = "multi_thread")]
async fn distance_matrix_anonymous_vector() {
    let collection_dir = Builder::new().prefix("storage").tempdir().unwrap();
    let collection = simple_collection_fixture(collection_dir.path(), 1).await;

    // Insert 2000 points with seeded random 4-d vectors under the default
    // (anonymous) vector name.
    let point_count = 2000;
    let ids = (0..point_count).map_into().collect();
    let mut rng = SmallRng::seed_from_u64(SEED);
    let vectors = (0..point_count)
        .map(|_| rng.random::<[f32; 4]>().to_vec())
        .collect_vec();
    let batch = BatchPersisted {
        ids,
        vectors: BatchVectorStructPersisted::Single(vectors),
        payloads: None,
    };
    let upsert_points = collection::operations::CollectionUpdateOperations::PointOperation(
        collection::operations::point_ops::PointOperations::UpsertPoints(
            collection::operations::point_ops::PointInsertOperationsInternal::from(batch),
        ),
    );
    let hw_counter = HwMeasurementAcc::new();
    collection
        .update_from_client_simple(upsert_points, true, WriteOrdering::default(), hw_counter)
        .await
        .unwrap();

    let hw_acc = HwMeasurementAcc::new();
    let sample_size = 100;
    let limit_per_sample = 10;
    let request = CollectionSearchMatrixRequest {
        sample_size,
        limit_per_sample,
        filter: None,
        using: DEFAULT_VECTOR_NAME.to_owned(),
    };
    let matrix = collection
        .search_points_matrix(request, ShardSelectorInternal::All, None, None, hw_acc)
        .await
        .unwrap();
    assert_eq!(matrix.sample_ids.len(), sample_size);
    // no duplicate sample ids
    assert_eq!(
        matrix
            .sample_ids
            .iter()
            .collect::<std::collections::HashSet<_>>()
            .len(),
        sample_size
    );
    assert_eq!(matrix.nearests.len(), sample_size);
    for nearest in matrix.nearests {
        assert_eq!(nearest.len(), limit_per_sample);
        // assert each row sorted by scores
        nearest.iter().tuple_windows().for_each(|(prev, next)| {
            assert!(prev.score >= next.score);
        });
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/multi_vec_test.rs
lib/collection/tests/integration/multi_vec_test.rs
//! Integration test for collections with multiple named vectors per point
//! ("vec1"/"vec2"): search by name, error on anonymous search, selective
//! retrieval, and recommendation with/without an explicit `using` vector.

use std::collections::BTreeMap;
use std::num::NonZeroU32;
use std::path::Path;

use api::rest::SearchRequestInternal;
use collection::collection::Collection;
use collection::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use collection::operations::CollectionUpdateOperations;
use collection::operations::point_ops::{
    PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
    WriteOrdering,
};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{
    CollectionError, PointRequestInternal, RecommendRequestInternal, VectorsConfig,
};
use collection::operations::vector_params_builder::VectorParamsBuilder;
use collection::recommendations::recommend_by;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{NamedVector, VectorStructInternal};
use segment::types::{Distance, VectorName, WithPayloadInterface, WithVector};
use tempfile::Builder;

use crate::common::{N_SHARDS, TEST_OPTIMIZERS_CONFIG, new_local_collection};

// Names of the two per-point vectors configured in the fixture below.
const VECTOR1_NAME: &VectorName = "vec1";
const VECTOR2_NAME: &VectorName = "vec2";

// Run the scenario on a single-shard and on a multi-shard collection.
#[tokio::test(flavor = "multi_thread")]
async fn test_multi_vec() {
    test_multi_vec_with_shards(1).await;
    test_multi_vec_with_shards(N_SHARDS).await;
}

// Builds a local collection whose points carry two named 4-d Dot-distance
// vectors ("vec1" and "vec2"), with all shards hosted locally.
#[cfg(test)]
pub async fn multi_vec_collection_fixture(collection_path: &Path, shard_number: u32) -> Collection {
    let wal_config = WalConfig {
        wal_capacity_mb: 1,
        wal_segments_ahead: 0,
        wal_retain_closed: 1,
    };

    let vector_params1 = VectorParamsBuilder::new(4, Distance::Dot).build();
    let vector_params2 = VectorParamsBuilder::new(4, Distance::Dot).build();

    let mut vectors_config = BTreeMap::new();
    vectors_config.insert(VECTOR1_NAME.to_owned(), vector_params1);
    vectors_config.insert(VECTOR2_NAME.to_owned(), vector_params2);

    let collection_params = CollectionParams {
        vectors: VectorsConfig::Multi(vectors_config),
        shard_number: NonZeroU32::new(shard_number).expect("Shard number can not be zero"),
        ..CollectionParams::empty()
    };

    let collection_config = CollectionConfigInternal {
        params: collection_params,
        optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(),
        wal_config,
        hnsw_config: Default::default(),
        quantization_config: Default::default(),
        strict_mode_config: Default::default(),
        uuid: None,
        metadata: None,
    };

    let snapshot_path = collection_path.join("snapshots");

    // Default to a collection with all the shards local
    new_local_collection(
        "test".to_string(),
        collection_path,
        &snapshot_path,
        &collection_config,
    )
    .await
    .unwrap()
}

// End-to-end multi-vector scenario; `shard_number` controls sharding.
async fn test_multi_vec_with_shards(shard_number: u32) {
    let collection_dir = Builder::new()
        .prefix("test_multi_vec_with_shards")
        .tempdir()
        .unwrap();
    let collection = multi_vec_collection_fixture(collection_dir.path(), shard_number).await;

    // Upload 1000 random vectors to the collection
    // (vec1 = [i,0,0,0], vec2 = [0,i,0,0] — distinguishable per vector name).
    let mut points = Vec::new();
    for i in 0..1000 {
        let mut vectors = NamedVectors::default();
        vectors.insert(
            VECTOR1_NAME.to_owned(),
            vec![i as f32, 0.0, 0.0, 0.0].into(),
        );
        vectors.insert(
            VECTOR2_NAME.to_owned(),
            vec![0.0, i as f32, 0.0, 0.0].into(),
        );

        points.push(PointStructPersisted {
            id: i.into(),
            vector: VectorStructPersisted::from(VectorStructInternal::from(vectors)),
            payload: Some(serde_json::from_str(r#"{"number": "John Doe"}"#).unwrap()),
        });
    }
    let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(
        PointInsertOperationsInternal::PointsList(points),
    ));
    let hw_counter = HwMeasurementAcc::new();
    collection
        .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter)
        .await
        .unwrap();

    // Search by the first named vector: every hit must return both vectors.
    let query_vector = vec![6.0, 0.0, 0.0, 0.0];

    let full_search_request = SearchRequestInternal {
        vector: NamedVector {
            name: VECTOR1_NAME.to_owned(),
            vector: query_vector,
        }
        .into(),
        filter: None,
        limit: 10,
        offset: None,
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: Some(true.into()),
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let result = collection
        .search(
            full_search_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await
        .unwrap();

    for hit in result {
        match hit.vector.unwrap() {
            VectorStructInternal::Single(_) => panic!("expected multi vector"),
            VectorStructInternal::MultiDense(_) => panic!("expected multi vector"),
            VectorStructInternal::Named(vectors) => {
                assert!(vectors.contains_key(VECTOR1_NAME));
                assert!(vectors.contains_key(VECTOR2_NAME));
            }
        }
    }

    // An anonymous (un-named) query vector is invalid for a multi-vector
    // collection and must fail with `BadInput`.
    let query_vector = vec![0.0, 2.0, 0.0, 0.0];

    let failed_search_request = SearchRequestInternal {
        vector: query_vector.clone().into(),
        filter: None,
        limit: 10,
        offset: None,
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: Some(true.into()),
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let result = collection
        .search(
            failed_search_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await;

    assert!(
        matches!(result, Err(CollectionError::BadInput { .. })),
        "{result:?}"
    );

    // The same query succeeds once it is addressed to the second named vector.
    let full_search_request = SearchRequestInternal {
        vector: NamedVector {
            name: VECTOR2_NAME.to_owned(),
            vector: query_vector,
        }
        .into(),
        filter: None,
        limit: 10,
        offset: None,
        with_payload: Some(WithPayloadInterface::Bool(true)),
        with_vector: Some(true.into()),
        params: None,
        score_threshold: None,
    };

    let hw_acc = HwMeasurementAcc::new();
    let result = collection
        .search(
            full_search_request.into(),
            None,
            &ShardSelectorInternal::All,
            None,
            hw_acc,
        )
        .await
        .unwrap();

    for hit in result {
        match hit.vector.unwrap() {
            VectorStructInternal::Single(_) => panic!("expected multi vector"),
            VectorStructInternal::MultiDense(_) => panic!("expected multi vector"),
            VectorStructInternal::Named(vectors) => {
                assert!(vectors.contains_key(VECTOR1_NAME));
                assert!(vectors.contains_key(VECTOR2_NAME));
            }
        }
    }

    // Retrieval with a vector selector must return only the selected vector.
    let retrieve = collection
        .retrieve(
            PointRequestInternal {
                ids: vec![6.into()],
                with_payload: Some(WithPayloadInterface::Bool(false)),
                with_vector: WithVector::Selector(vec![VECTOR1_NAME.to_owned()]),
            },
            None,
            &ShardSelectorInternal::All,
            None,
            HwMeasurementAcc::new(),
        )
        .await
        .unwrap();

    assert_eq!(retrieve.len(), 1);
    match retrieve[0].vector.as_ref().unwrap() {
        VectorStructInternal::Single(_) => panic!("expected multi vector"),
        VectorStructInternal::MultiDense(_) => panic!("expected multi vector"),
        VectorStructInternal::Named(vectors) => {
            assert!(vectors.contains_key(VECTOR1_NAME));
            assert!(!vectors.contains_key(VECTOR2_NAME));
        }
    }

    // Recommendation without `using` on a multi-vector collection must fail
    // (either BadRequest or BadInput is acceptable).
    let hw_acc = HwMeasurementAcc::new();
    let recommend_result = recommend_by(
        RecommendRequestInternal {
            positive: vec![6.into()],
            with_payload: Some(WithPayloadInterface::Bool(false)),
            with_vector: Some(WithVector::Selector(vec![VECTOR2_NAME.to_owned()])),
            limit: 10,
            ..Default::default()
        },
        &collection,
        |_name| async { unreachable!("should not be called in this test") },
        None,
        ShardSelectorInternal::All,
        None,
        hw_acc,
    )
    .await;

    match recommend_result {
        Ok(_) => panic!("Error expected"),
        Err(err) => match err {
            CollectionError::BadRequest { .. } => {}
            CollectionError::BadInput { .. } => {}
            error => panic!("Unexpected error {error}"),
        },
    }

    // With `using: vec1`, recommendation succeeds; the vector selector still
    // restricts the returned vectors to vec2 only.
    let hw_acc = HwMeasurementAcc::new();
    let recommend_result = recommend_by(
        RecommendRequestInternal {
            positive: vec![6.into()],
            with_payload: Some(WithPayloadInterface::Bool(false)),
            with_vector: Some(WithVector::Selector(vec![VECTOR2_NAME.to_owned()])),
            limit: 10,
            using: Some(VECTOR1_NAME.to_owned().into()),
            ..Default::default()
        },
        &collection,
        |_name| async { unreachable!("should not be called in this test") },
        None,
        ShardSelectorInternal::All,
        None,
        hw_acc,
    )
    .await
    .unwrap();

    assert_eq!(recommend_result.len(), 10);
    for hit in recommend_result {
        match hit.vector.as_ref().unwrap() {
            VectorStructInternal::Single(_) => panic!("expected multi vector"),
            VectorStructInternal::MultiDense(_) => panic!("expected multi vector"),
            VectorStructInternal::Named(vectors) => {
                assert!(!vectors.contains_key(VECTOR1_NAME));
                assert!(vectors.contains_key(VECTOR2_NAME));
            }
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/continuous_snapshot_test.rs
lib/collection/tests/integration/continuous_snapshot_test.rs
//! Stress test: continuously mutate a collection (delete / insert / retrieve /
//! set-payload) on one task while another task takes snapshots in a tight
//! loop, for up to 20 seconds. Either task erroring or panicking fails the
//! test; hitting the timeout is the success path.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

use collection::collection::Collection;
use collection::config::{CollectionConfigInternal, CollectionParams};
use collection::operations::CollectionUpdateOperations;
use collection::operations::point_ops::{
    PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
    WriteOrdering,
};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::shared_storage_config::SharedStorageConfig;
use collection::operations::types::{
    CollectionResult, NodeType, PointRequestInternal, UpdateStatus, VectorsConfig,
};
use collection::operations::vector_params_builder::VectorParamsBuilder;
use collection::shards::channel_service::ChannelService;
use collection::shards::collection_shard_distribution::CollectionShardDistribution;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::flags::{FeatureFlags, init_feature_flags};
use segment::payload_json;
use segment::types::{Distance, WithVector};
use shard::operations::payload_ops::{PayloadOps, SetPayloadOp};
use tempfile::Builder;
use tokio::time::sleep;

use crate::common::{
    REST_PORT, TEST_OPTIMIZERS_CONFIG, dummy_abort_shard_transfer, dummy_on_replica_failure,
    dummy_request_shard_transfer,
};

// RUST_LOG=trace cargo nextest run --workspace continuous --nocapture
#[tokio::test(flavor = "multi_thread")]
async fn test_continuous_snapshot() {
    // Initialize logger for tests
    let _ = env_logger::builder().is_test(true).try_init();

    // Feature flags
    init_feature_flags(FeatureFlags::default());

    // Minimal single-vector collection built directly via `Collection::new`
    // (rather than a fixture) so snapshot paths and shard state are explicit.
    let collection_params = CollectionParams {
        vectors: VectorsConfig::Single(VectorParamsBuilder::new(4, Distance::Dot).build()),
        ..CollectionParams::empty()
    };
    let config = CollectionConfigInternal {
        params: collection_params,
        optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(),
        wal_config: Default::default(),
        hnsw_config: Default::default(),
        quantization_config: Default::default(),
        strict_mode_config: Default::default(),
        uuid: None,
        metadata: None,
    };

    let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
    let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
    let collection_name = "test".to_string();

    let storage_config: SharedStorageConfig = SharedStorageConfig {
        node_type: NodeType::Normal,
        ..Default::default()
    };
    let this_peer_id = 0;
    let shard_distribution = CollectionShardDistribution::all_local(
        Some(config.params.shard_number.into()),
        this_peer_id,
    );

    let collection = Collection::new(
        collection_name,
        this_peer_id,
        collection_dir.path(),
        snapshots_path.path(),
        &config,
        Arc::new(storage_config),
        shard_distribution,
        None,
        ChannelService::new(REST_PORT, None),
        dummy_on_replica_failure(),
        dummy_request_shard_transfer(),
        dummy_abort_shard_transfer(),
        None,
        None,
        ResourceBudget::default(),
        None,
    )
    .await
    .unwrap();

    // Mark every local shard replica Active so updates are accepted.
    let local_shards = collection.get_local_shards().await;
    for shard_id in local_shards {
        collection
            .set_shard_replica_state(shard_id, 0, ReplicaState::Active, None)
            .await
            .unwrap();
    }

    let collection = Arc::new(collection);
    // Shared flag: whichever branch of the final `select!` fires first tells
    // both background loops to stop.
    let stop_flag = Arc::new(AtomicBool::new(false));

    // Continuously insert the same point
    let points_count = 3;

    let points_task = {
        let collection = Arc::clone(&collection);
        let stop_flag = Arc::clone(&stop_flag);
        tokio::spawn(async move {
            while !stop_flag.load(Ordering::Relaxed) {
                // Delete all points
                let delete_points =
                    CollectionUpdateOperations::PointOperation(PointOperations::DeletePoints {
                        ids: (0..points_count).map(|i| i.into()).collect(),
                    });
                let hw_counter = HwMeasurementAcc::disposable();
                collection
                    .update_from_client_simple(
                        delete_points,
                        true,
                        WriteOrdering::default(),
                        hw_counter,
                    )
                    .await?;

                // Insert one point at a time
                for i in 0..points_count {
                    let point = PointStructPersisted {
                        id: i.into(),
                        vector: VectorStructPersisted::Single(vec![i as f32, 0.0, 0.0, 0.0]),
                        payload: Some(serde_json::from_str(r#"{"number": "John Doe"}"#).unwrap()),
                    };
                    let insert_points =
                        CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(
                            PointInsertOperationsInternal::PointsList(vec![point]),
                        ));
                    let hw_counter = HwMeasurementAcc::disposable();
                    let insert = collection
                        .update_from_client_simple(
                            insert_points,
                            true,
                            WriteOrdering::default(),
                            hw_counter,
                        )
                        .await?;
                    assert_eq!(insert.status, UpdateStatus::Completed);
                }

                // Retrieve one point at a time
                for i in 0..points_count {
                    let retrieve_point = PointRequestInternal {
                        ids: vec![i.into()],
                        with_payload: None,
                        with_vector: WithVector::Bool(false),
                    };
                    let hw_counter = HwMeasurementAcc::disposable();
                    let retrieve_result = collection
                        .retrieve(
                            retrieve_point,
                            None,
                            &ShardSelectorInternal::All,
                            None,
                            hw_counter,
                        )
                        .await?;
                    assert_eq!(retrieve_result.len(), 1);
                }

                // Set payload one point at a time
                for i in 0..points_count {
                    let set_payload = CollectionUpdateOperations::PayloadOperation(
                        PayloadOps::SetPayload(SetPayloadOp {
                            payload: payload_json! {
                                "city": "London",
                                "color": "green",
                            },
                            points: Some(vec![i.into()]),
                            filter: None,
                            key: None,
                        }),
                    );
                    let hw_counter = HwMeasurementAcc::disposable();
                    let set_result = collection
                        .update_from_client_simple(
                            set_payload,
                            true,
                            WriteOrdering::default(),
                            hw_counter,
                        )
                        .await?;
                    assert_eq!(set_result.status, UpdateStatus::Completed);
                }

                // Retrieve one point at a time again with payload & vector
                for i in 0..points_count {
                    let retrieve_point = PointRequestInternal {
                        ids: vec![i.into()],
                        with_payload: Some(true.into()),
                        with_vector: WithVector::Bool(true),
                    };
                    let hw_counter = HwMeasurementAcc::disposable();
                    let retrieve_result = collection
                        .retrieve(
                            retrieve_point,
                            None,
                            &ShardSelectorInternal::All,
                            None,
                            hw_counter,
                        )
                        .await?;
                    assert_eq!(retrieve_result.len(), 1);
                    assert!(retrieve_result[0].vector.is_some(), "missing vector");
                    assert!(retrieve_result[0].payload.is_some(), "missing payload");
                }
            }
            CollectionResult::Ok(())
        })
    };

    // Loop taking snapshots and deletions of snapshots
    let snapshot_task = {
        let collection = Arc::clone(&collection);
        let stop_flag = Arc::clone(&stop_flag);
        let snapshots_temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap();
        tokio::spawn(async move {
            while !stop_flag.load(Ordering::Relaxed) {
                // Take snapshot
                let _snapshot = collection
                    .create_snapshot(snapshots_temp_dir.path(), 0)
                    .await?;
            }
            CollectionResult::Ok(())
        })
    };

    // Race both tasks against a 20-second timeout; first completion wins and
    // signals the other loop to stop via `stop_flag`.
    let timeout = sleep(Duration::from_secs(20));
    tokio::pin!(timeout);

    tokio::select! {
        res = points_task => {
            stop_flag.store(true, Ordering::Relaxed);
            match res {
                Ok(Ok(())) => {},
                Ok(Err(e)) => panic!("points_task error: {e}"),
                Err(e) => panic!("points_task panicked: {e}"),
            }
        }
        res = snapshot_task => {
            stop_flag.store(true, Ordering::Relaxed);
            match res {
                Ok(Ok(())) => {},
                Ok(Err(e)) => panic!("snapshot_task error: {e}"),
                Err(e) => panic!("snapshot_task panicked: {e}"),
            }
        }
        _ = &mut timeout => {
            stop_flag.store(true, Ordering::Relaxed);
            log::info!("Timeout reached, stopping test");
        }
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/main.rs
lib/collection/tests/integration/main.rs
mod collection_restore_test; mod collection_test; mod common; mod continuous_snapshot_test; mod distance_matrix_test; mod grouping_test; mod lookup_test; mod multi_vec_test; mod pagination_test; mod snapshot_recovery_test;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/grouping_test.rs
lib/collection/tests/integration/grouping_test.rs
use collection::collection::Collection; use collection::grouping::group_by::{GroupRequest, SourceRequest}; use collection::operations::CollectionUpdateOperations; use collection::operations::point_ops::WriteOrdering; use collection::operations::types::{RecommendRequestInternal, UpdateStatus}; use itertools::Itertools; use rand::Rng; use rand::distr::Uniform; use rand::rngs::ThreadRng; use segment::data_types::vectors::DenseVector; use segment::json_path::JsonPath; use segment::types::{Filter, WithPayloadInterface, WithVector}; use serde_json::json; use crate::common::simple_collection_fixture; fn rand_dense_vector(rng: &mut ThreadRng, size: usize) -> DenseVector { rng.sample_iter(Uniform::new(0.4, 0.6).unwrap()) .take(size) .collect() } mod group_by { use api::rest::SearchRequestInternal; use collection::grouping::GroupBy; use collection::operations::point_ops::{ BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations, }; use common::counter::hardware_accumulator::HwMeasurementAcc; use segment::payload_json; use super::*; struct Resources { request: GroupRequest, collection: Collection, } async fn setup(docs: u64, chunks: u64) -> Resources { let mut rng = rand::rng(); let source = SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 4, offset: None, with_payload: None, with_vector: None, score_threshold: None, }); let request = GroupRequest::with_limit_from_request(source, JsonPath::new("docId"), 3); let collection_dir = tempfile::Builder::new() .prefix("collection") .tempdir() .unwrap(); let collection = simple_collection_fixture(collection_dir.path(), 1).await; let batch = BatchPersisted { ids: (0..docs * chunks).map(|x| x.into()).collect_vec(), vectors: BatchVectorStructPersisted::Single( (0..docs * chunks) .map(|_| rand_dense_vector(&mut rng, 4)) .collect_vec(), ), payloads: (0..docs) .flat_map(|x| { (0..chunks).map(move |_| { Some(payload_json! 
{ "docId": x , "other_stuff": x.to_string() + "foo" }) }) }) .collect_vec() .into(), }; let insert_points = CollectionUpdateOperations::PointOperation( PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch)), ); let hw_counter = HwMeasurementAcc::new(); let insert_result = collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_counter.clone(), ) .await .expect("insert failed"); assert_eq!(insert_result.status, UpdateStatus::Completed); Resources { request, collection, } } #[tokio::test(flavor = "multi_thread")] async fn searching() { let resources = setup(16, 8).await; let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( resources.request.clone(), &resources.collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); let group_req = resources.request; assert_eq!(result.len(), group_req.limit); assert_eq!(result[0].hits.len(), group_req.group_size); // is sorted? 
let mut last_group_best_score = f32::MAX; for group in result { assert!(group.hits[0].score <= last_group_best_score); last_group_best_score = group.hits[0].score; let mut last_score = f32::MAX; for hit in group.hits { assert!(hit.score <= last_score); last_score = hit.score; } } } #[tokio::test(flavor = "multi_thread")] async fn recommending() { let resources = setup(16, 8).await; let request = GroupRequest::with_limit_from_request( SourceRequest::Recommend(RecommendRequestInternal { strategy: Default::default(), filter: None, params: None, limit: 4, offset: None, with_payload: None, with_vector: None, score_threshold: None, positive: vec![1.into(), 2.into(), 3.into()], negative: Vec::new(), using: None, lookup_from: None, }), JsonPath::new("docId"), 2, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( request.clone(), &resources.collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), request.limit); let mut last_group_best_score = f32::MAX; for group in result { assert_eq!(group.hits.len(), request.group_size); // is sorted? 
assert!(group.hits[0].score <= last_group_best_score); last_group_best_score = group.hits[0].score; let mut last_score = f32::MAX; for hit in group.hits { assert!(hit.score <= last_score); last_score = hit.score; } } } #[tokio::test(flavor = "multi_thread")] async fn with_filter() { let resources = setup(16, 8).await; let filter: Filter = serde_json::from_value(json!({ "must": [ { "key": "docId", "range": { "gte": 1, "lte": 2 } } ] })) .unwrap(); let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: Some(filter.clone()), params: None, limit: 4, offset: None, with_payload: None, with_vector: None, score_threshold: None, }), JsonPath::new("docId"), 3, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request, &resources.collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 2); } #[tokio::test(flavor = "multi_thread")] async fn with_payload_and_vectors() { let resources = setup(16, 8).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 4, offset: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: Some(WithVector::Bool(true)), score_threshold: None, }), JsonPath::new("docId"), 3, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &resources.collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 4); for group in result { assert_eq!(group.hits.len(), group_by_request.group_size); assert!(group.hits[0].payload.is_some()); assert!(group.hits[0].vector.is_some()); } } #[tokio::test(flavor = "multi_thread")] 
async fn group_by_string_field() { let Resources { collection, .. } = setup(16, 8).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 4, offset: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: Some(WithVector::Bool(true)), score_threshold: None, }), JsonPath::new("other_stuff"), 3, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 4); for group in result { assert_eq!(group.hits.len(), group_by_request.group_size); } } #[tokio::test(flavor = "multi_thread")] async fn zero_group_size() { let Resources { collection, .. } = setup(16, 8).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 4, offset: None, with_payload: None, with_vector: None, score_threshold: None, }), JsonPath::new("docId"), 0, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 0); } #[tokio::test(flavor = "multi_thread")] async fn zero_limit_groups() { let Resources { collection, .. 
} = setup(16, 8).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 0, offset: None, with_payload: None, with_vector: None, score_threshold: None, }), JsonPath::new("docId"), 3, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 0); } #[tokio::test(flavor = "multi_thread")] async fn big_limit_groups() { let Resources { collection, .. } = setup(1000, 5).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 500, offset: None, with_payload: None, with_vector: None, score_threshold: None, }), JsonPath::new("docId"), 3, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), group_by_request.limit); for group in result { assert_eq!(group.hits.len(), group_by_request.group_size); } } #[tokio::test(flavor = "multi_thread")] async fn big_group_size_groups() { let Resources { collection, .. 
} = setup(10, 500).await; let group_by_request = GroupRequest::with_limit_from_request( SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 3, offset: None, with_payload: None, with_vector: None, score_threshold: None, }), JsonPath::new("docId"), 400, ); let hw_acc = HwMeasurementAcc::new(); let group_by = GroupBy::new( group_by_request.clone(), &collection, |_| async { unreachable!() }, hw_acc, ); let result = group_by.execute().await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), group_by_request.limit); for group in result { assert_eq!(group.hits.len(), group_by_request.group_size); } } } /// Tests out the different features working together. The individual features are already tested in other places. mod group_by_builder { use api::rest::SearchRequestInternal; use collection::grouping::GroupBy; use collection::lookup::WithLookup; use collection::lookup::types::PseudoId; use collection::operations::point_ops::{ BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations, }; use common::counter::hardware_accumulator::HwMeasurementAcc; use segment::json_path::JsonPath; use segment::payload_json; use tokio::sync::RwLock; use super::*; const BODY_TEXT: &str = "lorem ipsum dolor sit amet"; struct Resources { request: GroupRequest, lookup_collection: RwLock<Collection>, collection: Collection, } /// Sets up two collections: one for chunks and one for docs. 
async fn setup(docs: u64, chunks_per_doc: u64) -> Resources { let mut rng = rand::rng(); let source_request = SourceRequest::Search(SearchRequestInternal { vector: vec![0.5, 0.5, 0.5, 0.5].into(), filter: None, params: None, limit: 4, offset: None, with_payload: None, with_vector: None, score_threshold: None, }); let request = GroupRequest::with_limit_from_request(source_request, JsonPath::new("docId"), 3); let collection_dir = tempfile::Builder::new().prefix("chunks").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), 1).await; let hw_counter = HwMeasurementAcc::new(); // insert chunk points { let batch = BatchPersisted { ids: (0..docs * chunks_per_doc).map(|x| x.into()).collect_vec(), vectors: BatchVectorStructPersisted::Single( (0..docs * chunks_per_doc) .map(|_| rand_dense_vector(&mut rng, 4)) .collect_vec(), ), payloads: (0..docs) .flat_map(|x| { (0..chunks_per_doc).map(move |_| Some(payload_json! {"docId": x})) }) .collect_vec() .into(), }; let insert_points = CollectionUpdateOperations::PointOperation( PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch)), ); let insert_result = collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_counter.clone(), ) .await .expect("insert failed"); assert_eq!(insert_result.status, UpdateStatus::Completed); } let lookup_dir = tempfile::Builder::new().prefix("lookup").tempdir().unwrap(); let lookup_collection = simple_collection_fixture(lookup_dir.path(), 1).await; // insert doc points { let batch = BatchPersisted { ids: (0..docs).map(|x| x.into()).collect_vec(), vectors: BatchVectorStructPersisted::Single( (0..docs) .map(|_| rand_dense_vector(&mut rng, 4)) .collect_vec(), ), payloads: (0..docs) .map(|x| Some(payload_json! 
{"docId": x, "body": format!("{x} {BODY_TEXT}")})) .collect_vec() .into(), }; let insert_points = CollectionUpdateOperations::PointOperation( PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch)), ); let insert_result = lookup_collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_counter.clone(), ) .await .expect("insert failed"); assert_eq!(insert_result.status, UpdateStatus::Completed); } let lookup_collection = RwLock::new(lookup_collection); Resources { request, lookup_collection, collection, } } #[tokio::test(flavor = "multi_thread")] async fn only_group_by() { let Resources { request, collection, .. } = setup(16, 8).await; let collection_by_name = |_: String| async { unreachable!() }; let hw_acc = HwMeasurementAcc::new(); let result = GroupBy::new(request.clone(), &collection, collection_by_name, hw_acc) .execute() .await; assert!(result.is_ok()); let result = result.unwrap(); // minimal assertion assert_eq!(result.len(), request.limit); for group in result { assert_eq!(group.hits.len(), request.group_size); assert!(group.lookup.is_none()); } } #[tokio::test(flavor = "multi_thread")] async fn group_by_with_lookup() { let Resources { mut request, collection, lookup_collection, .. 
} = setup(16, 8).await; request.with_lookup = Some(WithLookup { collection_name: "test".to_string(), with_payload: Some(true.into()), with_vectors: Some(true.into()), }); let collection_by_name = |_: String| async { Some(lookup_collection.read().await) }; let hw_acc = HwMeasurementAcc::new(); let result = GroupBy::new(request.clone(), &collection, collection_by_name, hw_acc) .execute() .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), request.limit); for group in result { assert_eq!(group.hits.len(), request.group_size); let lookup = group.lookup.expect("lookup not found"); assert_eq!(PseudoId::from(group.id), PseudoId::from(lookup.id)); let payload = lookup.payload.unwrap(); let body = payload.0.get("body").unwrap().as_str().unwrap(); assert_eq!(body, &format!("{} {BODY_TEXT}", lookup.id)); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/collection_restore_test.rs
lib/collection/tests/integration/collection_restore_test.rs
use collection::operations::CollectionUpdateOperations; use collection::operations::point_ops::{ BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations, WriteOrdering, }; use collection::operations::shard_selector_internal::ShardSelectorInternal; use collection::operations::types::ScrollRequestInternal; use common::counter::hardware_accumulator::HwMeasurementAcc; use itertools::Itertools; use segment::json_path::JsonPath; use segment::types::{PayloadContainer, PayloadSelectorExclude, WithPayloadInterface}; use serde_json::Value; use tempfile::Builder; use crate::common::{N_SHARDS, load_local_collection, simple_collection_fixture}; #[tokio::test(flavor = "multi_thread")] async fn test_collection_reloading() { test_collection_reloading_with_shards(1).await; test_collection_reloading_with_shards(N_SHARDS).await; } async fn test_collection_reloading_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; collection.stop_gracefully().await; for _i in 0..5 { let collection_path = collection_dir.path(); let collection = load_local_collection( "test".to_string(), collection_path, &collection_path.join("snapshots"), ) .await; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsBatch(BatchPersisted { ids: vec![0, 1].into_iter().map(|x| x.into()).collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], ]), payloads: None, }), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); collection.stop_gracefully().await; } let collection_path = collection_dir.path(); let collection = load_local_collection( "test".to_string(), collection_path, 
&collection_path.join("snapshots"), ) .await; assert_eq!( collection .info(&ShardSelectorInternal::All) .await .unwrap() .points_count, Some(2), ); collection.stop_gracefully().await; } #[tokio::test(flavor = "multi_thread")] async fn test_collection_payload_reloading() { test_collection_payload_reloading_with_shards(1).await; test_collection_payload_reloading_with_shards(N_SHARDS).await; } async fn test_collection_payload_reloading_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); { let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsBatch(BatchPersisted { ids: vec![0, 1].into_iter().map(|x| x.into()).collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], ]), payloads: serde_json::from_str(r#"[{ "k": "v1" } , { "k": "v2"}]"#).unwrap(), }), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); collection.stop_gracefully().await; } let collection_path = collection_dir.path(); let collection = load_local_collection( "test".to_string(), collection_path, &collection_path.join("snapshots"), ) .await; let res = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(10), filter: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: true.into(), order_by: None, }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert_eq!(res.points.len(), 2); match res.points[0] .payload .as_ref() .expect("has payload") .get_value(&JsonPath::new("k")) .into_iter() .next() .expect("has value") { Value::String(value) => assert_eq!("v1", value), _ => panic!("unexpected type"), } eprintln!( "res = {:#?}", res.points[0] .payload 
.as_ref() .unwrap() .get_value(&JsonPath::new("k")) ); collection.stop_gracefully().await; } #[tokio::test(flavor = "multi_thread")] async fn test_collection_payload_custom_payload() { test_collection_payload_custom_payload_with_shards(1).await; test_collection_payload_custom_payload_with_shards(N_SHARDS).await; } async fn test_collection_payload_custom_payload_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); { let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsBatch(BatchPersisted { ids: vec![0.into(), 1.into()], vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], ]), payloads: serde_json::from_str( r#"[{ "k1": "v1" }, { "k1": "v2" , "k2": "v3", "k3": "v4"}]"#, ) .unwrap(), }), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); collection.stop_gracefully().await; } let collection_path = collection_dir.path(); let collection = load_local_collection( "test".to_string(), collection_path, &collection_path.join("snapshots"), ) .await; // Test res with filter payload let res_with_custom_payload = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(10), filter: None, with_payload: Some(WithPayloadInterface::Fields(vec![JsonPath::new("k2")])), with_vector: true.into(), order_by: None, }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert!( res_with_custom_payload.points[0] .payload .as_ref() .expect("has payload") .is_empty(), ); match res_with_custom_payload.points[1] .payload .as_ref() .expect("has payload") .get_value(&JsonPath::new("k2")) .into_iter() .next() .expect("has value") { Value::String(value) => assert_eq!("v3", 
value), _ => panic!("unexpected type"), } // Test res with filter payload dict let res_with_custom_payload = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(10), filter: None, with_payload: Some(PayloadSelectorExclude::new(vec![JsonPath::new("k1")]).into()), with_vector: false.into(), order_by: None, }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert!( res_with_custom_payload.points[0] .payload .as_ref() .expect("has payload") .is_empty(), ); assert_eq!( res_with_custom_payload.points[1] .payload .as_ref() .expect("has payload") .len(), 2 ); match res_with_custom_payload.points[1] .payload .as_ref() .expect("has payload") .get_value(&JsonPath::new("k3")) .into_iter() .next() .expect("has value") { Value::String(value) => assert_eq!("v4", value), _ => panic!("unexpected type"), }; collection.stop_gracefully().await; }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/snapshot_recovery_test.rs
lib/collection/tests/integration/snapshot_recovery_test.rs
use std::sync::Arc; use api::rest::SearchRequestInternal; use collection::collection::Collection; use collection::config::{CollectionConfigInternal, CollectionParams, WalConfig}; use collection::operations::CollectionUpdateOperations; use collection::operations::point_ops::{ PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted, WriteOrdering, }; use collection::operations::shard_selector_internal::ShardSelectorInternal; use collection::operations::shared_storage_config::SharedStorageConfig; use collection::operations::types::{NodeType, VectorsConfig}; use collection::operations::vector_params_builder::VectorParamsBuilder; use collection::shards::channel_service::ChannelService; use collection::shards::collection_shard_distribution::CollectionShardDistribution; use collection::shards::replica_set::replica_set_state::ReplicaState; use common::budget::ResourceBudget; use common::counter::hardware_accumulator::HwMeasurementAcc; use segment::types::{Distance, WithPayloadInterface, WithVector}; use tempfile::Builder; use crate::common::{ REST_PORT, TEST_OPTIMIZERS_CONFIG, dummy_abort_shard_transfer, dummy_on_replica_failure, dummy_request_shard_transfer, }; async fn _test_snapshot_and_recover_collection(node_type: NodeType) { let wal_config = WalConfig { wal_capacity_mb: 1, wal_segments_ahead: 0, wal_retain_closed: 1, }; let collection_params = CollectionParams { vectors: VectorsConfig::Single(VectorParamsBuilder::new(4, Distance::Dot).build()), ..CollectionParams::empty() }; let config = CollectionConfigInternal { params: collection_params, optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(), wal_config, hnsw_config: Default::default(), quantization_config: Default::default(), strict_mode_config: Default::default(), uuid: None, metadata: None, }; let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap(); let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap(); let recover_dir = 
Builder::new() .prefix("test_collection_rec") .tempdir() .unwrap(); let collection_name = "test".to_string(); let collection_name_rec = "test_rec".to_string(); let storage_config: SharedStorageConfig = SharedStorageConfig { node_type, ..Default::default() }; let this_peer_id = 0; let shard_distribution = CollectionShardDistribution::all_local( Some(config.params.shard_number.into()), this_peer_id, ); let collection = Collection::new( collection_name, this_peer_id, collection_dir.path(), snapshots_path.path(), &config, Arc::new(storage_config), shard_distribution, None, ChannelService::new(REST_PORT, None), dummy_on_replica_failure(), dummy_request_shard_transfer(), dummy_abort_shard_transfer(), None, None, ResourceBudget::default(), None, ) .await .unwrap(); let local_shards = collection.get_local_shards().await; for shard_id in local_shards { collection .set_shard_replica_state(shard_id, 0, ReplicaState::Active, None) .await .unwrap(); } // Upload 1000 random vectors to the collection let mut points = Vec::new(); for i in 0..100 { points.push(PointStructPersisted { id: i.into(), vector: VectorStructPersisted::Single(vec![i as f32, 0.0, 0.0, 0.0]), payload: Some(serde_json::from_str(r#"{"number": "John Doe"}"#).unwrap()), }); } let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsList(points), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); // Take a snapshot let snapshots_temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap(); let snapshot_description = collection .create_snapshot(snapshots_temp_dir.path(), 0) .await .unwrap(); if let Err(err) = Collection::restore_snapshot( &snapshots_path.path().join(snapshot_description.name), recover_dir.path(), 0, false, ) { panic!("Failed to restore snapshot: {err}") } let recovered_collection = Collection::load( 
collection_name_rec, this_peer_id, recover_dir.path(), snapshots_path.path(), Default::default(), ChannelService::new(REST_PORT, None), dummy_on_replica_failure(), dummy_request_shard_transfer(), dummy_abort_shard_transfer(), None, None, ResourceBudget::default(), None, ) .await; let query_vector = vec![1.0, 0.0, 0.0, 0.0]; let full_search_request = SearchRequestInternal { vector: query_vector.clone().into(), filter: None, limit: 100, offset: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: Some(WithVector::Bool(true)), params: None, score_threshold: None, }; let hw_acc = HwMeasurementAcc::new(); let reference_result = collection .search( full_search_request.clone().into(), None, &ShardSelectorInternal::All, None, hw_acc, ) .await .unwrap(); let hw_acc = HwMeasurementAcc::new(); let recovered_result = recovered_collection .search( full_search_request.into(), None, &ShardSelectorInternal::All, None, hw_acc, ) .await .unwrap(); assert_eq!(reference_result.len(), recovered_result.len()); for (reference, recovered) in reference_result.iter().zip(recovered_result.iter()) { assert_eq!(reference.id, recovered.id); assert_eq!(reference.payload, recovered.payload); assert_eq!(reference.vector, recovered.vector); } } #[tokio::test(flavor = "multi_thread")] async fn test_snapshot_and_recover_collection_normal() { _test_snapshot_and_recover_collection(NodeType::Normal).await; } #[tokio::test(flavor = "multi_thread")] async fn test_snapshot_and_recover_collection_listener() { _test_snapshot_and_recover_collection(NodeType::Listener).await; }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/collection_test.rs
lib/collection/tests/integration/collection_test.rs
use std::collections::{HashMap, HashSet}; use std::io::{BufReader, BufWriter}; use ahash::AHashSet; use api::rest::{OrderByInterface, SearchRequestInternal}; use collection::operations::CollectionUpdateOperations; use collection::operations::payload_ops::{PayloadOps, SetPayloadOp}; use collection::operations::point_ops::{ BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted, WriteOrdering, }; use collection::operations::shard_selector_internal::ShardSelectorInternal; use collection::operations::types::{ CountRequestInternal, PointRequestInternal, RecommendRequestInternal, ScrollRequestInternal, UpdateStatus, }; use collection::recommendations::recommend_by; use collection::shards::replica_set::replica_set_state::{ReplicaSetState, ReplicaState}; use common::counter::hardware_accumulator::HwMeasurementAcc; use fs_err::File; use itertools::Itertools; use segment::data_types::order_by::{Direction, OrderBy}; use segment::data_types::vectors::VectorStructInternal; use segment::types::{ Condition, ExtendedPointId, FieldCondition, Filter, HasIdCondition, Payload, PayloadFieldSchema, PayloadSchemaType, PointIdType, WithPayloadInterface, }; use serde_json::Map; use tempfile::Builder; use crate::common::{N_SHARDS, load_local_collection, simple_collection_fixture}; #[tokio::test(flavor = "multi_thread")] async fn test_collection_updater() { test_collection_updater_with_shards(1).await; test_collection_updater_with_shards(N_SHARDS).await; } async fn test_collection_updater_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4] .into_iter() .map(|x| x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], vec![1.0, 1.0, 1.0, 1.0], 
vec![1.0, 1.0, 0.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], ]), payloads: None, }; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let hw_counter = HwMeasurementAcc::new(); let insert_result = collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await; match insert_result { Ok(res) => { assert_eq!(res.status, UpdateStatus::Completed) } Err(err) => panic!("operation failed: {err:?}"), } let search_request = SearchRequestInternal { vector: vec![1.0, 1.0, 1.0, 1.0].into(), with_payload: None, with_vector: None, filter: None, params: None, limit: 3, offset: None, score_threshold: None, }; let hw_acc = HwMeasurementAcc::new(); let search_res = collection .search( search_request.into(), None, &ShardSelectorInternal::All, None, hw_acc, ) .await; match search_res { Ok(res) => { assert_eq!(res.len(), 3); assert_eq!(res[0].id, 2.into()); assert!(res[0].payload.is_none()); } Err(err) => panic!("search failed: {err:?}"), } } #[tokio::test(flavor = "multi_thread")] async fn test_collection_search_with_payload_and_vector() { test_collection_search_with_payload_and_vector_with_shards(1).await; test_collection_search_with_payload_and_vector_with_shards(N_SHARDS).await; } async fn test_collection_search_with_payload_and_vector_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0.into(), 1.into()], vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], ]), payloads: serde_json::from_str( r#"[{ "k": { "type": "keyword", "value": "v1" } }, { "k": "v2" , "v": "v3"}]"#, ) .unwrap(), }; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let 
hw_counter = HwMeasurementAcc::new(); let insert_result = collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await; match insert_result { Ok(res) => { assert_eq!(res.status, UpdateStatus::Completed) } Err(err) => panic!("operation failed: {err:?}"), } let search_request = SearchRequestInternal { vector: vec![1.0, 0.0, 1.0, 1.0].into(), with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: Some(true.into()), filter: None, params: None, limit: 3, offset: None, score_threshold: None, }; let hw_acc = HwMeasurementAcc::new(); let search_res = collection .search( search_request.into(), None, &ShardSelectorInternal::All, None, hw_acc, ) .await; match search_res { Ok(res) => { assert_eq!(res.len(), 2); assert_eq!(res[0].id, 0.into()); assert_eq!(res[0].payload.as_ref().unwrap().len(), 1); let vec = vec![1.0, 0.0, 1.0, 1.0]; match &res[0].vector { Some(VectorStructInternal::Single(v)) => assert_eq!(v.clone(), vec), _ => panic!("vector is not returned"), } } Err(err) => panic!("search failed: {err:?}"), } let count_request = CountRequestInternal { filter: Some(Filter::new_must(Condition::Field( FieldCondition::new_match( "k".parse().unwrap(), serde_json::from_str(r#"{ "value": "v2" }"#).unwrap(), ), ))), exact: true, }; let hw_acc = HwMeasurementAcc::new(); let count_res = collection .count( count_request, None, &ShardSelectorInternal::All, None, hw_acc, ) .await .unwrap(); assert_eq!(count_res.count, 1); } // FIXME: does not work #[tokio::test(flavor = "multi_thread")] async fn test_collection_loading() { test_collection_loading_with_shards(1).await; test_collection_loading_with_shards(N_SHARDS).await; } async fn test_collection_loading_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); { let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4] .into_iter() .map(|x| 
x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], vec![1.0, 1.0, 1.0, 1.0], vec![1.0, 1.0, 0.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], ]), payloads: None, }; let insert_points = CollectionUpdateOperations::PointOperation( PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch)), ); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); let payload: Payload = serde_json::from_str(r#"{"color":"red"}"#).unwrap(); let assign_payload = CollectionUpdateOperations::PayloadOperation(PayloadOps::SetPayload(SetPayloadOp { payload, points: Some(vec![2.into(), 3.into()]), filter: None, key: None, })); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(assign_payload, true, WriteOrdering::default(), hw_counter) .await .unwrap(); collection.stop_gracefully().await; } let collection_path = collection_dir.path(); let loaded_collection = load_local_collection( "test".to_string(), collection_path, &collection_path.join("snapshots"), ) .await; let request = PointRequestInternal { ids: vec![1.into(), 2.into()], with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: true.into(), }; let retrieved = loaded_collection .retrieve( request, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert_eq!(retrieved.len(), 2); for record in retrieved { if record.id == 2.into() { let non_empty_payload = record.payload.unwrap(); assert_eq!(non_empty_payload.len(), 1) } } loaded_collection.stop_gracefully().await; println!("Function end"); } #[test] fn test_deserialization() { let batch = BatchPersisted { ids: vec![0.into(), 1.into()], vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], ]), payloads: None, }; let insert_points = 
CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let json_str = serde_json::to_string_pretty(&insert_points).unwrap(); let _read_obj: CollectionUpdateOperations = serde_json::from_str(&json_str).unwrap(); let crob_bytes = rmp_serde::to_vec(&insert_points).unwrap(); let _read_obj2: CollectionUpdateOperations = rmp_serde::from_slice(&crob_bytes).unwrap(); } #[test] fn test_deserialization2() { let points = vec![ PointStructPersisted { id: 0.into(), vector: VectorStructPersisted::from(vec![1.0, 0.0, 1.0, 1.0]), payload: None, }, PointStructPersisted { id: 1.into(), vector: VectorStructPersisted::from(vec![1.0, 0.0, 1.0, 0.0]), payload: None, }, ]; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(points), )); let json_str = serde_json::to_string_pretty(&insert_points).unwrap(); let _read_obj: CollectionUpdateOperations = serde_json::from_str(&json_str).unwrap(); let raw_bytes = rmp_serde::to_vec(&insert_points).unwrap(); let _read_obj2: CollectionUpdateOperations = rmp_serde::from_slice(&raw_bytes).unwrap(); } // Request to find points sent to all shards but they might not have a particular id, so they will return an error #[tokio::test(flavor = "multi_thread")] async fn test_recommendation_api() { test_recommendation_api_with_shards(1).await; test_recommendation_api_with_shards(N_SHARDS).await; } async fn test_recommendation_api_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4, 5, 6, 7, 8] .into_iter() .map(|x| x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![0.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], vec![1.0, 0.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], 
vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 0.0, 1.0], vec![0.0, 0.0, 0.0, 1.0], ]), payloads: None, }; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let hw_acc = HwMeasurementAcc::new(); collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_acc.clone(), ) .await .unwrap(); let result = recommend_by( RecommendRequestInternal { positive: vec![0.into()], negative: vec![8.into()], limit: 5, ..Default::default() }, &collection, |_name| async { unreachable!("Should not be called in this test") }, None, ShardSelectorInternal::All, None, hw_acc, ) .await .unwrap(); assert!(!result.is_empty()); let top1 = &result[0]; assert!(top1.id == 5.into() || top1.id == 6.into()); } #[tokio::test(flavor = "multi_thread")] async fn test_read_api() { test_read_api_with_shards(1).await; test_read_api_with_shards(N_SHARDS).await; } async fn test_read_api_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4, 5, 6, 7, 8] .into_iter() .map(|x| x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![0.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], vec![1.0, 0.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 0.0, 1.0], vec![0.0, 0.0, 0.0, 1.0], ]), payloads: None, }; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(insert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); let result = collection .scroll_by( ScrollRequestInternal { offset: None, limit: 
Some(2), filter: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: None, }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert_eq!(result.next_page_offset, Some(2.into())); assert_eq!(result.points.len(), 2); } #[tokio::test(flavor = "multi_thread")] async fn test_ordered_read_api() { test_ordered_scroll_api_with_shards(1).await; test_ordered_scroll_api_with_shards(N_SHARDS).await; } async fn test_ordered_scroll_api_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; const PRICE_FLOAT_KEY: &str = "price_float"; const PRICE_INT_KEY: &str = "price_int"; const MULTI_VALUE_KEY: &str = "multi_value"; let get_payload = |value: f64| -> Option<Payload> { let mut payload_map = Map::new(); payload_map.insert(PRICE_FLOAT_KEY.to_string(), value.into()); payload_map.insert(PRICE_INT_KEY.to_string(), (value as i64).into()); payload_map.insert( MULTI_VALUE_KEY.to_string(), vec![value, value + 20.0].into(), ); Some(Payload(payload_map)) }; let payloads: Vec<Option<Payload>> = vec![ get_payload(11.0), get_payload(10.0), get_payload(9.0), get_payload(8.0), get_payload(7.0), get_payload(6.0), get_payload(5.0), get_payload(5.0), get_payload(5.0), get_payload(5.0), get_payload(4.0), get_payload(3.0), get_payload(2.0), get_payload(1.0), ]; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] .into_iter() .map(|x| x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![0.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], vec![1.0, 0.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], vec![0.0, 1.0, 0.0, 0.0], vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 1.0, 0.0], vec![0.0, 0.0, 0.0, 1.0], vec![0.0, 0.0, 0.0, 1.0], vec![0.0, 1.0, 1.0, 1.0], vec![0.0, 1.0, 1.0, 1.0], vec![0.0, 1.0, 1.0, 1.0], vec![0.0, 1.0, 1.0, 1.0], 
vec![1.0, 1.0, 1.0, 1.0], ]), payloads: Some(payloads), }; let insert_points = CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_counter.clone(), ) .await .unwrap(); collection .create_payload_index_with_wait( PRICE_FLOAT_KEY.parse().unwrap(), PayloadFieldSchema::FieldType(PayloadSchemaType::Float), true, hw_counter.clone(), ) .await .unwrap(); collection .create_payload_index_with_wait( PRICE_INT_KEY.parse().unwrap(), PayloadFieldSchema::FieldType(PayloadSchemaType::Integer), true, hw_counter.clone(), ) .await .unwrap(); collection .create_payload_index_with_wait( MULTI_VALUE_KEY.parse().unwrap(), PayloadFieldSchema::FieldType(PayloadSchemaType::Float), true, hw_counter.clone(), ) .await .unwrap(); ///////// Test single-valued fields /////////// for key in [PRICE_FLOAT_KEY, PRICE_INT_KEY] { let result_asc = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(3), filter: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: Some(OrderByInterface::Struct(OrderBy { key: key.parse().unwrap(), direction: Some(Direction::Asc), start_from: None, })), }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert_eq!(result_asc.points.len(), 3); assert_eq!(result_asc.next_page_offset, None); assert!(result_asc.points.iter().tuple_windows().all(|(a, b)| { let a = a.payload.as_ref().unwrap(); let b = b.payload.as_ref().unwrap(); let a = a.0.get(key).unwrap().as_f64(); let b = b.0.get(key).unwrap().as_f64(); a <= b })); let result_desc = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(5), filter: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: Some(OrderByInterface::Struct(OrderBy { key: 
key.parse().unwrap(), direction: Some(Direction::Desc), start_from: None, })), }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert_eq!(result_desc.points.len(), 5); assert_eq!(result_desc.next_page_offset, None); assert!( result_desc.points.iter().tuple_windows().all(|(a, b)| { let a = a.payload.as_ref().unwrap(); let b = b.payload.as_ref().unwrap(); let a = a.0.get(key).unwrap().as_f64(); let b = b.0.get(key).unwrap().as_f64(); a >= b }), "Expected descending order when using {key} key, got: {:#?}", result_desc.points ); let asc_already_seen: AHashSet<_> = result_asc.points.iter().map(|x| x.id).collect(); dbg!(&asc_already_seen); let asc_second_page = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(5), filter: Some(Filter::new_must_not(Condition::HasId( HasIdCondition::from(asc_already_seen), ))), with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: Some(OrderByInterface::Struct(OrderBy { key: key.parse().unwrap(), direction: Some(Direction::Asc), start_from: None, })), }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); let asc_second_page_points = asc_second_page .points .iter() .map(|x| x.id) .collect::<HashSet<_>>(); let valid_asc_second_page_points = [10, 9, 8, 7, 6] .into_iter() .map(|x| x.into()) .collect::<HashSet<ExtendedPointId>>(); assert_eq!(asc_second_page.points.len(), 5); assert!(asc_second_page_points.is_subset(&valid_asc_second_page_points)); let desc_already_seen: AHashSet<_> = result_desc.points.iter().map(|x| x.id).collect(); dbg!(&desc_already_seen); let desc_second_page = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(4), filter: Some(Filter::new_must_not(Condition::HasId( HasIdCondition::from(desc_already_seen), ))), with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: Some(OrderByInterface::Struct(OrderBy { key: key.parse().unwrap(), 
direction: Some(Direction::Desc), start_from: None, })), }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); let desc_second_page_points = desc_second_page .points .iter() .map(|x| x.id) .collect::<HashSet<_>>(); let valid_desc_second_page_points = [5, 6, 7, 8, 9] .into_iter() .map(|x| x.into()) .collect::<HashSet<ExtendedPointId>>(); assert_eq!(desc_second_page.points.len(), 4); assert!( desc_second_page_points.is_subset(&valid_desc_second_page_points), "expected: {valid_desc_second_page_points:?}, got: {desc_second_page_points:?}" ); } ///////// Test multi-valued field /////////// let result_multi = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(100), filter: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: false.into(), order_by: Some(OrderByInterface::Key(MULTI_VALUE_KEY.parse().unwrap())), }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); assert!( result_multi .points .iter() .fold(HashMap::<PointIdType, usize, _>::new(), |mut acc, point| { acc.entry(point.id) .and_modify(|x| { *x += 1; }) .or_insert(1); acc }) .values() .all(|&x| x == 2), ); } #[tokio::test(flavor = "multi_thread")] async fn test_collection_delete_points_by_filter() { test_collection_delete_points_by_filter_with_shards(1).await; test_collection_delete_points_by_filter_with_shards(N_SHARDS).await; } async fn test_collection_delete_points_by_filter_with_shards(shard_number: u32) { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), shard_number).await; let batch = BatchPersisted { ids: vec![0, 1, 2, 3, 4] .into_iter() .map(|x| x.into()) .collect_vec(), vectors: BatchVectorStructPersisted::Single(vec![ vec![1.0, 0.0, 1.0, 1.0], vec![1.0, 0.0, 1.0, 0.0], vec![1.0, 1.0, 1.0, 1.0], vec![1.0, 1.0, 0.0, 1.0], vec![1.0, 0.0, 0.0, 0.0], ]), payloads: None, }; let insert_points = 
CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::from(batch), )); let hw_counter = HwMeasurementAcc::new(); let insert_result = collection .update_from_client_simple( insert_points, true, WriteOrdering::default(), hw_counter.clone(), ) .await; match insert_result { Ok(res) => { assert_eq!(res.status, UpdateStatus::Completed) } Err(err) => panic!("operation failed: {err:?}"), } // delete points with id (0, 3) let to_be_deleted: AHashSet<PointIdType> = vec![0.into(), 3.into()].into_iter().collect(); let delete_filter = segment::types::Filter::new_must(Condition::HasId(HasIdCondition::from(to_be_deleted))); let delete_points = CollectionUpdateOperations::PointOperation( PointOperations::DeletePointsByFilter(delete_filter), ); let delete_result = collection .update_from_client_simple(delete_points, true, WriteOrdering::default(), hw_counter) .await; match delete_result { Ok(res) => { assert_eq!(res.status, UpdateStatus::Completed) } Err(err) => panic!("operation failed: {err:?}"), } let result = collection .scroll_by( ScrollRequestInternal { offset: None, limit: Some(10), filter: None, with_payload: Some(WithPayloadInterface::Bool(false)), with_vector: false.into(), order_by: None, }, None, &ShardSelectorInternal::All, None, HwMeasurementAcc::new(), ) .await .unwrap(); // check if we only have 3 out of 5 points left and that the point id were really deleted assert_eq!(result.points.len(), 3); assert_eq!(result.points.first().unwrap().id, 1.into()); assert_eq!(result.points.get(1).unwrap().id, 2.into()); assert_eq!(result.points.get(2).unwrap().id, 4.into()); } #[tokio::test(flavor = "multi_thread")] async fn test_collection_local_load_initializing_not_stuck() { let collection_dir = Builder::new().prefix("collection").tempdir().unwrap(); // Create and unload collection simple_collection_fixture(collection_dir.path(), 1) .await .stop_gracefully() .await; // Modify replica state file on disk, set state to 
Initializing // This is to simulate a situation where a collection was not fully created, we cannot create // this situation through our collection interface { let replica_state_path = collection_dir.path().join("0/replica_state.json"); let replica_state_file = BufReader::new(File::open(&replica_state_path).unwrap()); let mut replica_set_state: ReplicaSetState = serde_json::from_reader(replica_state_file).unwrap(); for peer_id in replica_set_state.peers().clone().into_keys() { replica_set_state.set_peer_state(peer_id, ReplicaState::Initializing); } let replica_state_file = BufWriter::new(File::create(&replica_state_path).unwrap()); serde_json::to_writer(replica_state_file, &replica_set_state).unwrap(); } // Reload collection let collection_path = collection_dir.path(); let loaded_collection = load_local_collection( "test".to_string(), collection_path, &collection_path.join("snapshots"), ) .await; // Local replica must be in Active state after loading (all replicas are local) let loaded_state = loaded_collection.state().await; for shard_info in loaded_state.shards.values() { for replica_state in shard_info.replicas.values() { assert_eq!(replica_state, &ReplicaState::Active); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/lookup_test.rs
lib/collection/tests/integration/lookup_test.rs
use collection::collection::Collection; use collection::lookup::types::PseudoId; use collection::lookup::{WithLookup, lookup_ids}; use collection::operations::consistency_params::ReadConsistency; use collection::operations::point_ops::{ BatchPersisted, BatchVectorStructPersisted, PointInsertOperationsInternal, PointOperations, WriteOrdering, }; use collection::operations::shard_selector_internal::ShardSelectorInternal; use collection::shards::shard::ShardId; use common::counter::hardware_accumulator::HwMeasurementAcc; use itertools::Itertools; use rand::rngs::SmallRng; use rand::{self, Rng, SeedableRng}; use rstest::*; use segment::data_types::vectors::VectorStructInternal; use segment::payload_json; use segment::types::PointIdType; use tempfile::Builder; use tokio::sync::RwLock; use uuid::Uuid; use crate::common::simple_collection_fixture; const SEED: u64 = 42; struct Resources { request: WithLookup, collection: RwLock<Collection>, read_consistency: Option<ReadConsistency>, shard_selection: Option<ShardId>, } async fn setup() -> Resources { let request = WithLookup { collection_name: "test".to_string(), with_payload: None, with_vectors: None, }; let collection_dir = Builder::new().prefix("storage").tempdir().unwrap(); let collection = simple_collection_fixture(collection_dir.path(), 1).await; let int_ids = (0..1000).map(PointIdType::from); let mut rng = SmallRng::seed_from_u64(SEED); let uuids = (0..1000).map(|_| PointIdType::Uuid(Uuid::from_u128(rng.random()))); let ids = int_ids.chain(uuids).collect_vec(); let mut rng = SmallRng::seed_from_u64(SEED); let vectors = (0..2000) .map(|_| rng.random::<[f32; 4]>().to_vec()) .collect_vec(); let payloads = ids .iter() .map(|i| Some(payload_json! 
{"foo": format!("bar {}", i)})) .collect_vec(); let batch = BatchPersisted { ids, vectors: BatchVectorStructPersisted::Single(vectors), payloads: Some(payloads), }; let upsert_points = collection::operations::CollectionUpdateOperations::PointOperation( PointOperations::UpsertPoints(PointInsertOperationsInternal::from(batch)), ); let hw_counter = HwMeasurementAcc::new(); collection .update_from_client_simple(upsert_points, true, WriteOrdering::default(), hw_counter) .await .unwrap(); let read_consistency = None; let shard_selection = None; Resources { request, collection: RwLock::new(collection), read_consistency, shard_selection, } } #[tokio::test(flavor = "multi_thread")] async fn happy_lookup_ids() { let Resources { mut request, collection, read_consistency, shard_selection, } = setup().await; let collection = collection.read().await; let collection_by_name = |_: String| async { Some(collection) }; let n = 100u64; let ints = (0..n).map_into(); let mut rng = SmallRng::seed_from_u64(SEED); let uuids = (0..n) .map(|_| Uuid::from_u128(rng.random()).to_string()) .map_into(); let values = ints.chain(uuids).collect_vec(); request.with_payload = Some(true.into()); request.with_vectors = Some(true.into()); let shard_selection = match shard_selection { Some(shard_id) => ShardSelectorInternal::ShardId(shard_id), None => ShardSelectorInternal::All, }; let result = lookup_ids( request.clone(), values.clone(), collection_by_name, read_consistency, &shard_selection, None, HwMeasurementAcc::new(), ) .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), (n * 2) as usize); let mut rng = SmallRng::seed_from_u64(SEED); // use points 0..n and 1000..1000+n as expected vectors let expected_vectors = (0..1000 + n) .map(|i| (i, rng.random::<[f32; 4]>().to_vec())) .filter(|(i, _)| !(&n..&1000).contains(&i)) .map(|(_, v)| v) .map(VectorStructInternal::from); for (id_value, vector) in values.into_iter().zip(expected_vectors) { let record = result 
.get(&id_value) .unwrap_or_else(|| panic!("Expected to find record for id {id_value}")); assert_eq!(record.id, PointIdType::try_from(id_value.clone()).unwrap()); assert_eq!( record.payload, Some(payload_json! { "foo": format!("bar {}", id_value) }) ); assert_eq!(record.vector, Some(vector)); } } fn first_uuid() -> String { let mut rng = SmallRng::seed_from_u64(SEED); Uuid::from_u128(rng.random()).to_string() } #[rstest] #[case::existing_uuid(first_uuid())] #[case::zero_int(0i64)] #[case::positive_int(1i64)] #[case::existing_uint(999u64)] fn parsable_pseudo_id_to_point_id(#[case] value: impl Into<PseudoId>) { let value = value.into(); assert!(PointIdType::try_from(value).is_ok()); } #[rstest] #[case::negative_int(-1i64)] #[case::non_uuid_string("not a uuid")] fn non_parsable_pseudo_id_to_point_id(#[case] value: impl Into<PseudoId>) { let value = value.into(); assert!(PointIdType::try_from(value).is_err()); } #[rstest] #[case::uuid(Uuid::new_v4().to_string())] #[case::int(1001u64)] #[tokio::test(flavor = "multi_thread")] async fn nonexistent_lookup_ids_are_ignored(#[case] value: impl Into<PseudoId>) { let value = value.into(); let Resources { mut request, collection, read_consistency, shard_selection, } = setup().await; let shard_selection = match shard_selection { Some(shard_id) => ShardSelectorInternal::ShardId(shard_id), None => ShardSelectorInternal::All, }; let collection = collection.read().await; let collection_by_name = |_: String| async { Some(collection) }; let values = vec![value]; request.with_payload = Some(true.into()); request.with_vectors = Some(true.into()); let result = lookup_ids( request, values, collection_by_name, read_consistency, &shard_selection, None, HwMeasurementAcc::new(), ) .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(result.len(), 0); } #[tokio::test(flavor = "multi_thread")] async fn err_when_collection_by_name_returns_none() { let Resources { request, read_consistency, shard_selection, .. 
} = setup().await; let shard_selection = match shard_selection { Some(shard_id) => ShardSelectorInternal::ShardId(shard_id), None => ShardSelectorInternal::All, }; let collection_by_name = |_: String| async { None }; let result = lookup_ids( request, vec![], collection_by_name, read_consistency, &shard_selection, None, HwMeasurementAcc::new(), ) .await; assert!(result.is_err()); assert_eq!( result.unwrap_err().to_string(), "Collection test not found".to_string() ); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/tests/integration/common/mod.rs
lib/collection/tests/integration/common/mod.rs
use std::num::NonZeroU32; use std::path::Path; use std::sync::Arc; use collection::collection::{Collection, RequestShardTransfer}; use collection::config::{CollectionConfigInternal, CollectionParams, WalConfig}; use collection::operations::types::CollectionResult; use collection::operations::vector_params_builder::VectorParamsBuilder; use collection::optimizers_builder::OptimizersConfig; use collection::shards::CollectionId; use collection::shards::channel_service::ChannelService; use collection::shards::collection_shard_distribution::CollectionShardDistribution; use collection::shards::replica_set::replica_set_state::ReplicaState; use collection::shards::replica_set::{AbortShardTransfer, ChangePeerFromState}; use common::budget::ResourceBudget; use segment::types::Distance; /// Test collections for this upper bound of shards. /// Testing with more shards is problematic due to `number of open files problem` /// See https://github.com/qdrant/qdrant/issues/379 pub const N_SHARDS: u32 = 3; pub const REST_PORT: u16 = 6333; pub const TEST_OPTIMIZERS_CONFIG: OptimizersConfig = OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 2, max_segment_size: None, #[expect(deprecated)] memmap_threshold: None, indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: Some(2), }; #[cfg(test)] pub async fn simple_collection_fixture(collection_path: &Path, shard_number: u32) -> Collection { let wal_config = WalConfig { wal_capacity_mb: 1, wal_segments_ahead: 0, wal_retain_closed: 1, }; let collection_params = CollectionParams { vectors: VectorParamsBuilder::new(4, Distance::Dot).build().into(), shard_number: NonZeroU32::new(shard_number).expect("Shard number can not be zero"), ..CollectionParams::empty() }; let collection_config = CollectionConfigInternal { params: collection_params, optimizer_config: TEST_OPTIMIZERS_CONFIG.clone(), wal_config, hnsw_config: Default::default(), quantization_config: 
Default::default(), strict_mode_config: Default::default(), uuid: None, metadata: None, }; let snapshot_path = collection_path.join("snapshots"); // Default to a collection with all the shards local new_local_collection( "test".to_string(), collection_path, &snapshot_path, &collection_config, ) .await .unwrap() } pub fn dummy_on_replica_failure() -> ChangePeerFromState { Arc::new(move |_peer_id, _shard_id, _from_state| {}) } pub fn dummy_request_shard_transfer() -> RequestShardTransfer { Arc::new(move |_transfer| {}) } pub fn dummy_abort_shard_transfer() -> AbortShardTransfer { Arc::new(|_transfer, _reason| {}) } /// Default to a collection with all the shards local #[cfg(test)] pub async fn new_local_collection( id: CollectionId, path: &Path, snapshots_path: &Path, config: &CollectionConfigInternal, ) -> CollectionResult<Collection> { let collection = Collection::new( id, 0, path, snapshots_path, config, Default::default(), CollectionShardDistribution::all_local(Some(config.params.shard_number.into()), 0), None, ChannelService::new(REST_PORT, None), dummy_on_replica_failure(), dummy_request_shard_transfer(), dummy_abort_shard_transfer(), None, None, ResourceBudget::default(), None, ) .await; let collection = collection?; let local_shards = collection.get_local_shards().await; for shard_id in local_shards { collection .set_shard_replica_state(shard_id, 0, ReplicaState::Active, None) .await?; } Ok(collection) } /// Default to a collection with all the shards local #[cfg(test)] pub async fn load_local_collection( id: CollectionId, path: &Path, snapshots_path: &Path, ) -> Collection { Collection::load( id, 0, path, snapshots_path, Default::default(), ChannelService::new(REST_PORT, None), dummy_on_replica_failure(), dummy_request_shard_transfer(), dummy_abort_shard_transfer(), None, None, ResourceBudget::default(), None, ) .await }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/benches/prof.rs
lib/collection/benches/prof.rs
use std::io::Write; use std::os::raw::c_int; use std::path::Path; use criterion::profiler::Profiler; use fs_err as fs; use fs_err::File; use pprof::ProfilerGuard; use pprof::flamegraph::TextTruncateDirection; use pprof::protos::Message; /// Small custom profiler that can be used with Criterion to create a flamegraph for benchmarks. /// Also see [the Criterion documentation on this][custom-profiler]. /// /// ## Example on how to enable the custom profiler: /// /// ``` /// mod perf; /// use perf::FlamegraphProfiler; /// /// fn fibonacci_profiled(criterion: &mut Criterion) { /// // Use the criterion struct as normal here. /// } /// /// fn custom() -> Criterion { /// Criterion::default().with_profiler(FlamegraphProfiler::new()) /// } /// /// criterion_group! { /// name = benches; /// config = custom(); /// targets = fibonacci_profiled /// } /// ``` /// /// The neat thing about this is that it will sample _only_ the benchmark, and not other stuff like /// the setup process. /// /// Further, it will only kick in if `--profile-time <time>` is passed to the benchmark binary. /// A flamegraph will be created for each individual benchmark in its report directory under /// `profile/flamegraph.svg`. 
/// /// [custom-profiler]: https://bheisler.github.io/criterion.rs/book/user_guide/profiling.html#implementing-in-process-profiling-hooks pub struct FlamegraphProfiler<'a> { frequency: c_int, active_profiler: Option<ProfilerGuard<'a>>, } impl FlamegraphProfiler<'_> { #[allow(dead_code)] pub fn new(frequency: c_int) -> Self { FlamegraphProfiler { frequency, active_profiler: None, } } } impl Profiler for FlamegraphProfiler<'_> { fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) { self.active_profiler = Some(ProfilerGuard::new(self.frequency).unwrap()); } fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) { fs::create_dir_all(benchmark_dir).unwrap(); let pprof_path = benchmark_dir.join("profile.pb"); let flamegraph_path = benchmark_dir.join("flamegraph.svg"); eprintln!("\nflamegraph_path = {flamegraph_path:#?}"); let flamegraph_file = File::create(&flamegraph_path) .expect("File system error while creating flamegraph.svg"); let mut options = pprof::flamegraph::Options::default(); options.hash = true; options.image_width = Some(2500); options.text_truncate_direction = TextTruncateDirection::Left; options.font_size /= 3; if let Some(profiler) = self.active_profiler.take() { let report = profiler.report().build().unwrap(); let mut file = File::create(pprof_path).unwrap(); let profile = report.pprof().unwrap(); let mut content = Vec::new(); profile.encode(&mut content).unwrap(); file.write_all(&content).unwrap(); report .flamegraph_with_options(flamegraph_file, &mut options) .expect("Error writing flamegraph"); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/benches/hash_ring_bench.rs
lib/collection/benches/hash_ring_bench.rs
#[cfg(not(target_os = "windows"))] mod prof; use collection::hash_ring::HashRing; use criterion::{Criterion, criterion_group, criterion_main}; use rand::Rng; fn hash_ring_bench(c: &mut Criterion) { let mut group = c.benchmark_group("hash-ring-bench"); let mut ring_raw = HashRing::raw(); let mut ring_fair = HashRing::fair(100); // add 10 shards to ring for i in 0..10 { ring_raw.add(i); ring_fair.add(i); } let mut rnd = rand::rng(); group.bench_function("hash-ring-fair", |b| { b.iter(|| { let point = rnd.random_range(0..100000); let _shard = ring_fair.get(&point); }) }); group.bench_function("hash-ring-raw", |b| { b.iter(|| { let point = rnd.random_range(0..100000); let _shard = ring_raw.get(&point); }) }); } criterion_group! { name = benches; config = Criterion::default(); targets = hash_ring_bench, } criterion_main!(benches);
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/benches/batch_search_bench.rs
lib/collection/benches/batch_search_bench.rs
use std::sync::Arc; use api::rest::SearchRequestInternal; use collection::config::{CollectionConfigInternal, CollectionParams, WalConfig}; use collection::operations::CollectionUpdateOperations; use collection::operations::point_ops::{ PointInsertOperationsInternal, PointOperations, PointStructPersisted, }; use collection::operations::vector_params_builder::VectorParamsBuilder; use collection::optimizers_builder::OptimizersConfig; use collection::shards::local_shard::LocalShard; use collection::shards::shard_trait::ShardOperation; use common::budget::ResourceBudget; use common::counter::hardware_accumulator::HwMeasurementAcc; use common::save_on_disk::SaveOnDisk; use criterion::{Criterion, criterion_group, criterion_main}; use ordered_float::OrderedFloat; use rand::rng; use segment::data_types::vectors::{VectorStructInternal, only_default_vector}; use segment::fixtures::payload_fixtures::random_vector; use segment::types::{Condition, Distance, FieldCondition, Filter, Payload, Range}; use serde_json::Map; use shard::search::CoreSearchRequestBatch; use tempfile::Builder; use tokio::runtime::Runtime; use tokio::sync::RwLock; #[cfg(not(target_os = "windows"))] mod prof; fn create_rnd_batch() -> CollectionUpdateOperations { let mut rng = rng(); let num_points = 2000; let dim = 100; let mut points = Vec::with_capacity(num_points); for i in 0..num_points { let mut payload_map = Map::new(); payload_map.insert("a".to_string(), (i % 5).into()); let vector = random_vector(&mut rng, dim); let vectors = only_default_vector(&vector); let point = PointStructPersisted { id: (i as u64).into(), vector: VectorStructInternal::from(vectors).into(), payload: Some(Payload(payload_map)), }; points.push(point); } CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsList(points), )) } fn batch_search_bench(c: &mut Criterion) { let storage_dir = Builder::new().prefix("storage").tempdir().unwrap(); let runtime = 
Runtime::new().unwrap(); let search_runtime = Runtime::new().unwrap(); let search_runtime_handle = search_runtime.handle(); let handle = runtime.handle().clone(); let wal_config = WalConfig { wal_capacity_mb: 1, wal_segments_ahead: 0, wal_retain_closed: 1, }; let collection_params = CollectionParams { vectors: VectorParamsBuilder::new(100, Distance::Dot).build().into(), ..CollectionParams::empty() }; let collection_config = CollectionConfigInternal { params: collection_params, optimizer_config: OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 2, max_segment_size: Some(100_000), #[expect(deprecated)] memmap_threshold: Some(100_000), indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: Some(2), }, wal_config, hnsw_config: Default::default(), quantization_config: Default::default(), strict_mode_config: Default::default(), uuid: None, metadata: None, }; let optimizers_config = collection_config.optimizer_config.clone(); let shared_config = Arc::new(RwLock::new(collection_config)); let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap(); let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json"); let payload_index_schema = Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap()); let shard = handle .block_on(LocalShard::build_local( 0, "test_collection".to_string(), storage_dir.path(), shared_config, Default::default(), payload_index_schema, handle.clone(), handle.clone(), ResourceBudget::default(), optimizers_config, )) .unwrap(); let rnd_batch = create_rnd_batch(); handle .block_on(shard.update(rnd_batch.into(), true, HwMeasurementAcc::new())) .unwrap(); let mut group = c.benchmark_group("batch-search-bench"); let filters = vec![ None, Some(Filter::new_must(Condition::Field( FieldCondition::new_match("a".parse().unwrap(), 3.into()), ))), Some(Filter::new_must(Condition::Field( FieldCondition::new_range( 
"a".parse().unwrap(), Range { lt: None, gt: Some(OrderedFloat(-1.)), gte: None, lte: Some(OrderedFloat(100.0)), }, ), ))), ]; let batch_size = 100; for (fid, filter) in filters.into_iter().enumerate() { group.bench_function(format!("search-{fid}"), |b| { b.iter(|| { runtime.block_on(async { let mut rng = rng(); for _i in 0..batch_size { let query = random_vector(&mut rng, 100); let search_query = SearchRequestInternal { vector: query.into(), filter: filter.clone(), params: None, limit: 10, offset: None, with_payload: None, with_vector: None, score_threshold: None, }; let hw_acc = HwMeasurementAcc::new(); let result = shard .core_search( Arc::new(CoreSearchRequestBatch { searches: vec![search_query.into()], }), search_runtime_handle, None, hw_acc, ) .await .unwrap(); assert!(!result.is_empty()); } }); }) }); group.bench_function(format!("search-batch-{fid}"), |b| { b.iter(|| { runtime.block_on(async { let mut rng = rng(); let mut searches = Vec::with_capacity(batch_size); for _i in 0..batch_size { let query = random_vector(&mut rng, 100); let search_query = SearchRequestInternal { vector: query.into(), filter: filter.clone(), params: None, limit: 10, offset: None, with_payload: None, with_vector: None, score_threshold: None, }; searches.push(search_query.into()); } let hw_acc = HwMeasurementAcc::new(); let search_query = CoreSearchRequestBatch { searches }; let result = shard .core_search(Arc::new(search_query), search_runtime_handle, None, hw_acc) .await .unwrap(); assert!(!result.is_empty()); }); }) }); } group.finish(); search_runtime.block_on(async { shard.stop_gracefully().await; }); } criterion_group! { name = benches; config = Criterion::default(); targets = batch_search_bench, } criterion_main!(benches);
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/benches/batch_query_bench.rs
lib/collection/benches/batch_query_bench.rs
use std::sync::Arc; use api::rest::SearchRequestInternal; use collection::config::{CollectionConfigInternal, CollectionParams, WalConfig}; use collection::operations::CollectionUpdateOperations; use collection::operations::point_ops::{ PointInsertOperationsInternal, PointOperations, PointStructPersisted, }; use collection::operations::universal_query::shard_query::{ FusionInternal, ScoringQuery, ShardPrefetch, ShardQueryRequest, }; use collection::operations::vector_params_builder::VectorParamsBuilder; use collection::optimizers_builder::OptimizersConfig; use collection::shards::local_shard::LocalShard; use collection::shards::shard_trait::ShardOperation; use common::budget::ResourceBudget; use common::counter::hardware_accumulator::HwMeasurementAcc; use common::save_on_disk::SaveOnDisk; use criterion::{Criterion, criterion_group, criterion_main}; use ordered_float::OrderedFloat; use rand::rng; use segment::common::reciprocal_rank_fusion::DEFAULT_RRF_K; use segment::data_types::vectors::{VectorStructInternal, only_default_vector}; use segment::fixtures::payload_fixtures::random_vector; use segment::types::{ Condition, Distance, FieldCondition, Filter, Payload, Range, WithPayloadInterface, WithVector, }; use serde_json::Map; use shard::search::CoreSearchRequestBatch; use tempfile::{Builder, TempDir}; use tokio::runtime::Runtime; use tokio::sync::RwLock; #[cfg(not(target_os = "windows"))] mod prof; fn setup() -> (TempDir, LocalShard, Runtime) { let storage_dir = Builder::new().prefix("storage").tempdir().unwrap(); let runtime = Runtime::new().unwrap(); let handle = runtime.handle().clone(); let wal_config = WalConfig { wal_capacity_mb: 1, wal_segments_ahead: 0, wal_retain_closed: 1, }; let collection_params = CollectionParams { vectors: VectorParamsBuilder::new(100, Distance::Dot).build().into(), ..CollectionParams::empty() }; let collection_config = CollectionConfigInternal { params: collection_params, optimizer_config: OptimizersConfig { deleted_threshold: 0.9, 
vacuum_min_vector_number: 1000, default_segment_number: 2, max_segment_size: Some(100_000), #[expect(deprecated)] memmap_threshold: Some(100_000), indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: Some(2), }, wal_config, hnsw_config: Default::default(), quantization_config: Default::default(), strict_mode_config: Default::default(), uuid: None, metadata: None, }; let optimizers_config = collection_config.optimizer_config.clone(); let shared_config = Arc::new(RwLock::new(collection_config)); let payload_index_schema_dir = Builder::new().prefix("qdrant-test").tempdir().unwrap(); let payload_index_schema_file = payload_index_schema_dir.path().join("payload-schema.json"); let payload_index_schema = Arc::new(SaveOnDisk::load_or_init_default(payload_index_schema_file).unwrap()); let shard = handle .block_on(LocalShard::build_local( 0, "test_collection".to_string(), storage_dir.path(), shared_config, Default::default(), payload_index_schema, handle.clone(), handle.clone(), ResourceBudget::default(), optimizers_config, )) .unwrap(); let rnd_batch = create_rnd_batch(); handle .block_on(shard.update(rnd_batch.into(), true, HwMeasurementAcc::new())) .unwrap(); (storage_dir, shard, runtime) } fn create_rnd_batch() -> CollectionUpdateOperations { let mut rng = rng(); let num_points = 2000; let dim = 100; let mut points = Vec::with_capacity(num_points); for i in 0..num_points { let mut payload_map = Map::new(); payload_map.insert("a".to_string(), (i % 5).into()); let vector = random_vector(&mut rng, dim); let vectors = only_default_vector(&vector); let point = PointStructPersisted { id: (i as u64).into(), vector: VectorStructInternal::from(vectors).into(), payload: Some(Payload(payload_map)), }; points.push(point); } CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints( PointInsertOperationsInternal::PointsList(points), )) } fn some_filters() -> Vec<Option<Filter>> { vec![ None, Some(Filter::new_must(Condition::Field( 
FieldCondition::new_match("a".parse().unwrap(), 3.into()), ))), Some(Filter::new_must(Condition::Field( FieldCondition::new_range( "a".parse().unwrap(), Range { lt: None, gt: Some(OrderedFloat(-1.)), gte: None, lte: Some(OrderedFloat(100.0)), }, ), ))), ] } /// Compare nearest neighbors query vs normal search fn batch_search_bench(c: &mut Criterion) { let (_tempdir, shard, search_runtime) = setup(); let search_runtime_handle = search_runtime.handle(); let mut group = c.benchmark_group("batch-search-bench"); let batch_size = 100; for (fid, filter) in some_filters().into_iter().enumerate() { group.bench_function(format!("query-batch-{fid}"), |b| { b.iter(|| { search_runtime.block_on(async { let mut rng = rng(); let mut searches = Vec::with_capacity(batch_size); for _i in 0..batch_size { let query = random_vector(&mut rng, 100); let search_query = ShardQueryRequest { prefetches: vec![], query: Some(ScoringQuery::Vector(query.into())), filter: filter.clone(), params: None, limit: 10, offset: 0, with_payload: WithPayloadInterface::Bool(true), with_vector: WithVector::Bool(false), score_threshold: None, }; searches.push(search_query); } let hw_acc = HwMeasurementAcc::new(); let result = shard .query_batch(Arc::new(searches), search_runtime_handle, None, hw_acc) .await .unwrap(); assert!(!result.is_empty()); }); }) }); group.bench_function(format!("search-batch-{fid}"), |b| { b.iter(|| { search_runtime.block_on(async { let mut rng = rng(); let mut searches = Vec::with_capacity(batch_size); for _i in 0..batch_size { let query = random_vector(&mut rng, 100); let search_query = SearchRequestInternal { vector: query.into(), filter: filter.clone(), params: None, limit: 10, offset: None, with_payload: Some(WithPayloadInterface::Bool(true)), with_vector: None, score_threshold: None, }; searches.push(search_query.into()); } let hw_acc = HwMeasurementAcc::new(); let search_query = CoreSearchRequestBatch { searches }; let result = shard .core_search(Arc::new(search_query), 
search_runtime_handle, None, hw_acc) .await .unwrap(); assert!(!result.is_empty()); }); }) }); } group.finish(); search_runtime.block_on(async { shard.stop_gracefully().await; }); } fn batch_rrf_query_bench(c: &mut Criterion) { let (_tempdir, shard, search_runtime) = setup(); let search_runtime_handle = search_runtime.handle(); let mut group = c.benchmark_group("batch-rrf-bench"); let batch_size = 100; for (fid, filter) in some_filters().into_iter().enumerate() { group.bench_function(format!("hybrid-query-batch-{fid}"), |b| { b.iter(|| { search_runtime.block_on(async { let mut rng = rng(); let mut searches = Vec::with_capacity(batch_size); for _i in 0..batch_size { let query1 = random_vector(&mut rng, 100); let query2 = random_vector(&mut rng, 100); let search_query = ShardQueryRequest { prefetches: vec![ ShardPrefetch { prefetches: vec![], query: Some(ScoringQuery::Vector(query1.into())), limit: 100, params: None, filter: None, score_threshold: None, }, ShardPrefetch { prefetches: vec![], query: Some(ScoringQuery::Vector(query2.into())), limit: 100, params: None, filter: None, score_threshold: None, }, ], query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))), filter: filter.clone(), params: None, limit: 10, offset: 0, with_payload: WithPayloadInterface::Bool(true), with_vector: WithVector::Bool(false), score_threshold: None, }; searches.push(search_query); } let hw_acc = HwMeasurementAcc::new(); let result = shard .query_batch(Arc::new(searches), search_runtime_handle, None, hw_acc) .await .unwrap(); assert!(!result.is_empty()); }); }) }); } group.finish(); search_runtime.block_on(async { shard.stop_gracefully().await; }); } fn batch_rescore_bench(c: &mut Criterion) { let (_tempdir, shard, search_runtime) = setup(); let search_runtime_handle = search_runtime.handle(); let mut group = c.benchmark_group("batch-rescore-bench"); let batch_size = 100; for (fid, filter) in some_filters().into_iter().enumerate() { 
group.bench_function(format!("rescore-query-batch-{fid}"), |b| { b.iter(|| { search_runtime.block_on(async { let mut rng = rng(); let mut searches = Vec::with_capacity(batch_size); for _i in 0..batch_size { let query1 = random_vector(&mut rng, 100); let query2 = random_vector(&mut rng, 100); let search_query = ShardQueryRequest { prefetches: vec![ShardPrefetch { prefetches: vec![], query: Some(ScoringQuery::Vector(query1.into())), limit: 100, params: None, filter: None, score_threshold: None, }], query: Some(ScoringQuery::Vector(query2.into())), filter: filter.clone(), params: None, limit: 10, offset: 0, with_payload: WithPayloadInterface::Bool(true), with_vector: WithVector::Bool(false), score_threshold: None, }; searches.push(search_query); } let hw_acc = HwMeasurementAcc::new(); let result = shard .query_batch(Arc::new(searches), search_runtime_handle, None, hw_acc) .await .unwrap(); assert!(!result.is_empty()); }); }) }); } group.finish(); search_runtime.block_on(async { shard.stop_gracefully().await; }); } criterion_group! { name = benches; config = Criterion::default().significance_level(0.01).sample_size(500); targets = batch_search_bench, batch_rrf_query_bench, batch_rescore_bench } criterion_main!(benches);
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/build.rs
lib/api/build.rs
use std::path::PathBuf; use std::process::Command; use std::{env, str}; use common::defaults; use tonic_build::Builder; fn main() -> std::io::Result<()> { // Ensure Qdrant version is configured correctly assert_eq!( defaults::QDRANT_VERSION.to_string(), env!("CARGO_PKG_VERSION"), "crate version does not match with defaults.rs", ); let build_out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); // Build gRPC bits from proto file tonic_build::configure() // Because we want to attach all validation rules to the generated gRPC types, we must do // so by extending the builder. This is ugly, but better than manually implementing // `Validation` for all these types and seems to be the best approach. The line below // configures all attributes. .configure_validation() .file_descriptor_set_path(build_out_dir.join("qdrant_descriptor.bin")) .out_dir("src/grpc/") // saves generated structures at this location .compile( &["src/grpc/proto/qdrant.proto"], // proto entry point &["src/grpc/proto"], // specify the root location to search proto dependencies )?; // Append trait extension imports to generated gRPC output append_to_file( "src/grpc/qdrant.rs", "use super::validate::ValidateExt;\nuse validator::Validate;", ); // Fetch git commit ID and pass it to the compiler let git_commit_id = option_env!("GIT_COMMIT_ID").map(String::from).or_else(|| { match Command::new("git").args(["rev-parse", "HEAD"]).output() { Ok(output) if output.status.success() => { Some(str::from_utf8(&output.stdout).unwrap().trim().to_string()) } _ => { println!("cargo:warning=current git commit hash could not be determined"); None } } }); if let Some(commit_id) = git_commit_id { println!("cargo:rustc-env=GIT_COMMIT_ID={commit_id}"); } Ok(()) } /// Extension to [`Builder`] to configure validation attributes. 
trait BuilderExt { fn configure_validation(self) -> Self; fn validates(self, fields: &[(&str, &str)], extra_derives: &[&str]) -> Self; fn derive_validate(self, path: &str) -> Self; fn derive_validates(self, paths: &[&str]) -> Self; fn field_validate(self, path: &str, constraint: &str) -> Self; fn field_validates(self, paths: &[(&str, &str)]) -> Self; } impl BuilderExt for Builder { fn configure_validation(self) -> Self { configure_validation(self) } fn validates(self, fields: &[(&str, &str)], extra_derives: &[&str]) -> Self { // Build list of structs to derive validation on, guess these from list of fields let mut derives = fields .iter() .map(|(field, _)| field.split_once('.').unwrap().0) .collect::<Vec<&str>>(); derives.extend(extra_derives); derives.sort_unstable(); derives.dedup(); self.derive_validates(&derives).field_validates(fields) } fn derive_validate(self, path: &str) -> Self { self.type_attribute(path, "#[derive(validator::Validate)]") } fn derive_validates(self, paths: &[&str]) -> Self { paths.iter().fold(self, |c, path| c.derive_validate(path)) } fn field_validate(self, path: &str, constraint: &str) -> Self { if constraint.is_empty() { self.field_attribute(path, "#[validate(nested)]") } else { self.field_attribute(path, format!("#[validate({constraint})]")) } } fn field_validates(self, fields: &[(&str, &str)]) -> Self { fields.iter().fold(self, |c, (path, constraint)| { c.field_validate(path, constraint) }) } } /// Configure additional attributes required for validation on generated gRPC types. /// /// These are grouped by service file. 
#[rustfmt::skip] fn configure_validation(builder: Builder) -> Builder { builder // prost_wkt_types needed for serde support .extern_path(".google.protobuf.Timestamp", "::prost_wkt_types::Timestamp") // Service: collections.proto .validates(&[ ("GetCollectionInfoRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CollectionExistsRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CreateCollection.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name\")"), ("CreateCollection.hnsw_config", ""), ("CreateCollection.wal_config", ""), ("CreateCollection.optimizers_config", ""), ("CreateCollection.vectors_config", ""), ("CreateCollection.quantization_config", ""), ("CreateCollection.shard_number", "range(min = 1)"), ("CreateCollection.replication_factor", "range(min = 1)"), ("CreateCollection.write_consistency_factor", "range(min = 1)"), ("CreateCollection.strict_mode_config", ""), ("UpdateCollection.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpdateCollection.optimizers_config", ""), ("UpdateCollection.params", ""), ("UpdateCollection.timeout", "range(min = 1)"), ("UpdateCollection.hnsw_config", ""), ("UpdateCollection.vectors_config", ""), ("UpdateCollection.quantization_config", ""), ("UpdateCollection.strict_mode_config", ""), ("CollectionParamsDiff.replication_factor", "range(min = 1)"), ("CollectionParamsDiff.write_consistency_factor", "range(min = 1)"), ("DeleteCollection.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteCollection.timeout", "range(min = 1)"), ("CollectionParams.vectors_config", ""), ("ChangeAliases.timeout", "range(min = 1)"), 
("ListCollectionAliasesRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("HnswConfigDiff.ef_construct", "range(min = 4)"), ("WalConfigDiff.wal_capacity_mb", "range(min = 1)"), ("WalConfigDiff.wal_retain_closed", "range(min = 1)"), ("OptimizersConfigDiff.deleted_threshold", "range(min = 0.0, max = 1.0)"), ("OptimizersConfigDiff.vacuum_min_vector_number", "range(min = 100)"), ("OptimizersConfigDiff.max_segment_size", "range(min = 1)"), ("VectorsConfig.config", ""), ("VectorsConfigDiff.config", ""), ("VectorParams.size", "range(min = 1, max = 65536)"), ("VectorParams.hnsw_config", ""), ("VectorParams.quantization_config", ""), ("VectorParamsMap.map", ""), ("VectorParamsDiff.hnsw_config", ""), ("VectorParamsDiff.quantization_config", ""), ("VectorParamsDiffMap.map", ""), ("QuantizationConfig.quantization", ""), ("QuantizationConfigDiff.quantization", ""), ("ScalarQuantization.quantile", "range(min = 0.5, max = 1.0)"), ("UpdateCollectionClusterSetupRequest.timeout", "range(min = 1)"), ("UpdateCollectionClusterSetupRequest.operation", ""), ("StrictModeConfig.max_query_limit", "range(min = 1)"), ("StrictModeConfig.max_timeout", "range(min = 1)"), ("StrictModeConfig.max_points_count", "range(min = 1)"), ("StrictModeConfig.read_rate_limit", "range(min = 1)"), ("StrictModeConfig.write_rate_limit", "range(min = 1)"), ("StrictModeConfig.multivector_config", ""), ("StrictModeConfig.sparse_config", ""), ("StrictModeSparseConfig.sparse_config", ""), ("StrictModeSparse.max_length", "range(min = 1)"), ("StrictModeMultivectorConfig.multivector_config", ""), ("StrictModeMultivector.max_vectors", "range(min = 1)"), ], &[ "ListCollectionsRequest", "ListAliasesRequest", "CollectionClusterInfoRequest", "UpdateCollectionClusterSetupRequest", "ProductQuantization", "BinaryQuantization", "Disabled", "QuantizationConfigDiff", "quantization_config_diff::Quantization", "Replica", "ListShardKeysRequest", ]) // 
Service: collections_internal.proto .validates(&[ ("GetCollectionInfoRequestInternal.get_collection_info_request", ""), ("InitiateShardTransferRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("WaitForShardStateRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("WaitForShardStateRequest.timeout", "range(min = 1)"), ("GetShardRecoveryPointRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpdateShardCutoffPointRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ], &[]) // Service: points.proto .validates(&[ ("AcornSearchParams.max_selectivity", "range(min = 0.0, max = 1.0)"), ("PointsSelector.points_selector_one_of", ""), ("UpsertPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpsertPoints.points", ""), ("UpsertPoints.update_filter", ""), ("DeletePoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpdatePointVectors.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpdatePointVectors.points", ""), ("UpdatePointVectors.update_filter", ""), ("DeletePointVectors.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeletePointVectors.vector_names", "length(min = 1, message = \"must specify vector names to delete\")"), ("DeletePointVectors.points_selector", ""), ("PointVectors.vectors", ""), ("GetPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), 
("SetPayloadPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SetPayloadPoints.points_selector", ""), ("DeletePayloadPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeletePayloadPoints.points_selector", ""), ("ClearPayloadPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("ClearPayloadPoints.points", ""), ("UpdateBatchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("UpdateBatchPoints.operations", "length(min = 1)"), ("CreateFieldIndexCollection.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CreateFieldIndexCollection.field_name", "length(min = 1)"), ("CreateFieldIndexCollection.field_index_params", ""), ("PayloadIndexParams.index_params", ""), ("DeleteFieldIndexCollection.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteFieldIndexCollection.field_name", "length(min = 1)"), ("SearchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SearchPoints.filter", ""), ("SearchPoints.limit", "range(min = 1)"), ("SearchPoints.params", ""), ("SearchPoints.timeout", "range(min = 1)"), ("SearchBatchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SearchBatchPoints.search_points", ""), ("SearchBatchPoints.timeout", "range(min = 1)"), ("SearchPointGroups.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SearchPointGroups.group_by", "length(min = 1)"), 
("SearchPointGroups.filter", ""), ("SearchPointGroups.params", ""), ("SearchPointGroups.group_size", "range(min = 1)"), ("SearchPointGroups.limit", "range(min = 1)"), ("SearchPointGroups.timeout", "range(min = 1)"), ("SearchParams.quantization", ""), ("SearchParams.acorn", ""), ("QuantizationSearchParams.oversampling", "range(min = 1.0)"), ("ScrollPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("ScrollPoints.filter", ""), ("ScrollPoints.limit", "range(min = 1)"), ("RecommendPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("RecommendPoints.filter", ""), ("RecommendPoints.params", ""), ("RecommendPoints.timeout", "range(min = 1)"), ("RecommendPoints.positive_vectors", ""), ("RecommendPoints.negative_vectors", ""), ("RecommendBatchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("RecommendBatchPoints.recommend_points", ""), ("RecommendBatchPoints.timeout", "range(min = 1)"), ("RecommendPointGroups.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("RecommendPointGroups.filter", ""), ("RecommendPointGroups.group_by", "length(min = 1)"), ("RecommendPointGroups.group_size", "range(min = 1)"), ("RecommendPointGroups.limit", "range(min = 1)"), ("RecommendPointGroups.params", ""), ("RecommendPointGroups.timeout", "range(min = 1)"), ("RecommendPointGroups.positive_vectors", ""), ("RecommendPointGroups.negative_vectors", ""), ("DiscoverPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DiscoverPoints.filter", ""), ("DiscoverPoints.params", ""), ("DiscoverPoints.limit", "range(min = 1)"), ("DiscoverPoints.timeout", "range(min = 1)"), ("DiscoverBatchPoints.collection_name", 
"length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DiscoverBatchPoints.discover_points", ""), ("DiscoverBatchPoints.timeout", "range(min = 1)"), ("CountPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CountPoints.filter", ""), ("GeoPolygon.exterior", "custom(function = \"crate::grpc::validate::validate_geo_polygon_exterior\")"), ("GeoPolygon.interiors", "custom(function = \"crate::grpc::validate::validate_geo_polygon_interiors\")"), ("Filter.should", ""), ("Filter.must", ""), ("Filter.must_not", ""), ("NestedCondition.filter", ""), ("Condition.condition_one_of", ""), ("Filter.min_should", ""), ("MinShould.conditions", ""), ("PointStruct.vectors", ""), ("Vectors.vectors_options", ""), ("NamedVectors.vectors", ""), ("DatetimeRange.lt", "custom(function = \"crate::grpc::validate::validate_timestamp\")"), ("DatetimeRange.gt", "custom(function = \"crate::grpc::validate::validate_timestamp\")"), ("DatetimeRange.lte", "custom(function = \"crate::grpc::validate::validate_timestamp\")"), ("DatetimeRange.gte", "custom(function = \"crate::grpc::validate::validate_timestamp\")"), ("VectorInput.variant", ""), ("RecommendInput.positive", ""), ("RecommendInput.negative", ""), ("DiscoverInput.target", ""), ("DiscoverInput.context", ""), ("ContextInputPair.positive", ""), ("ContextInputPair.negative", ""), ("ContextInput.pairs", ""), ("Formula.expression", ""), ("Expression.variant", ""), ("MultExpression.mult", ""), ("SumExpression.sum", ""), ("DivExpression.left", ""), ("DivExpression.right", ""), ("PowExpression.base", ""), ("PowExpression.exponent", ""), ("DecayParamsExpression.x", ""), ("DecayParamsExpression.target", ""), ("NearestInputWithMmr.nearest", ""), ("NearestInputWithMmr.mmr", ""), ("Mmr.diversity", "range(min = 0.0, max = 1.0)"), ("Mmr.candidates_limit", "range(max = 16_384)"), ("Rrf.k", "range(min = 1)"), ("Query.variant", 
""), ("PrefetchQuery.prefetch", ""), ("PrefetchQuery.query", ""), ("PrefetchQuery.filter", ""), ("PrefetchQuery.params", ""), ("PrefetchQuery.limit", "range(min = 1)"), ("QueryPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("QueryPoints.limit", "range(min = 1)"), ("QueryPoints.prefetch", ""), ("QueryPoints.query", ""), ("QueryPoints.filter", ""), ("QueryPoints.params", ""), ("QueryPoints.timeout", "range(min = 1)"), ("QueryBatchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("QueryBatchPoints.query_points", ""), ("QueryBatchPoints.timeout", "range(min = 1)"), ("QueryPointGroups.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("QueryPointGroups.prefetch", ""), ("QueryPointGroups.query", ""), ("QueryPointGroups.group_by", "length(min = 1)"), ("QueryPointGroups.filter", ""), ("QueryPointGroups.params", ""), ("QueryPointGroups.group_size", "range(min = 1)"), ("QueryPointGroups.limit", "range(min = 1)"), ("QueryPointGroups.timeout", "range(min = 1)"), ("FacetCounts.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("FacetCounts.key", "length(min = 1)"), ("FacetCounts.filter", ""), ("FacetCounts.timeout", "range(min = 1)"), ("SearchMatrixPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SearchMatrixPoints.filter", ""), ("SearchMatrixPoints.sample", "range(min = 2)"), ("SearchMatrixPoints.limit", "range(min = 1)"), ("SearchMatrixPoints.timeout", "range(min = 1)") ], &[]) .type_attribute(".", "#[derive(serde::Serialize)]") // Service: points_internal_service.proto .validates(&[ ("UpsertPointsInternal.upsert_points", ""), ("DeletePointsInternal.delete_points", ""), 
("UpdateVectorsInternal.update_vectors", ""), ("DeleteVectorsInternal.delete_vectors", ""), ("SetPayloadPointsInternal.set_payload_points", ""), ("DeletePayloadPointsInternal.delete_payload_points", ""), ("ClearPayloadPointsInternal.clear_payload_points", ""), ("CreateFieldIndexCollectionInternal.create_field_index_collection", ""), ("DeleteFieldIndexCollectionInternal.delete_field_index_collection", ""), ("UpdateOperation.update", ""), ("UpdateBatchInternal.operations", ""), ("SearchPointsInternal.search_points", ""), ("SearchBatchPointsInternal.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("SearchBatchPointsInternal.search_points", ""), ("CoreSearchPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CoreSearchPoints.filter", ""), ("CoreSearchPoints.limit", "range(min = 1)"), ("CoreSearchPoints.params", ""), ("CoreSearchBatchPointsInternal.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("CoreSearchBatchPointsInternal.search_points", ""), ("RecoQuery.positives", ""), ("RecoQuery.negatives", ""), ("ContextPair.positive", ""), ("ContextPair.negative", ""), ("DiscoveryQuery.target", ""), ("DiscoveryQuery.context", ""), ("ContextQuery.context", ""), ("RecommendPointsInternal.recommend_points", ""), ("ScrollPointsInternal.scroll_points", ""), ("GetPointsInternal.get_points", ""), ("CountPointsInternal.count_points", ""), ("SyncPointsInternal.sync_points", ""), ("SyncPoints.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("QueryBatchPointsInternal.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("QueryBatchPointsInternal.timeout", "range(min = 1)"), 
("FacetCountsInternal.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("FacetCountsInternal.timeout", "range(min = 1)"), ], &[]) // Service: raft_service.proto .validates(&[ ("AddPeerToKnownMessage.uri", "custom(function = \"common::validation::validate_not_empty\")"), ("AddPeerToKnownMessage.port", "range(min = 1)"), ], &[]) // Service: snapshot_service.proto .validates(&[ ("CreateSnapshotRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("ListSnapshotsRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteSnapshotRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteSnapshotRequest.snapshot_name", "length(min = 1)"), ("DeleteFullSnapshotRequest.snapshot_name", "length(min = 1)"), ("CreateShardSnapshotRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("ListShardSnapshotsRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteShardSnapshotRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("DeleteShardSnapshotRequest.snapshot_name", "length(min = 1)"), ("RecoverShardSnapshotRequest.collection_name", "length(min = 1, max = 255), custom(function = \"common::validation::validate_collection_name_legacy\")"), ("RecoverShardSnapshotRequest.snapshot_name", "length(min = 1)"), ("RecoverShardSnapshotRequest.checksum", "custom(function = \"common::validation::validate_sha256_hash\")"), ("SnapshotDescription.creation_time", "custom(function = \"crate::grpc::validate::validate_timestamp\")"), ], &[ 
"CreateFullSnapshotRequest", "ListFullSnapshotsRequest", ]) } fn append_to_file(path: &str, line: &str) { use std::io::prelude::*; #[expect(clippy::disallowed_types, reason = "build script, ok to use std::fs")] writeln!( std::fs::OpenOptions::new().append(true).open(path).unwrap(), "{line}", ) .unwrap() }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/lib.rs
lib/api/src/lib.rs
pub mod conversions; pub mod grpc; pub mod rest;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/conversions/vectors.rs
lib/api/src/conversions/vectors.rs
use itertools::Itertools; use segment::common::operation_error::OperationError; use segment::data_types::vectors::{ DenseVector, MultiDenseVectorInternal, NamedVectorStruct, VectorInternal, VectorStructInternal, }; use sparse::common::sparse_vector::SparseVector; use tonic::Status; use crate::grpc::qdrant as grpc; use crate::grpc::vector::Vector; use crate::rest::schema as rest; fn convert_to_plain_multi_vector( data: Vec<f32>, vectors_count: usize, ) -> Result<rest::MultiDenseVector, OperationError> { let dim = data.len() / vectors_count; if dim * vectors_count != data.len() { return Err(OperationError::ValidationError { description: format!( "Data length is not divisible by vectors count. Data length: {}, vectors count: {}", data.len(), vectors_count ), }); } Ok(data .into_iter() .chunks(dim) .into_iter() .map(Iterator::collect) .collect()) } impl TryFrom<rest::VectorOutput> for grpc::VectorOutput { type Error = OperationError; fn try_from(value: rest::VectorOutput) -> Result<Self, Self::Error> { let vector = match value { rest::VectorOutput::Dense(dense) => { let internal_vector = VectorInternal::from(dense); grpc::VectorOutput::from(internal_vector) } rest::VectorOutput::Sparse(sparse) => { let internal_vector = VectorInternal::from(sparse); grpc::VectorOutput::from(internal_vector) } rest::VectorOutput::MultiDense(multi) => { let internal_vector = VectorInternal::try_from(multi)?; grpc::VectorOutput::from(internal_vector) } }; Ok(vector) } } impl TryFrom<rest::VectorStructOutput> for grpc::VectorsOutput { type Error = OperationError; fn try_from( vector_struct: crate::rest::schema::VectorStructOutput, ) -> Result<Self, Self::Error> { let vectors = match vector_struct { crate::rest::schema::VectorStructOutput::Single(dense) => { let vector = VectorInternal::from(dense); Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vector( grpc::VectorOutput::from(vector), )), } } crate::rest::schema::VectorStructOutput::MultiDense(vector) => { let vector = 
VectorInternal::try_from(vector)?; Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vector( grpc::VectorOutput::from(vector), )), } } crate::rest::schema::VectorStructOutput::Named(vectors) => { let vectors: Result<_, _> = vectors .into_iter() .map(|(name, vector)| grpc::VectorOutput::try_from(vector).map(|v| (name, v))) .collect(); Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vectors( grpc::NamedVectorsOutput { vectors: vectors? }, )), } } }; Ok(vectors) } } impl From<VectorInternal> for grpc::VectorOutput { #[expect(deprecated)] fn from(vector: VectorInternal) -> Self { match vector { VectorInternal::Dense(vector) => Self { data: vector, indices: None, vectors_count: None, vector: None, }, VectorInternal::Sparse(vector) => Self { data: vector.values, indices: Some(grpc::SparseIndices { data: vector.indices, }), vectors_count: None, vector: None, }, VectorInternal::MultiDense(vector) => { let vector_count = vector.multi_vectors().count() as u32; Self { data: vector.flattened_vectors, indices: None, vectors_count: Some(vector_count), vector: None, } } } } } impl From<VectorStructInternal> for grpc::VectorsOutput { fn from(vector_struct: VectorStructInternal) -> Self { match vector_struct { VectorStructInternal::Single(vector) => { let vector = VectorInternal::from(vector); Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vector( grpc::VectorOutput::from(vector), )), } } VectorStructInternal::MultiDense(vector) => { let vector = VectorInternal::from(vector); Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vector( grpc::VectorOutput::from(vector), )), } } VectorStructInternal::Named(vectors) => Self { vectors_options: Some(grpc::vectors_output::VectorsOptions::Vectors( grpc::NamedVectorsOutput { vectors: vectors .into_iter() .map(|(name, vector)| (name, grpc::VectorOutput::from(vector))) .collect(), }, )), }, } } } impl TryFrom<grpc::Vectors> for rest::VectorStruct { type Error = Status; fn 
try_from(vectors: grpc::Vectors) -> Result<Self, Self::Error> { let grpc::Vectors { vectors_options } = vectors; match vectors_options { Some(vectors_options) => Ok(match vectors_options { grpc::vectors::VectorsOptions::Vector(vector) => { #[expect(deprecated)] let grpc::Vector { data, indices, vectors_count, vector, } = vector; if let Some(vector) = vector { return match vector { grpc::vector::Vector::Dense(dense) => { let grpc::DenseVector { data } = dense; Ok(rest::VectorStruct::Single(data)) } grpc::vector::Vector::Sparse(_sparse) => { return Err(Status::invalid_argument( "Sparse vector must be named".to_string(), )); } grpc::vector::Vector::MultiDense(multi) => { let grpc::MultiDenseVector { vectors } = multi; Ok(rest::VectorStruct::MultiDense( vectors.into_iter().map(|v| v.data).collect(), )) } grpc::vector::Vector::Document(document) => Ok( rest::VectorStruct::Document(rest::Document::try_from(document)?), ), grpc::vector::Vector::Image(image) => { Ok(rest::VectorStruct::Image(rest::Image::try_from(image)?)) } grpc::vector::Vector::Object(object) => Ok(rest::VectorStruct::Object( rest::InferenceObject::try_from(object)?, )), }; } if indices.is_some() { return Err(Status::invalid_argument( "Sparse vector must be named".to_string(), )); } if let Some(vectors_count) = vectors_count { let multi = convert_to_plain_multi_vector(data, vectors_count as usize) .map_err(|err| { Status::invalid_argument(format!( "Unable to convert to multi-dense vector: {err}" )) })?; rest::VectorStruct::MultiDense(multi) } else { rest::VectorStruct::Single(data) } } grpc::vectors::VectorsOptions::Vectors(vectors) => { let grpc::NamedVectors { vectors } = vectors; let named_vectors: Result<_, _> = vectors .into_iter() .map(|(k, v)| rest::Vector::try_from(v).map(|res| (k, res))) .collect(); rest::VectorStruct::Named(named_vectors?) 
} }), None => Err(Status::invalid_argument("No Vector Provided")), } } } impl TryFrom<grpc::Vector> for rest::Vector { type Error = Status; fn try_from(vector: grpc::Vector) -> Result<Self, Self::Error> { #[expect(deprecated)] let grpc::Vector { data, indices, vectors_count, vector, } = vector; if let Some(vector) = vector { return match vector { grpc::vector::Vector::Dense(dense) => { let grpc::DenseVector { data } = dense; Ok(rest::Vector::Dense(data)) } grpc::vector::Vector::Sparse(sparse) => Ok(rest::Vector::Sparse( sparse::common::sparse_vector::SparseVector::from(sparse), )), grpc::vector::Vector::MultiDense(multi) => { let grpc::MultiDenseVector { vectors } = multi; Ok(rest::Vector::MultiDense( vectors.into_iter().map(|v| v.data).collect(), )) } grpc::vector::Vector::Document(document) => { Ok(rest::Vector::Document(rest::Document::try_from(document)?)) } grpc::vector::Vector::Image(image) => { Ok(rest::Vector::Image(rest::Image::try_from(image)?)) } grpc::vector::Vector::Object(object) => Ok(rest::Vector::Object( rest::InferenceObject::try_from(object)?, )), }; } if let Some(indices) = indices { let grpc::SparseIndices { data: data_indices } = indices; return Ok(rest::Vector::Sparse(SparseVector { values: data, indices: data_indices, })); } if let Some(vectors_count) = vectors_count { let multi = convert_to_plain_multi_vector(data, vectors_count as usize).map_err(|err| { Status::invalid_argument(format!( "Unable to convert to multi-dense vector: {err}" )) })?; Ok(rest::Vector::MultiDense(multi)) } else { Ok(rest::Vector::Dense(data)) } } } impl grpc::MultiDenseVector { pub fn into_matrix(self) -> Vec<Vec<f32>> { self.vectors.into_iter().map(|v| v.data).collect() } } impl TryFrom<grpc::VectorOutput> for VectorInternal { type Error = OperationError; #[expect(deprecated)] fn try_from(vector: grpc::VectorOutput) -> Result<Self, Self::Error> { let grpc::VectorOutput { data, indices, vectors_count, vector, } = vector; if let Some(vector) = vector { return match 
vector { grpc::vector_output::Vector::Dense(dense) => { let grpc::DenseVector { data } = dense; Ok(VectorInternal::Dense(data)) } grpc::vector_output::Vector::Sparse(sparse) => Ok(VectorInternal::Sparse( sparse::common::sparse_vector::SparseVector::from(sparse), )), grpc::vector_output::Vector::MultiDense(multi) => Ok(VectorInternal::MultiDense( MultiDenseVectorInternal::try_from_matrix(multi.into_matrix())?, )), }; } if let Some(indices) = indices { let grpc::SparseIndices { data: data_indices } = indices; return Ok(VectorInternal::Sparse(SparseVector { values: data, indices: data_indices, })); } if let Some(vectors_count) = vectors_count { let dim = data.len() / vectors_count as usize; let multi = MultiDenseVectorInternal::try_from_flatten(data, dim)?; Ok(VectorInternal::MultiDense(multi)) } else { Ok(VectorInternal::Dense(data)) } } } impl TryFrom<grpc::VectorsOutput> for VectorStructInternal { type Error = OperationError; #[expect(deprecated)] fn try_from(vectors_output: grpc::VectorsOutput) -> Result<Self, Self::Error> { let grpc::VectorsOutput { vectors_options } = vectors_output; match vectors_options { Some(vectors_options) => Ok(match vectors_options { grpc::vectors_output::VectorsOptions::Vector(vector) => { let grpc::VectorOutput { data, indices, vectors_count, vector, } = vector; if let Some(vector) = vector { return match vector { grpc::vector_output::Vector::Dense(dense) => { let grpc::DenseVector { data } = dense; Ok(VectorStructInternal::Single(data)) } grpc::vector_output::Vector::Sparse(_sparse) => { return Err(OperationError::ValidationError { description: "Sparse vector must be named".to_string(), }); } grpc::vector_output::Vector::MultiDense(multi) => { Ok(VectorStructInternal::MultiDense( MultiDenseVectorInternal::try_from_matrix(multi.into_matrix())?, )) } }; } if indices.is_some() { return Err(OperationError::ValidationError { description: "Sparse vector must be named".to_string(), }); } if let Some(vectors_count) = vectors_count { let dim = 
data.len() / vectors_count as usize; let multi = MultiDenseVectorInternal::try_from_flatten(data, dim)?; VectorStructInternal::MultiDense(multi) } else { VectorStructInternal::Single(data) } } grpc::vectors_output::VectorsOptions::Vectors(vectors) => { let grpc::NamedVectorsOutput { vectors } = vectors; let named_vectors: Result<_, _> = vectors .into_iter() .map(|(k, v)| VectorInternal::try_from(v).map(|res| (k, res))) .collect(); VectorStructInternal::Named(named_vectors?) } }), None => Err(OperationError::ValidationError { description: "No Vector Provided".to_string(), }), } } } impl From<VectorInternal> for grpc::Vector { fn from(vector: VectorInternal) -> Self { // ToDo(v1.17): before deprecating `data`, `indices`, and `vectors_count`, ensure // that `vector` field is generated here. #[expect(deprecated)] match vector { VectorInternal::Dense(vector) => Self { data: vector, indices: None, vectors_count: None, vector: None, }, VectorInternal::Sparse(vector) => { let SparseVector { values, indices } = vector; Self { data: values, indices: Some(grpc::SparseIndices { data: indices }), vectors_count: None, vector: None, } } VectorInternal::MultiDense(vector) => { let vector_count = vector.multi_vectors().count() as u32; let MultiDenseVectorInternal { flattened_vectors, dim: _, } = vector; Self { data: flattened_vectors, indices: None, vectors_count: Some(vector_count), vector: None, } } } } } impl From<VectorStructInternal> for grpc::Vectors { fn from(vector_struct: VectorStructInternal) -> Self { match vector_struct { VectorStructInternal::Single(vector) => { let vector = VectorInternal::from(vector); Self { vectors_options: Some(grpc::vectors::VectorsOptions::Vector( grpc::Vector::from(vector), )), } } VectorStructInternal::MultiDense(vector) => { let vector = VectorInternal::from(vector); Self { vectors_options: Some(grpc::vectors::VectorsOptions::Vector( grpc::Vector::from(vector), )), } } VectorStructInternal::Named(vectors) => Self { vectors_options: 
Some(grpc::vectors::VectorsOptions::Vectors(grpc::NamedVectors { vectors: vectors .into_iter() .map(|(name, vector)| (name, grpc::Vector::from(vector))) .collect(), })), }, } } } impl TryFrom<grpc::Vector> for VectorInternal { type Error = Status; fn try_from(vector: grpc::Vector) -> Result<Self, Self::Error> { #[expect(deprecated)] let grpc::Vector { data, indices, vectors_count, vector, } = vector; if let Some(vector) = vector { return match vector { Vector::Dense(dense) => { let grpc::DenseVector { data } = dense; Ok(VectorInternal::Dense(data)) } Vector::Sparse(sparse) => Ok(VectorInternal::Sparse( sparse::common::sparse_vector::SparseVector::from(sparse), )), Vector::MultiDense(multi_dense) => Ok(VectorInternal::MultiDense( MultiDenseVectorInternal::try_from_matrix(multi_dense.into_matrix()).map_err( |e| Status::invalid_argument(format!("Malformed multi-dense vector: {e}")), )?, )), Vector::Document(_) => Err(Status::invalid_argument( "Document can't be converted to VectorInternal".to_string(), )), Vector::Image(_) => Err(Status::invalid_argument( "Image can't be converted to VectorInternal".to_string(), )), Vector::Object(_) => Err(Status::invalid_argument( "Object can't be converted to VectorInternal".to_string(), )), }; } // sparse vector if let Some(indices) = indices { let grpc::SparseIndices { data: data_indices } = indices; return Ok(VectorInternal::Sparse( SparseVector::new(data_indices, data).map_err(|e| { Status::invalid_argument(format!( "Sparse indices does not match sparse vector conditions: {e}" )) })?, )); } // multi vector if let Some(vector_count) = vectors_count { if vector_count == 0 { return Err(Status::invalid_argument( "Vector count should be greater than 0", )); } let dim = data.len() / vector_count as usize; let multi = MultiDenseVectorInternal::new(data, dim); return Ok(VectorInternal::MultiDense(multi)); } // dense vector Ok(VectorInternal::Dense(data)) } } impl From<grpc::DenseVector> for DenseVector { fn from(value: 
grpc::DenseVector) -> Self { let grpc::DenseVector { data } = value; data } } impl From<DenseVector> for grpc::DenseVector { fn from(value: DenseVector) -> Self { Self { data: value } } } impl From<SparseVector> for grpc::SparseVector { fn from(value: SparseVector) -> Self { let SparseVector { indices, values } = value; Self { values, indices } } } impl From<grpc::SparseVector> for SparseVector { fn from(value: grpc::SparseVector) -> Self { let grpc::SparseVector { indices, values } = value; Self { indices, values } } } impl From<MultiDenseVectorInternal> for grpc::MultiDenseVector { fn from(value: MultiDenseVectorInternal) -> Self { let MultiDenseVectorInternal { flattened_vectors, dim, } = value; let vectors = flattened_vectors .into_iter() .chunks(dim) .into_iter() .map(Iterator::collect::<Vec<_>>) .map(grpc::DenseVector::from) .collect(); Self { vectors } } } impl From<grpc::MultiDenseVector> for MultiDenseVectorInternal { /// Uses the equivalent of [`MultiDenseVectorInternal::new_unchecked`], but rewritten to avoid collecting twice fn from(value: grpc::MultiDenseVector) -> Self { let grpc::MultiDenseVector { vectors } = value; let dim = vectors[0].data.len(); let inner_vector = vectors.into_iter().flat_map(DenseVector::from).collect(); Self { flattened_vectors: inner_vector, dim, } } } impl From<VectorInternal> for grpc::RawVector { fn from(value: VectorInternal) -> Self { use crate::grpc::qdrant::raw_vector::Variant; let variant = match value { VectorInternal::Dense(vector) => Variant::Dense(grpc::DenseVector::from(vector)), VectorInternal::Sparse(vector) => Variant::Sparse(grpc::SparseVector::from(vector)), VectorInternal::MultiDense(vector) => { Variant::MultiDense(grpc::MultiDenseVector::from(vector)) } }; Self { variant: Some(variant), } } } impl TryFrom<grpc::RawVector> for VectorInternal { type Error = Status; fn try_from(value: grpc::RawVector) -> Result<Self, Self::Error> { use crate::grpc::qdrant::raw_vector::Variant; let grpc::RawVector { variant } 
= value; let variant = variant.ok_or_else(|| Status::invalid_argument("No vector variant provided"))?; let vector = match variant { Variant::Dense(dense) => VectorInternal::Dense(DenseVector::from(dense)), Variant::Sparse(sparse) => { VectorInternal::Sparse(sparse::common::sparse_vector::SparseVector::from(sparse)) } Variant::MultiDense(multi_dense) => { VectorInternal::MultiDense(MultiDenseVectorInternal::from(multi_dense)) } }; Ok(vector) } } impl From<NamedVectorStruct> for grpc::RawVector { fn from(value: NamedVectorStruct) -> Self { Self::from(value.to_vector()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/conversions/json.rs
lib/api/src/conversions/json.rs
//! Conversions between `serde_json` values and the protobuf `Value`/`Struct`
//! types used on the gRPC wire, plus payload helpers built on top of them.

use std::collections::HashMap;

use segment::json_path::JsonPath;
use tonic::Status;

use crate::grpc::qdrant::value::Kind;
use crate::grpc::qdrant::{ListValue, Struct, Value};

/// Convert a payload into a map of protobuf `Value`s (infallible direction).
pub fn payload_to_proto(payload: segment::types::Payload) -> HashMap<String, Value> {
    payload
        .into_iter()
        .map(|(k, v)| (k, json_to_proto(v)))
        .collect()
}

/// Convert a plain JSON dictionary into a map of protobuf `Value`s.
pub fn dict_to_proto(dict: HashMap<String, serde_json::Value>) -> HashMap<String, Value> {
    dict.into_iter()
        .map(|(k, v)| (k, json_to_proto(v)))
        .collect()
}

/// Recursively convert a JSON value into a protobuf `Value`.
///
/// Numbers that fit `i64` become `IntegerValue`; everything else becomes
/// `DoubleValue` (the `unwrap` is safe: a `serde_json::Number` that is not an
/// `i64` always yields `Some` from `as_f64`).
pub fn json_to_proto(json_value: serde_json::Value) -> Value {
    let kind = match json_value {
        serde_json::Value::Null => Kind::NullValue(0),
        serde_json::Value::Bool(v) => Kind::BoolValue(v),
        serde_json::Value::Number(n) => {
            if let Some(int) = n.as_i64() {
                Kind::IntegerValue(int)
            } else {
                Kind::DoubleValue(n.as_f64().unwrap())
            }
        }
        serde_json::Value::String(s) => Kind::StringValue(s),
        serde_json::Value::Array(v) => Kind::ListValue(ListValue {
            values: v.into_iter().map(json_to_proto).collect(),
        }),
        serde_json::Value::Object(m) => Kind::StructValue(Struct {
            fields: m.into_iter().map(|(k, v)| (k, json_to_proto(v))).collect(),
        }),
    };
    Value { kind: Some(kind) }
}

/// Parse a JSON path string from the wire, mapping failures to
/// `InvalidArgument`.
pub fn json_path_from_proto(a: &str) -> Result<JsonPath, Status> {
    JsonPath::try_from(a)
        .map_err(|_| Status::invalid_argument(format!("Invalid json path: \'{a}\'")))
}

/// Convert a protobuf value map into a payload; fails on the first value
/// that cannot be represented as JSON (e.g. NaN doubles).
pub fn proto_to_payloads(proto: HashMap<String, Value>) -> Result<segment::types::Payload, Status> {
    proto
        .into_iter()
        .map(|(k, v)| proto_to_json(v).map(|json| (k, json)))
        .collect::<Result<serde_json::Map<_, _>, _>>()
        .map(segment::types::Payload)
}

/// Convert a protobuf value map into a plain JSON dictionary.
pub fn proto_dict_to_json(
    proto: HashMap<String, Value>,
) -> Result<HashMap<String, serde_json::Value>, Status> {
    proto
        .into_iter()
        .map(|(k, v)| proto_to_json(v).map(|json| (k, json)))
        .collect::<Result<_, _>>()
}

/// Recursively convert a protobuf `Value` into a JSON value.
///
/// A missing `kind` maps to JSON `null` (`serde_json::Value::default()`);
/// non-finite doubles (NaN/inf) are rejected with `InvalidArgument` because
/// JSON cannot represent them.
pub fn proto_to_json(proto: Value) -> Result<serde_json::Value, Status> {
    let Some(kind) = proto.kind else {
        return Ok(serde_json::Value::default());
    };
    let json_value = match kind {
        Kind::NullValue(_) => serde_json::Value::Null,
        Kind::DoubleValue(n) => {
            let Some(v) = serde_json::Number::from_f64(n) else {
                return Err(Status::invalid_argument("cannot convert to json number"));
            };
            serde_json::Value::Number(v)
        }
        Kind::IntegerValue(i) => serde_json::Value::Number(i.into()),
        Kind::StringValue(s) => serde_json::Value::String(s),
        Kind::BoolValue(b) => serde_json::Value::Bool(b),
        Kind::StructValue(s) => serde_json::Value::Object(
            s.fields
                .into_iter()
                .map(|(k, v)| proto_to_json(v).map(|json| (k, json)))
                .collect::<Result<_, _>>()?,
        ),
        Kind::ListValue(l) => serde_json::Value::Array(
            l.values
                .into_iter()
                .map(proto_to_json)
                .collect::<Result<_, _>>()?,
        ),
    };
    Ok(json_value)
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use super::*;
    use crate::grpc::qdrant::value::Kind;
    use crate::grpc::qdrant::{Struct, Value};

    /// Build a matching pair of JSON and protobuf maps covering every `Kind`.
    fn gen_proto_json_dicts() -> (HashMap<String, serde_json::Value>, HashMap<String, Value>) {
        let raw_json = r#" { "f64": 1.0, "i64": 1, "string": "s", "bool": true, "struct": {"i64": 1}, "list": [1,2], "null": null }"#;

        let values = vec![
            ("null", Kind::NullValue(0)),
            ("f64", Kind::DoubleValue(1.0)),
            ("i64", Kind::IntegerValue(1)),
            ("string", Kind::StringValue("s".to_string())),
            ("bool", Kind::BoolValue(true)),
            (
                "struct",
                Kind::StructValue(Struct {
                    fields: HashMap::from([(
                        "i64".to_string(),
                        Value {
                            kind: Some(Kind::IntegerValue(1)),
                        },
                    )]),
                }),
            ),
            (
                "list",
                Kind::ListValue(ListValue {
                    values: vec![
                        Value {
                            kind: Some(Kind::IntegerValue(1)),
                        },
                        Value {
                            kind: Some(Kind::IntegerValue(2)),
                        },
                    ],
                }),
            ),
        ];

        let json_map: HashMap<String, serde_json::Value> = serde_json::from_str(raw_json).unwrap();
        let proto_map: HashMap<String, Value> = values
            .into_iter()
            .map(|(k, v)| (k.to_string(), Value { kind: Some(v) }))
            .collect();
        (json_map, proto_map)
    }

    #[test]
    fn test_dict_to_prot() {
        let (json_map, proto_map) = gen_proto_json_dicts();
        assert_eq!(dict_to_proto(json_map), proto_map);
    }

    #[test]
    fn test_proto_dict_to_json() {
        let (mut json_map, mut proto_map) = gen_proto_json_dicts();
        // A `Value` without a kind must round-trip to JSON null.
        proto_map.insert("unknown".to_string(), Value { kind: None });
        json_map.insert("unknown".to_string(), serde_json::Value::default());
        let got_json_map = proto_dict_to_json(proto_map);
        assert!(got_json_map.is_ok());
        assert_eq!(got_json_map.unwrap(), json_map);
    }

    #[test]
    fn test_proto_payload() {
        let (json_map, proto_map) = gen_proto_json_dicts();
        let payload: serde_json::Map<String, serde_json::Value> =
            json_map.into_iter().collect();
        let payload = segment::types::Payload(payload);

        // proto to payload
        let got_json_payload = proto_to_payloads(proto_map.clone());
        assert!(got_json_payload.is_ok());
        assert_eq!(got_json_payload.unwrap(), payload);

        // payload to proto
        let proto = payload_to_proto(payload);
        assert_eq!(proto, proto_map);
    }

    #[test]
    fn test_proto_to_json_invalid_struct_value() {
        // NaN inside a nested struct must surface as InvalidArgument.
        let proto = Value {
            kind: Some(Kind::StructValue(Struct {
                fields: HashMap::from([
                    (
                        "int".to_string(),
                        Value {
                            kind: Some(Kind::IntegerValue(1)),
                        },
                    ),
                    (
                        "nan".to_string(),
                        Value {
                            kind: Some(Kind::DoubleValue(f64::NAN)),
                        },
                    ),
                ]),
            })),
        };
        let result = proto_to_json(proto);
        assert!(result.is_err());
        assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn test_proto_to_json_invalid_list_value() {
        // NaN inside a list must surface as InvalidArgument.
        let proto = Value {
            kind: Some(Kind::ListValue(ListValue {
                values: vec![
                    Value {
                        kind: Some(Kind::IntegerValue(1)),
                    },
                    Value {
                        kind: Some(Kind::DoubleValue(f64::NAN)),
                    },
                ],
            })),
        };
        let result = proto_to_json(proto);
        assert!(result.is_err());
        assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/conversions/mod.rs
lib/api/src/conversions/mod.rs
//! Conversion helpers between REST, gRPC, and internal representations.

pub mod inference; // Document / Image / InferenceObject conversions
pub mod json; // JSON <-> protobuf `Value` conversions
pub mod vectors; // vector type conversions
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/conversions/inference.rs
lib/api/src/conversions/inference.rs
//! Conversions between the REST and gRPC inference input types
//! (`Document`, `Image`, `InferenceObject`).

use tonic::Status;

use crate::conversions::json::{dict_to_proto, json_to_proto, proto_dict_to_json, proto_to_json};
use crate::grpc::qdrant as grpc;
use crate::rest::{DocumentOptions, Options, schema as rest};

impl From<rest::Document> for grpc::Document {
    /// REST -> gRPC document; absent options become an empty proto map.
    fn from(document: rest::Document) -> Self {
        let rest::Document {
            text,
            model,
            options,
        } = document;
        Self {
            text,
            model,
            options: options
                .map(DocumentOptions::into_options)
                .map(dict_to_proto)
                .unwrap_or_default(),
        }
    }
}

impl TryFrom<grpc::Document> for rest::Document {
    type Error = Status;

    /// gRPC -> REST document; fails if an option value cannot be converted
    /// to JSON (e.g. NaN doubles).
    ///
    /// NOTE(review): this always wraps options in `Some(Common(..))`, even
    /// when the proto map is empty — so the round-trip is not symmetrical
    /// with the `From` impl above; presumably intentional, confirm.
    fn try_from(document: grpc::Document) -> Result<Self, Self::Error> {
        let grpc::Document {
            text,
            model,
            options,
        } = document;
        Ok(Self {
            text,
            model,
            options: Some(DocumentOptions::Common(proto_dict_to_json(options)?)),
        })
    }
}

impl From<rest::Image> for grpc::Image {
    /// REST -> gRPC image; the image payload is carried as a proto `Value`.
    fn from(image: rest::Image) -> Self {
        let rest::Image {
            image,
            model,
            options,
        } = image;
        Self {
            image: Some(json_to_proto(image)),
            model,
            options: options.options.map(dict_to_proto).unwrap_or_default(),
        }
    }
}

impl TryFrom<grpc::Image> for rest::Image {
    type Error = Status;

    /// gRPC -> REST image; rejects a missing image payload.
    fn try_from(image: grpc::Image) -> Result<Self, Self::Error> {
        let grpc::Image {
            image,
            model,
            options,
        } = image;
        let image = image.ok_or_else(|| Status::invalid_argument("Empty image is not allowed"))?;
        Ok(Self {
            image: proto_to_json(image)?,
            model,
            options: Options {
                options: Some(proto_dict_to_json(options)?),
            },
        })
    }
}

impl From<rest::InferenceObject> for grpc::InferenceObject {
    /// REST -> gRPC inference object; mirrors the `Image` conversion shape.
    fn from(object: rest::InferenceObject) -> Self {
        let rest::InferenceObject {
            object,
            model,
            options,
        } = object;
        Self {
            object: Some(json_to_proto(object)),
            model,
            options: options.options.map(dict_to_proto).unwrap_or_default(),
        }
    }
}

impl TryFrom<grpc::InferenceObject> for rest::InferenceObject {
    type Error = Status;

    /// gRPC -> REST inference object; rejects a missing object payload.
    fn try_from(object: grpc::InferenceObject) -> Result<Self, Self::Error> {
        let grpc::InferenceObject {
            object,
            model,
            options,
        } = object;
        let object = object.ok_or_else(|| Status::invalid_argument("Empty object is not allowed"))?;
        Ok(Self {
            object: proto_to_json(object)?,
            model,
            options: Options {
                options: Some(proto_dict_to_json(options)?),
            },
        })
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/dynamic_channel_pool.rs
lib/api/src/grpc/dynamic_channel_pool.rs
//! A dynamically sized pool of gRPC channels for a single URI.
//!
//! Channels are cheap to clone; the pool hands out [`CountedItem`] handles so
//! usage per channel can be tracked and new channels created on demand.

use std::time::Duration;

use parking_lot::Mutex;
use tonic::transport::{Channel, ClientTlsConfig, Error as TonicError, Uri};

use crate::grpc::dynamic_pool::{CountedItem, DynamicPool};

/// Establish a single gRPC channel to `uri` with the given request and
/// connect timeouts, optionally with TLS.
pub async fn make_grpc_channel(
    timeout: Duration,
    connection_timeout: Duration,
    uri: Uri,
    tls_config: Option<ClientTlsConfig>,
) -> Result<Channel, TonicError> {
    let mut endpoint = Channel::builder(uri)
        .timeout(timeout)
        .connect_timeout(connection_timeout);
    if let Some(config) = tls_config {
        endpoint = endpoint.tls_config(config)?;
    }
    // `connect` is using the `Reconnect` network service internally to handle dropped connections
    endpoint.connect().await
}

/// Pool of channels to a single URI, growing on demand.
pub struct DynamicChannelPool {
    // Inner pool guarded by a sync mutex; held only for short, non-await spans.
    pool: Mutex<DynamicPool<Channel>>,
    // Connection parameters reused whenever a new channel must be created.
    uri: Uri,
    timeout: Duration,
    connection_timeout: Duration,
    tls_config: Option<ClientTlsConfig>,
}

impl DynamicChannelPool {
    /// Create a pool with `min_channels` channels pre-connected.
    ///
    /// Fails if any of the initial connections cannot be established.
    pub async fn new(
        uri: Uri,
        timeout: Duration,
        connection_timeout: Duration,
        tls_config: Option<ClientTlsConfig>,
        usage_per_channel: usize,
        min_channels: usize,
    ) -> Result<Self, TonicError> {
        let mut channels = Vec::with_capacity(min_channels);
        for _ in 0..min_channels {
            let channel =
                make_grpc_channel(timeout, connection_timeout, uri.clone(), tls_config.clone())
                    .await?;
            channels.push(channel);
        }
        let pool = DynamicPool::new(channels, usage_per_channel, min_channels);
        Ok(Self {
            pool: Mutex::new(pool),
            uri,
            timeout,
            connection_timeout,
            tls_config,
        })
    }

    /// Pick a channel from the pool, creating and registering a new one if
    /// the pool has none to offer.
    ///
    /// NOTE(review): the lock is released while connecting, so concurrent
    /// callers may each create a channel — presumably acceptable overgrowth;
    /// confirm against `DynamicPool::add` semantics.
    pub async fn choose(&self) -> Result<CountedItem<Channel>, TonicError> {
        let channel = self.pool.lock().choose();
        let channel = match channel {
            None => {
                let channel = make_grpc_channel(
                    self.timeout,
                    self.connection_timeout,
                    self.uri.clone(),
                    self.tls_config.clone(),
                )
                .await?;
                self.pool.lock().add(channel)
            }
            Some(channel) => channel,
        };
        Ok(channel)
    }

    /// Remove a channel from the pool (e.g. after it is deemed unhealthy).
    pub fn drop_channel(&self, channel: CountedItem<Channel>) {
        self.pool.lock().drop_item(channel);
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/transport_channel_pool.rs
lib/api/src/grpc/transport_channel_pool.rs
use std::collections::HashMap; use std::future::Future; use std::num::NonZeroUsize; use std::time::Duration; use rand::{Rng, rng}; use tokio::select; use tonic::codegen::InterceptedService; use tonic::service::Interceptor; use tonic::transport::{Channel, ClientTlsConfig, Error as TonicError, Uri}; use tonic::{Code, Request, Status}; use crate::grpc::dynamic_channel_pool::DynamicChannelPool; use crate::grpc::dynamic_pool::CountedItem; use crate::grpc::qdrant::HealthCheckRequest; use crate::grpc::qdrant::qdrant_client::QdrantClient; /// Maximum lifetime of a gRPC channel. /// /// Using 1 day (24 hours) because the request with the longest timeout currently uses the same /// timeout value. Namely the shard recovery call used in shard snapshot transfer. pub const MAX_GRPC_CHANNEL_TIMEOUT: Duration = Duration::from_secs(24 * 60 * 60); pub const DEFAULT_GRPC_TIMEOUT: Duration = Duration::from_secs(60); pub const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_secs(2); pub const DEFAULT_POOL_SIZE: usize = 2; /// Allow a large number of connections per channel, that is close to the limit of /// `http2_max_pending_accept_reset_streams` that we configure to minimize the chance of /// GOAWAY/ENHANCE_YOUR_CALM errors from occurring. 
/// More info: <https://github.com/qdrant/qdrant/issues/1907> const MAX_CONNECTIONS_PER_CHANNEL: usize = 1024; pub const DEFAULT_RETRIES: usize = 2; const DEFAULT_BACKOFF: Duration = Duration::from_millis(100); /// How long to wait for response from server, before checking health of the server const SMART_CONNECT_INTERVAL: Duration = Duration::from_secs(1); /// There is no indication, that health-check API is affected by high parallel load /// So we can use small timeout for health-check const HEALTH_CHECK_TIMEOUT: Duration = Duration::from_secs(2); /// Try to recreate channel, if there were no successful requests within this time const CHANNEL_TTL: Duration = Duration::from_secs(5); #[derive(thiserror::Error, Debug)] pub enum RequestError<E: std::error::Error> { #[error("Error in closure supplied to transport channel pool: {0}")] FromClosure(E), #[error("Tonic error: {0}")] Tonic(#[from] TonicError), } enum RetryAction { Fail(Status), RetryOnce(Status), RetryWithBackoff(Status), RetryImmediately(Status), } #[derive(Debug)] enum HealthCheckError { NoChannel, ConnectionError(TonicError), RequestError(Status), } #[derive(Debug)] enum RequestFailure { HealthCheck(HealthCheckError), RequestError(Status), RequestConnection(TonicError), } /// Intercepts gRPC requests and adds a default timeout if it wasn't already set. pub struct AddTimeout { default_timeout: Duration, } impl AddTimeout { pub fn new(default_timeout: Duration) -> Self { Self { default_timeout } } } impl Interceptor for AddTimeout { fn call(&mut self, mut request: Request<()>) -> Result<Request<()>, Status> { if request.metadata().get("grpc-timeout").is_none() { request.set_timeout(self.default_timeout); } Ok(request) } } /// Holds a pool of channels established for a set of URIs. /// Channel are shared by cloning them. /// Make the `pool_size` larger to increase throughput. 
pub struct TransportChannelPool {
    /// Channel pools, one per peer URI; entries are created lazily on first use.
    uri_to_pool: tokio::sync::RwLock<HashMap<Uri, DynamicChannelPool>>,
    /// Pool size forwarded to `DynamicChannelPool::new`.
    pool_size: NonZeroUsize,
    /// gRPC request timeout; combined with `connection_timeout` it forms the
    /// default overall deadline in `with_channel_timeout`.
    grpc_timeout: Duration,
    /// Connection-establishment timeout forwarded to `DynamicChannelPool::new`.
    connection_timeout: Duration,
    /// Optional client TLS configuration forwarded to `DynamicChannelPool::new`.
    tls_config: Option<ClientTlsConfig>,
}

impl Default for TransportChannelPool {
    fn default() -> Self {
        Self {
            uri_to_pool: tokio::sync::RwLock::new(HashMap::new()),
            pool_size: NonZeroUsize::new(DEFAULT_POOL_SIZE).unwrap(),
            grpc_timeout: DEFAULT_GRPC_TIMEOUT,
            connection_timeout: DEFAULT_CONNECT_TIMEOUT,
            tls_config: None,
        }
    }
}

impl TransportChannelPool {
    /// Create a pool with explicit timeouts, pool size and optional TLS config.
    ///
    /// Panics if `pool_size` is zero (enforced via `NonZeroUsize::new(..).unwrap()`).
    pub fn new(
        p2p_grpc_timeout: Duration,
        connection_timeout: Duration,
        pool_size: usize,
        tls_config: Option<ClientTlsConfig>,
    ) -> Self {
        Self {
            uri_to_pool: Default::default(),
            grpc_timeout: p2p_grpc_timeout,
            connection_timeout,
            pool_size: NonZeroUsize::new(pool_size).unwrap(),
            tls_config,
        }
    }

    /// Build a fresh `DynamicChannelPool` for `uri` using this pool's settings.
    async fn _init_pool_for_uri(&self, uri: Uri) -> Result<DynamicChannelPool, TonicError> {
        DynamicChannelPool::new(
            uri,
            MAX_GRPC_CHANNEL_TIMEOUT,
            self.connection_timeout,
            self.tls_config.clone(),
            MAX_CONNECTIONS_PER_CHANNEL,
            self.pool_size.get(),
        )
        .await
    }

    /// Initialize a pool for the URI and return a clone of the first channel.
    /// Does not fail if the pool already exists.
    async fn init_pool_for_uri(&self, uri: Uri) -> Result<CountedItem<Channel>, TonicError> {
        // Write lock: we may insert a new pool for this URI.
        let mut guard = self.uri_to_pool.write().await;
        match guard.get_mut(&uri) {
            None => {
                let channels = self._init_pool_for_uri(uri.clone()).await?;
                let channel = channels.choose().await?;
                guard.insert(uri, channels);
                Ok(channel)
            }
            // Another caller created the pool first — just pick a channel from it.
            Some(channels) => channels.choose().await,
        }
    }

    /// Remove the whole channel pool for `uri` (e.g. when the peer is gone).
    pub async fn drop_pool(&self, uri: &Uri) {
        let mut guard = self.uri_to_pool.write().await;
        guard.remove(uri);
    }

    /// Drop a single channel from the pool of `uri`, if that pool still exists.
    pub async fn drop_channel(&self, uri: &Uri, channel: CountedItem<Channel>) {
        let guard = self.uri_to_pool.read().await;
        if let Some(pool) = guard.get(uri) {
            pool.drop_channel(channel);
        }
    }

    /// Pick a channel for `uri` from an existing pool.
    /// Returns `None` when no pool has been created for this URI yet.
    async fn get_pooled_channel(
        &self,
        uri: &Uri,
    ) -> Option<Result<CountedItem<Channel>, TonicError>> {
        let guard = self.uri_to_pool.read().await;
        match guard.get(uri) {
            None => None,
            Some(channels) => Some(channels.choose().await),
        }
    }

    /// Pick a channel for `uri`, creating the pool on first use.
    async fn get_or_create_pooled_channel(
        &self,
        uri: &Uri,
    ) -> Result<CountedItem<Channel>, TonicError> {
        match self.get_pooled_channel(uri).await {
            None => self.init_pool_for_uri(uri.clone()).await,
            Some(channel) => channel,
        }
    }

    /// Checks if the channel is still alive.
    ///
    /// It uses duplicate "fast" channel, equivalent to the original, but with smaller timeout.
    /// If it can't get healthcheck response in the timeout, it assumes the channel is dead.
    /// And we need to drop the pool for the uri and try again.
    /// For performance reasons, we start the check only after `SMART_CONNECT_INTERVAL`.
    ///
    /// This future only completes with an error; while the peer stays healthy it
    /// loops forever, which is what `make_request` relies on in its `select!`.
    async fn check_connectability(&self, uri: &Uri) -> HealthCheckError {
        loop {
            tokio::time::sleep(SMART_CONNECT_INTERVAL).await;
            let channel = self.get_pooled_channel(uri).await;
            match channel {
                None => return HealthCheckError::NoChannel,
                Some(Err(tonic_error)) => return HealthCheckError::ConnectionError(tonic_error),
                Some(Ok(channel)) => {
                    let mut client = QdrantClient::new(channel.item().clone());

                    let resp: Result<_, Status> = select! {
                        res = client.health_check(HealthCheckRequest {}) => {
                            res
                        }
                        _ = tokio::time::sleep(HEALTH_CHECK_TIMEOUT) => {
                            // Current healthcheck timed out, but maybe there were other requests
                            // that succeeded in a given time window.
                            // If so, we can continue watching.
                            if channel.last_success_age() > HEALTH_CHECK_TIMEOUT {
                                return HealthCheckError::RequestError(Status::deadline_exceeded(format!("Healthcheck timeout {}ms exceeded", HEALTH_CHECK_TIMEOUT.as_millis())))
                            } else {
                                continue;
                            }
                        }
                    };

                    match resp {
                        Ok(_) => {
                            channel.report_success();
                            // continue watching
                        }
                        Err(status) => return HealthCheckError::RequestError(status),
                    }
                }
            }
        }
    }

    /// Run `f` once over a channel to `uri`, racing the request against the
    /// connectivity watchdog. On failure, drops the channel (or keeps it,
    /// depending on how recently it last succeeded) and reports the failure class.
    async fn make_request<T, O: Future<Output = Result<T, Status>>>(
        &self,
        uri: &Uri,
        f: &impl Fn(InterceptedService<Channel, AddTimeout>) -> O,
        timeout: Duration,
    ) -> Result<T, RequestFailure> {
        let channel = match self.get_or_create_pooled_channel(uri).await {
            Ok(channel) => channel,
            Err(tonic_error) => {
                return Err(RequestFailure::RequestConnection(tonic_error));
            }
        };

        // Wrap the channel so requests without an explicit deadline get `timeout`.
        let intercepted_channel =
            InterceptedService::new(channel.item().clone(), AddTimeout::new(timeout));

        let result: RequestFailure = select! {
            res = f(intercepted_channel) => {
                match res {
                    Ok(body) => {
                        channel.report_success();
                        return Ok(body);
                    },
                    Err(err) => RequestFailure::RequestError(err)
                }
            }
            res = self.check_connectability(uri) => {
                RequestFailure::HealthCheck(res)
            }
        };

        // After this point the request is not successful, but we can try to recover
        let last_success_age = channel.last_success_age();
        if last_success_age > CHANNEL_TTL {
            // There were no successful requests for a long time, we can try to reconnect
            // It might be possible that server died and changed its ip address
            self.drop_channel(uri, channel).await;
        } else {
            // We don't need this channel anymore, drop before waiting for the backoff
            drop(channel);
        }

        Err(result)
    }

    /// Allows using a channel to `uri`. If there are no channels to the specified
    /// uri — they will be created.
    ///
    /// Retries the request according to the failure class:
    /// fail fast, retry immediately, retry once, or retry with exponential backoff
    /// (capped by `timeout`, default `grpc_timeout + connection_timeout`).
    pub async fn with_channel_timeout<T, O: Future<Output = Result<T, Status>>>(
        &self,
        uri: &Uri,
        f: impl Fn(InterceptedService<Channel, AddTimeout>) -> O,
        timeout: Option<Duration>,
        retries: usize,
    ) -> Result<T, RequestError<Status>> {
        let mut retries_left = retries;
        let mut attempt = 0;
        let max_timeout = timeout.unwrap_or_else(|| self.grpc_timeout + self.connection_timeout);

        loop {
            let request_result: Result<T, _> = self.make_request(uri, &f, max_timeout).await;

            let error_result = match request_result {
                Ok(body) => return Ok(body),
                Err(err) => err,
            };

            // Classify the failure into a retry decision.
            let action = match error_result {
                RequestFailure::HealthCheck(healthcheck_error) => {
                    match healthcheck_error {
                        HealthCheckError::NoChannel => {
                            // The channel pool was dropped during the request processing.
                            // Meaning that the peer is not available anymore.
                            // So we can just fail the request.
                            RetryAction::Fail(Status::unavailable(format!(
                                "Peer {uri} is not available"
                            )))
                        }
                        HealthCheckError::ConnectionError(error) => {
                            // Can't establish connection to the server during the healthcheck.
                            // Possible situation:
                            // - Server was killed during the request processing and request timed out.
                            // Actions:
                            // - retry no backoff
                            RetryAction::RetryImmediately(Status::unavailable(format!(
                                "Failed to connect to {uri}, error: {error}"
                            )))
                        }
                        HealthCheckError::RequestError(status) => {
                            // Channel might be unavailable or overloaded.
                            // Or server might be dead.
                            RetryAction::RetryWithBackoff(status)
                        }
                    }
                }
                RequestFailure::RequestError(status) => {
                    match status.code() {
                        Code::Cancelled | Code::Unavailable => {
                            // Possible situations:
                            // - Server is frozen and will never respond.
                            // - Server is overloaded and will respond in the future.
                            RetryAction::RetryWithBackoff(status)
                        }
                        Code::Internal => {
                            // Something is broken, but let's retry anyway, but only once.
                            RetryAction::RetryOnce(status)
                        }
                        _ => {
                            // No special handling, just fail already.
                            RetryAction::Fail(status)
                        }
                    }
                }
                RequestFailure::RequestConnection(error) => {
                    // Can't establish connection to the server during the request.
                    // Possible situation:
                    // - Server is killed
                    // - Server is overloaded
                    // Actions:
                    // - retry with backoff
                    RetryAction::RetryWithBackoff(Status::unavailable(format!(
                        "Failed to connect to {uri}, error: {error}"
                    )))
                }
            };

            let (backoff_time, fallback_status) = match action {
                RetryAction::Fail(err) => return Err(RequestError::FromClosure(err)),
                RetryAction::RetryImmediately(fallback_status) => (Duration::ZERO, fallback_status),
                RetryAction::RetryWithBackoff(fallback_status) => {
                    // Calculate backoff: exponential in attempt number, plus 0-99ms jitter.
                    let backoff = DEFAULT_BACKOFF * 2u32.pow(attempt as u32)
                        + Duration::from_millis(rng().random_range(0..100));
                    if backoff > max_timeout {
                        // We can't wait for the request any longer, return the error as is
                        return Err(RequestError::FromClosure(fallback_status));
                    }
                    (backoff, fallback_status)
                }
                RetryAction::RetryOnce(fallback_status) => {
                    // Cap remaining attempts at one more try.
                    if retries_left > 1 {
                        retries_left = 1;
                    }
                    (Duration::ZERO, fallback_status)
                }
            };

            attempt += 1;
            if retries_left == 0 {
                return Err(RequestError::FromClosure(fallback_status));
            }
            retries_left = retries_left.saturating_sub(1);

            // Wait for the backoff
            tokio::time::sleep(backoff_time).await;
        }
    }

    /// Allows using a channel to `uri`. If there are no channels to the specified
    /// uri — they will be created. Uses the default timeout and `DEFAULT_RETRIES`.
    pub async fn with_channel<T, O: Future<Output = Result<T, Status>>>(
        &self,
        uri: &Uri,
        f: impl Fn(InterceptedService<Channel, AddTimeout>) -> O,
    ) -> Result<T, RequestError<Status>> {
        self.with_channel_timeout(uri, f, None, DEFAULT_RETRIES)
            .await
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/dynamic_pool.rs
lib/api/src/grpc/dynamic_pool.rs
use std::collections::HashMap; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::{Duration, Instant}; use rand::Rng; #[derive(Debug)] struct ItemWithStats<T: Clone> { pub item: T, pub usage: AtomicUsize, pub last_success: AtomicUsize, } impl<T: Clone> ItemWithStats<T> { fn new(item: T, last_used_since: usize) -> Self { Self { item, usage: AtomicUsize::new(0), last_success: AtomicUsize::new(last_used_since), } } } pub struct DynamicPool<T: Clone> { items: HashMap<u64, Arc<ItemWithStats<T>>>, /// How many times one item can be used max_usage_per_item: usize, /// Minimal number of items in the pool min_items: usize, /// Instant when the pool was created init_at: Instant, } pub struct CountedItem<T: Clone> { item: Arc<ItemWithStats<T>>, item_id: u64, init_at: Instant, } impl<T: Clone> CountedItem<T> { fn new(item_id: u64, item: Arc<ItemWithStats<T>>, init_at: Instant) -> Self { item.usage.fetch_add(1, Ordering::Relaxed); Self { item, item_id, init_at, } } pub fn item(&self) -> &T { &self.item.item } pub fn report_success(&self) { let time_since_init = Instant::now().duration_since(self.init_at).as_millis() as usize; self.item .last_success .store(time_since_init, Ordering::Relaxed); } pub fn last_success_age(&self) -> Duration { let time_since_init = Instant::now().duration_since(self.init_at).as_millis() as usize; let time_since_last_success = self.item.last_success.load(Ordering::Relaxed); Duration::from_millis((time_since_init - time_since_last_success) as u64) } } impl<T: Clone> Drop for CountedItem<T> { fn drop(&mut self) { self.item.usage.fetch_sub(1, Ordering::Relaxed); } } impl<T: Clone> DynamicPool<T> { fn random_idx() -> u64 { rand::rng().random() } pub fn new(items: Vec<T>, max_usage_per_item: usize, min_items: usize) -> Self { debug_assert!(max_usage_per_item > 0); debug_assert!(items.len() >= min_items); let init_at = Instant::now(); let last_success_since = Instant::now().duration_since(init_at).as_millis() as usize; let 
items = items .into_iter() .map(|item| { let item = Arc::new(ItemWithStats::new(item, last_success_since)); (Self::random_idx(), item) }) .collect(); Self { items, max_usage_per_item, min_items, init_at, } } pub fn drop_item(&mut self, item: CountedItem<T>) { let item_id = item.item_id; self.items.remove(&item_id); } pub fn add(&mut self, item: T) -> CountedItem<T> { let item_with_stats = Arc::new(ItemWithStats::new( item, Instant::now().duration_since(self.init_at).as_millis() as usize, )); let item_id = Self::random_idx(); self.items.insert(item_id, item_with_stats.clone()); CountedItem::new(item_id, item_with_stats, self.init_at) } // Returns None if current capacity is not enough pub fn choose(&mut self) -> Option<CountedItem<T>> { if self.items.len() < self.min_items { return None; } // If all items are used too much, we cannot use any of them so we return None let mut total_usage = 0; let min_usage_idx = *self .items .iter() .map(|(idx, item)| { let usage = item.usage.load(Ordering::Relaxed); total_usage += usage; (idx, usage) }) .filter(|(_, min_usage)| *min_usage < self.max_usage_per_item) .min_by_key(|(_, usage)| *usage)? 
.0; let current_usage_capacity = self.items.len().saturating_mul(self.max_usage_per_item); if current_usage_capacity.saturating_sub(total_usage) > self.max_usage_per_item.saturating_mul(2) && self.items.len() > self.min_items { // We have too many items, and we have enough capacity to remove some of them let item = self .items .remove(&min_usage_idx) .expect("Item must exist, as we just found it"); return Some(CountedItem::new(min_usage_idx, item, self.init_at)); } Some(CountedItem::new( min_usage_idx, self.items .get(&min_usage_idx) .expect("Item must exist, as we just found it") .clone(), self.init_at, )) } } #[cfg(test)] mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; use super::*; async fn use_item(item: CountedItem<Arc<AtomicUsize>>) { item.item().fetch_add(1, Ordering::SeqCst); // Sleep for 1-100 ms tokio::time::sleep(std::time::Duration::from_millis( rand::random::<u64>() % 100 + 1, )) .await; item.item().fetch_sub(1, Ordering::SeqCst); item.report_success(); drop(item); } #[test] fn test_dynamic_pool() { let items = vec![Arc::new(AtomicUsize::new(0)), Arc::new(AtomicUsize::new(0))]; let mut pool = DynamicPool::new(items, 5, 2); let mut items = vec![]; for _ in 0..17 { let item = match pool.choose() { None => pool.add(Arc::new(AtomicUsize::new(0))), Some(it) => it, }; item.item().fetch_add(1, Ordering::SeqCst); items.push(item); } assert_eq!(pool.items.len(), 4); for _ in 0..10 { items.pop(); } for (idx, item) in pool.items.iter() { println!("{idx} -> {item:?}"); } assert!(pool.choose().is_some()); assert_eq!(pool.items.len(), 3); } #[test] fn test_dynamic_pool_with_runtime() { let items = vec![Arc::new(AtomicUsize::new(0)), Arc::new(AtomicUsize::new(0))]; let runtime = tokio::runtime::Runtime::new().unwrap(); let mut pool = DynamicPool::new(items, 5, 2); let mut handles = vec![]; for _ in 0..1000 { let item = match pool.choose() { None => pool.add(Arc::new(AtomicUsize::new(0))), Some(it) => it, }; let handle = runtime.spawn(async move { 
use_item(item).await }); handles.push(handle); // Sleep for 3 ms with std std::thread::sleep(std::time::Duration::from_millis(2)); } runtime.block_on(async move { for handle in handles { handle.await.unwrap(); } }); pool.items.iter().for_each(|(_, item)| { assert_eq!(item.item.load(Ordering::SeqCst), 0); assert_eq!(item.usage.load(Ordering::SeqCst), 0); }); assert!(pool.items.len() < 50); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/mod.rs
lib/api/src/grpc/mod.rs
pub mod conversions;
#[allow(clippy::all)]
#[rustfmt::skip] // tonic uses `prettyplease` to format its output
pub mod qdrant;
pub mod dynamic_channel_pool;
pub mod dynamic_pool;
#[rustfmt::skip] // tonic uses `prettyplease` to format its output
#[path = "grpc.health.v1.rs"]
pub mod grpc_health_v1;
pub mod ops;
pub mod transport_channel_pool;
pub mod validate;

pub use qdrant::*;

/// Version of this API crate, taken from `Cargo.toml` at compile time.
pub const fn api_crate_version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}

/// Compiled protobuf file-descriptor set (used e.g. for gRPC reflection).
pub const QDRANT_DESCRIPTOR_SET: &[u8] = tonic::include_file_descriptor_set!("qdrant_descriptor");
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/grpc.health.v1.rs
lib/api/src/grpc/grpc.health.v1.rs
// This file is @generated by prost-build. #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HealthCheckRequest { #[prost(string, tag = "1")] pub service: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HealthCheckResponse { #[prost(enumeration = "health_check_response::ServingStatus", tag = "1")] pub status: i32, } /// Nested message and enum types in `HealthCheckResponse`. pub mod health_check_response { #[derive(serde::Serialize)] #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum ServingStatus { Unknown = 0, Serving = 1, NotServing = 2, /// Used only by the Watch method. ServiceUnknown = 3, } impl ServingStatus { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { ServingStatus::Unknown => "UNKNOWN", ServingStatus::Serving => "SERVING", ServingStatus::NotServing => "NOT_SERVING", ServingStatus::ServiceUnknown => "SERVICE_UNKNOWN", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { match value { "UNKNOWN" => Some(Self::Unknown), "SERVING" => Some(Self::Serving), "NOT_SERVING" => Some(Self::NotServing), "SERVICE_UNKNOWN" => Some(Self::ServiceUnknown), _ => None, } } } } /// Generated client implementations. 
pub mod health_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    // NOTE(review): generated by tonic-build — hand edits will be lost on regeneration.
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    #[derive(Debug, Clone)]
    pub struct HealthClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl HealthClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> HealthClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> HealthClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            HealthClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Unary call to `/grpc.health.v1.Health/Check`.
        pub async fn check(
            &mut self,
            request: impl tonic::IntoRequest<super::HealthCheckRequest>,
        ) -> std::result::Result<
            tonic::Response<super::HealthCheckResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/grpc.health.v1.Health/Check",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("grpc.health.v1.Health", "Check"));
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Generated server implementations.
pub mod health_server {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    // NOTE(review): generated by tonic-build — hand edits will be lost on regeneration.
    use tonic::codegen::*;
    /// Generated trait containing gRPC methods that should be implemented for use with HealthServer.
    #[async_trait]
    pub trait Health: Send + Sync + 'static {
        async fn check(
            &self,
            request: tonic::Request<super::HealthCheckRequest>,
        ) -> std::result::Result<
            tonic::Response<super::HealthCheckResponse>,
            tonic::Status,
        >;
    }
    #[derive(Debug)]
    pub struct HealthServer<T: Health> {
        inner: _Inner<T>,
        accept_compression_encodings: EnabledCompressionEncodings,
        send_compression_encodings: EnabledCompressionEncodings,
        max_decoding_message_size: Option<usize>,
        max_encoding_message_size: Option<usize>,
    }
    struct _Inner<T>(Arc<T>);
    impl<T: Health> HealthServer<T> {
        pub fn new(inner: T) -> Self {
            Self::from_arc(Arc::new(inner))
        }
        pub fn from_arc(inner: Arc<T>) -> Self {
            let inner = _Inner(inner);
            Self {
                inner,
                accept_compression_encodings: Default::default(),
                send_compression_encodings: Default::default(),
                max_decoding_message_size: None,
                max_encoding_message_size: None,
            }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> InterceptedService<Self, F>
        where
            F: tonic::service::Interceptor,
        {
            InterceptedService::new(Self::new(inner), interceptor)
        }
        /// Enable decompressing requests with the given encoding.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.accept_compression_encodings.enable(encoding);
            self
        }
        /// Compress responses with the given encoding, if the client supports it.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.send_compression_encodings.enable(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.max_decoding_message_size = Some(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.max_encoding_message_size = Some(limit);
            self
        }
    }
    impl<T, B> tonic::codegen::Service<http::Request<B>> for HealthServer<T>
    where
        T: Health,
        B: Body + Send + 'static,
        B::Error: Into<StdError> + Send + 'static,
    {
        type Response = http::Response<tonic::body::BoxBody>;
        type Error = std::convert::Infallible;
        type Future = BoxFuture<Self::Response, Self::Error>;
        fn poll_ready(
            &mut self,
            _cx: &mut Context<'_>,
        ) -> Poll<std::result::Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, req: http::Request<B>) -> Self::Future {
            let inner = self.inner.clone();
            match req.uri().path() {
                "/grpc.health.v1.Health/Check" => {
                    #[allow(non_camel_case_types)]
                    struct CheckSvc<T: Health>(pub Arc<T>);
                    impl<
                        T: Health,
                    > tonic::server::UnaryService<super::HealthCheckRequest>
                    for CheckSvc<T> {
                        type Response = super::HealthCheckResponse;
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::HealthCheckRequest>,
                        ) -> Self::Future {
                            let inner = Arc::clone(&self.0);
                            let fut = async move {
                                <T as Health>::check(&inner, request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let max_decoding_message_size = self.max_decoding_message_size;
                    let max_encoding_message_size = self.max_encoding_message_size;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = CheckSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            )
                            .apply_max_message_size_config(
                                max_decoding_message_size,
                                max_encoding_message_size,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                _ => {
                    // Unknown method: respond with grpc-status 12 (UNIMPLEMENTED).
                    Box::pin(async move {
                        Ok(
                            http::Response::builder()
                                .status(200)
                                .header("grpc-status", "12")
                                .header("content-type", "application/grpc")
                                .body(empty_body())
                                .unwrap(),
                        )
                    })
                }
            }
        }
    }
    impl<T: Health> Clone for HealthServer<T> {
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self {
                inner,
                accept_compression_encodings: self.accept_compression_encodings,
                send_compression_encodings: self.send_compression_encodings,
                max_decoding_message_size: self.max_decoding_message_size,
                max_encoding_message_size: self.max_encoding_message_size,
            }
        }
    }
    impl<T: Health> Clone for _Inner<T> {
        fn clone(&self) -> Self {
            Self(Arc::clone(&self.0))
        }
    }
    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{:?}", self.0)
        }
    }
    impl<T: Health> tonic::server::NamedService for HealthServer<T> {
        const NAME: &'static str = "grpc.health.v1.Health";
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/conversions.rs
lib/api/src/grpc/conversions.rs
use std::collections::{BTreeMap, HashMap}; use std::str::FromStr as _; use std::time::Instant; use ahash::AHashSet; use chrono::{NaiveDateTime, Timelike}; use common::counter::hardware_accumulator::HwMeasurementAcc; use common::counter::hardware_data::HardwareData; use common::types::ScoreType; use itertools::Itertools; use ordered_float::OrderedFloat; use segment::common::operation_error::OperationError; use segment::data_types::index::{ BoolIndexType, DatetimeIndexType, FloatIndexType, GeoIndexType, IntegerIndexType, KeywordIndexType, SnowballLanguage, TextIndexType, UuidIndexType, }; use segment::data_types::modifier::Modifier; use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, NamedMultiDenseVector, VectorInternal}; use segment::data_types::{facets as segment_facets, vectors as segment_vectors}; use segment::index::query_optimization::rescore_formula::parsed_formula::{ DatetimeExpression, DecayKind, ParsedExpression, ParsedFormula, }; use segment::types::{DateTimePayloadType, FloatPayloadType, default_quantization_ignore_value}; use segment::vector_storage::query::{self as segment_query, NaiveFeedbackCoefficients}; use sparse::common::sparse_vector::validate_sparse_vector_impl; use tonic::Status; use uuid::Uuid; use super::qdrant::{ BinaryQuantization, BoolIndexParams, CompressionRatio, DatetimeIndexParams, DatetimeRange, Direction, FacetHit, FacetHitInternal, FacetValue, FacetValueInternal, FieldType, FloatIndexParams, GeoIndexParams, GeoLineString, GroupId, HardwareUsage, HasVectorCondition, KeywordIndexParams, LookupLocation, MaxOptimizationThreads, MultiVectorComparator, MultiVectorConfig, OrderBy, OrderValue, Range, RawVector, RecommendStrategy, RetrievedPoint, SearchMatrixPair, SearchPointGroups, SearchPoints, ShardKeySelector, StartFrom, StrictModeMultivector, StrictModeMultivectorConfig, StrictModeSparse, StrictModeSparseConfig, UuidIndexParams, VectorsOutput, WithLookup, raw_query, start_from, }; use super::stemming_algorithm::StemmingParams; use 
super::{Expression, Formula, RecoQuery, SnowballParams, StemmingAlgorithm, Usage}; use crate::conversions::json::{self, json_to_proto}; use crate::grpc::qdrant::condition::ConditionOneOf; use crate::grpc::qdrant::r#match::MatchValue; use crate::grpc::qdrant::payload_index_params::IndexParams; use crate::grpc::qdrant::point_id::PointIdOptions; use crate::grpc::qdrant::with_payload_selector::SelectorOptions; use crate::grpc::qdrant::{ AcornSearchParams, CollectionDescription, CollectionOperationResponse, Condition, Distance, FieldCondition, Filter, GeoBoundingBox, GeoPoint, GeoPolygon, GeoRadius, HasIdCondition, HealthCheckReply, HnswConfigDiff, IntegerIndexParams, IsEmptyCondition, IsNullCondition, ListCollectionsResponse, ListShardKeysResponse, Match, MinShould, NamedVectors, NestedCondition, PayloadExcludeSelector, PayloadIncludeSelector, PayloadIndexParams, PayloadSchemaInfo, PayloadSchemaType, PointId, PointStruct, PointsOperationResponse, PointsOperationResponseInternal, ProductQuantization, QuantizationConfig, QuantizationSearchParams, QuantizationType, RepeatedIntegers, RepeatedStrings, ScalarQuantization, ScoredPoint, SearchParams, ShardKey, ShardKeyDescription, StopwordsSet, StrictModeConfig, TextIndexParams, TokenizerType, UpdateResult, UpdateResultInternal, ValuesCount, VectorsSelector, WithPayloadSelector, WithVectorsSelector, shard_key, with_vectors_selector, }; use crate::grpc::{ self, BinaryQuantizationEncoding, BinaryQuantizationQueryEncoding, DecayParamsExpression, DivExpression, GeoDistance, MultExpression, PowExpression, SumExpression, }; use crate::rest::models::{CollectionsResponse, ShardKeysResponse, VersionInfo}; use crate::rest::schema as rest; pub fn convert_shard_key_to_grpc(value: segment::types::ShardKey) -> ShardKey { match value { segment::types::ShardKey::Keyword(keyword) => ShardKey { key: Some(shard_key::Key::Keyword(keyword.to_string())), }, segment::types::ShardKey::Number(number) => ShardKey { key: 
Some(shard_key::Key::Number(number)), }, } } pub fn convert_shard_key_from_grpc(value: ShardKey) -> Option<segment::types::ShardKey> { let ShardKey { key } = value; key.map(|key| match key { shard_key::Key::Keyword(keyword) => segment::types::ShardKey::from(keyword), shard_key::Key::Number(number) => segment::types::ShardKey::Number(number), }) } pub fn convert_shard_key_from_grpc_opt( value: Option<ShardKey>, ) -> Option<segment::types::ShardKey> { value.and_then(|value| value.key).map(|key| match key { shard_key::Key::Keyword(keyword) => segment::types::ShardKey::from(keyword), shard_key::Key::Number(number) => segment::types::ShardKey::Number(number), }) } impl TryFrom<ShardKeySelector> for rest::ShardKeySelector { type Error = Status; fn try_from(value: ShardKeySelector) -> Result<Self, Self::Error> { let ShardKeySelector { shard_keys, fallback, } = value; let shard_keys: Vec<_> = shard_keys .into_iter() .filter_map(convert_shard_key_from_grpc) .collect(); if shard_keys.len() == 1 { let key = shard_keys.into_iter().next().unwrap(); match fallback.and_then(convert_shard_key_from_grpc) { Some(fallback) => Ok(rest::ShardKeySelector::ShardKeyWithFallback( rest::ShardKeyWithFallback { target: key, fallback, }, )), None => Ok(rest::ShardKeySelector::ShardKey(key)), } } else { if fallback.is_some() { return Err(Status::invalid_argument(format!( "Fallback shard key {fallback:?} can only be set when a single shard key is provided", ))); } Ok(rest::ShardKeySelector::ShardKeys(shard_keys)) } } } impl From<(Instant, ShardKeysResponse)> for ListShardKeysResponse { fn from(value: (Instant, ShardKeysResponse)) -> Self { let (timing, response) = value; let ShardKeysResponse { shard_keys } = response; let shard_keys = shard_keys .into_iter() .flatten() .map(|key_desc| { let key = Some(convert_shard_key_to_grpc(key_desc.key)); ShardKeyDescription { key } }) .collect(); Self { shard_keys, time: timing.elapsed().as_secs_f64(), } } } impl From<VersionInfo> for HealthCheckReply { fn 
from(info: VersionInfo) -> Self { let VersionInfo { title, version, commit, } = info; HealthCheckReply { title, version, commit, } } } impl From<(Instant, CollectionsResponse)> for ListCollectionsResponse { fn from(value: (Instant, CollectionsResponse)) -> Self { let (timing, response) = value; let CollectionsResponse { collections } = response; let collections = collections .into_iter() .map(|desc| CollectionDescription { name: desc.name }) .collect::<Vec<_>>(); Self { collections, time: timing.elapsed().as_secs_f64(), } } } impl From<segment::data_types::index::TokenizerType> for TokenizerType { fn from(tokenizer_type: segment::data_types::index::TokenizerType) -> Self { match tokenizer_type { segment::data_types::index::TokenizerType::Prefix => TokenizerType::Prefix, segment::data_types::index::TokenizerType::Whitespace => TokenizerType::Whitespace, segment::data_types::index::TokenizerType::Multilingual => TokenizerType::Multilingual, segment::data_types::index::TokenizerType::Word => TokenizerType::Word, } } } impl From<segment::data_types::index::KeywordIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::KeywordIndexParams) -> Self { let segment::data_types::index::KeywordIndexParams { r#type: _, is_tenant, on_disk, } = params; PayloadIndexParams { index_params: Some(IndexParams::KeywordIndexParams(KeywordIndexParams { is_tenant, on_disk, })), } } } impl From<segment::data_types::index::IntegerIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::IntegerIndexParams) -> Self { let segment::data_types::index::IntegerIndexParams { r#type: _, lookup, range, on_disk, is_principal, } = params; PayloadIndexParams { index_params: Some(IndexParams::IntegerIndexParams(IntegerIndexParams { lookup, range, is_principal, on_disk, })), } } } impl From<segment::data_types::index::FloatIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::FloatIndexParams) -> Self { let 
segment::data_types::index::FloatIndexParams { r#type: _, on_disk, is_principal, } = params; PayloadIndexParams { index_params: Some(IndexParams::FloatIndexParams(FloatIndexParams { on_disk, is_principal, })), } } } impl From<segment::data_types::index::GeoIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::GeoIndexParams) -> Self { let segment::data_types::index::GeoIndexParams { r#type: _, on_disk } = params; PayloadIndexParams { index_params: Some(IndexParams::GeoIndexParams(GeoIndexParams { on_disk })), } } } impl From<segment::data_types::index::TextIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::TextIndexParams) -> Self { let segment::data_types::index::TextIndexParams { r#type: _, tokenizer, min_token_len, max_token_len, lowercase, ascii_folding, phrase_matching, on_disk, stopwords, stemmer, } = params; let tokenizer = TokenizerType::from(tokenizer); // Convert stopwords if present let stopwords_set = stopwords.map(StopwordsSet::from); let stemming_algo = stemmer.map(StemmingAlgorithm::from); PayloadIndexParams { index_params: Some(IndexParams::TextIndexParams(TextIndexParams { tokenizer: tokenizer as i32, lowercase, ascii_folding, min_token_len: min_token_len.map(|x| x as u64), max_token_len: max_token_len.map(|x| x as u64), phrase_matching, on_disk, stopwords: stopwords_set, stemmer: stemming_algo, })), } } } impl From<segment::data_types::index::BoolIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::BoolIndexParams) -> Self { let segment::data_types::index::BoolIndexParams { r#type: _, on_disk } = params; PayloadIndexParams { index_params: Some(IndexParams::BoolIndexParams(BoolIndexParams { on_disk })), } } } impl From<segment::data_types::index::UuidIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::UuidIndexParams) -> Self { let segment::data_types::index::UuidIndexParams { r#type: _, is_tenant, on_disk, } = params; 
PayloadIndexParams { index_params: Some(IndexParams::UuidIndexParams(UuidIndexParams { is_tenant, on_disk, })), } } } impl From<segment::data_types::index::DatetimeIndexParams> for PayloadIndexParams { fn from(params: segment::data_types::index::DatetimeIndexParams) -> Self { let segment::data_types::index::DatetimeIndexParams { r#type: _, on_disk, is_principal, } = params; PayloadIndexParams { index_params: Some(IndexParams::DatetimeIndexParams(DatetimeIndexParams { on_disk, is_principal, })), } } } impl From<segment::types::PayloadIndexInfo> for PayloadSchemaInfo { fn from(schema: segment::types::PayloadIndexInfo) -> Self { let segment::types::PayloadIndexInfo { data_type, params, points, } = schema; PayloadSchemaInfo { data_type: PayloadSchemaType::from(data_type) as i32, params: params.map(|p| p.into()), points: Some(points as u64), } } } impl From<segment::types::PayloadSchemaType> for PayloadSchemaType { fn from(schema_type: segment::types::PayloadSchemaType) -> Self { match schema_type { segment::types::PayloadSchemaType::Keyword => PayloadSchemaType::Keyword, segment::types::PayloadSchemaType::Integer => PayloadSchemaType::Integer, segment::types::PayloadSchemaType::Float => PayloadSchemaType::Float, segment::types::PayloadSchemaType::Geo => PayloadSchemaType::Geo, segment::types::PayloadSchemaType::Text => PayloadSchemaType::Text, segment::types::PayloadSchemaType::Bool => PayloadSchemaType::Bool, segment::types::PayloadSchemaType::Datetime => PayloadSchemaType::Datetime, segment::types::PayloadSchemaType::Uuid => PayloadSchemaType::Uuid, } } } impl From<segment::types::PayloadSchemaType> for FieldType { fn from(schema_type: segment::types::PayloadSchemaType) -> Self { match schema_type { segment::types::PayloadSchemaType::Keyword => FieldType::Keyword, segment::types::PayloadSchemaType::Integer => FieldType::Integer, segment::types::PayloadSchemaType::Float => FieldType::Float, segment::types::PayloadSchemaType::Geo => FieldType::Geo, 
segment::types::PayloadSchemaType::Text => FieldType::Text, segment::types::PayloadSchemaType::Bool => FieldType::Bool, segment::types::PayloadSchemaType::Datetime => FieldType::Datetime, segment::types::PayloadSchemaType::Uuid => FieldType::Uuid, } } } impl From<segment::data_types::index::StopwordsInterface> for StopwordsSet { fn from(stopwords: segment::data_types::index::StopwordsInterface) -> Self { match stopwords { segment::data_types::index::StopwordsInterface::Language(lang) => { let lang_str = lang.to_string(); StopwordsSet { languages: vec![lang_str], custom: vec![], } } segment::data_types::index::StopwordsInterface::Set(set) => { let languages = if let Some(languages) = set.languages { languages.iter().map(|lang| lang.to_string()).collect() } else { vec![] }; let custom = if let Some(custom) = set.custom { custom.into_iter().collect() } else { vec![] }; StopwordsSet { languages, custom } } } } } impl From<segment::data_types::index::StemmingAlgorithm> for StemmingAlgorithm { fn from(value: segment::data_types::index::StemmingAlgorithm) -> Self { let stemming_params = match value { segment::data_types::index::StemmingAlgorithm::Snowball(snowball_params) => { let segment::data_types::index::SnowballParams { r#type: _, language, } = snowball_params; let language = language.to_string(); StemmingParams::Snowball(SnowballParams { language }) } }; StemmingAlgorithm { stemming_params: Some(stemming_params), } } } impl TryFrom<TokenizerType> for segment::data_types::index::TokenizerType { type Error = Status; fn try_from(tokenizer_type: TokenizerType) -> Result<Self, Self::Error> { match tokenizer_type { TokenizerType::Unknown => Err(Status::invalid_argument("unknown tokenizer type")), TokenizerType::Prefix => Ok(segment::data_types::index::TokenizerType::Prefix), TokenizerType::Multilingual => { Ok(segment::data_types::index::TokenizerType::Multilingual) } TokenizerType::Whitespace => Ok(segment::data_types::index::TokenizerType::Whitespace), 
TokenizerType::Word => Ok(segment::data_types::index::TokenizerType::Word), } } } impl From<segment::types::PayloadSchemaParams> for PayloadIndexParams { fn from(params: segment::types::PayloadSchemaParams) -> Self { match params { segment::types::PayloadSchemaParams::Keyword(p) => p.into(), segment::types::PayloadSchemaParams::Integer(p) => p.into(), segment::types::PayloadSchemaParams::Float(p) => p.into(), segment::types::PayloadSchemaParams::Geo(p) => p.into(), segment::types::PayloadSchemaParams::Text(p) => p.into(), segment::types::PayloadSchemaParams::Bool(p) => p.into(), segment::types::PayloadSchemaParams::Datetime(p) => p.into(), segment::types::PayloadSchemaParams::Uuid(p) => p.into(), } } } impl TryFrom<KeywordIndexParams> for segment::data_types::index::KeywordIndexParams { type Error = Status; fn try_from(params: KeywordIndexParams) -> Result<Self, Self::Error> { let KeywordIndexParams { is_tenant, on_disk } = params; Ok(segment::data_types::index::KeywordIndexParams { r#type: KeywordIndexType::Keyword, is_tenant, on_disk, }) } } impl TryFrom<IntegerIndexParams> for segment::data_types::index::IntegerIndexParams { type Error = Status; fn try_from(params: IntegerIndexParams) -> Result<Self, Self::Error> { let IntegerIndexParams { lookup, range, is_principal, on_disk, } = params; Ok(segment::data_types::index::IntegerIndexParams { r#type: IntegerIndexType::Integer, lookup, range, is_principal, on_disk, }) } } impl TryFrom<FloatIndexParams> for segment::data_types::index::FloatIndexParams { type Error = Status; fn try_from(params: FloatIndexParams) -> Result<Self, Self::Error> { let FloatIndexParams { on_disk, is_principal, } = params; Ok(segment::data_types::index::FloatIndexParams { r#type: FloatIndexType::Float, on_disk, is_principal, }) } } impl TryFrom<GeoIndexParams> for segment::data_types::index::GeoIndexParams { type Error = Status; fn try_from(params: GeoIndexParams) -> Result<Self, Self::Error> { let GeoIndexParams { on_disk } = params; 
Ok(segment::data_types::index::GeoIndexParams { r#type: GeoIndexType::Geo, on_disk, }) } } impl TryFrom<StopwordsSet> for segment::data_types::index::StopwordsInterface { type Error = Status; fn try_from(value: StopwordsSet) -> Result<Self, Self::Error> { let StopwordsSet { languages, custom } = value; let result_languages = if languages.is_empty() { None } else { Some( languages .into_iter() .map(|lang| segment::data_types::index::Language::from_str(&lang)) .collect::<Result<_, _>>() .map_err(|e| Status::invalid_argument(format!("unknown language: {e}")))?, ) }; let result_custom = if custom.is_empty() { None } else { Some(custom.into_iter().map(|word| word.to_lowercase()).collect()) }; Ok(segment::data_types::index::StopwordsInterface::Set( segment::data_types::index::StopwordsSet { languages: result_languages, custom: result_custom, }, )) } } impl TryFrom<TextIndexParams> for segment::data_types::index::TextIndexParams { type Error = Status; fn try_from(params: TextIndexParams) -> Result<Self, Self::Error> { let TextIndexParams { tokenizer, lowercase, ascii_folding, min_token_len, max_token_len, phrase_matching, on_disk, stopwords, stemmer, } = params; // Convert stopwords if present let stopwords_converted = if let Some(set) = stopwords { Some(segment::data_types::index::StopwordsInterface::try_from( set, )?) 
} else { None }; let stemmer = stemmer .and_then(|i| i.stemming_params) .map(segment::data_types::index::StemmingAlgorithm::try_from) .transpose()?; Ok(segment::data_types::index::TextIndexParams { r#type: TextIndexType::Text, tokenizer: TokenizerType::try_from(tokenizer) .map(|x| x.try_into()) .unwrap_or_else(|_| Err(Status::invalid_argument("unknown tokenizer type")))?, lowercase, ascii_folding, min_token_len: min_token_len.map(|x| x as usize), max_token_len: max_token_len.map(|x| x as usize), phrase_matching, on_disk, stopwords: stopwords_converted, stemmer, }) } } impl TryFrom<StemmingParams> for segment::data_types::index::StemmingAlgorithm { type Error = Status; fn try_from(value: StemmingParams) -> Result<Self, Self::Error> { match value { StemmingParams::Snowball(params) => { let language = SnowballLanguage::from_str(&params.language).map_err(|_| { Status::invalid_argument(format!("Language {:?} not found.", params.language)) })?; Ok(segment::data_types::index::StemmingAlgorithm::Snowball( segment::data_types::index::SnowballParams { r#type: segment::data_types::index::Snowball::Snowball, language, }, )) } } } } impl TryFrom<BoolIndexParams> for segment::data_types::index::BoolIndexParams { type Error = Status; fn try_from(params: BoolIndexParams) -> Result<Self, Self::Error> { let BoolIndexParams { on_disk } = params; Ok(segment::data_types::index::BoolIndexParams { r#type: BoolIndexType::Bool, on_disk, }) } } impl TryFrom<DatetimeIndexParams> for segment::data_types::index::DatetimeIndexParams { type Error = Status; fn try_from(params: DatetimeIndexParams) -> Result<Self, Self::Error> { let DatetimeIndexParams { on_disk, is_principal, } = params; Ok(segment::data_types::index::DatetimeIndexParams { r#type: DatetimeIndexType::Datetime, on_disk, is_principal, }) } } impl TryFrom<UuidIndexParams> for segment::data_types::index::UuidIndexParams { type Error = Status; fn try_from(params: UuidIndexParams) -> Result<Self, Self::Error> { let UuidIndexParams { 
is_tenant, on_disk } = params; Ok(segment::data_types::index::UuidIndexParams { r#type: UuidIndexType::Uuid, is_tenant, on_disk, }) } } impl TryFrom<IndexParams> for segment::types::PayloadSchemaParams { type Error = Status; fn try_from(value: IndexParams) -> Result<Self, Self::Error> { Ok(match value { IndexParams::KeywordIndexParams(p) => { segment::types::PayloadSchemaParams::Keyword(p.try_into()?) } IndexParams::IntegerIndexParams(p) => { segment::types::PayloadSchemaParams::Integer(p.try_into()?) } IndexParams::FloatIndexParams(p) => { segment::types::PayloadSchemaParams::Float(p.try_into()?) } IndexParams::GeoIndexParams(p) => { segment::types::PayloadSchemaParams::Geo(p.try_into()?) } IndexParams::TextIndexParams(p) => { segment::types::PayloadSchemaParams::Text(p.try_into()?) } IndexParams::BoolIndexParams(p) => { segment::types::PayloadSchemaParams::Bool(p.try_into()?) } IndexParams::DatetimeIndexParams(p) => { segment::types::PayloadSchemaParams::Datetime(p.try_into()?) } IndexParams::UuidIndexParams(p) => { segment::types::PayloadSchemaParams::Uuid(p.try_into()?) 
} }) } } impl TryFrom<PayloadSchemaInfo> for segment::types::PayloadIndexInfo { type Error = Status; fn try_from(schema: PayloadSchemaInfo) -> Result<Self, Self::Error> { let PayloadSchemaInfo { data_type, params, points, } = schema; let data_type = match PayloadSchemaType::try_from(data_type) { Err(_) => { return Err(Status::invalid_argument( "Malformed payload schema".to_string(), )); } Ok(data_type) => match data_type { PayloadSchemaType::Keyword => segment::types::PayloadSchemaType::Keyword, PayloadSchemaType::Integer => segment::types::PayloadSchemaType::Integer, PayloadSchemaType::Float => segment::types::PayloadSchemaType::Float, PayloadSchemaType::Geo => segment::types::PayloadSchemaType::Geo, PayloadSchemaType::Text => segment::types::PayloadSchemaType::Text, PayloadSchemaType::Bool => segment::types::PayloadSchemaType::Bool, PayloadSchemaType::Datetime => segment::types::PayloadSchemaType::Datetime, PayloadSchemaType::UnknownType => { return Err(Status::invalid_argument( "Malformed payload schema".to_string(), )); } PayloadSchemaType::Uuid => segment::types::PayloadSchemaType::Uuid, }, }; let params = match params { None => None, Some(PayloadIndexParams { index_params: None }) => None, Some(PayloadIndexParams { index_params: Some(index_params), }) => Some(index_params.try_into()?), }; Ok(segment::types::PayloadIndexInfo { data_type, params, points: points.unwrap_or(0) as usize, }) } } impl From<(Instant, bool)> for CollectionOperationResponse { fn from(value: (Instant, bool)) -> Self { let (timing, result) = value; CollectionOperationResponse { result, time: timing.elapsed().as_secs_f64(), } } } impl From<segment::types::GeoPoint> for GeoPoint { fn from(geo: segment::types::GeoPoint) -> Self { let segment::types::GeoPoint { lon, lat } = geo; Self { lon: lon.0, lat: lat.0, } } } impl TryFrom<WithPayloadSelector> for segment::types::WithPayloadInterface { type Error = Status; fn try_from(value: WithPayloadSelector) -> Result<Self, Self::Error> { let 
WithPayloadSelector { selector_options } = value; match selector_options { Some(options) => Ok(match options { SelectorOptions::Enable(flag) => segment::types::WithPayloadInterface::Bool(flag), SelectorOptions::Exclude(s) => segment::types::PayloadSelectorExclude::new( s.fields .iter() .map(|i| json::json_path_from_proto(i)) .collect::<Result<_, _>>()?, ) .into(), SelectorOptions::Include(s) => segment::types::PayloadSelectorInclude::new( s.fields .iter() .map(|i| json::json_path_from_proto(i)) .collect::<Result<_, _>>()?, ) .into(), }), _ => Err(Status::invalid_argument("No PayloadSelector".to_string())), } } } impl From<segment::types::WithPayloadInterface> for WithPayloadSelector { fn from(value: segment::types::WithPayloadInterface) -> Self { let selector_options = match value { segment::types::WithPayloadInterface::Bool(flag) => SelectorOptions::Enable(flag), segment::types::WithPayloadInterface::Fields(fields) => { SelectorOptions::Include(PayloadIncludeSelector { fields: fields.iter().map(|f| f.to_string()).collect(), }) } segment::types::WithPayloadInterface::Selector(selector) => match selector { segment::types::PayloadSelector::Include(s) => { SelectorOptions::Include(PayloadIncludeSelector { fields: s.include.iter().map(|f| f.to_string()).collect(), }) } segment::types::PayloadSelector::Exclude(s) => { SelectorOptions::Exclude(PayloadExcludeSelector { fields: s.exclude.iter().map(|f| f.to_string()).collect(), }) } }, }; WithPayloadSelector { selector_options: Some(selector_options), } } } impl From<QuantizationSearchParams> for segment::types::QuantizationSearchParams { fn from(params: QuantizationSearchParams) -> Self { let QuantizationSearchParams { ignore, rescore, oversampling, } = params; Self { ignore: ignore.unwrap_or(default_quantization_ignore_value()), rescore, oversampling, } } } impl From<segment::types::QuantizationSearchParams> for QuantizationSearchParams { fn from(params: segment::types::QuantizationSearchParams) -> Self { let 
segment::types::QuantizationSearchParams { ignore, rescore, oversampling, } = params; Self { ignore: Some(ignore), rescore, oversampling, } } } impl From<AcornSearchParams> for segment::types::AcornSearchParams { fn from(params: AcornSearchParams) -> Self { let AcornSearchParams { enable, max_selectivity, } = params; Self { enable: enable.unwrap_or(false), max_selectivity: max_selectivity.map(OrderedFloat), } } } impl From<segment::types::AcornSearchParams> for AcornSearchParams { fn from(params: segment::types::AcornSearchParams) -> Self { let segment::types::AcornSearchParams { enable, max_selectivity, } = params; Self { enable: Some(enable), max_selectivity: max_selectivity.map(|OrderedFloat(x)| x), } } } impl From<SearchParams> for segment::types::SearchParams { fn from(params: SearchParams) -> Self { let SearchParams { hnsw_ef, exact, quantization, indexed_only, acorn, } = params; Self { hnsw_ef: hnsw_ef.map(|x| x as usize), exact: exact.unwrap_or(false), quantization: quantization.map(|q| q.into()), indexed_only: indexed_only.unwrap_or(false), acorn: acorn.map(segment::types::AcornSearchParams::from), } } } impl From<segment::types::SearchParams> for SearchParams {
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/validate.rs
lib/api/src/grpc/validate.rs
use std::borrow::Cow; use std::collections::HashMap; use common::validation::{validate_range_generic, validate_shard_different_peers}; use segment::data_types::index::validate_integer_index_params; use validator::{Validate, ValidationError, ValidationErrors}; use super::qdrant as grpc; const TIMESTAMP_MIN_SECONDS: i64 = -62_135_596_800; // 0001-01-01T00:00:00Z const TIMESTAMP_MAX_SECONDS: i64 = 253_402_300_799; // 9999-12-31T23:59:59Z pub trait ValidateExt { fn validate(&self) -> Result<(), ValidationErrors>; } impl Validate for dyn ValidateExt { #[inline] fn validate(&self) -> Result<(), ValidationErrors> { ValidateExt::validate(self) } } impl<V> ValidateExt for ::core::option::Option<V> where V: Validate, { #[inline] fn validate(&self) -> Result<(), ValidationErrors> { (&self).validate() } } impl<V> ValidateExt for &::core::option::Option<V> where V: Validate, { #[inline] fn validate(&self) -> Result<(), ValidationErrors> { self.as_ref().map(Validate::validate).unwrap_or(Ok(())) } } impl<K, V> ValidateExt for HashMap<K, V> where V: Validate, { #[inline] fn validate(&self) -> Result<(), ValidationErrors> { match self.values().find_map(|v| v.validate().err()) { Some(err) => ValidationErrors::merge(Err(Default::default()), "[]", Err(err)), None => Ok(()), } } } impl Validate for grpc::vectors_config::Config { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::vectors_config::Config; match self { Config::Params(params) => params.validate(), Config::ParamsMap(params_map) => params_map.validate(), } } } impl Validate for grpc::vectors_config_diff::Config { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::vectors_config_diff::Config; match self { Config::Params(params) => params.validate(), Config::ParamsMap(params_map) => params_map.validate(), } } } impl Validate for grpc::quantization_config::Quantization { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::quantization_config::Quantization; match self { 
Quantization::Scalar(scalar) => scalar.validate(), Quantization::Product(product) => product.validate(), Quantization::Binary(binary) => binary.validate(), } } } impl Validate for grpc::quantization_config_diff::Quantization { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::quantization_config_diff::Quantization; match self { Quantization::Scalar(scalar) => scalar.validate(), Quantization::Product(product) => product.validate(), Quantization::Binary(binary) => binary.validate(), Quantization::Disabled(_) => Ok(()), } } } impl Validate for grpc::update_collection_cluster_setup_request::Operation { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::update_collection_cluster_setup_request::Operation; match self { Operation::MoveShard(op) => op.validate(), Operation::ReplicateShard(op) => op.validate(), Operation::AbortTransfer(op) => op.validate(), Operation::DropReplica(op) => op.validate(), Operation::CreateShardKey(op) => op.validate(), Operation::DeleteShardKey(op) => op.validate(), Operation::RestartTransfer(op) => op.validate(), Operation::ReplicatePoints(op) => op.validate(), } } } impl Validate for grpc::MoveShard { fn validate(&self) -> Result<(), ValidationErrors> { validate_shard_different_peers( self.from_peer_id, self.to_peer_id, self.shard_id, self.to_shard_id, ) } } impl Validate for grpc::ReplicateShard { fn validate(&self) -> Result<(), ValidationErrors> { validate_shard_different_peers( self.from_peer_id, self.to_peer_id, self.shard_id, self.to_shard_id, ) } } impl Validate for crate::grpc::qdrant::AbortShardTransfer { fn validate(&self) -> Result<(), ValidationErrors> { validate_shard_different_peers( self.from_peer_id, self.to_peer_id, self.shard_id, self.to_shard_id, ) } } impl Validate for grpc::CreateShardKey { fn validate(&self) -> Result<(), ValidationErrors> { if self.replication_factor == Some(0) { let mut errors = ValidationErrors::new(); errors.add( "replication_factor", ValidationError::new("Replication factor 
must be greater than 0"), ); return Err(errors); } if self.shards_number == Some(0) { let mut errors = ValidationErrors::new(); errors.add( "shards_number", ValidationError::new("Shards number must be greater than 0"), ); return Err(errors); } Ok(()) } } impl Validate for grpc::DeleteShardKey { fn validate(&self) -> Result<(), ValidationErrors> { Ok(()) } } impl Validate for grpc::RestartTransfer { fn validate(&self) -> Result<(), ValidationErrors> { validate_shard_different_peers( self.from_peer_id, self.to_peer_id, self.shard_id, self.to_shard_id, ) } } impl Validate for grpc::ReplicatePoints { fn validate(&self) -> Result<(), ValidationErrors> { if self.from_shard_key != self.to_shard_key { return Ok(()); } let mut errors = ValidationErrors::new(); errors.add( "to_shard_key", validator::ValidationError::new("must be different from from_shard_key"), ); Err(errors) } } impl Validate for grpc::condition::ConditionOneOf { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::condition::ConditionOneOf; match self { ConditionOneOf::Field(field_condition) => field_condition.validate(), ConditionOneOf::Nested(nested) => nested.validate(), ConditionOneOf::Filter(filter) => filter.validate(), ConditionOneOf::IsEmpty(_) => Ok(()), ConditionOneOf::HasId(_) => Ok(()), ConditionOneOf::IsNull(_) => Ok(()), ConditionOneOf::HasVector(_) => Ok(()), } } } impl Validate for grpc::update_operation::Update { fn validate(&self) -> Result<(), ValidationErrors> { use grpc::update_operation::Update; match self { Update::Sync(op) => op.validate(), Update::Upsert(op) => op.validate(), Update::Delete(op) => op.validate(), Update::UpdateVectors(op) => op.validate(), Update::DeleteVectors(op) => op.validate(), Update::SetPayload(op) => op.validate(), Update::OverwritePayload(op) => op.validate(), Update::DeletePayload(op) => op.validate(), Update::ClearPayload(op) => op.validate(), Update::CreateFieldIndex(op) => op.validate(), Update::DeleteFieldIndex(op) => op.validate(), } } } 
/// Require at least one concrete constraint inside a `FieldCondition`.
///
/// A condition carrying only a `key` (no match / range / geo / count / flag
/// constraint) is meaningless, so it is rejected with a single error
/// reported under the `match` field.
impl Validate for grpc::FieldCondition {
    fn validate(&self) -> Result<(), ValidationErrors> {
        // Destructure exhaustively so adding a new field to `FieldCondition`
        // produces a compile error here instead of being silently skipped.
        let grpc::FieldCondition {
            key: _,
            r#match,
            range,
            datetime_range,
            geo_bounding_box,
            geo_radius,
            geo_polygon,
            values_count,
            is_empty,
            is_null,
        } = self;
        let all_fields_none = r#match.is_none()
            && range.is_none()
            && datetime_range.is_none()
            && geo_bounding_box.is_none()
            && geo_radius.is_none()
            && geo_polygon.is_none()
            && values_count.is_none()
            && is_empty.is_none()
            && is_null.is_none();
        if all_fields_none {
            let mut errors = ValidationErrors::new();
            errors.add(
                "match",
                ValidationError::new("At least one field condition must be specified"),
            );
            Err(errors)
        } else {
            Ok(())
        }
    }
}

/// Validate the flattened `Vector` message: the deprecated top-level fields
/// (`data`/`indices`/`vectors_count`) and, if present, the nested `vector` oneof.
impl Validate for grpc::Vector {
    fn validate(&self) -> Result<(), ValidationErrors> {
        #[expect(deprecated)]
        let grpc::Vector {
            data,
            indices,
            vectors_count,
            vector,
        } = self;
        // The nested oneof carries its own per-variant validation.
        if let Some(vector) = vector {
            vector.validate()?;
        }
        match (indices, vectors_count) {
            // `indices` marks a sparse vector and `vectors_count` a multi-dense
            // one; both at once is contradictory.
            (Some(_), Some(_)) => {
                let mut errors = ValidationErrors::new();
                errors.add(
                    "indices",
                    ValidationError::new("`indices` and `vectors_count` cannot be both specified"),
                );
                Err(errors)
            }
            // Sparse vector: check indices against the `data` values.
            (Some(indices), None) => {
                sparse::common::sparse_vector::validate_sparse_vector_impl(&indices.data, data)
            }
            // Multi-dense vector: delegate the data-vs-count length check.
            (None, Some(vectors_count)) => {
                common::validation::validate_multi_vector_len(*vectors_count, data)
            }
            // Plain dense vector: nothing to check here.
            (None, None) => Ok(()),
        }
    }
}

/// Dispatch validation over the `vector` oneof; only the sparse and
/// multi-dense variants have checkable constraints.
impl Validate for grpc::vector::Vector {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::vector::Vector::Dense(_dense) => Ok(()),
            grpc::vector::Vector::Sparse(sparse) => sparse.validate(),
            grpc::vector::Vector::MultiDense(multi) => multi.validate(),
            grpc::vector::Vector::Document(_document) => Ok(()),
            grpc::vector::Vector::Image(_image) => Ok(()),
            grpc::vector::Vector::Object(_obj) => Ok(()),
        }
    }
}

/// Validate a standalone sparse vector (indices paired with values).
impl Validate for grpc::SparseVector {
    fn validate(&self) -> Result<(), ValidationErrors> {
        let grpc::SparseVector { indices, values } = self;
        sparse::common::sparse_vector::validate_sparse_vector_impl(indices, values)
    }
}

/// Validate a multi-dense vector by collecting the per-row lengths and
/// delegating the consistency check.
impl Validate for grpc::MultiDenseVector {
    fn validate(&self) -> Result<(), ValidationErrors> {
        let grpc::MultiDenseVector { vectors } = self;
        let multivec_length: Vec<_> = vectors.iter().map(|v| v.data.len()).collect();
        common::validation::validate_multi_vector_by_length(&multivec_length)
    }
}

/// Dispatch validation over the named-vs-single vectors oneof.
impl Validate for super::qdrant::vectors::VectorsOptions {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            super::qdrant::vectors::VectorsOptions::Vector(v) => v.validate(),
            super::qdrant::vectors::VectorsOptions::Vectors(v) => v.validate(),
        }
    }
}

/// Dispatch validation over the internal query enum; every variant is
/// forwarded to its own `Validate` impl.
impl Validate for super::qdrant::query_enum::Query {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            super::qdrant::query_enum::Query::NearestNeighbors(q) => q.validate(),
            super::qdrant::query_enum::Query::RecommendBestScore(q) => q.validate(),
            super::qdrant::query_enum::Query::RecommendSumScores(q) => q.validate(),
            super::qdrant::query_enum::Query::Discover(q) => q.validate(),
            super::qdrant::query_enum::Query::Context(q) => q.validate(),
        }
    }
}

/// Dispatch validation over the public query oneof; variants without
/// nested constraints pass trivially.
impl Validate for super::qdrant::query::Variant {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::query::Variant::Nearest(q) => q.validate(),
            grpc::query::Variant::NearestWithMmr(q) => q.validate(),
            grpc::query::Variant::Recommend(q) => q.validate(),
            grpc::query::Variant::Discover(q) => q.validate(),
            grpc::query::Variant::Context(q) => q.validate(),
            grpc::query::Variant::Formula(q) => q.validate(),
            grpc::query::Variant::Rrf(q) => q.validate(),
            grpc::query::Variant::Sample(_)
            | grpc::query::Variant::Fusion(_)
            | grpc::query::Variant::OrderBy(_) => Ok(()),
        }
    }
}

/// Dispatch validation over vector-input variants; only sparse and
/// multi-dense payloads carry checkable constraints.
impl Validate for super::qdrant::vector_input::Variant {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::vector_input::Variant::Id(_)
            | grpc::vector_input::Variant::Dense(_)
            | grpc::vector_input::Variant::Document(_)
            | grpc::vector_input::Variant::Image(_)
            | grpc::vector_input::Variant::Object(_) => Ok(()),
            grpc::vector_input::Variant::Sparse(sparse_vector) => sparse_vector.validate(),
            grpc::vector_input::Variant::MultiDense(multi_dense_vector) => {
                multi_dense_vector.validate()
            }
        }
    }
}

/// Recursively dispatch validation over formula-expression variants;
/// leaf variants (constants, variables, datetime keys, …) pass trivially.
impl Validate for super::qdrant::expression::Variant {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::expression::Variant::Constant(_) => Ok(()),
            grpc::expression::Variant::Variable(_) => Ok(()),
            grpc::expression::Variant::Condition(condition) => condition.validate(),
            grpc::expression::Variant::GeoDistance(_) => Ok(()),
            grpc::expression::Variant::Datetime(_) => Ok(()),
            grpc::expression::Variant::DatetimeKey(_) => Ok(()),
            grpc::expression::Variant::Mult(mult_expression) => mult_expression.validate(),
            grpc::expression::Variant::Sum(sum_expression) => sum_expression.validate(),
            grpc::expression::Variant::Div(div_expression) => div_expression.validate(),
            grpc::expression::Variant::Neg(expression) => expression.validate(),
            grpc::expression::Variant::Abs(expression) => expression.validate(),
            grpc::expression::Variant::Sqrt(expression) => expression.validate(),
            grpc::expression::Variant::Pow(pow_expression) => pow_expression.validate(),
            grpc::expression::Variant::Exp(expression) => expression.validate(),
            grpc::expression::Variant::Log10(expression) => expression.validate(),
            grpc::expression::Variant::Ln(expression) => expression.validate(),
            grpc::expression::Variant::ExpDecay(decay_params_expression) => {
                decay_params_expression.validate()
            }
            grpc::expression::Variant::GaussDecay(decay_params_expression) => {
                decay_params_expression.validate()
            }
            grpc::expression::Variant::LinDecay(decay_params_expression) => {
                decay_params_expression.validate()
            }
        }
    }
}

/// Validate that GeoLineString has at least 4 points and is closed.
/// Check that a polygon ring has at least 4 points and that its first and
/// last points coincide (i.e. the ring is explicitly closed).
///
/// Errors use the codes `min_line_length` (with `length` / `min_length`
/// parameters) and `closed_line`, matching the REST-side validation.
pub fn validate_geo_polygon_line_helper(line: &grpc::GeoLineString) -> Result<(), ValidationError> {
    const MIN_LINE_LENGTH: usize = 4;

    let point_count = line.points.len();
    if point_count < MIN_LINE_LENGTH {
        let mut err = ValidationError::new("min_line_length");
        err.add_param(Cow::from("length"), &point_count);
        err.add_param(Cow::from("min_length"), &MIN_LINE_LENGTH);
        return Err(err);
    }

    // point_count >= 4 here, so both first() and last() are Some.
    match (line.points.first(), line.points.last()) {
        (Some(first), Some(last)) if first != last => Err(ValidationError::new("closed_line")),
        _ => Ok(()),
    }
}

/// Validate the mandatory exterior ring of a polygon: it must be non-empty
/// (reported as `not_empty`) and satisfy the common ring rules above.
pub fn validate_geo_polygon_exterior(line: &grpc::GeoLineString) -> Result<(), ValidationError> {
    if line.points.is_empty() {
        return Err(ValidationError::new("not_empty"));
    }
    validate_geo_polygon_line_helper(line)
}

/// Validate every interior ring of a polygon, stopping at the first failure.
pub fn validate_geo_polygon_interiors(
    lines: &Vec<grpc::GeoLineString>,
) -> Result<(), ValidationError> {
    lines.iter().try_for_each(validate_geo_polygon_line_helper)
}

/// Validate that the timestamp is within the range specified in the protobuf docs.
/// <https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp>
pub fn validate_timestamp(ts: &prost_wkt_types::Timestamp) -> Result<(), ValidationError> {
    // Seconds must fall inside the protobuf-defined representable window.
    validate_range_generic(
        ts.seconds,
        Some(TIMESTAMP_MIN_SECONDS),
        Some(TIMESTAMP_MAX_SECONDS),
    )?;
    // Nanos are the sub-second part and must stay within one second.
    validate_range_generic(ts.nanos, Some(0), Some(999_999_999))?;
    Ok(())
}

/// Dispatch validation over payload-index parameter variants; only integer
/// index params carry constraints that need checking.
impl Validate for super::qdrant::payload_index_params::IndexParams {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::payload_index_params::IndexParams::KeywordIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::IntegerIndexParams(integer_index_params) => {
                integer_index_params.validate()
            }
            grpc::payload_index_params::IndexParams::FloatIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::GeoIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::TextIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::BoolIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::DatetimeIndexParams(_) => Ok(()),
            grpc::payload_index_params::IndexParams::UuidIndexParams(_) => Ok(()),
        }
    }
}

/// Validate integer index parameters via the shared `lookup`/`range` check.
impl Validate for super::qdrant::IntegerIndexParams {
    fn validate(&self) -> Result<(), ValidationErrors> {
        // Destructure exhaustively; `is_principal`/`on_disk` need no validation.
        let super::qdrant::IntegerIndexParams {
            lookup,
            range,
            is_principal: _,
            on_disk: _,
        } = &self;
        validate_integer_index_params(lookup, range)
    }
}

/// Dispatch validation over a points selector: explicit point lists are
/// always acceptable, filters validate recursively.
impl Validate for super::qdrant::points_selector::PointsSelectorOneOf {
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            grpc::points_selector::PointsSelectorOneOf::Points(_) => Ok(()),
            grpc::points_selector::PointsSelectorOneOf::Filter(filter) => filter.validate(),
        }
    }
}

#[cfg(test)]
mod tests {
    use validator::Validate;

    use crate::grpc::qdrant::{
        CreateCollection, CreateFieldIndexCollection, GeoLineString, GeoPoint, GeoPolygon,
        SearchPoints, UpdateCollection,
    };

    #[test]
    fn test_good_request() {
        // Minimal well-formed creation request must pass.
        let bad_request = CreateCollection {
            collection_name: "test_collection".into(),
            timeout: Some(10),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_ok(),
            "good collection request should not error on validation"
        );

        // Collection name validation must not be strict on non-creation
        let bad_request = UpdateCollection {
            collection_name: "no\\path".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_ok(),
            "good collection request should not error on validation"
        );

        // Collection name validation must not be strict on non-creation
        let bad_request = UpdateCollection {
            collection_name: "no*path".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_ok(),
            "good collection request should not error on validation"
        );
    }

    #[test]
    fn test_bad_collection_request() {
        // Empty name and zero timeout are both invalid on creation.
        let bad_request = CreateCollection {
            collection_name: "".into(),
            timeout: Some(0),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad collection request should error on validation"
        );

        // Collection name validation must be strict on creation
        let bad_request = CreateCollection {
            collection_name: "no/path".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad collection request should error on validation"
        );

        // Collection name validation must be strict on creation
        let bad_request = CreateCollection {
            collection_name: "no*path".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad collection request should error on validation"
        );

        // Collection name validation must still disallow some characters on update
        let bad_request = UpdateCollection {
            collection_name: "no/path".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad collection request should error on validation"
        );
    }

    #[test]
    fn test_bad_index_request() {
        // Empty collection and field names must be rejected.
        let bad_request = CreateFieldIndexCollection {
            collection_name: "".into(),
            field_name: "".into(),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad index request should error on validation"
        );
    }

    #[test]
    fn test_bad_search_request() {
        // Empty collection name, zero limit and empty vector name together.
        let bad_request = SearchPoints {
            collection_name: "".into(),
            limit: 0,
            vector_name: Some("".into()),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad search request should error on validation"
        );

        // Zero limit alone is invalid.
        let bad_request = SearchPoints {
            limit: 0,
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad search request should error on validation"
        );

        // Empty vector name alone is invalid.
        let bad_request = SearchPoints {
            vector_name: Some("".into()),
            ..Default::default()
        };
        assert!(
            bad_request.validate().is_err(),
            "bad search request should error on validation"
        );
    }

    #[test]
    fn test_geo_polygon() {
        // Empty exterior ring is rejected ("not_empty").
        let bad_polygon = GeoPolygon {
            exterior: Some(GeoLineString { points: vec![] }),
            interiors: vec![],
        };
        assert!(
            bad_polygon.validate().is_err(),
            "bad polygon should error on validation"
        );

        // Fewer than 4 points is rejected ("min_line_length").
        let bad_polygon = GeoPolygon {
            exterior: Some(GeoLineString {
                points: vec![
                    GeoPoint { lat: 1., lon: 1. },
                    GeoPoint { lat: 2., lon: 2. },
                    GeoPoint { lat: 3., lon: 3. },
                ],
            }),
            interiors: vec![],
        };
        assert!(
            bad_polygon.validate().is_err(),
            "bad polygon should error on validation"
        );

        // 4 points but first != last is rejected ("closed_line").
        let bad_polygon = GeoPolygon {
            exterior: Some(GeoLineString {
                points: vec![
                    GeoPoint { lat: 1., lon: 1. },
                    GeoPoint { lat: 2., lon: 2. },
                    GeoPoint { lat: 3., lon: 3. },
                    GeoPoint { lat: 4., lon: 4. },
                ],
            }),
            interiors: vec![],
        };
        assert!(
            bad_polygon.validate().is_err(),
            "bad polygon should error on validation"
        );

        // Valid exterior but unclosed interior ring is rejected.
        let bad_polygon = GeoPolygon {
            exterior: Some(GeoLineString {
                points: vec![
                    GeoPoint { lat: 1., lon: 1. },
                    GeoPoint { lat: 2., lon: 2. },
                    GeoPoint { lat: 3., lon: 3. },
                    GeoPoint { lat: 1., lon: 1. },
                ],
            }),
            interiors: vec![GeoLineString {
                points: vec![
                    GeoPoint { lat: 1., lon: 1. },
                    GeoPoint { lat: 2., lon: 2. },
                    GeoPoint { lat: 3., lon: 3. },
                    GeoPoint { lat: 2., lon: 2. },
                ],
            }],
        };
        assert!(
            bad_polygon.validate().is_err(),
            "bad polygon should error on validation"
        );

        // Closed exterior with 4 points and no interiors passes.
        let good_polygon = GeoPolygon {
            exterior: Some(GeoLineString {
                points: vec![
                    GeoPoint { lat: 1., lon: 1. },
                    GeoPoint { lat: 2., lon: 2. },
                    GeoPoint { lat: 3., lon: 3. },
                    GeoPoint { lat: 1., lon: 1. },
                ],
            }),
            interiors: vec![],
        };
        assert!(
            good_polygon.validate().is_ok(),
            "good polygon should not error on validation"
        );
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/qdrant.rs
lib/api/src/grpc/qdrant.rs
// This file is @generated by prost-build. /// `Struct` represents a structured data value, consisting of fields /// which map to dynamically typed values. In some languages, `Struct` /// might be supported by a native representation. For example, in /// scripting languages like JS a struct is represented as an /// object. The details of that representation are described together /// with the proto support for the language. /// /// The JSON representation for `Struct` is a JSON object. #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Struct { /// Unordered map of dynamically typed values. #[prost(map = "string, message", tag = "1")] pub fields: ::std::collections::HashMap<::prost::alloc::string::String, Value>, } /// `Value` represents a dynamically typed value which can be either /// null, a number, a string, a boolean, a recursive struct value, or a /// list of values. A producer of value is expected to set one of those /// variants, absence of any variant indicates an error. /// /// The JSON representation for `Value` is a JSON value. #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Value { /// The kind of value. #[prost(oneof = "value::Kind", tags = "1, 2, 3, 4, 5, 6, 7")] pub kind: ::core::option::Option<value::Kind>, } /// Nested message and enum types in `Value`. pub mod value { /// The kind of value. #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Kind { /// Represents a null value. #[prost(enumeration = "super::NullValue", tag = "1")] NullValue(i32), /// Represents a double value. #[prost(double, tag = "2")] DoubleValue(f64), /// Represents an integer value #[prost(int64, tag = "3")] IntegerValue(i64), /// Represents a string value. 
#[prost(string, tag = "4")] StringValue(::prost::alloc::string::String), /// Represents a boolean value. #[prost(bool, tag = "5")] BoolValue(bool), /// Represents a structured value. #[prost(message, tag = "6")] StructValue(super::Struct), /// Represents a repeated `Value`. #[prost(message, tag = "7")] ListValue(super::ListValue), } } /// `ListValue` is a wrapper around a repeated field of values. /// /// The JSON representation for `ListValue` is a JSON array. #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListValue { /// Repeated field of dynamically typed values. #[prost(message, repeated, tag = "1")] pub values: ::prost::alloc::vec::Vec<Value>, } /// `NullValue` is a singleton enumeration to represent the null value for the /// `Value` type union. /// /// The JSON representation for `NullValue` is JSON `null`. #[derive(serde::Serialize)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum NullValue { /// Null value. NullValue = 0, } impl NullValue { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { NullValue::NullValue => "NULL_VALUE", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { match value { "NULL_VALUE" => Some(Self::NullValue), _ => None, } } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PointId { #[prost(oneof = "point_id::PointIdOptions", tags = "1, 2")] pub point_id_options: ::core::option::Option<point_id::PointIdOptions>, } /// Nested message and enum types in `PointId`. 
pub mod point_id { #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum PointIdOptions { /// Numerical ID of the point #[prost(uint64, tag = "1")] Num(u64), /// UUID #[prost(string, tag = "2")] Uuid(::prost::alloc::string::String), } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoPoint { #[prost(double, tag = "1")] pub lon: f64, #[prost(double, tag = "2")] pub lat: f64, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Filter { /// At least one of those conditions should match #[prost(message, repeated, tag = "1")] #[validate(nested)] pub should: ::prost::alloc::vec::Vec<Condition>, /// All conditions must match #[prost(message, repeated, tag = "2")] #[validate(nested)] pub must: ::prost::alloc::vec::Vec<Condition>, /// All conditions must NOT match #[prost(message, repeated, tag = "3")] #[validate(nested)] pub must_not: ::prost::alloc::vec::Vec<Condition>, /// At least minimum amount of given conditions should match #[prost(message, optional, tag = "4")] #[validate(nested)] pub min_should: ::core::option::Option<MinShould>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MinShould { #[prost(message, repeated, tag = "1")] #[validate(nested)] pub conditions: ::prost::alloc::vec::Vec<Condition>, #[prost(uint64, tag = "2")] pub min_count: u64, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Condition { #[prost(oneof = "condition::ConditionOneOf", tags = "1, 2, 3, 4, 5, 6, 7")] #[validate(nested)] pub condition_one_of: 
::core::option::Option<condition::ConditionOneOf>, } /// Nested message and enum types in `Condition`. pub mod condition { #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum ConditionOneOf { #[prost(message, tag = "1")] Field(super::FieldCondition), #[prost(message, tag = "2")] IsEmpty(super::IsEmptyCondition), #[prost(message, tag = "3")] HasId(super::HasIdCondition), #[prost(message, tag = "4")] Filter(super::Filter), #[prost(message, tag = "5")] IsNull(super::IsNullCondition), #[prost(message, tag = "6")] Nested(super::NestedCondition), #[prost(message, tag = "7")] HasVector(super::HasVectorCondition), } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IsEmptyCondition { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IsNullCondition { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HasIdCondition { #[prost(message, repeated, tag = "1")] pub has_id: ::prost::alloc::vec::Vec<PointId>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HasVectorCondition { #[prost(string, tag = "1")] pub has_vector: ::prost::alloc::string::String, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NestedCondition { /// Path to nested object #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, /// Filter condition #[prost(message, optional, tag = "2")] #[validate(nested)] pub filter: 
::core::option::Option<Filter>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FieldCondition { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, /// Check if point has field with a given value #[prost(message, optional, tag = "2")] pub r#match: ::core::option::Option<Match>, /// Check if points value lies in a given range #[prost(message, optional, tag = "3")] pub range: ::core::option::Option<Range>, /// Check if points geolocation lies in a given area #[prost(message, optional, tag = "4")] pub geo_bounding_box: ::core::option::Option<GeoBoundingBox>, /// Check if geo point is within a given radius #[prost(message, optional, tag = "5")] pub geo_radius: ::core::option::Option<GeoRadius>, /// Check number of values for a specific field #[prost(message, optional, tag = "6")] pub values_count: ::core::option::Option<ValuesCount>, /// Check if geo point is within a given polygon #[prost(message, optional, tag = "7")] pub geo_polygon: ::core::option::Option<GeoPolygon>, /// Check if datetime is within a given range #[prost(message, optional, tag = "8")] pub datetime_range: ::core::option::Option<DatetimeRange>, /// Check if field is empty #[prost(bool, optional, tag = "9")] pub is_empty: ::core::option::Option<bool>, /// Check if field is null #[prost(bool, optional, tag = "10")] pub is_null: ::core::option::Option<bool>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Match { #[prost(oneof = "r#match::MatchValue", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10")] pub match_value: ::core::option::Option<r#match::MatchValue>, } /// Nested message and enum types in `Match`. 
pub mod r#match { #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum MatchValue { /// Match string keyword #[prost(string, tag = "1")] Keyword(::prost::alloc::string::String), /// Match integer #[prost(int64, tag = "2")] Integer(i64), /// Match boolean #[prost(bool, tag = "3")] Boolean(bool), /// Match text #[prost(string, tag = "4")] Text(::prost::alloc::string::String), /// Match multiple keywords #[prost(message, tag = "5")] Keywords(super::RepeatedStrings), /// Match multiple integers #[prost(message, tag = "6")] Integers(super::RepeatedIntegers), /// Match any other value except those integers #[prost(message, tag = "7")] ExceptIntegers(super::RepeatedIntegers), /// Match any other value except those keywords #[prost(message, tag = "8")] ExceptKeywords(super::RepeatedStrings), /// Match phrase text #[prost(string, tag = "9")] Phrase(::prost::alloc::string::String), /// Match any word in the text #[prost(string, tag = "10")] TextAny(::prost::alloc::string::String), } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct RepeatedStrings { #[prost(string, repeated, tag = "1")] pub strings: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct RepeatedIntegers { #[prost(int64, repeated, tag = "1")] pub integers: ::prost::alloc::vec::Vec<i64>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Range { #[prost(double, optional, tag = "1")] pub lt: ::core::option::Option<f64>, #[prost(double, optional, tag = "2")] pub gt: ::core::option::Option<f64>, #[prost(double, optional, tag = "3")] pub gte: ::core::option::Option<f64>, #[prost(double, optional, tag = "4")] pub lte: 
::core::option::Option<f64>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DatetimeRange { #[prost(message, optional, tag = "1")] #[validate(custom(function = "crate::grpc::validate::validate_timestamp"))] pub lt: ::core::option::Option<::prost_wkt_types::Timestamp>, #[prost(message, optional, tag = "2")] #[validate(custom(function = "crate::grpc::validate::validate_timestamp"))] pub gt: ::core::option::Option<::prost_wkt_types::Timestamp>, #[prost(message, optional, tag = "3")] #[validate(custom(function = "crate::grpc::validate::validate_timestamp"))] pub gte: ::core::option::Option<::prost_wkt_types::Timestamp>, #[prost(message, optional, tag = "4")] #[validate(custom(function = "crate::grpc::validate::validate_timestamp"))] pub lte: ::core::option::Option<::prost_wkt_types::Timestamp>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoBoundingBox { /// north-west corner #[prost(message, optional, tag = "1")] pub top_left: ::core::option::Option<GeoPoint>, /// south-east corner #[prost(message, optional, tag = "2")] pub bottom_right: ::core::option::Option<GeoPoint>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoRadius { /// Center of the circle #[prost(message, optional, tag = "1")] pub center: ::core::option::Option<GeoPoint>, /// In meters #[prost(float, tag = "2")] pub radius: f32, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoLineString { /// Ordered sequence of GeoPoints representing the line #[prost(message, repeated, tag = "1")] pub points: ::prost::alloc::vec::Vec<GeoPoint>, } /// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must /// consist of a 
minimum of 4 points. /// Additionally, the first and last points of each GeoLineString must be the same. #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoPolygon { /// The exterior line bounds the surface #[prost(message, optional, tag = "1")] #[validate( custom(function = "crate::grpc::validate::validate_geo_polygon_exterior") )] pub exterior: ::core::option::Option<GeoLineString>, /// Interior lines (if present) bound holes within the surface #[prost(message, repeated, tag = "2")] #[validate( custom(function = "crate::grpc::validate::validate_geo_polygon_interiors") )] pub interiors: ::prost::alloc::vec::Vec<GeoLineString>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValuesCount { #[prost(uint64, optional, tag = "1")] pub lt: ::core::option::Option<u64>, #[prost(uint64, optional, tag = "2")] pub gt: ::core::option::Option<u64>, #[prost(uint64, optional, tag = "3")] pub gte: ::core::option::Option<u64>, #[prost(uint64, optional, tag = "4")] pub lte: ::core::option::Option<u64>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorParams { /// Size of the vectors #[prost(uint64, tag = "1")] #[validate(range(min = 1, max = 65536))] pub size: u64, /// Distance function used for comparing vectors #[prost(enumeration = "Distance", tag = "2")] pub distance: i32, /// Configuration of vector HNSW graph. /// If omitted - the collection configuration will be used #[prost(message, optional, tag = "3")] #[validate(nested)] pub hnsw_config: ::core::option::Option<HnswConfigDiff>, /// Configuration of vector quantization config. 
/// If omitted - the collection configuration will be used #[prost(message, optional, tag = "4")] #[validate(nested)] pub quantization_config: ::core::option::Option<QuantizationConfig>, /// If true - serve vectors from disk. /// If set to false, the vectors will be loaded in RAM. #[prost(bool, optional, tag = "5")] pub on_disk: ::core::option::Option<bool>, /// Data type of the vectors #[prost(enumeration = "Datatype", optional, tag = "6")] pub datatype: ::core::option::Option<i32>, /// Configuration for multi-vector search #[prost(message, optional, tag = "7")] pub multivector_config: ::core::option::Option<MultiVectorConfig>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorParamsDiff { /// Update params for HNSW index. /// If empty object - it will be unset #[prost(message, optional, tag = "1")] #[validate(nested)] pub hnsw_config: ::core::option::Option<HnswConfigDiff>, /// Update quantization params. If none - it is left unchanged. #[prost(message, optional, tag = "2")] #[validate(nested)] pub quantization_config: ::core::option::Option<QuantizationConfigDiff>, /// If true - serve vectors from disk. /// If set to false, the vectors will be loaded in RAM. 
#[prost(bool, optional, tag = "3")] pub on_disk: ::core::option::Option<bool>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorParamsMap { #[prost(map = "string, message", tag = "1")] #[validate(nested)] pub map: ::std::collections::HashMap<::prost::alloc::string::String, VectorParams>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorParamsDiffMap { #[prost(map = "string, message", tag = "1")] #[validate(nested)] pub map: ::std::collections::HashMap< ::prost::alloc::string::String, VectorParamsDiff, >, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorsConfig { #[prost(oneof = "vectors_config::Config", tags = "1, 2")] #[validate(nested)] pub config: ::core::option::Option<vectors_config::Config>, } /// Nested message and enum types in `VectorsConfig`. pub mod vectors_config { #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Config { #[prost(message, tag = "1")] Params(super::VectorParams), #[prost(message, tag = "2")] ParamsMap(super::VectorParamsMap), } } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VectorsConfigDiff { #[prost(oneof = "vectors_config_diff::Config", tags = "1, 2")] #[validate(nested)] pub config: ::core::option::Option<vectors_config_diff::Config>, } /// Nested message and enum types in `VectorsConfigDiff`. 
pub mod vectors_config_diff { #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Config { #[prost(message, tag = "1")] Params(super::VectorParamsDiff), #[prost(message, tag = "2")] ParamsMap(super::VectorParamsDiffMap), } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SparseVectorParams { /// Configuration of sparse index #[prost(message, optional, tag = "1")] pub index: ::core::option::Option<SparseIndexConfig>, /// If set - apply modifier to the vector values #[prost(enumeration = "Modifier", optional, tag = "2")] pub modifier: ::core::option::Option<i32>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SparseVectorConfig { #[prost(map = "string, message", tag = "1")] pub map: ::std::collections::HashMap< ::prost::alloc::string::String, SparseVectorParams, >, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultiVectorConfig { /// Comparator for multi-vector search #[prost(enumeration = "MultiVectorComparator", tag = "1")] pub comparator: i32, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetCollectionInfoRequest { /// Name of the collection #[prost(string, tag = "1")] #[validate( length(min = 1, max = 255), custom(function = "common::validation::validate_collection_name_legacy") )] pub collection_name: ::prost::alloc::string::String, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionExistsRequest { #[prost(string, tag = "1")] #[validate( length(min = 1, max = 255), custom(function = 
"common::validation::validate_collection_name_legacy") )] pub collection_name: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionExists { #[prost(bool, tag = "1")] pub exists: bool, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionExistsResponse { #[prost(message, optional, tag = "1")] pub result: ::core::option::Option<CollectionExists>, /// Time spent to process #[prost(double, tag = "2")] pub time: f64, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListCollectionsRequest {} #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionDescription { /// Name of the collection #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetCollectionInfoResponse { #[prost(message, optional, tag = "1")] pub result: ::core::option::Option<CollectionInfo>, /// Time spent to process #[prost(double, tag = "2")] pub time: f64, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListCollectionsResponse { #[prost(message, repeated, tag = "1")] pub collections: ::prost::alloc::vec::Vec<CollectionDescription>, /// Time spent to process #[prost(double, tag = "2")] pub time: f64, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MaxOptimizationThreads { #[prost(oneof = "max_optimization_threads::Variant", tags = "1, 2")] pub variant: 
::core::option::Option<max_optimization_threads::Variant>, } /// Nested message and enum types in `MaxOptimizationThreads`. pub mod max_optimization_threads { #[derive(serde::Serialize)] #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum Setting { Auto = 0, } impl Setting { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Setting::Auto => "Auto", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { match value { "Auto" => Some(Self::Auto), _ => None, } } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Variant { #[prost(uint64, tag = "1")] Value(u64), #[prost(enumeration = "Setting", tag = "2")] Setting(i32), } } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OptimizerStatus { #[prost(bool, tag = "1")] pub ok: bool, #[prost(string, tag = "2")] pub error: ::prost::alloc::string::String, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionWarning { #[prost(string, tag = "1")] pub message: ::prost::alloc::string::String, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HnswConfigDiff { /// Number of edges per node in the index graph. /// Larger the value - more accurate the search, more space required. 
#[prost(uint64, optional, tag = "1")] pub m: ::core::option::Option<u64>, /// Number of neighbours to consider during the index building. /// Larger the value - more accurate the search, more time required to build the index. #[prost(uint64, optional, tag = "2")] #[validate(range(min = 4))] pub ef_construct: ::core::option::Option<u64>, /// Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. /// This measures the total size of vectors being queried against. /// When the maximum estimated amount of points that a condition satisfies is smaller than /// `full_scan_threshold`, the query planner will use full-scan search instead of HNSW index /// traversal for better performance. /// Note: 1Kb = 1 vector of size 256 #[prost(uint64, optional, tag = "3")] pub full_scan_threshold: ::core::option::Option<u64>, /// Number of parallel threads used for background index building. /// If 0 - automatically select from 8 to 16. /// Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs. /// On small CPUs, less threads are used. #[prost(uint64, optional, tag = "4")] pub max_indexing_threads: ::core::option::Option<u64>, /// Store HNSW index on disk. If set to false, the index will be stored in RAM. #[prost(bool, optional, tag = "5")] pub on_disk: ::core::option::Option<bool>, /// Number of additional payload-aware links per node in the index graph. /// If not set - regular M parameter will be used. #[prost(uint64, optional, tag = "6")] pub payload_m: ::core::option::Option<u64>, /// Store copies of original and quantized vectors within the HNSW index file. Default: false. /// Enabling this option will trade the search speed for disk usage by reducing amount of /// random seeks during the search. /// Requires quantized vectors to be enabled. Multi-vectors are not supported. 
#[prost(bool, optional, tag = "7")] pub inline_storage: ::core::option::Option<bool>, } #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SparseIndexConfig { /// Prefer a full scan search upto (excluding) this number of vectors. /// Note: this is number of vectors, not KiloBytes. #[prost(uint64, optional, tag = "1")] pub full_scan_threshold: ::core::option::Option<u64>, /// Store inverted index on disk. If set to false, the index will be stored in RAM. #[prost(bool, optional, tag = "2")] pub on_disk: ::core::option::Option<bool>, /// Datatype used to store weights in the index. #[prost(enumeration = "Datatype", optional, tag = "3")] pub datatype: ::core::option::Option<i32>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WalConfigDiff { /// Size of a single WAL block file #[prost(uint64, optional, tag = "1")] #[validate(range(min = 1))] pub wal_capacity_mb: ::core::option::Option<u64>, /// Number of segments to create in advance #[prost(uint64, optional, tag = "2")] pub wal_segments_ahead: ::core::option::Option<u64>, /// Number of closed segments to retain #[prost(uint64, optional, tag = "3")] #[validate(range(min = 1))] pub wal_retain_closed: ::core::option::Option<u64>, } #[derive(validator::Validate)] #[derive(serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OptimizersConfigDiff { /// The minimal fraction of deleted vectors in a segment, required to perform /// segment optimization #[prost(double, optional, tag = "1")] #[validate(range(min = 0.0, max = 1.0))] pub deleted_threshold: ::core::option::Option<f64>, /// The minimal number of vectors in a segment, required to perform segment /// optimization #[prost(uint64, optional, tag = "2")] #[validate(range(min = 100))] pub 
vacuum_min_vector_number: ::core::option::Option<u64>, /// Target amount of segments the optimizer will try to keep. /// Real amount of segments may vary depending on multiple parameters: /// /// * Amount of stored points. /// * Current write RPS. /// /// It is recommended to select the default number of segments as a factor of the number of search threads, /// so that each segment would be handled evenly by one of the threads. #[prost(uint64, optional, tag = "3")] pub default_segment_number: ::core::option::Option<u64>, /// Deprecated: /// /// Do not create segments larger this size (in kilobytes). /// Large segments might require disproportionately long indexation times, /// therefore it makes sense to limit the size of segments. /// /// If indexing speed is more important - make this parameter lower. /// If search speed is more important - make this parameter higher. /// Note: 1Kb = 1 vector of size 256 /// If not set, will be automatically selected considering the number of available CPUs. #[prost(uint64, optional, tag = "4")] #[validate(range(min = 1))] pub max_segment_size: ::core::option::Option<u64>, /// Maximum size (in kilobytes) of vectors to store in-memory per segment. /// Segments larger than this threshold will be stored as read-only memmapped file. /// /// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value. /// /// To disable memmap storage, set this to `0`. /// /// Note: 1Kb = 1 vector of size 256 #[prost(uint64, optional, tag = "5")] pub memmap_threshold: ::core::option::Option<u64>, /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding /// this threshold will enable vector indexing /// /// Default value is 20,000, based on /// <<https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.> ///
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/grpc/ops.rs
lib/api/src/grpc/ops.rs
use std::collections::HashMap; use crate::grpc::{HardwareUsage, InferenceUsage, ModelUsage, Usage}; impl HardwareUsage { pub fn add(&mut self, other: Self) { let Self { cpu, payload_io_read, payload_io_write, payload_index_io_read, payload_index_io_write, vector_io_read, vector_io_write, } = other; self.cpu += cpu; self.payload_io_read += payload_io_read; self.payload_io_write += payload_io_write; self.payload_index_io_read += payload_index_io_read; self.payload_index_io_write += payload_index_io_write; self.vector_io_read += vector_io_read; self.vector_io_write += vector_io_write; } pub fn is_empty(&self) -> bool { let Self { cpu, payload_io_read, payload_io_write, payload_index_io_read, payload_index_io_write, vector_io_read, vector_io_write, } = self; *cpu == 0 && *payload_io_read == 0 && *payload_io_write == 0 && *payload_index_io_read == 0 && *payload_index_io_write == 0 && *vector_io_read == 0 && *vector_io_write == 0 } pub fn into_non_empty(self) -> Option<Self> { (!self.is_empty()).then_some(self) } } impl Usage { pub fn new(hardware: Option<HardwareUsage>, inference: Option<InferenceUsage>) -> Self { Self { hardware, inference, } } pub fn is_empty(&self) -> bool { let Usage { hardware, inference, } = self; let hardware_empty = hardware.as_ref().is_none_or(|h| h.is_empty()); let inference_empty = inference.as_ref().is_none_or(|i| i.is_empty()); hardware_empty && inference_empty } pub fn into_non_empty(self) -> Option<Self> { (!self.is_empty()).then_some(self) } pub fn from_hardware_usage(hardware: Option<HardwareUsage>) -> Self { Usage { hardware, inference: None, } } pub fn from_inference_usage(inference: Option<InferenceUsage>) -> Self { Usage { hardware: None, inference, } } } impl InferenceUsage { pub fn new() -> Self { Self { models: HashMap::new(), } } pub fn is_empty(&self) -> bool { self.models.is_empty() } pub fn into_non_empty(self) -> Option<Self> { (!self.is_empty()).then_some(self) } pub fn merge(&mut self, other: Self) { for (model_name, 
model_usage) in other.models { self.models .entry(model_name) .and_modify(|existing| { let ModelUsage { tokens } = existing; *tokens += model_usage.tokens; }) .or_insert(model_usage); } } pub fn merge_opt(&mut self, other: Option<Self>) { if let Some(other) = other { self.merge(other); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/rest/schema.rs
lib/api/src/rest/schema.rs
use std::borrow::Cow; use std::collections::HashMap; use std::hash::{Hash, Hasher}; use common::types::ScoreType; use common::validation::validate_multi_vector; use ordered_float::NotNan; use schemars::JsonSchema; use segment::common::utils::MaybeOneOrMany; use segment::data_types::index::{StemmingAlgorithm, StopwordsInterface, TokenizerType}; use segment::data_types::order_by::OrderBy; use segment::json_path::JsonPath; use segment::types::{ Condition, Filter, GeoPoint, IntPayloadType, Payload, PointIdType, SearchParams, ShardKey, VectorNameBuf, WithPayloadInterface, WithVector, }; use serde::{Deserialize, Serialize}; use serde_json::Value; use sparse::common::sparse_vector::SparseVector; use validator::{Validate, ValidationErrors}; /// Type for dense vector pub type DenseVector = Vec<segment::data_types::vectors::VectorElementType>; /// Type for multi dense vector pub type MultiDenseVector = Vec<DenseVector>; /// Vector Data /// Vectors can be described directly with values /// Or specified with source "objects" for inference #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum Vector { Dense(DenseVector), Sparse(SparseVector), MultiDense(MultiDenseVector), Document(Document), Image(Image), Object(InferenceObject), } /// Vector Data stored in Point #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum VectorOutput { Dense(DenseVector), Sparse(SparseVector), MultiDense(MultiDenseVector), } impl Validate for Vector { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { Vector::Dense(_) => Ok(()), Vector::Sparse(v) => v.validate(), Vector::MultiDense(m) => validate_multi_vector(m), Vector::Document(_) => Ok(()), Vector::Image(_) => Ok(()), Vector::Object(_) => Ok(()), } } } fn vector_example() -> DenseVector { vec![0.875, 0.140625, 0.8976] } fn multi_dense_vector_example() -> MultiDenseVector { vec![ 
vec![0.875, 0.140625, 0.1102], vec![0.758, 0.28126, 0.96871], vec![0.621, 0.421878, 0.9375], ] } fn named_vector_example() -> HashMap<VectorNameBuf, Vector> { let mut map = HashMap::new(); map.insert( "image-embeddings".into(), Vector::Dense(vec![0.873, 0.140625, 0.8976]), ); map } /// Full vector data per point separator with single and multiple vector modes #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum VectorStruct { #[schemars(example = "vector_example")] Single(DenseVector), #[schemars(example = "multi_dense_vector_example")] MultiDense(MultiDenseVector), #[schemars(example = "named_vector_example")] Named(HashMap<VectorNameBuf, Vector>), Document(Document), Image(Image), Object(InferenceObject), } /// Vector data stored in Point #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum VectorStructOutput { #[schemars(example = "vector_example")] Single(DenseVector), #[schemars(example = "multi_dense_vector_example")] MultiDense(MultiDenseVector), #[schemars(example = "named_vector_example")] Named(HashMap<VectorNameBuf, VectorOutput>), } impl VectorStruct { /// Check if this vector struct is empty. 
pub fn is_empty(&self) -> bool { match self { VectorStruct::Single(vector) => vector.is_empty(), VectorStruct::MultiDense(vector) => vector.is_empty(), VectorStruct::Named(vectors) => vectors.values().all(|v| match v { Vector::Dense(vector) => vector.is_empty(), Vector::Sparse(vector) => vector.indices.is_empty(), Vector::MultiDense(vector) => vector.is_empty(), Vector::Document(_) => false, Vector::Image(_) => false, Vector::Object(_) => false, }), VectorStruct::Document(_) => false, VectorStruct::Image(_) => false, VectorStruct::Object(_) => false, } } } impl Validate for VectorStruct { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { VectorStruct::Single(_) => Ok(()), VectorStruct::MultiDense(v) => validate_multi_vector(v), VectorStruct::Named(v) => common::validation::validate_iter(v.values()), VectorStruct::Document(_) => Ok(()), VectorStruct::Image(_) => Ok(()), VectorStruct::Object(_) => Ok(()), } } } #[derive(Clone, Default, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)] pub struct Options { /// Parameters for the model /// Values of the parameters are model-specific pub options: Option<HashMap<String, Value>>, } impl Hash for Options { fn hash<H: Hasher>(&self, state: &mut H) { // Order of keys in the map should not affect the hash if let Some(options) = &self.options { let mut keys: Vec<_> = options.keys().collect(); keys.sort(); for key in keys { key.hash(state); options.get(key).unwrap().hash(state); } } } } /// Configuration of the local bm25 models. #[derive(Debug, Deserialize, Serialize, Clone, JsonSchema, PartialEq, Eq)] pub struct Bm25Config { /// Controls term frequency saturation. Higher values mean term frequency has more impact. /// Default is 1.2 #[serde(default = "default_k")] pub k: NotNan<f64>, #[serde(default = "default_b")] /// Controls document length normalization. Ranges from 0 (no normalization) to 1 (full normalization). /// Higher values mean longer documents have less impact. 
/// Default is 0.75. pub b: NotNan<f64>, #[serde(default = "default_avg_len")] /// Expected average document length in the collection. Default is 256. pub avg_len: NotNan<f64>, /// Tokenizer type to use for text preprocessing. #[serde(default)] pub tokenizer: TokenizerType, #[serde(default, flatten)] pub text_preprocessing_config: TextPreprocessingConfig, } impl Default for Bm25Config { fn default() -> Self { Self { k: default_k(), b: default_b(), avg_len: default_avg_len(), tokenizer: TokenizerType::default(), text_preprocessing_config: TextPreprocessingConfig::default(), } } } const fn default_k() -> NotNan<f64> { unsafe { NotNan::new_unchecked(1.2) } } const fn default_b() -> NotNan<f64> { unsafe { NotNan::new_unchecked(0.75) } } const fn default_avg_len() -> NotNan<f64> { unsafe { NotNan::new_unchecked(256.0) } } /// Bm25 tokenizer configurations. #[derive(Debug, Deserialize, Serialize, Clone, Default, JsonSchema, PartialEq, Eq)] pub struct TextPreprocessingConfig { /// Defines which language to use for text preprocessing. /// This parameter is used to construct default stopwords filter and stemmer. /// To disable language-specific processing, set this to `"language": "none"`. /// If not specified, English is assumed. #[serde(default, skip_serializing_if = "Option::is_none")] pub language: Option<String>, /// Lowercase the text before tokenization. /// Default is `true`. #[serde(default, skip_serializing_if = "Option::is_none")] pub lowercase: Option<bool>, /// If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). /// Default is `false`. #[serde(default, skip_serializing_if = "Option::is_none")] pub ascii_folding: Option<bool>, /// Configuration of the stopwords filter. Supports list of pre-defined languages and custom stopwords. /// Default: initialized for specified `language` or English if not specified. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub stopwords: Option<StopwordsInterface>, /// Configuration of the stemmer. Processes tokens to their root form. /// Default: initialized Snowball stemmer for specified `language` or English if not specified. #[serde(default, skip_serializing_if = "Option::is_none")] pub stemmer: Option<StemmingAlgorithm>, /// Minimum token length to keep. If token is shorter than this, it will be discarded. /// Default is `None`, which means no minimum length. #[serde(default, skip_serializing_if = "Option::is_none")] pub min_token_len: Option<usize>, /// Maximum token length to keep. If token is longer than this, it will be discarded. /// Default is `None`, which means no maximum length. #[serde(default, skip_serializing_if = "Option::is_none")] pub max_token_len: Option<usize>, } impl Bm25Config { pub fn to_options(&self) -> HashMap<String, Value> { debug_assert!( false, "this code should never be called, it is only for schema generation", ); let value = serde_json::to_value(self) .expect("conversion of internal structure to JSON should never fail"); match value { Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) | Value::Array(_) => HashMap::default(), // not expected Value::Object(map) => map.into_iter().collect(), } } } /// Option variants for text documents. /// Ether general-purpose options or BM25-specific options. /// BM25-specific will only take effect if the `qdrant/bm25` is specified as a model. 
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum DocumentOptions { // This option should go first Common(HashMap<String, Value>), // This should never be deserialized into, but we keep it for schema generation Bm25(Bm25Config), } #[cfg(test)] mod tests { use super::*; #[test] fn test_document_options_should_deserialize_into_common() { let json = Bm25Config { tokenizer: TokenizerType::Word, ..Default::default() }; let valid_bm25_config = serde_json::to_string(&json).unwrap(); let options: DocumentOptions = serde_json::from_str(&valid_bm25_config).unwrap(); // Bm25 option is used only for schema, actual deserialization will happen in specialized code assert!(matches!(options, DocumentOptions::Common(_))); } } impl DocumentOptions { pub fn into_options(self) -> HashMap<String, Value> { match self { DocumentOptions::Common(options) => options, DocumentOptions::Bm25(bm25) => bm25.to_options(), } } } impl Hash for DocumentOptions { fn hash<H: Hasher>(&self, state: &mut H) { let options = match self { DocumentOptions::Common(options) => Cow::Borrowed(options), DocumentOptions::Bm25(bm25) => Cow::Owned(bm25.to_options()), }; // Order of keys in the map should not affect the hash let mut keys: Vec<_> = options.keys().collect(); keys.sort(); for key in keys { key.hash(state); options.get(key).unwrap().hash(state); } } } /// WARN: Work-in-progress, unimplemented /// /// Text document for embedding. Requires inference infrastructure, unimplemented. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema, Hash, Validate)] pub struct Document { /// Text of the document. /// This field will be used as input for the embedding model. #[schemars(example = "document_text_example")] pub text: String, /// Name of the model used to generate the vector. /// List of available models depends on a provider. 
#[validate(length(min = 1))] #[schemars(length(min = 1), example = "model_example")] pub model: String, /// Additional options for the model, will be passed to the inference service as-is. /// See model cards for available options. #[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option<DocumentOptions>, } /// WARN: Work-in-progress, unimplemented /// /// Image object for embedding. Requires inference infrastructure, unimplemented. #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema, Validate, Hash)] pub struct Image { /// Image data: base64 encoded image or an URL #[schemars(example = "image_value_example")] pub image: Value, /// Name of the model used to generate the vector. /// List of available models depends on a provider. #[validate(length(min = 1))] #[schemars(length(min = 1), example = "image_model_example")] pub model: String, /// Parameters for the model. /// Values of the parameters are model-specific. #[serde(flatten)] pub options: Options, } /// WARN: Work-in-progress, unimplemented /// /// Custom object for embedding. Requires inference infrastructure, unimplemented. #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema, Hash, Validate)] pub struct InferenceObject { /// Arbitrary data, used as input for the embedding model. /// Used if the model requires more than one input or a custom input. pub object: Value, /// Name of the model used to generate the vector. /// List of available models depends on a provider. #[validate(length(min = 1))] #[schemars(length(min = 1), example = "model_example")] pub model: String, /// Parameters for the model. /// Values of the parameters are model-specific. 
#[serde(flatten)] pub options: Options, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(untagged, rename_all = "snake_case")] pub enum BatchVectorStruct { Single(Vec<DenseVector>), MultiDense(Vec<MultiDenseVector>), Named(HashMap<VectorNameBuf, Vec<Vector>>), Document(Vec<Document>), Image(Vec<Image>), Object(Vec<InferenceObject>), } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct Batch { pub ids: Vec<PointIdType>, pub vectors: BatchVectorStruct, pub payloads: Option<Vec<Option<Payload>>>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] pub struct ShardKeyWithFallback { pub target: ShardKey, /// Fallback shard key will be used if target shard key is not created or active pub fallback: ShardKey, } #[derive(Debug, Deserialize, Serialize, Clone, JsonSchema, PartialEq, Hash)] #[serde(untagged)] #[serde(expecting = "Expected a string or an integer")] pub enum ShardKeySelector { ShardKey(ShardKey), ShardKeys(Vec<ShardKey>), ShardKeyWithFallback(ShardKeyWithFallback), // ToDo: select by pattern } fn version_example() -> segment::types::SeqNumberType { 3 } fn score_example() -> common::types::ScoreType { 0.75 } fn document_text_example() -> String { "This is a document text".to_string() } fn model_example() -> String { "jinaai/jina-embeddings-v2-base-en".to_string() } fn image_value_example() -> String { "https://example.com/image.jpg".to_string() } fn image_model_example() -> String { "Qdrant/clip-ViT-B-32-vision".to_string() } /// Search result #[derive(Serialize, JsonSchema, Clone, Debug)] pub struct ScoredPoint { /// Point id pub id: PointIdType, /// Point version #[schemars(example = "version_example")] pub version: segment::types::SeqNumberType, /// Points vector distance to the query vector #[schemars(example = "score_example")] pub score: ScoreType, /// Payload - values assigned to the point #[serde(skip_serializing_if = 
"Option::is_none")] pub payload: Option<segment::types::Payload>, /// Vector of the point #[serde(skip_serializing_if = "Option::is_none")] pub vector: Option<VectorStructOutput>, /// Shard Key #[serde(skip_serializing_if = "Option::is_none")] pub shard_key: Option<ShardKey>, /// Order-by value #[serde(skip_serializing_if = "Option::is_none")] pub order_value: Option<segment::data_types::order_by::OrderValue>, } /// Point data #[derive(Clone, Debug, PartialEq, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct Record { /// Id of the point pub id: segment::types::PointIdType, /// Payload - values assigned to the point #[serde(skip_serializing_if = "Option::is_none")] pub payload: Option<segment::types::Payload>, /// Vector of the point #[serde(skip_serializing_if = "Option::is_none")] pub vector: Option<VectorStructOutput>, /// Shard Key #[serde(skip_serializing_if = "Option::is_none")] pub shard_key: Option<segment::types::ShardKey>, #[serde(skip_serializing_if = "Option::is_none")] pub order_value: Option<segment::data_types::order_by::OrderValue>, } /// Vector data separator for named and unnamed modes /// Unnamed mode: /// /// { /// "vector": [1.0, 2.0, 3.0] /// } /// /// or named mode: /// /// { /// "vector": { /// "vector": [1.0, 2.0, 3.0], /// "name": "image-embeddings" /// } /// } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq)] #[serde(rename_all = "snake_case")] #[serde(untagged)] pub enum NamedVectorStruct { Default(segment::data_types::vectors::DenseVector), Dense(segment::data_types::vectors::NamedVector), Sparse(segment::data_types::vectors::NamedSparseVector), // No support for multi-dense vectors in search } #[derive(Deserialize, Serialize, JsonSchema, Clone, Debug, PartialEq, Hash)] #[serde(untagged)] #[serde(expecting = "Expected a string, or an object with a key, direction and/or start_from")] pub enum OrderByInterface { Key(JsonPath), Struct(OrderBy), } /// Fusion algorithm allows to combine results of 
multiple prefetches. /// /// Available fusion algorithms: /// /// * `rrf` - Reciprocal Rank Fusion (with default parameters) /// * `dbsf` - Distribution-Based Score Fusion #[derive(Debug, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub enum Fusion { Rrf, Dbsf, } /// Parameters for Reciprocal Rank Fusion #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct Rrf { /// K parameter for reciprocal rank fusion #[validate(range(min = 1))] #[serde(default)] pub k: Option<usize>, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] #[serde(untagged)] pub enum VectorInput { DenseVector(DenseVector), SparseVector(SparseVector), MultiDenseVector(MultiDenseVector), Id(segment::types::PointIdType), Document(Document), Image(Image), Object(InferenceObject), } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct QueryRequestInternal { /// Sub-requests to perform first. If present, the query will be performed on the results of the prefetch(es). #[validate(nested)] #[serde(default, with = "MaybeOneOrMany")] #[schemars(with = "MaybeOneOrMany<Prefetch>")] pub prefetch: Option<Vec<Prefetch>>, /// Query to perform. If missing without prefetches, returns points ordered by their IDs. #[validate(nested)] pub query: Option<QueryInterface>, /// Define which vector name to use for querying. If missing, the default vector is used. pub using: Option<VectorNameBuf>, /// Filter conditions - return only those points that satisfy the specified conditions. #[validate(nested)] pub filter: Option<Filter>, /// Search params for when there is no prefetch #[validate(nested)] pub params: Option<SearchParams>, /// Return points with scores better than this threshold. pub score_threshold: Option<ScoreType>, /// Max number of points to return. Default is 10. #[validate(range(min = 1))] pub limit: Option<usize>, /// Offset of the result. Skip this many points. 
Default is 0 pub offset: Option<usize>, /// Options for specifying which vectors to include into the response. Default is false. #[serde(alias = "with_vectors")] pub with_vector: Option<WithVector>, /// Options for specifying which payload to include or not. Default is false. pub with_payload: Option<WithPayloadInterface>, /// The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector /// Note: the other collection vectors should have the same vector size as the 'using' vector in the current collection #[serde(default)] pub lookup_from: Option<LookupLocation>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct QueryRequest { #[validate(nested)] #[serde(flatten)] pub internal: QueryRequestInternal, pub shard_key: Option<ShardKeySelector>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct QueryRequestBatch { #[validate(nested)] pub searches: Vec<QueryRequest>, } #[derive(Debug, Serialize, JsonSchema)] pub struct QueryResponse { pub points: Vec<ScoredPoint>, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] #[serde(untagged)] #[serde(expecting = "Expected some form of vector, id, or a type of query")] pub enum QueryInterface { Nearest(VectorInput), Query(Query), } #[derive(Debug, Serialize, Deserialize, JsonSchema)] #[serde(untagged)] pub enum Query { /// Find the nearest neighbors to this vector. Nearest(NearestQuery), /// Use multiple positive and negative vectors to find the results. Recommend(RecommendQuery), /// Search for nearest points, but constrain the search space with context Discover(DiscoverQuery), /// Return points that live in positive areas. Context(ContextQuery), /// Order the points by a payload field. OrderBy(OrderByQuery), /// Fuse the results of multiple prefetches. 
Fusion(FusionQuery), /// Apply reciprocal rank fusion to multiple prefetches Rrf(RrfQuery), /// Score boosting via an arbitrary formula Formula(FormulaQuery), /// Sample points from the collection, non-deterministically. Sample(SampleQuery), } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct NearestQuery { /// The vector to search for nearest neighbors. #[validate(nested)] pub nearest: VectorInput, /// Perform MMR (Maximal Marginal Relevance) reranking after search, /// using the same vector in this query to calculate relevance. #[validate(nested)] pub mmr: Option<Mmr>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct RecommendQuery { #[validate(nested)] pub recommend: RecommendInput, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct DiscoverQuery { #[validate(nested)] pub discover: DiscoverInput, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct ContextQuery { #[validate(nested)] pub context: ContextInput, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct OrderByQuery { #[validate(nested)] pub order_by: OrderByInterface, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct FusionQuery { #[validate(nested)] pub fusion: Fusion, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct RrfQuery { #[validate(nested)] pub rrf: Rrf, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct FormulaQuery { pub formula: Expression, #[serde(default)] pub defaults: HashMap<String, Value>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct SampleQuery { #[validate(nested)] pub sample: Sample, 
} /// Maximal Marginal Relevance (MMR) algorithm for re-ranking the points. #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] #[serde(rename_all = "snake_case")] pub struct Mmr { /// Tunable parameter for the MMR algorithm. /// Determines the balance between diversity and relevance. /// /// A higher value favors diversity (dissimilarity to selected results), /// while a lower value favors relevance (similarity to the query vector). /// /// Must be in the range [0, 1]. /// Default value is 0.5. #[validate(range(min = 0.0, max = 1.0))] pub diversity: Option<f32>, /// The maximum number of candidates to consider for re-ranking. /// /// If not specified, the `limit` value is used. #[validate(range(max = 16_384))] // artificial maximum, to avoid too expensive query. pub candidates_limit: Option<usize>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct Prefetch { /// Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. #[validate(nested)] #[serde(default, with = "MaybeOneOrMany")] #[schemars(with = "MaybeOneOrMany<Prefetch>")] pub prefetch: Option<Vec<Prefetch>>, /// Query to perform. If missing without prefetches, returns points ordered by their IDs. #[validate(nested)] pub query: Option<QueryInterface>, /// Define which vector name to use for querying. If missing, the default vector is used. pub using: Option<VectorNameBuf>, /// Filter conditions - return only those points that satisfy the specified conditions. #[validate(nested)] pub filter: Option<Filter>, /// Search params for when there is no prefetch #[validate(nested)] pub params: Option<SearchParams>, /// Return points with scores better than this threshold. pub score_threshold: Option<ScoreType>, /// Max number of points to return. Default is 10. 
#[validate(range(min = 1))] pub limit: Option<usize>, /// The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector /// Note: the other collection vectors should have the same vector size as the 'using' vector in the current collection #[serde(default)] pub lookup_from: Option<LookupLocation>, } /// How to use positive and negative examples to find the results, default is `average_vector`: /// /// * `average_vector` - Average positive and negative vectors and create a single query /// with the formula `query = avg_pos + avg_pos - avg_neg`. Then performs normal search. /// /// * `best_score` - Uses custom search objective. Each candidate is compared against all /// examples, its score is then chosen from the `max(max_pos_score, max_neg_score)`. /// If the `max_neg_score` is chosen then it is squared and negated, otherwise it is just /// the `max_pos_score`. /// /// * `sum_scores` - Uses custom search objective. Compares against all inputs, sums all the scores. /// Scores against positive vectors are added, against negatives are subtracted. 
#[derive(Debug, Deserialize, Serialize, JsonSchema, Default, PartialEq, Clone, Copy)] #[serde(rename_all = "snake_case")] pub enum RecommendStrategy { #[default] AverageVector, BestScore, SumScores, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct RecommendInput { /// Look for vectors closest to the vectors from these points pub positive: Option<Vec<VectorInput>>, /// Try to avoid vectors like the vector from these points pub negative: Option<Vec<VectorInput>>, /// How to use the provided vectors to find the results pub strategy: Option<RecommendStrategy>, } impl RecommendInput { pub fn iter(&self) -> impl Iterator<Item = &VectorInput> { self.positive .iter() .flatten() .chain(self.negative.iter().flatten()) } } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct DiscoverInput { /// Use this as the primary search objective #[validate(nested)] pub target: VectorInput, /// Search space will be constrained by these pairs of vectors #[validate(nested)] #[serde(with = "MaybeOneOrMany")] #[schemars(with = "MaybeOneOrMany<ContextPair>")] pub context: Option<Vec<ContextPair>>, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct ContextInput( /// Search space will be constrained by these pairs of vectors #[serde(with = "MaybeOneOrMany")] #[schemars(with = "MaybeOneOrMany<ContextPair>")] pub Option<Vec<ContextPair>>, ); #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct ContextPair { /// A positive vector #[validate(nested)] pub positive: VectorInput, /// Repel from this vector #[validate(nested)] pub negative: VectorInput, } impl ContextPair { pub fn iter(&self) -> impl Iterator<Item = &VectorInput> { std::iter::once(&self.positive).chain(std::iter::once(&self.negative)) } } #[derive(Debug, Serialize, Deserialize, JsonSchema)] #[serde(untagged)] pub enum Expression { Constant(f32), Variable(String), Condition(Box<Condition>), GeoDistance(GeoDistance), Datetime(DatetimeExpression), 
DatetimeKey(DatetimeKeyExpression), Mult(MultExpression), Sum(SumExpression), Neg(NegExpression), Abs(AbsExpression), Div(DivExpression), Sqrt(SqrtExpression), Pow(PowExpression), Exp(ExpExpression), Log10(Log10Expression), Ln(LnExpression), LinDecay(LinDecayExpression), ExpDecay(ExpDecayExpression), GaussDecay(GaussDecayExpression), } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct GeoDistance { pub geo_distance: GeoDistanceParams, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct GeoDistanceParams { /// The origin geo point to measure from pub origin: GeoPoint, /// Payload field with the destination geo point pub to: JsonPath, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct DatetimeExpression { pub datetime: String, } #[derive(Debug, Serialize, Deserialize, JsonSchema)] pub struct DatetimeKeyExpression { pub datetime_key: JsonPath, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct MultExpression { #[validate(nested)] pub mult: Vec<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct SumExpression { #[validate(nested)] pub sum: Vec<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct NegExpression { #[validate(nested)] pub neg: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct AbsExpression { #[validate(nested)] pub abs: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct DivExpression { #[validate(nested)] pub div: DivParams, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct DivParams { #[validate(nested)] pub left: Box<Expression>, #[validate(nested)] pub right: Box<Expression>, pub by_zero_default: Option<ScoreType>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct SqrtExpression { #[validate(nested)] pub sqrt: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, 
Validate)] pub struct PowExpression { pub pow: PowParams, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct PowParams { #[validate(nested)] pub base: Box<Expression>, #[validate(nested)] pub exponent: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct ExpExpression { #[validate(nested)] pub exp: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct Log10Expression { #[validate(nested)] pub log10: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct LnExpression { #[validate(nested)] pub ln: Box<Expression>, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct LinDecayExpression { #[validate(nested)] pub lin_decay: DecayParamsExpression, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Validate)] pub struct ExpDecayExpression { #[validate(nested)]
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
true
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/rest/mod.rs
lib/api/src/rest/mod.rs
pub mod conversions; pub mod models; pub mod schema; pub mod validate; pub use schema::*;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/rest/conversions.rs
lib/api/src/rest/conversions.rs
use std::collections::HashMap; use segment::data_types::order_by::OrderBy; use segment::data_types::vectors::{VectorInternal, VectorStructInternal}; use uuid::Uuid; use super::schema::{ScoredPoint, Vector}; use super::{ FacetRequestInternal, FacetResponse, FacetValue, FacetValueHit, NearestQuery, OrderByInterface, Query, QueryInterface, VectorOutput, VectorStructOutput, }; use crate::grpc; use crate::rest::models::InferenceUsage; use crate::rest::{DenseVector, NamedVectorStruct}; impl From<InferenceUsage> for grpc::InferenceUsage { fn from(value: InferenceUsage) -> Self { let mut grpc_usage_models = HashMap::with_capacity(value.models.len()); for (model, usage) in value.models { grpc_usage_models.insert( model, grpc::ModelUsage { tokens: usage.tokens, }, ); } grpc::InferenceUsage { models: grpc_usage_models, } } } impl From<VectorInternal> for VectorOutput { fn from(value: VectorInternal) -> Self { match value { VectorInternal::Dense(vector) => VectorOutput::Dense(vector), VectorInternal::Sparse(vector) => VectorOutput::Sparse(vector), VectorInternal::MultiDense(vector) => { VectorOutput::MultiDense(vector.into_multi_vectors()) } } } } impl From<VectorStructInternal> for VectorStructOutput { fn from(value: VectorStructInternal) -> Self { // ToDo: this conversion should be removed match value { VectorStructInternal::Single(vector) => VectorStructOutput::Single(vector), VectorStructInternal::MultiDense(vector) => { VectorStructOutput::MultiDense(vector.into_multi_vectors()) } VectorStructInternal::Named(vectors) => VectorStructOutput::Named( vectors .into_iter() .map(|(k, v)| (k, VectorOutput::from(v))) .collect(), ), } } } impl From<Vector> for VectorInternal { fn from(value: Vector) -> Self { match value { Vector::Dense(vector) => VectorInternal::Dense(vector), Vector::Sparse(vector) => VectorInternal::Sparse(vector), Vector::MultiDense(vectors) => VectorInternal::MultiDense( segment::data_types::vectors::MultiDenseVectorInternal::new_unchecked(vectors), ), 
Vector::Document(_) | Vector::Image(_) | Vector::Object(_) => { // If this is reached, it means validation failed unimplemented!("Inference is not implemented, please use vectors instead") } } } } impl From<segment::types::ScoredPoint> for ScoredPoint { fn from(value: segment::types::ScoredPoint) -> Self { let segment::types::ScoredPoint { id, version, score, payload, vector, shard_key, order_value, } = value; ScoredPoint { id, version, score, payload, vector: vector.map(VectorStructOutput::from), shard_key, order_value, } } } impl From<NamedVectorStruct> for segment::data_types::vectors::NamedVectorStruct { fn from(value: NamedVectorStruct) -> Self { match value { NamedVectorStruct::Default(vector) => { segment::data_types::vectors::NamedVectorStruct::Default(vector) } NamedVectorStruct::Dense(vector) => { segment::data_types::vectors::NamedVectorStruct::Dense(vector) } NamedVectorStruct::Sparse(vector) => { segment::data_types::vectors::NamedVectorStruct::Sparse(vector) } } } } impl From<DenseVector> for NamedVectorStruct { fn from(v: DenseVector) -> Self { NamedVectorStruct::Default(v) } } impl From<segment::data_types::vectors::NamedVector> for NamedVectorStruct { fn from(v: segment::data_types::vectors::NamedVector) -> Self { NamedVectorStruct::Dense(v) } } impl From<OrderByInterface> for OrderBy { fn from(order_by: OrderByInterface) -> Self { match order_by { OrderByInterface::Key(key) => OrderBy { key, direction: None, start_from: None, }, OrderByInterface::Struct(order_by) => order_by, } } } impl From<QueryInterface> for Query { fn from(value: QueryInterface) -> Self { match value { QueryInterface::Nearest(vector) => Query::Nearest(NearestQuery { nearest: vector, mmr: None, }), QueryInterface::Query(query) => query, } } } impl From<segment::data_types::facets::FacetValue> for FacetValue { fn from(value: segment::data_types::facets::FacetValue) -> Self { match value { segment::data_types::facets::FacetValue::Keyword(keyword) => Self::String(keyword), 
segment::data_types::facets::FacetValue::Int(integer) => Self::Integer(integer), segment::data_types::facets::FacetValue::Uuid(uuid_int) => { Self::String(Uuid::from_u128(uuid_int).to_string()) } segment::data_types::facets::FacetValue::Bool(b) => Self::Bool(b), } } } impl From<segment::data_types::facets::FacetValueHit> for FacetValueHit { fn from(value: segment::data_types::facets::FacetValueHit) -> Self { let segment::data_types::facets::FacetValueHit { value, count } = value; Self { value: From::from(value), count, } } } impl From<segment::data_types::facets::FacetResponse> for FacetResponse { fn from(value: segment::data_types::facets::FacetResponse) -> Self { let segment::data_types::facets::FacetResponse { hits } = value; Self { hits: hits.into_iter().map(From::from).collect(), } } } impl From<FacetRequestInternal> for segment::data_types::facets::FacetParams { fn from(value: FacetRequestInternal) -> Self { let FacetRequestInternal { key, limit, filter, exact, } = value; Self { key, limit: limit.unwrap_or(Self::DEFAULT_LIMIT), filter, exact: exact.unwrap_or(Self::DEFAULT_EXACT), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/rest/validate.rs
lib/api/src/rest/validate.rs
use std::borrow::Cow; use common::validation::validate_multi_vector; use segment::index::query_optimization::rescore_formula::parsed_formula::VariableId; use validator::{Validate, ValidationError, ValidationErrors}; use super::{ Batch, BatchVectorStruct, ContextInput, Expression, FormulaQuery, Fusion, NamedVectorStruct, OrderByInterface, PointVectors, Query, QueryInterface, RecommendInput, Sample, VectorInput, }; impl Validate for NamedVectorStruct { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { NamedVectorStruct::Default(_) => Ok(()), NamedVectorStruct::Dense(_) => Ok(()), NamedVectorStruct::Sparse(v) => v.validate(), } } } impl Validate for QueryInterface { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { QueryInterface::Nearest(vector) => vector.validate(), QueryInterface::Query(query) => query.validate(), } } } impl Validate for Query { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { Query::Nearest(vector) => vector.validate(), Query::Recommend(recommend) => recommend.validate(), Query::Discover(discover) => discover.validate(), Query::Context(context) => context.validate(), Query::Fusion(fusion) => fusion.validate(), Query::Rrf(rrf) => rrf.validate(), Query::Formula(formula) => formula.validate(), Query::OrderBy(order_by) => order_by.validate(), Query::Sample(sample) => sample.validate(), } } } impl Validate for VectorInput { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { VectorInput::Id(_id) => Ok(()), VectorInput::DenseVector(_dense) => Ok(()), VectorInput::SparseVector(sparse) => sparse.validate(), VectorInput::MultiDenseVector(multi) => validate_multi_vector(multi), VectorInput::Document(doc) => doc.validate(), VectorInput::Image(image) => image.validate(), VectorInput::Object(obj) => obj.validate(), } } } impl Validate for RecommendInput { fn validate(&self) -> Result<(), validator::ValidationErrors> { let no_positives = 
self.positive.as_ref().map(|p| p.is_empty()).unwrap_or(true); let no_negatives = self.negative.as_ref().map(|n| n.is_empty()).unwrap_or(true); if no_positives && no_negatives { let mut errors = validator::ValidationErrors::new(); errors.add( "positives, negatives", ValidationError::new( "At least one positive or negative vector/id must be provided", ), ); return Err(errors); } for item in self.iter() { item.validate()?; } Ok(()) } } impl Validate for ContextInput { fn validate(&self) -> Result<(), validator::ValidationErrors> { for item in self.0.iter().flatten().flat_map(|item| item.iter()) { item.validate()?; } Ok(()) } } impl Validate for Fusion { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { Fusion::Rrf | Fusion::Dbsf => Ok(()), } } } impl Validate for FormulaQuery { fn validate(&self) -> Result<(), validator::ValidationErrors> { let Self { formula, defaults } = self; // validate formula Expression formula.validate()?; let mut errors = validator::ValidationErrors::new(); for (key, value) in defaults.iter() { let var_id = match key.parse() { Ok(var_id) => var_id, Err(err) => { let validation = ValidationError::new("Invalid variable name").with_message(Cow::Owned(err)); errors.add("defaults", validation); continue; } }; match var_id { VariableId::Score(_) if value.as_number().is_none() => { let validation = ValidationError::new("Score default must be a number"); errors.add("defaults", validation); } _ => (), } } Ok(()) } } impl Validate for OrderByInterface { fn validate(&self) -> Result<(), validator::ValidationErrors> { match self { OrderByInterface::Key(_key) => Ok(()), // validated during parsing OrderByInterface::Struct(order_by) => order_by.validate(), } } } impl Validate for Sample { fn validate(&self) -> Result<(), ValidationErrors> { match self { Sample::Random => Ok(()), } } } impl Validate for BatchVectorStruct { fn validate(&self) -> Result<(), ValidationErrors> { match self { BatchVectorStruct::Single(_) => Ok(()), 
BatchVectorStruct::MultiDense(vectors) => { for vector in vectors { validate_multi_vector(vector)?; } Ok(()) } BatchVectorStruct::Named(v) => { common::validation::validate_iter(v.values().flat_map(|batch| batch.iter())) } BatchVectorStruct::Document(_) => Ok(()), BatchVectorStruct::Image(_) => Ok(()), BatchVectorStruct::Object(_) => Ok(()), } } } impl Validate for Batch { fn validate(&self) -> Result<(), ValidationErrors> { let batch = self; let bad_input_description = |ids: usize, vecs: usize| -> String { format!("number of ids and vectors must be equal ({ids} != {vecs})") }; let create_error = |message: String| -> ValidationErrors { let mut errors = ValidationErrors::new(); errors.add("batch", { let mut error = ValidationError::new("point_insert_operation"); error.message.replace(Cow::from(message)); error }); errors }; self.vectors.validate()?; match &batch.vectors { BatchVectorStruct::Single(vectors) => { if batch.ids.len() != vectors.len() { return Err(create_error(bad_input_description( batch.ids.len(), vectors.len(), ))); } } BatchVectorStruct::MultiDense(vectors) => { if batch.ids.len() != vectors.len() { return Err(create_error(bad_input_description( batch.ids.len(), vectors.len(), ))); } } BatchVectorStruct::Named(named_vectors) => { for vectors in named_vectors.values() { if batch.ids.len() != vectors.len() { return Err(create_error(bad_input_description( batch.ids.len(), vectors.len(), ))); } } } BatchVectorStruct::Document(_) => {} BatchVectorStruct::Image(_) => {} BatchVectorStruct::Object(_) => {} } if let Some(payload_vector) = &batch.payloads && payload_vector.len() != batch.ids.len() { return Err(create_error(format!( "number of ids and payloads must be equal ({} != {})", batch.ids.len(), payload_vector.len(), ))); } Ok(()) } } impl Validate for PointVectors { fn validate(&self) -> Result<(), ValidationErrors> { if self.vector.is_empty() { let mut err = ValidationError::new("length"); err.message = Some(Cow::from("must specify vectors to update 
for point")); err.add_param(Cow::from("min"), &1); let mut errors = ValidationErrors::new(); errors.add("vector", err); Err(errors) } else { self.vector.validate() } } } impl Validate for Expression { fn validate(&self) -> Result<(), ValidationErrors> { match self { Expression::Constant(_) => Ok(()), Expression::Variable(_) => Ok(()), Expression::Condition(condition) => condition.validate(), Expression::GeoDistance(_) => Ok(()), Expression::Datetime(_) => Ok(()), Expression::DatetimeKey(_) => Ok(()), Expression::Mult(mult_expression) => mult_expression.validate(), Expression::Sum(sum_expression) => sum_expression.validate(), Expression::Neg(neg_expression) => neg_expression.validate(), Expression::Abs(abs_expression) => abs_expression.validate(), Expression::Div(div_expression) => div_expression.validate(), Expression::Sqrt(sqrt_expression) => sqrt_expression.validate(), Expression::Pow(pow_expression) => pow_expression.validate(), Expression::Exp(exp_expression) => exp_expression.validate(), Expression::Log10(log10_expression) => log10_expression.validate(), Expression::Ln(ln_expression) => ln_expression.validate(), Expression::LinDecay(lin_decay_expression) => lin_decay_expression.validate(), Expression::ExpDecay(exp_decay_expression) => exp_decay_expression.validate(), Expression::GaussDecay(gauss_decay_expression) => gauss_decay_expression.validate(), } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/api/src/rest/models.rs
lib/api/src/rest/models.rs
use std::fmt::Debug; use ahash::HashMap; use schemars::JsonSchema; use segment::common::anonymize::Anonymize; use segment::types::ShardKey; use serde; use serde::{Deserialize, Serialize}; pub fn get_git_commit_id() -> Option<String> { option_env!("GIT_COMMIT_ID") .map(ToString::to_string) .filter(|s| !s.trim().is_empty()) } #[derive(Serialize, JsonSchema)] pub struct VersionInfo { pub title: String, pub version: String, #[serde(skip_serializing_if = "Option::is_none")] pub commit: Option<String>, } impl Default for VersionInfo { fn default() -> Self { VersionInfo { title: "qdrant - vector search engine".to_string(), version: env!("CARGO_PKG_VERSION").to_string(), commit: get_git_commit_id(), } } } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub enum ApiStatus { Ok, Error(String), Accepted, AlreadyInProgress, } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct ApiResponse<D> { #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<D>, pub status: ApiStatus, pub time: f64, #[serde(skip_serializing_if = "is_usage_none_or_empty")] pub usage: Option<Usage>, } /// Usage of the hardware resources, spent to process the request #[derive(Debug, Serialize, JsonSchema, Anonymize, Clone)] #[serde(rename_all = "snake_case")] #[anonymize(false)] pub struct Usage { #[serde(skip_serializing_if = "Option::is_none")] pub hardware: Option<HardwareUsage>, pub inference: Option<InferenceUsage>, } impl Usage { pub fn is_empty(&self) -> bool { let Usage { hardware, inference, } = self; hardware.is_none() && inference.is_none() } } fn is_usage_none_or_empty(u: &Option<Usage>) -> bool { u.as_ref().is_none_or(|usage| usage.is_empty()) } /// Usage of the hardware resources, spent to process the request #[derive(Debug, Default, Serialize, JsonSchema, Anonymize, Clone)] #[serde(rename_all = "snake_case")] #[anonymize(false)] pub struct HardwareUsage { pub cpu: usize, pub payload_io_read: usize, pub 
payload_io_write: usize, pub payload_index_io_read: usize, pub payload_index_io_write: usize, pub vector_io_read: usize, pub vector_io_write: usize, } #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct InferenceUsage { pub models: HashMap<String, ModelUsage>, } impl InferenceUsage { pub fn is_empty(&self) -> bool { self.models.is_empty() } pub fn into_non_empty(self) -> Option<Self> { if self.is_empty() { None } else { Some(self) } } pub fn merge(&mut self, other: Self) { for (model_name, model_usage) in other.models { self.models .entry(model_name) .and_modify(|existing| { let ModelUsage { tokens } = existing; *tokens += model_usage.tokens; }) .or_insert(model_usage); } } pub fn merge_opt(&mut self, other: Option<Self>) { if let Some(other) = other { self.merge(other); } } } #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct ModelUsage { pub tokens: u64, } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct CollectionDescription { pub name: String, } fn example_collections_response() -> CollectionsResponse { CollectionsResponse { collections: vec![ CollectionDescription { name: "arxiv-title".to_string(), }, CollectionDescription { name: "arxiv-abstract".to_string(), }, CollectionDescription { name: "medium-title".to_string(), }, CollectionDescription { name: "medium-text".to_string(), }, ], } } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] #[schemars(example = "example_collections_response")] pub struct CollectionsResponse { pub collections: Vec<CollectionDescription>, } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct ShardKeyDescription { pub key: ShardKey, } #[derive(Debug, Serialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct ShardKeysResponse { /// The existing shard keys. 
Only available when sharding method is `custom` #[serde(skip_serializing_if = "Option::is_none")] pub shard_keys: Option<Vec<ShardKeyDescription>>, }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/typemap.rs
lib/common/issues/src/typemap.rs
use std::any::{Any, TypeId}; use std::collections::HashMap; pub struct TypeMap(HashMap<TypeId, Box<dyn Any + Send + Sync>>); impl TypeMap { pub fn new() -> Self { Self(HashMap::new()) } pub fn has<T: 'static>(&self) -> bool { self.0.contains_key(&TypeId::of::<T>()) } pub fn insert<T: 'static + Send + Sync>(&mut self, value: T) { self.0.insert(TypeId::of::<T>(), Box::new(value)); } pub fn get<T: 'static>(&self) -> Option<&T> { self.0 .get(&TypeId::of::<T>()) .map(|value| value.downcast_ref().unwrap()) } pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> { self.0 .get_mut(&TypeId::of::<T>()) .map(|value| value.downcast_mut().unwrap()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/lib.rs
lib/common/issues/src/lib.rs
pub mod broker; mod dashboard; mod issue; pub mod problems; mod solution; pub(crate) mod typemap; pub use broker::{add_subscriber, publish}; pub use dashboard::{ Code, all_collection_issues, all_issues, clear, solve, solve_by_filter, submit, }; pub use issue::{Issue, IssueRecord}; pub use solution::{Action, ImmediateSolution, Solution};
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/dashboard.rs
lib/common/issues/src/dashboard.rs
use std::any::TypeId; use std::collections::HashSet; use std::sync::{Arc, OnceLock}; use dashmap::DashMap; use crate::issue::{Issue, IssueRecord}; #[derive(Hash, Eq, PartialEq, Clone)] pub struct Code { pub issue_type: TypeId, pub instance_id: String, } impl Code { pub fn new<T: 'static>(instance_id: impl Into<String>) -> Self { Self { issue_type: TypeId::of::<T>(), instance_id: instance_id.into(), } } /// Internal code for the issue fn of<I: Issue + 'static>(issue: &I) -> Self where Self: std::marker::Sized + 'static, { Code::new::<I>(issue.instance_id()) } } impl AsRef<Code> for Code { fn as_ref(&self) -> &Code { self } } #[derive(Default)] pub(crate) struct Dashboard { pub issues: DashMap<Code, IssueRecord>, } impl Dashboard { /// Activates an issue, returning true if the issue was not active before pub(crate) fn add_issue<I: Issue + 'static>(&self, issue: I) -> bool { let code = Code::of(&issue); if self.issues.contains_key(&code) { return false; } let issue = IssueRecord::from(issue); self.issues.insert(code, issue).is_none() } /// Deactivates an issue by its code, returning true if the issue was active before pub(crate) fn remove_issue<S: AsRef<Code>>(&self, code: S) -> bool { if self.issues.contains_key(code.as_ref()) { return self.issues.remove(code.as_ref()).is_some(); } false } /// Returns all issues in the dashboard. This operation clones every issue, so it is more expensive. 
pub(crate) fn get_all_issues(&self) -> Vec<IssueRecord> { self.issues.iter().map(|kv| kv.value().clone()).collect() } pub(crate) fn get_all_collection_issues(&self, collection_name: &str) -> Vec<IssueRecord> { self.issues .iter() .filter(|kv| { kv.value() .related_collection .as_ref() .is_some_and(|c| c == collection_name) }) .map(|kv| kv.value().clone()) .collect() } fn get_codes<I: 'static>(&self) -> HashSet<Code> { let type_id = TypeId::of::<I>(); self.issues .iter() .filter(|kv| kv.key().issue_type == type_id) .map(|kv| kv.key().clone()) .collect() } } fn dashboard() -> Arc<Dashboard> { static DASHBOARD: OnceLock<Arc<Dashboard>> = OnceLock::new(); DASHBOARD .get_or_init(|| Arc::new(Dashboard::default())) .clone() } /// Submits an issue to the dashboard, returning true if the issue code was not active before pub fn submit(issue: impl Issue + 'static) -> bool { dashboard().add_issue(issue) } /// Solves an issue by its code, returning true if the issue code was active before pub fn solve<C: AsRef<Code>>(code: C) -> bool { dashboard().remove_issue(code) } pub fn all_issues() -> Vec<IssueRecord> { dashboard().get_all_issues() } pub fn all_collection_issues(collection_name: &str) -> Vec<IssueRecord> { dashboard().get_all_collection_issues(collection_name) } /// Clears all issues from the dashboard pub fn clear() { dashboard().issues.clear(); } /// Solves all issues of the given type that match the given predicate pub fn solve_by_filter<I: Issue + 'static, F: Fn(&Code) -> bool>(filter: F) { let codes = dashboard().get_codes::<I>(); for code in codes { if filter(&code) { solve(code); } } } #[cfg(test)] mod tests { use serial_test::serial; use super::*; use crate::issue::DummyIssue; #[test] fn test_dashboard() { let dashboard = Dashboard::default(); let issue = DummyIssue { distinctive: "test".to_string(), }; assert!(dashboard.add_issue(issue.clone())); assert!(!dashboard.add_issue(issue.clone())); assert!(dashboard.remove_issue(Code::of(&issue))); 
assert!(!dashboard.remove_issue(Code::of(&issue))); } #[test] #[serial] fn test_singleton() -> std::thread::Result<()> { clear(); let handle1 = std::thread::spawn(|| { submit(DummyIssue::new("issue1")); submit(DummyIssue::new("issue2")); submit(DummyIssue::new("issue3")); }); let handle2 = std::thread::spawn(|| { submit(DummyIssue::new("issue4")); submit(DummyIssue::new("issue5")); submit(DummyIssue::new("issue6")); }); handle1.join()?; handle2.join()?; assert_eq!(all_issues().len(), 6); assert!(solve(Code::new::<DummyIssue>("issue1"))); assert!(solve(Code::new::<DummyIssue>("issue2"))); assert!(solve(Code::new::<DummyIssue>("issue3"))); assert!(solve(Code::new::<DummyIssue>("issue4"))); assert!(solve(Code::new::<DummyIssue>("issue5"))); assert!(solve(Code::new::<DummyIssue>("issue6"))); clear(); Ok(()) } #[test] #[serial] fn test_solve_by_filter() { crate::clear(); submit(DummyIssue::new("my_collection:issue1")); submit(DummyIssue::new("my_collection:issue2")); submit(DummyIssue::new("my_collection:issue3")); submit(DummyIssue::new("my_other_collection:issue2")); submit(DummyIssue::new("my_other_collection:issue2")); submit(DummyIssue::new("issue2")); submit(DummyIssue::new("issue3")); // Solve all dummy issues that contain "my_collection" solve_by_filter::<DummyIssue, _>(|code| code.instance_id.contains("my_collection")); assert_eq!(all_issues().len(), 3); assert!(solve(Code::new::<DummyIssue>("issue2"))); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/issue.rs
lib/common/issues/src/issue.rs
use std::fmt::Debug; use chrono::{DateTime, Utc}; use schemars::JsonSchema; use serde::Serialize; use crate::solution::Solution; pub trait Issue { /// Differentiates issues of the same type. This can hold any information that makes the issue unique and filterable. fn instance_id(&self) -> &str; /// The codename for all issues of this type fn name() -> &'static str; /// The name of the collection that this issue is related to /// If none, collection is not relevant to any particular collection fn related_collection(&self) -> Option<String>; /// A human-readable description of the issue fn description(&self) -> String; /// Actionable solution to the issue fn solution(&self) -> Solution; /// Submits the issue to the dashboard singleton fn submit(self) -> bool where Self: std::marker::Sized + 'static, { crate::dashboard::submit(self) } } /// An issue that can be identified by its code #[derive(Debug, Serialize, JsonSchema, Clone)] pub struct IssueRecord { pub id: String, pub description: String, pub solution: Solution, pub timestamp: DateTime<Utc>, pub related_collection: Option<String>, } impl<I: Issue> From<I> for IssueRecord { fn from(val: I) -> Self { let id = format!("{}/{}", I::name(), val.instance_id()); Self { id, description: val.description(), solution: val.solution(), timestamp: Utc::now(), related_collection: val.related_collection(), } } } #[cfg(test)] #[derive(Clone)] pub(crate) struct DummyIssue { pub distinctive: String, } #[cfg(test)] impl DummyIssue { #[cfg(test)] pub fn new(distinctive: impl Into<String>) -> Self { Self { distinctive: distinctive.into(), } } } #[cfg(test)] impl Issue for DummyIssue { fn instance_id(&self) -> &str { &self.distinctive } fn name() -> &'static str { "DUMMY" } fn related_collection(&self) -> Option<String> { None } fn description(&self) -> String { "".to_string() } fn solution(&self) -> Solution { Solution::Refactor("".to_string()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_issue_record() { let issue = 
DummyIssue::new("test"); let record = IssueRecord::from(issue); assert_eq!(record.id, "DUMMY/test"); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/broker.rs
lib/common/issues/src/broker.rs
use std::sync::{Arc, OnceLock, RwLock}; use crate::typemap::TypeMap; pub trait Subscriber<E> { fn notify(&self, event: Arc<E>); } struct SubscriberMap(TypeMap); type DynSubscriber<E> = Box<dyn Subscriber<E> + Send + Sync>; type SubscriVec<E> = Vec<Arc<DynSubscriber<E>>>; impl SubscriberMap { fn new() -> Self { Self(TypeMap::new()) } fn push<E: 'static>(&mut self, subscriber: DynSubscriber<E>) { if !self.0.has::<SubscriVec<E>>() { self.0.insert(SubscriVec::<E>::new()); } let sub = Arc::new(subscriber); self.0.get_mut::<SubscriVec<E>>().unwrap().push(sub); } fn get<E: 'static>(&self) -> Option<&SubscriVec<E>> { self.0.get() } } pub struct EventBroker { subscriptions: SubscriberMap, } impl EventBroker { pub fn new() -> Self { Self { subscriptions: SubscriberMap::new(), } } pub fn add_subscriber<E: 'static>(&mut self, subscriber: DynSubscriber<E>) { self.subscriptions.push(subscriber); } /// Notify all subscribers of the event. This method will block until all subscribers have handled the event, subscribers can choose if they want to handle the event in the background (non-blocking). 
pub fn publish<E: 'static>(&self, event: E) { if let Some(subscribers) = self.subscriptions.get::<E>() { let event = Arc::new(event); for sub in subscribers { sub.notify(event.clone()); } } } } impl Default for EventBroker { fn default() -> Self { Self::new() } } fn broker() -> Arc<RwLock<EventBroker>> { static BROKER: OnceLock<Arc<RwLock<EventBroker>>> = OnceLock::new(); BROKER .get_or_init(|| Arc::new(RwLock::new(EventBroker::default()))) .clone() } pub fn publish<E: 'static>(event: E) { // This will only read if the lock is not poisoned if let Ok(guard) = broker().read() { guard.publish(event) } } pub fn add_subscriber<E: 'static>(subscriber: Box<dyn Subscriber<E> + Send + Sync>) { // This will only write if the lock is not poisoned if let Ok(mut guard) = broker().write() { guard.add_subscriber(subscriber); } } #[cfg(test)] mod tests { use super::*; use crate::dashboard::Dashboard; use crate::issue::DummyIssue; #[derive(Clone)] struct DummySubscriber { dashboard: Arc<Dashboard>, } struct DummyEvent { pub collection_id: String, } impl Subscriber<DummyEvent> for DummySubscriber { fn notify(&self, event: Arc<DummyEvent>) { let issue = DummyIssue::new(event.collection_id.clone()); self.dashboard.add_issue(issue); } } struct ClearAllIssuesEvent; impl Subscriber<ClearAllIssuesEvent> for DummySubscriber { fn notify(&self, _event: Arc<ClearAllIssuesEvent>) { self.dashboard.issues.clear(); } } #[test] fn test_basic_use() { let mut broker = EventBroker::new(); let test_dashboard = Arc::new(Dashboard::default()); let subscriber = DummySubscriber { dashboard: test_dashboard.clone(), }; broker.add_subscriber::<DummyEvent>(Box::new(subscriber.clone())); broker.add_subscriber::<ClearAllIssuesEvent>(Box::new(subscriber)); broker.publish(DummyEvent { collection_id: "dummy".to_string(), }); assert!( test_dashboard .get_all_issues() .iter() .any(|issue| issue.id == "DUMMY/dummy"), ); broker.publish(ClearAllIssuesEvent); assert!( test_dashboard .get_all_issues() .iter() 
.all(|issue| issue.id != "DUMMY/dummy"), "{:?}", test_dashboard.get_all_issues() ); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/solution.rs
lib/common/issues/src/solution.rs
use http::{HeaderMap, Method, Uri}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_json::Value; #[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] #[serde(rename_all = "snake_case")] pub enum Solution { /// A solution that can be applied immediately Immediate(Box<ImmediateSolution>), /// Two or more solutions to choose from ImmediateChoice(Vec<ImmediateSolution>), /// A solution that requires manual intervention Refactor(String), } #[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] pub struct ImmediateSolution { pub message: String, pub action: Action, } #[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] pub struct Action { #[serde(with = "http_serde::method")] #[schemars(with = "http_schemars::Method")] pub method: Method, #[serde(with = "http_serde::uri")] #[schemars(with = "http_schemars::Uri")] pub uri: Uri, #[serde(with = "http_serde::header_map")] #[schemars(with = "http_schemars::HeaderMap")] pub headers: HeaderMap, pub body: Option<serde_json::Map<String, Value>>, } mod http_schemars { use std::collections::HashMap; use schemars::JsonSchema; pub struct Method; impl JsonSchema for Method { fn schema_name() -> String { "Method".to_string() } fn json_schema( generator: &mut schemars::r#gen::SchemaGenerator, ) -> schemars::schema::Schema { let mut schema = generator.subschema_for::<String>().into_object(); schema.metadata().description = Some("HTTP method".to_string()); schema.into() } } pub struct Uri; impl JsonSchema for Uri { fn schema_name() -> String { "Uri".to_string() } fn json_schema( generator: &mut schemars::r#gen::SchemaGenerator, ) -> schemars::schema::Schema { let mut schema = generator.subschema_for::<String>().into_object(); schema.metadata().description = Some("HTTP URI".to_string()); schema.into() } } pub struct HeaderMap; impl JsonSchema for HeaderMap { fn schema_name() -> String { "HeaderMap".to_string() } fn json_schema( generator: &mut schemars::r#gen::SchemaGenerator, ) -> 
schemars::schema::Schema { let mut schema = generator .subschema_for::<HashMap<String, String>>() .into_object(); schema.metadata().description = Some("HTTP headers".to_string()); schema.into() } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/problems/too_many_collections.rs
lib/common/issues/src/problems/too_many_collections.rs
use crate::issue::Issue; use crate::solution::Solution; pub struct TooManyCollections; impl Issue for TooManyCollections { fn instance_id(&self) -> &str { "" // Only one issue for the whole app } fn name() -> &'static str { "TOO_MANY_COLLECTIONS" } fn related_collection(&self) -> Option<String> { None } fn description(&self) -> String { "It looks like you have too many collections.\nIf your architecture creates collections programmatically, it's probably better to restructure your solution into a fixed number of them. \nLearn more here: https://qdrant.tech/documentation/guides/multiple-partitions/".to_string() } fn solution(&self) -> Solution { Solution::Refactor( "Restructure your solution into a fixed number of collections".to_string(), ) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/issues/src/problems/mod.rs
lib/common/issues/src/problems/mod.rs
mod too_many_collections; pub use too_many_collections::TooManyCollections;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/cancel/src/blocking.rs
lib/common/cancel/src/blocking.rs
use tokio_util::task::AbortOnDropHandle; use super::*; /// # Cancel safety /// /// This function is cancel safe. /// /// If cancelled, the cancellation token provided to the `task` will be triggered automatically. /// /// This may prematurely abort the blocking task if it has not started yet. pub async fn spawn_cancel_on_drop<Out, Task>(task: Task) -> Result<Out, Error> where Task: FnOnce(CancellationToken) -> Out + Send + 'static, Out: Send + 'static, { let cancel = CancellationToken::new(); let task = { let cancel = cancel.child_token(); move || task(cancel) }; let guard = cancel.drop_guard(); let handle = AbortOnDropHandle::new(tokio::task::spawn_blocking(task)); let output = handle.await?; guard.disarm(); Ok(output) } /// # Cancel safety /// /// This function is cancel safe. /// /// If cancelled without triggering the cancellation token, the `task` will still run to completion. /// /// This function *will* return early, and the `task` *may* never run or return early by triggering /// the cancellation token. pub async fn spawn_cancel_on_token<Out, Task>( cancel: CancellationToken, task: Task, ) -> Result<Out, Error> where Task: FnOnce(CancellationToken) -> Out + Send + 'static, Out: Send + 'static, { let task = { let cancel = cancel.child_token(); move || task(cancel) }; let handle = tokio::task::spawn_blocking(task); let output = future::cancel_and_abort_on_token(cancel, handle).await?; Ok(output) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/cancel/src/lib.rs
lib/common/cancel/src/lib.rs
pub mod blocking; pub mod future; pub use tokio_util::sync::{CancellationToken, DropGuard}; #[derive(Debug, thiserror::Error)] pub enum Error { #[error(transparent)] Join(#[from] tokio::task::JoinError), #[error("task was cancelled")] Cancelled, }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/cancel/src/future.rs
lib/common/cancel/src/future.rs
use std::future::Future; use tokio::task::JoinHandle; use super::*; /// # Cancel safety /// /// This function is cancel safe. /// /// If cancelled, the cancellation token provided to the `task` will be triggered automatically. pub async fn spawn_cancel_on_drop<Task, Fut>(task: Task) -> Result<Fut::Output, Error> where Task: FnOnce(CancellationToken) -> Fut, Fut: Future + Send + 'static, Fut::Output: Send + 'static, { let cancel = CancellationToken::new(); let future = task(cancel.child_token()); let guard = cancel.drop_guard(); let output = tokio::task::spawn(future).await?; guard.disarm(); Ok(output) } /// # Cancel safety /// /// This function is cancel safe. /// /// The provided future must be cancel safe. pub async fn cancel_on_token<Fut>( cancel: CancellationToken, future: Fut, ) -> Result<Fut::Output, Error> where Fut: Future, { tokio::select! { biased; _ = cancel.cancelled() => Err(Error::Cancelled), output = future => Ok(output), } } /// Cancel and abort the given blocking task when triggering the cancellation token. /// /// This may prematurely abort the blocking task if it has not started yet. /// /// # Cancel safety /// /// This function is cancel safe. /// /// The provided future must be cancel safe. pub async fn cancel_and_abort_on_token<T>( cancel: CancellationToken, handle: JoinHandle<T>, ) -> Result<T, Error> { let abort_handle = handle.abort_handle(); tokio::select! { biased; _ = cancel.cancelled() => { // Prematurely abort blocking task if not started yet abort_handle.abort(); Err(Error::Cancelled) }, output = handle => Ok(output.map_err(Error::Join)?), } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/build.rs
lib/common/memory/build.rs
/// Cargo build script: probe the compile target and emit `cfg` flags for the
/// platform-dependent functionality this crate relies on from `nix`.
fn main() {
    let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();

    println!("cargo:rustc-check-cfg=cfg(posix_fadvise_supported)");
    // Matches all platforms that have `nix::fcntl::posix_fadvise` function.
    // https://github.com/nix-rust/nix/blob/v0.29.0/src/fcntl.rs#L35-L42
    let fadvise_oses = ["linux", "freebsd", "android", "fuchsia", "emscripten", "wasi"];
    // `CARGO_CFG_TARGET_ENV` is only consulted when the OS check fails, preserving
    // the short-circuit of the original `||` condition.
    let posix_fadvise_supported = fadvise_oses.contains(&target_os.as_str())
        || std::env::var("CARGO_CFG_TARGET_ENV").unwrap().as_str() == "uclibc";
    if posix_fadvise_supported {
        println!("cargo:rustc-cfg=posix_fadvise_supported")
    }

    println!("cargo:rustc-check-cfg=cfg(fs_type_check_supported)");
    // Matches all platforms, that have `nix::sys::statfs::statfs` function.
    // https://github.com/nix-rust/nix/blob/v0.29.0/src/sys/mod.rs#L131
    let statfs_oses = [
        "linux", "freebsd", "android", "openbsd", "ios", "macos", "watchos", "tvos", "visionos",
    ];
    if statfs_oses.contains(&target_os.as_str()) {
        println!("cargo:rustc-cfg=fs_type_check_supported")
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/mmap_ops.rs
lib/common/memory/src/mmap_ops.rs
use std::mem::{align_of, size_of};
use std::path::Path;
use std::sync::{LazyLock, OnceLock};
use std::{io, mem, ptr};

use fs_err as fs;
use fs_err::{File, OpenOptions};
use memmap2::{Mmap, MmapMut};

use crate::madvise::{self, AdviceSetting, Madviseable};

/// Extension used for the temporary file while atomically creating an mmap file.
pub const TEMP_FILE_EXTENSION: &str = "tmp";

/// If multiple mmaps to the same file are supported in this environment
///
/// Some environments corrupt data on the file system if multiple memory maps are opened on the
/// same piece of data. This variable allows disabling the use of multiple memory maps at runtime.
/// An example of such environment is Docker on Windows with a mount into Windows.
pub static MULTI_MMAP_IS_SUPPORTED: LazyLock<bool> = LazyLock::new(|| {
    let mut supported = true;

    // Opt-out if multi-mmap support check at startup failed
    match MULTI_MMAP_SUPPORT_CHECK_RESULT.get() {
        Some(true) => {}
        Some(false) => {
            log::warn!(
                "Not using multi-mmap due to limited support, you may see reduced performance",
            );
            supported = false;
        }
        None => {
            // We hit this branch if MULTI_MMAP_SUPPORT_CHECK_RESULT was never explicitly set
            // In tests this can be safely ignored because we don't start through main.rs there
            log::warn!(
                "MULTI_MMAP_SUPPORT_CHECK_RESULT should be initialized before accessing MULTI_MMAP_IS_SUPPORTED"
            );
        }
    }

    // Opt-out if environment variable is set
    if supported && std::env::var_os("QDRANT_NO_MULTI_MMAP").is_some_and(|val| !val.is_empty()) {
        supported = false;
        log::warn!(
            "Not using multi-mmap because QDRANT_NO_MULTI_MMAP is set, you may see reduced performance"
        );
    }

    supported
});

/// If multi-mmap support is checked at Qdrant startup, the result is stored in this cell.
pub static MULTI_MMAP_SUPPORT_CHECK_RESULT: OnceLock<bool> = OnceLock::new();

/// Open (or create) the file at `path` and ensure it is exactly `length` bytes long.
///
/// A brand-new file is first created under a temporary name and only renamed into place after
/// its length is set, so a crash cannot leave a partially initialized file at `path`.
pub fn create_and_ensure_length(path: &Path, length: usize) -> io::Result<File> {
    if path.exists() {
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            // Don't truncate because we explicitly set the length later
            .truncate(false)
            .open(path)?;
        file.set_len(length as u64)?;

        Ok(file)
    } else {
        let temp_path = path.with_extension(TEMP_FILE_EXTENSION);
        {
            // create temporary file with the required length
            // Temp file is used to avoid situations, where crash happens between file creation and setting the length
            let temp_file = OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                // Don't truncate because we explicitly set the length later
                .truncate(false)
                .open(&temp_path)?;
            temp_file.set_len(length as u64)?;
        }

        fs::rename(&temp_path, path)?;

        OpenOptions::new().read(true).write(true).open(path)
    }
}

/// Memory-map the file at `path` for reading.
///
/// Optionally pre-populates the page cache, then applies the given madvise setting.
///
/// NOTE(review): the file is opened with `append(true).create(true)`, so a missing file is
/// silently created (empty) rather than reported as an error — presumably intentional; confirm
/// against callers before changing.
pub fn open_read_mmap(path: &Path, advice: AdviceSetting, populate: bool) -> io::Result<Mmap> {
    let file = OpenOptions::new()
        .read(true)
        .append(true)
        .create(true)
        .open(path)?;

    let mmap = unsafe { Mmap::map(&file)? };

    // Populate before advising
    // Because we want to read data with normal advice
    if populate {
        mmap.populate();
    }

    madvise::madvise(&mmap, advice.resolve())?;

    Ok(mmap)
}

/// Memory-map the existing file at `path` for reading and writing.
///
/// Optionally pre-populates the page cache, then applies the given madvise setting.
pub fn open_write_mmap(path: &Path, advice: AdviceSetting, populate: bool) -> io::Result<MmapMut> {
    let file = OpenOptions::new().read(true).write(true).open(path)?;

    let mmap = unsafe { MmapMut::map_mut(&file)? };

    // Populate before advising
    // Because we want to read data with normal advice
    if populate {
        mmap.populate();
    }

    madvise::madvise(&mmap, advice.resolve())?;

    Ok(mmap)
}

/// Reinterpret a byte slice as a reference to `T`.
///
/// Debug builds assert that `v` is exactly `size_of::<T>()` bytes and properly aligned;
/// release builds perform no checks.
pub fn transmute_from_u8<T>(v: &[u8]) -> &T {
    debug_assert_eq!(v.len(), size_of::<T>());
    debug_assert_eq!(
        v.as_ptr().align_offset(align_of::<T>()),
        0,
        "transmuting byte slice {:p} into {}: \
         required alignment is {} bytes, \
         byte slice misaligned by {} bytes",
        v.as_ptr(),
        std::any::type_name::<T>(),
        align_of::<T>(),
        v.as_ptr().align_offset(align_of::<T>()),
    );

    unsafe { &*v.as_ptr().cast::<T>() }
}

/// View `v` as its raw bytes.
pub fn transmute_to_u8<T: Sized>(v: &T) -> &[u8] {
    unsafe { std::slice::from_raw_parts(ptr::from_ref::<T>(v).cast::<u8>(), mem::size_of_val(v)) }
}

/// Reinterpret a byte slice as a slice of `T`.
///
/// Debug builds assert that the length is a multiple of `size_of::<T>()` and the data is
/// properly aligned; release builds perform no checks.
pub fn transmute_from_u8_to_slice<T>(data: &[u8]) -> &[T] {
    debug_assert_eq!(data.len() % size_of::<T>(), 0);
    debug_assert_eq!(
        data.as_ptr().align_offset(align_of::<T>()),
        0,
        "transmuting byte slice {:p} into slice of {}: \
         required alignment is {} bytes, \
         byte slice misaligned by {} bytes",
        data.as_ptr(),
        std::any::type_name::<T>(),
        align_of::<T>(),
        data.as_ptr().align_offset(align_of::<T>()),
    );

    let len = data.len() / size_of::<T>();
    let ptr = data.as_ptr().cast::<T>();
    unsafe { std::slice::from_raw_parts(ptr, len) }
}

/// Reinterpret a mutable byte slice as a mutable slice of `T`.
///
/// Same debug-only size/alignment checks as [`transmute_from_u8_to_slice`].
pub fn transmute_from_u8_to_mut_slice<T>(data: &mut [u8]) -> &mut [T] {
    debug_assert_eq!(data.len() % size_of::<T>(), 0);
    debug_assert_eq!(
        data.as_ptr().align_offset(align_of::<T>()),
        0,
        "transmuting byte slice {:p} into mutable slice of {}: \
         required alignment is {} bytes, \
         byte slice misaligned by {} bytes",
        data.as_ptr(),
        std::any::type_name::<T>(),
        align_of::<T>(),
        data.as_ptr().align_offset(align_of::<T>()),
    );

    let len = data.len() / size_of::<T>();
    let ptr = data.as_mut_ptr().cast::<T>();
    unsafe { std::slice::from_raw_parts_mut(ptr, len) }
}

/// View a slice of `T` as its raw bytes.
pub fn transmute_to_u8_slice<T>(v: &[T]) -> &[u8] {
    unsafe { std::slice::from_raw_parts(v.as_ptr().cast::<u8>(), mem::size_of_val(v)) }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/mmap_type_readonly.rs
lib/common/memory/src/mmap_type_readonly.rs
//! Typed read-only memory maps
//!
//! This module adds type to directly map types and a slice of types onto a memory mapped file.
//! The typed memory maps can be directly used as if it were that type.
//!
//! Types:
//! - [`MmapTypeReadOnly`]
//! - [`MmapSliceReadOnly`]
//!
//! Various additional functions are added for use within Qdrant, such as `populate` to
//! explicitly load the underlying memory map into the page cache.
//!
//! # Safety
//!
//! Code in this module is `unsafe` and very error prone. It is therefore compacted in this single
//! module to make it easier to review, to make it easier to check for soundness, and to make it
//! easier to reason about. The interface provided by types in this module is as-safe-as-possible
//! and uses `unsafe` where appropriate.
//!
//! Please prevent touching code in this file. If modifications must be done, please do so with the
//! utmost care. Security is critical here as this is an easy place to introduce undefined
//! behavior. Problems caused by this are very hard to debug.

use std::ops::Deref;
use std::sync::Arc;
use std::{fmt, mem, slice};

use memmap2::Mmap;

use crate::madvise::Madviseable;
use crate::mmap_type::Error;

/// Result for mmap errors.
type Result<T> = std::result::Result<T, Error>;

/// Type `T` on a memory mapped file
///
/// Functions as if it is `T` because this implements [`Deref`]
///
/// # Safety
///
/// This directly maps (transmutes) the type onto the memory mapped data. This is dangerous and
/// very error prone and must be used with utmost care. Types holding references are not supported
/// for example. Malformed data in the mmap will break type `T` and will cause undefined behavior.
pub struct MmapTypeReadOnly<T>
where
    T: ?Sized + 'static,
{
    /// Type accessor: shared reference to access the type
    ///
    /// This has the same lifetime as the backing `mmap`, and thus this struct. A borrow must
    /// never be leased out for longer.
    ///
    /// Since we own this reference inside this struct, we can guarantee we never lease it out for
    /// longer.
    ///
    /// # Safety
    ///
    /// This is an alias to the data inside `mmap`. We should prevent using both together at all
    /// costs because the Rust compiler assumes `noalias` for optimization.
    ///
    /// See: <https://doc.rust-lang.org/nomicon/aliasing.html>
    r#type: &'static T,
    /// Type storage: memory mapped file as backing store for type
    ///
    /// Has an exact size to fit the type.
    ///
    /// This should never be accessed directly, because it is aliased by `r#type`. That must be
    /// used instead. The sole purpose of this is to keep ownership of the
    /// mmap, and to allow properly cleaning up when this struct is dropped.
    mmap: Arc<Mmap>,
}

impl<T: ?Sized> fmt::Debug for MmapTypeReadOnly<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MmapTypeReadOnly")
            .field("mmap", &self.mmap)
            .finish_non_exhaustive()
    }
}

impl<T> MmapTypeReadOnly<T>
where
    T: Sized + 'static,
{
    /// Transform a mmap into a typed mmap of type `T`.
    ///
    /// # Safety
    ///
    /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined
    /// behavior.
    ///
    /// # Panics
    ///
    /// - panics when the size of the mmap doesn't match size `T`
    /// - panics when the mmap data is not correctly aligned for type `T`
    /// - See: [`mmap_prefix_to_type_unbounded`]
    pub unsafe fn from(mmap_with_type: Mmap) -> Self {
        unsafe { Self::try_from(mmap_with_type).unwrap() }
    }

    /// Transform a mmap into a typed mmap of type `T`.
    ///
    /// Returns an error when the mmap has an incorrect size.
    ///
    /// # Safety
    ///
    /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined
    /// behavior.
    ///
    /// # Panics
    ///
    /// - panics when the mmap data is not correctly aligned for type `T`
    /// - See: [`mmap_prefix_to_type_unbounded`]
    pub unsafe fn try_from(mmap_with_type: Mmap) -> Result<Self> {
        let r#type = unsafe { mmap_prefix_to_type_unbounded(&mmap_with_type)? };
        let mmap = Arc::new(mmap_with_type);
        Ok(Self { r#type, mmap })
    }
}

impl<T> MmapTypeReadOnly<[T]>
where
    T: 'static,
{
    /// Transform a mmap into a typed slice mmap of type `&[T]`.
    ///
    /// Returns an error when the mmap has an incorrect size.
    ///
    /// # Warning
    ///
    /// This does not support slices, because those cannot be transmuted directly because it has
    /// extra parts. See [`MmapSliceReadOnly`] and [`std::slice::from_raw_parts`].
    ///
    /// # Safety
    ///
    /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined
    /// behavior.
    ///
    /// # Panics
    ///
    /// - panics when the mmap data is not correctly aligned for type `T`
    /// - See: [`mmap_to_slice_unbounded`]
    pub unsafe fn try_slice_from(mmap_with_slice: Mmap) -> Result<Self> {
        let r#type = unsafe { mmap_to_slice_unbounded(&mmap_with_slice, 0)? };
        let mmap = Arc::new(mmap_with_slice);
        Ok(Self { r#type, mmap })
    }

    /// Load the backing mmap into the page cache.
    pub fn populate(&self) -> std::io::Result<()> {
        self.mmap.populate();
        Ok(())
    }
}

/// Get a second (shared) reference for type `T` from the given mmap
///
/// # Warning
///
/// The returned reference is unbounded. The user must ensure it never outlives the `mmap` type.
///
/// # Safety
///
/// - unsafe because we create a second (unbounded) reference
/// - malformed data in the mmap may break the transmuted type `T` resulting in undefined behavior
///
/// # Panics
///
/// - panics when the mmap data is not correctly aligned for type `T`
unsafe fn mmap_prefix_to_type_unbounded<'unbnd, T>(mmap: &Mmap) -> Result<&'unbnd T>
where
    T: Sized,
{
    let size_t = mem::size_of::<T>();

    // Assert size
    if mmap.len() < size_t {
        return Err(Error::SizeLess(size_t, mmap.len()));
    }

    // Obtain unbounded bytes slice into mmap
    let bytes: &'unbnd [u8] = unsafe {
        let slice = mmap.deref();
        slice::from_raw_parts(slice.as_ptr(), size_t)
    };

    // Assert alignment and size
    assert_alignment::<_, T>(bytes);
    #[cfg(debug_assertions)]
    if mmap.len() != size_t {
        log::warn!(
            "Mmap length {} is not equal to size of type {}",
            mmap.len(),
            size_t,
        );
    }
    // NOTE(review): `bytes` is constructed with length `size_t` above, so this check can never
    // fail — presumably kept for parity with the mutable variant; confirm before removing.
    #[cfg(debug_assertions)]
    if bytes.len() != mem::size_of::<T>() {
        return Err(Error::SizeExact(mem::size_of::<T>(), bytes.len()));
    }

    let ptr = bytes.as_ptr().cast::<T>();
    Ok(unsafe { &*ptr })
}

/// Get a second (shared) reference for a slice of type `T` from the given mmap
///
/// A (non-zero) header size in bytes may be provided to omit from the BitSlice data.
///
/// # Warning
///
/// The returned reference is unbounded. The user must ensure it never outlives the `mmap` type.
///
/// # Safety
///
/// - unsafe because we create a second (unbounded) reference
/// - malformed data in the mmap may break the transmuted slice for type `T` resulting in undefined
///   behavior
///
/// # Panics
///
/// - panics when the mmap data is not correctly aligned for type `T`
/// - panics when the header size isn't a multiple of size `T`
unsafe fn mmap_to_slice_unbounded<'unbnd, T>(mmap: &Mmap, header_size: usize) -> Result<&'unbnd [T]>
where
    T: Sized,
{
    let size_t = mem::size_of::<T>();

    // Assert size
    if size_t == 0 {
        // For zero-sized T, data part must be zero-sized as well, we cannot have infinite slice
        debug_assert_eq!(
            mmap.len().saturating_sub(header_size),
            0,
            "mmap data must be zero-sized, because size T is zero",
        );
    } else {
        // Must be multiple of size T
        debug_assert_eq!(header_size % size_t, 0, "header not multiple of size T");
        if !mmap.len().is_multiple_of(size_t) {
            return Err(Error::SizeMultiple(size_t, mmap.len()));
        }
    }

    // Obtain unbounded bytes slice into mmap
    let bytes: &'unbnd [u8] = unsafe {
        let slice = mmap.deref();
        &slice::from_raw_parts(slice.as_ptr(), slice.len())[header_size..]
    };

    // Assert alignment and bytes size
    assert_alignment::<_, T>(bytes);
    debug_assert_eq!(bytes.len() + header_size, mmap.len());

    // Transmute slice types
    // `checked_div` guards the zero-sized-T case, yielding an empty slice instead of dividing by 0
    unsafe {
        Ok(slice::from_raw_parts(
            bytes.as_ptr().cast::<T>(),
            bytes.len().checked_div(size_t).unwrap_or(0),
        ))
    }
}

impl<T> Deref for MmapTypeReadOnly<T>
where
    T: ?Sized + 'static,
{
    type Target = T;

    // Has explicit 'bounded lifetime to clarify the inner reference never outlives this struct,
    // even though the reference has a static lifetime internally.
    #[allow(clippy::needless_lifetimes)]
    fn deref<'bounded>(&'bounded self) -> &'bounded Self::Target {
        self.r#type
    }
}

/// Slice of type `T` on a memory mapped file
///
/// Functions as if it is `&[T]` because this implements [`Deref`]
///
/// A helper because [`MmapTypeReadOnly`] doesn't support slices directly.
pub struct MmapSliceReadOnly<T>
where
    T: Sized + 'static,
{
    mmap: MmapTypeReadOnly<[T]>,
}

impl<T> fmt::Debug for MmapSliceReadOnly<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MmapSliceReadOnly")
            .field("mmap", &self.mmap)
            .finish_non_exhaustive()
    }
}

impl<T> MmapSliceReadOnly<T> {
    /// Transform a mmap into a typed slice mmap of type `&[T]`.
    ///
    /// This method is specifically intended for slices.
    ///
    /// # Safety
    ///
    /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined
    /// behavior.
    ///
    /// # Panics
    ///
    /// - panics when the size of the mmap isn't a multiple of size `T`
    /// - panics when the mmap data is not correctly aligned for type `T`
    /// - See: [`mmap_to_slice_unbounded`]
    pub unsafe fn from(mmap_with_slice: Mmap) -> Self {
        unsafe { Self::try_from(mmap_with_slice).unwrap() }
    }

    /// Transform a mmap into a typed slice mmap of type `&[T]`.
    ///
    /// This method is specifically intended for slices.
    ///
    /// Returns an error when the mmap has an incorrect size.
    ///
    /// # Safety
    ///
    /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined
    /// behavior.
    ///
    /// # Panics
    ///
    /// - panics when the mmap data is not correctly aligned for type `T`
    /// - See: [`mmap_to_slice_unbounded`]
    pub unsafe fn try_from(mmap_with_slice: Mmap) -> Result<Self> {
        let r#type = unsafe { MmapTypeReadOnly::try_slice_from(mmap_with_slice) };
        r#type.map(|mmap| Self { mmap })
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> std::io::Result<()> {
        self.mmap.populate()?;
        Ok(())
    }
}

impl<T> Deref for MmapSliceReadOnly<T> {
    type Target = MmapTypeReadOnly<[T]>;

    fn deref(&self) -> &Self::Target {
        &self.mmap
    }
}

/// Assert slice `&[S]` is correctly aligned for type `T`.
///
/// # Panics
///
/// Panics when alignment is wrong.
fn assert_alignment<S, T>(bytes: &[S]) {
    assert_eq!(
        bytes.as_ptr().align_offset(mem::align_of::<T>()),
        0,
        "type must be aligned",
    );
}

#[cfg(test)]
mod tests {
    use tempfile::{Builder, NamedTempFile};

    use super::*;
    use crate::madvise::AdviceSetting;
    use crate::mmap_ops;

    fn create_temp_mmap_file(len: usize) -> NamedTempFile {
        let tempfile = Builder::new()
            .prefix("test.")
            .suffix(".mmap")
            .tempfile()
            .unwrap();
        #[allow(clippy::disallowed_methods, reason = "test code")]
        tempfile.as_file().set_len(len as u64).unwrap();
        tempfile
    }

    #[test]
    fn test_open_from() {
        const SIZE: usize = 1024;
        let tempfile = create_temp_mmap_file(SIZE);
        let mmap = mmap_ops::open_read_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap();
        let result = unsafe { MmapSliceReadOnly::<u64>::try_from(mmap).unwrap() };
        assert_eq!(result.len(), SIZE / size_of::<u64>());
        assert_eq!(result[10], 0);
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/chunked_utils.rs
lib/common/memory/src/chunked_utils.rs
use std::io;
use std::path::{Path, PathBuf};

use ahash::AHashMap;
use fs_err as fs;

use crate::madvise::{Advice, AdviceSetting};
use crate::mmap_ops::{
    MULTI_MMAP_IS_SUPPORTED, create_and_ensure_length, open_read_mmap, open_write_mmap,
};
use crate::mmap_type::{Error as MmapError, MmapFlusher, MmapSlice};
use crate::mmap_type_readonly::MmapSliceReadOnly;

// Chunk files are named `chunk_<id>.mmap`
const MMAP_CHUNKS_PATTERN_START: &str = "chunk_";
const MMAP_CHUNKS_PATTERN_END: &str = ".mmap";

/// Memory mapped chunk data, that can be read and written and also maintain sequential read-only view
#[derive(Debug)]
pub struct UniversalMmapChunk<T: Sized + 'static> {
    /// Main data mmap slice for read/write
    ///
    /// Best suited for random reads.
    mmap: MmapSlice<T>,

    /// Read-only mmap slice best suited for sequential reads
    ///
    /// `None` on platforms that do not support multiple memory maps to the same file.
    /// Use [`as_seq_slice`] utility function to access this mmap slice if available.
    _mmap_seq: Option<MmapSliceReadOnly<T>>,
}

impl<T: Sized + 'static> UniversalMmapChunk<T> {
    /// Read/write view of the chunk data (random-access oriented).
    pub fn as_slice(&self) -> &[T] {
        &self.mmap
    }

    /// Mutable view of the chunk data.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        &mut self.mmap
    }

    /// Helper to get a slice suited for sequential reads if available, otherwise use the main mmap
    pub fn as_seq_slice(&self) -> &[T] {
        #[expect(clippy::used_underscore_binding)]
        self._mmap_seq
            .as_ref()
            .map(|m| m.as_ref())
            .unwrap_or(self.mmap.as_ref())
    }

    /// Handle to explicitly flush the writable mmap at a later time.
    pub fn flusher(&self) -> MmapFlusher {
        self.mmap.flusher()
    }

    /// Number of `T` elements in the chunk.
    pub fn len(&self) -> usize {
        self.mmap.len()
    }

    pub fn is_empty(&self) -> bool {
        self.mmap.is_empty()
    }

    /// Load the sequential read-only mmap (if any) into the page cache.
    pub fn populate(&self) -> io::Result<()> {
        #[expect(clippy::used_underscore_binding)]
        if let Some(mmap_seq) = &self._mmap_seq {
            mmap_seq.populate()?;
        }
        Ok(())
    }
}

/// Checks if the file name matches the pattern for mmap chunks
/// Return ID from the file name if it matches, None otherwise
fn check_mmap_file_name_pattern(file_name: &str) -> Option<usize> {
    file_name
        .strip_prefix(MMAP_CHUNKS_PATTERN_START)
        .and_then(|file_name| file_name.strip_suffix(MMAP_CHUNKS_PATTERN_END))
        .and_then(|file_name| file_name.parse::<usize>().ok())
}

/// Open all `chunk_<id>.mmap` files in `directory`, ordered by chunk id.
///
/// Chunk ids must form a gapless sequence `0..num_chunks`; a missing id yields
/// `MmapError::MissingFile`.
pub fn read_mmaps<T: Sized>(
    directory: &Path,
    populate: bool,
    advice: AdviceSetting,
) -> Result<Vec<UniversalMmapChunk<T>>, MmapError> {
    // Collect chunk id -> path for every file matching the chunk name pattern
    let mut mmap_files: AHashMap<usize, _> = AHashMap::new();
    for entry in fs::read_dir(directory)? {
        let entry = entry?;
        let path = entry.path();
        if path.is_file() {
            let chunk_id = path
                .file_name()
                .and_then(|file_name| file_name.to_str())
                .and_then(check_mmap_file_name_pattern);
            if let Some(chunk_id) = chunk_id {
                mmap_files.insert(chunk_id, path);
            }
        }
    }

    let num_chunks = mmap_files.len();
    let mut result = Vec::with_capacity(num_chunks);
    for chunk_id in 0..num_chunks {
        let mmap_file = mmap_files.remove(&chunk_id).ok_or_else(|| {
            MmapError::MissingFile(format!(
                "Missing mmap chunk {chunk_id} in {}",
                directory.display(),
            ))
        })?;
        let mmap = open_write_mmap(&mmap_file, advice, populate)?;
        let mmap = unsafe { MmapSlice::try_from(mmap) }?;

        // Only open second mmap for sequential reads if supported
        let mmap_seq = if *MULTI_MMAP_IS_SUPPORTED {
            let mmap_seq =
                open_read_mmap(&mmap_file, AdviceSetting::Advice(Advice::Sequential), false)?;
            Some(unsafe { MmapSliceReadOnly::try_from(mmap_seq) }?)
        } else {
            None
        };

        result.push(UniversalMmapChunk {
            mmap,
            _mmap_seq: mmap_seq,
        });
    }
    Ok(result)
}

/// Build the path of chunk `chunk_id` inside `directory`.
pub fn chunk_name(directory: &Path, chunk_id: usize) -> PathBuf {
    directory.join(format!(
        "{MMAP_CHUNKS_PATTERN_START}{chunk_id}{MMAP_CHUNKS_PATTERN_END}",
    ))
}

/// Create a new chunk file of `chunk_length_bytes` bytes in `directory` and memory-map it.
pub fn create_chunk<T: Sized>(
    directory: &Path,
    chunk_id: usize,
    chunk_length_bytes: usize,
) -> Result<UniversalMmapChunk<T>, MmapError> {
    let chunk_file_path = chunk_name(directory, chunk_id);
    create_and_ensure_length(&chunk_file_path, chunk_length_bytes)?;
    let mmap = open_write_mmap(
        &chunk_file_path,
        AdviceSetting::Global,
        false, // don't populate newly created chunk, as it's empty and will be filled later
    )?;
    let mmap = unsafe { MmapSlice::try_from(mmap) }?;

    // Only open second mmap for sequential reads if supported
    let mmap_seq = if *MULTI_MMAP_IS_SUPPORTED {
        let mmap_seq = open_read_mmap(
            &chunk_file_path,
            AdviceSetting::Advice(Advice::Sequential),
            false,
        )?;
        Some(unsafe { MmapSliceReadOnly::try_from(mmap_seq) }?)
    } else {
        None
    };

    Ok(UniversalMmapChunk {
        mmap,
        _mmap_seq: mmap_seq,
    })
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/lib.rs
lib/common/memory/src/lib.rs
//! Memory-mapped file utilities shared across the workspace: mmap creation and
//! typed views, OS paging advice, and file-system compatibility checks.

pub mod checkfs;
pub mod chunked_utils;
pub mod fadvise;
pub mod madvise;
pub mod mmap_ops;
pub mod mmap_type;
pub mod mmap_type_readonly;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/madvise.rs
lib/common/memory/src/madvise.rs
//! Platform-independent abstractions over [`memmap2::Mmap::advise`]/[`memmap2::MmapMut::advise`]
//! and [`memmap2::Advice`].

use std::hint::black_box;
use std::io;
use std::num::Wrapping;

use serde::Deserialize;

/// Global [`Advice`] value, to trivially set [`Advice`] value
/// used by all memmaps created by the `segment` crate.
///
/// See [`set_global`] and [`get_global`].
static ADVICE: parking_lot::RwLock<Advice> = parking_lot::RwLock::new(Advice::Random);

/// Set global [`Advice`] value.
///
/// When the `segment` crate creates [`memmap2::Mmap`] or [`memmap2::MmapMut`]
/// _for a memory-mapped, on-disk HNSW index or vector storage access_
/// it will "advise" the created memmap with the current global [`Advice`] value
/// (obtained with [`get_global`]).
///
/// It is recommended to set the desired [`Advice`] value before calling any other function
/// from the `segment` crate and not to change it afterwards.
///
/// The `segment` crate itself does not modify the global [`Advice`] value.
///
/// The default global [`Advice`] value is [`Advice::Random`].
pub fn set_global(advice: Advice) {
    *ADVICE.write() = advice;
}

/// Get current global [`Advice`] value.
pub fn get_global() -> Advice {
    *ADVICE.read()
}

/// Platform-independent version of [`memmap2::Advice`].
/// See [`memmap2::Advice`] and [`madvise(2)`] man page.
///
/// [`madvise(2)`]: https://man7.org/linux/man-pages/man2/madvise.2.html
#[derive(Copy, Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Advice {
    /// See [`memmap2::Advice::Normal`].
    Normal,
    /// See [`memmap2::Advice::Random`].
    Random,
    /// See [`memmap2::Advice::Sequential`].
    Sequential,
}

#[cfg(unix)]
impl From<Advice> for memmap2::Advice {
    fn from(advice: Advice) -> Self {
        match advice {
            Advice::Normal => memmap2::Advice::Normal,
            Advice::Random => memmap2::Advice::Random,
            Advice::Sequential => memmap2::Advice::Sequential,
        }
    }
}

/// Either the global [`Advice`] value or a specific [`Advice`] value.
#[derive(Copy, Clone, Debug)]
pub enum AdviceSetting {
    /// Use the global [`Advice`] value (see [`set_global`] and [`get_global`]).
    Global,
    /// Use the specific [`Advice`] value.
    Advice(Advice),
}

impl From<Advice> for AdviceSetting {
    fn from(advice: Advice) -> Self {
        AdviceSetting::Advice(advice)
    }
}

impl AdviceSetting {
    /// Get the specific [`Advice`] value.
    pub fn resolve(self) -> Advice {
        match self {
            AdviceSetting::Global => get_global(),
            AdviceSetting::Advice(advice) => advice,
        }
    }
}

/// Advise OS how given memory map will be accessed. On non-Unix platforms this is a no-op.
pub fn madvise(madviseable: &impl Madviseable, advice: Advice) -> io::Result<()> {
    madviseable.madvise(advice)
}

/// Generic, platform-independent abstraction
/// over [`memmap2::Mmap::advise`] and [`memmap2::MmapMut::advise`].
pub trait Madviseable {
    /// Advise OS how given memory map will be accessed. On non-Unix platforms this is a no-op.
    fn madvise(&self, advice: Advice) -> io::Result<()>;

    /// Load the mapped pages into the page cache.
    fn populate(&self);
}

impl Madviseable for memmap2::Mmap {
    fn madvise(&self, advice: Advice) -> io::Result<()> {
        #[cfg(unix)]
        self.advise(advice.into())?;
        #[cfg(not(unix))]
        log::debug!("Ignore {advice:?} on this platform");
        Ok(())
    }

    fn populate(&self) {
        // Prefer MADV_POPULATE_READ (Linux 5.14+); fall back to touching pages manually
        #[cfg(target_os = "linux")]
        if *POPULATE_READ_IS_SUPPORTED {
            match self.advise(memmap2::Advice::PopulateRead) {
                Ok(()) => return,
                Err(err) => log::warn!(
                    "Failed to populate with MADV_POPULATE_READ: {err}. \
                     Falling back to naive approach."
                ),
            }
        }
        populate_simple(self);
    }
}

impl Madviseable for memmap2::MmapMut {
    fn madvise(&self, advice: Advice) -> io::Result<()> {
        #[cfg(unix)]
        self.advise(advice.into())?;
        #[cfg(not(unix))]
        log::debug!("Ignore {advice:?} on this platform");
        Ok(())
    }

    fn populate(&self) {
        // Prefer MADV_POPULATE_READ (Linux 5.14+); fall back to touching pages manually
        #[cfg(target_os = "linux")]
        if *POPULATE_READ_IS_SUPPORTED {
            match self.advise(memmap2::Advice::PopulateRead) {
                Ok(()) => return,
                Err(err) => log::warn!(
                    "Failed to populate with MADV_POPULATE_READ: {err}. \
                     Falling back to naive approach."
                ),
            }
        }
        populate_simple(self);
    }
}

/// True if `MADV_POPULATE_READ` is supported (added in Linux 5.14).
#[cfg(target_os = "linux")]
static POPULATE_READ_IS_SUPPORTED: std::sync::LazyLock<bool> =
    std::sync::LazyLock::new(|| memmap2::Advice::PopulateRead.is_supported());

/// On older Linuxes and non-Unix platforms, we just read every 512th byte to
/// populate the page cache. This is not as efficient as `madvise(2)` with
/// `MADV_POPULATE_READ` but it's better than nothing.
fn populate_simple(slice: &[u8]) {
    // `black_box` keeps the optimizer from eliminating the reads as dead code
    black_box(
        slice
            .iter()
            .copied()
            .map(Wrapping)
            .step_by(512)
            .sum::<Wrapping<u8>>(),
    );
}

/// Trigger readahead for a memory-mapped region by calling
/// `madvise(MADV_WILLNEED)` on it.
///
/// Use-case: the `region` is inside `MADV_RANDOM` memory map, but it spans
/// across more than one 4KiB page. If you read it in sequence, it will cause
/// multiple page faults, thus multiple 4KiB I/O operations. Avoid this by
/// calling this function before reading the region. It will prefetch the whole
/// region in a single I/O operation. (if possible)
///
/// Note: if the region fits within a single page, this function is a no-op.
#[cfg(unix)]
pub fn will_need_multiple_pages(region: &[u8]) {
    let Some(page_mask) = *PAGE_SIZE_MASK else {
        return;
    };

    // `madvise()` requires the address to be page-aligned.
    let addr = region.as_ptr().map_addr(|addr| addr & !page_mask);
    // Extend the length by however far we rounded the start address down
    let length = region.len() + (region.as_ptr().addr() & page_mask);
    if length <= page_mask {
        // Data fits within a single page, do nothing.
        return;
    }

    // Safety: madvise(MADV_WILLNEED) is harmless. If the address is not valid
    // (not file-baked mmap or even if it is an arbitrary invalid address), it
    // will return an error, but it won't crash or cause an undefined behavior.
    let res = unsafe { nix::libc::madvise(addr as *mut _, length, nix::libc::MADV_WILLNEED) };
    // Failures are ignored in release builds; only debug builds make noise
    if res != 0 {
        #[cfg(debug_assertions)]
        {
            let err = io::Error::last_os_error();
            panic!("Failed to call madvise(MADV_WILLNEED): {err}");
        }
    }
}

#[cfg(not(unix))]
pub fn will_need_multiple_pages(_region: &[u8]) {}

/// Page size mask. Typically 0xfff for 4KiB pages.
#[cfg(unix)]
static PAGE_SIZE_MASK: std::sync::LazyLock<Option<usize>> =
    std::sync::LazyLock::new(|| get_page_mask().inspect_err(|err| log::warn!("{err}")).ok());

/// Query the OS page size and return it as a low-bit mask (`page_size - 1`).
#[cfg(unix)]
fn get_page_mask() -> Result<usize, String> {
    let page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)
        .map_err(|err| format!("Failed to get page size: {err}"))?
        .ok_or_else(|| "sysconf(PAGE_SIZE) returned None".to_string())?;
    let page_size = usize::try_from(page_size)
        .map_err(|_| format!("Failed to convert page size {page_size} to usize"))?;
    if !page_size.is_power_of_two() {
        // Assuming that page size is a power of two (which is true for all
        // known platforms) simplifies computations.
        return Err(format!("Page size {page_size} is not a power of two"));
    }
    Ok(page_size - 1)
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/checkfs.rs
lib/common/memory/src/checkfs.rs
// This file contains functions to verify that current file system is POSIX compliant. // There are some possible checks we can run here: // 1. Read information about the file system. If it is unknown or known to be not POSIX compliant, return false. // 2. Try to create, fill and save mmap file with some dummy data. If file is possible to read after it is closed, return true. // Some file systems are known to fail this test, so we need to check for that and notify user before it is too late. use std::io; use std::path::Path; use fs_err as fs; #[cfg(fs_type_check_supported)] use nix::sys::statfs::statfs; use crate::madvise::{Advice, AdviceSetting}; use crate::mmap_ops::{create_and_ensure_length, open_read_mmap, open_write_mmap}; #[derive(Debug)] pub enum FsCheckResult { Good, Unknown(String), Bad(String), } const MAGIC_QDRANT_BYTES: &[u8] = b"qdrant"; const MAGIC_FILE_SIZE: usize = 32 * 1024; // 32 Kb const MAGIC_BYTES_POSITION: usize = MAGIC_FILE_SIZE / 2; // write in the middle const MAGIC_FILE_NAME: &str = ".qdrant_fs_check"; #[derive(Debug, PartialEq)] pub enum FsType { Ext234, Btrfs, Xfs, Nfs, Fuse, Tmpfs, Ntfs, /// FAT12, FAT16, FAT32 Fat, ExFat, /// HFS and HFS+ Hfs, Apfs, Overlayfs, Squashfs, Cifs, Other, } impl FsType { #[cfg(any(target_os = "linux", target_os = "android"))] fn from_magic(magic: i64) -> Self { // Sourced from: <https://github.com/torvalds/linux/blob/e04c78d86a9699d136910cfc0bdcf01087e3267e/include/uapi/linux/magic.h#L5-L40> match magic { 0xEF53 => Self::Ext234, 0x9123683E => Self::Btrfs, 0x58465342 => Self::Xfs, 0x6969 => Self::Nfs, 0x65735546 => Self::Fuse, 0x01021994 => Self::Tmpfs, 0x5346544e => Self::Ntfs, 0x55AA => Self::Fat, 0x2011BAB0 | 0xAA55 => Self::ExFat, 0x4244 | 0x482B => Self::Hfs, 0x42535041 => Self::Apfs, 0x794c7630 => Self::Overlayfs, 0x73717368 => Self::Squashfs, 0xFF534D42 => Self::Cifs, _ => Self::Other, } } #[cfg(not(any(target_os = "linux", target_os = "android")))] fn from_name(name: &str) -> Self { // Names reference 
is taken from // https://github.com/happyfish100/libfastcommon/blob/7f1a85b025675671905447da13b7727323eb0c28/src/system_info.c#L203 match name { "ext2" | "ext3" | "ext4" => Self::Ext234, "btrfs" => Self::Btrfs, "xfs" => Self::Xfs, "ntfs" => Self::Ntfs, "fat" | "fat12" | "fat16" | "fat32" => Self::Fat, "nfs" => Self::Nfs, "hfs" | "htf+" => Self::Hfs, "apfs" => Self::Apfs, "fuse" => Self::Fuse, "overlayfs" => Self::Overlayfs, "squashfs" => Self::Squashfs, "cifs" => Self::Cifs, "tmpfs" => Self::Tmpfs, _ => Self::Other, } } } /// Return a string representing the file system type of a given path. /// It uses nix::sys::statfs to retrieve the magic number. #[cfg(fs_type_check_supported)] fn get_filesystem_type(path: impl AsRef<Path>) -> Result<FsType, String> { let stat = statfs(path.as_ref()).map_err(|e| format!("statfs failed: {e}"))?; #[cfg(not(any(target_os = "linux", target_os = "android")))] { let fs_name = stat.filesystem_type_name(); let fs_type = FsType::from_name(fs_name); Ok(fs_type) } #[cfg(any(target_os = "linux", target_os = "android"))] { let f_type = stat.filesystem_type().0; // Convert into correct number type as MUSL expects i64, not u64 let fs_type = FsType::from_magic(f_type as _); Ok(fs_type) } } /// Check filesystem information to identify known non-POSIX filesystems #[cfg(fs_type_check_supported)] pub fn _check_fs_info(path: impl AsRef<Path>) -> FsCheckResult { let path = path.as_ref(); let Ok(fs_type) = get_filesystem_type(path) else { return FsCheckResult::Unknown( format!("Failed to get filesystem type for path: {path:?}",), ); }; match fs_type { FsType::Ext234 => FsCheckResult::Good, FsType::Btrfs => FsCheckResult::Good, FsType::Xfs => FsCheckResult::Good, FsType::Nfs => FsCheckResult::Bad( "NFS may cause data corruption due to inconsistent file locking".to_string(), ), FsType::Fuse => FsCheckResult::Bad( "FUSE filesystems may cause data corruption due to caching issues".to_string(), ), FsType::Tmpfs => FsCheckResult::Unknown( "Data will be lost 
on system restart - tmpfs is memory-based".to_string(), ), FsType::Ntfs => FsCheckResult::Good, FsType::Fat => { FsCheckResult::Unknown("FAT12/FAT16/FAT32 filesystem support is untested".to_string()) } FsType::ExFat => FsCheckResult::Unknown("exFAT filesystem support is untested".to_string()), FsType::Hfs => { FsCheckResult::Unknown("HFS/HFS+ filesystem support is untested".to_string()) } FsType::Apfs => FsCheckResult::Good, FsType::Overlayfs => FsCheckResult::Unknown( "Container filesystem detected - storage might be lost with container re-creation" .to_string(), ), FsType::Squashfs => { FsCheckResult::Unknown("Read-only filesystem detected - writes will fail".to_string()) } FsType::Cifs => FsCheckResult::Bad( "CIFS/SMB may cause data corruption due to inconsistent file locking".to_string(), ), FsType::Other => FsCheckResult::Unknown( "Unrecognized filesystem - cannot guarantee data safety".to_string(), ), } } pub fn check_fs_info(path: impl AsRef<Path>) -> FsCheckResult { if !path.as_ref().exists() { return FsCheckResult::Unknown("Path does not exist".to_string()); } #[cfg(fs_type_check_supported)] { _check_fs_info(path) } #[cfg(not(fs_type_check_supported))] { FsCheckResult::Unknown( "Filesystem type check is not supported on this platform".to_string(), ) } } /// This function simulates an access pattern we use in vector storage and gridstore /// This check fails, it means that fundamental assumptions about file system are violated /// therefore, there are no guarantees that data will be safe pub fn check_mmap_functionality(path: impl AsRef<Path>) -> io::Result<bool> { let path = path.as_ref(); let magic_file_path = path.join(MAGIC_FILE_NAME); // Remove file and folder if they exist if magic_file_path.exists() { fs::remove_file(&magic_file_path)?; } fs::create_dir_all(path)?; create_and_ensure_length(&magic_file_path, MAGIC_FILE_SIZE)?; let mut mmap = open_write_mmap(&magic_file_path, AdviceSetting::Global, false)?; let mmap_seq = open_read_mmap( 
&magic_file_path, AdviceSetting::Advice(Advice::Sequential), false, )?; mmap[MAGIC_BYTES_POSITION..MAGIC_BYTES_POSITION + MAGIC_QDRANT_BYTES.len()] .copy_from_slice(MAGIC_QDRANT_BYTES); mmap.flush()?; drop(mmap); drop(mmap_seq); if !magic_file_path.exists() { return Ok(false); } // Check the size of the file let file_size = fs::metadata(&magic_file_path)?.len() as usize; if file_size != MAGIC_FILE_SIZE { log::debug!("File size is not equal to MAGIC_FILE_SIZE: {file_size} != {MAGIC_FILE_SIZE}"); return Ok(false); } let mmap = open_read_mmap(&magic_file_path, AdviceSetting::Global, false)?; let result = mmap[MAGIC_BYTES_POSITION..MAGIC_BYTES_POSITION + MAGIC_QDRANT_BYTES.len()] == *MAGIC_QDRANT_BYTES; drop(mmap); if result { // If ok, we can remove the file // But if not, we might need to for further investigation fs::remove_file(&magic_file_path)?; } Ok(result) } #[cfg(test)] mod tests { use tempfile::TempDir; use super::*; #[test] fn test_posix_fs_check() { let temp_dir = TempDir::new().unwrap(); let result = check_fs_info(temp_dir.path()); println!("Result: {result:?}"); let result = check_mmap_functionality(temp_dir.path()).unwrap(); println!("Result: {result}"); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/fadvise.rs
lib/common/memory/src/fadvise.rs
use std::io::{self, IoSliceMut, Read, Seek}; use std::ops::Deref; use std::path::Path; use delegate::delegate; use fs_err::File; #[cfg(posix_fadvise_supported)] use nix::fcntl::{PosixFadviseAdvice, posix_fadvise}; #[cfg(posix_fadvise_supported)] fn fadvise(f: &impl std::os::unix::io::AsFd, advise: PosixFadviseAdvice) -> io::Result<()> { Ok(posix_fadvise(f, 0, 0, advise)?) } /// For given file path, clear disk cache with `posix_fadvise` /// /// Does nothing if: /// - the file does not exist /// - `posix_fadvise` is not supported on this platform pub fn clear_disk_cache(file_path: &Path) -> io::Result<()> { #[cfg(posix_fadvise_supported)] match File::open(file_path.to_path_buf()) { Ok(file) => fadvise(&file, PosixFadviseAdvice::POSIX_FADV_DONTNEED), // If file is not found, no need to clear cache Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(()), Err(e) => Err(e), } #[cfg(not(posix_fadvise_supported))] { let _ = file_path; Ok(()) } } /// A wrapper around [`File`] intended for one-time sequential read. /// /// On supported platforms, the file contents is evicted from the OS file cache /// after the file is closed. pub struct OneshotFile { /// Is `None` only when `drop_cache` is called, to avoid double call on drop. file: Option<File>, } impl OneshotFile { /// Similar to [`File::open`]. pub fn open(path: impl AsRef<Path>) -> io::Result<Self> { let file = File::open(path.as_ref().to_path_buf())?; #[cfg(posix_fadvise_supported)] { fadvise(&file, PosixFadviseAdvice::POSIX_FADV_SEQUENTIAL)?; fadvise(&file, PosixFadviseAdvice::POSIX_FADV_NOREUSE)?; } Ok(Self { file: Some(file) }) } /// Consume this [`OneshotFile`] and clear the cache. /// /// If not called, the cache still will be implicitly cleared on drop. /// The only difference is that this method might return an error. 
pub fn drop_cache(mut self) -> io::Result<()> { let file = self.file.take().unwrap(); #[cfg(posix_fadvise_supported)] fadvise(&file, PosixFadviseAdvice::POSIX_FADV_DONTNEED)?; let _ = file; Ok(()) } } impl Deref for OneshotFile { type Target = File; fn deref(&self) -> &Self::Target { self.file.as_ref().unwrap() } } impl Read for OneshotFile { delegate! { to self.file.as_ref().unwrap() { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>; fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize>; fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize>; fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize>; } } } impl Seek for OneshotFile { delegate! { to self.file.as_ref().unwrap() { fn seek(&mut self, pos: std::io::SeekFrom) -> io::Result<u64>; } } } impl Drop for OneshotFile { fn drop(&mut self) { if let Some(file) = self.file.take() { #[cfg(posix_fadvise_supported)] let _ = fadvise(&file, PosixFadviseAdvice::POSIX_FADV_DONTNEED); let _ = file; } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/memory/src/mmap_type.rs
lib/common/memory/src/mmap_type.rs
//! Typed memory maps //! //! This module adds type to directly map types and a slice of types onto a memory mapped file. //! The typed memory maps can be directly used as if it were that type. //! //! Types: //! - [`MmapType`] //! - [`MmapSlice`] //! - [`MmapBitSlice`] //! //! Various additional functions are added for use within Qdrant, such as `flusher` to obtain a //! flusher handle to explicitly flush the underlying memory map at a later time. //! //! # Safety //! //! Code in this module is `unsafe` and very error prone. It is therefore compacted in this single //! module to make it easier to review, to make it easier to check for soundness, and to make it //! easier to reason about. The interface provided by types in this module is as-safe-as-possible //! and uses `unsafe` where appropriate. //! //! Please prevent touching code in this file. If modifications must be done, please do so with the //! utmost care. Security is critical here as this is an easy place to introduce undefined //! behavior. Problems caused by this are very hard to debug. use std::ops::{Deref, DerefMut}; use std::path::Path; use std::sync::Arc; use std::{fmt, mem, slice}; use bitvec::slice::BitSlice; use memmap2::MmapMut; use crate::madvise::{Advice, AdviceSetting, Madviseable}; use crate::mmap_ops; /// Result for mmap errors. type Result<T> = std::result::Result<T, Error>; pub type MmapFlusher = Box<dyn FnOnce() -> Result<()> + Send>; /// Type `T` on a memory mapped file /// /// Functions as if it is `T` because this implements [`Deref`] and [`DerefMut`]. /// /// # Safety /// /// This directly maps (transmutes) the type onto the memory mapped data. This is dangerous and /// very error prone and must be used with utmost care. Types holding references are not supported /// for example. Malformed data in the mmap will break type `T` and will cause undefined behavior. 
pub struct MmapType<T> where T: ?Sized + 'static, { /// Type accessor: mutable reference to access the type /// /// This has the same lifetime as the backing `mmap`, and thus this struct. A borrow must /// never be leased out for longer. /// /// Since we own this reference inside this struct, we can guarantee we never lease it out for /// longer. /// /// # Safety /// /// This is an alias to the data inside `mmap`. We should prevent using both together at all /// costs because the Rust compiler assumes `noalias` for optimization. /// /// See: <https://doc.rust-lang.org/nomicon/aliasing.html> r#type: &'static mut T, /// Type storage: memory mapped file as backing store for type /// /// Has an exact size to fit the type. /// /// This should never be accessed directly, because it shares a mutable reference with /// `r#type`. That must be used instead. The sole purpose of this is to keep ownership of the /// mmap, and to allow properly cleaning up when this struct is dropped. mmap: Arc<MmapMut>, } impl<T: ?Sized> fmt::Debug for MmapType<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MmapType") .field("mmap", &self.mmap) .finish_non_exhaustive() } } impl<T> MmapType<T> where T: Sized + 'static, { /// Transform a mmap into a typed mmap of type `T`. /// /// # Safety /// /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined /// behavior. /// /// # Panics /// /// - panics when the size of the mmap doesn't match size `T` /// - panics when the mmap data is not correctly aligned for type `T` /// - See: [`mmap_prefix_to_type_unbounded`] pub unsafe fn from(mmap_with_type: MmapMut) -> Self { unsafe { Self::try_from(mmap_with_type).unwrap() } } /// Transform a mmap into a typed mmap of type `T`. /// /// Returns an error when the mmap has an incorrect size. /// /// # Safety /// /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined /// behavior. 
/// /// # Panics /// /// - panics when the mmap data is not correctly aligned for type `T` /// - See: [`mmap_prefix_to_type_unbounded`] pub unsafe fn try_from(mut mmap_with_type: MmapMut) -> Result<Self> { let r#type = unsafe { mmap_prefix_to_type_unbounded(&mut mmap_with_type)? }; let mmap = Arc::new(mmap_with_type); Ok(Self { r#type, mmap }) } } impl<T> MmapType<[T]> where T: 'static, { /// Transform a mmap into a typed slice mmap of type `&[T]`. /// /// Returns an error when the mmap has an incorrect size. /// /// # Warning /// /// This does not support slices, because those cannot be transmuted directly because it has /// extra parts. See [`MmapSlice`] and [`std::slice::from_raw_parts`]. /// /// # Safety /// /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined /// behavior. /// /// # Panics /// /// - panics when the mmap data is not correctly aligned for type `T` /// - See: [`mmap_to_slice_unbounded`] pub unsafe fn try_slice_from(mut mmap_with_slice: MmapMut) -> Result<Self> { let r#type = unsafe { mmap_to_slice_unbounded(&mut mmap_with_slice, 0)? }; let mmap = Arc::new(mmap_with_slice); Ok(Self { r#type, mmap }) } } impl<T> MmapType<T> where T: ?Sized + 'static, { /// Get flusher to explicitly flush mmap at a later time pub fn flusher(&self) -> MmapFlusher { // TODO: if we explicitly flush when dropping this type, we can switch to a weak reference // here to only flush if it hasn't been done already Box::new({ let mmap = self.mmap.clone(); move || { // flushing a zero-sized mmap can cause panicking on some systems if !mmap.is_empty() { mmap.flush()?; } Ok(()) } }) } /// Call [`memmap2::MmapMut::unchecked_advise`] on the underlying mmap. /// /// # Safety /// /// See [`memmap2::UncheckedAdvice`] doc. 
#[cfg(unix)] pub unsafe fn unchecked_advise(&self, advice: memmap2::UncheckedAdvice) -> std::io::Result<()> { unsafe { self.mmap.unchecked_advise(advice) } } pub fn populate(&self) -> std::io::Result<()> { self.mmap.populate(); Ok(()) } } impl<T> Deref for MmapType<T> where T: ?Sized + 'static, { type Target = T; // Has explicit 'bounded lifetime to clarify the inner reference never outlives this struct, // even though the reference has a static lifetime internally. #[allow(clippy::needless_lifetimes)] fn deref<'bounded>(&'bounded self) -> &'bounded Self::Target { self.r#type } } impl<T> DerefMut for MmapType<T> where T: ?Sized + 'static, { // Has explicit 'bounded lifetime to clarify the inner reference never outlives this struct, // even though the reference has a static lifetime internally. #[allow(clippy::needless_lifetimes)] fn deref_mut<'bounded>(&'bounded mut self) -> &'bounded mut Self::Target { self.r#type } } /// Slice of type `T` on a memory mapped file /// /// Functions as if it is `&[T]` because this implements [`Deref`] and [`DerefMut`]. /// /// A helper because [`MmapType`] doesn't support slices directly. pub struct MmapSlice<T> where T: Sized + 'static, { mmap: MmapType<[T]>, } impl<T> fmt::Debug for MmapSlice<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MmapSlice") .field("mmap", &self.mmap) .finish_non_exhaustive() } } impl<T> MmapSlice<T> { /// Transform a mmap into a typed slice mmap of type `&[T]`. /// /// This method is specifically intended for slices. /// /// # Safety /// /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined /// behavior. 
/// /// # Panics /// /// - panics when the size of the mmap isn't a multiple of size `T` /// - panics when the mmap data is not correctly aligned for type `T` /// - See: [`mmap_to_slice_unbounded`] pub unsafe fn from(mmap_with_slice: MmapMut) -> Self { unsafe { Self::try_from(mmap_with_slice).unwrap() } } /// Transform a mmap into a typed slice mmap of type `&[T]`. /// /// This method is specifically intended for slices. /// /// Returns an error when the mmap has an incorrect size. /// /// # Safety /// /// Unsafe because malformed data in the mmap may break type `T` resulting in undefined /// behavior. /// /// # Panics /// /// - panics when the mmap data is not correctly aligned for type `T` /// - See: [`mmap_to_slice_unbounded`] pub unsafe fn try_from(mmap_with_slice: MmapMut) -> Result<Self> { let r#type = unsafe { MmapType::try_slice_from(mmap_with_slice) }; r#type.map(|mmap| Self { mmap }) } /// Get flusher to explicitly flush mmap at a later time pub fn flusher(&self) -> MmapFlusher { self.mmap.flusher() } pub fn create(path: &Path, mut iter: impl ExactSizeIterator<Item = T>) -> Result<()> { let file_len = iter.len() * mem::size_of::<T>(); let _file = mmap_ops::create_and_ensure_length(path, file_len)?; let mmap = mmap_ops::open_write_mmap( path, AdviceSetting::Advice(Advice::Normal), // We only write sequentially false, )?; let mut mmap_slice = unsafe { Self::try_from(mmap)? }; mmap_slice.fill_with(|| iter.next().expect("iterator size mismatch")); mmap_slice.flusher()()?; Ok(()) } /// Populate all pages in the mmap. /// Block until all pages are populated. 
pub fn populate(&self) -> std::io::Result<()> { self.mmap.populate()?; Ok(()) } } impl<T> Deref for MmapSlice<T> { type Target = MmapType<[T]>; fn deref(&self) -> &Self::Target { &self.mmap } } impl<T> DerefMut for MmapSlice<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.mmap } } /// [`BitSlice`] on a memory mapped file /// /// Functions as if it is a [`BitSlice`] because this implements [`Deref`] and [`DerefMut`]. #[derive(Debug)] pub struct MmapBitSlice { mmap: MmapType<BitSlice>, } impl MmapBitSlice { /// Minimum file size for the mmap file, in bytes. const MIN_FILE_SIZE: usize = mem::size_of::<usize>(); /// Transform a mmap into a [`BitSlice`]. /// /// A (non-zero) header size in bytes may be provided to omit from the BitSlice data. /// /// # Panics /// /// - panics when the size of the mmap isn't a multiple of the inner [`BitSlice`] type /// - panics when the mmap data is not correctly aligned to the inner [`BitSlice`] type /// - panics when the header size isn't a multiple of the inner [`BitSlice`] type /// - See: [`mmap_to_slice_unbounded`] pub fn from(mmap: MmapMut, header_size: usize) -> Self { Self::try_from(mmap, header_size).unwrap() } /// Transform a mmap into a [`BitSlice`]. /// /// Returns an error when the mmap has an incorrect size. /// /// A (non-zero) header size in bytes may be provided to omit from the BitSlice data. /// /// # Panics /// /// - panics when the mmap data is not correctly aligned to the inner [`BitSlice`] type /// - panics when the header size isn't a multiple of the inner [`BitSlice`] type /// - See: [`mmap_to_slice_unbounded`] pub fn try_from(mut mmap: MmapMut, header_size: usize) -> Result<Self> { let data = unsafe { mmap_to_slice_unbounded(&mut mmap, header_size)? 
}; let bitslice = BitSlice::from_slice_mut(data); let mmap = Arc::new(mmap); Ok(Self { mmap: MmapType { r#type: bitslice, mmap, }, }) } /// Get flusher to explicitly flush mmap at a later time pub fn flusher(&self) -> MmapFlusher { self.mmap.flusher() } pub fn create(path: &Path, bitslice: &BitSlice) -> Result<()> { let bits_count = bitslice.len(); let bytes_count = bits_count .div_ceil(u8::BITS as usize) .next_multiple_of(Self::MIN_FILE_SIZE); let _file = mmap_ops::create_and_ensure_length(path, bytes_count)?; let mmap = mmap_ops::open_write_mmap( path, AdviceSetting::Advice(Advice::Normal), // We only write sequentially false, )?; let mut mmap_bitslice = MmapBitSlice::try_from(mmap, 0)?; mmap_bitslice.fill_with(|idx| { bitslice .get(idx) .map(|bitref| bitref.as_ref().to_owned()) // mmap bitslice can be bigger than bitslice because it must align with size of `usize` .unwrap_or(false) }); mmap_bitslice.flusher()()?; Ok(()) } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> std::io::Result<()> { self.mmap.populate()?; Ok(()) } } impl Deref for MmapBitSlice { type Target = BitSlice; fn deref(&self) -> &BitSlice { &self.mmap } } impl DerefMut for MmapBitSlice { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.mmap } } /// Typed mmap errors. #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Mmap length must be {0} to match the size of type, but it is {1}")] SizeExact(usize, usize), #[error("Mmap length must be at least {0} to match the size of type, but it is {1}")] SizeLess(usize, usize), #[error("Mmap length must be multiple of {0} to match the size of type, but it is {1}")] SizeMultiple(usize, usize), #[error("{0}")] Io(#[from] std::io::Error), #[error("File not found: {0}")] MissingFile(String), } /// Get a second mutable reference for type `T` from the given mmap /// /// # Warning /// /// The returned reference is unbounded. The user must ensure it never outlives the `mmap` type. 
/// /// # Safety /// /// - unsafe because we create a second (unbounded) mutable reference /// - malformed data in the mmap may break the transmuted type `T` resulting in undefined behavior /// /// # Panics /// /// - panics when the mmap data is not correctly aligned for type `T` unsafe fn mmap_prefix_to_type_unbounded<'unbnd, T>(mmap: &mut MmapMut) -> Result<&'unbnd mut T> where T: Sized, { let size_t = mem::size_of::<T>(); // Assert size if mmap.len() < size_t { return Err(Error::SizeLess(size_t, mmap.len())); } // Obtain unbounded bytes slice into mmap let bytes: &'unbnd mut [u8] = unsafe { let slice = mmap.deref_mut(); slice::from_raw_parts_mut(slice.as_mut_ptr(), size_t) }; // Assert alignment and size assert_alignment::<_, T>(bytes); #[cfg(debug_assertions)] if mmap.len() != size_t { log::warn!( "Mmap length {} is not equal to size of type {}", mmap.len(), size_t, ); } #[cfg(debug_assertions)] if bytes.len() != mem::size_of::<T>() { return Err(Error::SizeExact(mem::size_of::<T>(), bytes.len())); } let ptr = bytes.as_mut_ptr().cast::<T>(); Ok(unsafe { &mut *ptr }) } /// Get a second mutable reference for a slice of type `T` from the given mmap /// /// A (non-zero) header size in bytes may be provided to omit from the BitSlice data. /// /// # Warning /// /// The returned reference is unbounded. The user must ensure it never outlives the `mmap` type. 
/// /// # Safety /// /// - unsafe because we create a second (unbounded) mutable reference /// - malformed data in the mmap may break the transmuted slice for type `T` resulting in undefined /// behavior /// /// # Panics /// /// - panics when the mmap data is not correctly aligned for type `T` /// - panics when the header size isn't a multiple of size `T` unsafe fn mmap_to_slice_unbounded<'unbnd, T>( mmap: &mut MmapMut, header_size: usize, ) -> Result<&'unbnd mut [T]> where T: Sized, { let size_t = mem::size_of::<T>(); // Assert size if size_t == 0 { // For zero-sized T, data part must be zero-sized as well, we cannot have infinite slice debug_assert_eq!( mmap.len().saturating_sub(header_size), 0, "mmap data must be zero-sized, because size T is zero", ); } else { // Must be multiple of size T debug_assert_eq!(header_size % size_t, 0, "header not multiple of size T"); if !mmap.len().is_multiple_of(size_t) { return Err(Error::SizeMultiple(size_t, mmap.len())); } } // Obtain unbounded bytes slice into mmap let bytes: &'unbnd mut [u8] = unsafe { let slice = mmap.deref_mut(); &mut slice::from_raw_parts_mut(slice.as_mut_ptr(), slice.len())[header_size..] }; // Assert alignment and bytes size assert_alignment::<_, T>(bytes); debug_assert_eq!(bytes.len() + header_size, mmap.len()); // Transmute slice types unsafe { Ok(slice::from_raw_parts_mut( bytes.as_mut_ptr().cast::<T>(), bytes.len().checked_div(size_t).unwrap_or(0), )) } } /// Assert slice `&[S]` is correctly aligned for type `T`. /// /// # Panics /// /// Panics when alignment is wrong. 
fn assert_alignment<S, T>(bytes: &[S]) { assert_eq!( bytes.as_ptr().align_offset(mem::align_of::<T>()), 0, "type must be aligned", ); } #[cfg(test)] mod tests { use std::fmt::Debug; use std::iter; use rand::rngs::{SmallRng, StdRng}; use rand::{Rng, SeedableRng}; use tempfile::{Builder, NamedTempFile}; use super::*; use crate::madvise::AdviceSetting; use crate::mmap_ops; fn create_temp_mmap_file(len: usize) -> NamedTempFile { let tempfile = Builder::new() .prefix("test.") .suffix(".mmap") .tempfile() .unwrap(); #[allow(clippy::disallowed_methods, reason = "test code")] tempfile.as_file().set_len(len as u64).unwrap(); tempfile } #[test] fn test_open_zero_type() { check_open_zero_type::<()>(()); check_open_zero_type::<u8>(0); check_open_zero_type::<usize>(0); check_open_zero_type::<f32>(0.0); } fn check_open_zero_type<T: Sized + PartialEq + Debug + 'static>(zero: T) { let bytes = mem::size_of::<T>(); let tempfile = create_temp_mmap_file(bytes); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mmap_type: MmapType<T> = unsafe { MmapType::from(mmap) }; assert_eq!(mmap_type.deref(), &zero); } #[test] fn test_open_zero_slice() { check_open_zero_slice::<()>(0, ()); check_open_zero_slice::<u8>(0, 0); check_open_zero_slice::<u8>(1, 0); check_open_zero_slice::<u8>(131, 0); check_open_zero_slice::<usize>(0, 0); check_open_zero_slice::<usize>(1, 0); check_open_zero_slice::<usize>(131, 0); check_open_zero_slice::<f32>(0, 0.0); check_open_zero_slice::<f32>(1, 0.0); check_open_zero_slice::<f32>(131, 0.0); } #[test] #[should_panic] fn test_open_zero_slice_infinite_length() { // A slice with zero-sized type T can never be more than 0 bytes check_open_zero_slice::<()>(1, ()); } fn check_open_zero_slice<T: Sized + PartialEq + Debug + 'static>(len: usize, zero: T) { let bytes = mem::size_of::<T>() * len; let tempfile = create_temp_mmap_file(bytes); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, 
false).unwrap(); let mmap_slice: MmapSlice<T> = unsafe { MmapSlice::from(mmap) }; assert_eq!(mmap_slice.len(), len); assert!(mmap_slice.iter().all(|i| i == &zero)); } #[test] fn test_reopen_random() { let mut rng = SmallRng::seed_from_u64(42); check_reopen_random::<(), _>(0, || rng.random()); check_reopen_random::<u8, _>(0, || rng.random()); check_reopen_random::<u8, _>(1, || rng.random()); check_reopen_random::<u8, _>(131, || rng.random()); check_reopen_random::<u64, _>(0, || rng.random()); check_reopen_random::<u64, _>(1, || rng.random()); check_reopen_random::<u64, _>(131, || rng.random()); check_reopen_random::<f32, _>(0, || rng.random()); check_reopen_random::<f32, _>(1, || rng.random()); check_reopen_random::<f32, _>(131, || rng.random()); } fn check_reopen_random<T, R>(len: usize, rng: R) where T: Sized + Copy + PartialEq + Debug + 'static, R: FnMut() -> T, { let bytes = mem::size_of::<T>() * len; let tempfile = create_temp_mmap_file(bytes); let template: Vec<T> = iter::repeat_with(rng).take(len).collect(); // Write random values from template into mmap { let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mut mmap_slice: MmapSlice<T> = unsafe { MmapSlice::from(mmap) }; assert_eq!(mmap_slice.len(), len); mmap_slice.copy_from_slice(&template); } // Reopen and assert values from template { let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mmap_slice: MmapSlice<T> = unsafe { MmapSlice::from(mmap) }; assert_eq!(mmap_slice.as_ref(), template); } } #[test] fn test_bitslice() { check_bitslice_with_header(0, 0); check_bitslice_with_header(0, 128); check_bitslice_with_header(512, 0); check_bitslice_with_header(512, 256); check_bitslice_with_header(11721 * 8, 256); } fn check_bitslice_with_header(bits: usize, header_size: usize) { let bytes = (mem::size_of::<usize>() * bits / 8) + header_size; let tempfile = create_temp_mmap_file(bytes); // Fill bitslice { let mut rng = 
StdRng::seed_from_u64(42); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mut mmap_bitslice = MmapBitSlice::from(mmap, header_size); (0..bits).for_each(|i| mmap_bitslice.set(i, rng.random())); } // Reopen and assert contents { let mut rng = StdRng::seed_from_u64(42); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mmap_bitslice = MmapBitSlice::from(mmap, header_size); (0..bits).for_each(|i| assert_eq!(mmap_bitslice[i], rng.random::<bool>())); } } #[test] fn test_zero_sized_type() { { let tempfile = create_temp_mmap_file(0); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let result = unsafe { MmapType::<()>::try_from(mmap).unwrap() }; assert_eq!(result.deref(), &()); } { let tempfile = create_temp_mmap_file(0); let mmap = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let result = unsafe { MmapSlice::<()>::try_from(mmap).unwrap() }; assert_eq!(result.as_ref(), &[]); assert_alignment::<_, ()>(result.as_ref()); } } #[test] fn test_double_read_mmap() { // Create and open a tmp file // Mmap it with write access // then mmap it with read access // Check that the data is synchronized let tempfile = create_temp_mmap_file(1024); let mut mmap_write = mmap_ops::open_write_mmap(tempfile.path(), AdviceSetting::Global, false).unwrap(); let mmap_read = mmap_ops::open_read_mmap( tempfile.path(), AdviceSetting::Advice(Advice::Sequential), false, ) .unwrap(); mmap_write[333] = 42; assert_eq!(mmap_read[333], 42); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/build.rs
lib/common/common/build.rs
fn main() { // Required for tango benchmarks, see: // https://github.com/bazhenov/tango/blob/v0.6.0/README.md#getting-started println!("cargo:rustc-link-arg-benches=-rdynamic"); println!("cargo:rerun-if-changed=build.rs"); }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/tempfile_ext.rs
lib/common/common/src/tempfile_ext.rs
use std::ops::Deref; use std::path::{Path, PathBuf}; use tempfile::{PathPersistError, TempPath}; /// Either a temporary or a persistent path. #[must_use = "returns a TempPath, if dropped the downloaded file is deleted"] pub enum MaybeTempPath { Temporary(TempPath), Persistent(PathBuf), } impl MaybeTempPath { /// Keep the temporary file from being deleted. /// No-op if the path is persistent. pub fn keep(self) -> Result<PathBuf, PathPersistError> { match self { MaybeTempPath::Temporary(path) => path.keep(), MaybeTempPath::Persistent(path) => Ok(path), } } /// Close the temporary file, deleting it. /// No-op if the path is persistent. pub fn close(self) -> Result<(), std::io::Error> { match self { MaybeTempPath::Temporary(path) => path.close(), MaybeTempPath::Persistent(_) => Ok(()), } } } impl Deref for MaybeTempPath { type Target = Path; fn deref(&self) -> &Self::Target { match self { MaybeTempPath::Temporary(path) => path, MaybeTempPath::Persistent(path) => path, } } } impl From<TempPath> for MaybeTempPath { fn from(path: TempPath) -> Self { Self::Temporary(path) } } impl From<PathBuf> for MaybeTempPath { fn from(path: PathBuf) -> Self { Self::Persistent(path) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/num_traits.rs
lib/common/common/src/num_traits.rs
//! Traits that should belong to the [`num-traits`] crate, but are missing. //! //! [`num-traits`]: https://crates.io/crates/num-traits use std::num::{NonZero, Saturating}; pub trait ConstBits { /// The size of this integer type in bits. const BITS: u32; } macro_rules! impl_const_bits { ($($t:ty),* $(,)?) => { $( impl ConstBits for $t { const BITS: u32 = Self::BITS; } impl ConstBits for NonZero<$t> { const BITS: u32 = Self::BITS; } impl ConstBits for Saturating<$t> { const BITS: u32 = Self::BITS; } )* }; } impl_const_bits!(i8, i16, i32, i64, i128, isize); impl_const_bits!(u8, u16, u32, u64, u128, usize);
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/bitpacking_ordered.rs
lib/common/common/src/bitpacking_ordered.rs
//! A compression algorithm to store medium-to-large-sized sorted arrays of //! `u64` values. //! //! Allows for fast random access within the compressed data. //! //! Assumptions: //! - The input values are sorted. //! - The distribution of the values is somewhat uniform, i.e. there are no //! large gaps between values. A single gap might bloat the overall size, but //! it shouldn't be worse than storing byte-aligned bases without deltas. //! //! # Format //! //! The compressed data consists of small, uniformely-sized chunks. //! The size of each chunk is determined by compression parameters. //! The compression parameters are determined automatically during compression. //! //! Each chunk contains `1 << chunk_len_log2` values: the first value (the base) //! is stored as is, and the rest are stored as deltas from the base. Or, more //! formally: //! - `chunk_value[0] = base` (assume `delta[0]` is 0) //! - `chunk_value[i] = base + delta[i]` for `i > 0` //! //! ```text //! ┌───────┬───────┬───────┬ ┬───────┬────────┐ //! │chunk 0│chunk 1│chunk 2│ … │chunk X│7 × 0xFF│ //! └───────┤ ├───────┴ ┴───────┴────────┘ //! ╭───────╯ ╰────────────────╮ //! │ bitpacked chunk │ //! ├────┬──┬──┬──┬──┬ ┬────┬──────┤ //! │base│Δ₁│Δ₂│Δ₃│Δ₄│ … │Δₙ₋₁│bitpad│ //! └────┴──┴──┴──┴──┴ ┴────┴──────┘ //! ``` //! //! In the above diagram: //! - `7 × 0xFF` is 8 bytes tail (see [`TAIL_SIZE`]). //! - `base` is `parameters.base_bits` wide. //! - `Δ₁`..`Δₙ₋₁` are delta values, each is `parameters.delta_bits` wide. //! - `bitpad` is a bit padding (0..7 bits) so the chunk is byte-aligned. use std::ops::RangeInclusive; use thiserror::Error; use zerocopy::little_endian::U64; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use crate::bitpacking::{BitWriter, make_bitmask, packed_bits}; /// The size of the tail padding. /// These extra 7 bytes after the last chunk allows the decompressor to safely /// perform unchecked unaligned 8-byte reads. 
const TAIL_SIZE: usize = size_of::<u64>() - 1; /// The allowed range for the `delta_bits` parameter. /// Limiting it up to 7*8 = 56 bits allows the decompressor to read a single /// delta value in a single unaligned read. /// Disallowing 0 removes unlikely edge cases. const DELTA_BITS_RANGE: RangeInclusive<u8> = 1..=(u64::BITS - u8::BITS) as u8; /// Larger values are unlikely to produce better compression. const MAX_CHUNK_LEN_LOG2: u8 = 7; /// Compress the provided data using the best parameters found. /// /// # Panics /// /// This function may panic if the input data is not sorted. pub fn compress(values: &[u64]) -> (Vec<u8>, Parameters) { let parameters = Parameters::find_best(values); let compressed = compress_with_parameters(values, parameters); (compressed, parameters) } /// Compress the data with given parameters. fn compress_with_parameters(values: &[u64], parameters: Parameters) -> Vec<u8> { let expected_size = parameters.total_chunks_size_bytes().unwrap() + TAIL_SIZE; let mut compressed = Vec::with_capacity(expected_size); for chunk in values.chunks(1 << parameters.chunk_len_log2) { let first = chunk[0]; let mut w = BitWriter::new(&mut compressed); w.write(first, parameters.base_bits); for &value in chunk.iter().skip(1) { w.write(value - first, parameters.delta_bits); } // For the last (incomplete) chunk, pad it with 0b11...11, so all chunks // have the same size. 
for _ in 0..(1 << parameters.chunk_len_log2) - chunk.len() { w.write( make_bitmask::<u64>(parameters.delta_bits), parameters.delta_bits, ); } w.finish(); // bit padding } compressed.extend_from_slice(&[0xFF; TAIL_SIZE]); assert_eq!(compressed.len(), expected_size); compressed } #[derive(Clone, Debug)] pub struct Reader<'a> { base_bits: u8, base_mask: u64, delta_bits: u8, delta_mask: u64, chunk_len_log2: u8, chunk_len_mask: usize, chunk_size_bytes: usize, compressed: &'a [u8], len: usize, } #[derive(Error, Debug)] #[error("decompression error: {0}")] pub struct DecompressionError(String); impl<'a> Reader<'a> { pub fn new( parameters: Parameters, bytes: &'a [u8], ) -> Result<(Self, &'a [u8]), DecompressionError> { // Safety checks: the `get()` method doesn't perform bounds checking, // so we need to be extra cautious here, including checking for // overflows. if !parameters.valid() { return Err(DecompressionError("invalid parameters".to_string())); } let total_size_bytes = parameters .total_chunks_size_bytes() .and_then(|size| size.checked_add(TAIL_SIZE)) .ok_or_else(|| DecompressionError("invalid parameters".to_string()))?; let (compressed, bytes) = bytes.split_at_checked(total_size_bytes).ok_or_else(|| { DecompressionError(format!( "insufficient length (compressed data, expected {total_size_bytes} bytes, got {})", bytes.len(), )) })?; let result = Self { base_bits: parameters.base_bits, base_mask: make_bitmask(parameters.base_bits), delta_bits: parameters.delta_bits, delta_mask: make_bitmask(parameters.delta_bits), chunk_len_log2: parameters.chunk_len_log2, chunk_len_mask: make_bitmask(parameters.chunk_len_log2), chunk_size_bytes: parameters.chunk_size_bytes().unwrap(), compressed, len: parameters.length.get() as usize, }; // Safety checks: the `get()` method doesn't perform bounds checking. // The assertions below ensure that the `compressed` slice holds enough // bytes for any index reachable by `get()`. 
if let Some(max_index) = result.len.checked_sub(1) { let chunk_offset = (max_index >> result.chunk_len_log2) * result.chunk_size_bytes; // *base* assert!(chunk_offset + size_of::<u64>() <= result.compressed.len()); let max_value_index = result.chunk_len_mask; if max_value_index > 0 { let delta_offset_bits = result.base_bits as usize + (max_value_index - 1) * result.delta_bits as usize; // *delta* assert!( chunk_offset + delta_offset_bits / u8::BITS as usize + size_of::<u64>() <= result.compressed.len() ); } } Ok((result, bytes)) } /// Parameters used to compress the data. #[cfg(feature = "testing")] pub fn parameters(&self) -> Parameters { Parameters { length: U64::new(self.len as u64), base_bits: self.base_bits, delta_bits: self.delta_bits, chunk_len_log2: self.chunk_len_log2, } } /// The number of values in the decompressed data. #[inline] #[expect(clippy::len_without_is_empty, reason = "len() is cheap")] pub fn len(&self) -> usize { self.len } /// Get the value at the given index. #[inline] pub fn get(&self, index: usize) -> Option<u64> { if index >= self.len { return None; } let chunk_offset = (index >> self.chunk_len_log2) * self.chunk_size_bytes; let value_index = index & self.chunk_len_mask; let chunk_ptr = self.compressed.as_ptr().wrapping_add(chunk_offset); // SAFETY: see the *base* comment in `new()`. let base = unsafe { read_u64_le(chunk_ptr) } & self.base_mask; if value_index == 0 { return Some(base); } let delta_offset_bits = self.base_bits as usize + (value_index - 1) * self.delta_bits as usize; // SAFETY: see the *delta* comment in `new()`. let delta = (unsafe { read_u64_le(chunk_ptr.add(delta_offset_bits / u8::BITS as usize)) } >> (delta_offset_bits % u8::BITS as usize)) & self.delta_mask; Some(base + delta) } } #[inline(always)] unsafe fn read_u64_le(ptr: *const u8) -> u64 { unsafe { u64::from_le(ptr.cast::<u64>().read_unaligned()) } } /// Compression parameters. Required for decompression. 
#[derive(Clone, Copy, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)] #[repr(C)] pub struct Parameters { /// Amount of values in the decompressed data. pub length: U64, /// Amount of bits to store base values. pub base_bits: u8, /// Amount of bits to store delta values. pub delta_bits: u8, /// Log2 of the amount of values in a chunk. pub chunk_len_log2: u8, } impl Parameters { /// Check if the parameters are valid. fn valid(self) -> bool { u32::from(self.base_bits) <= u64::BITS && DELTA_BITS_RANGE.contains(&self.delta_bits) && self.chunk_len_log2 <= MAX_CHUNK_LEN_LOG2 } /// Size of a single chunk in bytes. /// Returns `None` on overflow: see safety comments in [`Reader::new()`]. #[deny(clippy::arithmetic_side_effects, reason = "extra cautious for safety")] fn chunk_size_bytes(self) -> Option<usize> { let bits = (self.base_bits as usize).checked_add( (self.delta_bits as usize).checked_mul(make_bitmask::<usize>(self.chunk_len_log2))?, )?; Some(bits.div_ceil(u8::BITS as usize)) } /// Size of the compressed data, without the tail. /// Returns `None` on overflow: see safety comments in [`Reader::new()`]. #[deny(clippy::arithmetic_side_effects, reason = "extra cautious for safety")] fn total_chunks_size_bytes(self) -> Option<usize> { let chunks_count = (self.length.get() as usize).div_ceil(1 << self.chunk_len_log2); chunks_count.checked_mul(self.chunk_size_bytes()?) } /// Find the best compression parameters for the given values. fn find_best(values: &[u64]) -> Self { Self::try_all(values) .min_by_key(|parameters| parameters.total_chunks_size_bytes()) .unwrap() } /// Generate all possible compression parameters for the given values. 
fn try_all(values: &[u64]) -> impl Iterator<Item = Parameters> + use<'_> { let last_value = values.last().copied().unwrap_or(0); (0..=MAX_CHUNK_LEN_LOG2) .map(move |chunk_len_log2| { let mut delta_bits = *DELTA_BITS_RANGE.start(); for chunk in values.chunks(1 << chunk_len_log2) { delta_bits = delta_bits.max(packed_bits(chunk.last().unwrap() - chunk[0])); } Parameters { length: U64::new(values.len() as u64), base_bits: packed_bits(last_value).max(1), delta_bits, chunk_len_log2, } }) .filter(|parameters| DELTA_BITS_RANGE.contains(&parameters.delta_bits)) } } #[cfg(feature = "testing")] pub fn gen_test_sequence(rng: &mut impl rand::Rng, max_delta: u64, len: usize) -> Vec<u64> { let mut last = 0u64; (0..len) .map(|_| { last = last.checked_add(rng.random_range(0..=max_delta)).unwrap(); last }) .collect() } #[cfg(test)] mod tests { use std::iter::{once, once_with}; use rand::rngs::StdRng; use rand::{Rng as _, SeedableRng}; use super::*; #[test] fn test_compress_decompress() { for values in test_sequences() { for parameters in Parameters::try_all(&values) { let compressed = compress_with_parameters(&values, parameters); let (decompressor, bytes) = Reader::new(parameters, &compressed).unwrap(); assert!(bytes.is_empty()); assert_eq!(decompressor.len(), values.len()); for (i, &value) in values.iter().enumerate() { assert_eq!(decompressor.get(i), Some(value)); } assert_eq!(decompressor.get(values.len()), None); } } } fn test_sequences() -> impl Iterator<Item = Vec<u64>> { let params = [ (10, 1_000), (20, 10_000), // large `delta_count` (10_000_000, 10_000), // large `base_bits` (0x123456789AB, 1_000), // both large `base_bits` and `delta_bits` ]; itertools::chain!( once(vec![]), once(vec![0]), once(vec![1]), once(vec![u64::MAX]), once(vec![u64::MAX, u64::MAX]), once(vec![0, u64::MAX]), // Catches the "incomplete chunk" case. 
params.into_iter().map(|(max_delta, len)| { gen_test_sequence(&mut StdRng::seed_from_u64(42), max_delta, len) }), once_with(|| { let mut rng = StdRng::seed_from_u64(42); let mut values = (0..1000).map(|_| rng.random()).collect::<Vec<_>>(); values.sort_unstable(); values }), ) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/scope_tracker.rs
lib/common/common/src/scope_tracker.rs
use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; /// An RAII-style counter that tracks the number of active scopes. /// Internally uses a reference-counted value, allowing it to be freely cloned. #[derive(Default, Debug, Clone)] pub struct ScopeTracker { inner: Arc<AtomicUsize>, } impl ScopeTracker { pub fn new() -> Self { Self { inner: Arc::new(AtomicUsize::new(0)), } } /// Measures the scope, the counter should keep track of. /// Must always be bound to a variable, to not get dropped prematurely! #[must_use] pub fn measure_scope(&self) -> ScopeTrackerGuard { ScopeTrackerGuard::measure(self) } /// Get the current value of the counter. pub fn get(&self, ordering: Ordering) -> usize { self.inner.load(ordering) } } const COUNT_SIZE: usize = 1; /// Guard type for [`ScopeTracker`], that must be hold for the entire duration of a scope. /// This type is in charge of correctly counting the passed counter. pub struct ScopeTrackerGuard { scope_tracker: ScopeTracker, } impl ScopeTrackerGuard { #[must_use] fn measure(scope_tracker: &ScopeTracker) -> Self { let scope_tracker = scope_tracker.clone(); scope_tracker.inner.fetch_add(COUNT_SIZE, Ordering::SeqCst); Self { scope_tracker } } } impl Drop for ScopeTrackerGuard { fn drop(&mut self) { self.scope_tracker .inner .fetch_sub(COUNT_SIZE, Ordering::SeqCst); } } #[cfg(test)] mod test { use std::sync::atomic::{AtomicBool, Ordering}; use std::thread::{self, JoinHandle}; use std::time::Duration; use super::*; #[test] fn test_scope_tracker() { let counter = ScopeTracker::new(); { let _measure_guard = counter.measure_scope(); assert_eq!(counter.get(Ordering::SeqCst), 1); } assert_eq!(counter.get(Ordering::SeqCst), 0); } #[test] fn test_scope_tracker_loop() { let counter = ScopeTracker::new(); for _ in 0..100 { let _measure_guard = counter.measure_scope(); assert_eq!(counter.get(Ordering::SeqCst), 1); } assert_eq!(counter.get(Ordering::SeqCst), 0); } #[test] fn test_scope_tracker_threads() { let counter = 
ScopeTracker::new(); let run = Arc::new(AtomicBool::new(true)); let mut handles: Vec<JoinHandle<()>> = vec![]; let started_threads = Arc::new(AtomicUsize::new(0)); const LEN: usize = 20; for _ in 0..LEN { let counter_clone = counter.clone(); let run_clone = run.clone(); let started_threads_clone = started_threads.clone(); let handle = thread::spawn(move || { let _guard = counter_clone.measure_scope(); started_threads_clone.fetch_add(1, Ordering::Relaxed); while run_clone.load(Ordering::Relaxed) { thread::sleep(Duration::from_secs(1)); } }); handles.push(handle); } // Wait until all threads have started. // To prevent this test becoming flaky by waiting a constant amount of time, we use an atomic counter here. while started_threads.load(Ordering::Relaxed) < LEN { thread::sleep(Duration::from_secs(1)); } assert_eq!(counter.get(Ordering::SeqCst), LEN); // Stop spawned threads run.store(false, Ordering::Release); // Wait for them to gracefully finish. for handle in handles { handle.join().unwrap(); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/bytes.rs
lib/common/common/src/bytes.rs
/// Convert number of bytes into human-readable format /// /// # Examples /// - 123 -> "123 B" /// - 1024 -> "1.00 KiB" /// - 1000000 -> "976.56 KiB" /// - 1048576 -> "1.00 MiB" pub fn bytes_to_human(bytes: usize) -> String { const UNITS: [&str; 9] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]; if bytes < 1024 { return format!("{bytes} B"); } let mut size = bytes as f64; let mut unit_index = 0; while size >= 1024.0 && unit_index < UNITS.len() - 1 { size /= 1024.0; unit_index += 1; } format!("{size:.2} {}", UNITS[unit_index]) }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/stable_hash.rs
lib/common/common/src/stable_hash.rs
use std::hash::{Hash, Hasher}; use bytemuck::TransparentWrapper; /// A hashable type, like [`Hash`], but with a stable/portable implementation. /// /// According to the [`Hash`] docs, its implementations for most standard /// library types should not considered stable across platforms or compiler /// versions. Neither we can rely on implementations for types from third-party /// crates. /// /// This trait is intended for hashes that should be stable across different /// Qdrant versions. pub trait StableHash { /// Feed this value into the hasher. /// /// Similar to [`Hash::hash()`], but accepts [`Hasher::write()`] as a /// closure. This difference prevents implementations of this trait from: /// 1. Reusing [`Hash`] implementations which might be not portable. /// 2. Using other [`Hasher`] methods which are non-portable. See /// <https://docs.rs/siphasher/1.0.1/siphasher/index.html#note>. fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W); } impl StableHash for i32 { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { // WARN: endianess-dependent; keep for backward compatibility write(&self.to_ne_bytes()); } } impl StableHash for u32 { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { // WARN: endianess-dependent; keep for backward compatibility write(&self.to_ne_bytes()); } } impl StableHash for u64 { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { // WARN: endianess-dependent; keep for backward compatibility write(&self.to_ne_bytes()); } } impl StableHash for usize { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { (*self as u64).stable_hash(write); } } impl<A: StableHash, B: StableHash> StableHash for (A, B) { fn stable_hash<W: FnMut(&[u8])>(&self, write: &mut W) { let (a, b) = self; a.stable_hash(write); b.stable_hash(write); } } /// Compatibility wrapper that allows to use [`StableHash`] implementation in /// contexts where [`Hash`] is expected. 
/// /// This wrapper should be used in accompaniment with a stable [`Hasher`] /// implementation such as from the `siphasher` crate. Hashes produced by /// [`std::hash::DefaultHasher`] should not be relied upon over releases. #[derive(Copy, Clone, Eq, PartialEq, Debug, TransparentWrapper)] #[repr(transparent)] pub struct StableHashed<T: StableHash>(pub T); impl<T: StableHash> Hash for StableHashed<T> { fn hash<H: Hasher>(&self, state: &mut H) { self.0.stable_hash(&mut |bytes| state.write(bytes)); } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/lib.rs
lib/common/common/src/lib.rs
pub mod bitpacking; pub mod bitpacking_links; pub mod bitpacking_ordered; pub mod budget; pub mod bytes; pub mod counter; pub mod cow; pub mod cpu; pub mod defaults; pub mod delta_pack; pub mod disk; pub mod either_variant; pub mod ext; pub mod fixed_length_priority_queue; pub mod flags; pub mod is_alive_lock; pub mod iterator_ext; pub mod math; pub mod maybe_uninit; pub mod mmap_hashmap; pub mod num_traits; pub mod panic; pub mod progress_tracker; pub mod rate_limiting; pub mod save_on_disk; pub mod scope_tracker; pub mod small_uint; pub mod stable_hash; pub mod tar_ext; pub mod tempfile_ext; pub mod top_k; pub mod typelevel; pub mod types; pub mod validation; pub mod zeros;
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/bitpacking.rs
lib/common/common/src/bitpacking.rs
use num_traits::{AsPrimitive, ConstOne, PrimInt, Unsigned}; use crate::num_traits::ConstBits; /// The internal buffer type for [`BitWriter`] and [`BitReader`]. /// Instead of writing/reading a single byte at a time, they write/read /// `size_of::<Buf>()` bytes at once, for a better performance. /// This is an implementation detail and shouldn't affect the data layout. /// Any unsigned numeric type larger than `u32` should work. type Buf = u64; /// Writes bits to the `u8` vector. /// It's like [`std::io::Write`], but for bits rather than bytes. pub struct BitWriter<'a> { output: &'a mut Vec<u8>, buf: Buf, buf_bits: u8, } impl<'a> BitWriter<'a> { /// Create a new writer that appends bits to the `output`. #[inline] pub fn new(output: &'a mut Vec<u8>) -> Self { Self { output, buf: 0, buf_bits: 0, } } /// Write a `value` of `bits` bits to the output. /// /// The `bits` must be less than or equal to 32, and the `value` must fit in /// the `bits` bits. #[inline] pub fn write<T: ConstBits + Into<Buf>>(&mut self, value: T, bits: u8) { let value = value.into(); #[cfg(test)] debug_assert!(u32::from(bits) <= T::BITS && packed_bits(value) <= bits); self.buf |= value << self.buf_bits; self.buf_bits += bits; if self.buf_bits >= Buf::BITS as u8 { // ┌──value───┐┌───initial self.buf────┐ // rrrrrvvvvvvvbbbbbbbbbbbbbbbbbbbbbbbbb // └[2]┘└─────────────[1]──────────────┘ self.output.extend_from_slice(&self.buf.to_le_bytes()); // [1] self.buf_bits -= Buf::BITS as u8; if bits - self.buf_bits == Buf::BITS as u8 { self.buf = 0; } else { self.buf = value >> (bits - self.buf_bits); // [2] } } } /// Write the remaining bufferized bits to the output. #[inline] pub fn finish(self) { self.output.extend_from_slice( &self.buf.to_le_bytes()[..(self.buf_bits as usize).div_ceil(u8::BITS as usize)], ); } } /// Reads bits from `u8` slice. /// It's like [`std::io::Read`], but for bits rather than bytes. 
pub struct BitReader<'a> { input: &'a [u8], buf: Buf, buf_bits: u8, mask: Buf, bits: u8, } impl<'a> BitReader<'a> { #[inline] pub fn new(input: &'a [u8]) -> Self { Self { input, buf: 0, buf_bits: 0, mask: 0, bits: 0, } } /// Configure the reader to read `bits` bits at a time. This affects /// subsequent calls to [`read()`]. /// /// The `bits` must be less than or equal to 32. /// /// Note: it's a separate method and not a parameter of [`read()`] to /// optimize reading a group of values with the same bit size. /// /// [`read()`]: Self::read #[inline] pub fn set_bits(&mut self, bits: u8) { #[cfg(test)] debug_assert!(u32::from(bits) <= Buf::BITS); self.bits = bits; self.mask = make_bitmask(bits); } /// Returns the number of bits set with [`set_bits()`]. /// /// [`set_bits()`]: Self::set_bits #[inline] pub fn bits(&self) -> u8 { self.bits } /// Read next `bits` bits from the input. The amount of bits must be set /// with [`set_bits()`] before calling this method. /// /// If read beyond the end of the input, the result would be an unspecified /// garbage. /// /// [`set_bits()`]: Self::set_bits #[inline] pub fn read<T>(&mut self) -> T where T: 'static + Copy, Buf: AsPrimitive<T>, { if self.buf_bits >= self.bits { self.buf_bits -= self.bits; let val = (self.buf & self.mask).as_(); self.buf >>= self.bits; val } else { // Consider a naive approach: // // let new_buf = read_buf_and_advance(&mut self.input); // self.buf |= new_buf << self.buf_bits; // *overflow* // self.buf_bits += size_of_val(&new_buf) * u8::BITS; // ... then proceed as usual ... // // For performance reasons, we want `new_buf` and `self.buf` to be // both 64-bit. But when they are the same, the naive approach would // overflow in the commented line. So, the following code is a trick // to let us use the same type for both. 
// // ┌───────────new_buf────────────┐┌─self.buf─┐ // rrrrrrrrrrrrrrrrrrrrrrrrrvvvvvvvbbbbbbbbbbbb // └──────────[3]──────────┘├─[2]─┘└───[1]────┤ // └───────val───────┘ let new_buf = read_buf_and_advance(&mut self.input); let val = ((/*[1]*/self.buf) | (/*[2]*/new_buf << self.buf_bits) & self.mask).as_(); self.buf_bits += Buf::BITS as u8 - self.bits; if self.buf_bits == 0 { self.buf = 0; } else { self.buf = /*[3]*/ new_buf >> (Buf::BITS as u8 - self.buf_bits); } val } } } /// Read a single [`Buf`] from the `input` and advance (or not) the `input`. #[inline] fn read_buf_and_advance(input: &mut &[u8]) -> Buf { let mut buf = 0; if input.len() >= size_of::<Buf>() { // This line translates to a single unaligned pointer read. buf = Buf::from_le_bytes(input[0..size_of::<Buf>()].try_into().unwrap()); // This line translates to a single pointer advance. *input = &input[size_of::<Buf>()..]; } else { // We could remove this branch by explicitly using unsafe pointer // operations in the branch above, but we are playing it safe here. for (i, byte) in input.iter().copied().enumerate() { buf |= Buf::from(byte) << (i * u8::BITS as usize); } // The following line is commented out for performance reasons as this // should be the last read. If the caller will try to read input again // anyway, it will get the same values again (aka "unspecified garbage" // as stated in the documentation). // *input = &[]; // Not needed, see the comment above. } buf } /// Minimum amount of bits required to store a value in the range /// `0..=max_value`. 
pub fn packed_bits<T: ConstBits + PrimInt + Unsigned>(max_value: T) -> u8 { (T::BITS - max_value.leading_zeros()) as u8 } pub fn make_bitmask<T: ConstBits + ConstOne + PrimInt + Unsigned>(bits: u8) -> T { if u32::from(bits) >= T::BITS { T::max_value() } else { (T::ONE << usize::from(bits)) - T::ONE } } #[cfg(test)] mod tests { use std::fmt::Debug; use std::iter::zip; use num_traits::{ConstOne, ConstZero, PrimInt, Unsigned}; use rand::distr::uniform::SampleUniform; use rand::rngs::StdRng; use rand::{Rng as _, SeedableRng as _}; use super::*; #[test] fn test_simple() { let mut packed = Vec::new(); let mut w = BitWriter::new(&mut packed); w.write::<u32>(0b01010, 5); w.write::<u32>(0b10110, 5); w.write::<u32>(0b10100, 5); w.write::<u32>(0b010110010, 9); w.write::<u32>(0b101100001, 9); w.write::<u32>(0b001001101, 9); w.write::<u32>(0x12345678, 32); w.finish(); assert_eq!(packed.len(), 10); let mut r = BitReader::new(&packed); r.set_bits(5); assert_eq!(r.read::<u32>(), 0b01010); assert_eq!(r.read::<u32>(), 0b10110); assert_eq!(r.read::<u32>(), 0b10100); r.set_bits(9); assert_eq!(r.read::<u32>(), 0b010110010); assert_eq!(r.read::<u32>(), 0b101100001); assert_eq!(r.read::<u32>(), 0b001001101); r.set_bits(32); assert_eq!(r.read::<u32>(), 0x12345678); } #[test] fn test_random() { test_random_impl::<u8>(); test_random_impl::<u16>(); test_random_impl::<u32>(); test_random_impl::<u64>(); } fn test_random_impl<T>() where Buf: AsPrimitive<T>, T: ConstBits + ConstOne + ConstZero + Copy + Debug + Into<Buf> + PrimInt + SampleUniform + Unsigned + 'static, { let mut rng = StdRng::seed_from_u64(42); let mut bits_per_value = Vec::new(); let mut values = Vec::<T>::new(); let mut packed = Vec::new(); let mut unpacked = Vec::<T>::new(); for len in 0..40 { for _ in 0..100 { values.clear(); bits_per_value.clear(); let mut total_bits = 0; for _ in 0..len { let bits = rng.random_range(0u8..=T::BITS as u8); values.push(rng.random_range(T::ZERO..=make_bitmask(bits))); bits_per_value.push(bits); 
total_bits += u64::from(bits); } packed.clear(); let mut w = BitWriter::new(&mut packed); for (&x, &bits) in zip(&values, &bits_per_value) { w.write(x, bits); } w.finish(); assert_eq!(packed.len(), total_bits.next_multiple_of(8) as usize / 8); unpacked.clear(); let mut r = BitReader::new(&packed); for &bits in &bits_per_value { r.set_bits(bits); unpacked.push(r.read()); } assert_eq!(values, unpacked); } } } #[test] fn test_packed_bits_simple() { assert_eq!(packed_bits(0_u32), 0); assert_eq!(packed_bits(1_u32), 1); assert_eq!(packed_bits(2_u32), 2); assert_eq!(packed_bits(3_u32), 2); assert_eq!(packed_bits(4_u32), 3); assert_eq!(packed_bits(7_u32), 3); assert_eq!(packed_bits(0x_7FFF_FFFF_u32), 31); assert_eq!(packed_bits(0x_8000_0000_u32), 32); assert_eq!(packed_bits(0x_FFFF_FFFF_u32), 32); } #[test] fn test_packed_bits_extensive() { fn check<T: Unsigned + PrimInt + ConstBits + TryFrom<u128>>(v: u128, expected_bits: u8) { if let Ok(x) = v.try_into() { assert_eq!(packed_bits::<T>(x), expected_bits); } } for expected_bits in 0..=128_u8 { let (min, max); if expected_bits == 0 { (min, max) = (0, 0); } else { min = 1_u128 << (expected_bits - 1); max = (min - 1) * 2 + 1; } check::<u8>(min, expected_bits); check::<u16>(min, expected_bits); check::<u32>(min, expected_bits); check::<u64>(min, expected_bits); check::<u128>(min, expected_bits); check::<usize>(min, expected_bits); check::<u8>(max, expected_bits); check::<u16>(max, expected_bits); check::<u32>(max, expected_bits); check::<u64>(max, expected_bits); check::<u128>(max, expected_bits); check::<usize>(max, expected_bits); } } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/ext.rs
lib/common/common/src/ext.rs
use bitvec::order::BitOrder; use bitvec::slice::BitSlice; use bitvec::store::BitStore; pub trait OptionExt { /// `replace` if the given `value` is `Some` fn replace_if_some(&mut self, value: Self); } impl<T> OptionExt for Option<T> { #[inline] fn replace_if_some(&mut self, value: Self) { if let Some(value) = value { self.replace(value); } } } pub trait BitSliceExt { /// Get a single bit from the slice. /// A convenience wrapper around [`BitSlice::get`]. fn get_bit(&self, index: usize) -> Option<bool>; } impl<T: BitStore, O: BitOrder> BitSliceExt for BitSlice<T, O> { #[inline] fn get_bit(&self, index: usize) -> Option<bool> { self.get(index).as_deref().copied() } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/mmap_hashmap.rs
lib/common/common/src/mmap_hashmap.rs
#[cfg(any(test, feature = "testing"))] use std::collections::{BTreeMap, BTreeSet}; use std::hash::Hash; use std::io::{self, Cursor, Write}; use std::marker::PhantomData; use std::mem::{align_of, size_of}; use std::path::Path; use std::str; use fs_err::File; use memmap2::Mmap; use memory::madvise::{AdviceSetting, Madviseable}; use memory::mmap_ops::open_read_mmap; use ph::fmph::Function; #[cfg(any(test, feature = "testing"))] use rand::Rng as _; #[cfg(any(test, feature = "testing"))] use rand::rngs::StdRng; use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use crate::zeros::WriteZerosExt as _; type ValuesLen = u32; /// On-disk hash map backed by a memory-mapped file. /// /// The layout of the memory-mapped file is as follows: /// /// | header | phf | padding | alignment | buckets | entries | /// |------------|-----|---------------|-----------|---------|-----------| /// | [`Header`] | | `u8[0..4095]` | `u8[]` | `u32[]` | See below | /// /// ## Entry format for the `str` key /// /// | key | `'\0xff'` | padding | values_len | padding | values | /// |--------|-----------|---------|------------|---------|--------| /// | `u8[]` | `u8` | `u8[]` | `u32` | `u8[]` | `V[]` | /// /// ## Entry format for the `i64` key /// /// | key | values_len | padding | values | /// |-------|------------|---------|--------| /// | `i64` | `u32` | `u8[]` | `V[]` | pub struct MmapHashMap<K: ?Sized, V: Sized + FromBytes + Immutable + IntoBytes + KnownLayout> { mmap: Mmap, header: Header, phf: Function, _phantom_key: PhantomData<K>, _phantom_value: PhantomData<V>, } #[repr(C)] #[derive(Copy, Clone, Debug, FromBytes, Immutable, IntoBytes, KnownLayout)] struct Header { key_type: [u8; 8], buckets_pos: u64, buckets_count: u64, } const PADDING_SIZE: usize = 4096; pub const BUCKET_OFFSET_OVERHEAD: usize = size_of::<BucketOffset>(); /// Overhead of reading a bucket in mmap hashmap. 
const SIZE_OF_LENGTH_FIELD: usize = size_of::<u32>(); const SIZE_OF_KEY: usize = size_of::<u64>(); /// How many bytes we need to read from disk to locate an entry. pub const READ_ENTRY_OVERHEAD: usize = SIZE_OF_LENGTH_FIELD + SIZE_OF_KEY + BUCKET_OFFSET_OVERHEAD; type BucketOffset = u64; impl<K: Key + ?Sized, V: Sized + FromBytes + Immutable + IntoBytes + KnownLayout> MmapHashMap<K, V> { /// Save `map` contents to `path`. pub fn create<'a>( path: &Path, map: impl Iterator<Item = (&'a K, impl ExactSizeIterator<Item = V>)> + Clone, ) -> io::Result<()> where K: 'a, { let keys_vec: Vec<_> = map.clone().map(|(k, _)| k).collect(); let keys_count = keys_vec.len(); let phf = Function::from(keys_vec); // == First pass == let mut file_size = 0; // 1. Header file_size += size_of::<Header>(); // 2. PHF file_size += phf.write_bytes(); // 3. Padding let padding_len = file_size.next_multiple_of(PADDING_SIZE) - file_size; file_size += padding_len; // 4. Buckets let buckets_size = keys_count * size_of::<BucketOffset>(); let bucket_align = buckets_size.next_multiple_of(K::ALIGN) - buckets_size; file_size += bucket_align; // Important: Bucket Position points after the alignment for backward compatibility. let buckets_pos = file_size; file_size += buckets_size; // 5. Data let mut buckets = vec![0 as BucketOffset; keys_count]; let mut last_bucket = 0usize; for (k, v) in map.clone() { last_bucket = last_bucket.next_multiple_of(K::ALIGN); buckets[phf.get(k).expect("Key not found in phf") as usize] = last_bucket as BucketOffset; last_bucket += Self::entry_bytes(k, v.len()); } file_size += last_bucket; _ = file_size; // == Second pass == let (file, temp_path) = tempfile::Builder::new() .prefix(path.file_name().ok_or(io::ErrorKind::InvalidInput)?) .tempfile_in(path.parent().ok_or(io::ErrorKind::InvalidInput)?)? .into_parts(); let file = File::from_parts::<&Path>(file, temp_path.as_ref()); let mut bufw = io::BufWriter::new(file); // 1. 
Header let header = Header { key_type: K::NAME, buckets_pos: buckets_pos as u64, buckets_count: keys_count as u64, }; bufw.write_all(header.as_bytes())?; // 2. PHF phf.write(&mut bufw)?; // 3. Padding bufw.write_zeros(padding_len)?; // 4. Buckets // Align the buckets to `K::ALIGN`, to make sure Entry.key is aligned. bufw.write_zeros(bucket_align)?; bufw.write_all(buckets.as_bytes())?; // 5. Data let mut pos = 0usize; for (key, values) in map { let next_pos = pos.next_multiple_of(K::ALIGN); if next_pos > pos { bufw.write_zeros(next_pos - pos)?; pos = next_pos; } let entry_size = Self::entry_bytes(key, values.len()); pos += entry_size; key.write(&mut bufw)?; bufw.write_zeros(Self::key_padding_bytes(key))?; bufw.write_all((values.len() as ValuesLen).as_bytes())?; bufw.write_zeros(Self::values_len_padding_bytes())?; for i in values { bufw.write_all(i.as_bytes())?; } } // Explicitly flush write buffer so we can catch IO errors bufw.flush()?; let file = bufw.into_inner().unwrap(); file.sync_all()?; drop(file); temp_path.persist(path)?; Ok(()) } const VALUES_LEN_SIZE: usize = size_of::<ValuesLen>(); const VALUE_SIZE: usize = size_of::<V>(); fn key_size_with_padding(key: &K) -> usize { let key_size = key.write_bytes(); key_size.next_multiple_of(Self::VALUE_SIZE) } fn key_padding_bytes(key: &K) -> usize { let key_size = key.write_bytes(); key_size.next_multiple_of(Self::VALUE_SIZE) - key_size } const fn values_len_size_with_padding() -> usize { Self::VALUES_LEN_SIZE.next_multiple_of(Self::VALUE_SIZE) } const fn values_len_padding_bytes() -> usize { Self::VALUES_LEN_SIZE.next_multiple_of(Self::VALUE_SIZE) - Self::VALUES_LEN_SIZE } /// Return the total size of the entry in bytes, including: key, values_len, values, all with /// padding. fn entry_bytes(key: &K, values_len: usize) -> usize { Self::key_size_with_padding(key) + Self::values_len_size_with_padding() + values_len * Self::VALUE_SIZE } /// Load the hash map from file. 
pub fn open(path: &Path, populate: bool) -> io::Result<Self> { let mmap = open_read_mmap(path, AdviceSetting::Global, populate)?; let (header, _) = Header::read_from_prefix(mmap.as_ref()).map_err(|_| io::ErrorKind::InvalidData)?; if header.key_type != K::NAME { return Err(io::Error::new( io::ErrorKind::InvalidData, "Key type mismatch", )); } let phf = Function::read(&mut Cursor::new( &mmap .get(size_of::<Header>()..header.buckets_pos as usize) .ok_or(io::ErrorKind::InvalidData)?, ))?; Ok(MmapHashMap { mmap, header, phf, _phantom_key: PhantomData, _phantom_value: PhantomData, }) } pub fn keys_count(&self) -> usize { self.header.buckets_count as usize } pub fn keys(&self) -> impl Iterator<Item = &K> { (0..self.keys_count()).filter_map(|i| match self.get_entry(i) { Ok(entry) => K::from_bytes(entry), Err(err) => { debug_assert!(false, "Error reading entry for key {i}: {err}"); log::error!("Error reading entry for key {i}: {err}"); None } }) } pub fn iter(&self) -> impl Iterator<Item = (&K, &[V])> { (0..self.keys_count()).filter_map(|i| { let entry = self.get_entry(i).ok()?; let key = K::from_bytes(entry)?; let values = Self::get_values_from_entry(entry, key).ok()?; Some((key, values)) }) } /// Get the values associated with the `key`. 
pub fn get(&self, key: &K) -> io::Result<Option<&[V]>> { let Some(hash) = self.phf.get(key) else { return Ok(None); }; let entry = self.get_entry(hash as usize)?; if !key.matches(entry) { return Ok(None); } Ok(Some(Self::get_values_from_entry(entry, key)?)) } fn get_values_from_entry<'a>(entry: &'a [u8], key: &K) -> io::Result<&'a [V]> { // ## Entry format for the `i64` key // // | key | values_len | padding | values | // |-------|------------|---------|--------| // | `i64` | `u32` | u8[] | `V[]` | let key_size = key.write_bytes(); let key_size_with_padding = key_size.next_multiple_of(Self::VALUE_SIZE); let entry = entry.get(key_size_with_padding..).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!( "Can't read entry from mmap, \ key_size_with_padding {key_size_with_padding} is out of bounds" ), ) })?; let (values_len, _) = ValuesLen::read_from_prefix(entry).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "Can't read values_len from mmap", ) })?; let values_from = Self::values_len_size_with_padding(); let values_to = values_from + values_len as usize * Self::VALUE_SIZE; let entry = entry.get(values_from..values_to).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!("Can't read values from mmap, relative range: {values_from}:{values_to}"), ) })?; let result = <[V]>::ref_from_bytes(entry).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "Can't convert mmap range into slice", ) })?; Ok(result) } fn get_entry(&self, index: usize) -> io::Result<&[u8]> { // Absolute position of the bucket array in the mmap. 
let bucket_from = self.header.buckets_pos as usize; let bucket_to = bucket_from + self.header.buckets_count as usize * size_of::<BucketOffset>(); let bucket_val = self .mmap .get(bucket_from..bucket_to) .and_then(|b| <[BucketOffset]>::ref_from_bytes(b).ok()) .and_then(|buckets| buckets.get(index).copied()) .ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!("Can't read bucket from mmap, pos: {bucket_from}:{bucket_to}"), ) })?; let entry_start = self.header.buckets_pos as usize + self.header.buckets_count as usize * size_of::<BucketOffset>() + bucket_val as usize; self.mmap.get(entry_start..).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!("Can't read entry from mmap, bucket_val {entry_start} is out of bounds"), ) }) } /// Populate all pages in the mmap. /// Block until all pages are populated. pub fn populate(&self) -> io::Result<()> { self.mmap.populate(); Ok(()) } } /// A key that can be stored in the hash map. pub trait Key: Sync + Hash { const ALIGN: usize; const NAME: [u8; 8]; /// Returns number of bytes which `write` will write. fn write_bytes(&self) -> usize; /// Write the key to `buf`. fn write(&self, buf: &mut impl Write) -> io::Result<()>; /// Check whether the first [`Key::write_bytes()`] of `buf` match the key. fn matches(&self, buf: &[u8]) -> bool; /// Try to read the key from `buf`. fn from_bytes(buf: &[u8]) -> Option<&Self>; } impl Key for str { const ALIGN: usize = align_of::<u8>(); const NAME: [u8; 8] = *b"str\0\0\0\0\0"; fn write_bytes(&self) -> usize { self.len() + 1 } fn write(&self, buf: &mut impl Write) -> io::Result<()> { buf.write_all(self.as_bytes())?; buf.write_all(&[0xFF])?; // 0xFF is not a valid leading byte of a UTF-8 sequence. Ok(()) } fn matches(&self, buf: &[u8]) -> bool { // The sentinel value 0xFF is used to ensure that `self` has the same length as the string // in the entry buffer. // // Suppose `self` is a prefix of the string in the entry buffer. 
(it's not very likely since // it would require a PHF collision, but it is still possible). // We'd like this method to return `false` in this case. So we need not just check that the // first `self.len()` bytes of `buf` are equal to `self`, but also that they have the same // length. To achieve that, we compare `self + [0xFF]` with `buf + [0xFF]`. // // ┌───self────┐ ┌───self────┐ ┌─────self─────┐ // 'f' 'o' 'o' FF 'f' 'o' 'o' FF 'f' 'o' 'o' FF // 'f' 'o' 'o' FF 'f' 'o' 'o' 'b' 'a' 'r' FF 'f' 'o' 'o' FF 'b' 'a' 'r' FF // └───entry───┘ └─────────entry─────────┘ └───────────entry──────────┘ // Case 1 Case 2 Case 3 // (happy) (collision) (never happens) // // 1. The case 1 is the happy path. This function returns `true`. // 2. In the case 2, `self` is a prefix of `entry`, but since we are also checking the // sentinel, this function returns `false`. (0xFF != 'b') // 3. Hypothetical case 3 might never happen unless the index data is corrupted. This is // because it assumes that `entry` is a concatenation of three parts: a valid UTF-8 // string ('foo'), a byte 0xFF, and the rest ('bar'). Concatenating a valid UTF-8 string // with 0xFF will always result in an invalid UTF-8 string. Such string could not be // added to the index since we are adding only valid UTF-8 strings as Rust enforces the // validity of `str`/`String` types. 
buf.get(..self.len()) == Some(IntoBytes::as_bytes(self)) && buf.get(self.len()) == Some(&0xFF) } fn from_bytes(buf: &[u8]) -> Option<&Self> { let len = buf.iter().position(|&b| b == 0xFF)?; str::from_utf8(&buf[..len]).ok() } } impl Key for i64 { const ALIGN: usize = align_of::<i64>(); const NAME: [u8; 8] = *b"i64\0\0\0\0\0"; fn write_bytes(&self) -> usize { size_of::<i64>() } fn write(&self, buf: &mut impl Write) -> io::Result<()> { buf.write_all(self.as_bytes()) } fn matches(&self, buf: &[u8]) -> bool { buf.get(..size_of::<i64>()) == Some(self.as_bytes()) } fn from_bytes(buf: &[u8]) -> Option<&Self> { Some(i64::ref_from_prefix(buf).ok()?.0) } } impl Key for u128 { const ALIGN: usize = size_of::<u128>(); const NAME: [u8; 8] = *b"u128\0\0\0\0"; fn write_bytes(&self) -> usize { size_of::<u128>() } fn write(&self, buf: &mut impl Write) -> io::Result<()> { buf.write_all(self.as_bytes()) } fn matches(&self, buf: &[u8]) -> bool { buf.get(..size_of::<u128>()) == Some(self.as_bytes()) } fn from_bytes(buf: &[u8]) -> Option<&Self> { match u128::ref_from_prefix(buf) { Ok(res) => Some(res.0), Err(err) => { debug_assert!(false, "Error reading u128 from mmap: {err}"); log::error!("Error reading u128 from mmap: {err}"); None } } } } #[cfg(any(test, feature = "testing"))] pub fn gen_map<T: Eq + Ord + Hash>( rng: &mut StdRng, gen_key: impl Fn(&mut StdRng) -> T, count: usize, ) -> BTreeMap<T, BTreeSet<u32>> { let mut map = BTreeMap::new(); for _ in 0..count { let key = repeat_until(|| gen_key(rng), |key| !map.contains_key(key)); let set = (0..rng.random_range(1..=100)) .map(|_| rng.random_range(0..=1000)) .collect::<BTreeSet<_>>(); map.insert(key, set); } map } #[cfg(any(test, feature = "testing"))] pub fn gen_ident(rng: &mut StdRng) -> String { (0..rng.random_range(5..=32)) .map(|_| rng.random_range(b'a'..=b'z') as char) .collect() } #[cfg(any(test, feature = "testing"))] fn repeat_until<T>(mut f: impl FnMut() -> T, cond: impl Fn(&T) -> bool) -> T { std::iter::from_fn(|| 
Some(f())).find(|v| cond(v)).unwrap() } #[cfg(test)] mod tests { use std::collections::HashMap; use rand::SeedableRng as _; use super::*; #[test] fn test_mmap_hash() { test_mmap_hash_impl(gen_ident, |s| s.as_str(), |s| s.to_owned()); test_mmap_hash_impl(|rng| rng.random::<i64>(), |i| i, |i| *i); test_mmap_hash_impl(|rng| rng.random::<u128>(), |i| i, |i| *i); } fn test_mmap_hash_impl<K: Key + ?Sized, K1: Ord + Hash>( generator: impl Clone + Fn(&mut StdRng) -> K1, as_ref: impl Fn(&K1) -> &K, from_ref: impl Fn(&K) -> K1, ) { let mut rng = StdRng::seed_from_u64(42); let tmpdir = tempfile::Builder::new().tempdir().unwrap(); let map = gen_map(&mut rng, generator.clone(), 1000); MmapHashMap::<K, u32>::create( &tmpdir.path().join("map"), map.iter().map(|(k, v)| (as_ref(k), v.iter().copied())), ) .unwrap(); let mmap = MmapHashMap::<K, u32>::open(&tmpdir.path().join("map"), false).unwrap(); // Non-existing keys should return None for _ in 0..1000 { let key = repeat_until(|| generator(&mut rng), |key| !map.contains_key(key)); assert!(mmap.get(as_ref(&key)).unwrap().is_none()); } // check keys iterator for key in mmap.keys() { let key = from_ref(key); assert!(map.contains_key(&key)); } assert_eq!(mmap.keys_count(), map.len()); assert_eq!(mmap.keys().count(), map.len()); for (k, v) in mmap.iter() { let v = v.iter().copied().collect::<BTreeSet<_>>(); assert_eq!(map.get(&from_ref(k)).unwrap(), &v); } let keys: Vec<_> = mmap.keys().collect(); assert_eq!(keys.len(), map.len()); // Existing keys should return the correct values for (k, v) in map { assert_eq!( mmap.get(as_ref(&k)).unwrap().unwrap(), &v.into_iter().collect::<Vec<_>>() ); } } #[test] fn test_mmap_hash_impl_u64_value() { let mut rng = StdRng::seed_from_u64(42); let tmpdir = tempfile::Builder::new().tempdir().unwrap(); let mut map: HashMap<i64, BTreeSet<u64>> = Default::default(); for key in 0..10i64 { map.insert(key, (0..100).map(|_| rng.random_range(0..=1000)).collect()); } MmapHashMap::<i64, u64>::create( 
&tmpdir.path().join("map"), map.iter().map(|(k, v)| (k, v.iter().copied())), ) .unwrap(); let mmap = MmapHashMap::<i64, u64>::open(&tmpdir.path().join("map"), true).unwrap(); for (k, v) in map { assert_eq!( mmap.get(&k).unwrap().unwrap(), &v.into_iter().collect::<Vec<_>>() ); } assert!(mmap.get(&100).unwrap().is_none()) } #[test] fn test_mmap_hash_impl_u128_value() { let mut rng = StdRng::seed_from_u64(42); let tmpdir = tempfile::Builder::new().tempdir().unwrap(); let mut map: HashMap<u128, BTreeSet<u32>> = Default::default(); map.insert( 9812384971724u128, (0..100).map(|_| rng.random_range(0..=1000)).collect(), ); MmapHashMap::<u128, u32>::create( &tmpdir.path().join("map"), map.iter().map(|(k, v)| (k, v.iter().copied())), ) .unwrap(); let mmap = MmapHashMap::<u128, u32>::open(&tmpdir.path().join("map"), true).unwrap(); let keys: Vec<_> = mmap.keys().collect(); assert_eq!(keys.len(), map.len()); for (k, v) in map { assert_eq!( mmap.get(&k).unwrap().unwrap(), &v.into_iter().collect::<Vec<_>>() ); } assert!(mmap.get(&100).unwrap().is_none()) } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/typelevel.rs
lib/common/common/src/typelevel.rs
//! Type-level boolean and option types. //! //! A value of these traits can be used to conditionally enable or disable //! particular functions in generic contexts. Compared to using [`bool`] or //! [`Option`], there are no runtime checks, nor performance overhead, nor //! potential panics due to unwrapping an [`Option`]. //! //! # Example //! //! ``` //! use common::typelevel::{TBool, True, False}; //! //! trait MyTrait { //! /// Whether the optional method `increment_value` can be called. //! type CanIncrement: TBool; //! /// Optional method. Because it accepts `CanIncrement` as an argument, //! /// it can only be called if `CanIncrement` is `True`. //! fn increment_value(&mut self, proof: Self::CanIncrement) -> u32; //! } //! //! struct Unit; //! impl MyTrait for Unit { //! type CanIncrement = False; //! fn increment_value(&mut self, proof: Self::CanIncrement) -> u32 { //! // The value of type `False` is a proof to the compiler that this //! // method cannot be called. It can be coerced to any type using an //! // empty match. //! match proof {} // coerced to `u32` //! } //! } //! //! struct Counter(u32); //! impl MyTrait for Counter { //! type CanIncrement = True; //! fn increment_value(&mut self, _proof: Self::CanIncrement) -> u32 { //! self.0 += 1; //! self.0 //! } //! } //! //! /// This function calls `increment_value` twice. It requires a proof that //! /// `increment_value` can be called, and passes it to the method. Even if //! /// `CanIncrement` is `False`, this function still type checks, but can //! /// not be called. Note that there are no runtime checks in this method. //! fn increment_twice<T: MyTrait>(proof: T::CanIncrement, x: &mut T) -> u32 { //! x.increment_value(proof); //! x.increment_value(proof) //! } //! //! fn run<T: MyTrait>(x: &mut T) { //! // The following `if` is the only runtime check in this example. //! if let Some(enabled) = T::CanIncrement::VALUE { //! let v = increment_twice(enabled, x); //! 
println!("After incrementing twice: {v}"); //! } //! } //! //! fn main() { //! run(&mut Unit); //! run(&mut Counter(100)); //! } //! ``` use std::marker::PhantomData; use bytemuck::TransparentWrapper; /// A boolean value lifted to the type level. pub trait TBool: Sized + Clone + Copy { /// Either `Some(True)` or `None`. const VALUE: Option<Self>; /// The corresponding [`trait TOption`] type. type TOption<T>: TOption<T, IsSome = Self>; /// Wrap a value of type `T`. Similar to [`bool::then_some`] but lifted to /// the type level. /// /// Returns either: /// - `Some(TSome(value))` if `Self` is [`True`], /// - `None` if `Self` is [`False`]. fn then_some<T>(value: T) -> Option<Self::TOption<T>>; /// Similar to [`TBool::then_some`], but for references. fn then_some_ref<T>(value: &T) -> Option<&Self::TOption<T>>; } /// An [`Option`]-like wrapper lifted to the type level. pub trait TOption<T> { /// The corresponding [`TBool`] type. Similar to [`Option::is_some`]. type IsSome: TBool; /// Get the corresponding [`TBool`] value. Similar to [`Option::is_some`]. fn is_some(&self) -> Self::IsSome; /// Get the wrapped value. Similar to [`Option::unwrap`] but always succeeds /// and does not panic. fn get(&self) -> &T; } /// A [`TBool`] implementation of `true`. /// /// This type is a zero-sized unit type. #[derive(Debug, Clone, Copy)] pub struct True; /// A [`TBool`] implementation of `false`. /// /// This type cannot be instantiated, similar to the [never] type. /// /// [never]: https://doc.rust-lang.org/std/primitive.never.html #[derive(Debug, Clone, Copy)] pub enum False {} /// A [`TOption`] type that corresponds to [`True`]. /// /// Similar to [`Option::Some`] but lifted to the type level. /// This type is a transparent wrapper around a value of type `T`. #[derive(Debug, Clone, Copy, PartialEq, Eq, TransparentWrapper)] #[repr(transparent)] pub struct TSome<T>(pub T); /// A [`TOption`] type that corresponds to [`False`]. 
/// /// Similar to [`Option::None`] but lifted to the type level. /// This type cannot be instantiated (similar to the [never] type). /// /// [never]: https://doc.rust-lang.org/std/primitive.never.html #[derive(Debug, Clone, Copy)] pub struct TNone<T>(False, PhantomData<T>); impl TBool for True { type TOption<T> = TSome<T>; const VALUE: Option<Self> = Some(True); #[inline] fn then_some<T>(value: T) -> Option<Self::TOption<T>> { Some(TSome(value)) } #[inline] fn then_some_ref<T>(value: &T) -> Option<&Self::TOption<T>> { Some(TSome::wrap_ref(value)) } } impl<T> TOption<T> for TSome<T> { type IsSome = True; #[inline] fn is_some(&self) -> Self::IsSome { True } #[inline] fn get(&self) -> &T { &self.0 } } impl TBool for False { type TOption<T> = TNone<T>; const VALUE: Option<Self> = None; #[inline] fn then_some<T>(_: T) -> Option<Self::TOption<T>> { None } #[inline] fn then_some_ref<T>(_: &T) -> Option<&Self::TOption<T>> { None } } impl<T> TOption<T> for TNone<T> { type IsSome = False; fn is_some(&self) -> Self::IsSome { match self.0 {} } fn get(&self) -> &T { match self.0 {} } }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/rate_limiting.rs
lib/common/common/src/rate_limiting.rs
use std::time::{Duration, Instant};

/// A rate limiter based on the token bucket algorithm.
///
/// Configured in requests per minute: tokens are refilled continuously at
/// `tokens_per_sec`, and the bucket is capped at `capacity_per_minute` tokens
/// so that short bursts are allowed.
#[derive(Debug)]
pub struct RateLimiter {
    // Maximum tokens the bucket can hold.
    capacity_per_minute: u64,
    // Tokens added per second.
    tokens_per_sec: f64,
    // Current tokens in the bucket.
    tokens: f64,
    // Last time tokens were updated.
    last_check: Instant,
}

impl RateLimiter {
    /// Build a limiter that allows `requests_num` requests per minute.
    ///
    /// The bucket starts full, so an initial burst is permitted.
    pub fn new_per_minute(requests_num: usize) -> Self {
        RateLimiter {
            capacity_per_minute: requests_num as u64,
            tokens_per_sec: requests_num as f64 / 60.0,
            // Start with a full bucket to allow burst at the beginning.
            tokens: requests_num as f64,
            last_check: Instant::now(),
        }
    }

    /// Attempt to consume a given number of tokens.
    ///
    /// Returns:
    /// - `Ok(())` if allowed; the tokens are consumed.
    /// - `Err(RateLimitError)` if denied, stating whether the request can ever
    ///   succeed (`Retry`, with an estimated wait) or never can
    ///   (`AlwaysOverBudget`).
    pub fn try_consume(&mut self, tokens: f64) -> Result<(), RateLimitError> {
        let capacity = self.capacity_per_minute as f64;

        // A request larger than the whole bucket can never be satisfied.
        if tokens > capacity {
            return Err(RateLimitError::AlwaysOverBudget(
                "request larger than rate limiter capacity, please try to split your request",
            ));
        }

        // Refill the bucket for the time elapsed since the previous call,
        // clamped to the bucket capacity.
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_check);
        self.last_check = now;
        self.tokens = (self.tokens + self.tokens_per_sec * elapsed.as_secs_f64()).min(capacity);

        if self.tokens >= tokens {
            // Enough budget: consume and allow the request.
            self.tokens -= tokens;
            return Ok(());
        }

        // Not enough budget: estimate how long the caller should wait.
        let missing_tokens = tokens - self.tokens;
        let retry_after = Duration::from_secs_f64(missing_tokens / self.tokens_per_sec);
        debug_assert!(retry_after > Duration::from_secs(0));
        Err(RateLimitError::Retry(RetryError {
            tokens_available: self.tokens,
            retry_after,
        }))
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
pub struct RetryError {
    /// Number of tokens that were available at the time of the request but didn't suffice.
    pub tokens_available: f64,
    /// Estimated time to wait before retrying request
    pub retry_after: Duration,
}

/// Error when too many tokens have been tried to consume from the rate limiter.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RateLimitError {
    /// Operation that will always be over budget.
    AlwaysOverBudget(&'static str),
    /// Operation that can be retried later.
    Retry(RetryError),
}

#[cfg(test)]
mod tests {
    use super::*;

    fn assert_eq_floats(a: f64, b: f64, tolerance: f64) {
        assert!(
            (a - b).abs() < tolerance,
            "assertion failed: `(left == right)` (left: `{a}`, right: `{b}`, tolerance: `{tolerance}`)",
        );
    }

    #[test]
    fn test_rate_one_per_minute() {
        let mut limiter = RateLimiter::new_per_minute(1);
        assert_eq!(limiter.capacity_per_minute, 1);
        assert_eq_floats(limiter.tokens_per_sec, 0.016, 0.001);
        assert_eq!(limiter.tokens, 1.0);

        assert_eq!(limiter.try_consume(1.0), Ok(()));
        assert_eq!(limiter.tokens, 0.0);

        // rate limit reached
        assert!(limiter.try_consume(1.0).is_err());
    }

    #[test]
    fn test_rate_more_per_minute() {
        let mut limiter = RateLimiter::new_per_minute(600);
        assert_eq!(limiter.capacity_per_minute, 600);
        assert_eq!(limiter.tokens_per_sec, 10.0);
        assert_eq!(limiter.tokens, 600.0);

        assert_eq!(limiter.try_consume(1.0), Ok(()));
        assert_eq!(limiter.tokens, 599.0);

        assert_eq!(limiter.try_consume(10.0), Ok(()));
        assert_eq_floats(limiter.tokens, 589.0, 0.01);
    }

    #[test]
    fn test_rate_huge_request() {
        let mut limiter = RateLimiter::new_per_minute(100);
        // request too large to ever pass the rate limiter
        assert!(limiter.try_consume(99999.0).is_err());
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/math.rs
lib/common/common/src/math.rs
use crate::types::ScoreType; /// Acts as a substitute for sigmoid function, but faster because it doesn't do exponent. /// /// Range of output is (-1, 1) #[inline] pub fn fast_sigmoid(x: ScoreType) -> ScoreType { // from https://stackoverflow.com/questions/10732027/fast-sigmoid-algorithm x / (1.0 + x.abs()) } /// Acts as a substitute for sigmoid function, but faster because it doesn't do exponent. /// /// Scales the output to fit within (0, 1) #[inline] pub fn scaled_fast_sigmoid(x: ScoreType) -> ScoreType { 0.5 * (fast_sigmoid(x) + 1.0) } pub fn is_close(a: f64, b: f64) -> bool { const ABS_TOL: f64 = 1e-6; const REL_TOL: f64 = 1e-6; is_close_tol(a, b, ABS_TOL, REL_TOL) } pub fn is_close_tol(a: f64, b: f64, abs_tol: f64, rel_tol: f64) -> bool { let tol = a.abs().max(b.abs()) * rel_tol + abs_tol; (a - b).abs() <= tol }
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/validation.rs
lib/common/common/src/validation.rs
use std::borrow::Cow; use serde::Serialize; use validator::{Validate, ValidationError, ValidationErrors}; // Multivector should be small enough to fit the chunk of vector storage #[cfg(debug_assertions)] pub const MAX_MULTIVECTOR_FLATTENED_LEN: usize = 32 * 1024; #[cfg(not(debug_assertions))] pub const MAX_MULTIVECTOR_FLATTENED_LEN: usize = 1024 * 1024; #[allow(clippy::manual_try_fold)] // `try_fold` can't be used because it shortcuts on Err pub fn validate_iter<T: Validate>(iter: impl Iterator<Item = T>) -> Result<(), ValidationErrors> { let errors = iter .filter_map(|v| v.validate().err()) .fold(Err(ValidationErrors::new()), |bag, err| { ValidationErrors::merge(bag, "?", Err(err)) }) .unwrap_err(); errors.errors().is_empty().then_some(()).ok_or(errors) } /// Validate the value is in `[min, max]` #[inline] pub fn validate_range_generic<N>( value: N, min: Option<N>, max: Option<N>, ) -> Result<(), ValidationError> where N: PartialOrd + Serialize, { // If value is within bounds we're good if min.as_ref().map(|min| &value >= min).unwrap_or(true) && max.as_ref().map(|max| &value <= max).unwrap_or(true) { return Ok(()); } let mut err = ValidationError::new("range"); if let Some(min) = min { err.add_param(Cow::from("min"), &min); } if let Some(max) = max { err.add_param(Cow::from("max"), &max); } Err(err) } /// Validate that `value` is a non-empty string. pub fn validate_not_empty(value: &str) -> Result<(), ValidationError> { if value.is_empty() { Err(ValidationError::new("not_empty")) } else { Ok(()) } } /// Validate the collection name contains no illegal characters /// /// This does not check the length of the name. 
pub fn validate_collection_name(value: &str) -> Result<(), ValidationError> {
    const INVALID_CHARS: [char; 11] =
        ['<', '>', ':', '"', '/', '\\', '|', '?', '*', '\0', '\u{1F}'];

    // Report the first offending character back to the user, if any.
    let Some(c) = INVALID_CHARS.into_iter().find(|c| value.contains(*c)) else {
        return Ok(());
    };

    let mut err = ValidationError::new("does_not_contain");
    err.add_param(Cow::from("pattern"), &c);
    err.message = Some(format!("collection name cannot contain \"{c}\" char").into());
    Err(err)
}

/// Validate the collection name contains no illegal characters, legacy edition
///
/// Similar to [`validate_collection_name`], but this still allows some special characters that
/// were supported pre Qdrant 1.5. More specifically, this only disallows characters that could
/// never have been used on both Linux and Windows filesystems.
///
/// This does not check the length of the name.
pub fn validate_collection_name_legacy(value: &str) -> Result<(), ValidationError> {
    // Disallowed characters on both Linux and Windows, sourced from:
    // <https://stackoverflow.com/a/31976060/1000145>
    const INVALID_CHARS: [char; 2] = ['/', '\0'];

    let Some(c) = INVALID_CHARS.into_iter().find(|c| value.contains(*c)) else {
        return Ok(());
    };

    let mut err = ValidationError::new("does_not_contain");
    err.add_param(Cow::from("pattern"), &c);
    err.message = Some(format!("collection name cannot contain \"{c}\" char").into());
    Err(err)
}

/// Validate a polygon has at least 4 points and is closed.
pub fn validate_geo_polygon<T>(points: &[T]) -> Result<(), ValidationError>
where
    T: PartialEq,
{
    const MIN_LENGTH: usize = 4;

    if points.len() < MIN_LENGTH {
        let mut err = ValidationError::new("min_polygon_length");
        err.add_param(Cow::from("length"), &points.len());
        err.add_param(Cow::from("min_length"), &MIN_LENGTH);
        return Err(err);
    }

    // A closed polygon repeats its first point as its last point.
    // Both `first()` and `last()` are `Some` here thanks to the length check.
    if points.first() != points.last() {
        return Err(ValidationError::new("closed_polygon"));
    }

    Ok(())
}

/// Validate that shard request has two different peers
///
/// We do allow transferring from/to the same peer if the source and target shard are different.
/// This may be used during resharding shard transfers.
pub fn validate_shard_different_peers(
    from_peer_id: u64,
    to_peer_id: u64,
    shard_id: u32,
    to_shard_id: Option<u32>,
) -> Result<(), ValidationErrors> {
    // Different peers are always allowed.
    if from_peer_id != to_peer_id {
        return Ok(());
    }

    // Same peer is still allowed when moving between two different shards.
    if to_shard_id.is_some_and(|to_shard_id| to_shard_id != shard_id) {
        return Ok(());
    }

    let mut error = ValidationError::new("must_not_match");
    error.add_param(Cow::from("value"), &to_peer_id.to_string());
    error.add_param(Cow::from("other_field"), &"from_peer_id");
    error.add_param(Cow::from("other_value"), &from_peer_id.to_string());
    error.add_param(
        Cow::from("message"),
        &format!("cannot transfer shard to itself, \"to_peer_id\" must be different than {from_peer_id} in \"from_peer_id\""),
    );

    let mut errors = ValidationErrors::new();
    errors.add("to_peer_id", error);
    Err(errors)
}

/// Validate optional lowercase hexadecimal sha256 hash string.
pub fn validate_sha256_hash(value: &str) -> Result<(), ValidationError> {
    // A sha256 digest in hex form is always exactly 64 characters.
    if value.len() != 64 {
        let mut err = ValidationError::new("invalid_sha256_hash");
        err.add_param(Cow::from("length"), &value.len());
        err.add_param(Cow::from("expected_length"), &64);
        return Err(err);
    }
    // Despite the "lowercase" doc above, uppercase hex digits are accepted
    // too (`is_ascii_hexdigit` matches both cases).
    if !value.chars().all(|c| c.is_ascii_hexdigit()) {
        let mut err = ValidationError::new("invalid_sha256_hash");
        err.add_param(
            Cow::from("message"),
            &"invalid characters, expected 0-9, a-f, A-F",
        );
        return Err(err);
    }
    Ok(())
}

/// Validate multi-vector shape given only the length of each sub-vector.
///
/// Checks: not empty, no empty sub-vectors, flattened size below
/// `MAX_MULTIVECTOR_FLATTENED_LEN`, and all sub-vectors sharing one dimension.
pub fn validate_multi_vector_by_length(multivec_length: &[usize]) -> Result<(), ValidationErrors> {
    // non_empty
    if multivec_length.is_empty() {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("empty_multi_vector");
        err.add_param(Cow::from("message"), &"multi vector must not be empty");
        errors.add("data", err);
        return Err(errors);
    }
    // check all individual vectors non-empty
    if multivec_length.contains(&0) {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("empty_vector");
        err.add_param(Cow::from("message"), &"all vectors must be non-empty");
        errors.add("data", err);
        return Err(errors);
    }
    // total size of all vectors must be less than MAX_MULTIVECTOR_FLATTENED_LEN
    let flattened_len = multivec_length.iter().sum::<usize>();
    if flattened_len >= MAX_MULTIVECTOR_FLATTENED_LEN {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("multi_vector_too_large");
        err.add_param(Cow::from("message"), &format!("Total size of all vectors ({flattened_len}) must be less than {MAX_MULTIVECTOR_FLATTENED_LEN}"));
        errors.add("data", err);
        return Err(errors);
    }
    // all vectors must have the same length
    let dim = multivec_length[0];
    if let Some(bad_vec) = multivec_length.iter().find(|v| **v != dim) {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("inconsistent_multi_vector");
        err.add_param(
            Cow::from("message"),
            &format!(
                "all vectors must have the same dimension, found vector with dimension {bad_vec}",
            ),
        );
        errors.add("data", err);
        return Err(errors);
    }
    Ok(())
}

/// Validate a multi-vector by delegating to the per-length check above.
pub fn validate_multi_vector<T>(multivec: &[Vec<T>]) -> Result<(), ValidationErrors> {
    let multivec_length: Vec<_> = multivec.iter().map(|v| v.len()).collect();
    validate_multi_vector_by_length(&multivec_length)
}

/// Validate a flattened multi-vector: a positive vector count, a flattened
/// length below `MAX_MULTIVECTOR_FLATTENED_LEN`, and a flattened length that
/// divides evenly into `vectors_count` vectors.
pub fn validate_multi_vector_len(
    vectors_count: u32,
    flatten_dense_vector: &[f32],
) -> Result<(), ValidationErrors> {
    if vectors_count == 0 {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("invalid_vector_count");
        err.add_param(
            Cow::from("vectors_count"),
            &"vectors count must be greater than 0",
        );
        errors.add("data", err);
        return Err(errors);
    }
    let dense_vector_len = flatten_dense_vector.len();
    if dense_vector_len >= MAX_MULTIVECTOR_FLATTENED_LEN {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("Vector size is too large");
        err.add_param(Cow::from("vector_len"), &dense_vector_len);
        err.add_param(Cow::from("vectors_count"), &vectors_count);
        errors.add("data", err);
        return Err(errors);
    }
    // Each of the `vectors_count` sub-vectors must have the same dimension,
    // so the flattened length must be an exact multiple of the count.
    if !dense_vector_len.is_multiple_of(vectors_count as usize) {
        let mut errors = ValidationErrors::default();
        let mut err = ValidationError::new("invalid dense vector length for vectors count");
        err.add_param(Cow::from("vector_len"), &dense_vector_len);
        err.add_param(Cow::from("vectors_count"), &vectors_count);
        errors.add("data", err);
        Err(errors)
    } else {
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_validate_range_generic() {
        assert!(validate_range_generic(u64::MIN, None, None).is_ok());
        assert!(validate_range_generic(u64::MAX, None, None).is_ok());

        // Min
        assert!(validate_range_generic(1, Some(1), None).is_ok());
        assert!(validate_range_generic(0, Some(1), None).is_err());
        assert!(validate_range_generic(1.0, Some(1.0), None).is_ok());
        assert!(validate_range_generic(0.0, Some(1.0), None).is_err());

        // Max
        assert!(validate_range_generic(1, None, Some(1)).is_ok());
        assert!(validate_range_generic(2, None, Some(1)).is_err());
        assert!(validate_range_generic(1.0, None, Some(1.0)).is_ok());
        assert!(validate_range_generic(2.0, None, Some(1.0)).is_err());

        // Min/max (including contradictory bounds, which nothing satisfies)
        assert!(validate_range_generic(0, Some(1), Some(1)).is_err());
        assert!(validate_range_generic(1, Some(1), Some(1)).is_ok());
        assert!(validate_range_generic(2, Some(1), Some(1)).is_err());
        assert!(validate_range_generic(0, Some(1), Some(2)).is_err());
        assert!(validate_range_generic(1, Some(1), Some(2)).is_ok());
        assert!(validate_range_generic(2, Some(1), Some(2)).is_ok());
        assert!(validate_range_generic(3, Some(1), Some(2)).is_err());
        assert!(validate_range_generic(0, Some(2), Some(1)).is_err());
        assert!(validate_range_generic(1, Some(2), Some(1)).is_err());
        assert!(validate_range_generic(2, Some(2), Some(1)).is_err());
        assert!(validate_range_generic(3, Some(2), Some(1)).is_err());
        assert!(validate_range_generic(0.0, Some(1.0), Some(1.0)).is_err());
        assert!(validate_range_generic(1.0, Some(1.0), Some(1.0)).is_ok());
        assert!(validate_range_generic(2.0, Some(1.0), Some(1.0)).is_err());
        assert!(validate_range_generic(0.0, Some(1.0), Some(2.0)).is_err());
        assert!(validate_range_generic(1.0, Some(1.0), Some(2.0)).is_ok());
        assert!(validate_range_generic(2.0, Some(1.0), Some(2.0)).is_ok());
        assert!(validate_range_generic(3.0, Some(1.0), Some(2.0)).is_err());
        assert!(validate_range_generic(0.0, Some(2.0), Some(1.0)).is_err());
        assert!(validate_range_generic(1.0, Some(2.0), Some(1.0)).is_err());
        assert!(validate_range_generic(2.0, Some(2.0), Some(1.0)).is_err());
        assert!(validate_range_generic(3.0, Some(2.0), Some(1.0)).is_err());
    }

    #[test]
    fn test_validate_not_empty() {
        assert!(validate_not_empty("not empty").is_ok());
        assert!(validate_not_empty(" ").is_ok());
        assert!(validate_not_empty("").is_err());
    }

    #[test]
    fn test_validate_collection_name() {
        assert!(validate_collection_name("test_collection").is_ok());
        assert!(validate_collection_name("").is_ok());
        assert!(validate_collection_name("no/path").is_err());
        assert!(validate_collection_name("no*path").is_err());
        assert!(validate_collection_name("?").is_err());
        assert!(validate_collection_name("\0").is_err());

        // Legacy variant only rejects characters invalid on Linux as well.
        assert!(validate_collection_name_legacy("test_collection").is_ok());
        assert!(validate_collection_name_legacy("").is_ok());
        assert!(validate_collection_name_legacy("no/path").is_err());
        assert!(validate_collection_name_legacy("no*path").is_ok());
        assert!(validate_collection_name_legacy("?").is_ok());
        assert!(validate_collection_name_legacy("\0").is_err());
    }

    #[test]
    fn test_validate_geo_polygon() {
        // Too few points.
        let bad_polygon: Vec<(f64, f64)> = vec![];
        assert!(
            validate_geo_polygon(&bad_polygon).is_err(),
            "bad polygon should error on validation",
        );

        let bad_polygon = vec![(1., 1.), (2., 2.), (3., 3.)];
        assert!(
            validate_geo_polygon(&bad_polygon).is_err(),
            "bad polygon should error on validation",
        );

        // Enough points, but not closed.
        let bad_polygon = vec![(1., 1.), (2., 2.), (3., 3.), (4., 4.)];
        assert!(
            validate_geo_polygon(&bad_polygon).is_err(),
            "bad polygon should error on validation"
        );

        let good_polygon = vec![(1., 1.), (2., 2.), (3., 3.), (1., 1.)];
        assert!(
            validate_geo_polygon(&good_polygon).is_ok(),
            "good polygon should not error on validation",
        );
    }

    #[test]
    fn test_validate_sha256_hash() {
        assert!(
            validate_sha256_hash(
                "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
            )
            .is_ok(),
        );
        // 63 characters: too short.
        assert!(
            validate_sha256_hash("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde")
                .is_err(),
        );
        // 65 characters: too long.
        assert!(
            validate_sha256_hash(
                "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0"
            )
            .is_err(),
        );
        // Right length, but contains a non-hex character ('G').
        assert!(
            validate_sha256_hash(
                "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEG"
            )
            .is_err(),
        );
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/is_alive_lock.rs
lib/common/common/src/is_alive_lock.rs
use std::sync::{Arc, Weak};

use parking_lot::{ArcMutexGuard, Mutex, RawMutex};

/// Liveness flag that can only be observed while still "alive".
///
/// Locking through a handle both checks that this structure still exists and
/// keeps it from being dropped for as long as the guard is held. Dropping
/// this structure also marks it as dead, so any dangling handles can never
/// obtain a guard afterwards.
///
/// This structure explicitly doesn't implement Clone, so that `handle` is used instead.
#[derive(Debug)]
pub struct IsAliveLock {
    inner: Arc<Mutex<bool>>,
}

impl IsAliveLock {
    /// Create a new lock in the "alive" state.
    pub fn new() -> Self {
        let inner = Arc::new(Mutex::new(true));
        Self { inner }
    }

    /// Get a handle for this lock.
    pub fn handle(&self) -> IsAliveHandle {
        let inner = Arc::downgrade(&self.inner);
        IsAliveHandle { inner }
    }

    /// Waits for lock and marks as dead without dropping.
    /// Lock will no longer be usable after this.
    pub fn blocking_mark_dead(&self) {
        let mut alive = self.inner.lock();
        *alive = false;
    }
}

impl Default for IsAliveLock {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for IsAliveLock {
    fn drop(&mut self) {
        // Prevent dangling handles from accessing the lock. This blocks
        // until any outstanding guard has been released.
        self.blocking_mark_dead();
    }
}

/// Handle for `IsAliveLock` which can access the lock at a later time.
///
/// This is a separate structure so it does not change the boolean on drop.
pub struct IsAliveHandle {
    // Weak reference, so a handle never keeps the parent lock alive by itself.
    inner: Weak<Mutex<bool>>,
}

impl IsAliveHandle {
    /// Get a guard of this lock if the parent hasn't been dropped
    #[must_use = "Guard must be held for lifetime of operation, abort if None is returned"]
    pub fn lock_if_alive(&self) -> Option<IsAliveGuard> {
        // Upgrade fails once the parent's `Arc` is fully gone.
        let mutex = self.inner.upgrade()?;
        let is_alive = Mutex::lock_arc(&mutex);
        // The flag is flipped to `false` by `blocking_mark_dead` (including
        // the parent's `Drop`), in which case no guard may be handed out.
        if *is_alive {
            Some(IsAliveGuard(is_alive))
        } else {
            None
        }
    }
}

/// Guards a `true` boolean
pub struct IsAliveGuard(#[expect(dead_code)] ArcMutexGuard<RawMutex, bool>);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_marking_dead() {
        let lock = IsAliveLock::new();
        let handle = lock.handle();
        assert!(handle.lock_if_alive().is_some());
        lock.blocking_mark_dead();
        assert!(handle.lock_if_alive().is_none());
    }

    #[test]
    fn test_dropping() {
        let lock = IsAliveLock::default();
        let handle = lock.handle();
        assert!(handle.lock_if_alive().is_some());

        // dropping the handle does not poison the lock
        drop(handle);
        let handle = lock.handle();
        assert!(handle.lock_if_alive().is_some());

        // dropping the parent poisons the lock
        drop(lock);
        assert!(handle.lock_if_alive().is_none());
    }

    #[test]
    fn test_parent_waits_for_guard() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        let lock = IsAliveLock::new();
        let handle = lock.handle();

        // Test following sequence
        // | tick | lock           | handle        |
        // | ---- | -------------- | ------------- |
        // | 0    |                | guarded       |
        // | 1    | drop (waiting) | guarded       |
        // | 2    | (waiting)      | guarded       |
        // | 3    | actual drop    | release guard |
        let tick = AtomicUsize::new(0);

        // Hold the guard
        let guard = handle.lock_if_alive().unwrap();

        std::thread::scope(|s| {
            s.spawn(|| {
                // Start dropping until tick 1
                while tick.load(Ordering::SeqCst) < 1 {
                    std::thread::yield_now();
                }
                // This should block until guard is dropped (at tick 3)
                drop(lock);
                // Verify we dropped after guard was released (tick == 3)
                assert!(tick.load(Ordering::SeqCst) >= 3);
            });

            tick.store(1, Ordering::SeqCst);
            // Advance tick to show we're about to drop the guard
            tick.store(2, Ordering::SeqCst);
            tick.store(3, Ordering::SeqCst);
            drop(guard);
        });

        // After parent is dropped, handle should return None
        assert!(handle.lock_if_alive().is_none());
    }
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false
qdrant/qdrant
https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/panic.rs
lib/common/common/src/panic.rs
use std::any::Any;

pub type Payload = dyn Any + Send + 'static;

/// Downcast panic payload into a string.
///
/// Downcast `&'static str` and `String` panic payloads into a `&str`.
#[allow(clippy::borrowed_box)]
// The parameter really must be `&Box<dyn Any>`: `Box<dyn Any>` implements `Any`
// itself, so `downcast_str(&boxed_any)` would *not* auto-deref to the inner
// `dyn Any`, but *coerce* `&Box<dyn Any>` itself to `&dyn Any` :(
pub fn downcast_str(any: &Box<Payload>) -> Option<&str> {
    if let Some(message) = any.downcast_ref::<String>() {
        return Some(message.as_str());
    }
    // `copied()` turns `Option<&&'static str>` into `Option<&str>`.
    any.downcast_ref::<&'static str>().copied()
}
rust
Apache-2.0
f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd
2026-01-04T15:34:51.524868Z
false