repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/point_ops.rs | lib/collection/src/collection/point_ops.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::stream::FuturesUnordered;
use futures::{StreamExt as _, TryFutureExt, TryStreamExt as _, future};
use itertools::Itertools;
use segment::data_types::order_by::{Direction, OrderBy};
use segment::types::{ShardKey, WithPayload, WithPayloadInterface};
use shard::retrieve::record_internal::RecordInternal;
use super::Collection;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::point_ops::WriteOrdering;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::*;
use crate::operations::{CollectionUpdateOperations, OperationWithClockTag};
use crate::shards::shard::ShardId;
impl Collection {
/// Apply collection update operation to all local shards.
/// Return None if there are no local shards
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn update_all_local(
    &self,
    operation: CollectionUpdateOperations,
    wait: bool,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Option<UpdateResult>> {
    // Take owned (clonable) read guards so they can be moved into the spawned task
    let update_lock = self.updates_lock.clone().read_owned().await;
    let shard_holder = self.shards_holder.clone().read_owned().await;
    // Spawning onto the update runtime (instead of awaiting inline) is what makes
    // this method cancel safe: dropping this future does not cancel the spawned
    // task, so all `update_local` calls below still run to completion.
    let results = self
        .update_runtime
        .spawn(async move {
            // Hold the updates lock for the whole duration of the task
            let _update_lock = update_lock;
            // `ShardReplicaSet::update_local` is *not* cancel safe, so we *have to* execute *all*
            // `update_local` requests to completion.
            //
            // Note that `futures::try_join_all`/`TryStreamExt::try_collect` *cancel* pending
            // requests if any of them returns an error, so we *have to* use
            // `futures::join_all`/`TryStreamExt::collect` instead!
            let local_updates: FuturesUnordered<_> = shard_holder
                .all_shards()
                .map(|shard| {
                    // The operation *can't* have a clock tag!
                    //
                    // We update *all* shards with a single operation, but each shard has it's own clock,
                    // so it's *impossible* to assign any single clock tag to this operation.
                    shard.update_local(
                        OperationWithClockTag::from(operation.clone()),
                        wait,
                        hw_measurement_acc.clone(),
                        false,
                    )
                })
                .collect();
            // Collect *all* results (success and failure) without short-circuiting
            let results: Vec<_> = local_updates.collect().await;
            results
        })
        .await?;
    // Propagate the first error (if any); otherwise return the first `Some` result
    let mut result = None;
    for collection_result in results {
        let update_result = collection_result?;
        if result.is_none() && update_result.is_some() {
            result = update_result;
        }
    }
    Ok(result)
}
/// Handle collection updates from peers.
///
/// Shard transfer aware.
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn update_from_peer(
    &self,
    operation: OperationWithClockTag,
    shard_selection: ShardId,
    wait: bool,
    ordering: WriteOrdering,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
    // Owned guards so they can be moved into the spawned task
    let update_lock = self.updates_lock.clone().read_owned().await;
    let shard_holder = self.shards_holder.clone().read_owned().await;
    // Spawn on the update runtime so the update runs to completion even if this
    // future is dropped (see "Cancel safety" above).
    let result = self.update_runtime.spawn(async move {
        let _update_lock = update_lock;
        // `Ok(None)` signals "target shard not found"; it is converted into an
        // explicit error below, outside the spawned task.
        let Some(shard) = shard_holder.get_shard(shard_selection) else {
            return Ok(None);
        };
        match ordering {
            // Weak ordering: apply directly to the local replica, keeping the clock tag
            WriteOrdering::Weak => shard.update_local(operation, wait, hw_measurement_acc.clone(), false).await,
            // Medium/Strong ordering goes through the consistency path; a forwarded
            // operation is not expected to carry a clock tag here, so warn if it does
            WriteOrdering::Medium | WriteOrdering::Strong => {
                if let Some(clock_tag) = operation.clock_tag {
                    log::warn!(
                        "Received update operation forwarded from another peer with {ordering:?} \
                         with non-`None` clock tag {clock_tag:?} (operation: {:#?})",
                        operation.operation,
                    );
                }
                shard
                    .update_with_consistency(operation.operation, wait, ordering, false, hw_measurement_acc)
                    .await
                    .map(Some)
            }
        }
    })
    .await??;
    if let Some(result) = result {
        Ok(result)
    } else {
        // Special error type needed to handle creation of partial shards
        // In all other scenarios, equivalent to `service_error`
        Err(CollectionError::pre_condition_failed(format!(
            "No target shard {shard_selection} found for update"
        )))
    }
}
/// Apply a client update: split the operation across the shards it touches,
/// apply each part with the requested write `ordering`, and aggregate the
/// per-shard results into a single `UpdateResult` (or a combined error).
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn update_from_client(
    &self,
    operation: CollectionUpdateOperations,
    wait: bool,
    ordering: WriteOrdering,
    shard_keys_selection: Option<ShardKey>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
    // Owned guards, moved into the spawned task below
    let update_lock = self.updates_lock.clone().read_owned().await;
    let shard_holder = self.shards_holder.clone().read_owned().await;
    // Spawning on the update runtime makes this cancel safe: all per-shard
    // updates run to completion even if this future is dropped.
    let mut results = self
        .update_runtime
        .spawn(async move {
            let _update_lock = update_lock;
            let updates = FuturesUnordered::new();
            // Split the single client operation into per-shard operations
            let operations = shard_holder.split_by_shard(operation, &shard_keys_selection)?;
            for (shard, operation) in operations {
                // Further split by update mode: unconditional updates vs.
                // updates that must only touch already-existing points
                let operation = shard_holder.split_by_mode(shard.shard_id, operation);
                let hw_acc = hw_measurement_acc.clone();
                updates.push(async move {
                    // Default result if this shard receives no effective sub-operation
                    let mut result = UpdateResult {
                        operation_id: None,
                        status: UpdateStatus::Acknowledged,
                        clock_tag: None,
                    };
                    for operation in operation.update_all {
                        result = shard
                            .update_with_consistency(
                                operation,
                                wait,
                                ordering,
                                false,
                                hw_acc.clone(),
                            )
                            .await?;
                    }
                    for operation in operation.update_only_existing {
                        let res = shard
                            .update_with_consistency(
                                operation,
                                wait,
                                ordering,
                                true,
                                hw_acc.clone(),
                            )
                            .await;
                        // "Missing point" is expected for update-only-existing
                        // sub-operations; skip it instead of failing the shard
                        if let Err(err) = &res
                            && err.is_missing_point()
                        {
                            continue;
                        }
                        result = res?;
                    }
                    CollectionResult::Ok(result)
                });
            }
            // Collect *all* per-shard results without short-circuiting on errors,
            // so every started update runs to completion
            let results: Vec<_> = updates.collect().await;
            CollectionResult::Ok(results)
        })
        .await??;
    if results.is_empty() {
        return Err(CollectionError::bad_request(
            "Empty update request".to_string(),
        ));
    }
    let with_error = results.iter().filter(|result| result.is_err()).count();
    // one request per shard
    let result_len = results.len();
    if with_error > 0 {
        let first_err = results.into_iter().find(|result| result.is_err()).unwrap();
        // inconsistent if only a subset of the requests fail - one request per shard.
        if with_error < result_len {
            first_err.map_err(|err| {
                // compute final status code based on the first error
                // e.g. a partially successful batch update failing because of bad input is a client error
                CollectionError::InconsistentShardFailure {
                    shards_total: result_len as u32, // report only the number of shards that took part in the update
                    shards_failed: with_error as u32,
                    first_err: Box::new(err),
                }
            })
        } else {
            // all requests per shard failed - propagate first error (assume there are all the same)
            first_err
        }
    } else {
        // At least one result is always present.
        results.pop().unwrap()
    }
}
/// Convenience wrapper around [`Self::update_from_client`] that applies the
/// update without any shard-key selection (i.e. to all matching shards).
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn update_from_client_simple(
    &self,
    operation: CollectionUpdateOperations,
    wait: bool,
    ordering: WriteOrdering,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
    // No shard-key restriction for the "simple" variant
    let shard_keys_selection = None;
    self.update_from_client(operation, wait, ordering, shard_keys_selection, hw_measurement_acc)
        .await
}
/// Scroll through points of the collection, page by page.
///
/// Without `order_by`, results are merged across shards by point ID and an
/// extra point is fetched to compute `next_page_offset`. With `order_by`,
/// shard results are k-merged by order value (offset-based paging is not
/// supported in that mode, so `next_page_offset` is always `None`).
pub async fn scroll_by(
    &self,
    mut request: ScrollRequestInternal,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<ScrollResult> {
    let default_request = ScrollRequestInternal::default();
    let mut limit = request
        .limit
        .unwrap_or_else(|| default_request.limit.unwrap());
    if limit == 0 {
        return Err(CollectionError::BadRequest {
            description: "Limit cannot be 0".to_string(),
        });
    }
    let local_only = shard_selection.is_shard_id();
    let order_by = request.order_by.clone().map(OrderBy::from);
    // `order_by` does not support offset
    if order_by.is_none() {
        // Needed to return next page offset.
        // Fetch one extra point; it becomes `next_page_offset` if present.
        limit = limit.saturating_add(1);
        request.limit = Some(limit);
    }
    let request = Arc::new(request);
    let retrieved_points: Vec<_> = {
        let shards_holder = self.shards_holder.read().await;
        let target_shards = shards_holder.select_shards(shard_selection)?;
        let scroll_futures = target_shards.into_iter().map(|(shard, shard_key)| {
            // Clone the key once per shard so it can be attached to each record
            let shard_key = shard_key.cloned();
            shard
                .scroll_by(
                    request.clone(),
                    read_consistency,
                    local_only,
                    timeout,
                    hw_measurement_acc.clone(),
                )
                .and_then(move |mut records| async move {
                    if shard_key.is_none() {
                        return Ok(records);
                    }
                    // Tag each record with the shard key it came from
                    for point in &mut records {
                        point.shard_key.clone_from(&shard_key);
                    }
                    Ok(records)
                })
        });
        future::try_join_all(scroll_futures).await?
    };
    let retrieved_iter = retrieved_points.into_iter();
    let mut points = match &order_by {
        // No ordering: merge all shard results sorted by point ID
        None => retrieved_iter
            .flatten()
            .sorted_unstable_by_key(|point| point.id)
            // Add each point only once, deduplicate point IDs
            .dedup_by(|a, b| a.id == b.id)
            .take(limit)
            .map(api::rest::Record::from)
            .collect_vec(),
        // Ordered scroll: each shard returns records already sorted by order
        // value, so a k-way merge keeps the global order
        Some(order_by) => {
            retrieved_iter
                // Get top results
                .kmerge_by(|a, b| match order_by.direction() {
                    Direction::Asc => (a.order_value, a.id) < (b.order_value, b.id),
                    Direction::Desc => (a.order_value, a.id) > (b.order_value, b.id),
                })
                .dedup_by(|record_a, record_b| {
                    (record_a.order_value, record_a.id) == (record_b.order_value, record_b.id)
                })
                .map(api::rest::Record::from)
                .take(limit)
                .collect_vec()
        }
    };
    let next_page_offset = if points.len() < limit || order_by.is_some() {
        // This was the last page
        None
    } else {
        // remove extra point, it would be a first point of the next page
        Some(points.pop().unwrap().id)
    };
    Ok(ScrollResult {
        points,
        next_page_offset,
    })
}
/// Count points matching the request by querying every selected shard
/// concurrently and summing the per-shard counts.
pub async fn count(
    &self,
    request: CountRequestInternal,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<CountResult> {
    // Share one request instance across all shard futures
    let request = Arc::new(request);

    let shards_holder = self.shards_holder.read().await;
    let selected_shards = shards_holder.select_shards(shard_selection)?;

    let mut pending_counts: FuturesUnordered<_> = selected_shards
        .into_iter()
        // `count` requests received through internal gRPC *always* have `shard_selection`
        .map(|(shard, _shard_key)| {
            shard.count(
                Arc::clone(&request),
                read_consistency,
                timeout,
                shard_selection.is_shard_id(),
                hw_measurement_acc.clone(),
            )
        })
        .collect();

    // Sum shard counts as responses arrive; the first error aborts the loop
    let mut total = 0;
    while let Some(shard_result) = pending_counts.try_next().await? {
        total += shard_result.count;
    }

    Ok(CountResult { count: total })
}
/// Retrieve points by ID from all selected shards.
///
/// Results are deduplicated by point ID and returned in the same order the
/// IDs were requested; IDs not found in any shard are silently skipped.
pub async fn retrieve(
    &self,
    request: PointRequestInternal,
    read_consistency: Option<ReadConsistency>,
    shard_selection: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
    // Nothing to retrieve for an empty ID list
    if request.ids.is_empty() {
        return Ok(Vec::new());
    }
    // Payload is excluded by default unless explicitly requested
    let with_payload_interface = request
        .with_payload
        .as_ref()
        .unwrap_or(&WithPayloadInterface::Bool(false));
    let with_payload = WithPayload::from(with_payload_interface);
    let ids_len = request.ids.len();
    let request = Arc::new(request);
    let shard_holder = self.shards_holder.read().await;
    let target_shards = shard_holder.select_shards(shard_selection)?;
    let mut all_shard_collection_requests = target_shards
        .into_iter()
        .map(|(shard, shard_key)| {
            // Explicitly borrow `request` and `with_payload`, so we can use them in `async move`
            // block below without unnecessarily cloning anything
            let request = &request;
            let with_payload = &with_payload;
            let hw_acc = hw_measurement_acc.clone();
            // Clone the shard key once per shard (not once per point, as before),
            // matching the pattern used in `scroll_by`
            let shard_key = shard_key.cloned();
            async move {
                let mut records = shard
                    .retrieve(
                        request.clone(),
                        with_payload,
                        &request.with_vector,
                        read_consistency,
                        timeout,
                        shard_selection.is_shard_id(),
                        hw_acc,
                    )
                    .await?;
                if shard_key.is_none() {
                    return Ok(records);
                }
                // Tag each record with the shard key it came from
                for point in &mut records {
                    point.shard_key.clone_from(&shard_key);
                }
                CollectionResult::Ok(records)
            }
        })
        .collect::<FuturesUnordered<_>>();
    // pre-allocate hashmap with capped capacity to protect from malevolent input
    let mut covered_point_ids = HashMap::with_capacity(ids_len.min(1024));
    while let Some(response) = all_shard_collection_requests.try_next().await? {
        for point in response {
            // Add each point only once, deduplicate point IDs
            covered_point_ids.insert(point.id, point);
        }
    }
    // Collect points in the same order as they were requested
    let points = request
        .ids
        .iter()
        .filter_map(|id| covered_point_ids.remove(id))
        .collect();
    Ok(points)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/shard_transfer.rs | lib/collection/src/collection/shard_transfer.rs | use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use common::defaults;
use fs_err::tokio as tokio_fs;
use parking_lot::Mutex;
use tokio_util::task::AbortOnDropHandle;
use super::Collection;
use crate::operations::cluster_ops::ReshardingDirection;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::local_shard::LocalShard;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_holder::ShardHolder;
use crate::shards::transfer::transfer_tasks_pool::{TransferTaskItem, TransferTaskProgress};
use crate::shards::transfer::{
ShardTransfer, ShardTransferConsensus, ShardTransferKey, ShardTransferMethod,
};
use crate::shards::{shard_initializing_flag_path, transfer};
impl Collection {
/// List every registered shard transfer in which the given peer participates,
/// either as the sender or as the receiver.
pub async fn get_related_transfers(&self, current_peer_id: PeerId) -> Vec<ShardTransfer> {
    let shards_holder = self.shards_holder.read().await;
    shards_holder.get_transfers(|transfer| {
        let is_sender = transfer.from == current_peer_id;
        let is_receiver = transfer.to == current_peer_id;
        is_sender || is_receiver
    })
}
/// Whether a transfer matching `transfer_key` is currently registered
/// for this collection.
pub async fn check_transfer_exists(&self, transfer_key: &ShardTransferKey) -> bool {
    let shards_holder = self.shards_holder.read().await;
    shards_holder.check_transfer_exists(transfer_key)
}
/// Register and (on the sending peer) start driving a shard transfer.
///
/// Runs on *every* peer when consensus confirms a transfer: it registers the
/// transfer, prepares the receiving replica (creating a local shard or setting
/// its replica state), and returns `true` iff this peer is the sender with a
/// local source shard — in which case the actual transfer task was spawned.
pub async fn start_shard_transfer<T, F>(
    &self,
    mut shard_transfer: ShardTransfer,
    consensus: Box<dyn ShardTransferConsensus>,
    temp_dir: PathBuf,
    on_finish: T,
    on_error: F,
) -> CollectionResult<bool>
where
    T: Future<Output = ()> + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    // Select transfer method
    if shard_transfer.method.is_none() {
        let method = self
            .shared_storage_config
            .default_shard_transfer_method
            .unwrap_or_default();
        log::warn!("No shard transfer method selected, defaulting to {method:?}");
        shard_transfer.method.replace(method);
    }
    let do_transfer = {
        let this_peer_id = consensus.this_peer_id();
        let is_receiver = this_peer_id == shard_transfer.to;
        let is_sender = this_peer_id == shard_transfer.from;
        // Get the source and target shards, in case of resharding the target shard is different
        let from_shard_id = shard_transfer.shard_id;
        let to_shard_id = shard_transfer
            .to_shard_id
            .unwrap_or(shard_transfer.shard_id);
        let shards_holder = self.shards_holder.read().await;
        let from_replica_set = shards_holder.get_shard(from_shard_id).ok_or_else(|| {
            CollectionError::service_error(format!("Shard {from_shard_id} doesn't exist"))
        })?;
        let to_replica_set = shards_holder.get_shard(to_shard_id).ok_or_else(|| {
            CollectionError::service_error(format!("Shard {to_shard_id} doesn't exist"))
        })?;
        // Register first; a `?` failure here means the transfer was already known
        let _was_not_transferred =
            shards_holder.register_start_shard_transfer(shard_transfer.clone())?;
        let from_is_local = from_replica_set.is_local().await;
        let to_is_local = to_replica_set.is_local().await;
        // The receiving replica's initial state depends on the transfer method
        let initial_state = match shard_transfer.method.unwrap_or_default() {
            ShardTransferMethod::StreamRecords => ReplicaState::Partial,
            ShardTransferMethod::Snapshot | ShardTransferMethod::WalDelta => {
                ReplicaState::Recovery
            }
            ShardTransferMethod::ReshardingStreamRecords => {
                // Resharding transfers are only valid while resharding is in progress
                let resharding_direction =
                    self.resharding_state().await.map(|state| state.direction);
                match resharding_direction {
                    Some(ReshardingDirection::Up) => ReplicaState::Resharding,
                    Some(ReshardingDirection::Down) => ReplicaState::ReshardingScaleDown,
                    None => {
                        return Err(CollectionError::bad_input(
                            "can't start resharding transfer, because resharding is not in progress",
                        ));
                    }
                }
            }
        };
        // Create local shard if it does not exist on receiver, or simply set replica state otherwise
        // (on all peers, regardless if shard is local or remote on that peer).
        //
        // This should disable queries to receiver replica even if it was active before.
        if !to_is_local && is_receiver {
            let effective_optimizers_config = self.effective_optimizers_config().await?;
            let shard = LocalShard::build(
                to_shard_id,
                self.name().to_string(),
                &to_replica_set.shard_path,
                self.collection_config.clone(),
                self.shared_storage_config.clone(),
                self.payload_index_schema.clone(),
                self.update_runtime.clone(),
                self.search_runtime.clone(),
                self.optimizer_resource_budget.clone(),
                effective_optimizers_config,
            )
            .await?;
            let old_shard = to_replica_set.set_local(shard, Some(initial_state)).await?;
            if let Some(old_shard) = old_shard {
                debug_assert!(false, "We should not have a local shard yet");
                // Best-effort cleanup of the unexpected pre-existing shard
                old_shard.stop_gracefully().await;
            }
        } else {
            to_replica_set
                .ensure_replica_with_state(shard_transfer.to, initial_state)
                .await?;
        }
        // Only the sender with a local source shard drives the transfer
        from_is_local && is_sender
    };
    if do_transfer {
        self.send_shard(shard_transfer, consensus, temp_dir, on_finish, on_error)
            .await;
    }
    Ok(do_transfer)
}
/// Spawn the background task that drives an outgoing shard transfer and
/// register it in the active-transfer-task pool.
///
/// Any previously registered task for the same transfer key is stopped first
/// (in a correct flow there should be none — see the `debug_assert!`).
async fn send_shard<OF, OE>(
    &self,
    transfer: ShardTransfer,
    consensus: Box<dyn ShardTransferConsensus>,
    temp_dir: PathBuf,
    on_finish: OF,
    on_error: OE,
) where
    OF: Future<Output = ()> + Send + 'static,
    OE: Future<Output = ()> + Send + 'static,
{
    // Hold the task-pool lock across stop + spawn + add so no other caller
    // can interleave between stopping a stale task and registering the new one
    let mut active_transfer_tasks = self.transfer_tasks.lock().await;
    let task_result = active_transfer_tasks.stop_task(&transfer.key()).await;
    debug_assert!(task_result.is_none(), "Transfer task already exists");
    debug_assert!(
        transfer.method.is_some(),
        "When sending shard, a transfer method must have been selected",
    );
    let shard_holder = self.shards_holder.clone();
    let collection_id = self.id.clone();
    let channel_service = self.channel_service.clone();
    // Shared progress handle: the driver updates it, the task pool exposes it
    let progress = Arc::new(Mutex::new(TransferTaskProgress::new()));
    let transfer_task = transfer::driver::spawn_transfer_task(
        shard_holder,
        progress.clone(),
        transfer.clone(),
        consensus,
        collection_id,
        channel_service,
        self.snapshots_path.clone(),
        temp_dir,
        on_finish,
        on_error,
    );
    active_transfer_tasks.add_task(
        &transfer,
        TransferTaskItem {
            task: transfer_task,
            started_at: chrono::Utc::now(),
            progress,
        },
    );
}
/// Handles finishing of the shard transfer.
///
/// Stops the local transfer task (if any), promotes the destination replica,
/// cleans up or un-proxifies the source replica, and unregisters the transfer.
///
/// If `shard_holder` is `None`, a read lock on `self.shards_holder` is taken
/// internally; otherwise the caller-provided holder is used.
pub async fn finish_shard_transfer(
    &self,
    transfer: ShardTransfer,
    shard_holder: Option<&ShardHolder>,
) -> CollectionResult<()> {
    let transfer_result = self
        .transfer_tasks
        .lock()
        .await
        .stop_task(&transfer.key())
        .await;
    log::debug!("Transfer result: {transfer_result:?}");
    // Use the caller's holder if provided, otherwise lock our own;
    // `shard_holder_guard` keeps the internally-taken guard alive
    let mut shard_holder_guard = None;
    let shard_holder = match shard_holder {
        Some(shard_holder) => shard_holder,
        None => shard_holder_guard.insert(self.shards_holder.read().await),
    };
    let is_resharding_transfer = transfer.method.is_some_and(|method| method.is_resharding());
    // Handle *destination* replica
    let mut is_dest_replica_active = false;
    let dest_replica_set =
        shard_holder.get_shard(transfer.to_shard_id.unwrap_or(transfer.shard_id));
    if let Some(replica_set) = dest_replica_set
        && replica_set.peer_state(transfer.to).is_some()
    {
        // Promote *destination* replica/shard to `Active` if:
        //
        // - replica *exists*
        //   - replica should be created when shard transfer (or resharding) is started
        //   - replica might *not* exist, if it (or the whole *peer*) was removed right
        //     before transfer is finished
        // - transfer is *not* resharding
        //   - resharding requires multiple transfers, so destination shard is promoted
        //     *explicitly* when all transfers are finished
        // TODO(resharding): Do not change replica state at all, when finishing resharding transfer?
        //
        // We switch replica into correct state when *starting* resharding transfer, and
        // we want to *keep* it in the same state *after* resharding transfer is finished...
        let state = if is_resharding_transfer {
            let resharding_direction =
                self.resharding_state().await.map(|state| state.direction);
            match resharding_direction {
                Some(ReshardingDirection::Up) => ReplicaState::Resharding,
                Some(ReshardingDirection::Down) => ReplicaState::ReshardingScaleDown,
                None => {
                    // Resharding was aborted in the meantime; mark the replica
                    // dead so it cannot serve inconsistent data
                    log::error!(
                        "Can't finish resharding shard transfer correctly, \
                         because resharding is not in progress anymore!",
                    );
                    ReplicaState::Dead
                }
            }
        } else {
            ReplicaState::Active
        };
        if transfer.to == self.this_peer_id {
            replica_set.set_replica_state(transfer.to, state).await?;
        } else {
            replica_set.add_remote(transfer.to, state).await?;
        }
        is_dest_replica_active = state == ReplicaState::Active;
    }
    // Handle *source* replica
    let src_replica_set = shard_holder.get_shard(transfer.shard_id);
    if let Some(replica_set) = src_replica_set {
        if transfer.sync || is_resharding_transfer {
            // If transfer is *sync* (or *resharding*), we *keep* source replica
            if transfer.from == self.this_peer_id {
                // If current peer is *transfer-sender*, we need to unproxify local shard
                replica_set.un_proxify_local().await?;
            }
        } else if is_dest_replica_active {
            // If transfer is *not* sync (and *not* resharding) and *destination* replica is `Active`,
            // we *remove* source replica
            if transfer.from == self.this_peer_id {
                self.invalidate_clean_local_shards([transfer.shard_id])
                    .await;
                replica_set.remove_local().await?;
            } else {
                replica_set.remove_remote(transfer.from).await?;
            }
        }
    }
    let is_finish_registered = shard_holder.register_finish_transfer(&transfer.key())?;
    log::debug!("Transfer finish registered: {is_finish_registered}");
    Ok(())
}
/// Abort a shard transfer: stop the local transfer task (if any), roll back
/// the destination replica state, un-proxify the source shard on the sending
/// peer, and unregister the transfer.
///
/// NOTE(review): the previous doc said this "returns if it was a resharding
/// transfer", but the function returns `CollectionResult<()>` — callers that
/// need that flag use `ShardTransfer::is_resharding` themselves (see
/// `abort_shard_transfer_and_resharding`).
pub async fn abort_shard_transfer(
    &self,
    transfer: ShardTransfer,
    shard_holder: &ShardHolder,
) -> CollectionResult<()> {
    // TODO: Ensure cancel safety!
    let transfer_key = transfer.key();
    log::debug!("Aborting shard transfer {transfer_key:?}");
    let _transfer_result = self
        .transfer_tasks
        .lock()
        .await
        .stop_task(&transfer_key)
        .await;
    let is_resharding_transfer = transfer.is_resharding();
    // In case of resharding, the destination shard differs from the source
    let shard_id = transfer_key.to_shard_id.unwrap_or(transfer_key.shard_id);
    if let Some(replica_set) = shard_holder.get_shard(shard_id) {
        if replica_set.peer_state(transfer.to).is_some() {
            if is_resharding_transfer {
                // If *resharding* shard transfer failed, we don't need/want to change replica state:
                // - on transfer failure, the whole resharding would be aborted (see below),
                //   and so all changes to replicas would be discarded/rolled-back anyway
                // - during resharding *up*, we transfer points to a single new shard replica;
                //   it is expected that this node is initially empty/incomplete, and so failed
                //   transfer should not strictly introduce inconsistency (it just means the node
                //   is *still* empty/incomplete); marking this new replica as `Dead` would only
                //   make requests to return explicit errors
                // - during resharding *down*, we transfer points from shard-to-be-removed
                //   to all other shards; all other shards are expected to be `Active`,
                //   and so failed transfer does not introduce any inconsistencies to points
                //   that are not affected by resharding in all other shards
            } else if transfer.sync {
                // Sync transfer: keep the receiving replica but mark it dead
                replica_set
                    .set_replica_state(transfer.to, ReplicaState::Dead)
                    .await?;
            } else {
                // Move transfer: drop the (incomplete) receiving replica entirely
                self.invalidate_clean_local_shards([transfer
                    .to_shard_id
                    .unwrap_or(transfer.shard_id)])
                    .await;
                replica_set.remove_peer(transfer.to).await?;
            }
        }
    } else {
        log::warn!(
            "Aborting shard transfer {transfer_key:?}, but shard {shard_id} does not exist"
        );
    }
    if transfer.from == self.this_peer_id {
        // We were the sender: restore the proxied source shard to a plain local shard
        transfer::driver::revert_proxy_shard_to_local(shard_holder, transfer.shard_id).await?;
    }
    shard_holder.register_abort_transfer(&transfer_key)?;
    Ok(())
}
/// Handles abort of the transfer and also aborts resharding if the transfer was related to resharding
///
/// 1. Unregister the transfer
/// 2. Stop transfer task
/// 3. Unwrap the proxy
/// 4. Remove temp shard, or mark it as dead
///
/// NOTE(review): when the caller passes `Some(shard_holder)` it must NOT hold
/// a lock that conflicts with `abort_resharding` — only the *internally* taken
/// guard is dropped below before `abort_resharding` re-locks the holder.
pub async fn abort_shard_transfer_and_resharding(
    &self,
    transfer_key: ShardTransferKey,
    shard_holder: Option<&ShardHolder>,
) -> CollectionResult<()> {
    // Use the caller's holder if provided, otherwise take our own read lock
    let mut shard_holder_guard = None;
    let shard_holder = match shard_holder {
        Some(shard_holder) => shard_holder,
        None => shard_holder_guard.insert(self.shards_holder.read().await),
    };
    // Unknown transfer key: nothing to abort
    let Some(transfer) = shard_holder.get_transfer(&transfer_key) else {
        return Ok(());
    };
    let is_resharding_transfer = transfer.is_resharding();
    self.abort_shard_transfer(transfer, shard_holder).await?;
    if is_resharding_transfer {
        // Snapshot the resharding state before releasing the holder lock
        let resharding_state = shard_holder.resharding_state.read().clone();
        // `abort_resharding` locks `shard_holder`!
        drop(shard_holder_guard);
        if let Some(state) = resharding_state {
            self.abort_resharding(state.key(), false).await?;
        }
    }
    Ok(())
}
/// Initiate local partial shard
///
/// Prepares this (receiving) peer for an incoming shard transfer: waits for
/// the local replica to be initialized, waits for consensus to register the
/// transfer and switch the replica into a partial/recovery state, then makes
/// sure the local shard is not proxied and not a dummy shard.
pub fn initiate_shard_transfer(
    &self,
    shard_id: ShardId,
) -> impl Future<Output = CollectionResult<()>> + 'static {
    // TODO: Ensure cancel safety!
    let shards_holder = self.shards_holder.clone();
    let collection_path = self.path.clone();
    async move {
        let shards_holder_guard = shards_holder.clone().read_owned().await;
        let Some(replica_set) = shards_holder_guard.get_shard(shard_id) else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} doesn't exist, repartition is not supported yet"
            )));
        };
        // Wait for the replica set to have the local shard initialized
        // This can take some time as this is arranged through consensus
        replica_set
            .wait_for_local(defaults::CONSENSUS_META_OP_WAIT)
            .await?;
        let this_peer_id = replica_set.this_peer_id();
        // The waits below are blocking, so run them on the blocking thread pool
        let shard_transfer_requested = tokio::task::spawn_blocking(move || {
            // We can guarantee that replica_set is not None, cause we checked it before
            // and `shards_holder` is holding the lock.
            // This is a workaround for lifetime checker.
            let replica_set = shards_holder_guard.get_shard(shard_id).unwrap();
            let shard_transfer_registered = shards_holder_guard.shard_transfers.wait_for(
                |shard_transfers| {
                    shard_transfers
                        .iter()
                        .any(|shard_transfer| shard_transfer.is_target(this_peer_id, shard_id))
                },
                Duration::from_secs(60),
            );
            // It is not enough to check for shard_transfer_registered,
            // because it is registered before the state of the shard is changed.
            shard_transfer_registered
                && replica_set.wait_for_state_condition_sync(
                    |state| {
                        state
                            .get_peer_state(this_peer_id)
                            .is_some_and(|peer_state| peer_state.is_partial_or_recovery())
                    },
                    defaults::CONSENSUS_META_OP_WAIT,
                )
        });
        // Abort the blocking task if this future is dropped
        match AbortOnDropHandle::new(shard_transfer_requested).await {
            Ok(true) => Ok(()),
            Ok(false) => {
                let description = "\
                    Failed to initiate shard transfer: \
                    Didn't receive shard transfer notification from consensus in 60 seconds";
                Err(CollectionError::Timeout {
                    description: description.into(),
                })
            }
            Err(err) => Err(CollectionError::service_error(format!(
                "Failed to initiate shard transfer: \
                 Failed to execute wait-for-consensus-notification task: \
                 {err}"
            ))),
        }?;
        // At this point we made sure that receiver replica is synced and expecting incoming
        // shard transfer.
        // Further checks are an extra safety net, in normal situation they should not fail.
        let shards_holder_guard = shards_holder.read_owned().await;
        let Some(replica_set) = shards_holder_guard.get_shard(shard_id) else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} doesn't exist, repartition is not supported yet"
            )));
        };
        if replica_set.is_proxy().await {
            debug_assert!(false, "We should not have proxy shard here");
            // We have proxy or something, we need to unwrap it
            log::error!("Unwrapping proxy shard {shard_id}");
            replica_set.un_proxify_local().await?;
        }
        if replica_set.is_dummy().await {
            // We can reach here because of either of these:
            // 1. Qdrant is in recovery mode, and user intentionally triggered a transfer
            // 2. Shard is dirty (shard initializing flag), and Qdrant triggered a transfer to recover from Dead state after an update fails
            //
            // In both cases, it's safe to drop existing local shard data
            log::debug!(
                "Initiating transfer to dummy shard {}. Initializing empty local shard first",
                replica_set.shard_id,
            );
            replica_set.init_empty_local_shard().await?;
            let shard_flag = shard_initializing_flag_path(&collection_path, shard_id);
            // Fix: `try_exists` returns `io::Result<bool>`; the previous `.is_ok()` check
            // was true even when the flag file does *not* exist (it only means the check
            // itself succeeded), so `remove_file` could fail with `NotFound` and abort
            // the whole initiation. Only remove the flag when it actually exists.
            if tokio_fs::try_exists(&shard_flag).await.unwrap_or(false) {
                // We can delete initializing flag without waiting for transfer to finish
                // because if transfer fails in between, Qdrant will retry it.
                tokio_fs::remove_file(&shard_flag).await?;
                log::debug!("Removed shard initializing flag {shard_flag:?}");
            }
        }
        Ok(())
    }
}
/// Whether we have reached the automatic shard transfer limit based on the given incoming and
/// outgoing transfers.
///
/// A limit of `None` in the shared storage config means "unlimited" for that direction.
pub(super) fn check_auto_shard_transfer_limit(&self, incoming: usize, outgoing: usize) -> bool {
    let config = &self.shared_storage_config;
    let incoming_limit_reached = config
        .incoming_shard_transfers_limit
        .is_some_and(|limit| incoming >= limit);
    let outgoing_limit_reached = config
        .outgoing_shard_transfers_limit
        .is_some_and(|limit| outgoing >= limit);
    incoming_limit_reached || outgoing_limit_reached
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/query.rs | lib/collection/src/collection/query.rs | use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::{TryFutureExt, future};
use itertools::{Either, Itertools};
use rand::Rng;
use segment::common::reciprocal_rank_fusion::rrf_scoring;
use segment::common::score_fusion::{ScoreFusion, score_fusion};
use segment::data_types::vectors::VectorStructInternal;
use segment::types::{Order, ScoredPoint, WithPayloadInterface, WithVector};
use segment::utils::scored_point_ties::ScoredPointTies;
use tokio::sync::RwLockReadGuard;
use tokio::time::Instant;
use super::Collection;
use crate::collection::mmr::mmr_from_points_with_vector;
use crate::collection_manager::probabilistic_search_sampling::find_search_sampling_over_point_distribution;
use crate::common::batching::batch_requests;
use crate::common::fetch_vectors::{
build_vector_resolver_queries, resolve_referenced_vectors_batch,
};
use crate::common::retrieve_request_trait::RetrieveRequest;
use crate::common::transpose_iterator::transposed_iter;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::operations::universal_query::collection_query::CollectionQueryRequest;
use crate::operations::universal_query::shard_query::{
self, FusionInternal, MmrInternal, ScoringQuery, ShardQueryRequest, ShardQueryResponse,
};
/// A factor which determines if we need to use the 2-step search or not.
/// Should be adjusted based on usage statistics.
pub(super) const PAYLOAD_TRANSFERS_FACTOR_THRESHOLD: usize = 10;
/// Per-query information needed when processing intermediate results of a
/// batched query.
struct IntermediateQueryInfo<'a> {
    // Scoring query for this intermediate result; `None` presumably means no
    // (re)scoring is applied — TODO confirm against the merging code
    scoring_query: Option<&'a ScoringQuery>,
    /// Limit + offset
    take: usize,
}
impl Collection {
/// query is a special case of query_batch with a single batch
pub async fn query(
    &self,
    request: ShardQueryRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
    // A zero limit cannot produce any points — skip the shard round-trip entirely
    if request.limit == 0 {
        return Ok(Vec::new());
    }

    let single_batch = vec![request];
    let batch_results = self
        .do_query_batch(
            single_batch,
            read_consistency,
            shard_selection,
            timeout,
            hw_measurement_acc,
        )
        .await?;

    // Exactly one request was submitted, so take its single response
    Ok(batch_results.into_iter().next().unwrap())
}
/// If the query limit is above this value, it will be subject to undersampling.
const SHARD_QUERY_SUBSAMPLING_LIMIT: usize = 128;
/// Give some more assurance for undersampling:
/// retrieve more points to prevent undersampling errors.
/// Errors are still possible, but rare enough to be acceptable compared to
/// errors introduced by vector index.
const MORE_ENSURANCE_FACTOR: f64 = 1.2;
/// Creates a copy of requests in case it is possible to apply limit modification
/// Returns unchanged requests if limit modification is not applicable.
///
/// If there are many independent shards, and we need a very high limit, we can do an optimization.
/// Instead of querying all shards with the same limit, we can query each shard with a smaller limit
/// and then merge the results. Since shards are independent and data is randomly distributed, we can
/// apply probability estimation to make sure we query enough points to get the desired number of results.
///
/// Same optimization we already apply on segment level, but here it seems to be even more reliable
/// because auto-sharding guarantee random and independent distribution of data.
///
/// Unlike segments, however, the cost of re-requesting the data is much higher for shards.
/// So we "accept" the risk of not getting enough results.
fn modify_shard_query_for_undersampling_limits(
batch_request: Arc<Vec<ShardQueryRequest>>,
num_shards: usize,
is_auto_sharding: bool,
) -> Arc<Vec<ShardQueryRequest>> {
if num_shards <= 1 {
return batch_request;
}
// Check this parameter inside the function
// to ensure it is not omitted in the future.
if !is_auto_sharding {
return batch_request;
}
let max_limit = batch_request
.iter()
.map(|req| req.limit + req.offset)
.max()
.unwrap_or(0);
if max_limit < Self::SHARD_QUERY_SUBSAMPLING_LIMIT {
return batch_request;
}
let mut new_requests = Vec::with_capacity(batch_request.len());
for request in batch_request.iter() {
let mut new_request = request.clone();
let request_limit = new_request.limit + new_request.offset;
let is_exact = request.params.as_ref().map(|p| p.exact).unwrap_or(false);
if is_exact || request_limit < Self::SHARD_QUERY_SUBSAMPLING_LIMIT {
new_requests.push(new_request);
continue;
}
// Example: 1000 limit, 10 shards
// 1.0 / 10 * 1.2 = 0.12
// lambda = 0.12 * 1000 = 120
// Which is equal to 171 limit per shard
let undersample_limit = find_search_sampling_over_point_distribution(
request_limit as f64,
1. / num_shards as f64 * Self::MORE_ENSURANCE_FACTOR,
);
new_request.limit = std::cmp::min(undersample_limit, request_limit);
new_request.offset = 0; // Offset is handled on the collection level
new_requests.push(new_request);
}
Arc::new(new_requests)
}
    /// Returns a shape of [shard_id, batch_id, intermediate_response, points]
    ///
    /// Fans the whole `batch_request` out to every shard selected by
    /// `shard_selection` concurrently and collects the per-shard responses.
    /// Before dispatching, request limits may be reduced for undersampling
    /// when all targeted shards share a single shard key (auto-sharding).
    async fn batch_query_shards_concurrently(
        &self,
        batch_request: Arc<Vec<ShardQueryRequest>>,
        read_consistency: Option<ReadConsistency>,
        shard_selection: &ShardSelectorInternal,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ShardQueryResponse>>> {
        // query all shards concurrently
        let shard_holder = self.shards_holder.read().await;
        let target_shards = shard_holder.select_shards(shard_selection)?;
        let num_unique_shard_keys = target_shards
            .iter()
            .map(|(_, shard_key)| shard_key)
            .unique()
            .count();
        // Auto-sharding happens when we are only querying shards with _the_same_ shard key.
        // It either might be when we are querying a specific shard key
        // OR when we are querying all shards with no shard keys specified.
        let is_auto_sharding = num_unique_shard_keys == 1;
        let batch_request = Self::modify_shard_query_for_undersampling_limits(
            batch_request,
            target_shards.len(),
            is_auto_sharding,
        );
        let all_searches = target_shards.iter().map(|(shard, shard_key)| {
            let shard_key = shard_key.cloned();
            let request_clone = Arc::clone(&batch_request);
            shard
                .query_batch(
                    request_clone,
                    read_consistency,
                    shard_selection.is_shard_id(),
                    timeout,
                    hw_measurement_acc.clone(),
                )
                .and_then(move |mut shard_responses| async move {
                    if shard_key.is_none() {
                        return Ok(shard_responses);
                    }
                    // Stamp every returned point with the shard key it came
                    // from, so results of different shard keys can be told apart.
                    shard_responses
                        .iter_mut()
                        .flatten()
                        .flatten()
                        .for_each(|point| point.shard_key.clone_from(&shard_key));
                    Ok(shard_responses)
                })
        });
        // Await all shard futures; the first error aborts the whole query.
        future::try_join_all(all_searches).await
    }
    /// This function is used to query the collection. It will return a list of scored points.
    ///
    /// Wraps `do_query_batch_impl` with an optional two-step strategy: when
    /// payload/vectors are requested but far more records would be transferred
    /// from shards than end up in the final result, the query is first executed
    /// without payload and vectors, and the metadata is fetched afterwards only
    /// for the points that made it into the final result.
    async fn do_query_batch(
        &self,
        requests_batch: Vec<ShardQueryRequest>,
        read_consistency: Option<ReadConsistency>,
        shard_selection: ShardSelectorInternal,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        let start = Instant::now();
        // shortcuts batch if all requests with limit=0
        if requests_batch.iter().all(|s| s.limit == 0) {
            return Ok(vec![]);
        }
        // NOTE(review): `all` means the two-step optimization only kicks in
        // when *every* request in the batch asks for payload (resp. vectors) —
        // confirm this is intended rather than `any`.
        let is_payload_required = requests_batch.iter().all(|s| s.with_payload.is_required());
        let with_vectors = requests_batch.iter().all(|s| s.with_vector.is_enabled());
        let metadata_required = is_payload_required || with_vectors;
        let sum_limits: usize = requests_batch.iter().map(|s| s.limit).sum();
        let sum_offsets: usize = requests_batch.iter().map(|s| s.offset).sum();
        // Number of records we need to retrieve to fill the search result.
        let require_transfers = self.shards_holder.read().await.len() * (sum_limits + sum_offsets);
        // Actually used number of records.
        let used_transfers = sum_limits;
        let is_required_transfer_large_enough =
            require_transfers > used_transfers.saturating_mul(PAYLOAD_TRANSFERS_FACTOR_THRESHOLD);
        if metadata_required && is_required_transfer_large_enough {
            // If there is a significant offset, we need to retrieve the whole result
            // set without payload first and then retrieve the payload.
            // It is required to do this because the payload might be too large to send over the
            // network.
            let mut without_payload_requests = Vec::with_capacity(requests_batch.len());
            for query in &requests_batch {
                let mut without_payload_request = query.clone();
                without_payload_request.with_payload = WithPayloadInterface::Bool(false);
                without_payload_request.with_vector = WithVector::Bool(false);
                without_payload_requests.push(without_payload_request);
            }
            let without_payload_batch = without_payload_requests;
            let without_payload_results = self
                .do_query_batch_impl(
                    without_payload_batch,
                    read_consistency,
                    &shard_selection,
                    timeout,
                    hw_measurement_acc.clone(),
                )
                .await?;
            // update timeout: subtract the time already spent on the first step
            let timeout = timeout.map(|t| t.saturating_sub(start.elapsed()));
            // Second step: fetch payload/vectors only for the final points of
            // each request, concurrently.
            let filled_results = without_payload_results
                .into_iter()
                .zip(requests_batch.into_iter())
                .map(|(without_payload_result, req)| {
                    self.fill_search_result_with_payload(
                        without_payload_result,
                        Some(req.with_payload),
                        req.with_vector,
                        read_consistency,
                        &shard_selection,
                        timeout,
                        hw_measurement_acc.clone(),
                    )
                });
            future::try_join_all(filled_results).await
        } else {
            self.do_query_batch_impl(
                requests_batch,
                read_consistency,
                &shard_selection,
                timeout,
                hw_measurement_acc.clone(),
            )
            .await
        }
    }
    /// This function is used to query the collection. It will return a list of scored points.
    ///
    /// Dispatches the whole batch to all selected shards concurrently, then,
    /// per request, merges the shards' intermediate results and resolves them
    /// into the final list (fusion/MMR, offset and limit applied).
    async fn do_query_batch_impl(
        &self,
        requests_batch: Vec<ShardQueryRequest>,
        read_consistency: Option<ReadConsistency>,
        shard_selection: &ShardSelectorInternal,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        let instant = Instant::now();
        let requests_batch = Arc::new(requests_batch);
        let all_shards_results = self
            .batch_query_shards_concurrently(
                requests_batch.clone(),
                read_consistency,
                shard_selection,
                timeout,
                hw_measurement_acc.clone(),
            )
            .await?;
        // Transpose [num_shards, batch_size, ...] so each request gets its own
        // group of per-shard results.
        let results_f = transposed_iter(all_shards_results)
            .zip(requests_batch.iter())
            .map(|(shards_results, request)| async {
                // shards_results shape: [num_shards, num_intermediate_results, num_points]
                // merged_intermediates shape: [num_intermediate_results, num_points]
                let merged_intermediates = self
                    .merge_intermediate_results_from_shards(request, shards_results)
                    .await?;
                let result = self
                    .intermediates_to_final_list(
                        merged_intermediates,
                        request,
                        // Pass only the remaining time budget down.
                        timeout.map(|timeout| timeout.saturating_sub(instant.elapsed())),
                        hw_measurement_acc.clone(),
                    )
                    .await?;
                let filter_refs = request.filter_refs();
                // Report slow requests for diagnostics.
                self.post_process_if_slow_request(instant.elapsed(), filter_refs);
                Ok::<_, CollectionError>(result)
            });
        let results = future::try_join_all(results_f).await?;
        Ok(results)
    }
    /// Resolves the final list of scored points from the intermediate results.
    ///
    /// Finalizes queries like fusion and mmr after collecting from all shards.
    /// For other kind of queries it just passes the results through.
    ///
    /// Handles offset and limit.
    async fn intermediates_to_final_list(
        &self,
        mut intermediates: Vec<Vec<ScoredPoint>>,
        request: &ShardQueryRequest,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ScoredPoint>> {
        // Destructure to make explicit which request fields matter here.
        let ShardQueryRequest {
            prefetches: _,
            query,
            filter: _,
            score_threshold,
            limit,
            offset,
            params: _,
            with_vector,
            with_payload: _,
        } = request;
        let result = match query.as_ref() {
            Some(ScoringQuery::Fusion(fusion)) => {
                // If the root query is a Fusion, the returned results correspond to each the prefetches.
                let mut fused = match fusion {
                    FusionInternal::RrfK(k) => rrf_scoring(intermediates, *k),
                    FusionInternal::Dbsf => score_fusion(intermediates, ScoreFusion::dbsf()),
                };
                // Fusion recomputes scores, so the threshold must be re-applied
                // to the fused (sorted) list here.
                if let Some(&score_threshold) = score_threshold.as_ref() {
                    fused = fused
                        .into_iter()
                        .take_while(|point| point.score >= score_threshold.0)
                        .collect();
                }
                fused
            }
            Some(ScoringQuery::Mmr(mmr)) => {
                // Re-rank the pooled candidates from all shards with maximal
                // marginal relevance.
                let points_with_vector = intermediates.into_iter().flatten();
                let collection_params = self.collection_config.read().await.params.clone();
                let search_runtime_handle = &self.search_runtime;
                let timeout = timeout.unwrap_or(self.shared_storage_config.search_timeout);
                let mut mmr_result = mmr_from_points_with_vector(
                    &collection_params,
                    points_with_vector,
                    mmr.clone(),
                    *limit,
                    search_runtime_handle,
                    timeout,
                    hw_measurement_acc,
                )
                .await?;
                // strip mmr vector if necessary — MMR needed the vector
                // internally even if the caller did not ask for it.
                match with_vector {
                    WithVector::Bool(false) => mmr_result.iter_mut().for_each(|p| {
                        p.vector.take();
                    }),
                    WithVector::Bool(true) => {}
                    WithVector::Selector(items) => {
                        if !items.contains(&mmr.using) {
                            mmr_result.iter_mut().for_each(|p| {
                                VectorStructInternal::take_opt(&mut p.vector, &mmr.using);
                            })
                        }
                    }
                };
                mmr_result
            }
            None
            | Some(ScoringQuery::Vector(_))
            | Some(ScoringQuery::OrderBy(_))
            | Some(ScoringQuery::Formula(_))
            | Some(ScoringQuery::Sample(_)) => {
                // Otherwise, it will be a list with a single list of scored points.
                debug_assert_eq!(intermediates.len(), 1);
                intermediates.pop().ok_or_else(|| {
                    CollectionError::service_error(
                        "Query response was expected to have one list of results.",
                    )
                })?
            }
        };
        // Offset and limit are applied here, after cross-shard merging.
        let result: Vec<ScoredPoint> = result.into_iter().skip(*offset).take(*limit).collect();
        Ok(result)
    }
    /// To be called on the user-responding instance. Resolves ids into vectors, and merges the results from local and remote shards.
    ///
    /// This function is used to query the collection. It will return a list of scored points.
    ///
    /// `collection_by_name` is used to look up other collections when requests
    /// reference vectors stored in a different (lookup) collection.
    pub async fn query_batch<'a, F, Fut>(
        &self,
        requests_batch: Vec<(CollectionQueryRequest, ShardSelectorInternal)>,
        collection_by_name: F,
        read_consistency: Option<ReadConsistency>,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>>
    where
        F: Fn(String) -> Fut,
        Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
    {
        let start = Instant::now();
        // Lift nested prefetches to root queries for vector resolution
        let resolver_requests = build_vector_resolver_queries(&requests_batch);
        // Build referenced vectors
        let ids_to_vectors = resolve_referenced_vectors_batch(
            &resolver_requests,
            self,
            collection_by_name,
            read_consistency,
            timeout,
            hw_measurement_acc.clone(),
        )
        .await?;
        // update timeout: subtract the time spent resolving vectors
        let timeout = timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
        // Check we actually fetched all referenced vectors from the resolver requests
        for (resolver_req, _) in &resolver_requests {
            for point_id in resolver_req.get_referenced_point_ids() {
                let lookup_collection = resolver_req.get_lookup_collection();
                if ids_to_vectors.get(lookup_collection, point_id).is_none() {
                    return Err(CollectionError::PointNotFound {
                        missed_point_id: point_id,
                    });
                }
            }
        }
        // Group requests by their shard selector, dispatching each distinct
        // selector as a single `do_query_batch` call.
        let futures = batch_requests::<
            (CollectionQueryRequest, ShardSelectorInternal),
            ShardSelectorInternal,
            Vec<ShardQueryRequest>,
            Vec<_>,
        >(
            requests_batch,
            |(_req, shard)| shard,
            |(req, _), acc| {
                req.try_into_shard_request(&self.id, &ids_to_vectors)
                    .map(|shard_req| {
                        acc.push(shard_req);
                    })
            },
            |shard_selection, shard_requests, futures| {
                if shard_requests.is_empty() {
                    return Ok(());
                }
                futures.push(self.do_query_batch(
                    shard_requests,
                    read_consistency,
                    shard_selection,
                    timeout,
                    hw_measurement_acc.clone(),
                ));
                Ok(())
            },
        )?;
        // Concatenate the grouped responses back into one flat result list.
        let results = future::try_join_all(futures)
            .await?
            .into_iter()
            .flatten()
            .collect();
        Ok(results)
    }
    /// To be called on the remote instance. Only used for the internal service.
    ///
    /// If the root query is a Fusion, the returned results correspond to each the prefetches.
    /// Otherwise, it will be a list with a single list of scored points.
    pub async fn query_batch_internal(
        &self,
        requests: Vec<ShardQueryRequest>,
        shard_selection: &ShardSelectorInternal,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>> {
        let requests_arc = Arc::new(requests);
        // Results from all shards
        // Shape: [num_shards, batch_size, num_intermediate_results, num_points]
        let all_shards_results = self
            .batch_query_shards_concurrently(
                Arc::clone(&requests_arc),
                None,
                shard_selection,
                timeout,
                hw_measurement_acc,
            )
            .await?;
        // Merge per request, keeping the intermediate result lists separate
        // (no fusion/MMR finalization happens on this instance).
        let merged_f = transposed_iter(all_shards_results)
            .zip(requests_arc.iter())
            .map(|(shards_results, request)| async {
                // shards_results shape: [num_shards, num_intermediate_results, num_points]
                self.merge_intermediate_results_from_shards(request, shards_results)
                    .await
            });
        let merged = futures::future::try_join_all(merged_f).await?;
        Ok(merged)
    }
/// Find best result across last results of all shards.
/// Presence of the worst result in final result means that there could be other results
/// of that shard that could be included in the final result.
/// Used to check undersampling.
fn get_best_last_shard_result(
shard_results: &[Vec<ScoredPoint>],
order: Order,
) -> Option<ScoredPoint> {
shard_results
.iter()
.filter_map(|shard_result| shard_result.last().cloned())
.max_by(|a, b| match order {
Order::LargeBetter => ScoredPointTies(a).cmp(&ScoredPointTies(b)),
Order::SmallBetter => ScoredPointTies(a).cmp(&ScoredPointTies(b)).reverse(),
})
}
/// Check that worst result of the shard in not present in the final result.
fn check_undersampling(
&self,
worst_merged_point: &ScoredPoint,
best_last_result: &ScoredPoint,
order: Order,
) {
// Merged point should be better than the best last result.
let is_properly_sampled = match order {
Order::LargeBetter => {
ScoredPointTies(worst_merged_point) > ScoredPointTies(best_last_result)
}
Order::SmallBetter => {
ScoredPointTies(worst_merged_point) < ScoredPointTies(best_last_result)
}
};
if !is_properly_sampled {
log::debug!(
"Undersampling detected. Collection: {}, Best last shard score: {}, Worst merged score: {}",
self.id,
best_last_result.score,
worst_merged_point.score
);
}
}
    /// Merges the results in each shard for each intermediate query.
    /// ```text
    /// [ [shard1_result1, shard1_result2],
    ///   [shard2_result1, shard2_result2] ]
    ///
    /// = [merged_result1, merged_result2]
    /// ```
    async fn merge_intermediate_results_from_shards(
        &self,
        request: &ShardQueryRequest,
        all_shards_results: Vec<ShardQueryResponse>,
    ) -> CollectionResult<ShardQueryResponse> {
        let query_infos = intermediate_query_infos(request);
        let results_len = query_infos.len();
        let mut results = ShardQueryResponse::with_capacity(results_len);
        // Every shard is expected to produce one result list per intermediate query.
        debug_assert!(
            all_shards_results
                .iter()
                .all(|shard_results| shard_results.len() == results_len)
        );
        let collection_params = self.collection_config.read().await.params.clone();
        // Shape: [num_internal_queries, num_shards, num_scored_points]
        let all_shards_result_by_transposed = transposed_iter(all_shards_results);
        for (query_info, shards_results) in
            query_infos.into_iter().zip(all_shards_result_by_transposed)
        {
            // `shards_results` shape: [num_shards, num_scored_points]
            let order =
                shard_query::query_result_order(query_info.scoring_query, &collection_params)?;
            let number_of_shards = shards_results.len();
            // Equivalent to:
            //
            // shards_results
            //     .into_iter()
            //     .kmerge_by(match order {
            //         Order::LargeBetter => |a, b| ScoredPointTies(a) > ScoredPointTies(b),
            //         Order::SmallBetter => |a, b| ScoredPointTies(a) < ScoredPointTies(b),
            //     })
            //
            // if the `kmerge_by` function were able to work with reference predicates.
            // Either::Left and Either::Right are used to allow type inference to work.
            //
            let intermediate_result = if let Some(order) = order {
                // Remember each shard's cut-off point so undersampling can be
                // detected after merging.
                let best_last_result = Self::get_best_last_shard_result(&shards_results, order);
                let merged: Vec<_> = match order {
                    Order::LargeBetter => Either::Left(
                        shards_results
                            .into_iter()
                            .kmerge_by(|a, b| ScoredPointTies(a) > ScoredPointTies(b)),
                    ),
                    Order::SmallBetter => Either::Right(
                        shards_results
                            .into_iter()
                            .kmerge_by(|a, b| ScoredPointTies(a) < ScoredPointTies(b)),
                    ),
                }
                .dedup()
                .take(query_info.take)
                .collect();
                // Prevents undersampling warning in case there are not enough data to merge.
                let is_enough = merged.len() == query_info.take;
                if let Some(best_last_result) = best_last_result
                    && number_of_shards > 1
                    && is_enough
                {
                    let worst_merged_point = merged.last();
                    if let Some(worst_merged_point) = worst_merged_point {
                        self.check_undersampling(worst_merged_point, &best_last_result, order);
                    }
                }
                merged
            } else {
                // If the order is not defined, it is a random query. Take from all shards randomly.
                let mut rng = rand::rng();
                shards_results
                    .into_iter()
                    .kmerge_by(|_, _| rng.random_bool(0.5))
                    .unique_by(|point| point.id)
                    .take(query_info.take)
                    .collect()
            };
            results.push(intermediate_result);
        }
        Ok(results)
    }
}
/// Returns a list of the query that corresponds to each of the results in each shard.
///
/// Example: `[info1, info2, info3]` corresponds to `[result1, result2, result3]` of each shard
fn intermediate_query_infos(request: &ShardQueryRequest) -> Vec<IntermediateQueryInfo<'_>> {
    match request.query.as_ref() {
        // Fusion propagates one intermediate result per prefetch.
        Some(ScoringQuery::Fusion(_)) => request
            .prefetches
            .iter()
            .map(|prefetch| IntermediateQueryInfo {
                scoring_query: prefetch.query.as_ref(),
                take: prefetch.limit,
            })
            .collect(),
        // MMR expects one pooled candidate list of `candidates_limit` points.
        Some(ScoringQuery::Mmr(mmr)) => vec![IntermediateQueryInfo {
            scoring_query: request.query.as_ref(),
            take: mmr.candidates_limit,
        }],
        // Every other query yields exactly the root result.
        None
        | Some(ScoringQuery::Vector(_))
        | Some(ScoringQuery::OrderBy(_))
        | Some(ScoringQuery::Formula(_))
        | Some(ScoringQuery::Sample(_)) => vec![IntermediateQueryInfo {
            scoring_query: request.query.as_ref(),
            take: request.offset + request.limit,
        }],
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/clean.rs | lib/collection/src/collection/clean.rs | use std::collections::HashMap;
use std::ops::Deref;
use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};
use ahash::AHashMap;
use cancel::{CancellationToken, DropGuard};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use parking_lot::RwLock;
use segment::types::ExtendedPointId;
use tokio::sync::watch::{Receiver, Sender};
use tokio::task::JoinHandle;
use super::Collection;
use crate::operations::types::{CollectionError, CollectionResult, UpdateResult, UpdateStatus};
use crate::operations::{CollectionUpdateOperations, OperationWithClockTag};
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::LockedShardHolder;
use crate::telemetry::{
ShardCleanStatusFailedTelemetry, ShardCleanStatusProgressTelemetry, ShardCleanStatusTelemetry,
};
/// Number of points the delete background task will delete in each iteration.
///
/// This number is arbitrary and seemed 'good enough' in local testing. It is not too low to
/// prevent having a huge amount of iterations, nor is it too large causing latency spikes in user
/// operations.
///
/// Note: the scroll requests one extra point on top of this to detect whether
/// another batch follows.
const CLEAN_BATCH_SIZE: usize = 5_000;
/// A collection of local shard clean tasks
///
/// Manages tasks for shards and allows easily awaiting a completion state.
///
/// These tasks are not persisted in any way and are lost on restart. In case of resharding,
/// cluster manager will take care of calling the task again and again until it eventually
/// completes. Once it completes it does not have to be run again on restart.
#[derive(Default)]
pub(super) struct ShardCleanTasks {
    /// Per-shard clean task state; behind `Arc<RwLock<..>>` so tasks can be
    /// inspected and invalidated concurrently.
    tasks: Arc<RwLock<AHashMap<ShardId, ShardCleanTask>>>,
}
impl ShardCleanTasks {
    /// Clean a shard and await for the operation to finish
    ///
    /// Creates a new task if none is going, the task failed or if the task was invalidated. Joins
    /// the existing task if one is currently ongoing or if it successfully finished.
    ///
    /// If `wait` is `false`, the function will return immediately after creating a new task with
    /// the `Acknowledged` status. If `wait` is `true` this function will await at most `timeout`
    /// for the task to finish. If the task is not completed within the timeout the `Acknowledged`
    /// status is returned.
    ///
    /// To probe for completeness this function must be called again repeatedly until it returns
    /// the `Completed` status.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe. It either will or will not spawn a task if cancelled, and
    /// will not abort any ongoing task.
    async fn clean_and_await(
        &self,
        shards_holder: &Arc<LockedShardHolder>,
        shard_id: ShardId,
        wait: bool,
        timeout: Option<Duration>,
    ) -> CollectionResult<UpdateStatus> {
        // Upgradable read: the common path only reads the task map; we upgrade
        // to a write lock only when a new task must be inserted.
        let mut tasks = self.tasks.upgradable_read();
        // Await existing task if not cancelled or failed
        if let Some(task) = tasks
            .get(&shard_id)
            .filter(|task| !task.is_cancelled_or_failed())
        {
            let receiver = task.status.clone();
            // Release the lock before awaiting so other shards are not blocked.
            drop(tasks);
            return Self::await_task(receiver, wait, timeout).await;
        }
        // Create and await new task (replacing any cancelled/failed entry)
        let receiver = tasks.with_upgraded(|tasks| {
            let task = ShardCleanTask::new(shards_holder, shard_id);
            let receiver = task.status.clone();
            tasks.insert(shard_id, task);
            receiver
        });
        drop(tasks);
        Self::await_task(receiver, wait, timeout).await
    }
    /// Await for an ongoing task to finish by its status receiver
    ///
    /// Returns `Completed` when the task reports `Done`, `Acknowledged` when
    /// not waiting or when the timeout elapsed, and an error when the task
    /// failed, was cancelled, or its status channel was dropped.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe.
    async fn await_task(
        mut receiver: Receiver<ShardCleanStatus>,
        wait: bool,
        timeout: Option<Duration>,
    ) -> CollectionResult<UpdateStatus> {
        let start = Instant::now();
        loop {
            // Inspect the latest published status, marking it as seen so that
            // `changed()` below only wakes on a *new* status.
            match receiver.borrow_and_update().deref() {
                ShardCleanStatus::Started => {}
                ShardCleanStatus::Progress { .. } => {}
                ShardCleanStatus::Done => return Ok(UpdateStatus::Completed),
                ShardCleanStatus::Failed { reason } => {
                    return Err(CollectionError::service_error(format!(
                        "Failed to clean shard points: {reason}",
                    )));
                }
                ShardCleanStatus::Cancelled => {
                    return Err(CollectionError::service_error(
                        "Failed to clean shard points due to cancellation, please try again",
                    ));
                }
            }
            if !wait {
                return Ok(UpdateStatus::Acknowledged);
            }
            // Wait for the next status change, bounded by the remaining timeout.
            let result = if let Some(timeout) = timeout {
                tokio::time::timeout(timeout.saturating_sub(start.elapsed()), receiver.changed())
                    .await
            } else {
                Ok(receiver.changed().await)
            };
            match result {
                // Status updated, loop again to check it another time
                Ok(Ok(_)) => (),
                // Channel dropped, return error
                Ok(Err(_)) => {
                    return Err(CollectionError::service_error(
                        "Failed to clean shard points, notification channel dropped",
                    ));
                }
                // Timeout elapsed, acknowledge so the client can probe again later
                Err(_) => return Ok(UpdateStatus::Acknowledged),
            }
        }
    }
    /// Invalidate shard cleaning operations for the given shards, marking them as dirty
    ///
    /// Aborts any ongoing cleaning tasks and waits until all tasks are stopped.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe. If cancelled, we may not actually await on all tasks to
    /// finish but they will always still abort in the background.
    pub(super) async fn invalidate(&self, shard_ids: impl IntoIterator<Item = ShardId>) {
        // Take relevant tasks out of task list, abort and take handles.
        // The write lock is released before awaiting the handles.
        let handles = {
            let mut tasks = self.tasks.write();
            shard_ids
                .into_iter()
                .filter_map(|shard_id| tasks.remove(&shard_id))
                .map(ShardCleanTask::abort)
                .collect::<Vec<_>>()
        };
        // Await all tasks to finish
        for handle in handles {
            if let Err(err) = handle.await {
                log::error!("Failed to join shard clean task: {err}");
            }
        }
    }
/// List all shards we have any status for
///
/// Only includes shards we've triggered cleaning for. On restart, or when invalidating shards,
/// items are removed from the list.
pub fn statuses(&self) -> AHashMap<ShardId, ShardCleanStatus> {
self.tasks
.read()
.iter()
.map(|(shard_id, task)| (*shard_id, task.status.borrow().clone()))
.collect()
}
}
/// A background task for cleaning a shard
///
/// The task will delete points that don't belong in the shard according to the hash ring. This is
/// used in context of resharding, where points are transferred to different shards.
///
/// Dropping this struct cancels the background task via the contained drop guard.
pub(super) struct ShardCleanTask {
    /// Handle of the clean task
    handle: JoinHandle<()>,
    /// Watch channel with current status of the task
    status: Receiver<ShardCleanStatus>,
    /// Cancellation token drop guard, cancels the task if this is dropped
    cancel: DropGuard,
}
impl ShardCleanTask {
    /// Create a new shard clean task and immediately execute it
    pub fn new(shards_holder: &Arc<LockedShardHolder>, shard_id: ShardId) -> Self {
        let (sender, receiver) = tokio::sync::watch::channel(ShardCleanStatus::Started);
        // Hold only a weak reference so the spawned task does not keep the
        // shard holder alive.
        let shard_holder = Arc::downgrade(shards_holder);
        let cancel = CancellationToken::default();
        let task = tokio::task::spawn(Self::task(shard_holder, shard_id, sender, cancel.clone()));
        ShardCleanTask {
            handle: task,
            status: receiver,
            // Dropping this guard (e.g. on invalidation) cancels the task.
            cancel: cancel.drop_guard(),
        }
    }
pub fn is_cancelled_or_failed(&self) -> bool {
matches!(
self.status.borrow().deref(),
ShardCleanStatus::Cancelled | ShardCleanStatus::Failed { .. }
)
}
    /// Abort the background task, returning its join handle so the caller can
    /// await its termination.
    fn abort(self) -> JoinHandle<()> {
        // Explicitly cancel clean task
        self.cancel.disarm().cancel();
        self.handle
    }
    /// Body of the spawned background task: drives the clean loop until it
    /// completes, fails or is cancelled, and publishes the terminal status.
    async fn task(
        shard_holder: Weak<LockedShardHolder>,
        shard_id: ShardId,
        sender: Sender<ShardCleanStatus>,
        cancel: CancellationToken,
    ) {
        let task = clean_task(shard_holder, shard_id, sender.clone());
        let status = match cancel.run_until_cancelled(task).await {
            Some(Ok(())) => {
                log::trace!("Background task to clean shard {shard_id} is completed");
                ShardCleanStatus::Done
            }
            Some(Err(err)) => {
                log::error!("Background task to clean shard {shard_id} failed: {err}");
                ShardCleanStatus::Failed {
                    reason: err.to_string(),
                }
            }
            None => {
                log::trace!("Background task to clean shard {shard_id} is cancelled");
                ShardCleanStatus::Cancelled
            }
        };
        // Ignore channel dropped error, then there's no one listening anyway
        let _ = sender.send(status);
    }
}
/// Background loop that deletes all points from the local shard which no
/// longer belong to it according to the current hash ring (used during
/// resharding).
///
/// Scrolls the shard's point IDs in batches of `CLEAN_BATCH_SIZE`, filters
/// them against the hash ring, deletes the mismatching ones, and reports
/// cumulative progress through `sender` after every batch.
///
/// # Errors
///
/// Fails if the shard holder was dropped, the shard disappeared or is not
/// local, or if scrolling/deleting points fails.
async fn clean_task(
    shard_holder: Weak<LockedShardHolder>,
    shard_id: ShardId,
    sender: Sender<ShardCleanStatus>,
) -> CollectionResult<()> {
    // Do not measure the hardware usage of these deletes as clean the shard is always considered an internal operation
    // users should not be billed for.
    let mut offset = None;
    let mut deleted_points = 0;
    loop {
        // Get shard
        let Some(shard_holder) = shard_holder.upgrade() else {
            return Err(CollectionError::not_found("Shard holder dropped"));
        };
        let shard_holder = shard_holder.read().await;
        let Some(shard) = shard_holder.get_shard(shard_id) else {
            return Err(CollectionError::not_found(format!(
                "Shard {shard_id} not found",
            )));
        };
        if !shard.is_local().await {
            return Err(CollectionError::not_found(format!(
                "Shard {shard_id} is not a local shard",
            )));
        }
        // Scroll next batch of points; one extra point is requested to detect
        // whether another batch follows.
        let mut ids = match shard
            .local_scroll_by_id(
                offset,
                CLEAN_BATCH_SIZE + 1,
                &false.into(),
                &false.into(),
                None,
                None,
                None,
                HwMeasurementAcc::disposable(), // Internal operation, no measurement needed!
            )
            .await
        {
            Ok(batch) => batch.into_iter().map(|entry| entry.id).collect::<Vec<_>>(),
            Err(err) => {
                return Err(CollectionError::service_error(format!(
                    "Failed to read points to delete from shard: {err}",
                )));
            }
        };
        // Update offset for next batch: the popped extra point is where the next scroll starts
        offset = (ids.len() > CLEAN_BATCH_SIZE).then(|| ids.pop().unwrap());
        let last_batch = offset.is_none();
        // Filter list of point IDs after scrolling, delete points that don't belong in this shard
        // Checking the hash ring to determine if a point belongs in the shard is very expensive.
        // We scroll all point IDs and only filter points by the hash ring after scrolling on
        // purpose, this way we check each point ID against the hash ring only once. Naively we
        // might pass a hash ring filter into the scroll operation itself, but that will make it
        // significantly slower, because then we'll do the expensive hash ring check on every
        // point, in every segment.
        // See: <https://github.com/qdrant/qdrant/pull/6085>
        let hashring = shard_holder.hash_ring_router(shard_id).ok_or_else(|| {
            CollectionError::service_error(format!(
                "Shard {shard_id} cannot be cleaned, failed to get shard hash ring"
            ))
        })?;
        let ids: Vec<ExtendedPointId> = ids
            .into_iter()
            // TODO: run test with this inverted?
            .filter(|id| !hashring.is_in_shard(id, shard_id))
            .collect();
        // Count only points we are actually going to delete. Previously the
        // whole scanned batch was counted (before the hash ring filter), which
        // over-reported `Progress { deleted_points }` whenever points were kept.
        deleted_points += ids.len();
        // Delete points from local shard
        let delete_operation =
            OperationWithClockTag::from(CollectionUpdateOperations::PointOperation(
                crate::operations::point_ops::PointOperations::DeletePoints { ids },
            ));
        if let Err(err) = shard
            .update_local(
                delete_operation,
                last_batch,
                HwMeasurementAcc::disposable(),
                false,
            )
            .await
        {
            return Err(CollectionError::service_error(format!(
                "Failed to delete points from shard: {err}",
            )));
        }
        let _ = sender.send(ShardCleanStatus::Progress { deleted_points });
        // Finish if this was the last batch
        if last_batch {
            return Ok(());
        }
    }
}
impl Collection {
    /// Trigger (or probe) cleaning of a local shard, deleting points that no
    /// longer belong to it according to the hash ring.
    ///
    /// See [`ShardCleanTasks::clean_and_await`] for `wait`/`timeout` semantics.
    ///
    /// # Errors
    ///
    /// Fails if the shard does not exist locally or if the clean task failed
    /// or was cancelled.
    pub async fn cleanup_local_shard(
        &self,
        shard_id: ShardId,
        wait: bool,
        timeout: Option<Duration>,
    ) -> CollectionResult<UpdateResult> {
        // Ensure we have this local shard
        {
            let shard_holder = self.shards_holder.read().await;
            let Some(shard) = shard_holder.get_shard(shard_id) else {
                return Err(CollectionError::not_found(format!(
                    "Shard {shard_id} not found",
                )));
            };
            if !shard.is_local().await {
                return Err(CollectionError::not_found(format!(
                    "Shard {shard_id} is not a local shard",
                )));
            }
        }
        let status = self
            .shard_clean_tasks
            .clean_and_await(&self.shards_holder, shard_id, wait, timeout)
            .await?;
        // Clean tasks are not regular update operations, so there is no
        // operation id or clock tag to report.
        Ok(UpdateResult {
            operation_id: None,
            status,
            clock_tag: None,
        })
    }
    /// Invalidate shard cleaning operations for the given shards
    ///
    /// Aborts any ongoing cleaning tasks and waits until all tasks are stopped.
    ///
    /// This does nothing if the given shards have no known status or do not exist.
    ///
    /// # Cancel safety
    ///
    /// This function is cancel safe. If cancelled, we may not actually await on all tasks to
    /// finish but they will always still abort in the background.
    pub(super) async fn invalidate_clean_local_shards(
        &self,
        shard_ids: impl IntoIterator<Item = ShardId>,
    ) {
        // Thin delegation so callers don't reach into `shard_clean_tasks` directly.
        self.shard_clean_tasks.invalidate(shard_ids).await;
    }
pub fn clean_local_shards_statuses(&self) -> HashMap<ShardId, ShardCleanStatusTelemetry> {
self.shard_clean_tasks
.statuses()
.into_iter()
.map(|(shard_id, status)| (shard_id, status.into()))
.collect()
}
}
/// Lifecycle status of a shard clean task, published over a watch channel.
#[derive(Debug, Clone)]
pub(super) enum ShardCleanStatus {
    /// Task spawned, no batch processed yet.
    Started,
    /// Task is running; carries the running delete counter.
    Progress { deleted_points: usize },
    /// Task finished successfully.
    Done,
    /// Task failed with the given reason; a later call creates a new task.
    Failed { reason: String },
    /// Task was cancelled (e.g. invalidated); a later call creates a new task.
    Cancelled,
}
// One-to-one mapping of the internal clean status into its telemetry representation.
impl From<ShardCleanStatus> for ShardCleanStatusTelemetry {
    fn from(status: ShardCleanStatus) -> Self {
        match status {
            ShardCleanStatus::Started => ShardCleanStatusTelemetry::Started,
            ShardCleanStatus::Progress { deleted_points } => {
                ShardCleanStatusTelemetry::Progress(ShardCleanStatusProgressTelemetry {
                    deleted_points,
                })
            }
            ShardCleanStatus::Done => ShardCleanStatusTelemetry::Done,
            ShardCleanStatus::Failed { reason } => {
                ShardCleanStatusTelemetry::Failed(ShardCleanStatusFailedTelemetry { reason })
            }
            ShardCleanStatus::Cancelled => ShardCleanStatusTelemetry::Cancelled,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/collection_ops.rs | lib/collection/src/collection/collection_ops.rs | use std::cmp::{self, Reverse};
use std::sync::Arc;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::{TryStreamExt as _, future};
use lazy_static::lazy_static;
use segment::types::{Payload, QuantizationConfig, StrictModeConfig};
use semver::Version;
use super::Collection;
use crate::collection_manager::optimizers::IndexingProgressViews;
use crate::operations::config_diff::*;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::*;
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::replica_set::Change;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::PeerId;
lazy_static! {
    /// Minimum node version from which the fixed transfer-abort-on-shard-drop logic is enabled.
    ///
    /// Old logic for aborting shard transfers on shard drop, had a bug: it dropped all transfers
    /// regardless of the shard id. In order to keep consensus consistent, we can only
    /// enable new fixed logic once cluster fully switched to this version.
    /// Otherwise, some node might follow old logic and some - new logic.
    ///
    /// See: <https://github.com/qdrant/qdrant/pull/7792>
    pub(super) static ref ABORT_TRANSFERS_ON_SHARD_DROP_FIX_FROM_VERSION: Version = Version::parse("1.16.3-dev").unwrap();
}
impl Collection {
/// Updates collection params:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_params_from_diff(
&self,
params_diff: CollectionParamsDiff,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
config.params = config.params.update(¶ms_diff);
}
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
/// Updates HNSW config:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_hnsw_config_from_diff(
&self,
hnsw_config_diff: HnswConfigDiff,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
config.hnsw_config = config.hnsw_config.update(&hnsw_config_diff);
}
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
/// Updates vectors config:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_vectors_from_diff(
&self,
update_vectors_diff: &VectorsConfigDiff,
) -> CollectionResult<()> {
let mut config = self.collection_config.write().await;
update_vectors_diff.check_vector_names(&config.params)?;
config
.params
.update_vectors_from_diff(update_vectors_diff)?;
config.save(&self.path)?;
Ok(())
}
/// Updates sparse vectors config:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_sparse_vectors_from_other(
&self,
update_vectors_diff: &SparseVectorsConfig,
) -> CollectionResult<()> {
let mut config = self.collection_config.write().await;
update_vectors_diff.check_vector_names(&config.params)?;
config
.params
.update_sparse_vectors_from_other(update_vectors_diff)?;
config.save(&self.path)?;
Ok(())
}
/// Updates shard optimization params:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_optimizer_params_from_diff(
&self,
optimizer_config_diff: OptimizersConfigDiff,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
config.optimizer_config = config.optimizer_config.update(&optimizer_config_diff);
}
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
/// Updates shard optimization params: Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_optimizer_params(
&self,
optimizer_config: OptimizersConfig,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
config.optimizer_config = optimizer_config;
}
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
/// Updates quantization config:
/// Saves new params on disk
///
/// After this, `recreate_optimizers_blocking` must be called to create new optimizers using
/// the updated configuration.
pub async fn update_quantization_config_from_diff(
&self,
quantization_config_diff: QuantizationConfigDiff,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
match quantization_config_diff {
QuantizationConfigDiff::Scalar(scalar) => {
config
.quantization_config
.replace(QuantizationConfig::Scalar(scalar));
}
QuantizationConfigDiff::Product(product) => {
config
.quantization_config
.replace(QuantizationConfig::Product(product));
}
QuantizationConfigDiff::Binary(binary) => {
config
.quantization_config
.replace(QuantizationConfig::Binary(binary));
}
QuantizationConfigDiff::Disabled(_) => {
config.quantization_config = None;
}
}
}
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
pub async fn update_metadata(&self, metadata: Payload) -> CollectionResult<()> {
let mut collection_config_guard: tokio::sync::RwLockWriteGuard<
'_,
crate::config::CollectionConfigInternal,
> = self.collection_config.write().await;
if let Some(current_metadata) = collection_config_guard.metadata.as_mut() {
current_metadata.merge(&metadata);
} else {
collection_config_guard.metadata = Some(metadata);
}
drop(collection_config_guard);
self.collection_config.read().await.save(&self.path)?;
Ok(())
}
/// Updates the strict mode configuration and saves it to disk.
pub async fn update_strict_mode_config(
&self,
strict_mode_diff: StrictModeConfig,
) -> CollectionResult<()> {
{
let mut config = self.collection_config.write().await;
if let Some(current_config) = config.strict_mode_config.as_mut() {
*current_config = current_config.update(&strict_mode_diff);
} else {
config.strict_mode_config = Some(strict_mode_diff);
}
}
// update collection config
self.collection_config.read().await.save(&self.path)?;
// apply config change to all shards
let mut shard_holder = self.shards_holder.write().await;
let updates = shard_holder
.all_shards_mut()
.map(|replica_set| replica_set.on_strict_mode_config_update());
future::try_join_all(updates).await?;
Ok(())
}
    /// Handle replica changes
    ///
    /// Removes replicas from replica sets (currently `Change::Remove` is the only
    /// supported change). Returns `BadRequest` if a change is invalid; earlier
    /// changes in the list may already have been applied at that point.
    pub async fn handle_replica_changes(
        &self,
        replica_changes: Vec<Change>,
    ) -> CollectionResult<()> {
        if replica_changes.is_empty() {
            return Ok(());
        }
        let shard_holder = self.shards_holder.read().await;
        for change in replica_changes {
            let (shard_id, peer_id) = match change {
                Change::Remove(shard_id, peer_id) => (shard_id, peer_id),
            };
            let Some(replica_set) = shard_holder.get_shard(shard_id) else {
                return Err(CollectionError::BadRequest {
                    description: format!("Shard {} of {} not found", shard_id, self.name()),
                });
            };
            let peers = replica_set.peers();
            if !peers.contains_key(&peer_id) {
                return Err(CollectionError::BadRequest {
                    description: format!("Peer {peer_id} has no replica of shard {shard_id}"),
                });
            }
            // Check that we are not removing the *last* replica or the last *active* replica
            //
            // `is_last_active_replica` counts both `Active` and `ReshardingScaleDown` replicas!
            if peers.len() == 1 || replica_set.is_last_source_of_truth_replica(peer_id) {
                return Err(CollectionError::BadRequest {
                    description: format!(
                        "Shard {shard_id} must have at least one active replica after removing {peer_id}",
                    ),
                });
            }
            // Only use the fixed transfer-selection logic once the whole cluster
            // runs a version that has the fix, to keep consensus consistent
            let all_nodes_fixed_cancellation = self
                .channel_service
                .all_peers_at_version(&ABORT_TRANSFERS_ON_SHARD_DROP_FIX_FROM_VERSION);
            // Collect shard transfers related to removed shard...
            let transfers = if all_nodes_fixed_cancellation {
                shard_holder.get_related_transfers(peer_id, shard_id)
            } else {
                // This is the old buggy logic, but we have to keep it
                // for maintaining consistency in a cluster with mixed versions.
                shard_holder
                    .get_transfers(|transfer| transfer.from == peer_id || transfer.to == peer_id)
            };
            // ...and cancel transfer tasks and remove transfers from internal state
            for transfer in transfers {
                self.abort_shard_transfer_and_resharding(transfer.key(), Some(&shard_holder))
                    .await?;
            }
            replica_set.remove_peer(peer_id).await?;
            // We can't remove the last replica of a shard, so this should prevent removing
            // resharding shard, because it's always the *only* replica.
            //
            // And if we remove some other shard, that is currently doing resharding transfer,
            // the transfer should be cancelled (see the block right above this comment),
            // so no special handling is needed.
        }
        Ok(())
    }
/// Recreate the optimizers on all shards for this collection
///
/// This will stop existing optimizers, and start new ones with new configurations.
///
/// # Blocking
///
/// Partially blocking. Stopping existing optimizers is blocking. Starting new optimizers is
/// not blocking.
///
/// ## Cancel safety
///
/// This function is cancel safe, and will always run to completion.
pub async fn recreate_optimizers_blocking(&self) -> CollectionResult<()> {
let shards_holder = self.shards_holder.clone();
tokio::task::spawn(async move {
let shard_holder = shards_holder.read().await;
let updates = shard_holder
.all_shards()
.map(|replica_set| replica_set.on_optimizer_config_update());
future::try_join_all(updates).await
})
.await??;
Ok(())
}
pub async fn strict_mode_config(&self) -> Option<StrictModeConfig> {
self.collection_config
.read()
.await
.strict_mode_config
.clone()
}
    /// Aggregate `CollectionInfo` over the shards matched by `shard_selection`.
    ///
    /// Statuses are combined via `cmp::max`, counters are summed, and payload
    /// schemas are merged per key. If no shard matches, an empty info built from
    /// the collection config is returned.
    pub async fn info(
        &self,
        shard_selection: &ShardSelectorInternal,
    ) -> CollectionResult<CollectionInfo> {
        let shards_holder = self.shards_holder.read().await;
        let shards = shards_holder.select_shards(shard_selection)?;
        let mut requests: futures::stream::FuturesUnordered<_> = shards
            .into_iter()
            // `info` requests received through internal gRPC *always* have `shard_selection`
            .map(|(shard, _shard_key)| shard.info(shard_selection.is_shard_id()))
            .collect();
        // Seed the accumulator with the first response, or an empty info if no shards matched
        let mut info = match requests.try_next().await? {
            Some(info) => info,
            None => CollectionInfo::empty(self.collection_config.read().await.clone()),
        };
        while let Some(response) = requests.try_next().await? {
            info.status = cmp::max(info.status, response.status);
            info.optimizer_status = cmp::max(info.optimizer_status, response.optimizer_status);
            // Counts are `Option`s: `zip` collapses the sum to `None` once any shard reports `None`
            info.indexed_vectors_count = info
                .indexed_vectors_count
                .zip(response.indexed_vectors_count)
                .map(|(a, b)| a + b);
            info.points_count = info
                .points_count
                .zip(response.points_count)
                .map(|(a, b)| a + b);
            info.segments_count += response.segments_count;
            // Merge payload schemas, summing per-key point counts
            for (key, response_schema) in response.payload_schema {
                info.payload_schema
                    .entry(key)
                    .and_modify(|info_schema| info_schema.points += response_schema.points)
                    .or_insert(response_schema);
            }
        }
        Ok(info)
    }
    /// Collect cluster-level information about this collection's shards, replicas,
    /// transfers and resharding operations, as seen from `peer_id`.
    pub async fn cluster_info(&self, peer_id: PeerId) -> CollectionResult<CollectionClusterInfo> {
        let shards_holder = self.shards_holder.read().await;
        let shard_count = shards_holder.len();
        let mut local_shards = Vec::new();
        let mut remote_shards = Vec::new();
        let count_request = Arc::new(CountRequestInternal {
            filter: None,
            exact: false, // Don't need exact count of unique ids here, only size estimation
        });
        let shard_to_key = shards_holder.get_shard_id_to_key_mapping();
        // extract shards info
        for (shard_id, replica_set) in shards_holder.get_shards() {
            let peers = replica_set.peers();
            if replica_set.has_local_shard().await {
                // A local replica with no known state is reported as `Dead`
                let state = peers
                    .get(&replica_set.this_peer_id())
                    .copied()
                    .unwrap_or(ReplicaState::Dead);
                // Cluster info is explicitly excluded from hardware measurements
                // So that we can monitor hardware usage without interference
                let hw_acc = HwMeasurementAcc::disposable();
                // Count failures are tolerated: fall back to the default (no count)
                let count_result = replica_set
                    .count_local(count_request.clone(), None, hw_acc)
                    .await
                    .unwrap_or_default();
                let points_count = count_result.map(|x| x.count).unwrap_or(0);
                local_shards.push(LocalShardInfo {
                    shard_id,
                    points_count,
                    state,
                    shard_key: shard_to_key.get(&shard_id).cloned(),
                })
            }
            // All other peers of this replica set are reported as remote shards
            for (peer_id, state) in replica_set.peers() {
                if peer_id == replica_set.this_peer_id() {
                    continue;
                }
                remote_shards.push(RemoteShardInfo {
                    shard_id,
                    peer_id,
                    state,
                    shard_key: shard_to_key.get(&shard_id).cloned(),
                });
            }
        }
        let shard_transfers =
            shards_holder.get_shard_transfer_info(&*self.transfer_tasks.lock().await);
        let resharding_operations = shards_holder.get_resharding_operations_info();
        // sort by shard_id
        local_shards.sort_by_key(|k| k.shard_id);
        remote_shards.sort_by_key(|k| k.shard_id);
        let info = CollectionClusterInfo {
            peer_id,
            shard_count,
            local_shards,
            remote_shards,
            shard_transfers,
            resharding_operations,
        };
        Ok(info)
    }
    /// Report ongoing optimizer tasks across all local shards, plus up to
    /// `completed_limit` completed tasks when a limit is given.
    pub async fn optimizations(
        &self,
        completed_limit: Option<usize>,
    ) -> CollectionResult<OptimizationsResponse> {
        let mut all_ongoing = Vec::new();
        // Only collect completed entries when the caller asked for them
        let mut all_completed = completed_limit.map(|_| Vec::new());
        let shards_holder = self.shards_holder.read().await;
        for (_shard_id, replica_set) in shards_holder.get_shards() {
            // Shards without an optimizer log (e.g. no local shard) are skipped
            let Some(log) = replica_set.optimizers_log().await else {
                continue;
            };
            let IndexingProgressViews { ongoing, completed } = log.lock().progress_views();
            all_ongoing.extend(ongoing);
            if let Some(all_completed) = all_completed.as_mut() {
                all_completed.extend(completed);
            }
        }
        // Sort - see `OptimizationsResponse` doc
        all_ongoing.sort_by_key(|v| Reverse(v.started_at()));
        if let Some(all_completed) = all_completed.as_mut() {
            all_completed.sort_by_key(|v| Reverse(v.started_at()));
            // Unwrap is ok because `all_completed` and `completed_limit`
            // either are both `Some` or both `None`.
            all_completed.truncate(completed_limit.unwrap());
        }
        let root = "Segment Optimizing";
        Ok(OptimizationsResponse {
            ongoing: all_ongoing.into_iter().map(|v| v.snapshot(root)).collect(),
            completed: all_completed.map(|c| c.into_iter().map(|v| v.snapshot(root)).collect()),
        })
    }
pub async fn print_warnings(&self) {
let warnings = self.collection_config.read().await.get_warnings();
for warning in warnings {
log::warn!("Collection {}: {}", self.name(), warning.message);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/payload_index_schema.rs | lib/collection/src/collection/payload_index_schema.rs | use std::path::{Path, PathBuf};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::save_on_disk::SaveOnDisk;
use segment::json_path::JsonPath;
use segment::types::{Filter, PayloadFieldSchema};
pub use shard::payload_index_schema::PayloadIndexSchema;
use crate::collection::Collection;
use crate::operations::types::{CollectionResult, UpdateResult};
use crate::operations::universal_query::formula::ExpressionInternal;
use crate::operations::{CollectionUpdateOperations, CreateIndex, FieldIndexOperations};
use crate::problems::unindexed_field;
pub const PAYLOAD_INDEX_CONFIG_FILE: &str = "payload_index.json";
impl Collection {
pub(crate) fn payload_index_file(collection_path: &Path) -> PathBuf {
collection_path.join(PAYLOAD_INDEX_CONFIG_FILE)
}
pub(crate) fn load_payload_index_schema(
collection_path: &Path,
) -> CollectionResult<SaveOnDisk<PayloadIndexSchema>> {
let payload_index_file = Self::payload_index_file(collection_path);
let schema: SaveOnDisk<PayloadIndexSchema> =
SaveOnDisk::load_or_init_default(payload_index_file)?;
Ok(schema)
}
pub async fn create_payload_index(
&self,
field_name: JsonPath,
field_schema: PayloadFieldSchema,
hw_acc: HwMeasurementAcc,
) -> CollectionResult<Option<UpdateResult>> {
// This function is called from consensus, so we use `wait = false`, because we can't afford
// to wait for the result as indexation may take a long time
self.create_payload_index_with_wait(field_name, field_schema, false, hw_acc)
.await
}
pub async fn create_payload_index_with_wait(
&self,
field_name: JsonPath,
field_schema: PayloadFieldSchema,
wait: bool,
hw_acc: HwMeasurementAcc,
) -> CollectionResult<Option<UpdateResult>> {
self.payload_index_schema.write(|schema| {
schema
.schema
.insert(field_name.clone(), field_schema.clone());
})?;
// This operation might be redundant, if we also create index as a regular collection op,
// but it looks better in long term to also have it here, so
// the creation of payload index may be eventually completely converted
// into the consensus operation
let create_index_operation = CollectionUpdateOperations::FieldIndexOperation(
FieldIndexOperations::CreateIndex(CreateIndex {
field_name,
field_schema: Some(field_schema),
}),
);
self.update_all_local(create_index_operation, wait, hw_acc)
.await
}
pub async fn drop_payload_index(
&self,
field_name: JsonPath,
) -> CollectionResult<Option<UpdateResult>> {
self.payload_index_schema.write(|schema| {
schema.schema.remove(&field_name);
})?;
let delete_index_operation = CollectionUpdateOperations::FieldIndexOperation(
FieldIndexOperations::DeleteIndex(field_name),
);
let result = self
.update_all_local(
delete_index_operation,
false,
HwMeasurementAcc::disposable(), // Unmeasured API
)
.await?;
Ok(result)
}
pub fn payload_key_index_schema(&self, key: &JsonPath) -> Option<PayloadFieldSchema> {
self.payload_index_schema.read().schema.get(key).cloned()
}
/// Returns an arbitrary payload key along with acceptable
/// schemas used by `filter` which can be indexed but currently is not.
/// If this function returns `None` all indexable keys in `filter` are indexed.
pub fn one_unindexed_key(
&self,
filter: &Filter,
) -> Option<(JsonPath, Vec<PayloadFieldSchema>)> {
one_unindexed_filter_key(&self.payload_index_schema.read(), filter)
}
pub fn one_unindexed_expression_key(
&self,
expr: &ExpressionInternal,
) -> Option<(JsonPath, Vec<PayloadFieldSchema>)> {
one_unindexed_expression_key(&self.payload_index_schema.read(), expr)
}
}
/// A filter or formula expression whose payload keys may lack an index.
enum PotentiallyUnindexed<'a> {
    Filter(&'a Filter),
    Expression(&'a ExpressionInternal),
}
/// Returns an arbitrary payload key with acceptable schemas
/// used by `filter` which can be indexed but currently is not.
/// If this function returns `None` all indexable keys in `filter` are indexed.
fn one_unindexed_key(
    schema: &PayloadIndexSchema,
    suspect: PotentiallyUnindexed<'_>,
) -> Option<(JsonPath, Vec<PayloadFieldSchema>)> {
    let mut extractor = unindexed_field::Extractor::new(&schema.schema);
    // Feed the suspect into the extractor
    match suspect {
        PotentiallyUnindexed::Filter(filter) => {
            extractor.update_from_filter_once(None, filter);
        }
        PotentiallyUnindexed::Expression(expression) => {
            extractor.update_from_expression(expression);
        }
    }
    // Take the first unindexed field, if any, as owned values
    let unindexed = extractor.unindexed_schema();
    let (key, schemas) = unindexed.iter().next()?;
    Some((key.clone(), schemas.clone()))
}
/// Returns an arbitrary payload key with acceptable schemas
/// used by `filter` which can be indexed but currently is not.
/// If this function returns `None` all indexable keys in `filter` are indexed.
pub fn one_unindexed_filter_key(
    schema: &PayloadIndexSchema,
    filter: &Filter,
) -> Option<(JsonPath, Vec<PayloadFieldSchema>)> {
    let suspect = PotentiallyUnindexed::Filter(filter);
    one_unindexed_key(schema, suspect)
}
/// Returns an arbitrary unindexed payload key used by `expr`, with the schemas
/// that would index it; `None` if all keys used by the expression are indexed.
pub fn one_unindexed_expression_key(
    schema: &PayloadIndexSchema,
    expr: &ExpressionInternal,
) -> Option<(JsonPath, Vec<PayloadFieldSchema>)> {
    let suspect = PotentiallyUnindexed::Expression(expr);
    one_unindexed_key(schema, suspect)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/collection/resharding.rs | lib/collection/src/collection/resharding.rs | use std::num::NonZeroU32;
use futures::Future;
use super::Collection;
use crate::config::ShardingMethod;
use crate::hash_ring::HashRingRouter;
use crate::operations::cluster_ops::ReshardingDirection;
use crate::operations::types::CollectionResult;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::resharding::{ReshardKey, ReshardState};
use crate::shards::transfer::ShardTransferConsensus;
impl Collection {
pub async fn resharding_state(&self) -> Option<ReshardState> {
self.shards_holder
.read()
.await
.resharding_state
.read()
.clone()
}
    /// Start a new resharding operation
    ///
    /// On scale-up this creates the new replica set and bumps the persisted shard
    /// count so the new shard is loaded again on restart.
    ///
    /// # Cancel safety
    ///
    /// This method is *not* cancel safe.
    pub async fn start_resharding<T, F>(
        &self,
        resharding_key: ReshardKey,
        _consensus: Box<dyn ShardTransferConsensus>,
        _on_finish: T,
        _on_error: F,
    ) -> CollectionResult<()>
    where
        T: Future<Output = ()> + Send + 'static,
        F: Future<Output = ()> + Send + 'static,
    {
        {
            let mut shard_holder = self.shards_holder.write().await;
            shard_holder.check_start_resharding(&resharding_key)?;
            // If scaling up, create a new replica set
            let replica_set = if resharding_key.direction == ReshardingDirection::Up {
                let replica_set = self
                    .create_replica_set(
                        resharding_key.shard_id,
                        resharding_key.shard_key.clone(),
                        &[resharding_key.peer_id],
                        Some(ReplicaState::Resharding),
                    )
                    .await?;
                Some(replica_set)
            } else {
                None
            };
            shard_holder
                .start_resharding_unchecked(resharding_key.clone(), replica_set)
                .await?;
            if resharding_key.direction == ReshardingDirection::Up {
                let mut config = self.collection_config.write().await;
                match config.params.sharding_method.unwrap_or_default() {
                    // If adding a shard, increase persisted count so we load it on restart
                    ShardingMethod::Auto => {
                        // The new shard id is expected to equal the previous shard count
                        debug_assert_eq!(config.params.shard_number.get(), resharding_key.shard_id);
                        config.params.shard_number = config
                            .params
                            .shard_number
                            .checked_add(1)
                            .expect("cannot have more than u32::MAX shards after resharding");
                        // Log but don't fail: resharding has already been started above
                        if let Err(err) = config.save(&self.path) {
                            log::error!(
                                "Failed to update and save collection config during resharding: {err}",
                            );
                        }
                    }
                    // Custom shards don't use the persisted count, we don't change it
                    ShardingMethod::Custom => {}
                }
            }
        }
        // Drive resharding
        // self.drive_resharding(resharding_key, consensus, false, on_finish, on_error)
        //     .await?;
        Ok(())
    }
    /// Commit the read hash ring for a resharding operation.
    ///
    /// Also invalidates the clean status of the shards that points were copied out
    /// of, since they may still hold points that no longer belong to them.
    pub async fn commit_read_hashring(&self, resharding_key: &ReshardKey) -> CollectionResult<()> {
        let mut shards_holder = self.shards_holder.write().await;
        shards_holder.commit_read_hashring(resharding_key)?;
        // Invalidate clean state for shards we copied points out of
        // These shards must be cleaned or dropped to ensure they don't contain irrelevant points
        match resharding_key.direction {
            // On resharding up: related shards below new shard key are affected
            ReshardingDirection::Up => match shards_holder.rings.get(&resharding_key.shard_key) {
                Some(HashRingRouter::Resharding { old, new: _ }) => {
                    self.invalidate_clean_local_shards(old.nodes().clone())
                        .await;
                }
                Some(HashRingRouter::Single(ring)) => {
                    // Unexpected state in debug; fall back to invalidating the whole ring
                    debug_assert!(false, "must have resharding hash ring during resharding");
                    self.invalidate_clean_local_shards(ring.nodes().clone())
                        .await;
                }
                None => {
                    debug_assert!(false, "must have hash ring for resharding key");
                }
            },
            // On resharding down: shard we're about to remove is affected
            ReshardingDirection::Down => {
                self.invalidate_clean_local_shards([resharding_key.shard_id])
                    .await;
            }
        }
        Ok(())
    }
pub async fn commit_write_hashring(&self, resharding_key: &ReshardKey) -> CollectionResult<()> {
self.shards_holder
.write()
.await
.commit_write_hashring(resharding_key)
}
    /// Finalize a resharding operation.
    ///
    /// On scale-down this also drops the fully migrated source shard and decreases
    /// the persisted shard count so the shard is not loaded again on restart.
    pub async fn finish_resharding(&self, resharding_key: ReshardKey) -> CollectionResult<()> {
        let mut shard_holder = self.shards_holder.write().await;
        shard_holder.check_finish_resharding(&resharding_key)?;
        shard_holder.finish_resharding_unchecked(&resharding_key)?;
        if resharding_key.direction == ReshardingDirection::Down {
            // Remove the shard we've now migrated all points out of
            if let Some(shard_key) = &resharding_key.shard_key {
                shard_holder.remove_shard_from_key_mapping(resharding_key.shard_id, shard_key)?;
            }
            shard_holder
                .drop_and_remove_shard(resharding_key.shard_id)
                .await?;
            {
                let mut config = self.collection_config.write().await;
                match config.params.sharding_method.unwrap_or_default() {
                    // If removing a shard, decrease persisted count so we don't load it on restart
                    ShardingMethod::Auto => {
                        // The dropped shard is expected to be the highest-numbered one
                        debug_assert_eq!(
                            config.params.shard_number.get() - 1,
                            resharding_key.shard_id,
                        );
                        config.params.shard_number =
                            NonZeroU32::new(config.params.shard_number.get() - 1)
                                .expect("cannot have zero shards after finishing resharding");
                        // Log but don't fail: the resharding itself already finished
                        if let Err(err) = config.save(&self.path) {
                            log::error!(
                                "Failed to update and save collection config during resharding: {err}"
                            );
                        }
                    }
                    // Custom shards don't use the persisted count, we don't change it
                    ShardingMethod::Custom => {}
                }
            }
        }
        Ok(())
    }
    /// Abort a resharding operation: invalidate affected clean states, cancel its
    /// transfers, roll back the resharding state, and (on scale-up) restore the
    /// persisted shard count.
    ///
    /// With `force`, the abort-precondition check is skipped.
    pub async fn abort_resharding(
        &self,
        resharding_key: ReshardKey,
        force: bool,
    ) -> CollectionResult<()> {
        log::warn!(
            "Invalidating local cleanup tasks and aborting resharding {resharding_key} (force: {force})"
        );
        let shard_holder = self.shards_holder.read().await;
        if !force {
            shard_holder.check_abort_resharding(&resharding_key)?;
        } else {
            log::warn!("Force-aborting resharding {resharding_key}");
        }
        // Invalidate clean state for shards we copied new points into
        // These shards must be cleaned or dropped to ensure they don't contain irrelevant points
        match resharding_key.direction {
            // On resharding up: new shard now has invalid points, shard will likely be dropped
            ReshardingDirection::Up => {
                self.invalidate_clean_local_shards([resharding_key.shard_id])
                    .await;
            }
            // On resharding down: existing shards may have new points moved into them
            ReshardingDirection::Down => match shard_holder.rings.get(&resharding_key.shard_key) {
                Some(HashRingRouter::Resharding { old: _, new }) => {
                    self.invalidate_clean_local_shards(new.nodes().clone())
                        .await;
                }
                Some(HashRingRouter::Single(ring)) => {
                    // Unexpected state in debug; fall back to invalidating the whole ring
                    debug_assert!(false, "must have resharding hash ring during resharding");
                    self.invalidate_clean_local_shards(ring.nodes().clone())
                        .await;
                }
                None => {
                    debug_assert!(false, "must have hash ring for resharding key");
                }
            },
        }
        // Abort all resharding transfer related to this specific resharding operation
        let resharding_transfers =
            shard_holder.get_transfers(|t| t.is_related_to_resharding(&resharding_key));
        for transfer in resharding_transfers {
            self.abort_shard_transfer(transfer, &shard_holder).await?;
        }
        drop(shard_holder); // drop the read lock before acquiring write lock
        let mut shard_holder = self.shards_holder.write().await;
        shard_holder
            .abort_resharding(resharding_key.clone(), force)
            .await?;
        // Decrease the persisted shard count, ensures we don't load dropped shard on restart
        if resharding_key.direction == ReshardingDirection::Up {
            let mut config = self.collection_config.write().await;
            match config.params.sharding_method.unwrap_or_default() {
                // If removing a shard, decrease persisted count so we don't load it on restart
                ShardingMethod::Auto => {
                    debug_assert_eq!(
                        config.params.shard_number.get() - 1,
                        resharding_key.shard_id,
                    );
                    config.params.shard_number =
                        NonZeroU32::new(config.params.shard_number.get() - 1)
                            .expect("cannot have zero shards after aborting resharding");
                    // Log but don't fail: the abort itself already succeeded
                    if let Err(err) = config.save(&self.path) {
                        log::error!(
                            "Failed to update and save collection config during resharding: {err}"
                        );
                    }
                }
                // Custom shards don't use the persisted count, we don't change it
                ShardingMethod::Custom => {}
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/operation_effect.rs | lib/collection/src/operations/operation_effect.rs | use std::borrow::Cow;
use segment::types::{Filter, PointIdType};
use super::vector_ops;
use crate::operations::payload_ops::PayloadOps;
use crate::operations::{CollectionUpdateOperations, point_ops};
/// Structure to define what part of the shard are affected by the operation
pub enum OperationEffectArea<'a> {
    /// Operation does not touch any points (e.g. field index operations)
    Empty,
    /// Operation touches exactly these point ids
    Points(Cow<'a, [PointIdType]>),
    /// Operation touches whatever points match this filter
    Filter(&'a Filter),
}
/// Estimate how many points will be affected by the operation
pub enum PointsOperationEffect {
    /// No points affected
    Empty,
    /// Some points are affected; the explicit list of their ids
    Some(Vec<PointIdType>),
    /// Too many to enumerate, so we just say that it is a lot
    Many,
}
/// Estimate which part of a shard an operation will touch, without executing it.
pub trait EstimateOperationEffectArea {
    /// Return the affected area: nothing, an explicit id list, or a filter.
    fn estimate_effect_area(&self) -> OperationEffectArea<'_>;
}
impl EstimateOperationEffectArea for CollectionUpdateOperations {
fn estimate_effect_area(&self) -> OperationEffectArea<'_> {
match self {
CollectionUpdateOperations::PointOperation(point_operation) => {
point_operation.estimate_effect_area()
}
CollectionUpdateOperations::VectorOperation(vector_operation) => {
vector_operation.estimate_effect_area()
}
CollectionUpdateOperations::PayloadOperation(payload_operation) => {
payload_operation.estimate_effect_area()
}
CollectionUpdateOperations::FieldIndexOperation(_) => OperationEffectArea::Empty,
}
}
}
/// Effect area of point-level operations (upsert, delete, sync).
impl EstimateOperationEffectArea for point_ops::PointOperations {
    fn estimate_effect_area(&self) -> OperationEffectArea<'_> {
        match self {
            point_ops::PointOperations::UpsertPoints(insert_operations) => {
                insert_operations.estimate_effect_area()
            }
            // Conditional upserts affect the same points as the wrapped upsert
            point_ops::PointOperations::UpsertPointsConditional(conditional_upsert) => {
                conditional_upsert.points_op.estimate_effect_area()
            }
            point_ops::PointOperations::DeletePoints { ids } => {
                OperationEffectArea::Points(Cow::Borrowed(ids))
            }
            point_ops::PointOperations::DeletePointsByFilter(filter) => {
                OperationEffectArea::Filter(filter)
            }
            point_ops::PointOperations::SyncPoints(sync_op) => {
                // Sync is not expected on this path; still report the synced ids
                debug_assert!(
                    false,
                    "SyncPoints operation should not be used during transfer"
                );
                OperationEffectArea::Points(Cow::Owned(
                    sync_op.points.iter().map(|x| x.id).collect(),
                ))
            }
            // Staging-only test operation; touches nothing
            #[cfg(feature = "staging")]
            point_ops::PointOperations::TestDelay(_) => OperationEffectArea::Empty,
        }
    }
}
impl EstimateOperationEffectArea for vector_ops::VectorOperations {
fn estimate_effect_area(&self) -> OperationEffectArea<'_> {
match self {
vector_ops::VectorOperations::UpdateVectors(update_operation) => {
let ids = update_operation.points.iter().map(|p| p.id).collect();
OperationEffectArea::Points(Cow::Owned(ids))
}
vector_ops::VectorOperations::DeleteVectors(ids, _) => {
OperationEffectArea::Points(Cow::Borrowed(&ids.points))
}
vector_ops::VectorOperations::DeleteVectorsByFilter(filter, _) => {
OperationEffectArea::Filter(filter)
}
}
}
}
impl EstimateOperationEffectArea for point_ops::PointInsertOperationsInternal {
fn estimate_effect_area(&self) -> OperationEffectArea<'_> {
match self {
point_ops::PointInsertOperationsInternal::PointsBatch(batch) => {
OperationEffectArea::Points(Cow::Borrowed(&batch.ids))
}
point_ops::PointInsertOperationsInternal::PointsList(list) => {
OperationEffectArea::Points(Cow::Owned(list.iter().map(|x| x.id).collect()))
}
}
}
}
impl EstimateOperationEffectArea for PayloadOps {
fn estimate_effect_area(&self) -> OperationEffectArea<'_> {
match self {
PayloadOps::SetPayload(set_payload) => {
if let Some(points) = &set_payload.points {
OperationEffectArea::Points(Cow::Borrowed(points))
} else if let Some(filter) = &set_payload.filter {
OperationEffectArea::Filter(filter)
} else {
OperationEffectArea::Empty
}
}
PayloadOps::DeletePayload(delete_payload) => {
if let Some(points) = &delete_payload.points {
OperationEffectArea::Points(Cow::Borrowed(points))
} else if let Some(filter) = &delete_payload.filter {
OperationEffectArea::Filter(filter)
} else {
OperationEffectArea::Empty
}
}
PayloadOps::ClearPayload { points } => {
OperationEffectArea::Points(Cow::Borrowed(points))
}
PayloadOps::ClearPayloadByFilter(filter) => OperationEffectArea::Filter(filter),
PayloadOps::OverwritePayload(set_payload) => {
if let Some(points) = &set_payload.points {
OperationEffectArea::Points(Cow::Borrowed(points))
} else if let Some(filter) = &set_payload.filter {
OperationEffectArea::Filter(filter)
} else {
OperationEffectArea::Empty
}
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/snapshot_ops.rs | lib/collection/src/operations/snapshot_ops.rs | use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::SystemTime;
use api::grpc::conversions::naive_date_time_to_proto;
use chrono::{DateTime, NaiveDateTime};
use fs_err::tokio as tokio_fs;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use url::Url;
use validator::Validate;
use crate::operations::types::CollectionResult;
/// Defines source of truth for snapshot recovery:
///
/// `NoSync` means - restore snapshot without *any* additional synchronization.
/// `Snapshot` means - prefer snapshot data over the current state.
/// `Replica` means - prefer existing data over the snapshot.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Default, Clone, Copy)]
#[serde(rename_all = "snake_case")]
pub enum SnapshotPriority {
NoSync,
#[default]
Snapshot,
Replica,
// `ShardTransfer` is for internal use only, and should not be exposed/used in public API
#[serde(skip)]
ShardTransfer,
}
impl TryFrom<i32> for SnapshotPriority {
type Error = tonic::Status;
fn try_from(snapshot_priority: i32) -> Result<Self, Self::Error> {
api::grpc::qdrant::ShardSnapshotPriority::try_from(snapshot_priority)
.map(Into::into)
.map_err(|_| tonic::Status::invalid_argument("Malformed shard snapshot priority"))
}
}
impl From<api::grpc::qdrant::ShardSnapshotPriority> for SnapshotPriority {
fn from(snapshot_priority: api::grpc::qdrant::ShardSnapshotPriority) -> Self {
match snapshot_priority {
api::grpc::qdrant::ShardSnapshotPriority::NoSync => Self::NoSync,
api::grpc::qdrant::ShardSnapshotPriority::Snapshot => Self::Snapshot,
api::grpc::qdrant::ShardSnapshotPriority::Replica => Self::Replica,
api::grpc::qdrant::ShardSnapshotPriority::ShardTransfer => Self::ShardTransfer,
}
}
}
impl From<SnapshotPriority> for api::grpc::qdrant::ShardSnapshotPriority {
fn from(snapshot_priority: SnapshotPriority) -> Self {
match snapshot_priority {
SnapshotPriority::NoSync => Self::NoSync,
SnapshotPriority::Snapshot => Self::Snapshot,
SnapshotPriority::Replica => Self::Replica,
SnapshotPriority::ShardTransfer => Self::ShardTransfer,
}
}
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
pub struct SnapshotRecover {
/// Examples:
/// - URL `http://localhost:8080/collections/my_collection/snapshots/my_snapshot`
/// - Local path `file:///qdrant/snapshots/test_collection-2022-08-04-10-49-10.snapshot`
pub location: Url,
/// Defines which data should be used as a source of truth if there are other replicas in the cluster.
/// If set to `Snapshot`, the snapshot will be used as a source of truth, and the current state will be overwritten.
/// If set to `Replica`, the current state will be used as a source of truth, and after recovery if will be synchronized with the snapshot.
#[serde(default)]
pub priority: Option<SnapshotPriority>,
/// Optional SHA256 checksum to verify snapshot integrity before recovery.
#[serde(default)]
#[validate(custom(function = "common::validation::validate_sha256_hash"))]
pub checksum: Option<String>,
/// Optional API key used when fetching the snapshot from a remote URL.
#[serde(default)]
pub api_key: Option<String>,
}
fn snapshot_description_example() -> SnapshotDescription {
SnapshotDescription {
name: "my-collection-3766212330831337-2024-07-22-08-31-55.snapshot".to_string(),
creation_time: Some(NaiveDateTime::from_str("2022-08-04T10:49:10").unwrap()),
size: 1_000_000,
checksum: Some("a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0".to_string()),
}
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[schemars(example = "snapshot_description_example")]
pub struct SnapshotDescription {
pub name: String,
pub creation_time: Option<NaiveDateTime>,
pub size: u64,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub checksum: Option<String>,
}
impl From<SnapshotDescription> for api::grpc::qdrant::SnapshotDescription {
fn from(value: SnapshotDescription) -> Self {
Self {
name: value.name,
creation_time: value.creation_time.map(naive_date_time_to_proto),
size: value.size as i64,
checksum: value.checksum,
}
}
}
pub async fn get_snapshot_description(path: &Path) -> CollectionResult<SnapshotDescription> {
let name = path.file_name().unwrap().to_str().unwrap();
let file_meta = tokio_fs::metadata(&path).await?;
let creation_time = file_meta.created().ok().and_then(|created_time| {
created_time
.duration_since(SystemTime::UNIX_EPOCH)
.ok()
.map(|duration| {
DateTime::from_timestamp(duration.as_secs() as i64, 0)
.map(|dt| dt.naive_utc())
.unwrap()
})
});
let checksum = read_checksum_for_snapshot(path).await;
let size = file_meta.len();
Ok(SnapshotDescription {
name: name.to_string(),
creation_time,
size,
checksum,
})
}
async fn read_checksum_for_snapshot(snapshot_path: impl Into<PathBuf>) -> Option<String> {
let checksum_path = get_checksum_path(snapshot_path);
tokio_fs::read_to_string(&checksum_path).await.ok()
}
pub fn get_checksum_path(snapshot_path: impl Into<PathBuf>) -> PathBuf {
let mut checksum_path = snapshot_path.into().into_os_string();
checksum_path.push(".checksum");
checksum_path.into()
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
pub struct ShardSnapshotRecover {
pub location: ShardSnapshotLocation,
#[serde(default)]
pub priority: Option<SnapshotPriority>,
/// Optional SHA256 checksum to verify snapshot integrity before recovery.
#[validate(custom(function = "common::validation::validate_sha256_hash"))]
#[serde(default)]
pub checksum: Option<String>,
/// Optional API key used when fetching the snapshot from a remote URL.
#[serde(default)]
pub api_key: Option<String>,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
#[serde(untagged)]
pub enum ShardSnapshotLocation {
Url(Url),
Path(PathBuf),
}
impl TryFrom<Option<api::grpc::qdrant::ShardSnapshotLocation>> for ShardSnapshotLocation {
type Error = tonic::Status;
fn try_from(
snapshot_location: Option<api::grpc::qdrant::ShardSnapshotLocation>,
) -> Result<Self, Self::Error> {
let Some(snapshot_location) = snapshot_location else {
return Err(tonic::Status::invalid_argument(
"Malformed shard snapshot location",
));
};
snapshot_location.try_into()
}
}
impl TryFrom<api::grpc::qdrant::ShardSnapshotLocation> for ShardSnapshotLocation {
type Error = tonic::Status;
fn try_from(location: api::grpc::qdrant::ShardSnapshotLocation) -> Result<Self, Self::Error> {
use api::grpc::qdrant::shard_snapshot_location;
let api::grpc::qdrant::ShardSnapshotLocation { location } = location;
let Some(location) = location else {
return Err(tonic::Status::invalid_argument(
"Malformed shard snapshot location",
));
};
let location = match location {
shard_snapshot_location::Location::Url(url) => {
let url = Url::parse(&url).map_err(|err| {
tonic::Status::invalid_argument(format!(
"Invalid shard snapshot URL {url}: {err}",
))
})?;
Self::Url(url)
}
shard_snapshot_location::Location::Path(path) => {
let path = PathBuf::from(path);
Self::Path(path)
}
};
Ok(location)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/config_diff.rs | lib/collection/src/operations/config_diff.rs | //! Structures for partial update of collection params
#![allow(deprecated)] // hack to remove warning for memmap_threshold deprecation below
use std::num::NonZeroU32;
use api::rest::MaxOptimizationThreads;
use schemars::JsonSchema;
use segment::types::{
BinaryQuantization, HnswConfig, ProductQuantization, ScalarQuantization, StrictModeConfig,
};
use serde::{Deserialize, Serialize};
use validator::{Validate, ValidationErrors};
use crate::config::{CollectionParams, WalConfig};
use crate::optimizers_builder::OptimizersConfig;
pub trait DiffConfig<Diff>: Clone {
/// Update this config with field from `diff`
///
/// The `diff` has higher priority, meaning that fields specified in
/// the `diff` will always be in the returned object.
fn update(&self, diff: &Diff) -> Self;
fn update_opt(&self, diff: Option<&Diff>) -> Self {
match diff {
Some(diff) => self.update(diff),
None => self.clone(),
}
}
}
#[derive(
Debug, Default, Deserialize, Serialize, JsonSchema, Validate, Copy, Clone, PartialEq, Eq, Hash,
)]
#[serde(rename_all = "snake_case")]
pub struct HnswConfigDiff {
/// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
#[serde(skip_serializing_if = "Option::is_none")]
pub m: Option<usize>,
/// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
#[validate(range(min = 4))]
#[serde(skip_serializing_if = "Option::is_none")]
pub ef_construct: Option<usize>,
/// Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search.
/// This measures the total size of vectors being queried against.
/// When the maximum estimated amount of points that a condition satisfies is smaller than
/// `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index
/// traversal for better performance.
/// Note: 1Kb = 1 vector of size 256
#[serde(
alias = "full_scan_threshold_kb",
default,
skip_serializing_if = "Option::is_none"
)]
#[validate(range(min = 10))]
pub full_scan_threshold: Option<usize>,
/// Number of parallel threads used for background index building.
/// If 0 - automatically select from 8 to 16.
/// Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
/// On small CPUs, less threads are used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub max_indexing_threads: Option<usize>,
/// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_disk: Option<bool>,
/// Custom M param for additional payload-aware HNSW links. If not set, default M will be used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub payload_m: Option<usize>,
/// Store copies of original and quantized vectors within the HNSW index file. Default: false.
/// Enabling this option will trade the search speed for disk usage by reducing amount of
/// random seeks during the search.
/// Requires quantized vectors to be enabled. Multi-vectors are not supported.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub inline_storage: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq, Eq, Hash)]
pub struct WalConfigDiff {
/// Size of a single WAL segment in MB
#[validate(range(min = 1))]
pub wal_capacity_mb: Option<usize>,
/// Number of WAL segments to create ahead of actually used ones
pub wal_segments_ahead: Option<usize>,
/// Number of closed WAL segments to retain
pub wal_retain_closed: Option<usize>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
pub struct CollectionParamsDiff {
/// Number of replicas for each shard
pub replication_factor: Option<NonZeroU32>,
/// Minimal number successful responses from replicas to consider operation successful
pub write_consistency_factor: Option<NonZeroU32>,
/// Fan-out every read request to these many additional remote nodes (and return first available response)
pub read_fan_out_factor: Option<u32>,
/// If true - point's payload will not be stored in memory.
/// It will be read from the disk every time it is requested.
/// This setting saves RAM by (slightly) increasing the response time.
/// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
#[serde(default)]
pub on_disk_payload: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
pub struct OptimizersConfigDiff {
/// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
#[validate(range(min = 0.0, max = 1.0))]
pub deleted_threshold: Option<f64>,
/// The minimal number of vectors in a segment, required to perform segment optimization
#[validate(range(min = 100))]
pub vacuum_min_vector_number: Option<usize>,
/// Target amount of segments optimizer will try to keep.
/// Real amount of segments may vary depending on multiple parameters:
/// - Amount of stored points
/// - Current write RPS
///
/// It is recommended to select default number of segments as a factor of the number of search threads,
/// so that each segment would be handled evenly by one of the threads
/// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs
pub default_segment_number: Option<usize>,
/// Do not create segments larger this size (in kilobytes).
/// Large segments might require disproportionately long indexation times,
/// therefore it makes sense to limit the size of segments.
///
/// If indexation speed have more priority for your - make this parameter lower.
/// If search speed is more important - make this parameter higher.
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "max_segment_size_kb")]
#[validate(range(min = 1))]
pub max_segment_size: Option<usize>,
/// Maximum size (in kilobytes) of vectors to store in-memory per segment.
/// Segments larger than this threshold will be stored as read-only memmapped file.
///
/// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
///
/// To disable memmap storage, set this to `0`.
///
/// Note: 1Kb = 1 vector of size 256
///
/// Deprecated since Qdrant 1.15.0
#[serde(alias = "memmap_threshold_kb")]
#[deprecated(since = "1.15.0", note = "Use `on_disk` flags instead")]
pub memmap_threshold: Option<usize>,
/// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
///
/// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
///
/// To disable vector indexing, set to `0`.
///
/// Note: 1kB = 1 vector of size 256.
#[serde(alias = "indexing_threshold_kb")]
pub indexing_threshold: Option<usize>,
/// Minimum interval between forced flushes.
pub flush_interval_sec: Option<u64>,
/// Max number of threads (jobs) for running optimizations per shard.
/// Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
/// If "auto" - have no limit and choose dynamically to saturate CPU.
/// If 0 - no optimization threads, optimizations will be disabled.
pub max_optimization_threads: Option<MaxOptimizationThreads>,
}
impl std::hash::Hash for OptimizersConfigDiff {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let Self {
deleted_threshold,
vacuum_min_vector_number,
default_segment_number,
max_segment_size,
#[expect(deprecated)]
memmap_threshold,
indexing_threshold,
flush_interval_sec,
max_optimization_threads,
} = self;
deleted_threshold.map(f64::to_le_bytes).hash(state);
vacuum_min_vector_number.hash(state);
default_segment_number.hash(state);
max_segment_size.hash(state);
memmap_threshold.hash(state);
indexing_threshold.hash(state);
flush_interval_sec.hash(state);
max_optimization_threads.hash(state);
}
}
impl PartialEq for OptimizersConfigDiff {
fn eq(&self, other: &Self) -> bool {
#[expect(deprecated)]
let eq_memmap_threshold = self.memmap_threshold == other.memmap_threshold;
self.deleted_threshold.map(f64::to_le_bytes)
== other.deleted_threshold.map(f64::to_le_bytes)
&& self.vacuum_min_vector_number == other.vacuum_min_vector_number
&& self.default_segment_number == other.default_segment_number
&& self.max_segment_size == other.max_segment_size
&& eq_memmap_threshold
&& self.indexing_threshold == other.indexing_threshold
&& self.flush_interval_sec == other.flush_interval_sec
&& self.max_optimization_threads == other.max_optimization_threads
}
}
impl Eq for OptimizersConfigDiff {}
impl DiffConfig<HnswConfigDiff> for HnswConfig {
fn update(&self, diff: &HnswConfigDiff) -> Self {
let HnswConfigDiff {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads,
on_disk,
payload_m,
inline_storage,
} = diff;
HnswConfig {
m: m.unwrap_or(self.m),
ef_construct: ef_construct.unwrap_or(self.ef_construct),
full_scan_threshold: full_scan_threshold.unwrap_or(self.full_scan_threshold),
max_indexing_threads: max_indexing_threads.unwrap_or(self.max_indexing_threads),
on_disk: on_disk.or(self.on_disk),
payload_m: payload_m.or(self.payload_m),
inline_storage: inline_storage.or(self.inline_storage),
}
}
}
impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {
fn update(&self, diff: &HnswConfigDiff) -> Self {
let HnswConfigDiff {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads,
on_disk,
payload_m,
inline_storage,
} = diff;
HnswConfigDiff {
m: m.or(self.m),
ef_construct: ef_construct.or(self.ef_construct),
full_scan_threshold: full_scan_threshold.or(self.full_scan_threshold),
max_indexing_threads: max_indexing_threads.or(self.max_indexing_threads),
on_disk: on_disk.or(self.on_disk),
payload_m: payload_m.or(self.payload_m),
inline_storage: inline_storage.or(self.inline_storage),
}
}
}
impl DiffConfig<OptimizersConfigDiff> for OptimizersConfig {
fn update(&self, diff: &OptimizersConfigDiff) -> Self {
let OptimizersConfigDiff {
deleted_threshold,
vacuum_min_vector_number,
default_segment_number,
max_segment_size,
memmap_threshold,
indexing_threshold,
flush_interval_sec,
max_optimization_threads,
} = diff;
OptimizersConfig {
deleted_threshold: deleted_threshold.unwrap_or(self.deleted_threshold),
vacuum_min_vector_number: vacuum_min_vector_number
.unwrap_or(self.vacuum_min_vector_number),
default_segment_number: default_segment_number.unwrap_or(self.default_segment_number),
max_segment_size: max_segment_size.or(self.max_segment_size),
memmap_threshold: memmap_threshold.or(self.memmap_threshold),
indexing_threshold: indexing_threshold.or(self.indexing_threshold),
flush_interval_sec: flush_interval_sec.unwrap_or(self.flush_interval_sec),
max_optimization_threads: max_optimization_threads
.map_or(self.max_optimization_threads, From::from),
}
}
}
impl DiffConfig<WalConfigDiff> for WalConfig {
fn update(&self, diff: &WalConfigDiff) -> Self {
let WalConfigDiff {
wal_capacity_mb,
wal_segments_ahead,
wal_retain_closed,
} = diff;
WalConfig {
wal_capacity_mb: wal_capacity_mb.unwrap_or(self.wal_capacity_mb),
wal_segments_ahead: wal_segments_ahead.unwrap_or(self.wal_segments_ahead),
wal_retain_closed: wal_retain_closed.unwrap_or(self.wal_retain_closed),
}
}
}
impl DiffConfig<CollectionParamsDiff> for CollectionParams {
fn update(&self, diff: &CollectionParamsDiff) -> Self {
let CollectionParamsDiff {
replication_factor,
write_consistency_factor,
read_fan_out_factor,
on_disk_payload,
} = diff;
CollectionParams {
replication_factor: replication_factor.unwrap_or(self.replication_factor),
write_consistency_factor: write_consistency_factor
.unwrap_or(self.write_consistency_factor),
read_fan_out_factor: read_fan_out_factor.or(self.read_fan_out_factor),
on_disk_payload: on_disk_payload.unwrap_or(self.on_disk_payload),
shard_number: self.shard_number,
sharding_method: self.sharding_method,
sparse_vectors: self.sparse_vectors.clone(),
vectors: self.vectors.clone(),
}
}
}
impl DiffConfig<StrictModeConfig> for StrictModeConfig {
fn update(&self, diff: &StrictModeConfig) -> Self {
let StrictModeConfig {
enabled,
max_query_limit,
max_timeout,
unindexed_filtering_retrieve,
unindexed_filtering_update,
search_max_hnsw_ef,
search_allow_exact,
search_max_oversampling,
upsert_max_batchsize,
max_collection_vector_size_bytes,
read_rate_limit,
write_rate_limit,
max_collection_payload_size_bytes,
max_points_count,
filter_max_conditions,
condition_max_size,
multivector_config,
sparse_config,
max_payload_index_count,
} = diff;
StrictModeConfig {
enabled: enabled.or(self.enabled),
max_query_limit: max_query_limit.or(self.max_query_limit),
max_timeout: max_timeout.or(self.max_timeout),
unindexed_filtering_retrieve: unindexed_filtering_retrieve
.or(self.unindexed_filtering_retrieve),
unindexed_filtering_update: unindexed_filtering_update
.or(self.unindexed_filtering_update),
search_max_hnsw_ef: search_max_hnsw_ef.or(self.search_max_hnsw_ef),
search_allow_exact: search_allow_exact.or(self.search_allow_exact),
search_max_oversampling: search_max_oversampling.or(self.search_max_oversampling),
upsert_max_batchsize: upsert_max_batchsize.or(self.upsert_max_batchsize),
max_collection_vector_size_bytes: max_collection_vector_size_bytes
.or(self.max_collection_vector_size_bytes),
read_rate_limit: read_rate_limit.or(self.read_rate_limit),
write_rate_limit: write_rate_limit.or(self.write_rate_limit),
max_collection_payload_size_bytes: max_collection_payload_size_bytes
.or(self.max_collection_payload_size_bytes),
max_points_count: max_points_count.or(self.max_points_count),
filter_max_conditions: filter_max_conditions.or(self.filter_max_conditions),
condition_max_size: condition_max_size.or(self.condition_max_size),
multivector_config: multivector_config
.as_ref()
.or(self.multivector_config.as_ref())
.cloned(),
sparse_config: sparse_config
.as_ref()
.or(self.sparse_config.as_ref())
.cloned(),
max_payload_index_count: max_payload_index_count.or(self.max_payload_index_count),
}
}
}
impl From<HnswConfig> for HnswConfigDiff {
fn from(config: HnswConfig) -> Self {
let HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads,
on_disk,
payload_m,
inline_storage,
} = config;
HnswConfigDiff {
m: Some(m),
ef_construct: Some(ef_construct),
full_scan_threshold: Some(full_scan_threshold),
max_indexing_threads: Some(max_indexing_threads),
on_disk,
payload_m,
inline_storage,
}
}
}
impl From<WalConfig> for WalConfigDiff {
fn from(config: WalConfig) -> Self {
let WalConfig {
wal_capacity_mb,
wal_segments_ahead,
wal_retain_closed,
} = config;
WalConfigDiff {
wal_capacity_mb: Some(wal_capacity_mb),
wal_segments_ahead: Some(wal_segments_ahead),
wal_retain_closed: Some(wal_retain_closed),
}
}
}
impl From<CollectionParams> for CollectionParamsDiff {
fn from(config: CollectionParams) -> Self {
let CollectionParams {
replication_factor,
write_consistency_factor,
read_fan_out_factor,
on_disk_payload,
shard_number: _,
sharding_method: _,
sparse_vectors: _,
vectors: _,
} = config;
CollectionParamsDiff {
replication_factor: Some(replication_factor),
write_consistency_factor: Some(write_consistency_factor),
read_fan_out_factor,
on_disk_payload: Some(on_disk_payload),
}
}
}
impl From<OptimizersConfig> for OptimizersConfigDiff {
fn from(config: OptimizersConfig) -> Self {
let OptimizersConfig {
deleted_threshold,
vacuum_min_vector_number,
default_segment_number,
max_segment_size,
#[expect(deprecated)]
memmap_threshold,
indexing_threshold,
flush_interval_sec,
max_optimization_threads,
} = config;
Self {
deleted_threshold: Some(deleted_threshold),
vacuum_min_vector_number: Some(vacuum_min_vector_number),
default_segment_number: Some(default_segment_number),
max_segment_size,
#[expect(deprecated)]
memmap_threshold,
indexing_threshold,
flush_interval_sec: Some(flush_interval_sec),
max_optimization_threads: max_optimization_threads.map(MaxOptimizationThreads::Threads),
}
}
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
pub enum Disabled {
Disabled,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum QuantizationConfigDiff {
Scalar(ScalarQuantization),
Product(ProductQuantization),
Binary(BinaryQuantization),
Disabled(Disabled),
}
impl QuantizationConfigDiff {
pub fn new_disabled() -> Self {
QuantizationConfigDiff::Disabled(Disabled::Disabled)
}
}
impl Validate for QuantizationConfigDiff {
fn validate(&self) -> Result<(), ValidationErrors> {
match self {
QuantizationConfigDiff::Scalar(scalar) => scalar.validate(),
QuantizationConfigDiff::Product(product) => product.validate(),
QuantizationConfigDiff::Binary(binary) => binary.validate(),
QuantizationConfigDiff::Disabled(_) => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use rstest::rstest;
use segment::types::{Distance, HnswConfig};
use super::*;
use crate::operations::vector_params_builder::VectorParamsBuilder;
use crate::optimizers_builder::OptimizersConfig;
#[test]
fn test_update_collection_params() {
let params = CollectionParams {
vectors: VectorParamsBuilder::new(128, Distance::Cosine)
.build()
.into(),
..CollectionParams::empty()
};
let diff = CollectionParamsDiff {
replication_factor: None,
write_consistency_factor: Some(NonZeroU32::new(2).unwrap()),
read_fan_out_factor: None,
on_disk_payload: None,
};
let new_params = params.update(&diff);
assert_eq!(new_params.replication_factor.get(), 1);
assert_eq!(new_params.write_consistency_factor.get(), 2);
assert!(new_params.on_disk_payload);
}
#[test]
fn test_hnsw_update() {
let base_config = HnswConfig::default();
let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap();
let new_config = base_config.update(&update);
assert_eq!(new_config.m, 32)
}
#[test]
fn test_optimizer_update() {
let base_config = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 10,
max_segment_size: None,
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: Some(1),
};
let update: OptimizersConfigDiff =
serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap();
let new_config = base_config.update(&update);
assert_eq!(new_config.indexing_threshold, Some(10000))
}
#[rstest]
#[case::number(r#"{ "max_optimization_threads": 5 }"#, Some(5))]
#[case::auto(r#"{ "max_optimization_threads": "auto" }"#, None)]
#[case::null(r#"{ "max_optimization_threads": null }"#, Some(1))] // no effect
#[case::nothing("{ }", Some(1))] // no effect
#[should_panic]
#[case::other(r#"{ "max_optimization_threads": "other" }"#, Some(1))]
fn test_set_optimizer_threads(#[case] json_diff: &str, #[case] expected: Option<usize>) {
let base_config = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 10,
max_segment_size: None,
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: Some(1),
};
let update: OptimizersConfigDiff = serde_json::from_str(json_diff).unwrap();
let new_config = base_config.update(&update);
assert_eq!(new_config.max_optimization_threads, expected);
}
#[test]
fn test_wal_config() {
let base_config = WalConfig::default();
let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap();
let new_config = base_config.update(&update);
assert_eq!(new_config.wal_segments_ahead, 2)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/validation.rs | lib/collection/src/operations/validation.rs | use actix_web_validator::error::flatten_errors;
use serde_json::Value;
use validator::{ValidationError, ValidationErrors};
/// Warn about validation errors in the log.
///
/// Validation errors are pretty printed field-by-field.
pub fn warn_validation_errors(description: &str, errs: &ValidationErrors) {
log::warn!("{description} has validation errors:");
describe_errors(errs)
.into_iter()
.for_each(|(key, msg)| log::warn!("- {key}: {msg}"));
}
/// Label the given validation errors in a single string.
pub fn label_errors(label: impl AsRef<str>, errs: &ValidationErrors) -> String {
format!(
"{}: [{}]",
label.as_ref(),
describe_errors(errs)
.into_iter()
.map(|(field, err)| format!("{field}: {err}"))
.collect::<Vec<_>>()
.join("; ")
)
}
/// Describe the given validation errors.
///
/// Returns a list of error messages for fields: `(field, message)`
fn describe_errors(errs: &ValidationErrors) -> Vec<(String, String)> {
flatten_errors(errs)
.into_iter()
.map(|(_, name, err)| (name, describe_error(err)))
.collect()
}
/// Describe a specific validation error.
fn describe_error(
err @ ValidationError {
code,
message,
params,
}: &ValidationError,
) -> String {
// Prefer to return message if set
if let Some(message) = message {
return message.to_string();
} else if let Some(Value::String(message)) = params.get("message") {
return message.clone();
}
// Generate messages based on codes
match code.as_ref() {
"range" => {
let msg = match (params.get("min"), params.get("max")) {
(Some(min), None) => format!("must be {min} or larger"),
(Some(min), Some(max)) => format!("must be from {min} to {max}"),
(None, Some(max)) => format!("must be {max} or smaller"),
// Should be unreachable
_ => err.to_string(),
};
match params.get("value") {
Some(value) => format!("value {value} invalid, {msg}"),
None => msg,
}
}
"length" => {
let msg = match (params.get("equal"), params.get("min"), params.get("max")) {
(Some(equal), _, _) => format!("must be exactly {equal} characters"),
(None, Some(min), None) => format!("must be at least {min} characters"),
(None, Some(min), Some(max)) => {
format!("must be from {min} to {max} characters")
}
(None, None, Some(max)) => format!("must be at most {max} characters"),
// Should be unreachable
_ => err.to_string(),
};
match params.get("value") {
Some(value) => format!("value {value} invalid, {msg}"),
None => msg,
}
}
"must_not_match" => {
match (
params.get("value"),
params.get("other_field"),
params.get("other_value"),
) {
(Some(value), Some(other_field), Some(other_value)) => {
format!("value {value} must not match {other_value} in {other_field}")
}
(Some(value), Some(other_field), None) => {
format!("value {value} must not match value in {other_field}")
}
(None, Some(other_field), Some(other_value)) => {
format!("must not match {other_value} in {other_field}")
}
(None, Some(other_field), None) => {
format!("must not match value in {other_field}")
}
// Should be unreachable
_ => err.to_string(),
}
}
"does_not_contain" => match params.get("pattern") {
Some(pattern) => format!("cannot contain {pattern}"),
None => err.to_string(),
},
"not_empty" => "value invalid, must not be empty".to_string(),
"closed_line" => {
"value invalid, the first and the last points should be same to form a closed line"
.to_string()
}
"min_line_length" => match (params.get("min_length"), params.get("length")) {
(Some(min_length), Some(length)) => {
format!("value invalid, the size must be at least {min_length}, got {length}")
}
_ => err.to_string(),
},
// Undescribed error codes
_ => err.to_string(),
}
}
#[cfg(test)]
mod tests {
    use api::grpc::qdrant::{GeoLineString, GeoPoint, GeoPolygon};
    use validator::Validate;
    use super::*;
    /// Struct with a single range-validated field; exercises basic error rendering.
    #[derive(Validate, Debug)]
    struct SomeThing {
        #[validate(range(min = 1))]
        pub idx: usize,
    }
    /// Struct with a nested-validated list; exercises indexed error paths.
    #[derive(Validate, Debug)]
    struct OtherThing {
        #[validate(nested)]
        pub things: Vec<SomeThing>,
    }
    /// Assemble a `GeoPolygon` from raw `(lon, lat)` tuples for the exterior
    /// ring and any number of interior rings.
    fn build_polygon(
        exterior_points: Vec<(f64, f64)>,
        interiors_points: Vec<Vec<(f64, f64)>>,
    ) -> GeoPolygon {
        /// Convert one coordinate list into a gRPC line string.
        fn to_line(coords: Vec<(f64, f64)>) -> GeoLineString {
            GeoLineString {
                points: coords
                    .into_iter()
                    .map(|(lon, lat)| GeoPoint { lon, lat })
                    .collect(),
            }
        }
        GeoPolygon {
            exterior: Some(to_line(exterior_points)),
            interiors: interiors_points.into_iter().map(to_line).collect(),
        }
    }
    #[test]
    fn test_validation() {
        // `idx == 0` violates `range(min = 1)` through the nested list.
        let bad_config = OtherThing {
            things: vec![SomeThing { idx: 0 }],
        };
        assert!(
            bad_config.validate().is_err(),
            "validation of bad config should fail"
        );
    }
    #[test]
    fn test_config_validation_render() {
        let bad_config = OtherThing {
            things: vec![
                SomeThing { idx: 0 },
                SomeThing { idx: 1 },
                SomeThing { idx: 2 },
            ],
        };
        let errors = bad_config
            .validate()
            .expect_err("validation of bad config should fail");
        // Only element 0 is invalid; its index must appear in the error path.
        assert_eq!(
            describe_errors(&errors),
            vec![(
                "things[0].idx".into(),
                "value 0 invalid, must be 1 or larger".into()
            )]
        );
    }
    #[test]
    fn test_polygon_validation_render() {
        // Pairs of (invalid polygon, expected rendered errors), covering empty,
        // too-short, and non-closed rings for exterior and interior lines.
        let test_cases = vec![
            (
                build_polygon(vec![], vec![]),
                vec![("exterior".into(), "value invalid, must not be empty".into())],
            ),
            (
                build_polygon(vec![(1., 1.),(2., 2.),(1., 1.)], vec![]),
                vec![("exterior".into(), "value invalid, the size must be at least 4, got 3".into())],
            ),
            (
                build_polygon(vec![(1., 1.),(2., 2.),(3., 3.),(4., 4.)], vec![]),
                vec![(
                    "exterior".into(),
                    "value invalid, the first and the last points should be same to form a closed line".into(),
                )],
            ),
            (
                build_polygon(
                    vec![(1., 1.),(2., 2.),(3., 3.),(1., 1.)],
                    vec![vec![(1., 1.),(2., 2.),(1., 1.)]],
                ),
                vec![("interiors".into(), "value invalid, the size must be at least 4, got 3".into())],
            ),
            (
                build_polygon(
                    vec![(1., 1.),(2., 2.),(3., 3.),(1., 1.)],
                    vec![vec![(1., 1.),(2., 2.),(3., 3.),(4., 4.)]],
                ),
                vec![(
                    "interiors".into(),
                    "value invalid, the first and the last points should be same to form a closed line".into(),
                )],
            ),
        ];
        for (polygon, expected_errors) in test_cases {
            let errors = polygon
                .validate()
                .expect_err("validation of bad polygon should fail");
            assert_eq!(describe_errors(&errors), expected_errors);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/payload_ops.rs | lib/collection/src/operations/payload_ops.rs | pub use shard::operations::payload_ops::*;
use super::{OperationToShard, SplitByShard, split_iter_by_shard};
use crate::hash_ring::HashRingRouter;
impl SplitByShard for PayloadOps {
fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
match self {
PayloadOps::SetPayload(operation) => {
operation.split_by_shard(ring).map(PayloadOps::SetPayload)
}
PayloadOps::DeletePayload(operation) => operation
.split_by_shard(ring)
.map(PayloadOps::DeletePayload),
PayloadOps::ClearPayload { points } => split_iter_by_shard(points, |id| *id, ring)
.map(|points| PayloadOps::ClearPayload { points }),
operation @ PayloadOps::ClearPayloadByFilter(_) => OperationToShard::to_all(operation),
PayloadOps::OverwritePayload(operation) => operation
.split_by_shard(ring)
.map(PayloadOps::OverwritePayload),
}
}
}
impl SplitByShard for DeletePayloadOp {
    /// Route a payload-key deletion to the shards owning the listed points.
    ///
    /// - With an explicit point list, the list is split per shard (any filter
    ///   is carried along unchanged).
    /// - With only a filter, the operation goes to all shards.
    /// - With neither, there is nothing to route.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        // Destructure once instead of matching `(Some(_), _)` on references and
        // then calling `.unwrap()` — same behavior, no panic path.
        let DeletePayloadOp {
            points,
            keys,
            filter,
        } = self;
        match (points, filter) {
            (Some(points), filter) => {
                split_iter_by_shard(points, |id| *id, ring).map(|points| DeletePayloadOp {
                    points: Some(points),
                    keys: keys.clone(),
                    filter: filter.clone(),
                })
            }
            (None, Some(filter)) => OperationToShard::to_all(DeletePayloadOp {
                points: None,
                keys,
                filter: Some(filter),
            }),
            (None, None) => OperationToShard::to_none(),
        }
    }
}
impl SplitByShard for SetPayloadOp {
    /// Route a payload set/merge operation to the shards owning the listed
    /// points; filter-only operations fan out to all shards, and an operation
    /// with neither points nor filter routes nowhere.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        // Destructure once instead of matching `(Some(_), _)` on references and
        // then calling `.unwrap()` — same behavior, no panic path.
        let SetPayloadOp {
            points,
            payload,
            filter,
            key,
        } = self;
        match (points, filter) {
            (Some(points), filter) => {
                split_iter_by_shard(points, |id| *id, ring).map(|points| SetPayloadOp {
                    points: Some(points),
                    payload: payload.clone(),
                    filter: filter.clone(),
                    key: key.clone(),
                })
            }
            (None, Some(filter)) => OperationToShard::to_all(SetPayloadOp {
                points: None,
                payload,
                filter: Some(filter),
                key,
            }),
            (None, None) => OperationToShard::to_none(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/shared_storage_config.rs | lib/collection/src/operations/shared_storage_config.rs | use std::default;
use std::num::NonZeroUsize;
use std::time::Duration;
use segment::types::HnswGlobalConfig;
use crate::common::snapshots_manager::SnapshotsConfig;
use crate::operations::types::NodeType;
use crate::shards::transfer::ShardTransferMethod;
/// Default timeout for search requests.
/// In cluster mode, this should be aligned with collection timeout.
const DEFAULT_SEARCH_TIMEOUT: Duration = Duration::from_secs(60);
/// Default update-queue capacity on a normal node.
const DEFAULT_UPDATE_QUEUE_SIZE: usize = 100;
/// Default update-queue capacity on a listener node (much larger than normal).
const DEFAULT_UPDATE_QUEUE_SIZE_LISTENER: usize = 10_000;
/// Default limit for concurrent incoming/outgoing shard transfers per node.
pub const DEFAULT_IO_SHARD_TRANSFER_LIMIT: Option<usize> = Some(1);
/// Default directory where snapshots are stored.
pub const DEFAULT_SNAPSHOTS_PATH: &str = "./snapshots";
/// Storage configuration shared between all collections.
/// Represents a per-node configuration, which might be changed with a restart.
/// Values of this struct are not persisted.
#[derive(Clone, Debug)]
pub struct SharedStorageConfig {
    // Capacity of the collection update queue; listener nodes default higher.
    pub update_queue_size: usize,
    // Role of this node (normal or listener).
    pub node_type: NodeType,
    // Whether collection load errors are handled instead of failing —
    // TODO confirm exact behavior at the load site.
    pub handle_collection_load_errors: bool,
    // NOTE(review): presumably a recovery-mode marker/message; verify usage.
    pub recovery_mode: Option<String>,
    // Timeout applied to search requests when none is given explicitly.
    pub search_timeout: Duration,
    // Optional cap on update concurrency — TODO confirm consumer.
    pub update_concurrency: Option<NonZeroUsize>,
    // Whether this node runs as part of a distributed cluster.
    pub is_distributed: bool,
    // Transfer method to use when a shard-transfer request does not pick one.
    pub default_shard_transfer_method: Option<ShardTransferMethod>,
    // Limit on simultaneous incoming shard transfers.
    pub incoming_shard_transfers_limit: Option<usize>,
    // Limit on simultaneous outgoing shard transfers.
    pub outgoing_shard_transfers_limit: Option<usize>,
    // Directory where snapshots are stored.
    pub snapshots_path: String,
    pub snapshots_config: SnapshotsConfig,
    pub hnsw_global_config: HnswGlobalConfig,
    // Number of threads used for search; defaults from available CPUs.
    pub search_thread_count: usize,
}
impl Default for SharedStorageConfig {
fn default() -> Self {
Self {
update_queue_size: DEFAULT_UPDATE_QUEUE_SIZE,
node_type: Default::default(),
handle_collection_load_errors: false,
recovery_mode: None,
search_timeout: DEFAULT_SEARCH_TIMEOUT,
update_concurrency: None,
is_distributed: false,
default_shard_transfer_method: None,
incoming_shard_transfers_limit: DEFAULT_IO_SHARD_TRANSFER_LIMIT,
outgoing_shard_transfers_limit: DEFAULT_IO_SHARD_TRANSFER_LIMIT,
snapshots_path: DEFAULT_SNAPSHOTS_PATH.to_string(),
snapshots_config: default::Default::default(),
hnsw_global_config: HnswGlobalConfig::default(),
search_thread_count: common::defaults::search_thread_count(common::cpu::get_num_cpus()),
}
}
}
impl SharedStorageConfig {
    /// Build node-level storage settings from parsed configuration values.
    ///
    /// `update_queue_size` falls back to a node-type specific default when not
    /// configured: listener nodes get a much larger queue than normal nodes.
    /// `search_timeout` falls back to [`DEFAULT_SEARCH_TIMEOUT`].
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        update_queue_size: Option<usize>,
        node_type: NodeType,
        handle_collection_load_errors: bool,
        recovery_mode: Option<String>,
        search_timeout: Option<Duration>,
        update_concurrency: Option<NonZeroUsize>,
        is_distributed: bool,
        default_shard_transfer_method: Option<ShardTransferMethod>,
        incoming_shard_transfers_limit: Option<usize>,
        outgoing_shard_transfers_limit: Option<usize>,
        snapshots_path: String,
        snapshots_config: SnapshotsConfig,
        hnsw_global_config: HnswGlobalConfig,
        search_thread_count: usize,
    ) -> Self {
        let update_queue_size = update_queue_size.unwrap_or_else(|| match node_type {
            NodeType::Normal => DEFAULT_UPDATE_QUEUE_SIZE,
            NodeType::Listener => DEFAULT_UPDATE_QUEUE_SIZE_LISTENER,
        });
        let search_timeout = search_timeout.unwrap_or(DEFAULT_SEARCH_TIMEOUT);
        Self {
            update_queue_size,
            node_type,
            handle_collection_load_errors,
            recovery_mode,
            search_timeout,
            update_concurrency,
            is_distributed,
            default_shard_transfer_method,
            incoming_shard_transfers_limit,
            outgoing_shard_transfers_limit,
            snapshots_path,
            snapshots_config,
            hnsw_global_config,
            search_thread_count,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/consistency_params.rs | lib/collection/src/operations/consistency_params.rs | use std::borrow::Cow;
use api::grpc::qdrant::{
ReadConsistency as ReadConsistencyGrpc, ReadConsistencyType as ReadConsistencyTypeGrpc,
read_consistency,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use validator::{Validate, ValidationError as ValidatorError, ValidationErrors};
/// Read consistency parameter
///
/// Defines how many replicas should be queried to get the result
///
/// * `N` - send N random request and return points, which present on all of them
///
/// * `majority` - send N/2+1 random request and return points, which present on all of them
///
/// * `quorum` - send requests to all nodes and return points which present on majority of them
///
/// * `all` - send requests to all nodes and return points which present on all of them
///
/// Default value is `Factor(1)`
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)]
#[serde(untagged)]
pub enum ReadConsistency {
    // Send N random requests and return points present on all of them.
    // Zero is rejected both by the custom deserializer and by the `Validate`
    // impl for this type.
    Factor(#[serde(deserialize_with = "deserialize_factor")] usize),
    Type(ReadConsistencyType),
}
impl Validate for ReadConsistency {
    /// A factor of zero is invalid; any other factor, and every type-based
    /// consistency, passes.
    fn validate(&self) -> Result<(), ValidationErrors> {
        let ReadConsistency::Factor(factor) = self else {
            return Ok(());
        };
        if *factor != 0 {
            return Ok(());
        }
        // Mirror the shape of a `validator` range error so it renders like
        // derive-generated ones.
        let mut error = ValidatorError::new("range");
        error.add_param(Cow::from("value"), factor);
        error.add_param(Cow::from("min"), &1);
        let mut errors = ValidationErrors::new();
        errors.add("factor", error);
        Err(errors)
    }
}
impl Default for ReadConsistency {
fn default() -> Self {
ReadConsistency::Factor(1)
}
}
impl ReadConsistency {
    /// Convert an optional gRPC read-consistency message, preserving `None`.
    pub fn try_from_optional(
        consistency: Option<ReadConsistencyGrpc>,
    ) -> Result<Option<Self>, tonic::Status> {
        match consistency {
            Some(consistency) => consistency.try_into().map(Some),
            None => Ok(None),
        }
    }
}
impl TryFrom<Option<ReadConsistencyGrpc>> for ReadConsistency {
    type Error = tonic::Status;
    /// A missing gRPC message falls back to the default `Factor(1)`.
    fn try_from(consistency: Option<ReadConsistencyGrpc>) -> Result<Self, Self::Error> {
        consistency.map_or(Ok(Self::Factor(1)), |consistency| consistency.try_into())
    }
}
impl TryFrom<ReadConsistencyGrpc> for ReadConsistency {
    type Error = tonic::Status;
    /// Convert the gRPC oneof into the internal representation, rejecting
    /// messages with an unset `value` field.
    fn try_from(consistency: ReadConsistencyGrpc) -> Result<Self, Self::Error> {
        let ReadConsistencyGrpc { value } = consistency;
        let Some(value) = value else {
            return Err(tonic::Status::invalid_argument(
                "invalid read consistency message: `ReadConsistency::value` field is `None`",
            ));
        };
        match value {
            read_consistency::Value::Factor(factor) => {
                // The wire factor may not fit a `usize` on all targets.
                let factor = usize::try_from(factor)
                    .map_err(|err| tonic::Status::invalid_argument(err.to_string()))?;
                Ok(Self::Factor(factor))
            }
            read_consistency::Value::Type(consistency) => Ok(Self::Type(consistency.try_into()?)),
        }
    }
}
impl From<ReadConsistency> for ReadConsistencyGrpc {
    /// Convert the internal representation into the gRPC oneof message.
    fn from(consistency: ReadConsistency) -> Self {
        let value = match consistency {
            ReadConsistency::Factor(factor) => {
                // `unwrap`: a configured factor is expected to fit the wire
                // integer type; failure here would be a programmer error.
                read_consistency::Value::Factor(factor.try_into().unwrap())
            }
            ReadConsistency::Type(consistency) => read_consistency::Value::Type(consistency.into()),
        };
        Self { value: Some(value) }
    }
}
fn deserialize_factor<'de, D>(deserializer: D) -> Result<usize, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum Factor<'a> {
Usize(usize),
Str(&'a str),
String(String),
}
let factor = match Factor::deserialize(deserializer)? {
Factor::Usize(factor) => Ok(factor),
Factor::Str(str) => str.parse(),
Factor::String(str) => str.parse(),
};
let factor = factor.map_err(|err| {
serde::de::Error::custom(format!(
"failed to deserialize read consistency factor value: {err}"
))
})?;
if factor > 0 {
Ok(factor)
} else {
Err(serde::de::Error::custom(
"read consistency factor can't be zero",
))
}
}
/// * `majority` - send N/2+1 random request and return points, which present on all of them
///
/// * `quorum` - send requests to all nodes and return points which present on majority of nodes
///
/// * `all` - send requests to all nodes and return points which present on all nodes
#[derive(Debug, Deserialize, Serialize, JsonSchema, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ReadConsistencyType {
    // Send N/2+1 random requests and return points present on all of them
    Majority,
    // Send requests to all nodes and return points present on a majority of nodes
    Quorum,
    // Send requests to all nodes and return points present on all nodes
    All,
}
impl TryFrom<i32> for ReadConsistencyType {
    type Error = tonic::Status;
    /// Decode the raw gRPC enum discriminant, rejecting unknown values.
    fn try_from(consistency: i32) -> Result<Self, Self::Error> {
        match ReadConsistencyTypeGrpc::try_from(consistency) {
            Ok(consistency) => Ok(consistency.into()),
            Err(_) => Err(tonic::Status::invalid_argument(format!(
                "invalid read consistency type value {consistency}",
            ))),
        }
    }
}
impl From<ReadConsistencyTypeGrpc> for ReadConsistencyType {
fn from(consistency: ReadConsistencyTypeGrpc) -> Self {
match consistency {
ReadConsistencyTypeGrpc::Majority => Self::Majority,
ReadConsistencyTypeGrpc::Quorum => Self::Quorum,
ReadConsistencyTypeGrpc::All => Self::All,
}
}
}
impl From<ReadConsistencyType> for i32 {
    /// Numeric gRPC discriminant of the consistency type.
    fn from(consistency: ReadConsistencyType) -> Self {
        ReadConsistencyTypeGrpc::from(consistency) as i32
    }
}
impl From<ReadConsistencyType> for ReadConsistencyTypeGrpc {
    /// One-to-one mapping into the gRPC enum.
    fn from(consistency: ReadConsistencyType) -> Self {
        match consistency {
            ReadConsistencyType::Majority => Self::Majority,
            ReadConsistencyType::Quorum => Self::Quorum,
            ReadConsistencyType::All => Self::All,
        }
    }
}
/// Error for an out-of-range read-consistency factor (below one).
#[derive(Copy, Clone, Debug, thiserror::Error)]
#[error("Read consistency factor cannot be less than 1")]
pub struct ValidationError;
#[cfg(test)]
mod tests {
    use schemars::schema_for;
    use super::*;
    #[test]
    fn test_read_consistency_deserialization() {
        // Serialization of a type-based value (visible with `--nocapture`).
        let consistency = ReadConsistency::Type(ReadConsistencyType::Majority);
        println!("{}", serde_json::to_string(&consistency).unwrap());
        // Plain integers deserialize into `Factor`.
        assert_eq!(
            serde_json::from_str::<ReadConsistency>("2").unwrap(),
            ReadConsistency::Factor(2)
        );
        // A zero factor is rejected by the custom deserializer.
        assert!(serde_json::from_str::<ReadConsistency>("0").is_err());
        // Strings deserialize into `Type`.
        assert_eq!(
            serde_json::from_str::<ReadConsistency>("\"majority\"").unwrap(),
            ReadConsistency::Type(ReadConsistencyType::Majority)
        );
        // Round-trip serialization for both representations.
        assert_eq!(
            serde_json::to_string(&ReadConsistency::Type(ReadConsistencyType::All)).unwrap(),
            "\"all\""
        );
        assert_eq!(
            serde_json::to_string(&ReadConsistency::Factor(1)).unwrap(),
            "1"
        );
        assert_eq!(
            serde_json::from_str::<ReadConsistency>("\"all\"").unwrap(),
            ReadConsistency::Type(ReadConsistencyType::All)
        );
        // Render the JSON schema for manual inspection.
        let schema = schema_for!(ReadConsistency);
        println!("{}", serde_json::to_string_pretty(&schema).unwrap())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/vector_ops.rs | lib/collection/src/operations/vector_ops.rs | use std::collections::HashSet;
use ahash::AHashMap;
use api::rest::{PointVectors, ShardKeySelector};
use schemars::JsonSchema;
use segment::types::{Filter, PointIdType, VectorNameBuf};
use serde::{Deserialize, Serialize};
pub use shard::operations::vector_ops::*;
use validator::Validate;
use super::{OperationToShard, SplitByShard, point_to_shards, split_iter_by_shard};
use crate::hash_ring::HashRingRouter;
// Request to delete one or more named vectors from points, selected by
// explicit ids and/or a filter.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct DeleteVectors {
    /// Deletes values from each point in this list
    pub points: Option<Vec<PointIdType>>,
    /// Deletes values from points that satisfy this filter condition
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// Vector names
    #[serde(alias = "vectors")]
    #[validate(length(min = 1, message = "must specify vector names to delete"))]
    pub vector: HashSet<VectorNameBuf>,
    // Optional shard key selector — presumably restricts the operation to
    // matching shards; verify against the call site.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
impl SplitByShard for Vec<PointVectors> {
    /// Partition point-vector updates by the shard owning each point id.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        split_iter_by_shard(self, |point_vectors| point_vectors.id, ring)
    }
}
impl SplitByShard for VectorOperations {
    /// Route a vector operation to the shards owning the affected points;
    /// filter-based deletions fan out to all shards.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        match self {
            VectorOperations::UpdateVectors(UpdateVectorsOp {
                points,
                update_filter,
            }) => {
                // Group updated points per shard. A point id may map to more
                // than one shard, in which case the point is cloned into each.
                let mut points_by_shard: AHashMap<u32, Vec<PointVectorsPersisted>> =
                    AHashMap::new();
                for point in points {
                    for shard_id in point_to_shards(&point.id, ring) {
                        points_by_shard
                            .entry(shard_id)
                            .or_default()
                            .push(point.clone());
                    }
                }
                let shard_ops = points_by_shard.into_iter().map(|(shard_id, points)| {
                    (
                        shard_id,
                        VectorOperations::UpdateVectors(UpdateVectorsOp {
                            points,
                            update_filter: update_filter.clone(),
                        }),
                    )
                });
                OperationToShard::by_shard(shard_ops)
            }
            VectorOperations::DeleteVectors(point_ids, vector_names) => {
                split_iter_by_shard(point_ids.points, |id| *id, ring)
                    .map(|ids| VectorOperations::DeleteVectors(ids.into(), vector_names.clone()))
            }
            by_filter @ VectorOperations::DeleteVectorsByFilter(..) => {
                OperationToShard::to_all(by_filter)
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/shard_selector_internal.rs | lib/collection/src/operations/shard_selector_internal.rs | use api::rest::{ShardKeySelector, ShardKeyWithFallback};
use segment::types::ShardKey;
use crate::shards::shard::ShardId;
#[derive(Debug, Clone, PartialEq)]
pub enum ShardSelectorInternal {
    /// No shard key specified
    Empty,
    /// Apply the operation to all shard keys
    All,
    /// Select one shard key
    ShardKey(ShardKey),
    /// Select multiple shard keys
    ShardKeys(Vec<ShardKey>),
    /// Select shard key with a fallback shard
    ShardKeyWithFallback(ShardKeyWithFallback),
    /// Select a single shard directly by its numeric id
    ShardId(ShardId),
}
impl ShardSelectorInternal {
pub fn is_shard_id(&self) -> bool {
matches!(self, ShardSelectorInternal::ShardId(_))
}
}
impl From<Option<ShardKey>> for ShardSelectorInternal {
fn from(key: Option<ShardKey>) -> Self {
match key {
None => ShardSelectorInternal::Empty,
Some(key) => ShardSelectorInternal::ShardKey(key),
}
}
}
impl From<Vec<ShardKey>> for ShardSelectorInternal {
fn from(keys: Vec<ShardKey>) -> Self {
ShardSelectorInternal::ShardKeys(keys)
}
}
impl From<ShardKeySelector> for ShardSelectorInternal {
    /// One-to-one mapping from the REST selector to the internal variants.
    fn from(selector: ShardKeySelector) -> Self {
        match selector {
            ShardKeySelector::ShardKey(key) => Self::ShardKey(key),
            ShardKeySelector::ShardKeys(keys) => Self::ShardKeys(keys),
            ShardKeySelector::ShardKeyWithFallback(key_with_fallback) => {
                Self::ShardKeyWithFallback(key_with_fallback)
            }
        }
    }
}
impl From<Option<ShardKeySelector>> for ShardSelectorInternal {
fn from(selector: Option<ShardKeySelector>) -> Self {
match selector {
None => ShardSelectorInternal::Empty,
Some(selector) => selector.into(),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/cluster_ops.rs | lib/collection/src/operations/cluster_ops.rs | use std::num::NonZeroU32;
use common::validation::validate_shard_different_peers;
use schemars::JsonSchema;
use segment::types::{Filter, ShardKey};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use validator::{Validate, ValidationErrors};
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::transfer::ShardTransferMethod;
// NOTE: `untagged` — the operation kind is selected by the single field name of
// the wrapper structs below (e.g. `{"move_shard": {...}}`).
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(untagged, rename_all = "snake_case")]
pub enum ClusterOperations {
    /// Move shard to a different peer
    MoveShard(MoveShardOperation),
    /// Replicate shard to a different peer
    ReplicateShard(ReplicateShardOperation),
    /// Abort currently running shard moving operation
    AbortTransfer(AbortTransferOperation),
    /// Drop replica of a shard from a peer
    DropReplica(DropReplicaOperation),
    /// Create a custom shard partition for a given key
    CreateShardingKey(CreateShardingKeyOperation),
    /// Drop a custom shard partition for a given key
    DropShardingKey(DropShardingKeyOperation),
    /// Restart transfer
    RestartTransfer(RestartTransferOperation),
    /// Start resharding
    StartResharding(StartReshardingOperation),
    /// Finish migrating points on specified shard, mark shard as `Active`
    #[schemars(skip)] // hide for internal use
    FinishMigratingPoints(FinishMigratingPointsOperation),
    /// Commit read hashring
    #[schemars(skip)] // hide for internal use
    CommitReadHashRing(CommitReadHashRingOperation),
    /// Commit write hashring
    #[schemars(skip)] // hide for internal use
    CommitWriteHashRing(CommitWriteHashRingOperation),
    /// Finish resharding
    #[schemars(skip)] // hide for internal use
    FinishResharding(FinishReshardingOperation),
    /// Abort resharding
    AbortResharding(AbortReshardingOperation),
    /// Trigger replication of points between two shards
    ReplicatePoints(ReplicatePointsOperation),
    /// Introduce artificial delay to a node
    #[cfg(feature = "staging")]
    TestSlowDown(TestSlowDownOperation),
}
// Request-body wrapper for `ClusterOperations::CreateShardingKey`; the field
// name selects the operation in the untagged enum.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateShardingKeyOperation {
    pub create_sharding_key: CreateShardingKey,
}
// Request-body wrapper for `ClusterOperations::DropShardingKey`.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DropShardingKeyOperation {
    pub drop_sharding_key: DropShardingKey,
}
// Request-body wrapper for `ClusterOperations::RestartTransfer`.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RestartTransferOperation {
    #[validate(nested)]
    pub restart_transfer: RestartTransfer,
}
// Parameters for creating a custom shard partition for a key.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateShardingKey {
    pub shard_key: ShardKey,
    /// How many shards to create for this key
    /// If not specified, will use the default value from config
    #[serde(alias = "shard_number")]
    pub shards_number: Option<NonZeroU32>,
    /// How many replicas to create for each shard
    /// If not specified, will use the default value from config
    pub replication_factor: Option<NonZeroU32>,
    /// Placement of shards for this key
    /// List of peer ids, that can be used to place shards for this key
    /// If not specified, will be randomly placed among all peers
    pub placement: Option<Vec<PeerId>>,
    /// Initial state of the shards for this key
    /// If not specified, will be `Initializing` first and then `Active`
    /// Warning: do not change this unless you know what you are doing
    pub initial_state: Option<ReplicaState>,
}
// Parameters for dropping a custom shard partition.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DropShardingKey {
    pub shard_key: ShardKey,
}
// Parameters for restarting a shard transfer between two peers.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RestartTransfer {
    pub shard_id: ShardId,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schemars(skip)] // hide for internal use
    pub to_shard_id: Option<ShardId>,
    pub from_peer_id: PeerId,
    pub to_peer_id: PeerId,
    pub method: ShardTransferMethod,
}
impl Validate for ClusterOperations {
    /// Delegate validation to the wrapped operation payload.
    fn validate(&self) -> Result<(), validator::ValidationErrors> {
        match self {
            Self::MoveShard(op) => op.validate(),
            Self::ReplicateShard(op) => op.validate(),
            Self::AbortTransfer(op) => op.validate(),
            Self::DropReplica(op) => op.validate(),
            Self::CreateShardingKey(op) => op.validate(),
            Self::DropShardingKey(op) => op.validate(),
            Self::RestartTransfer(op) => op.validate(),
            Self::StartResharding(op) => op.validate(),
            Self::FinishMigratingPoints(op) => op.validate(),
            Self::CommitReadHashRing(op) => op.validate(),
            Self::CommitWriteHashRing(op) => op.validate(),
            Self::FinishResharding(op) => op.validate(),
            Self::AbortResharding(op) => op.validate(),
            Self::ReplicatePoints(op) => op.validate(),
            #[cfg(feature = "staging")]
            Self::TestSlowDown(op) => op.validate(),
        }
    }
}
// Request-body wrappers: with `ClusterOperations` being `untagged`, the single
// snake_case field name of each wrapper is what selects the operation.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct MoveShardOperation {
    #[validate(nested)]
    pub move_shard: MoveShard,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ReplicateShardOperation {
    #[validate(nested)]
    pub replicate_shard: ReplicateShard,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DropReplicaOperation {
    #[validate(nested)]
    pub drop_replica: Replica,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct AbortTransferOperation {
    #[validate(nested)]
    pub abort_transfer: AbortShardTransfer,
}
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct StartReshardingOperation {
    #[validate(nested)]
    pub start_resharding: StartResharding,
}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct FinishMigratingPointsOperation {
    #[validate(nested)]
    pub finish_migrating_points: FinishMigratingPoints,
}
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
#[serde(rename_all = "snake_case")]
pub struct AbortReshardingOperation {
    #[validate(nested)]
    pub abort_resharding: AbortResharding,
}
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
#[serde(rename_all = "snake_case")]
pub struct CommitReadHashRingOperation {
    #[validate(nested)]
    pub commit_read_hash_ring: CommitReadHashRing,
}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct CommitWriteHashRingOperation {
    #[validate(nested)]
    pub commit_write_hash_ring: CommitWriteHashRing,
}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct FinishReshardingOperation {
    #[validate(nested)]
    pub finish_resharding: FinishResharding,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ReplicatePointsOperation {
    #[validate(nested)]
    pub replicate_points: ReplicatePoints,
}
// Parameters for replicating points between two shard keys; validated below to
// ensure source and target keys differ.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ReplicatePoints {
    // Optional filter restricting which points are replicated.
    pub filter: Option<Filter>,
    pub from_shard_key: ShardKey,
    pub to_shard_key: ShardKey,
}
impl Validate for ReplicatePoints {
    /// Replicating a shard key onto itself is rejected; any distinct pair of
    /// keys is accepted.
    fn validate(&self) -> Result<(), ValidationErrors> {
        if self.from_shard_key == self.to_shard_key {
            let mut errors = ValidationErrors::new();
            errors.add(
                "to_shard_key",
                validator::ValidationError::new("must be different from from_shard_key"),
            );
            return Err(errors);
        }
        Ok(())
    }
}
// Parameters for replicating a shard from one peer to another.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ReplicateShard {
    pub shard_id: ShardId,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schemars(skip)] // hide for internal use
    pub to_shard_id: Option<ShardId>,
    pub to_peer_id: PeerId,
    pub from_peer_id: PeerId,
    /// Method for transferring the shard from one node to another
    pub method: Option<ShardTransferMethod>,
}
impl Validate for ReplicateShard {
    /// Peer/shard combination is checked by the shared
    /// `validate_shard_different_peers` helper.
    fn validate(&self) -> Result<(), ValidationErrors> {
        let Self {
            shard_id,
            to_shard_id,
            to_peer_id,
            from_peer_id,
            ..
        } = *self;
        validate_shard_different_peers(from_peer_id, to_peer_id, shard_id, to_shard_id)
    }
}
// Parameters for moving a shard from one peer to another.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(rename_all = "snake_case")]
pub struct MoveShard {
    pub shard_id: ShardId,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schemars(skip)] // hide for internal use
    pub to_shard_id: Option<ShardId>,
    pub to_peer_id: PeerId,
    pub from_peer_id: PeerId,
    /// Method for transferring the shard from one node to another
    pub method: Option<ShardTransferMethod>,
}
impl Validate for RestartTransfer {
    /// Peer/shard combination is checked by the shared
    /// `validate_shard_different_peers` helper.
    fn validate(&self) -> Result<(), ValidationErrors> {
        let Self {
            shard_id,
            to_shard_id,
            from_peer_id,
            to_peer_id,
            ..
        } = *self;
        validate_shard_different_peers(from_peer_id, to_peer_id, shard_id, to_shard_id)
    }
}
impl Validate for MoveShard {
    /// Peer/shard combination is checked by the shared
    /// `validate_shard_different_peers` helper.
    fn validate(&self) -> Result<(), ValidationErrors> {
        let Self {
            shard_id,
            to_shard_id,
            to_peer_id,
            from_peer_id,
            ..
        } = *self;
        validate_shard_different_peers(from_peer_id, to_peer_id, shard_id, to_shard_id)
    }
}
impl Validate for AbortShardTransfer {
    /// Peer/shard combination is checked by the shared
    /// `validate_shard_different_peers` helper.
    fn validate(&self) -> Result<(), ValidationErrors> {
        let Self {
            shard_id,
            to_shard_id,
            to_peer_id,
            from_peer_id,
        } = *self;
        validate_shard_different_peers(from_peer_id, to_peer_id, shard_id, to_shard_id)
    }
}
// Identifies one replica of a shard on a specific peer.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct Replica {
    pub shard_id: ShardId,
    pub peer_id: PeerId,
}
// Parameters for aborting a running shard transfer.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone)]
#[serde(rename_all = "snake_case")]
pub struct AbortShardTransfer {
    pub shard_id: ShardId,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[schemars(skip)] // hide for internal use
    pub to_shard_id: Option<ShardId>,
    pub to_peer_id: PeerId,
    pub from_peer_id: PeerId,
}
// Parameters for starting a resharding operation; all selectors are optional.
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct StartResharding {
    #[schemars(skip)]
    pub uuid: Option<Uuid>,
    pub direction: ReshardingDirection,
    pub peer_id: Option<PeerId>,
    pub shard_key: Option<ShardKey>,
}
/// Resharding direction, scale up or down in number of shards
///
/// - `up` - Scale up, add a new shard
///
/// - `down` - Scale down, remove a shard
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ReshardingDirection {
    // Scale up, add a new shard
    #[default]
    Up,
    // Scale down, remove a shard
    Down,
}
// Parameters for finishing point migration; shard/peer may be left unset —
// TODO confirm how the unset case is resolved by the handler.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct FinishMigratingPoints {
    pub shard_id: Option<ShardId>,
    pub peer_id: Option<PeerId>,
}
// Marker payloads: the following operations carry no parameters.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct CommitReadHashRing {}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct CommitWriteHashRing {}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct FinishResharding {}
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct AbortResharding {}
// Staging-only slow-down operation, re-exported for the `ClusterOperations` variant.
#[cfg(feature = "staging")]
pub use super::staging::{TestSlowDown, TestSlowDownOperation};
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/types.rs | lib/collection/src/operations/types.rs | use std::backtrace::Backtrace;
use std::collections::{BTreeMap, HashMap};
use std::error::Error as _;
use std::fmt::{Debug, Write as _};
use std::iter;
use std::num::NonZeroU64;
use std::time::{Duration, SystemTimeError};
use api::grpc::transport_channel_pool::RequestError;
use api::rest::{
BaseGroupRequest, LookupLocation, OrderByInterface, RecommendStrategy,
SearchGroupsRequestInternal, SearchRequestInternal, ShardKeySelector, VectorStructOutput,
};
use common::ext::OptionExt;
use common::progress_tracker::ProgressTree;
use common::rate_limiting::{RateLimitError, RetryError};
use common::types::ScoreType;
use common::validation::validate_range_generic;
use common::{defaults, save_on_disk};
use io::file_operations::FileStorageError;
use issues::IssueRecord;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use segment::common::operation_error::{CancelledError, OperationError};
use segment::data_types::groups::GroupId;
use segment::data_types::modifier::Modifier;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, DenseVector};
use segment::types::{
Distance, Filter, HnswConfig, MultiVectorConfig, Payload, PayloadIndexInfo, PayloadKeyType,
PointIdType, QuantizationConfig, SearchParams, SeqNumberType, ShardKey,
SparseVectorStorageType, StrictModeConfigOutput, VectorName, VectorNameBuf,
VectorStorageDatatype, WithPayloadInterface, WithVector,
};
use semver::Version;
use serde;
use serde::{Deserialize, Serialize};
use serde_json::{Error as JsonError, Map, Value};
pub use shard::query::scroll::{QueryScrollRequestInternal, ScrollOrder};
pub use shard::search::CoreSearchRequest;
use shard::wal::WalError;
use sparse::common::sparse_vector::SparseVector;
use thiserror::Error;
use tokio::sync::mpsc::error::SendError;
use tokio::sync::oneshot::error::RecvError as OneshotRecvError;
use tokio::task::JoinError;
use tonic::codegen::http::uri::InvalidUri;
use uuid::Uuid;
use validator::{Validate, ValidationError, ValidationErrors};
use super::ClockTag;
use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
use crate::operations::cluster_ops::ReshardingDirection;
use crate::operations::config_diff::{HnswConfigDiff, QuantizationConfigDiff};
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::transfer::ShardTransferMethod;
/// Current state of the collection.
/// `Green` - all good. `Yellow` - optimization is running, 'Grey' - optimizations are possible but not triggered, `Red` - some operations failed and was not recovered
// NOTE: derived `Ord`/`PartialOrd` rank variants by declaration order
// (Green < Yellow < Grey < Red) — presumably so the "worst" status wins when
// aggregating across shards; confirm before reordering variants.
#[derive(Debug, Serialize, JsonSchema, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum CollectionStatus {
    // Collection is completely ready for requests
    Green,
    // Collection is available, but some segments are under optimization
    Yellow,
    // Collection is available, but some segments are pending optimization
    Grey,
    // Something is not OK:
    // - some operations failed and was not recovered
    Red,
}
/// Current state of the shard (supports same states as the collection)
///
/// `Green` - all good. `Yellow` - optimization is running, 'Grey' - optimizations are possible but not triggered, `Red` - some operations failed and was not recovered
// NOTE: keep the variant set and order in sync with `CollectionStatus`:
// derived `Ord` depends on declaration order, and the two enums convert 1:1 below.
#[derive(Debug, Serialize, JsonSchema, Anonymize, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ShardStatus {
    // Shard is completely ready for requests
    Green,
    // Shard is available, but some segments are under optimization
    Yellow,
    // Shard is available, but some segments are pending optimization
    Grey,
    // Something is not OK:
    // - some operations failed and was not recovered
    Red,
}
// A shard status maps one-to-one onto the equivalent collection status.
impl From<ShardStatus> for CollectionStatus {
    fn from(info: ShardStatus) -> Self {
        match info {
            ShardStatus::Green => Self::Green,
            ShardStatus::Yellow => Self::Yellow,
            ShardStatus::Grey => Self::Grey,
            ShardStatus::Red => Self::Red,
        }
    }
}
/// State of existence of a collection,
/// true = exists, false = does not exist
#[derive(Debug, Serialize, JsonSchema, Clone)]
pub struct CollectionExistence {
    // Whether a collection with the requested name exists.
    pub exists: bool,
}
/// Current status of the collection optimizers
// (The previous doc comment "Current state of the collection" was a copy-paste
// from `CollectionStatus`; this enum describes the optimizers, not the collection.)
#[derive(
    Debug, Default, Serialize, JsonSchema, Anonymize, PartialEq, Eq, PartialOrd, Ord, Clone,
)]
#[serde(rename_all = "snake_case")]
pub enum OptimizersStatus {
    /// Optimizers are reporting as expected
    #[default]
    Ok,
    /// Something wrong happened with optimizers
    #[anonymize(false)]
    Error(String),
}
// A single human-readable warning attached to collection info responses.
#[derive(
    Debug, Default, Serialize, JsonSchema, Anonymize, PartialEq, Eq, PartialOrd, Ord, Clone,
)]
#[serde(rename_all = "snake_case")]
pub struct CollectionWarning {
    /// Warning message
    #[anonymize(true)] // Might contain vector names
    pub message: String,
}
// Version of the collection config we can present to the user
/// Information about the collection configuration
#[derive(Debug, Serialize, JsonSchema)]
pub struct CollectionConfig {
    pub params: CollectionParams,
    pub hnsw_config: HnswConfig,
    pub optimizer_config: OptimizersConfig,
    // Always populated when converted from `CollectionConfigInternal`;
    // `Option` presumably kept for serialization compatibility — TODO confirm.
    pub wal_config: Option<WalConfig>,
    #[serde(default)]
    pub quantization_config: Option<QuantizationConfig>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub strict_mode_config: Option<StrictModeConfigOutput>,
    /// Arbitrary JSON metadata for the collection
    /// This can be used to store application-specific information
    /// such as creation time, migration data, inference model info, etc.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<Payload>,
}
impl From<CollectionConfigInternal> for CollectionConfig {
    /// Convert the internal config into its user-facing representation.
    /// Exhaustive destructuring in the parameter position forces this
    /// conversion to be revisited whenever a field is added to the source type.
    fn from(
        CollectionConfigInternal {
            params,
            hnsw_config,
            optimizer_config,
            wal_config,
            quantization_config,
            strict_mode_config,
            // Internal UUID used to identify unique collections in consensus
            // snapshots; never exposed to users.
            uuid: _,
            metadata,
        }: CollectionConfigInternal,
    ) -> Self {
        let strict_mode_config = strict_mode_config.map(StrictModeConfigOutput::from);
        Self {
            params,
            hnsw_config,
            optimizer_config,
            wal_config: Some(wal_config),
            quantization_config,
            strict_mode_config,
            metadata,
        }
    }
}
/// Current statistics and configuration of the collection
// Built either from `ShardInfoInternal` via `From`, or via `CollectionInfo::empty`
// for collections that hold no data yet.
#[derive(Debug, Serialize, JsonSchema)]
pub struct CollectionInfo {
    /// Status of the collection
    pub status: CollectionStatus,
    /// Status of optimizers
    pub optimizer_status: OptimizersStatus,
    /// Warnings related to the collection
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub warnings: Vec<CollectionWarning>,
    /// Approximate number of indexed vectors in the collection.
    /// Indexed vectors in large segments are faster to query,
    /// as it is stored in a specialized vector index.
    pub indexed_vectors_count: Option<usize>,
    /// Approximate number of points (vectors + payloads) in collection.
    /// Each point could be accessed by unique id.
    pub points_count: Option<usize>,
    /// Number of segments in collection.
    /// Each segment has independent vector as payload indexes
    pub segments_count: usize,
    /// Collection settings
    pub config: CollectionConfig,
    /// Types of stored payload
    pub payload_schema: HashMap<PayloadKeyType, PayloadIndexInfo>,
}
impl CollectionInfo {
    /// Build a `CollectionInfo` for a collection that holds no data yet:
    /// green status, zero counts, empty payload schema.
    pub fn empty(collection_config: CollectionConfigInternal) -> Self {
        // Collect warnings before the config is consumed by the conversion below.
        let warnings = collection_config.get_warnings();
        let config = CollectionConfig::from(collection_config);
        Self {
            status: CollectionStatus::Green,
            optimizer_status: OptimizersStatus::Ok,
            warnings,
            indexed_vectors_count: Some(0),
            points_count: Some(0),
            segments_count: 0,
            config,
            payload_schema: HashMap::new(),
        }
    }
}
impl From<ShardInfoInternal> for CollectionInfo {
fn from(info: ShardInfoInternal) -> Self {
let ShardInfoInternal {
status,
optimizer_status,
indexed_vectors_count,
points_count,
segments_count,
config,
payload_schema,
} = info;
Self {
status: status.into(),
optimizer_status,
warnings: config.get_warnings(),
indexed_vectors_count: Some(indexed_vectors_count),
points_count: Some(points_count),
segments_count,
config: CollectionConfig::from(config),
payload_schema,
}
}
}
/// Internal statistics and configuration of the collection.
// Not serialized directly; converted to `CollectionInfo` for API responses.
#[derive(Debug)]
pub struct ShardInfoInternal {
    /// Status of the shard
    pub status: ShardStatus,
    /// Status of optimizers
    pub optimizer_status: OptimizersStatus,
    /// Approximate number of indexed vectors in the shard.
    /// Indexed vectors in large segments are faster to query,
    /// as it is stored in vector index (HNSW).
    pub indexed_vectors_count: usize,
    /// Approximate number of points (vectors + payloads) in shard.
    /// Each point could be accessed by unique id.
    pub points_count: usize,
    /// Number of segments in shard.
    /// Each segment has independent vector as payload indexes
    pub segments_count: usize,
    /// Collection settings
    pub config: CollectionConfigInternal,
    /// Types of stored payload
    pub payload_schema: HashMap<PayloadKeyType, PayloadIndexInfo>,
}
/// Current clustering distribution for the collection
// Snapshot of how this collection's shards are distributed across the cluster,
// as seen from the responding peer.
#[derive(Debug, Serialize, JsonSchema)]
pub struct CollectionClusterInfo {
    /// ID of this peer
    pub peer_id: PeerId,
    /// Total number of shards
    pub shard_count: usize,
    /// Local shards
    pub local_shards: Vec<LocalShardInfo>,
    /// Remote shards
    pub remote_shards: Vec<RemoteShardInfo>,
    /// Shard transfers
    pub shard_transfers: Vec<ShardTransferInfo>,
    /// Resharding operations
    // TODO(resharding): remove this skip when releasing resharding
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resharding_operations: Option<Vec<ReshardingInfo>>,
}
/// Optimizations progress for the collection
#[derive(Debug, Serialize, JsonSchema)]
pub struct OptimizationsResponse {
    /// Ongoing optimizations from newest to oldest.
    pub ongoing: Vec<ProgressTree>,
    /// Completed optimizations from newest to oldest.
    // NOTE: `None` when `?completed=false`,
    // empty vec when `?completed=true` but no completed optimizations.
    // Distinguishing `None` from empty lets clients tell "not requested"
    // apart from "nothing finished".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub completed: Option<Vec<ProgressTree>>,
}
// Describes a single shard transfer between two peers.
#[derive(Debug, Serialize, JsonSchema, Clone, Anonymize)]
pub struct ShardTransferInfo {
    #[anonymize(false)]
    pub shard_id: ShardId,
    /// Target shard ID if different than source shard ID
    ///
    /// Used exclusively with `ReshardStreamRecords` transfer method.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[anonymize(false)]
    pub to_shard_id: Option<ShardId>,
    /// Source peer id
    #[anonymize(false)]
    pub from: PeerId,
    /// Destination peer id
    #[anonymize(false)]
    pub to: PeerId,
    /// If `true` transfer is a synchronization of a replicas
    /// If `false` transfer is a moving of a shard from one peer to another
    #[anonymize(false)]
    pub sync: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[anonymize(false)]
    pub method: Option<ShardTransferMethod>,
    /// A human-readable report of the transfer progress. Available only on the source peer.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[anonymize(false)]
    pub comment: Option<String>,
}
// Describes one in-progress resharding operation.
#[derive(Debug, Serialize, JsonSchema, Clone, Anonymize)]
pub struct ReshardingInfo {
    // Hidden from the OpenAPI schema; internal identifier of the operation.
    #[schemars(skip)]
    pub uuid: Uuid,
    #[anonymize(false)]
    pub direction: ReshardingDirection,
    #[anonymize(false)]
    pub shard_id: ShardId,
    #[anonymize(false)]
    pub peer_id: PeerId,
    pub shard_key: Option<ShardKey>,
}
// Cluster-info entry for a shard hosted on the responding peer.
#[derive(Debug, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct LocalShardInfo {
    /// Local shard id
    pub shard_id: ShardId,
    /// User-defined sharding key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKey>,
    /// Number of points in the shard
    pub points_count: usize,
    /// Is replica active
    pub state: ReplicaState,
}
// Cluster-info entry for a shard hosted on another peer.
#[derive(Debug, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct RemoteShardInfo {
    /// Remote shard id
    pub shard_id: ShardId,
    /// User-defined sharding key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKey>,
    /// Remote peer id
    pub peer_id: PeerId,
    /// Is replica active
    pub state: ReplicaState,
}
/// `Acknowledged` - Request is saved to WAL and will be process in a queue.
/// `Completed` - Request is completed, changes are actual.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum UpdateStatus {
    Acknowledged,
    Completed,
    /// Internal: update is rejected due to an outdated clock
    // Hidden from the OpenAPI schema: only used for internal replica traffic.
    #[schemars(skip)]
    ClockRejected,
}
// Outcome of a single update operation, returned to the caller.
#[derive(Copy, Clone, Debug, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct UpdateResult {
    /// Sequential number of the operation
    #[serde(skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<SeqNumberType>,
    /// Update status
    pub status: UpdateStatus,
    /// Updated value for the external clock tick
    /// Provided if incoming update request also specify clock tick
    // Never serialized into API responses; internal replication detail.
    #[serde(skip)]
    pub clock_tag: Option<ClockTag>,
}
// Public REST wrapper: the internal scroll request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ScrollRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub scroll_request: ScrollRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
/// Scroll request - paginate over all points which matches given condition
// `Hash` is derived so requests can be deduplicated/counted (see `Loggable`).
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq, Hash)]
#[serde(rename_all = "snake_case")]
pub struct ScrollRequestInternal {
    /// Start ID to read points from.
    pub offset: Option<PointIdType>,
    /// Page size. Default: 10
    #[validate(range(min = 1))]
    pub limit: Option<usize>,
    /// Look only for points which satisfies this conditions. If not provided - all points.
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// Select which payload to return with the response. Default is true.
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include into response. Default is false.
    #[serde(default, alias = "with_vectors")]
    pub with_vector: WithVector,
    /// Order the records by a payload field.
    pub order_by: Option<OrderByInterface>,
}
impl ScrollRequestInternal {
    // Default page size applied when `limit` is not provided.
    pub(crate) fn default_limit() -> usize {
        10
    }
    // Payloads are returned by default.
    pub(crate) fn default_with_payload() -> WithPayloadInterface {
        WithPayloadInterface::Bool(true)
    }
    // Vectors are omitted by default.
    pub(crate) fn default_with_vector() -> WithVector {
        WithVector::Bool(false)
    }
}
impl Default for ScrollRequestInternal {
fn default() -> Self {
ScrollRequestInternal {
offset: None,
limit: Some(Self::default_limit()),
filter: None,
with_payload: Some(Self::default_with_payload()),
with_vector: Self::default_with_vector(),
order_by: None,
}
}
}
fn points_example() -> Vec<api::rest::Record> {
let mut payload_map_1 = Map::new();
payload_map_1.insert("city".to_string(), Value::String("London".to_string()));
payload_map_1.insert("color".to_string(), Value::String("green".to_string()));
let mut payload_map_2 = Map::new();
payload_map_2.insert("city".to_string(), Value::String("Paris".to_string()));
payload_map_2.insert("color".to_string(), Value::String("red".to_string()));
vec![
api::rest::Record {
id: PointIdType::NumId(40),
payload: Some(Payload(payload_map_1)),
vector: Some(VectorStructOutput::Single(vec![0.875, 0.140625, 0.897_6])),
shard_key: Some("region_1".into()),
order_value: None,
},
api::rest::Record {
id: PointIdType::NumId(41),
payload: Some(Payload(payload_map_2)),
vector: Some(VectorStructOutput::Single(vec![0.75, 0.640625, 0.8945])),
shard_key: Some("region_1".into()),
order_value: None,
},
]
}
/// Result of the points read request
#[derive(Debug, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct ScrollResult {
    /// List of retrieved points
    #[schemars(example = "points_example")]
    pub points: Vec<api::rest::Record>,
    /// Offset which should be used to retrieve a next page result
    // `None` signals the last page has been reached.
    pub next_page_offset: Option<PointIdType>,
}
// Public REST wrapper: the internal search request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct SearchRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub search_request: SearchRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
// Batch of independent search requests executed together.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct SearchRequestBatch {
    #[validate(nested)]
    pub searches: Vec<SearchRequest>,
}
// Public REST wrapper: grouped search request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
pub struct SearchGroupsRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub search_group_request: SearchGroupsRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
// Public REST wrapper: point retrieval request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct PointRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub point_request: PointRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
// Retrieve points by id. `Hash` is derived for request deduplication (see `Loggable`).
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq, Hash)]
#[serde(rename_all = "snake_case")]
pub struct PointRequestInternal {
    /// Look for points with ids
    pub ids: Vec<PointIdType>,
    /// Select which payload to return with the response. Default is true.
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include into response. Default is false.
    #[serde(default, alias = "with_vectors")]
    pub with_vector: WithVector,
}
// An example for recommendation: either a reference to a stored point or a raw
// vector. `untagged` means serde tries the variants in declaration order:
// point id first, then dense, then sparse.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq)]
#[serde(untagged)]
pub enum RecommendExample {
    PointId(PointIdType),
    Dense(DenseVector),
    Sparse(SparseVector),
}
impl RecommendExample {
    /// Returns the point id when this example references a stored point;
    /// `None` for raw (dense or sparse) vectors.
    pub fn as_point_id(&self) -> Option<PointIdType> {
        if let RecommendExample::PointId(id) = self {
            Some(*id)
        } else {
            None
        }
    }
}
impl Validate for RecommendExample {
    /// Only sparse vectors carry constraints to check; point ids and dense
    /// vectors are always valid.
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            RecommendExample::Sparse(sparse) => sparse.validate(),
            RecommendExample::PointId(_) | RecommendExample::Dense(_) => Ok(()),
        }
    }
}
// Convenience conversion: a bare integer is treated as a numeric point id.
impl From<u64> for RecommendExample {
    fn from(id: u64) -> Self {
        RecommendExample::PointId(id.into())
    }
}
// Selector for which named vector a request operates on.
// Single-variant enum — presumably kept as an enum so the API shape can be
// extended later without breaking clients; confirm before flattening.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq)]
#[serde(rename_all = "snake_case", untagged)]
pub enum UsingVector {
    Name(VectorNameBuf),
}
impl UsingVector {
    // NOTE(review): clones despite the `as_` prefix (Rust convention reserves
    // `as_` for cheap borrows); renaming would break the public API.
    pub fn as_name(&self) -> VectorNameBuf {
        match self {
            UsingVector::Name(name) => name.clone(),
        }
    }
}
impl From<VectorNameBuf> for UsingVector {
    fn from(name: VectorNameBuf) -> Self {
        UsingVector::Name(name)
    }
}
// Public REST wrapper: the internal recommend request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Default, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RecommendRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub recommend_request: RecommendRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
/// Recommendation request.
/// Provides positive and negative examples of the vectors, which can be ids of points that
/// are already stored in the collection, raw vectors, or even ids and vectors combined.
///
/// Service should look for the points which are closer to positive examples and at the same time
/// further to negative examples. The concrete way of how to compare negative and positive distances
/// is up to the `strategy` chosen.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Default, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub struct RecommendRequestInternal {
    /// Look for vectors closest to those
    #[serde(default)]
    #[validate(nested)]
    pub positive: Vec<RecommendExample>,
    /// Try to avoid vectors like this
    #[serde(default)]
    #[validate(nested)]
    pub negative: Vec<RecommendExample>,
    /// How to use positive and negative examples to find the results
    pub strategy: Option<api::rest::RecommendStrategy>,
    /// Look only for points which satisfies this conditions
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// Additional search params
    #[validate(nested)]
    pub params: Option<SearchParams>,
    /// Max number of result to return
    // `top` is kept as an alias — presumably for backward compatibility.
    #[serde(alias = "top")]
    #[validate(range(min = 1))]
    pub limit: usize,
    /// Offset of the first result to return.
    /// May be used to paginate results.
    /// Note: large offset values may cause performance issues.
    pub offset: Option<usize>,
    /// Select which payload to return with the response. Default is false.
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include into response. Default is false.
    #[serde(default, alias = "with_vectors")]
    pub with_vector: Option<WithVector>,
    /// Define a minimal score threshold for the result.
    /// If defined, less similar results will not be returned.
    /// Score of the returned result might be higher or smaller than the threshold depending on the
    /// Distance function used. E.g. for cosine similarity only higher scores will be returned.
    pub score_threshold: Option<ScoreType>,
    /// Define which vector to use for recommendation, if not specified - try to use default vector
    #[serde(default)]
    pub using: Option<UsingVector>,
    /// The location used to lookup vectors. If not specified - use current collection.
    /// Note: the other collection should have the same vector size as the current collection
    #[serde(default)]
    pub lookup_from: Option<LookupLocation>,
}
// Batch of independent recommend requests executed together.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
#[serde(rename_all = "snake_case")]
pub struct RecommendRequestBatch {
    #[validate(nested)]
    pub searches: Vec<RecommendRequest>,
}
// Public REST wrapper: grouped recommend request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RecommendGroupsRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub recommend_group_request: RecommendGroupsRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
// Recommendation request with results grouped by a payload field.
// FIX: `positive`/`negative` now carry `#[validate(nested)]`, matching
// `RecommendRequestInternal` — previously sparse-vector examples in grouped
// recommend requests bypassed validation entirely.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq)]
pub struct RecommendGroupsRequestInternal {
    /// Look for vectors closest to those
    #[serde(default)]
    #[validate(nested)]
    pub positive: Vec<RecommendExample>,
    /// Try to avoid vectors like this
    #[serde(default)]
    #[validate(nested)]
    pub negative: Vec<RecommendExample>,
    /// How to use positive and negative examples to find the results
    #[serde(default)]
    pub strategy: Option<RecommendStrategy>,
    /// Look only for points which satisfies this conditions
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// Additional search params
    #[validate(nested)]
    pub params: Option<SearchParams>,
    /// Select which payload to return with the response. Default is false.
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include into response. Default is false.
    #[serde(default, alias = "with_vectors")]
    pub with_vector: Option<WithVector>,
    /// Define a minimal score threshold for the result.
    /// If defined, less similar results will not be returned.
    /// Score of the returned result might be higher or smaller than the threshold depending on the
    /// Distance function used. E.g. for cosine similarity only higher scores will be returned.
    pub score_threshold: Option<ScoreType>,
    /// Define which vector to use for recommendation, if not specified - try to use default vector
    #[serde(default)]
    pub using: Option<UsingVector>,
    /// The location used to lookup vectors. If not specified - use current collection.
    /// Note: the other collection should have the same vector size as the current collection
    #[serde(default)]
    pub lookup_from: Option<LookupLocation>,
    #[serde(flatten)]
    pub group_request: BaseGroupRequest,
}
// One { positive, negative } constraint pair used by discovery/context search.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, PartialEq)]
pub struct ContextExamplePair {
    #[validate(nested)]
    pub positive: RecommendExample,
    #[validate(nested)]
    pub negative: RecommendExample,
}
impl ContextExamplePair {
pub fn iter(&self) -> impl Iterator<Item = &RecommendExample> {
iter::once(&self.positive).chain(iter::once(&self.negative))
}
}
// Public REST wrapper: the internal discover request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
pub struct DiscoverRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub discover_request: DiscoverRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
/// Use context and a target to find the most similar points, constrained by the context.
// NOTE(review): unlike the other request types, `with_vector` here has no
// `with_vectors` serde alias — confirm whether that divergence is intentional.
#[derive(Deserialize, Serialize, JsonSchema, Validate, Clone, Debug, PartialEq)]
pub struct DiscoverRequestInternal {
    /// Look for vectors closest to this.
    ///
    /// When using the target (with or without context), the integer part of the score represents
    /// the rank with respect to the context, while the decimal part of the score relates to the
    /// distance to the target.
    #[validate(nested)]
    pub target: Option<RecommendExample>,
    /// Pairs of { positive, negative } examples to constrain the search.
    ///
    /// When using only the context (without a target), a special search - called context search - is
    /// performed where pairs of points are used to generate a loss that guides the search towards the
    /// zone where most positive examples overlap. This means that the score minimizes the scenario of
    /// finding a point closer to a negative than to a positive part of a pair.
    ///
    /// Since the score of a context relates to loss, the maximum score a point can get is 0.0,
    /// and it becomes normal that many points can have a score of 0.0.
    ///
    /// For discovery search (when including a target), the context part of the score for each pair
    /// is calculated +1 if the point is closer to a positive than to a negative part of a pair,
    /// and -1 otherwise.
    #[validate(nested)]
    pub context: Option<Vec<ContextExamplePair>>,
    /// Look only for points which satisfies this conditions
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// Additional search params
    #[validate(nested)]
    pub params: Option<SearchParams>,
    /// Max number of result to return
    #[serde(alias = "top")]
    #[validate(range(min = 1))]
    pub limit: usize,
    /// Offset of the first result to return.
    /// May be used to paginate results.
    /// Note: large offset values may cause performance issues.
    pub offset: Option<usize>,
    /// Select which payload to return with the response. Default is false.
    pub with_payload: Option<WithPayloadInterface>,
    /// Options for specifying which vectors to include into response. Default is false.
    pub with_vector: Option<WithVector>,
    /// Define which vector to use for recommendation, if not specified - try to use default vector
    #[serde(default)]
    pub using: Option<UsingVector>,
    /// The location used to lookup vectors. If not specified - use current collection.
    /// Note: the other collection should have the same vector size as the current collection
    #[serde(default)]
    pub lookup_from: Option<LookupLocation>,
}
// Batch of independent discover requests executed together.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
pub struct DiscoverRequestBatch {
    #[validate(nested)]
    pub searches: Vec<DiscoverRequest>,
}
// One group of a grouped search/recommend result.
#[derive(Debug, Serialize, JsonSchema, Clone)]
pub struct PointGroup {
    /// Scored points that have the same value of the group_by key
    pub hits: Vec<api::rest::ScoredPoint>,
    /// Value of the group_by key, shared across all the hits in the group
    pub id: GroupId,
    /// Record that has been looked up using the group id
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lookup: Option<api::rest::Record>,
}
// Top-level response for grouped requests.
#[derive(Debug, Serialize, JsonSchema)]
pub struct GroupsResult {
    pub groups: Vec<PointGroup>,
}
// Public REST wrapper: the internal count request plus shard-key routing.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
#[serde(rename_all = "snake_case")]
pub struct CountRequest {
    #[serde(flatten)]
    #[validate(nested)]
    pub count_request: CountRequestInternal,
    /// Specify in which shards to look for the points, if not specified - look in all shards
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
/// Count Request
/// Counts the number of points which satisfy the given filter.
/// If filter is not provided, the count of all points in the collection will be returned.
// `Hash` is derived for request deduplication (see `Loggable`).
#[derive(Deserialize, Serialize, JsonSchema, Validate, Clone, Debug, PartialEq, Hash)]
#[serde(rename_all = "snake_case")]
pub struct CountRequestInternal {
    /// Look only for points which satisfies this conditions
    #[validate(nested)]
    pub filter: Option<Filter>,
    /// If true, count exact number of points. If false, count approximate number of points faster.
    /// Approximate count might be unreliable during the indexing process. Default: true
    #[serde(default = "default_exact_count")]
    pub exact: bool,
}
/// Serde default for [`CountRequestInternal::exact`]: exact counting is
/// enabled unless the caller explicitly opts out.
pub const fn default_exact_count() -> bool {
    true
}
// Response body for count requests.
#[derive(Debug, Default, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct CountResult {
    /// Number of points which satisfy the conditions
    pub count: usize,
}
#[derive(Error, Debug, Clone, PartialEq)]
#[error("{0}")]
pub enum CollectionError {
#[error("Wrong input: {description}")]
BadInput { description: String },
#[error("{what} not found")]
NotFound { what: String },
#[error("No point with id {missed_point_id} found")]
PointNotFound { missed_point_id: PointIdType },
#[error("Service internal error: {error}")]
ServiceError {
error: String,
backtrace: Option<String>,
},
#[error("Bad request: {description}")]
BadRequest { description: String },
#[error("Operation Cancelled: {description}")]
Cancelled { description: String },
#[error("Bad shard selection: {description}")]
BadShardSelection { description: String },
#[error(
"{shards_failed} out of {shards_total} shards failed to apply operation. First error captured: {first_err}"
)]
InconsistentShardFailure {
shards_total: u32,
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/loggable.rs | lib/collection/src/operations/loggable.rs | use std::hash::{DefaultHasher, Hash, Hasher};
use std::sync::Arc;
use segment::data_types::facets::FacetParams;
use serde_json::Value;
use shard::operations::CollectionUpdateOperations;
use crate::operations::types::{CountRequestInternal, PointRequestInternal, ScrollRequestInternal};
use crate::operations::universal_query::shard_query::ShardQueryRequest;
// Abstraction over requests that can be recorded in the request log.
pub trait Loggable {
    // JSON representation of the request for logging purposes.
    fn to_log_value(&self) -> serde_json::Value;
    // Short static identifier of the request kind (e.g. "scroll", "count").
    fn request_name(&self) -> &'static str;
    /// Hash of the query, which is going to be used for approximate deduplication and counting.
    fn request_hash(&self) -> u64;
}
impl Loggable for CollectionUpdateOperations {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"points-update"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
impl Loggable for Vec<ShardQueryRequest> {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"query"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
impl Loggable for ScrollRequestInternal {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"scroll"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
impl<T: Loggable> Loggable for Arc<T> {
fn to_log_value(&self) -> Value {
self.as_ref().to_log_value()
}
fn request_name(&self) -> &'static str {
self.as_ref().request_name()
}
fn request_hash(&self) -> u64 {
self.as_ref().request_hash()
}
}
impl Loggable for FacetParams {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"facet"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
impl Loggable for CountRequestInternal {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"count"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
impl Loggable for PointRequestInternal {
fn to_log_value(&self) -> Value {
serde_json::to_value(self).unwrap_or_default()
}
fn request_name(&self) -> &'static str {
"retrieve"
}
fn request_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.request_name().hash(&mut hasher);
self.hash(&mut hasher);
hasher.finish()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/mod.rs | lib/collection/src/operations/mod.rs | pub mod cluster_ops;
pub mod config_diff;
pub mod consistency_params;
pub mod conversions;
pub mod generalizer;
pub mod loggable;
pub mod operation_effect;
pub mod payload_ops;
pub mod point_ops;
pub mod shard_selector_internal;
pub mod shared_storage_config;
pub mod snapshot_ops;
pub mod snapshot_storage_ops;
#[cfg(feature = "staging")]
pub mod staging;
pub mod types;
pub mod universal_query;
pub mod validation;
pub mod vector_ops;
pub mod vector_params_builder;
pub mod verification;
// Re-export so downstream code can keep using
// `crate::operations::query_enum::QueryEnum`; the type itself lives in the
// `shard` crate — presumably this module preserves a historical path.
pub mod query_enum {
    pub use shard::query::query_enum::QueryEnum;
}
use ahash::AHashMap;
use segment::types::ExtendedPointId;
pub use shard::operations::*;
use crate::hash_ring::{HashRingRouter, ShardIds};
use crate::shards::shard::ShardId;
/// Trait for Operation enums to split them by shard.
pub trait SplitByShard {
    /// Partition `self` into per-shard sub-operations according to the hash ring.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self>
    where
        Self: Sized;
}
impl SplitByShard for CollectionUpdateOperations {
fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
match self {
CollectionUpdateOperations::PointOperation(operation) => operation
.split_by_shard(ring)
.map(CollectionUpdateOperations::PointOperation),
CollectionUpdateOperations::VectorOperation(operation) => operation
.split_by_shard(ring)
.map(CollectionUpdateOperations::VectorOperation),
CollectionUpdateOperations::PayloadOperation(operation) => operation
.split_by_shard(ring)
.map(CollectionUpdateOperations::PayloadOperation),
operation @ CollectionUpdateOperations::FieldIndexOperation(_) => {
OperationToShard::to_all(operation)
}
}
}
}
/// A mapping of operation to shard.
/// Is a result of splitting one operation into several shards by corresponding PointIds
pub enum OperationToShard<O> {
    /// Operation was split into per-shard pieces; each entry targets one shard.
    ByShard(Vec<(ShardId, O)>),
    /// Operation applies to every shard unchanged (e.g. filter-based deletes,
    /// field-index operations).
    ToAll(O),
}
impl<O> OperationToShard<O> {
    /// Build a per-shard mapping from `(shard_id, operation)` pairs.
    pub fn by_shard(operations: impl IntoIterator<Item = (ShardId, O)>) -> Self {
        Self::ByShard(operations.into_iter().collect())
    }

    /// A mapping that targets no shard at all.
    pub fn to_none() -> Self {
        Self::ByShard(Vec::new())
    }

    /// An operation that must be applied to every shard as-is.
    pub fn to_all(operation: O) -> Self {
        Self::ToAll(operation)
    }

    /// Apply `f` to every contained operation while preserving the shard mapping.
    pub fn map<O2>(self, f: impl Fn(O) -> O2) -> OperationToShard<O2> {
        match self {
            Self::ByShard(by_shard) => OperationToShard::ByShard(
                by_shard
                    .into_iter()
                    .map(|(shard_id, op)| (shard_id, f(op)))
                    .collect(),
            ),
            Self::ToAll(op) => OperationToShard::ToAll(f(op)),
        }
    }
}
/// Split iterator of items that have point ids by shard
fn split_iter_by_shard<I, F, O: Clone>(
    iter: I,
    id_extractor: F,
    ring: &HashRingRouter,
) -> OperationToShard<Vec<O>>
where
    I: IntoIterator<Item = O>,
    F: Fn(&O) -> ExtendedPointId,
{
    // Group items into one bucket per target shard. An item is cloned into
    // every shard its ID maps to (more than one only while resharding).
    let mut buckets: AHashMap<ShardId, Vec<O>> = AHashMap::new();
    for item in iter {
        let point_id = id_extractor(&item);
        for shard_id in point_to_shards(&point_id, ring) {
            buckets.entry(shard_id).or_default().push(item.clone());
        }
    }
    OperationToShard::by_shard(buckets)
}
/// Get the shards for a point ID
///
/// Normally returns a single shard ID. Might return multiple if resharding is currently in
/// progress.
///
/// # Panics
///
/// Panics if the hash ring is empty and there is no shard for the given point ID.
fn point_to_shards(point_id: &ExtendedPointId, ring: &HashRingRouter) -> ShardIds {
    let ids = ring.get(point_id);
    assert!(!ids.is_empty(), "Hash ring is guaranteed to be non-empty");
    ids
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/staging.rs | lib/collection/src/operations/staging.rs | //! Staging-only operations for testing and debugging purposes.
//!
//! This module contains operations that are only available when the `staging` feature is enabled.
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use validator::Validate;
use crate::shards::shard::PeerId;
/// Serde default for [`TestSlowDown::duration`]: one second.
fn default_test_slow_down_duration() -> f64 {
    1.0_f64
}
// Wrapper operation carrying the slow-down parameters.
// (Plain comments rather than doc comments so the generated JSON schema stays unchanged.)
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct TestSlowDownOperation {
    // Validated recursively via `#[validate(nested)]`.
    #[validate(nested)]
    pub test_slow_down: TestSlowDown,
}
// Parameters of an artificial delay, only compiled under the `staging` feature.
// (Plain comments rather than doc comments so the generated JSON schema stays unchanged.)
#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct TestSlowDown {
    /// Target peer ID to execute the sleep on.
    /// If not specified, the operation will be executed on all peers.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub peer_id: Option<PeerId>,
    /// Duration of the sleep in seconds (default: 1.0, max: 300.0).
    #[serde(default = "default_test_slow_down_duration")]
    #[validate(range(min = 0.0, max = 300.0))]
    pub duration: f64,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/snapshot_storage_ops.rs | lib/collection/src/operations/snapshot_storage_ops.rs | use std::io::{BufReader, Read};
use std::path::{Path, PathBuf};
use common::budget::ResourceBudget;
use fs_err as fs;
use fs_err::{File, tokio as tokio_fs};
use futures::StreamExt;
use object_store::WriteMultipart;
use segment::common::BYTES_IN_MB;
use tokio::io::AsyncWriteExt;
use super::snapshot_ops::SnapshotDescription;
use super::types::{CollectionError, CollectionResult};
/// Build the object-store key for `path`, stripping any leading `./`
/// (e.g. `./path/to/file.txt` becomes the key `path/to/file.txt`).
pub(crate) fn trim_dot_slash(path: &Path) -> CollectionResult<object_store::path::Path> {
    let Some(key) = path.to_str() else {
        // Non-UTF-8 paths cannot be turned into object-store keys.
        return Err(CollectionError::object_storage_error(format!(
            "Failed to get key for snapshot: {}",
            path.display()
        )));
    };
    Ok(object_store::path::Path::from(key.trim_start_matches("./")))
}
fn get_filename(path: &str) -> CollectionResult<String> {
let path = PathBuf::from(path);
path.file_name()
.ok_or_else(|| CollectionError::object_storage_error("Failed to get file name".to_string()))
.and_then(|name| {
name.to_str()
.ok_or_else(|| {
CollectionError::object_storage_error("Failed to get file name".to_string())
})
.map(|name| name.to_string())
})
}
/// Describe a single snapshot object in remote storage via a HEAD request.
pub async fn get_snapshot_description(
    client: &dyn object_store::ObjectStore,
    path: &Path,
) -> CollectionResult<SnapshotDescription> {
    let file_meta: object_store::ObjectMeta = client
        .head(&trim_dot_slash(path)?)
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to get head: {e}")))?;
    let name = get_filename(path.to_str().ok_or_else(|| {
        CollectionError::object_storage_error(format!(
            "Failed to get key for snapshot: {}",
            path.display()
        ))
    })?)?;
    let size = file_meta.size;
    let last_modified = file_meta.last_modified.naive_local();
    // Checksum is not populated from object-store metadata here.
    let checksum = None;
    Ok(SnapshotDescription {
        name,
        creation_time: Some(last_modified),
        size,
        checksum,
    })
}
/// This function adjusts the chunk size based on service limits and the total size of the data to be uploaded.
/// Note:
///
/// * Amazon S3: <https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html>
/// partsize: min 5 MB, max 5 GB, up to 10,000 parts.
/// * Google Cloud Storage: <https://cloud.google.com/storage/quotas?hl=ja#objects>
/// partsize: min 5 MB, max 5 GB, up to 10,000 parts.
/// * Azure Storage: <https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob?tabs=microsoft-entra-id#remarks>
/// TODO: It looks like Azure Storage has different limits for different service versions.
/// Pick a multipart chunk size for uploading `local_source_path`.
///
/// Uses a 50 MB default, growing the chunk size when the file would otherwise
/// need more than the services' maximum part count (see limits above).
///
/// # Errors
///
/// Fails if the file metadata cannot be read or the file exceeds the 5 TB
/// maximum object size.
pub async fn get_appropriate_chunk_size(local_source_path: &Path) -> CollectionResult<usize> {
    const DEFAULT_CHUNK_SIZE: usize = 50 * 1024 * 1024;
    const MAX_PART_NUMBER: usize = 10000;
    /// 5TB as maximum object size.
    /// Source: <https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html>
    const MAX_UPLOAD_SIZE: usize = 5 * 1024 * 1024 * 1024 * 1024;

    let file_meta = tokio_fs::metadata(local_source_path).await?;
    let file_size = file_meta.len() as usize;

    // Reject files larger than the maximum object size supported by the services.
    if file_size > MAX_UPLOAD_SIZE {
        return Err(CollectionError::service_error(format!(
            "File size exceeds the maximum upload size: {MAX_UPLOAD_SIZE}"
        )));
    }

    // If the default chunk size would need more than MAX_PART_NUMBER parts,
    // grow the chunk so the whole file fits in exactly MAX_PART_NUMBER parts.
    if file_size > DEFAULT_CHUNK_SIZE * MAX_PART_NUMBER {
        return Ok(file_size.div_ceil(MAX_PART_NUMBER));
    }

    Ok(DEFAULT_CHUNK_SIZE)
}
/// Upload a local file to object storage as a multipart upload.
///
/// The file is streamed in chunks sized by [`get_appropriate_chunk_size`],
/// with bounded concurrency to avoid saturating the network.
///
/// # Errors
///
/// Fails if the multipart upload cannot be initiated, if reading the source
/// file fails, or if any part upload or the finalization fails.
pub async fn multipart_upload(
    client: &dyn object_store::ObjectStore,
    source_path: &Path,
    target_path: &Path,
) -> CollectionResult<()> {
    let s3_path = trim_dot_slash(target_path)?;
    let upload = client
        .put_multipart(&s3_path)
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to put multipart: {e}")))?;
    let chunk_size: usize = get_appropriate_chunk_size(source_path).await?;
    let mut write = WriteMultipart::new_with_chunk_size(upload, chunk_size);
    let file = File::open(source_path)?;
    let mut reader = BufReader::new(file);
    let mut buffer = vec![0u8; chunk_size];
    // Initialize CpuBudget to manage concurrency
    let cpu_budget = ResourceBudget::default();
    // Cap max concurrency to avoid saturating the network on high core count
    let max_concurrency = std::cmp::min(cpu_budget.available_cpu_budget(), 8);
    // Note:
    // 1. write.write() is sync but a worker thread is spawned internally.
    // 2. write.finish() will wait for all the worker threads to finish.
    loop {
        // Propagate read errors instead of silently leaving the loop, which
        // would otherwise finalize a truncated upload as if it had succeeded.
        let bytes_read = reader.read(&mut buffer)?;
        if bytes_read == 0 {
            break; // EOF
        }
        let chunk = &buffer[..bytes_read];
        // Wait for capacity before writing the buffer
        write
            .wait_for_capacity(max_concurrency)
            .await
            .map_err(|e| {
                CollectionError::service_error(format!("Failed to wait for capacity: {e}"))
            })?;
        write.write(chunk);
    }
    write
        .finish() // 2. write.finish() will wait for all the worker threads to finish.
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to finish upload: {e}")))?;
    Ok(())
}
/// List descriptions of all snapshot objects stored under `directory`.
pub async fn list_snapshot_descriptions(
    client: &dyn object_store::ObjectStore,
    directory: &Path,
) -> CollectionResult<Vec<SnapshotDescription>> {
    let prefix = trim_dot_slash(directory)?;
    let mut entries = client.list(Some(&prefix));
    let mut descriptions = Vec::new();
    while let Some(entry) = entries.next().await {
        let meta = entry.map_err(|e| {
            CollectionError::service_error(format!("Failed to list snapshots: {e}"))
        })?;
        descriptions.push(SnapshotDescription {
            name: get_filename(meta.location.as_ref())?,
            creation_time: Some(meta.last_modified.naive_local()),
            size: meta.size,
            checksum: None,
        });
    }
    Ok(descriptions)
}
/// Delete a snapshot object, returning `Ok(true)` once it has been removed.
///
/// A HEAD request is issued first so a missing snapshot surfaces as a
/// not-found error rather than a silent no-op delete.
pub async fn delete_snapshot(
    client: &dyn object_store::ObjectStore,
    path: &Path,
) -> CollectionResult<bool> {
    let s3_path = trim_dot_slash(path)?;
    if let Err(e) = client.head(&s3_path).await {
        return Err(match e {
            object_store::Error::NotFound { .. } => {
                CollectionError::not_found(format!("Snapshot {s3_path:?}"))
            }
            _ => CollectionError::service_error(format!("Failed to delete snapshot: {e}")),
        });
    }
    client
        .delete(&s3_path)
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to delete snapshot: {e}")))?;
    Ok(true)
}
/// Download a snapshot object from remote storage to `target_path`,
/// verifying the on-disk size against the number of bytes streamed.
pub async fn download_snapshot(
    client: &dyn object_store::ObjectStore,
    path: &Path,
    target_path: &Path,
) -> CollectionResult<()> {
    let download_start_time = tokio::time::Instant::now();
    let s3_path = trim_dot_slash(path)?;
    let download = client.get(&s3_path).await.map_err(|e| match e {
        object_store::Error::NotFound { .. } => {
            CollectionError::not_found(format!("Snapshot {s3_path:?}"))
        }
        _ => CollectionError::service_error(format!("Failed to get {s3_path}: {e}")),
    })?;
    let mut stream = download.into_stream();
    // Create the target directory if it does not exist
    if let Some(target_dir) = target_path.parent()
        && !target_dir.exists()
    {
        fs::create_dir_all(target_dir)?;
    }
    let mut file = tokio_fs::File::create(target_path)
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to create file: {e}")))?;
    // Count streamed bytes so the final size check can detect truncation.
    let mut total_size = 0;
    while let Some(data) = stream.next().await {
        let data = data.map_err(|e| {
            CollectionError::service_error(format!("Failed to get data from stream: {e}"))
        })?;
        file.write_all(&data)
            .await
            .map_err(|e| CollectionError::service_error(format!("Failed to write to file: {e}")))?;
        total_size += data.len();
    }
    // ensure flush
    file.flush()
        .await
        .map_err(|e| CollectionError::service_error(format!("Failed to flush file: {e}")))?;
    let download_duration = download_start_time.elapsed();
    let total_size_mb = total_size as f64 / BYTES_IN_MB as f64;
    let download_speed_mbps = total_size_mb / download_duration.as_secs_f64();
    log::debug!(
        "Object storage snapshot download completed: path={}, size={:.2} MB, duration={:.2}s, speed={:.2} MB/s",
        target_path.display(),
        total_size_mb,
        download_duration.as_secs_f64(),
        download_speed_mbps
    );
    // check len to file len
    let file_meta = tokio_fs::metadata(target_path).await?;
    if file_meta.len() != total_size as u64 {
        return Err(CollectionError::service_error(format!(
            "Downloaded file size does not match the expected size: {} != {}",
            file_meta.len(),
            total_size
        )));
    }
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/point_ops.rs | lib/collection/src/operations/point_ops.rs | use std::collections::HashMap;
use ahash::AHashMap;
use api::rest::ShardKeySelector;
use itertools::izip;
use schemars::JsonSchema;
use segment::common::utils::transpose_map_into_named_vector;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::VectorInternal;
use segment::types::Filter;
use serde::{Deserialize, Serialize};
pub use shard::operations::point_ops::*;
use validator::{Validate, ValidationErrors};
use super::{OperationToShard, SplitByShard, point_to_shards, split_iter_by_shard};
use crate::hash_ring::HashRingRouter;
use crate::shards::shard::ShardId;
// Selector for points: either an explicit list of IDs or a filter condition.
// `untagged` lets the REST payload carry either shape without a discriminator.
#[derive(Debug, Deserialize, Serialize, JsonSchema)]
#[serde(untagged, rename_all = "snake_case")]
pub enum PointsSelector {
    /// Select points by list of IDs
    PointIdsSelector(PointIdsList),
    /// Select points by filtering condition
    FilterSelector(FilterSelector),
}
impl Validate for PointsSelector {
    /// Delegate validation to whichever selector variant is present.
    fn validate(&self) -> Result<(), ValidationErrors> {
        match self {
            Self::PointIdsSelector(ids) => ids.validate(),
            Self::FilterSelector(filter) => filter.validate(),
        }
    }
}
// Filter-based point selector, optionally scoped by shard key.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
#[serde(rename_all = "snake_case")]
pub struct FilterSelector {
    // Condition the selected points must match.
    pub filter: Filter,
    // NOTE(review): an absent shard_key presumably means "all shards" — confirm
    // against the shard selection logic in the caller.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shard_key: Option<ShardKeySelector>,
}
/// Defines write ordering guarantees for collection operations
///
/// * `weak` - write operations may be reordered, works faster, default
///
/// * `medium` - write operations go through dynamically selected leader, may be inconsistent for a short period of time in case of leader change
///
/// * `strong` - Write operations go through the permanent leader, consistent, but may be unavailable if leader is down
///
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, Default)]
#[serde(rename_all = "snake_case")]
pub enum WriteOrdering {
    // Default: fastest, no ordering guarantees across replicas.
    #[default]
    Weak,
    // Routed through a dynamically selected leader.
    Medium,
    // Routed through the permanent leader.
    Strong,
}
impl SplitByShard for PointOperations {
    /// Route point operations by the shard their point IDs hash to.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        match self {
            PointOperations::UpsertPoints(upsert_points) => upsert_points
                .split_by_shard(ring)
                .map(PointOperations::UpsertPoints),
            PointOperations::UpsertPointsConditional(conditional_upsert) => conditional_upsert
                .split_by_shard(ring)
                .map(PointOperations::UpsertPointsConditional),
            PointOperations::DeletePoints { ids } => split_iter_by_shard(ids, |id| *id, ring)
                .map(|ids| PointOperations::DeletePoints { ids }),
            // Filter-based deletes cannot be routed by point ID; broadcast them.
            by_filter @ PointOperations::DeletePointsByFilter(_) => {
                OperationToShard::to_all(by_filter)
            }
            PointOperations::SyncPoints(_) => {
                // Sync is addressed to one concrete shard: reaching this router
                // is a bug, so panic in debug builds and drop it in release.
                #[cfg(debug_assertions)]
                panic!("SyncPoints operation is intended to by applied to specific shard only");
                #[cfg(not(debug_assertions))]
                OperationToShard::by_shard(vec![])
            }
            #[cfg(feature = "staging")]
            test_delay @ PointOperations::TestDelay(_) => OperationToShard::to_all(test_delay),
        }
    }
}
impl SplitByShard for PointInsertOperationsInternal {
fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
match self {
PointInsertOperationsInternal::PointsBatch(batch) => batch
.split_by_shard(ring)
.map(PointInsertOperationsInternal::PointsBatch),
PointInsertOperationsInternal::PointsList(list) => list
.split_by_shard(ring)
.map(PointInsertOperationsInternal::PointsList),
}
}
}
impl SplitByShard for ConditionalInsertOperationInternal {
    /// Split the inner upsert by shard, re-attaching the condition to each piece.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        let Self {
            points_op,
            condition,
        } = self;
        match points_op.split_by_shard(ring) {
            // Broadcast case: the condition can be moved, no clone needed.
            OperationToShard::ToAll(points_op) => OperationToShard::ToAll(Self {
                points_op,
                condition,
            }),
            // Per-shard case: each shard gets its own copy of the condition.
            OperationToShard::ByShard(parts) => {
                OperationToShard::by_shard(parts.into_iter().map(|(shard_id, points_op)| {
                    (
                        shard_id,
                        Self {
                            points_op,
                            condition: condition.clone(),
                        },
                    )
                }))
            }
        }
    }
}
impl SplitByShard for BatchPersisted {
fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
let batch = self;
let mut batch_by_shard: AHashMap<ShardId, BatchPersisted> = AHashMap::new();
let BatchPersisted {
ids,
vectors,
payloads,
} = batch;
if let Some(payloads) = payloads {
match vectors {
BatchVectorStructPersisted::Single(vectors) => {
for (id, vector, payload) in izip!(ids, vectors, payloads) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::Single(vec![]),
payloads: Some(vec![]),
});
batch.ids.push(id);
match &mut batch.vectors {
BatchVectorStructPersisted::Single(vectors) => {
vectors.push(vector.clone())
}
_ => unreachable!(), // TODO(sparse) propagate error
}
batch.payloads.as_mut().unwrap().push(payload.clone());
}
}
}
BatchVectorStructPersisted::MultiDense(vectors) => {
for (id, vector, payload) in izip!(ids, vectors, payloads) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::MultiDense(vec![]),
payloads: Some(vec![]),
});
batch.ids.push(id);
match &mut batch.vectors {
BatchVectorStructPersisted::MultiDense(vectors) => {
vectors.push(vector.clone())
}
_ => unreachable!(), // TODO(sparse) propagate error
}
batch.payloads.as_mut().unwrap().push(payload.clone());
}
}
}
BatchVectorStructPersisted::Named(named_vectors) => {
let named_vectors_list = if !named_vectors.is_empty() {
transpose_map_into_named_vector(named_vectors)
} else {
vec![NamedVectors::default(); ids.len()]
};
for (id, named_vector, payload) in izip!(ids, named_vectors_list, payloads) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::Named(HashMap::new()),
payloads: Some(vec![]),
});
batch.ids.push(id);
for (name, vector) in named_vector.clone() {
let name = name.into_owned();
let vector: VectorInternal = vector.to_owned();
match &mut batch.vectors {
BatchVectorStructPersisted::Named(batch_vectors) => {
batch_vectors
.entry(name)
.or_default()
.push(VectorPersisted::from(vector))
}
_ => unreachable!(), // TODO(sparse) propagate error
}
}
batch.payloads.as_mut().unwrap().push(payload.clone());
}
}
}
}
} else {
match vectors {
BatchVectorStructPersisted::Single(vectors) => {
for (id, vector) in izip!(ids, vectors) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::Single(vec![]),
payloads: None,
});
batch.ids.push(id);
match &mut batch.vectors {
BatchVectorStructPersisted::Single(vectors) => {
vectors.push(vector.clone())
}
_ => unreachable!(), // TODO(sparse) propagate error
}
}
}
}
BatchVectorStructPersisted::MultiDense(vectors) => {
for (id, vector) in izip!(ids, vectors) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::MultiDense(vec![]),
payloads: None,
});
batch.ids.push(id);
match &mut batch.vectors {
BatchVectorStructPersisted::MultiDense(vectors) => {
vectors.push(vector.clone())
}
_ => unreachable!(), // TODO(sparse) propagate error
}
}
}
}
BatchVectorStructPersisted::Named(named_vectors) => {
let named_vectors_list = if !named_vectors.is_empty() {
transpose_map_into_named_vector(named_vectors)
} else {
vec![NamedVectors::default(); ids.len()]
};
for (id, named_vector) in izip!(ids, named_vectors_list) {
for shard_id in point_to_shards(&id, ring) {
let batch =
batch_by_shard
.entry(shard_id)
.or_insert_with(|| BatchPersisted {
ids: vec![],
vectors: BatchVectorStructPersisted::Named(HashMap::new()),
payloads: None,
});
batch.ids.push(id);
for (name, vector) in named_vector.clone() {
let name = name.into_owned();
let vector: VectorInternal = vector.to_owned();
match &mut batch.vectors {
BatchVectorStructPersisted::Named(batch_vectors) => {
batch_vectors
.entry(name)
.or_default()
.push(VectorPersisted::from(vector))
}
_ => unreachable!(), // TODO(sparse) propagate error
}
}
}
}
}
}
}
OperationToShard::by_shard(batch_by_shard)
}
}
impl SplitByShard for Vec<PointStructPersisted> {
    /// Partition points by the shard their ID hashes to on `ring`.
    fn split_by_shard(self, ring: &HashRingRouter) -> OperationToShard<Self> {
        split_iter_by_shard(self, |p| p.id, ring)
    }
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use api::rest::{Batch, BatchVectorStruct, PointInsertOperations, PointsBatch};
use segment::types::{ExtendedPointId, PointIdType};
use super::*;
#[test]
fn split_point_operations() {
let id1 = ExtendedPointId::from_str("4072cda9-8ac6-46fa-9367-7372bc5e4798").unwrap();
let id2 = ExtendedPointId::from(321);
let id3 = ExtendedPointId::from_str("fe23809b-dcc9-40ba-8255-a45dce6f10be").unwrap();
let id4 = ExtendedPointId::from_str("a307aa98-d5b5-4b0c-aec9-f963171acb74").unwrap();
let id5 = ExtendedPointId::from_str("aaf3bb55-dc48-418c-ba76-18badc0d7fc5").unwrap();
let id6 = ExtendedPointId::from_str("63000b52-641b-45cf-bc15-14de3944b9dd").unwrap();
let id7 = ExtendedPointId::from_str("aab5ef35-83ad-49ea-a629-508e308872f7").unwrap();
let id8 = ExtendedPointId::from(0);
let id9 = ExtendedPointId::from(100500);
let all_ids = vec![id1, id2, id3, id4, id5, id6, id7, id8, id9];
let points: Vec<_> = all_ids
.iter()
.map(|id| PointStructPersisted {
id: *id,
vector: VectorStructPersisted::from(vec![0.1, 0.2, 0.3]),
payload: None,
})
.collect();
let mut hash_ring = HashRingRouter::single();
hash_ring.add(0);
hash_ring.add(1);
hash_ring.add(2);
let operation_to_shard = points.split_by_shard(&hash_ring);
match operation_to_shard {
OperationToShard::ByShard(by_shard) => {
for (shard_id, points) in by_shard {
for point in points {
// Important: This mapping should not change with new updates!
if point.id == id1 {
assert_eq!(shard_id, 2);
}
if point.id == id2 {
assert_eq!(shard_id, 1);
}
if point.id == id3 {
assert_eq!(shard_id, 2);
}
if point.id == id4 {
assert_eq!(shard_id, 2);
}
if point.id == id5 {
assert_eq!(shard_id, 0);
}
if point.id == id6 {
assert_eq!(shard_id, 0);
}
if point.id == id7 {
assert_eq!(shard_id, 0);
}
if point.id == id8 {
assert_eq!(shard_id, 2);
}
if point.id == id9 {
assert_eq!(shard_id, 1);
}
}
}
}
OperationToShard::ToAll(_) => panic!("expected ByShard"),
}
}
#[test]
fn validate_batch() {
let batch = PointInsertOperations::PointsBatch(PointsBatch {
batch: Batch {
ids: vec![PointIdType::NumId(0)],
vectors: BatchVectorStruct::Single(vec![]),
payloads: None,
},
shard_key: None,
update_filter: None,
});
assert!(batch.validate().is_err());
let batch = PointInsertOperations::PointsBatch(PointsBatch {
batch: Batch {
ids: vec![PointIdType::NumId(0)],
vectors: BatchVectorStruct::Single(vec![vec![0.1]]),
payloads: None,
},
shard_key: None,
update_filter: None,
});
assert!(batch.validate().is_ok());
let batch = PointInsertOperations::PointsBatch(PointsBatch {
batch: Batch {
ids: vec![PointIdType::NumId(0)],
vectors: BatchVectorStruct::Single(vec![vec![0.1]]),
payloads: Some(vec![]),
},
shard_key: None,
update_filter: None,
});
assert!(batch.validate().is_err());
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/conversions.rs | lib/collection/src/operations/conversions.rs | use std::collections::BTreeMap;
use std::num::{NonZeroU32, NonZeroU64};
use std::time::Duration;
use api::conversions::json::json_path_from_proto;
use api::grpc::conversions::{
convert_shard_key_from_grpc, convert_shard_key_from_grpc_opt, convert_shard_key_to_grpc,
from_grpc_dist,
};
use api::grpc::qdrant::quantization_config_diff::Quantization;
use api::grpc::qdrant::update_collection_cluster_setup_request::{
Operation as ClusterOperationsPb, Operation,
};
use api::rest::schema::ShardKeySelector;
use api::rest::{BaseGroupRequest, LookupLocation, MaxOptimizationThreads, ShardKeyWithFallback};
use itertools::Itertools;
use segment::common::operation_error::OperationError;
use segment::data_types::modifier::Modifier;
use segment::data_types::vectors::{VectorInternal, VectorStructInternal};
use segment::types::{
Distance, Filter, HnswConfig, MultiVectorConfig, QuantizationConfig, StrictModeConfigOutput,
WithPayloadInterface,
};
use shard::retrieve::record_internal::RecordInternal;
use tonic::Status;
use super::cluster_ops::{ReplicatePoints, ReplicatePointsOperation, ReshardingDirection};
use super::consistency_params::ReadConsistency;
use super::types::{
CollectionConfig, ContextExamplePair, CoreSearchRequest, Datatype, DiscoverRequestInternal,
GroupsResult, PointGroup, RecommendExample, RecommendGroupsRequestInternal, ReshardingInfo,
SparseIndexParams, SparseVectorParams, SparseVectorsConfig, VectorParamsDiff,
VectorsConfigDiff,
};
use crate::config::{
CollectionParams, ShardingMethod, WalConfig, default_replication_factor,
default_write_consistency_factor,
};
use crate::lookup::WithLookup;
use crate::lookup::types::WithLookupInterface;
use crate::operations::ClockTag;
use crate::operations::cluster_ops::{
AbortShardTransfer, AbortTransferOperation, ClusterOperations, CreateShardingKey,
CreateShardingKeyOperation, DropReplicaOperation, DropShardingKey, DropShardingKeyOperation,
MoveShard, MoveShardOperation, Replica, ReplicateShard, ReplicateShardOperation,
RestartTransfer, RestartTransferOperation,
};
use crate::operations::config_diff::{
CollectionParamsDiff, HnswConfigDiff, OptimizersConfigDiff, QuantizationConfigDiff,
WalConfigDiff,
};
use crate::operations::point_ops::{FilterSelector, PointIdsList, PointsSelector, WriteOrdering};
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{
AliasDescription, CollectionClusterInfo, CollectionInfo, CollectionStatus, CollectionWarning,
CountResult, LocalShardInfo, OptimizersStatus, RecommendRequestInternal, RemoteShardInfo,
ShardTransferInfo, UpdateResult, UpdateStatus, VectorParams, VectorsConfig,
};
use crate::optimizers_builder::OptimizersConfig;
use crate::shards::remote_shard::CollectionCoreSearchRequest;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::transfer::ShardTransferMethod;
/// Map the internal sharding method onto its gRPC enum discriminant.
pub fn sharding_method_to_proto(sharding_method: ShardingMethod) -> i32 {
    use api::grpc::qdrant::ShardingMethod as Grpc;

    let grpc = match sharding_method {
        ShardingMethod::Auto => Grpc::Auto,
        ShardingMethod::Custom => Grpc::Custom,
    };
    grpc as i32
}
/// Parse a gRPC sharding-method discriminant into the internal enum.
pub fn sharding_method_from_proto(sharding_method: i32) -> Result<ShardingMethod, Status> {
    use api::grpc::qdrant::ShardingMethod as Grpc;

    match Grpc::try_from(sharding_method) {
        Ok(Grpc::Auto) => Ok(ShardingMethod::Auto),
        Ok(Grpc::Custom) => Ok(ShardingMethod::Custom),
        Err(err) => Err(Status::invalid_argument(format!(
            "Cannot convert ShardingMethod: {sharding_method}, error: {err}"
        ))),
    }
}
/// Convert internal [`WriteOrdering`] into its gRPC wire representation.
pub fn write_ordering_to_proto(ordering: WriteOrdering) -> api::grpc::qdrant::WriteOrdering {
    use api::grpc::qdrant::WriteOrderingType as Grpc;

    let r#type = match ordering {
        WriteOrdering::Weak => Grpc::Weak,
        WriteOrdering::Medium => Grpc::Medium,
        WriteOrdering::Strong => Grpc::Strong,
    } as i32;
    api::grpc::qdrant::WriteOrdering { r#type }
}
/// Convert an optional gRPC write-ordering message into [`WriteOrdering`].
///
/// A missing message defaults to [`WriteOrdering::Weak`].
///
/// # Errors
///
/// Returns `Status::invalid_argument` when the wire discriminant is unknown.
pub fn write_ordering_from_proto(
    ordering: Option<api::grpc::qdrant::WriteOrdering>,
) -> Result<WriteOrdering, Status> {
    use api::grpc::qdrant::WriteOrderingType;

    let ordering_parsed = match ordering {
        None => WriteOrderingType::Weak,
        Some(write_ordering) => {
            // `map_err` + `?` replaces the nested match with an early return.
            WriteOrderingType::try_from(write_ordering.r#type).map_err(|_| {
                Status::invalid_argument(format!(
                    "cannot convert ordering: {}",
                    write_ordering.r#type
                ))
            })?
        }
    };
    Ok(match ordering_parsed {
        WriteOrderingType::Weak => WriteOrdering::Weak,
        WriteOrderingType::Medium => WriteOrdering::Medium,
        WriteOrderingType::Strong => WriteOrdering::Strong,
    })
}
/// Convert a gRPC `RetrievedPoint` into the internal record representation.
///
/// `with_payload` reflects whether the original request asked for payloads;
/// when false, the (expected-empty) wire payload is dropped.
pub fn try_record_from_grpc(
    point: api::grpc::qdrant::RetrievedPoint,
    with_payload: bool,
) -> Result<RecordInternal, Status> {
    let api::grpc::qdrant::RetrievedPoint {
        id,
        payload,
        vectors,
        shard_key,
        order_value,
    } = point;
    let id = id
        .ok_or_else(|| Status::invalid_argument("retrieved point does not have an ID"))?
        .try_into()?;
    let payload = if with_payload {
        Some(api::conversions::json::proto_to_payloads(payload)?)
    } else {
        // The server should not have sent a payload that was not requested.
        debug_assert!(payload.is_empty());
        None
    };
    let vector: Option<_> = vectors
        .map(VectorStructInternal::try_from)
        .transpose()
        .map_err(|e| Status::invalid_argument(format!("Cannot convert vectors: {e}")))?;
    let order_value = order_value.map(TryFrom::try_from).transpose()?;
    Ok(RecordInternal {
        id,
        payload,
        vector,
        shard_key: convert_shard_key_from_grpc_opt(shard_key),
        order_value,
    })
}
#[allow(clippy::type_complexity)]
pub fn try_discover_request_from_grpc(
value: api::grpc::qdrant::DiscoverPoints,
) -> Result<
(
DiscoverRequestInternal,
String,
Option<ReadConsistency>,
Option<Duration>,
Option<api::grpc::qdrant::ShardKeySelector>,
),
Status,
> {
let api::grpc::qdrant::DiscoverPoints {
collection_name,
target,
context,
filter,
limit,
offset,
with_payload,
params,
using,
with_vectors,
lookup_from,
read_consistency,
timeout,
shard_key_selector,
} = value;
let target = target.map(RecommendExample::try_from).transpose()?;
let context = context
.into_iter()
.map(|pair| {
match (
pair.positive.map(|p| p.try_into()),
pair.negative.map(|n| n.try_into()),
) {
(Some(Ok(positive)), Some(Ok(negative))) => {
Ok(ContextExamplePair { positive, negative })
}
(Some(Err(e)), _) | (_, Some(Err(e))) => Err(e),
(None, _) | (_, None) => Err(Status::invalid_argument(
"Both positive and negative are required in a context pair",
)),
}
})
.try_collect()?;
let request = DiscoverRequestInternal {
target,
context: Some(context),
filter: filter.map(|f| f.try_into()).transpose()?,
params: params.map(|p| p.into()),
limit: limit as usize,
offset: offset.map(|x| x as usize),
with_payload: with_payload.map(|wp| wp.try_into()).transpose()?,
with_vector: Some(
with_vectors
.map(|selector| selector.into())
.unwrap_or_default(),
),
using: using.map(|u| u.into()),
lookup_from: lookup_from.map(LookupLocation::try_from).transpose()?,
};
let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
let timeout = timeout.map(Duration::from_secs);
Ok((
request,
collection_name,
read_consistency,
timeout,
shard_key_selector,
))
}
/// Convert a gRPC HNSW config diff into the internal representation.
///
/// gRPC carries numeric fields as `u64`; they are narrowed to `usize`
/// (lossless on 64-bit targets).
impl From<api::grpc::qdrant::HnswConfigDiff> for HnswConfigDiff {
    fn from(value: api::grpc::qdrant::HnswConfigDiff) -> Self {
        // Exhaustive destructuring: adding a proto field without mapping it
        // becomes a compile error here.
        let api::grpc::qdrant::HnswConfigDiff {
            m,
            ef_construct,
            full_scan_threshold,
            max_indexing_threads,
            on_disk,
            payload_m,
            inline_storage,
        } = value;
        Self {
            m: m.map(|v| v as usize),
            ef_construct: ef_construct.map(|v| v as usize),
            full_scan_threshold: full_scan_threshold.map(|v| v as usize),
            max_indexing_threads: max_indexing_threads.map(|v| v as usize),
            on_disk,
            payload_m: payload_m.map(|v| v as usize),
            inline_storage,
        }
    }
}
/// Convert the internal HNSW config diff into its gRPC form
/// (inverse of the conversion above; `usize` widens to `u64`).
impl From<HnswConfigDiff> for api::grpc::qdrant::HnswConfigDiff {
    fn from(value: HnswConfigDiff) -> Self {
        let HnswConfigDiff {
            m,
            ef_construct,
            full_scan_threshold,
            max_indexing_threads,
            on_disk,
            payload_m,
            inline_storage,
        } = value;
        Self {
            m: m.map(|v| v as u64),
            ef_construct: ef_construct.map(|v| v as u64),
            full_scan_threshold: full_scan_threshold.map(|v| v as u64),
            max_indexing_threads: max_indexing_threads.map(|v| v as u64),
            on_disk,
            payload_m: payload_m.map(|v| v as u64),
            inline_storage,
        }
    }
}
/// Convert a gRPC WAL config diff into the internal diff type.
impl From<api::grpc::qdrant::WalConfigDiff> for WalConfigDiff {
    fn from(value: api::grpc::qdrant::WalConfigDiff) -> Self {
        let api::grpc::qdrant::WalConfigDiff {
            wal_capacity_mb,
            wal_segments_ahead,
            wal_retain_closed,
        } = value;
        Self {
            wal_capacity_mb: wal_capacity_mb.map(|v| v as usize),
            wal_segments_ahead: wal_segments_ahead.map(|v| v as usize),
            wal_retain_closed: wal_retain_closed.map(|v| v as usize),
        }
    }
}
/// Convert a gRPC collection-params diff, rejecting zero-valued factors.
impl TryFrom<api::grpc::qdrant::CollectionParamsDiff> for CollectionParamsDiff {
    type Error = Status;
    fn try_from(value: api::grpc::qdrant::CollectionParamsDiff) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::CollectionParamsDiff {
            replication_factor,
            write_consistency_factor,
            read_fan_out_factor,
            on_disk_payload,
        } = value;
        Ok(Self {
            // Both factors are modeled as `NonZeroU32`, so 0 is invalid input.
            replication_factor: replication_factor
                .map(|factor| {
                    NonZeroU32::new(factor)
                        .ok_or_else(|| Status::invalid_argument("`replication_factor` cannot be 0"))
                })
                .transpose()?,
            write_consistency_factor: write_consistency_factor
                .map(|factor| {
                    NonZeroU32::new(factor).ok_or_else(|| {
                        Status::invalid_argument("`write_consistency_factor` cannot be 0")
                    })
                })
                .transpose()?,
            read_fan_out_factor,
            on_disk_payload,
        })
    }
}
/// Convert a gRPC optimizers diff into the internal diff type.
impl TryFrom<api::grpc::qdrant::OptimizersConfigDiff> for OptimizersConfigDiff {
    type Error = Status;
    fn try_from(value: api::grpc::qdrant::OptimizersConfigDiff) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::OptimizersConfigDiff {
            deleted_threshold,
            vacuum_min_vector_number,
            default_segment_number,
            max_segment_size,
            memmap_threshold,
            indexing_threshold,
            flush_interval_sec,
            deprecated_max_optimization_threads,
            max_optimization_threads,
        } = value;
        Ok(Self {
            deleted_threshold,
            vacuum_min_vector_number: vacuum_min_vector_number.map(|v| v as usize),
            default_segment_number: default_segment_number.map(|v| v as usize),
            max_segment_size: max_segment_size.map(|v| v as usize),
            #[expect(deprecated)]
            memmap_threshold: memmap_threshold.map(|v| v as usize),
            indexing_threshold: indexing_threshold.map(|v| v as usize),
            flush_interval_sec,
            // TODO: remove deprecated field in a later version
            // The deprecated plain-integer field takes precedence when both
            // are set; the new field is still validated (the `?` runs eagerly).
            max_optimization_threads: deprecated_max_optimization_threads
                .map(|v| MaxOptimizationThreads::Threads(v as usize))
                .or(max_optimization_threads
                    .map(TryFrom::try_from)
                    .transpose()?),
        })
    }
}
impl TryFrom<api::grpc::qdrant::QuantizationConfigDiff> for QuantizationConfigDiff {
    type Error = Status;

    /// Map a gRPC quantization diff onto the internal enum.
    ///
    /// A diff that carries no concrete quantization variant is malformed
    /// and rejected with `invalid_argument`.
    fn try_from(value: api::grpc::qdrant::QuantizationConfigDiff) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::QuantizationConfigDiff { quantization } = value;
        let quantization = quantization.ok_or_else(|| {
            Status::invalid_argument("Quantization type is not specified")
        })?;
        let converted = match quantization {
            Quantization::Scalar(scalar) => Self::Scalar(scalar.try_into()?),
            Quantization::Product(product) => Self::Product(product.try_into()?),
            Quantization::Binary(binary) => Self::Binary(binary.try_into()?),
            Quantization::Disabled(_) => Self::new_disabled(),
        };
        Ok(converted)
    }
}
/// Convert the internal collection info into its gRPC response shape.
///
/// All nested configs are fully destructured so that adding a field to any
/// internal type forces this mapping to be revisited at compile time.
impl From<CollectionInfo> for api::grpc::qdrant::CollectionInfo {
    fn from(value: CollectionInfo) -> Self {
        let CollectionInfo {
            status,
            optimizer_status,
            warnings,
            indexed_vectors_count,
            points_count,
            segments_count,
            config,
            payload_schema,
        } = value;
        let CollectionConfig {
            params,
            hnsw_config,
            optimizer_config,
            wal_config,
            quantization_config,
            strict_mode_config,
            metadata,
        } = config;
        let OptimizersConfig {
            deleted_threshold,
            vacuum_min_vector_number,
            default_segment_number,
            max_segment_size,
            #[expect(deprecated)]
            memmap_threshold,
            indexing_threshold,
            flush_interval_sec,
            max_optimization_threads,
        } = optimizer_config;
        let HnswConfig {
            m,
            ef_construct,
            full_scan_threshold,
            max_indexing_threads,
            on_disk,
            payload_m,
            inline_storage,
        } = hnsw_config;
        let CollectionParams {
            vectors,
            shard_number,
            replication_factor,
            on_disk_payload,
            write_consistency_factor,
            read_fan_out_factor,
            sharding_method,
            sparse_vectors,
        } = params;
        api::grpc::qdrant::CollectionInfo {
            status: match status {
                CollectionStatus::Green => api::grpc::qdrant::CollectionStatus::Green,
                CollectionStatus::Yellow => api::grpc::qdrant::CollectionStatus::Yellow,
                CollectionStatus::Red => api::grpc::qdrant::CollectionStatus::Red,
                CollectionStatus::Grey => api::grpc::qdrant::CollectionStatus::Grey,
            }
            .into(),
            // The optimizer status proto flattens Ok/Error into a flag + message.
            optimizer_status: Some(match optimizer_status {
                OptimizersStatus::Ok => api::grpc::qdrant::OptimizerStatus {
                    ok: true,
                    error: "".to_string(),
                },
                OptimizersStatus::Error(error) => {
                    api::grpc::qdrant::OptimizerStatus { ok: false, error }
                }
            }),
            indexed_vectors_count: indexed_vectors_count.map(|count| count as u64),
            points_count: points_count.map(|count| count as u64),
            segments_count: segments_count as u64,
            config: Some(api::grpc::qdrant::CollectionConfig {
                params: Some(api::grpc::qdrant::CollectionParams {
                    // Single vs named (multi) vector configs map to the proto oneof.
                    vectors_config: {
                        let config = match vectors {
                            VectorsConfig::Single(vector_params) => {
                                Some(api::grpc::qdrant::vectors_config::Config::Params(
                                    vector_params.into(),
                                ))
                            }
                            VectorsConfig::Multi(vectors_params) => {
                                Some(api::grpc::qdrant::vectors_config::Config::ParamsMap(
                                    api::grpc::qdrant::VectorParamsMap {
                                        map: vectors_params
                                            .iter()
                                            .map(|(vector_name, vector_param)| {
                                                (vector_name.clone(), vector_param.clone().into())
                                            })
                                            .collect(),
                                    },
                                ))
                            }
                        };
                        Some(api::grpc::qdrant::VectorsConfig { config })
                    },
                    shard_number: shard_number.get(),
                    replication_factor: Some(replication_factor.get()),
                    on_disk_payload,
                    write_consistency_factor: Some(write_consistency_factor.get()),
                    read_fan_out_factor,
                    sharding_method: sharding_method.map(sharding_method_to_proto),
                    sparse_vectors_config: sparse_vectors.map(|sparse_vectors| {
                        api::grpc::qdrant::SparseVectorConfig {
                            map: sparse_vectors
                                .into_iter()
                                .map(|(name, sparse_vector_params)| {
                                    (name, sparse_vector_params.into())
                                })
                                .collect(),
                        }
                    }),
                }),
                hnsw_config: Some(api::grpc::qdrant::HnswConfigDiff {
                    m: Some(m as u64),
                    ef_construct: Some(ef_construct as u64),
                    full_scan_threshold: Some(full_scan_threshold as u64),
                    max_indexing_threads: Some(max_indexing_threads as u64),
                    on_disk,
                    payload_m: payload_m.map(|v| v as u64),
                    inline_storage,
                }),
                optimizer_config: Some(api::grpc::qdrant::OptimizersConfigDiff {
                    deleted_threshold: Some(deleted_threshold),
                    vacuum_min_vector_number: Some(vacuum_min_vector_number as u64),
                    default_segment_number: Some(default_segment_number as u64),
                    max_segment_size: max_segment_size.map(|x| x as u64),
                    memmap_threshold: memmap_threshold.map(|x| x as u64),
                    indexing_threshold: indexing_threshold.map(|x| x as u64),
                    flush_interval_sec: Some(flush_interval_sec),
                    // Both the deprecated and the current proto field are populated
                    // so older clients keep working.
                    deprecated_max_optimization_threads: max_optimization_threads.map(|x| x as u64),
                    max_optimization_threads: Some(From::from(max_optimization_threads)),
                }),
                wal_config: wal_config.map(|wal_config| {
                    let WalConfig {
                        wal_capacity_mb,
                        wal_segments_ahead,
                        wal_retain_closed,
                    } = wal_config;
                    api::grpc::qdrant::WalConfigDiff {
                        wal_capacity_mb: Some(wal_capacity_mb as u64),
                        wal_segments_ahead: Some(wal_segments_ahead as u64),
                        wal_retain_closed: Some(wal_retain_closed as u64),
                    }
                }),
                quantization_config: quantization_config.map(|x| x.into()),
                strict_mode_config: strict_mode_config
                    .map(api::grpc::qdrant::StrictModeConfig::from),
                metadata: metadata
                    .map(api::conversions::json::payload_to_proto)
                    .unwrap_or_default(),
            }),
            payload_schema: payload_schema
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.into()))
                .collect(),
            warnings: warnings
                .into_iter()
                .map(api::grpc::qdrant::CollectionWarning::from)
                .collect(),
        }
    }
}
impl From<CollectionWarning> for api::grpc::qdrant::CollectionWarning {
    /// Internal -> gRPC warning: a plain message pass-through.
    fn from(value: CollectionWarning) -> Self {
        Self {
            message: value.message,
        }
    }
}
impl From<api::grpc::qdrant::CollectionWarning> for CollectionWarning {
    /// gRPC -> internal warning: a plain message pass-through.
    fn from(value: api::grpc::qdrant::CollectionWarning) -> Self {
        Self {
            message: value.message,
        }
    }
}
impl TryFrom<i32> for CollectionStatus {
    type Error = Status;

    /// Decode a raw protobuf enum tag into the internal collection status.
    ///
    /// Both an out-of-range tag and the explicit `UnknownCollectionStatus`
    /// variant are rejected with `invalid_argument`.
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        use api::grpc::qdrant::CollectionStatus as GrpcStatus;
        match GrpcStatus::try_from(value) {
            Ok(GrpcStatus::Green) => Ok(CollectionStatus::Green),
            Ok(GrpcStatus::Yellow) => Ok(CollectionStatus::Yellow),
            Ok(GrpcStatus::Red) => Ok(CollectionStatus::Red),
            Ok(GrpcStatus::Grey) => Ok(CollectionStatus::Grey),
            Ok(GrpcStatus::UnknownCollectionStatus) => Err(Status::invalid_argument(format!(
                "Unknown CollectionStatus: {value}"
            ))),
            Err(err) => Err(Status::invalid_argument(format!(
                "Cannot convert CollectionStatus: {value}, error: {err}"
            ))),
        }
    }
}
/// Build a full optimizers config from a gRPC diff, defaulting unset
/// required fields to zero.
impl TryFrom<api::grpc::qdrant::OptimizersConfigDiff> for OptimizersConfig {
    type Error = Status;
    fn try_from(
        optimizer_config: api::grpc::qdrant::OptimizersConfigDiff,
    ) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::OptimizersConfigDiff {
            deleted_threshold,
            vacuum_min_vector_number,
            default_segment_number,
            max_segment_size,
            memmap_threshold,
            indexing_threshold,
            flush_interval_sec,
            deprecated_max_optimization_threads,
            max_optimization_threads,
        } = optimizer_config;
        // The deprecated plain-integer field wins over the new enum-valued
        // field when both are present.
        let converted_max_optimization_threads: Option<usize> =
            match deprecated_max_optimization_threads {
                None => match max_optimization_threads {
                    None => None,
                    Some(max_optimization_threads) => TryFrom::try_from(max_optimization_threads)?,
                },
                Some(threads) => Some(threads as usize),
            };
        Ok(Self {
            deleted_threshold: deleted_threshold.unwrap_or_default(),
            vacuum_min_vector_number: vacuum_min_vector_number.unwrap_or_default() as usize,
            default_segment_number: default_segment_number.unwrap_or_default() as usize,
            max_segment_size: max_segment_size.map(|x| x as usize),
            #[expect(deprecated)]
            memmap_threshold: memmap_threshold.map(|x| x as usize),
            indexing_threshold: indexing_threshold.map(|x| x as usize),
            flush_interval_sec: flush_interval_sec.unwrap_or_default(),
            max_optimization_threads: converted_max_optimization_threads,
        })
    }
}
/// Build a full WAL config from a gRPC diff; unset fields default to zero.
impl From<api::grpc::qdrant::WalConfigDiff> for WalConfig {
    fn from(wal_config: api::grpc::qdrant::WalConfigDiff) -> Self {
        let api::grpc::qdrant::WalConfigDiff {
            wal_capacity_mb,
            wal_segments_ahead,
            wal_retain_closed,
        } = wal_config;
        Self {
            wal_capacity_mb: wal_capacity_mb.unwrap_or_default() as usize,
            wal_segments_ahead: wal_segments_ahead.unwrap_or_default() as usize,
            wal_retain_closed: wal_retain_closed.unwrap_or_default() as usize,
        }
    }
}
impl TryFrom<api::grpc::qdrant::vectors_config::Config> for VectorsConfig {
    type Error = Status;

    /// Convert the gRPC vectors-config oneof: either a single unnamed
    /// vector config or a map of named configs.
    fn try_from(value: api::grpc::qdrant::vectors_config::Config) -> Result<Self, Self::Error> {
        match value {
            api::grpc::qdrant::vectors_config::Config::Params(vector_params) => {
                Ok(VectorsConfig::Single(vector_params.try_into()?))
            }
            api::grpc::qdrant::vectors_config::Config::ParamsMap(vectors_params) => {
                // Fallible collect short-circuits on the first invalid entry.
                let params_map = vectors_params
                    .map
                    .into_iter()
                    .map(|(name, params)| Ok((name, params.try_into()?)))
                    .collect::<Result<BTreeMap<_, _>, Status>>()?;
                Ok(VectorsConfig::Multi(params_map))
            }
        }
    }
}
/// Convert the gRPC vectors-config-diff oneof: a single unnamed diff or a
/// map of named diffs.
impl TryFrom<api::grpc::qdrant::vectors_config_diff::Config> for VectorsConfigDiff {
    type Error = Status;
    fn try_from(
        value: api::grpc::qdrant::vectors_config_diff::Config,
    ) -> Result<Self, Self::Error> {
        Ok(match value {
            api::grpc::qdrant::vectors_config_diff::Config::Params(vector_params) => {
                let diff: VectorParamsDiff = vector_params.try_into()?;
                VectorsConfigDiff::from(diff)
            }
            api::grpc::qdrant::vectors_config_diff::Config::ParamsMap(vectors_params) => {
                let mut params_map = BTreeMap::new();
                for (name, params) in vectors_params.map {
                    params_map.insert(name, params.try_into()?);
                }
                VectorsConfigDiff(params_map)
            }
        })
    }
}
/// Convert gRPC vector params, validating size and nested configs.
impl TryFrom<api::grpc::qdrant::VectorParams> for VectorParams {
    type Error = Status;
    fn try_from(vector_params: api::grpc::qdrant::VectorParams) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::VectorParams {
            size,
            distance,
            hnsw_config,
            quantization_config,
            on_disk,
            datatype,
            multivector_config,
        } = vector_params;
        Ok(Self {
            // Zero-dimensional vectors are rejected up front.
            size: NonZeroU64::new(size).ok_or_else(|| {
                Status::invalid_argument("VectorParams size must be greater than zero")
            })?,
            distance: from_grpc_dist(distance)?,
            hnsw_config: hnsw_config.map(Into::into),
            quantization_config: quantization_config
                .map(grpc_to_segment_quantization_config)
                .transpose()?,
            on_disk,
            datatype: convert_datatype_from_proto(datatype)?,
            multivector_config: multivector_config
                .map(MultiVectorConfig::try_from)
                .transpose()?,
        })
    }
}
/// Decode an optional protobuf `Datatype` tag.
///
/// `None` input and the explicit `Default` variant both map to `None`;
/// an unknown tag is rejected with `invalid_argument`.
fn convert_datatype_from_proto(datatype: Option<i32>) -> Result<Option<Datatype>, Status> {
    let Some(datatype_int) = datatype else {
        return Ok(None);
    };
    let grpc_datatype = api::grpc::qdrant::Datatype::try_from(datatype_int).map_err(|_| {
        Status::invalid_argument(format!("Cannot convert datatype: {datatype_int}"))
    })?;
    let converted = match grpc_datatype {
        api::grpc::qdrant::Datatype::Uint8 => Some(Datatype::Uint8),
        api::grpc::qdrant::Datatype::Float32 => Some(Datatype::Float32),
        api::grpc::qdrant::Datatype::Float16 => Some(Datatype::Float16),
        api::grpc::qdrant::Datatype::Default => None,
    };
    Ok(converted)
}
/// Convert a gRPC per-vector params diff into the internal diff type.
impl TryFrom<api::grpc::qdrant::VectorParamsDiff> for VectorParamsDiff {
    type Error = Status;
    fn try_from(vector_params: api::grpc::qdrant::VectorParamsDiff) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::VectorParamsDiff {
            hnsw_config,
            quantization_config,
            on_disk,
        } = vector_params;
        Ok(Self {
            hnsw_config: hnsw_config.map(Into::into),
            quantization_config: quantization_config.map(TryInto::try_into).transpose()?,
            on_disk,
        })
    }
}
/// Convert gRPC sparse-vector params (index config and IDF modifier).
impl TryFrom<api::grpc::qdrant::SparseVectorParams> for SparseVectorParams {
    type Error = Status;
    fn try_from(
        sparse_vector_params: api::grpc::qdrant::SparseVectorParams,
    ) -> Result<Self, Self::Error> {
        let api::grpc::qdrant::SparseVectorParams { index, modifier } = sparse_vector_params;
        Ok(Self {
            index: index
                .map(|index_config| -> Result<_, Status> {
                    Ok(SparseIndexParams {
                        full_scan_threshold: index_config.full_scan_threshold.map(|v| v as usize),
                        on_disk: index_config.on_disk,
                        datatype: convert_datatype_from_proto(index_config.datatype)?,
                    })
                })
                .transpose()?,
            modifier: modifier
                .and_then(|x|
                    // XXX: Invalid values silently converted to None
                    api::grpc::qdrant::Modifier::try_from(x).ok())
                .map(Modifier::from),
        })
    }
}
/// Convert internal sparse-vector params into their gRPC form.
impl From<SparseVectorParams> for api::grpc::qdrant::SparseVectorParams {
    fn from(sparse_vector_params: SparseVectorParams) -> Self {
        let SparseVectorParams { index, modifier } = sparse_vector_params;
        Self {
            index: index.map(|index_config| {
                let SparseIndexParams {
                    full_scan_threshold,
                    on_disk,
                    datatype,
                } = index_config;
                api::grpc::qdrant::SparseIndexConfig {
                    full_scan_threshold: full_scan_threshold.map(|v| v as u64),
                    on_disk,
                    // Proto enums are transported as their raw i32 tags.
                    datatype: datatype.map(|dt| api::grpc::qdrant::Datatype::from(dt).into()),
                }
            }),
            modifier: modifier.map(|modifier| api::grpc::qdrant::Modifier::from(modifier) as i32),
        }
    }
}
/// Translate a gRPC quantization config into the segment-level config.
///
/// The proto oneof must carry a concrete variant; an empty config is
/// rejected with `invalid_argument`.
fn grpc_to_segment_quantization_config(
    value: api::grpc::qdrant::QuantizationConfig,
) -> Result<QuantizationConfig, Status> {
    let api::grpc::qdrant::QuantizationConfig { quantization } = value;
    let Some(quantization) = quantization else {
        return Err(Status::invalid_argument(
            "QuantizationConfig must contain quantization",
        ));
    };
    let converted = match quantization {
        api::grpc::qdrant::quantization_config::Quantization::Scalar(config) => {
            QuantizationConfig::Scalar(config.try_into()?)
        }
        api::grpc::qdrant::quantization_config::Quantization::Product(config) => {
            QuantizationConfig::Product(config.try_into()?)
        }
        api::grpc::qdrant::quantization_config::Quantization::Binary(config) => {
            QuantizationConfig::Binary(config.try_into()?)
        }
    };
    Ok(converted)
}
impl TryFrom<api::grpc::qdrant::GetCollectionInfoResponse> for CollectionInfo {
type Error = Status;
fn try_from(
collection_info_response: api::grpc::qdrant::GetCollectionInfoResponse,
) -> Result<Self, Self::Error> {
let api::grpc::qdrant::GetCollectionInfoResponse { result, time: _ } =
collection_info_response;
match result {
None => Err(Status::invalid_argument("Malformed CollectionInfo type")),
Some(collection_info_response) => {
let api::grpc::qdrant::CollectionInfo {
status,
optimizer_status,
indexed_vectors_count,
points_count,
segments_count,
config,
payload_schema,
warnings,
} = collection_info_response;
Ok(Self {
status: CollectionStatus::try_from(status)?,
optimizer_status: match optimizer_status {
None => {
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/vector_params_builder.rs | lib/collection/src/operations/vector_params_builder.rs | use std::num::NonZeroU64;
use segment::types::{Distance, MultiVectorConfig, QuantizationConfig};
use crate::operations::config_diff::HnswConfigDiff;
use crate::operations::types::{Datatype, VectorParams};
/// Incremental builder for [`VectorParams`].
pub struct VectorParamsBuilder {
    // Accumulated params; all optional fields start unset.
    vector_params: VectorParams,
}
impl VectorParamsBuilder {
    /// Start building params for a vector of `size` dimensions compared
    /// with `distance`.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero — vector dimensionality must be non-zero.
    pub fn new(size: u64, distance: Distance) -> Self {
        VectorParamsBuilder {
            vector_params: VectorParams {
                // `expect` states the invariant instead of a bare `unwrap`.
                size: NonZeroU64::new(size).expect("vector size must be non-zero"),
                distance,
                hnsw_config: None,
                quantization_config: None,
                on_disk: None,
                datatype: None,
                multivector_config: None,
            },
        }
    }
    /// Set the per-vector HNSW config diff.
    pub fn with_hnsw_config(mut self, hnsw_config: HnswConfigDiff) -> Self {
        self.vector_params.hnsw_config = Some(hnsw_config);
        self
    }
    /// Set the quantization config.
    pub fn with_quantization_config(mut self, quantization_config: QuantizationConfig) -> Self {
        self.vector_params.quantization_config = Some(quantization_config);
        self
    }
    /// Set the `on_disk` flag.
    pub fn with_on_disk(mut self, on_disk: bool) -> Self {
        self.vector_params.on_disk = Some(on_disk);
        self
    }
    /// Set the element datatype.
    pub fn with_datatype(mut self, datatype: Datatype) -> Self {
        self.vector_params.datatype = Some(datatype);
        self
    }
    /// Set the multi-vector config.
    pub fn with_multivector_config(mut self, multivector_config: MultiVectorConfig) -> Self {
        self.vector_params.multivector_config = Some(multivector_config);
        self
    }
    /// Finish and return the assembled [`VectorParams`].
    pub fn build(self) -> VectorParams {
        self.vector_params
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/count.rs | lib/collection/src/operations/verification/count.rs | use segment::types::{Filter, SearchParams};
use super::StrictModeVerification;
use crate::operations::types::CountRequestInternal;
/// Strict-mode checks for count requests: only the read filter and the
/// `exact` flag are subject to limits.
impl StrictModeVerification for CountRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        Some(self.exact)
    }
    fn request_search_params(&self) -> Option<&SearchParams> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/discovery.rs | lib/collection/src/operations/verification/discovery.rs | use segment::types::{Filter, SearchParams, StrictModeConfig};
use super::StrictModeVerification;
use crate::collection::Collection;
use crate::operations::types::{CollectionResult, DiscoverRequestBatch, DiscoverRequestInternal};
/// Strict-mode checks for discovery requests: limit, read filter and
/// search params are subject to limits.
impl StrictModeVerification for DiscoverRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&SearchParams> {
        self.params.as_ref()
    }
}
/// Strict-mode checks for batched discovery: each sub-request is verified
/// individually; the batch itself contributes no limits of its own.
impl StrictModeVerification for DiscoverRequestBatch {
    async fn check_strict_mode(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Fail fast on the first sub-request that violates strict mode.
        for i in self.searches.iter() {
            i.discover_request
                .check_strict_mode(collection, strict_mode_config)
                .await?;
        }
        Ok(())
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&SearchParams> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/update.rs | lib/collection/src/operations/verification/update.rs | use api::rest::{
BatchVectorStruct, MultiDenseVector, PointInsertOperations, PointsBatch, PointsList,
UpdateVectors, Vector, VectorStruct,
};
use segment::data_types::tiny_map::TinyMap;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::types::{
Filter, StrictModeConfig, StrictModeMultivectorConfig, StrictModeSparseConfig, VectorName,
VectorNameBuf,
};
use super::{StrictModeVerification, check_limit_opt};
use crate::collection::Collection;
use crate::common::collection_size_stats::CollectionSizeAtomicStats;
use crate::operations::payload_ops::{DeletePayload, SetPayload};
use crate::operations::point_ops::PointsSelector;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::operations::vector_ops::DeleteVectors;
/// Strict-mode checks for point selectors: only a filter-based selector
/// contributes a write filter; id-based selection is unrestricted.
impl StrictModeVerification for PointsSelector {
    fn indexed_filter_write(&self) -> Option<&Filter> {
        match self {
            PointsSelector::FilterSelector(filter) => Some(&filter.filter),
            PointsSelector::PointIdsSelector(_) => None,
        }
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Strict-mode checks for vector deletion: only the optional write filter
/// is subject to limits.
impl StrictModeVerification for DeleteVectors {
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Strict-mode checks for setting payload: enforces the collection payload
/// size limit (payload grows) plus the standard write-filter check.
impl StrictModeVerification for SetPayload {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Only enforced when a limit is configured AND local stats exist.
        if let Some(payload_size_limit_bytes) = strict_mode_config.max_collection_payload_size_bytes
            && let Some(local_stats) = collection.estimated_collection_stats().await
        {
            check_collection_payload_size_limit(payload_size_limit_bytes, local_stats)?;
        }
        Ok(())
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Strict-mode checks for payload deletion: only the optional write filter
/// is subject to limits (deletion cannot grow storage).
impl StrictModeVerification for DeletePayload {
    fn indexed_filter_write(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Strict-mode checks for upserts: batch size, collection size limits, and
/// per-vector multivector/sparse length limits.
impl StrictModeVerification for PointInsertOperations {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Reject oversized upsert batches first — it is the cheapest check.
        check_limit_opt(
            Some(self.len()),
            strict_mode_config.upsert_max_batchsize,
            "upsert limit",
        )?;
        check_collection_size_limit(collection, strict_mode_config).await?;
        if let Some(multivector_config) = &strict_mode_config.multivector_config {
            check_multivectors_limits_insert(self, multivector_config).await?;
        }
        if let Some(sparse_config) = &strict_mode_config.sparse_config {
            check_sparse_vector_limits_insert(self, sparse_config).await?;
        }
        Ok(())
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Update filter doesn't require strict mode checks
        // as it is only used on a limited and small subset of points.
        // Reading from payload storage is acceptable in this case.
        match self {
            PointInsertOperations::PointsBatch(PointsBatch {
                batch: _,
                shard_key: _,
                update_filter: _,
            }) => None,
            PointInsertOperations::PointsList(PointsList {
                points: _,
                shard_key: _,
                update_filter: _,
            }) => None,
        }
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Strict-mode checks for vector updates: batch size, collection size
/// limits, and per-vector multivector/sparse length limits.
impl StrictModeVerification for UpdateVectors {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        check_limit_opt(
            Some(self.points.len()),
            strict_mode_config.upsert_max_batchsize,
            "update limit",
        )?;
        check_collection_size_limit(collection, strict_mode_config).await?;
        if let Some(multivector_config) = &strict_mode_config.multivector_config {
            check_multivectors_limits_update(self, multivector_config).await?;
        }
        if let Some(sparse_config) = &strict_mode_config.sparse_config {
            check_sparse_vector_limits_update(self, sparse_config).await?;
        }
        Ok(())
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn indexed_filter_write(&self) -> Option<&Filter> {
        let UpdateVectors {
            points: _,
            shard_key: _,
            // Update filter doesn't require strict mode checks
            // as it is only used on a limited and small subset of points.
            // Reading from payload storage is acceptable in this case.
            update_filter: _,
        } = self;
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
/// Checks all collection size limits that are configured in strict mode.
/// Checks all collection size limits that are configured in strict mode.
async fn check_collection_size_limit(
    collection: &Collection,
    strict_mode_config: &StrictModeConfig,
) -> CollectionResult<()> {
    let vector_limit = strict_mode_config.max_collection_vector_size_bytes;
    let payload_limit = strict_mode_config.max_collection_payload_size_bytes;
    let point_limit = strict_mode_config.max_points_count;
    // Skip the stats lookup entirely when no limit is configured.
    if vector_limit.is_none() && payload_limit.is_none() && point_limit.is_none() {
        return Ok(());
    }
    // Without local stats there is nothing to enforce against.
    let Some(stats) = collection.estimated_collection_stats().await else {
        return Ok(());
    };
    if let Some(limit) = vector_limit {
        check_collection_vector_size_limit(limit, stats)?;
    }
    if let Some(limit) = payload_limit {
        check_collection_payload_size_limit(limit, stats)?;
    }
    if let Some(limit) = point_limit {
        check_collection_points_count_limit(limit, stats)?;
    }
    Ok(())
}
/// Reject the operation once the estimated point count has reached the
/// configured strict-mode maximum.
fn check_collection_points_count_limit(
    points_count_limit: usize,
    stats: &CollectionSizeAtomicStats,
) -> CollectionResult<()> {
    let points_count = stats.get_points_count();
    if points_count < points_count_limit {
        return Ok(());
    }
    Err(CollectionError::bad_request(format!(
        "Max points count limit of {points_count_limit} reached!",
    )))
}
/// Check collections vector storage size limit.
fn check_collection_vector_size_limit(
max_vec_storage_size_bytes: usize,
stats: &CollectionSizeAtomicStats,
) -> CollectionResult<()> {
let vec_storage_size_bytes = stats.get_vector_storage_size();
if vec_storage_size_bytes >= max_vec_storage_size_bytes {
let size_in_mb = max_vec_storage_size_bytes as f32 / (1024.0 * 1024.0);
return Err(CollectionError::bad_request(format!(
"Max vector storage size limit of {size_in_mb}MB reached!",
)));
}
Ok(())
}
/// Check collections payload storage size limit.
fn check_collection_payload_size_limit(
max_payload_storage_size_bytes: usize,
stats: &CollectionSizeAtomicStats,
) -> CollectionResult<()> {
let payload_storage_size_bytes = stats.get_payload_storage_size();
if payload_storage_size_bytes >= max_payload_storage_size_bytes {
let size_in_mb = max_payload_storage_size_bytes as f32 / (1024.0 * 1024.0);
return Err(CollectionError::bad_request(format!(
"Max payload storage size limit of {size_in_mb}MB reached!",
)));
}
Ok(())
}
/// Compute a non-empty mapping of multivector limits by name.
///
/// Uses a tiny map as we expect a small number of multivectors to be configured per collection in strict mode.
///
/// Return None if no multivectors are configured with strict mode
/// Compute a non-empty mapping of multivector limits by name.
///
/// Uses a tiny map as we expect a small number of multivectors to be configured per collection in strict mode.
///
/// Return None if no multivectors are configured with strict mode
async fn multivector_limits_by_name(
    multivector_strict_config: &StrictModeMultivectorConfig,
) -> Option<TinyMap<VectorNameBuf, usize>> {
    // If no multivectors strict mode no need to check anything.
    if multivector_strict_config.config.is_empty() {
        return None;
    }
    // Keep only entries that actually declare a `max_vectors` limit.
    let multivector_max_size_by_name: TinyMap<VectorNameBuf, usize> = multivector_strict_config
        .config
        .iter()
        .filter_map(|(name, config)| {
            config
                .max_vectors
                .map(|max_vectors| (name.clone(), max_vectors))
        })
        .collect();
    // Mirror `sparse_limits`: only yield a map that contains limits.
    (!multivector_max_size_by_name.is_empty()).then_some(multivector_max_size_by_name)
}
/// Enforce multivector size limits for every point in a vector-update request.
async fn check_multivectors_limits_update(
    point_insert: &UpdateVectors,
    multivector_strict_config: &StrictModeMultivectorConfig,
) -> CollectionResult<()> {
    // No configured limits -> nothing to check.
    let Some(multivector_max_size_by_name) =
        multivector_limits_by_name(multivector_strict_config).await
    else {
        return Ok(());
    };
    for point in &point_insert.points {
        check_named_multivectors_vecstruct_limit(
            DEFAULT_VECTOR_NAME,
            &point.vector,
            &multivector_max_size_by_name,
        )?;
    }
    Ok(())
}
/// Compute a non-empty mapping of sparse vector length limits by name.
///
/// Returns `None` when strict mode configures no sparse limits at all, so
/// callers can skip checking entirely.
async fn sparse_limits(
    sparse_config: &StrictModeSparseConfig,
) -> Option<TinyMap<&VectorName, usize>> {
    if sparse_config.config.is_empty() {
        return None;
    }
    // Keep only entries that actually declare a `max_length` limit.
    let sparse_max_size: TinyMap<&VectorName, usize> = sparse_config
        .config
        .iter()
        .filter_map(|(name, config)| {
            config
                .max_length
                .map(|max_length| (name.as_ref(), max_length))
        })
        .collect();
    (!sparse_max_size.is_empty()).then_some(sparse_max_size)
}
/// Enforce sparse vector length limits for every point in a vector-update request.
async fn check_sparse_vector_limits_update(
    point_insert: &UpdateVectors,
    sparse_config: &StrictModeSparseConfig,
) -> CollectionResult<()> {
    // No configured limits -> nothing to check.
    let Some(sparse_max_size_by_name) = sparse_limits(sparse_config).await else {
        return Ok(());
    };
    for point in &point_insert.points {
        check_sparse_vecstruct_limit(&point.vector, &sparse_max_size_by_name)?;
    }
    Ok(())
}
/// Enforce sparse vector length limits for every vector in an upsert request.
async fn check_sparse_vector_limits_insert(
    point_insert: &PointInsertOperations,
    sparse_config: &StrictModeSparseConfig,
) -> CollectionResult<()> {
    // No configured limits -> nothing to check.
    let Some(sparse_max_size_by_name) = sparse_limits(sparse_config).await else {
        return Ok(());
    };
    match point_insert {
        PointInsertOperations::PointsBatch(batch) => match &batch.batch.vectors {
            BatchVectorStruct::Named(named_batch_vectors) => {
                for (name, vectors) in named_batch_vectors {
                    for vector in vectors {
                        check_named_sparse_vec_limit(name, vector, &sparse_max_size_by_name)?;
                    }
                }
            }
            // Non-named layouts are not subject to sparse limits here.
            BatchVectorStruct::Single(_)
            | BatchVectorStruct::Document(_)
            | BatchVectorStruct::MultiDense(_)
            | BatchVectorStruct::Image(_)
            | BatchVectorStruct::Object(_) => {}
        },
        PointInsertOperations::PointsList(list) => {
            for point_struct in &list.points {
                match &point_struct.vector {
                    VectorStruct::Named(named_vectors) => {
                        for (name, vector) in named_vectors {
                            check_named_sparse_vec_limit(name, vector, &sparse_max_size_by_name)?;
                        }
                    }
                    VectorStruct::Single(_) => {}
                    VectorStruct::MultiDense(_) => {}
                    VectorStruct::Document(_) => {}
                    VectorStruct::Image(_) => {}
                    VectorStruct::Object(_) => {}
                }
            }
        }
    }
    Ok(())
}
/// Apply the per-name sparse length limits to one vector struct.
///
/// Only the `Named` layout can address a configured sparse vector; every
/// other layout passes unchecked.
fn check_sparse_vecstruct_limit(
    vector: &VectorStruct,
    sparse_max_size_by_name: &TinyMap<&VectorName, usize>,
) -> CollectionResult<()> {
    if let VectorStruct::Named(named) = vector {
        for (name, vec) in named {
            check_named_sparse_vec_limit(name, vec, sparse_max_size_by_name)?;
        }
    }
    Ok(())
}
/// Check one named vector against the sparse limits: only sparse vectors
/// with a configured limit for their name are validated.
fn check_named_sparse_vec_limit(
    name: &VectorName,
    vector: &Vector,
    sparse_max_size_by_name: &TinyMap<&VectorName, usize>,
) -> CollectionResult<()> {
    if let Vector::Sparse(sparse) = vector
        && let Some(strict_sparse_limit) = sparse_max_size_by_name.get(name)
    {
        check_sparse_vector_limit(name, sparse, *strict_sparse_limit)?;
    }
    Ok(())
}
/// Validate that a sparse vector does not exceed `max_size` entries.
///
/// Both the `indices` and the `values` arrays are checked. The reported
/// count is the larger of the two so the error always names the offending
/// size (previously a too-long `values` array was reported with the —
/// possibly in-range — `indices` length).
fn check_sparse_vector_limit(
    name: &VectorName,
    sparse: &sparse::common::sparse_vector::SparseVector,
    max_size: usize,
) -> CollectionResult<()> {
    // A well-formed sparse vector has indices.len() == values.len(), but we
    // cannot assume validation already ran here, so take the maximum.
    let vector_len = sparse.indices.len().max(sparse.values.len());
    if vector_len > max_size {
        return Err(CollectionError::bad_request(format!(
            "Sparse vector '{name}' has a limit of {max_size} indices, but {vector_len} were provided!"
        )));
    }
    Ok(())
}
/// Strict-mode guard for point inserts: rejects any multivector that holds
/// more inner vectors than allowed by the per-name multivector limits.
async fn check_multivectors_limits_insert(
    point_insert: &PointInsertOperations,
    multivector_strict_config: &StrictModeMultivectorConfig,
) -> CollectionResult<()> {
    // No multivector limits configured => nothing to enforce.
    let Some(multivector_max_size_by_name) =
        multivector_limits_by_name(multivector_strict_config).await
    else {
        return Ok(());
    };
    match point_insert {
        PointInsertOperations::PointsBatch(batch) => match &batch.batch.vectors {
            // Unnamed multivectors are checked under the default vector name.
            BatchVectorStruct::MultiDense(multis) => {
                for multi in multis {
                    check_named_multivector_limit(
                        DEFAULT_VECTOR_NAME,
                        multi,
                        &multivector_max_size_by_name,
                    )?;
                }
            }
            // Named vectors are checked one by one under their own names.
            BatchVectorStruct::Named(named_batch_vectors) => {
                for (name, vectors) in named_batch_vectors {
                    for vector in vectors {
                        check_named_multivectors_vec_limit(
                            name,
                            vector,
                            &multivector_max_size_by_name,
                        )?;
                    }
                }
            }
            // Dense and inference (document/image/object) variants carry no
            // multivectors to check.
            BatchVectorStruct::Single(_)
            | BatchVectorStruct::Document(_)
            | BatchVectorStruct::Image(_)
            | BatchVectorStruct::Object(_) => {}
        },
        PointInsertOperations::PointsList(list) => {
            for point_struct in &list.points {
                match &point_struct.vector {
                    VectorStruct::MultiDense(multi) => {
                        check_named_multivector_limit(
                            DEFAULT_VECTOR_NAME,
                            multi,
                            &multivector_max_size_by_name,
                        )?;
                    }
                    VectorStruct::Named(named_vectors) => {
                        for (name, vector) in named_vectors {
                            check_named_multivectors_vec_limit(
                                name,
                                vector,
                                &multivector_max_size_by_name,
                            )?;
                        }
                    }
                    VectorStruct::Single(_)
                    | VectorStruct::Document(_)
                    | VectorStruct::Image(_)
                    | VectorStruct::Object(_) => {}
                }
            }
        }
    }
    Ok(())
}
/// Recursively apply the multivector size limits to a `VectorStruct`.
///
/// An unnamed multivector is checked under the caller-supplied `name`;
/// named vectors are checked under their own names.
fn check_named_multivectors_vecstruct_limit(
    name: &VectorName,
    vector: &VectorStruct,
    multivector_max_size_by_name: &TinyMap<VectorNameBuf, usize>,
) -> CollectionResult<()> {
    match vector {
        VectorStruct::MultiDense(multi) => {
            check_named_multivector_limit(name, multi, multivector_max_size_by_name)
        }
        VectorStruct::Named(named) => named.into_iter().try_for_each(|(inner_name, vec)| {
            check_named_multivectors_vec_limit(inner_name, vec, multivector_max_size_by_name)
        }),
        VectorStruct::Single(_)
        | VectorStruct::Document(_)
        | VectorStruct::Image(_)
        | VectorStruct::Object(_) => Ok(()),
    }
}
/// Apply the multivector size limit to a single `Vector`, if it is
/// multi-dense; all other vector kinds have nothing to enforce.
fn check_named_multivectors_vec_limit(
    name: &VectorName,
    vector: &Vector,
    multivector_max_size_by_name: &TinyMap<VectorNameBuf, usize>,
) -> CollectionResult<()> {
    if let Vector::MultiDense(multi) = vector {
        check_named_multivector_limit(name, multi, multivector_max_size_by_name)
    } else {
        // Dense, sparse and inference (document/image/object) vectors are
        // not multivectors.
        Ok(())
    }
}
/// Look up the configured limit for `name` and enforce it, if any.
fn check_named_multivector_limit(
    name: &VectorName,
    multi: &MultiDenseVector,
    multivector_max_size_by_name: &TinyMap<VectorNameBuf, usize>,
) -> CollectionResult<()> {
    match multivector_max_size_by_name.get(name) {
        Some(strict_multi_limit) => check_multivector_limit(name, multi, *strict_multi_limit),
        None => Ok(()),
    }
}
/// Reject a multivector that holds more than `max_size` inner vectors.
fn check_multivector_limit(
    name: &VectorName,
    multi: &MultiDenseVector,
    max_size: usize,
) -> CollectionResult<()> {
    let multi_len = multi.len();
    if multi_len <= max_size {
        return Ok(());
    }
    Err(CollectionError::bad_request(format!(
        "Multivector '{name}' has a limit of {max_size} vectors, but {multi_len} were provided!",
    )))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/recommend.rs | lib/collection/src/operations/verification/recommend.rs | use segment::types::Filter;
use super::StrictModeVerification;
use crate::operations::types::{RecommendGroupsRequestInternal, RecommendRequestInternal};
impl StrictModeVerification for RecommendRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        self.params.as_ref()
    }

    fn request_exact(&self) -> Option<bool> {
        // Exactness is carried inside the search params and verified there.
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Recommendation is a read-only operation.
        None
    }
}
impl StrictModeVerification for RecommendGroupsRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        // Worst-case points returned: groups * points per group.
        Some(self.group_request.limit as usize * self.group_request.group_size as usize)
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        self.params.as_ref()
    }

    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Recommendation is a read-only operation.
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/local_shard.rs | lib/collection/src/operations/verification/local_shard.rs | use super::StrictModeVerification;
use crate::operations::types::{PointRequestInternal, ScrollRequestInternal};
impl StrictModeVerification for ScrollRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        self.limit
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        // Scrolling does not run a vector search, so there are no params.
        None
    }

    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        // Scrolling never writes.
        None
    }
}
impl StrictModeVerification for PointRequestInternal {
    // Retrieving points by id carries no limit, filter, exactness flag or
    // search parameters, so only the trait's generic checks apply.
    fn query_limit(&self) -> Option<usize> {
        None
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }

    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        None
    }

    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/search.rs | lib/collection/src/operations/verification/search.rs | use api::rest::{SearchGroupsRequestInternal, SearchRequestInternal};
use segment::types::{Filter, SearchParams, StrictModeConfig};
use super::{StrictModeVerification, check_grouping_field};
use crate::collection::Collection;
use crate::operations::types::{CollectionResult, CoreSearchRequest, SearchRequestBatch};
impl StrictModeVerification for SearchRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        self.params.as_ref()
    }

    fn request_exact(&self) -> Option<bool> {
        // The `exact` flag lives inside `params` and is verified there.
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Searching never writes.
        None
    }
}
impl StrictModeVerification for CoreSearchRequest {
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        self.params.as_ref()
    }

    fn request_exact(&self) -> Option<bool> {
        // The `exact` flag lives inside `params` and is verified there.
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Searching never writes.
        None
    }
}
impl StrictModeVerification for SearchRequestBatch {
    async fn check_strict_mode(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Validate each sub-request in order, failing fast on the first
        // violation; the batch itself has nothing extra to verify.
        for entry in self.searches.iter() {
            entry
                .search_request
                .check_strict_mode(collection, strict_mode_config)
                .await?;
        }
        Ok(())
    }

    // All limits, filters and params are checked per sub-request above.
    fn query_limit(&self) -> Option<usize> {
        None
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        None
    }

    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
}
impl StrictModeVerification for SearchGroupsRequestInternal {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // check for unindexed fields targeted by group_by
        check_grouping_field(&self.group_request.group_by, collection, strict_mode_config)?;
        Ok(())
    }

    fn query_limit(&self) -> Option<usize> {
        // Worst-case points returned: groups * points per group.
        Some(self.group_request.limit as usize * self.group_request.group_size as usize)
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        self.params.as_ref()
    }

    fn request_exact(&self) -> Option<bool> {
        // We already check this in `request_search_params`
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/facet.rs | lib/collection/src/operations/verification/facet.rs | use api::rest::FacetRequestInternal;
use segment::data_types::facets::FacetParams;
use segment::types::{Filter, SearchParams};
use super::StrictModeVerification;
impl StrictModeVerification for FacetRequestInternal {
    fn query_limit(&self) -> Option<usize> {
        self.limit
    }

    fn request_exact(&self) -> Option<bool> {
        // Facet requests expose a dedicated `exact` flag.
        self.exact
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Faceting never writes.
        None
    }
}
impl StrictModeVerification for FacetParams {
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        self.filter.as_ref()
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        // Faceting never writes.
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/mod.rs | lib/collection/src/operations/verification/mod.rs | mod count;
mod discovery;
mod facet;
mod local_shard;
mod matrix;
mod query;
mod recommend;
mod search;
mod update;
use std::fmt::Display;
use itertools::Itertools;
use segment::json_path::JsonPath;
use segment::types::{Filter, SearchParams, StrictModeConfig};
pub use shard::operation_rate_cost;
use super::types::{CollectionError, CollectionResult};
use crate::collection::Collection;
/// Creates a new `VerificationPass` without actually verifying anything.
///
/// This is useful in situations where we don't need to check for strict mode, but still
/// want to be able to access `TableOfContent` using `.toc()`.
/// If you're not implementing a new point-api endpoint for which a strict mode check
/// is required, this is safe to use.
// NOTE: these used to be plain `//` comments, which rustdoc ignores on a
// public function; `///` makes them part of the generated documentation.
pub const fn new_unchecked_verification_pass() -> VerificationPass {
    VerificationPass { _inner: () }
}
/// A pass, created on successful verification.
///
/// Holding a value of this type is proof that strict-mode checks ran for a
/// request (or were deliberately skipped via `new_unchecked_verification_pass`).
pub struct VerificationPass {
    // Private field, so we can't instantiate it from somewhere else.
    _inner: (),
}
/// Trait to verify strict mode for requests.
/// This trait ignores the `enabled` parameter in `StrictModeConfig`.
pub trait StrictModeVerification {
    /// Implementing this method allows adding a custom check for request specific values.
    #[allow(async_fn_in_trait)]
    async fn check_custom(
        &self,
        _collection: &Collection,
        _strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        Ok(())
    }

    /// Implement this to check the limit of a request.
    fn query_limit(&self) -> Option<usize>;

    /// Verifies that all keys in the given filter have an index available. Only implement this
    /// if the filter operates on a READ-operation, like search.
    /// For filtered updates implement `request_indexed_filter_write`!
    fn indexed_filter_read(&self) -> Option<&Filter>;

    /// Verifies that all keys in the given filter have an index available. Only implement this
    /// if the filter is used for filtered-UPDATES like delete by payload.
    /// For read only filters implement `request_indexed_filter_read`!
    fn indexed_filter_write(&self) -> Option<&Filter>;

    /// The request's `exact` flag, if it has one; checked against
    /// `search_allow_exact` by `check_request_exact`.
    fn request_exact(&self) -> Option<bool>;

    /// The request's search parameters, if any; verified recursively by
    /// `check_search_params`.
    fn request_search_params(&self) -> Option<&SearchParams>;

    /// Checks the 'exact' parameter.
    fn check_request_exact(&self, strict_mode_config: &StrictModeConfig) -> CollectionResult<()> {
        check_bool_opt(
            self.request_exact(),
            strict_mode_config.search_allow_exact,
            "Exact search",
            "exact",
        )
    }

    /// Checks the request limit.
    fn check_request_query_limit(
        &self,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        check_limit_opt(
            self.query_limit(),
            strict_mode_config.max_query_limit,
            "limit",
        )
    }

    /// Checks search parameters.
    #[allow(async_fn_in_trait)]
    async fn check_search_params(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        if let Some(search_params) = self.request_search_params() {
            // Box::pin: `SearchParams` itself implements this trait, so the
            // future would otherwise be (indirectly) self-referential in size.
            Box::pin(search_params.check_strict_mode(collection, strict_mode_config)).await?;
        }
        Ok(())
    }

    /// Checks all filters use indexed fields only.
    fn check_request_filter(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Shared check for read and write filters; which config flag gates
        // the check is passed in by the caller below.
        let check_filter = |filter: Option<&Filter>,
                            allow_unindexed_filter: Option<bool>|
         -> CollectionResult<()> {
            let Some(filter) = filter else {
                return Ok(());
            };

            // Check for filter indices
            if allow_unindexed_filter == Some(false)
                && let Some((key, schemas)) = collection.one_unindexed_key(filter)
            {
                // Deterministic, deduplicated schema list for the error message.
                let possible_schemas_str = schemas
                    .iter()
                    .map(|schema| schema.to_string())
                    .sorted()
                    .dedup()
                    .collect::<Vec<_>>()
                    .join(", ");

                return Err(CollectionError::strict_mode(
                    format!(
                        "Index required but not found for \"{key}\" of one of the following types: [{possible_schemas_str}]",
                    ),
                    "Create an index for this key or use a different filter.",
                ));
            }

            check_filter_limits(filter, strict_mode_config)?;

            Ok(())
        };

        // Read filters are gated by `unindexed_filtering_retrieve`,
        // write filters by `unindexed_filtering_update`.
        check_filter(
            self.indexed_filter_read(),
            strict_mode_config.unindexed_filtering_retrieve,
        )?;
        check_filter(
            self.indexed_filter_write(),
            strict_mode_config.unindexed_filtering_update,
        )?;

        Ok(())
    }

    /// Does the verification of all configured parameters. Only implement this function if you know what
    /// you are doing. In most cases implementing `check_custom` is sufficient.
    #[allow(async_fn_in_trait)]
    async fn check_strict_mode(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        self.check_custom(collection, strict_mode_config).await?;
        self.check_request_query_limit(strict_mode_config)?;
        self.check_request_filter(collection, strict_mode_config)?;
        self.check_request_exact(strict_mode_config)?;
        self.check_search_params(collection, strict_mode_config)
            .await?;
        Ok(())
    }
}
/// Enforce strict-mode limits on the shape of a filter: the total number of
/// conditions and the input size of any single condition.
///
/// The previous version routed the comparisons through
/// `check_custom(|| Some(v), Some(limit))`, whose both arguments were always
/// `Some`; that is exactly `v > limit`, written directly here.
fn check_filter_limits(
    filter: &Filter,
    strict_mode_config: &StrictModeConfig,
) -> CollectionResult<()> {
    // Filter condition count limit
    if let Some(filter_condition_limit) = strict_mode_config.filter_max_conditions {
        let filter_conditions = filter.total_conditions_count();
        if filter_conditions > filter_condition_limit {
            return Err(CollectionError::strict_mode(
                format!(
                    "Filter condition limit reached ({filter_conditions} > {filter_condition_limit})",
                ),
                "Reduce the amount of conditions of your filter.",
            ));
        }
    }

    // Filter condition size limit
    if let Some(max_condition_size) = strict_mode_config.condition_max_size {
        let input_condition_size = filter.max_condition_input_size();
        if input_condition_size > max_condition_size {
            return Err(CollectionError::strict_mode(
                format!(
                    "Condition size limit reached ({input_condition_size} > {max_condition_size})"
                ),
                "Reduce the size of your condition.",
            ));
        }
    }

    Ok(())
}
/// Verify a request timeout against the strict-mode `max_timeout` limit.
pub fn check_timeout(
    timeout: usize,
    strict_mode_config: &StrictModeConfig,
) -> CollectionResult<()> {
    let max_timeout = strict_mode_config.max_timeout;
    check_limit_opt(Some(timeout), max_timeout, "timeout")
}
/// Reject a request that enables a boolean flag which strict mode forbids.
///
/// Fails only when the flag is explicitly forbidden (`allowed == Some(false)`)
/// AND the request actually turns it on (`value == Some(true)`).
pub(crate) fn check_bool_opt(
    value: Option<bool>,
    allowed: Option<bool>,
    name: &str,
    parameter: &str,
) -> CollectionResult<()> {
    let forbidden = allowed == Some(false);
    let requested = value == Some(true);
    if forbidden && requested {
        Err(CollectionError::strict_mode(
            format!("{name} disabled!"),
            format!("Set {parameter}=false."),
        ))
    } else {
        Ok(())
    }
}
/// Compare an optional value against an optional strict-mode limit.
///
/// Passes whenever either side is absent; errors only when `value > limit`.
pub(crate) fn check_limit_opt<T: PartialOrd + Display>(
    value: Option<T>,
    limit: Option<T>,
    name: &str,
) -> CollectionResult<()> {
    match (value, limit) {
        (Some(value), Some(limit)) if value > limit => Err(CollectionError::strict_mode(
            format!("Limit exceeded {value} > {limit} for \"{name}\""),
            format!("Reduce the \"{name}\" parameter to or below {limit}."),
        )),
        _ => Ok(()),
    }
}
/// Generic limit predicate: `true` when the (lazily computed) value is
/// within `limit`, or when either side is absent.
///
/// `value_fn` is only invoked when a limit is actually configured.
pub(crate) fn check_custom<T: PartialOrd>(
    value_fn: impl FnOnce() -> Option<T>,
    limit: Option<T>,
) -> bool {
    match limit {
        None => true,
        Some(limit) => match value_fn() {
            None => true,
            Some(value) => value <= limit,
        },
    }
}
impl StrictModeVerification for SearchParams {
    async fn check_custom(
        &self,
        _collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Quantization oversampling limit.
        let oversampling = self.quantization.and_then(|i| i.oversampling);
        check_limit_opt(
            oversampling,
            strict_mode_config.search_max_oversampling,
            "oversampling",
        )?;
        // HNSW ef limit.
        check_limit_opt(
            self.hnsw_ef,
            strict_mode_config.search_max_hnsw_ef,
            "hnsw_ef",
        )
    }

    fn request_exact(&self) -> Option<bool> {
        Some(self.exact)
    }

    fn query_limit(&self) -> Option<usize> {
        None
    }

    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }

    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }

    fn request_search_params(&self) -> Option<&SearchParams> {
        // The params *are* self; returning them would recurse forever.
        None
    }
}
/// Verify that the `group_by` target of a grouping request is backed by a
/// payload index supporting `match`, when strict mode forbids unindexed
/// retrieval filtering.
pub fn check_grouping_field(
    group_by: &JsonPath,
    collection: &Collection,
    strict_mode_config: &StrictModeConfig,
) -> CollectionResult<()> {
    // Only enforced when unindexed retrieval filtering is disallowed.
    if strict_mode_config.unindexed_filtering_retrieve != Some(false) {
        return Ok(());
    }
    match collection.payload_key_index_schema(group_by) {
        None => Err(CollectionError::strict_mode(
            format!("Index required but not found for \"{group_by}\""),
            "Create an index supporting `match` for this key.",
        )),
        Some(schema) if !schema.supports_match() => {
            let schema_kind = schema.kind();
            Err(CollectionError::strict_mode(
                format!("Index of type \"{schema_kind:?}\" found for \"{group_by}\""),
                "Create an index supporting `match` for this key.",
            ))
        }
        Some(_) => Ok(()),
    }
}
#[cfg(test)]
mod test {
    use std::sync::Arc;

    use common::budget::ResourceBudget;
    use common::counter::hardware_accumulator::HwMeasurementAcc;
    use segment::types::{
        Condition, FieldCondition, Filter, Match, PayloadFieldSchema, PayloadSchemaType,
        SearchParams, StrictModeConfig, ValueVariants,
    };
    use tempfile::Builder;

    use super::StrictModeVerification;
    use crate::collection::{Collection, RequestShardTransfer};
    use crate::config::{CollectionConfigInternal, CollectionParams, WalConfig};
    use crate::operations::point_ops::{FilterSelector, PointsSelector};
    use crate::operations::shared_storage_config::SharedStorageConfig;
    use crate::operations::types::{
        CollectionError, CountRequestInternal, DiscoverRequestInternal,
    };
    use crate::optimizers_builder::OptimizersConfig;
    use crate::shards::channel_service::ChannelService;
    use crate::shards::collection_shard_distribution::CollectionShardDistribution;
    use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState};

    // Payload key WITHOUT an index in the fixture collection.
    const UNINDEXED_KEY: &str = "key";
    // Payload key WITH an integer index in the fixture collection.
    const INDEXED_KEY: &str = "num";

    // Single entry point: the fixture collection is expensive to build, so
    // all sub-checks share one instance.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_strict_mode_verification_trait() {
        let collection = fixture().await;
        test_query_limit(&collection).await;
        test_search_params(&collection).await;
        test_filter_read(&collection).await;
        test_filter_write(&collection).await;
        test_request_exact(&collection).await;
    }

    // The fixture's `max_query_limit` is 4: 10 must fail, 4 must pass.
    async fn test_query_limit(collection: &Collection) {
        assert_strict_mode_error(discovery_fixture(Some(10), None, None), collection).await;
        assert_strict_mode_success(discovery_fixture(Some(4), None, None), collection).await;
    }

    // Read filters on unindexed keys must be rejected, indexed keys accepted.
    async fn test_filter_read(collection: &Collection) {
        let filter = filter_fixture(UNINDEXED_KEY);
        assert_strict_mode_error(discovery_fixture(None, Some(filter), None), collection).await;

        let filter = filter_fixture(INDEXED_KEY);
        assert_strict_mode_success(discovery_fixture(None, Some(filter), None), collection).await;
    }

    // `exact: true` search params must be rejected (`search_allow_exact: false`).
    async fn test_search_params(collection: &Collection) {
        let restricted_params = search_params_fixture(true);
        assert_strict_mode_error(
            discovery_fixture(None, None, Some(restricted_params)),
            collection,
        )
        .await;

        let allowed_params = search_params_fixture(false);
        assert_strict_mode_success(
            discovery_fixture(None, None, Some(allowed_params)),
            collection,
        )
        .await;
    }

    // Write (update/delete) filters on unindexed keys must be rejected.
    async fn test_filter_write(collection: &Collection) {
        let restricted_request = PointsSelector::FilterSelector(FilterSelector {
            filter: filter_fixture(UNINDEXED_KEY),
            shard_key: None,
        });
        assert_strict_mode_error(restricted_request, collection).await;

        let allowed_request = PointsSelector::FilterSelector(FilterSelector {
            filter: filter_fixture(INDEXED_KEY),
            shard_key: None,
        });
        assert_strict_mode_success(allowed_request, collection).await;
    }

    // Exact count requests must be rejected (`search_allow_exact: false`).
    async fn test_request_exact(collection: &Collection) {
        let request = CountRequestInternal {
            filter: None,
            exact: true,
        };
        assert_strict_mode_error(request, collection).await;

        let request = CountRequestInternal {
            filter: None,
            exact: false,
        };
        assert_strict_mode_success(request, collection).await;
    }

    // Expects the check to fail specifically with CollectionError::StrictMode.
    async fn assert_strict_mode_error<R: StrictModeVerification>(
        request: R,
        collection: &Collection,
    ) {
        let strict_mode_config = collection.strict_mode_config().await.unwrap();
        let error = request
            .check_strict_mode(collection, &strict_mode_config)
            .await
            .expect_err("Expected strict mode error but got Ok() value");
        if !matches!(error, CollectionError::StrictMode { .. }) {
            panic!("Expected strict mode error but got {error:#}");
        }
    }

    // Expects the check to pass; distinguishes strict-mode failures from
    // unrelated errors for clearer diagnostics.
    async fn assert_strict_mode_success<R: StrictModeVerification>(
        request: R,
        collection: &Collection,
    ) {
        let strict_mode_config = collection.strict_mode_config().await.unwrap();
        let res = request
            .check_strict_mode(collection, &strict_mode_config)
            .await;
        if let Err(CollectionError::StrictMode { description }) = res {
            panic!("Strict mode check should've passed but failed with error: {description:?}");
        } else if res.is_err() {
            panic!("Unexpected error");
        }
    }

    // Filter matching `key == 123`.
    fn filter_fixture(key: &str) -> Filter {
        Filter::new_must(Condition::Field(FieldCondition::new_match(
            key.try_into().unwrap(),
            Match::new_value(ValueVariants::Integer(123)),
        )))
    }

    fn search_params_fixture(exact: bool) -> SearchParams {
        SearchParams {
            exact,
            ..SearchParams::default()
        }
    }

    // Discover request with only the strict-mode-relevant knobs exposed.
    fn discovery_fixture(
        limit: Option<usize>,
        filter: Option<Filter>,
        search_params: Option<SearchParams>,
    ) -> DiscoverRequestInternal {
        DiscoverRequestInternal {
            limit: limit.unwrap_or(0),
            filter,
            params: search_params,
            target: None,
            context: None,
            offset: None,
            with_payload: None,
            with_vector: None,
            using: None,
            lookup_from: None,
        }
    }

    // Strict-mode config exercised by the tests above.
    async fn fixture() -> Collection {
        let strict_mode_config = StrictModeConfig {
            enabled: Some(true),
            max_timeout: Some(3),
            max_query_limit: Some(4),
            unindexed_filtering_update: Some(false),
            unindexed_filtering_retrieve: Some(false),
            search_max_hnsw_ef: Some(3),
            search_allow_exact: Some(false),
            search_max_oversampling: Some(0.2),
            ..Default::default()
        };

        fixture_collection(&strict_mode_config).await
    }

    // Builds a temporary single-shard collection with the given strict-mode
    // config and an integer payload index on INDEXED_KEY.
    async fn fixture_collection(strict_mode_config: &StrictModeConfig) -> Collection {
        let wal_config = WalConfig::default();
        let collection_params = CollectionParams::empty();

        let config = CollectionConfigInternal {
            params: collection_params,
            optimizer_config: OptimizersConfig::fixture(),
            wal_config,
            hnsw_config: Default::default(),
            quantization_config: Default::default(),
            strict_mode_config: Some(strict_mode_config.clone()),
            uuid: None,
            metadata: None,
        };

        let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
        let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();

        let collection_name = "test".to_string();

        let storage_config: SharedStorageConfig = SharedStorageConfig::default();
        let storage_config = Arc::new(storage_config);

        let collection = Collection::new(
            collection_name.clone(),
            0,
            collection_dir.path(),
            snapshots_path.path(),
            &config,
            storage_config.clone(),
            CollectionShardDistribution::all_local(None, 0),
            None,
            ChannelService::default(),
            dummy_on_replica_failure(),
            dummy_request_shard_transfer(),
            dummy_abort_shard_transfer(),
            None,
            None,
            ResourceBudget::default(),
            None,
        )
        .await
        .expect("Failed to create new fixture collection");

        collection
            .create_payload_index(
                INDEXED_KEY.parse().unwrap(),
                PayloadFieldSchema::FieldType(PayloadSchemaType::Integer),
                HwMeasurementAcc::new(),
            )
            .await
            .expect("failed to create payload index");

        collection
    }

    // No-op callbacks for the parts of Collection::new we don't exercise.
    pub fn dummy_on_replica_failure() -> ChangePeerFromState {
        Arc::new(move |_peer_id, _shard_id, _from_state| {})
    }

    pub fn dummy_request_shard_transfer() -> RequestShardTransfer {
        Arc::new(move |_transfer| {})
    }

    pub fn dummy_abort_shard_transfer() -> AbortShardTransfer {
        Arc::new(|_transfer, _reason| {})
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/query.rs | lib/collection/src/operations/verification/query.rs | use itertools::Itertools;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::types::{Filter, StrictModeConfig};
use super::{StrictModeVerification, check_grouping_field};
use crate::collection::Collection;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::operations::universal_query::collection_query::{
CollectionPrefetch, CollectionQueryGroupsRequest, CollectionQueryRequest, Query,
};
impl Query {
    /// Reject formula queries that score by unindexed payload keys when
    /// strict mode forbids unindexed retrieval filtering.
    async fn check_strict_mode(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        if strict_mode_config.unindexed_filtering_retrieve == Some(false)
            && let Query::Formula(formula) = self
            && let Some((key, schemas)) = collection.one_unindexed_expression_key(&formula.formula)
        {
            // Deterministic, deduplicated schema list for the error message.
            let possible_schemas_str = schemas
                .iter()
                .map(|schema| schema.to_string())
                .sorted()
                .dedup()
                .collect::<Vec<_>>()
                .join(", ");

            return Err(CollectionError::strict_mode(
                format!(
                    "Index required but not found for \"{key}\" of one of the following types: [{possible_schemas_str}]",
                ),
                "Create an index for this key or use a different formula expression.",
            ));
        }
        Ok(())
    }

    /// Check that the query does not perform a fullscan based on the collection configuration.
    async fn check_fullscan(
        &self,
        using: &str,
        filter: Option<&Filter>,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // Check only applies on `search_allow_exact`
        if strict_mode_config.search_allow_exact == Some(false) {
            match &self {
                Query::Fusion(_) | Query::OrderBy(_) | Query::Formula(_) | Query::Sample(_) => (),
                Query::Vector(_) => {
                    let config = collection.collection_config.read().await;

                    // ignore sparse vectors
                    let query_targets_sparse = config
                        .params
                        .sparse_vectors
                        .as_ref()
                        .is_some_and(|sparse| sparse.contains_key(using));
                    if query_targets_sparse {
                        // sparse vectors are always indexed
                        return Ok(());
                    }

                    // check HNSW configuration for vector
                    // (`.and_then` replaces the former `.map(..).flatten()`)
                    let vector_hnsw_config = &config
                        .params
                        .vectors
                        .get_params(using)
                        .and_then(|param| param.hnsw_config.as_ref());
                    let vector_hnsw_m = vector_hnsw_config
                        .and_then(|hnsw_config| hnsw_config.m)
                        .unwrap_or(config.hnsw_config.m);
                    let vector_hnsw_payload_m = vector_hnsw_config
                        .and_then(|hnsw_config| hnsw_config.payload_m)
                        .unwrap_or_else(|| config.hnsw_config.payload_m.unwrap_or(vector_hnsw_m));

                    // no further check necessary if there is a global HNSW index
                    if vector_hnsw_m > 0 {
                        return Ok(());
                    }

                    // specialized error message if not default vector
                    let vector_error_label = if using == DEFAULT_VECTOR_NAME {
                        ""
                    } else {
                        &format!(" on '{using}'")
                    };

                    // check hnsw.payload_m if there is a filter
                    let uses_multitenant_filter = if let Some(filter) = filter {
                        filter
                            .iter_conditions()
                            .filter_map(|c| c.targeted_key())
                            .filter_map(|key| collection.payload_key_index_schema(&key))
                            .any(|index_schema| index_schema.is_tenant())
                    } else {
                        false
                    };

                    if !uses_multitenant_filter {
                        // HNSW disabled AND no tenant filter to narrow the scan
                        return Err(CollectionError::strict_mode(
                            format!(
                                "Request is forbidden{vector_error_label} because global vector indexing is disabled (hnsw_config.m = 0)"
                            ),
                            "Use tenant-specific filter, enable global vector indexing or enable strict mode `search_allow_exact` option",
                        ));
                    }

                    if vector_hnsw_payload_m == 0 {
                        // Payload-based HNSW is disabled too, so even a tenant
                        // filter cannot avoid a fullscan.
                        return Err(CollectionError::strict_mode(
                            format!(
                                "Request is forbidden{vector_error_label} because vector indexing is disabled (hnsw_config.m = 0 and hnsw_config.payload_m = 0)"
                            ),
                            "Enable vector indexing, use a prefetch query with indexed vectors or enable strict mode `search_allow_exact` option",
                        ));
                    }

                    return Ok(());
                }
            }
        }
        Ok(())
    }
}
impl StrictModeVerification for CollectionQueryRequest {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // CollectionPrefetch.prefetch is of type CollectionPrefetch (recursive type)
        for prefetch in &self.prefetch {
            prefetch
                .check_strict_mode(collection, strict_mode_config)
                .await?;
        }

        if let Some(query) = self.query.as_ref() {
            // check query can perform fullscan when not rescoring
            // (with prefetches, the root query only rescores prefetch results)
            if self.prefetch.is_empty() {
                query
                    .check_fullscan(
                        &self.using,
                        self.filter.as_ref(),
                        collection,
                        strict_mode_config,
                    )
                    .await?;
            }
            // check for unindexed fields in formula
            query
                .check_strict_mode(collection, strict_mode_config)
                .await?
        }
        Ok(())
    }

    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }

    // Query requests never write.
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }

    // Exactness is carried inside the search params and verified there.
    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        self.params.as_ref()
    }
}
impl StrictModeVerification for CollectionPrefetch {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        // CollectionPrefetch.prefetch is of type CollectionPrefetch (recursive type)
        for prefetch in &self.prefetch {
            // Box::pin: the future recurses into itself and must be heap-allocated.
            Box::pin(prefetch.check_strict_mode(collection, strict_mode_config)).await?;
        }

        if let Some(query) = self.query.as_ref() {
            // check if prefetch can perform a fullscan
            query
                .check_fullscan(
                    &self.using,
                    self.filter.as_ref(),
                    collection,
                    strict_mode_config,
                )
                .await?;

            // check for unindexed fields in formula
            query
                .check_strict_mode(collection, strict_mode_config)
                .await?
        }

        Ok(())
    }

    fn query_limit(&self) -> Option<usize> {
        Some(self.limit)
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }

    // Prefetches never write.
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }

    // Exactness is carried inside the search params and verified there.
    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        self.params.as_ref()
    }
}
impl StrictModeVerification for CollectionQueryGroupsRequest {
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        if let Some(query) = self.query.as_ref() {
            // check query can perform fullscan when not rescoring
            // (with prefetches, the root query only rescores prefetch results)
            if self.prefetch.is_empty() {
                query
                    .check_fullscan(
                        &self.using,
                        self.filter.as_ref(),
                        collection,
                        strict_mode_config,
                    )
                    .await?;
            }
            // check for unindexed fields in formula
            query
                .check_strict_mode(collection, strict_mode_config)
                .await?
        }

        // check for unindexed fields targeted by group_by
        check_grouping_field(&self.group_by, collection, strict_mode_config)?;
        Ok(())
    }

    // Worst-case points returned: groups * points per group.
    fn query_limit(&self) -> Option<usize> {
        Some(self.limit * self.group_size)
    }

    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }

    // Group queries never write.
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }

    // Exactness is carried inside the search params and verified there.
    fn request_exact(&self) -> Option<bool> {
        None
    }

    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        self.params.as_ref()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/verification/matrix.rs | lib/collection/src/operations/verification/matrix.rs | use api::rest::SearchMatrixRequestInternal;
use super::StrictModeVerification;
use crate::collection::distance_matrix::CollectionSearchMatrixRequest;
impl StrictModeVerification for SearchMatrixRequestInternal {
    /// Effective query size: the product when both `limit` and `sample` are
    /// given, otherwise whichever of the two is present (or `None`).
    fn query_limit(&self) -> Option<usize> {
        match (self.limit, self.sample) {
            (Some(limit), Some(sample)) => Some(limit * sample),
            (limit, sample) => limit.or(sample),
        }
    }
    /// Filter applied on the read path (checked against indexed fields).
    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }
    /// Matrix requests never filter on the write path.
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }
    /// No exactness flag on matrix requests.
    fn request_exact(&self) -> Option<bool> {
        None
    }
    /// Matrix requests carry no search params.
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
impl StrictModeVerification for CollectionSearchMatrixRequest {
    /// Total candidate count: `limit_per_sample` neighbors for each of the
    /// `sample_size` sampled points.
    fn query_limit(&self) -> Option<usize> {
        let total = self.sample_size * self.limit_per_sample;
        Some(total)
    }
    /// Filter applied on the read path (checked against indexed fields).
    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        self.filter.as_ref()
    }
    /// Matrix requests never filter on the write path.
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }
    /// No exactness flag on matrix requests.
    fn request_exact(&self) -> Option<bool> {
        None
    }
    /// Matrix requests carry no search params.
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/universal_query/collection_query.rs | lib/collection/src/operations/universal_query/collection_query.rs | use ahash::AHashSet;
use api::rest::LookupLocation;
use common::types::ScoreType;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use segment::data_types::order_by::OrderBy;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, NamedQuery, VectorInternal, VectorRef};
use segment::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula;
use segment::json_path::JsonPath;
use segment::types::{
Condition, ExtendedPointId, Filter, HasIdCondition, PointIdType, SearchParams, VectorName,
VectorNameBuf, WithPayloadInterface, WithVector,
};
use segment::vector_storage::query::{
ContextPair, ContextQuery, DiscoveryQuery, FeedbackItem, NaiveFeedbackCoefficients, RecoQuery,
};
use serde::Serialize;
use shard::query::query_enum::QueryEnum;
use super::formula::FormulaInternal;
use super::shard_query::{
FusionInternal, SampleInternal, ScoringQuery, ShardPrefetch, ShardQueryRequest,
};
use crate::common::fetch_vectors::ReferencedVectors;
use crate::lookup::WithLookup;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::operations::universal_query::shard_query::MmrInternal;
use crate::recommendations::avg_vector_for_recommendation;
const DEFAULT_MMR_LAMBDA: f32 = 0.5;
/// Internal representation of a query request, used to converge from REST and gRPC. This can have IDs referencing vectors.
#[derive(Clone, Debug, PartialEq)]
pub struct CollectionQueryRequest {
    /// Nested sub-requests whose results are merged/rescored by `query`.
    pub prefetch: Vec<CollectionPrefetch>,
    /// Optional scoring query; vector queries may still reference point ids here.
    pub query: Option<Query>,
    /// Name of the vector to search with.
    pub using: VectorNameBuf,
    /// Payload filter applied to candidates.
    pub filter: Option<Filter>,
    /// Minimum score a point must reach to be returned.
    pub score_threshold: Option<ScoreType>,
    /// Maximum number of points to return.
    pub limit: usize,
    /// Number of leading results to skip.
    pub offset: usize,
    /// Search params for when there is no prefetch
    pub params: Option<SearchParams>,
    /// Which vectors to include in the response.
    pub with_vector: WithVector,
    /// Which payload fields to include in the response.
    pub with_payload: WithPayloadInterface,
    /// Where to look up referenced vectors (another collection / vector name).
    pub lookup_from: Option<LookupLocation>,
}
impl CollectionQueryRequest {
pub const DEFAULT_LIMIT: usize = 10;
pub const DEFAULT_GROUP_SIZE: usize = 3;
pub const DEFAULT_OFFSET: usize = 0;
pub const DEFAULT_WITH_VECTOR: WithVector = WithVector::Bool(false);
pub const DEFAULT_WITH_PAYLOAD: WithPayloadInterface = WithPayloadInterface::Bool(false);
}
/// Lightweight representation of a query request to implement the [`RetrieveRequest`] trait.
///
/// [`RetrieveRequest`]: crate::common::retrieve_request_trait::RetrieveRequest
#[derive(Debug)]
pub struct CollectionQueryResolveRequest {
pub referenced_ids: Vec<PointIdType>,
pub lookup_from: Option<LookupLocation>,
pub using: VectorNameBuf,
}
/// Internal representation of a group query request, used to converge from REST and gRPC.
#[derive(Debug)]
pub struct CollectionQueryGroupsRequest {
pub prefetch: Vec<CollectionPrefetch>,
pub query: Option<Query>,
pub using: VectorNameBuf,
pub filter: Option<Filter>,
pub params: Option<SearchParams>,
pub score_threshold: Option<ScoreType>,
pub with_vector: WithVector,
pub with_payload: WithPayloadInterface,
pub lookup_from: Option<LookupLocation>,
pub group_by: JsonPath,
pub group_size: usize,
pub limit: usize,
pub with_lookup: Option<WithLookup>,
}
#[derive(Clone, Debug, PartialEq)]
pub enum Query {
/// Score points against some vector(s)
Vector(VectorQuery<VectorInputInternal>),
/// Reciprocal rank fusion
Fusion(FusionInternal),
/// Order by a payload field
OrderBy(OrderBy),
/// Score boosting via an arbitrary formula
Formula(FormulaInternal),
/// Sample points
Sample(SampleInternal),
}
impl Query {
    /// Converts this query into a [`ScoringQuery`], resolving any referenced
    /// point ids into concrete vectors via `ids_to_vectors`.
    ///
    /// `request_limit` is the fallback MMR candidate pool size when the query
    /// does not specify one.
    pub fn try_into_scoring_query(
        self,
        ids_to_vectors: &ReferencedVectors,
        lookup_vector_name: &VectorName,
        lookup_collection: Option<&String>,
        using: VectorNameBuf,
        request_limit: usize,
    ) -> CollectionResult<ScoringQuery> {
        let scoring_query = match self {
            Query::Vector(vector_query) => {
                vector_query
                    // Homogenize the input into raw vectors
                    .ids_into_vectors(ids_to_vectors, lookup_vector_name, lookup_collection)?
                    .preprocess_vectors()
                    // Turn into QueryEnum
                    .into_scoring_query(using, request_limit)?
            }
            Query::Fusion(fusion) => ScoringQuery::Fusion(fusion),
            Query::OrderBy(order_by) => ScoringQuery::OrderBy(order_by),
            Query::Formula(formula) => ScoringQuery::Formula(ParsedFormula::try_from(formula)?),
            Query::Sample(sample) => ScoringQuery::Sample(sample),
        };
        Ok(scoring_query)
    }
    /// Point ids referenced by this query. Only vector queries can reference
    /// ids; the other variants return an empty list.
    pub fn get_referenced_ids(&self) -> Vec<PointIdType> {
        match self {
            Self::Vector(vector_query) => vector_query
                .get_referenced_ids()
                .into_iter()
                .copied()
                .collect(),
            Self::Fusion(_) | Self::OrderBy(_) | Self::Formula(_) | Self::Sample(_) => Vec::new(),
        }
    }
}
#[derive(Clone, Debug, PartialEq)]
pub enum VectorInputInternal {
Id(PointIdType),
Vector(VectorInternal),
}
impl VectorInputInternal {
    /// Returns the referenced point id, or `None` when this input carries a raw vector.
    pub fn as_id(&self) -> Option<&PointIdType> {
        if let VectorInputInternal::Id(id) = self {
            Some(id)
        } else {
            None
        }
    }
}
#[derive(Clone, Debug, PartialEq)]
pub enum VectorQuery<T> {
Nearest(T),
NearestWithMmr(NearestWithMmr<T>),
RecommendAverageVector(RecoQuery<T>),
RecommendBestScore(RecoQuery<T>),
RecommendSumScores(RecoQuery<T>),
Discover(DiscoveryQuery<T>),
Context(ContextQuery<T>),
Feedback(FeedbackQuery<T>),
}
impl<T> VectorQuery<T> {
/// Iterate through all items, without any kind of structure
pub fn flat_iter(&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
VectorQuery::Nearest(input) => Box::new(std::iter::once(input)),
VectorQuery::NearestWithMmr(query) => Box::new(std::iter::once(&query.nearest)),
VectorQuery::RecommendAverageVector(query)
| VectorQuery::RecommendBestScore(query)
| VectorQuery::RecommendSumScores(query) => Box::new(query.flat_iter()),
VectorQuery::Discover(query) => Box::new(query.flat_iter()),
VectorQuery::Context(query) => Box::new(query.flat_iter()),
VectorQuery::Feedback(query) => Box::new(query.flat_iter()),
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct NearestWithMmr<T> {
pub nearest: T,
pub mmr: Mmr,
}
#[derive(Clone, Debug, PartialEq, Serialize)]
pub struct Mmr {
pub diversity: Option<f32>,
pub candidates_limit: Option<usize>,
}
#[derive(Clone, Debug, PartialEq)]
pub struct FeedbackQuery<T> {
pub target: T,
pub feedback: Vec<FeedbackItem<T>>,
pub strategy: FeedbackStrategy,
}
impl<T> FeedbackQuery<T> {
fn flat_iter(&self) -> impl Iterator<Item = &T> {
self.feedback
.iter()
.map(|item| &item.vector)
.chain(std::iter::once(&self.target))
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum FeedbackStrategy {
Naive { a: f32, b: f32, c: f32 },
}
impl VectorQuery<VectorInputInternal> {
    /// Turns all [VectorInputInternal]s into [VectorInternal]s, using the provided [ReferencedVectors] to look up the vectors.
    ///
    /// Returns a not-found [`CollectionError`] if a referenced id cannot be
    /// resolved — except in the recommend variants, which silently drop
    /// unresolved references (see `resolve_reco_reference`).
    fn ids_into_vectors(
        self,
        ids_to_vectors: &ReferencedVectors,
        lookup_vector_name: &VectorName,
        lookup_collection: Option<&String>,
    ) -> CollectionResult<VectorQuery<VectorInternal>> {
        match self {
            VectorQuery::Nearest(vector_input) => {
                let vector = ids_to_vectors
                    .resolve_reference(lookup_collection, lookup_vector_name, vector_input)
                    .ok_or_else(|| vector_not_found_error(lookup_vector_name))?;
                Ok(VectorQuery::Nearest(vector))
            }
            VectorQuery::RecommendAverageVector(reco) => {
                let (positives, negatives) = Self::resolve_reco_reference(
                    reco,
                    ids_to_vectors,
                    lookup_vector_name,
                    lookup_collection,
                );
                Ok(VectorQuery::RecommendAverageVector(RecoQuery::new(
                    positives, negatives,
                )))
            }
            VectorQuery::RecommendBestScore(reco) => {
                let (positives, negatives) = Self::resolve_reco_reference(
                    reco,
                    ids_to_vectors,
                    lookup_vector_name,
                    lookup_collection,
                );
                Ok(VectorQuery::RecommendBestScore(RecoQuery::new(
                    positives, negatives,
                )))
            }
            VectorQuery::RecommendSumScores(reco) => {
                let (positives, negatives) = Self::resolve_reco_reference(
                    reco,
                    ids_to_vectors,
                    lookup_vector_name,
                    lookup_collection,
                );
                Ok(VectorQuery::RecommendSumScores(RecoQuery::new(
                    positives, negatives,
                )))
            }
            VectorQuery::Discover(discover) => {
                // Both the target and every positive/negative pair member must resolve.
                let target = ids_to_vectors
                    .resolve_reference(lookup_collection, lookup_vector_name, discover.target)
                    .ok_or_else(|| vector_not_found_error(lookup_vector_name))?;
                let pairs = discover
                    .pairs
                    .into_iter()
                    .map(|pair| {
                        Ok(ContextPair {
                            positive: ids_to_vectors
                                .resolve_reference(
                                    lookup_collection,
                                    lookup_vector_name,
                                    pair.positive,
                                )
                                .ok_or_else(|| vector_not_found_error(lookup_vector_name))?,
                            negative: ids_to_vectors
                                .resolve_reference(
                                    lookup_collection,
                                    lookup_vector_name,
                                    pair.negative,
                                )
                                .ok_or_else(|| vector_not_found_error(lookup_vector_name))?,
                        })
                    })
                    .collect::<CollectionResult<_>>()?;
                Ok(VectorQuery::Discover(DiscoveryQuery { target, pairs }))
            }
            VectorQuery::Context(context) => {
                // Same pair resolution as Discover, but without a target vector.
                let pairs = context
                    .pairs
                    .into_iter()
                    .map(|pair| {
                        Ok(ContextPair {
                            positive: ids_to_vectors
                                .resolve_reference(
                                    lookup_collection,
                                    lookup_vector_name,
                                    pair.positive,
                                )
                                .ok_or_else(|| vector_not_found_error(lookup_vector_name))?,
                            negative: ids_to_vectors
                                .resolve_reference(
                                    lookup_collection,
                                    lookup_vector_name,
                                    pair.negative,
                                )
                                .ok_or_else(|| vector_not_found_error(lookup_vector_name))?,
                        })
                    })
                    .collect::<CollectionResult<_>>()?;
                Ok(VectorQuery::Context(ContextQuery { pairs }))
            }
            VectorQuery::NearestWithMmr(NearestWithMmr { nearest, mmr }) => {
                let nearest = ids_to_vectors
                    .resolve_reference(lookup_collection, lookup_vector_name, nearest)
                    .ok_or_else(|| vector_not_found_error(lookup_vector_name))?;
                Ok(VectorQuery::NearestWithMmr(NearestWithMmr { nearest, mmr }))
            }
            VectorQuery::Feedback(FeedbackQuery {
                target,
                feedback,
                strategy,
            }) => {
                let target = ids_to_vectors
                    .resolve_reference(lookup_collection, lookup_vector_name, target)
                    .ok_or_else(|| vector_not_found_error(lookup_vector_name))?;
                let feedback = feedback
                    .into_iter()
                    .map(|FeedbackItem { vector, score }| {
                        Ok(FeedbackItem {
                            vector: ids_to_vectors
                                .resolve_reference(lookup_collection, lookup_vector_name, vector)
                                .ok_or_else(|| vector_not_found_error(lookup_vector_name))?,
                            score,
                        })
                    })
                    .collect::<CollectionResult<_>>()?;
                Ok(VectorQuery::Feedback(FeedbackQuery {
                    target,
                    feedback,
                    strategy,
                }))
            }
        }
    }
    /// Resolves the references in the RecoQuery into actual vectors.
    ///
    /// NOTE(review): unresolved ids are silently dropped here (`filter_map`),
    /// unlike the other variants which surface a not-found error — confirm
    /// this asymmetry is intentional.
    fn resolve_reco_reference(
        reco_query: RecoQuery<VectorInputInternal>,
        ids_to_vectors: &ReferencedVectors,
        lookup_vector_name: &VectorName,
        lookup_collection: Option<&String>,
    ) -> (Vec<VectorInternal>, Vec<VectorInternal>) {
        let positives = reco_query
            .positives
            .into_iter()
            .filter_map(|vector_input| {
                ids_to_vectors.resolve_reference(
                    lookup_collection,
                    lookup_vector_name,
                    vector_input,
                )
            })
            .collect();
        let negatives = reco_query
            .negatives
            .into_iter()
            .filter_map(|vector_input| {
                ids_to_vectors.resolve_reference(
                    lookup_collection,
                    lookup_vector_name,
                    vector_input,
                )
            })
            .collect();
        (positives, negatives)
    }
}
/// Builds the not-found error reported when a referenced point id could not be
/// resolved to a vector under the given vector name.
fn vector_not_found_error(vector_name: &VectorName) -> CollectionError {
    let message = format!("Vector with name {vector_name:?} for point");
    CollectionError::not_found(message)
}
impl VectorQuery<VectorInternal> {
    /// Runs `preprocess()` in place on every vector contained in the query
    /// (target, pair members, feedback vectors, ...) and returns the query.
    fn preprocess_vectors(mut self) -> Self {
        match &mut self {
            VectorQuery::Nearest(vector) => {
                vector.preprocess();
            }
            VectorQuery::RecommendAverageVector(reco) => {
                reco.positives.iter_mut().for_each(|v| v.preprocess());
                reco.negatives.iter_mut().for_each(|v| v.preprocess());
            }
            VectorQuery::RecommendBestScore(reco) => {
                reco.positives.iter_mut().for_each(|v| v.preprocess());
                reco.negatives.iter_mut().for_each(|v| v.preprocess());
            }
            VectorQuery::RecommendSumScores(reco) => {
                reco.positives.iter_mut().for_each(|v| v.preprocess());
                reco.negatives.iter_mut().for_each(|v| v.preprocess());
            }
            VectorQuery::Discover(discover) => {
                discover.target.preprocess();
                discover.pairs.iter_mut().for_each(|pair| {
                    pair.positive.preprocess();
                    pair.negative.preprocess();
                });
            }
            VectorQuery::Context(context) => {
                context.pairs.iter_mut().for_each(|pair| {
                    pair.positive.preprocess();
                    pair.negative.preprocess();
                });
            }
            VectorQuery::NearestWithMmr(NearestWithMmr { nearest, mmr: _ }) => {
                nearest.preprocess();
            }
            VectorQuery::Feedback(FeedbackQuery {
                target,
                feedback,
                strategy: _,
            }) => {
                target.preprocess();
                feedback
                    .iter_mut()
                    .for_each(|item| item.vector.preprocess());
            }
        }
        self
    }
    /// Converts this fully-resolved vector query into a [`ScoringQuery`].
    ///
    /// Most variants become a [`QueryEnum`]; MMR is special-cased into
    /// [`ScoringQuery::Mmr`], with `lambda = 1 - diversity` (default
    /// [`DEFAULT_MMR_LAMBDA`]) and a candidate pool defaulting to
    /// `request_limit`.
    fn into_scoring_query(
        self,
        using: VectorNameBuf,
        request_limit: usize,
    ) -> CollectionResult<ScoringQuery> {
        let query_enum = match self {
            VectorQuery::Nearest(vector) => QueryEnum::Nearest(NamedQuery::new(vector, using)),
            VectorQuery::RecommendAverageVector(reco) => {
                // Get average vector
                let search_vector = avg_vector_for_recommendation(
                    reco.positives.iter().map(VectorRef::from),
                    reco.negatives.iter().map(VectorRef::from).peekable(),
                )?;
                QueryEnum::Nearest(NamedQuery::new(search_vector, using))
            }
            VectorQuery::RecommendBestScore(reco) => {
                QueryEnum::RecommendBestScore(NamedQuery::new(reco, using))
            }
            VectorQuery::RecommendSumScores(reco) => {
                QueryEnum::RecommendSumScores(NamedQuery::new(reco, using))
            }
            VectorQuery::Discover(discover) => {
                QueryEnum::Discover(NamedQuery::new(discover, using))
            }
            VectorQuery::Context(context) => QueryEnum::Context(NamedQuery::new(context, using)),
            VectorQuery::NearestWithMmr(NearestWithMmr { nearest, mmr }) => {
                let Mmr {
                    diversity,
                    candidates_limit,
                } = mmr;
                // Early return: MMR does not map onto QueryEnum.
                return Ok(ScoringQuery::Mmr(MmrInternal {
                    vector: nearest,
                    using,
                    lambda: OrderedFloat(diversity.map(|x| 1.0 - x).unwrap_or(DEFAULT_MMR_LAMBDA)),
                    candidates_limit: candidates_limit.unwrap_or(request_limit),
                }));
            }
            VectorQuery::Feedback(FeedbackQuery {
                target,
                feedback,
                strategy,
            }) => match strategy {
                FeedbackStrategy::Naive { a, b, c } => QueryEnum::FeedbackNaive(NamedQuery::new(
                    segment::vector_storage::query::NaiveFeedbackQuery {
                        target,
                        feedback,
                        coefficients: NaiveFeedbackCoefficients {
                            a: a.into(),
                            b: b.into(),
                            c: c.into(),
                        },
                    },
                    using,
                )),
            },
        };
        Ok(ScoringQuery::Vector(query_enum))
    }
}
#[derive(Clone, Debug, PartialEq)]
pub struct CollectionPrefetch {
pub prefetch: Vec<CollectionPrefetch>,
pub query: Option<Query>,
pub using: VectorNameBuf,
pub filter: Option<Filter>,
pub score_threshold: Option<OrderedFloat<ScoreType>>,
pub limit: usize,
/// Search params for when there is no prefetch
pub params: Option<SearchParams>,
pub lookup_from: Option<LookupLocation>,
}
/// Augments `filter` with a `must_not` clause that excludes every id in `ids`.
///
/// Returns the filter unchanged when there is nothing to exclude.
fn exclude_referenced_ids(ids: Vec<ExtendedPointId>, filter: Option<Filter>) -> Option<Filter> {
    let unique_ids: AHashSet<_> = ids.into_iter().collect();
    if unique_ids.is_empty() {
        filter
    } else {
        let exclusion = Filter::new_must_not(Condition::HasId(HasIdCondition::from(unique_ids)));
        Some(exclusion.merge_owned(filter.unwrap_or_default()))
    }
}
impl CollectionPrefetch {
fn get_lookup_collection(&self) -> Option<&String> {
self.lookup_from.as_ref().map(|x| &x.collection)
}
fn get_lookup_vector_name(&self) -> VectorNameBuf {
self.lookup_from
.as_ref()
.and_then(|lookup_from| lookup_from.vector.as_ref())
.unwrap_or(&self.using)
.to_owned()
}
pub fn get_referenced_point_ids_on_collection(&self, collection: &str) -> Vec<PointIdType> {
let mut refs = Vec::new();
let mut lookup_other_collection = false;
if let Some(lookup_collection) = self.get_lookup_collection() {
lookup_other_collection = lookup_collection != collection
};
if !lookup_other_collection && let Some(Query::Vector(vector_query)) = &self.query {
if let VectorQuery::Nearest(VectorInputInternal::Id(id)) = vector_query {
refs.push(*id);
}
refs.extend(vector_query.get_referenced_ids())
};
for prefetch in &self.prefetch {
refs.extend(prefetch.get_referenced_point_ids_on_collection(collection))
}
refs
}
fn try_into_shard_prefetch(
self,
ids_to_vectors: &ReferencedVectors,
) -> CollectionResult<ShardPrefetch> {
CollectionQueryRequest::validation(
&self.query,
&self.using,
&self.prefetch,
self.score_threshold.map(OrderedFloat::into_inner),
)?;
let lookup_vector_name = self.get_lookup_vector_name();
let lookup_collection = self.get_lookup_collection().cloned();
let using = self.using.clone();
let query = self
.query
.map(|query| {
query.try_into_scoring_query(
ids_to_vectors,
&lookup_vector_name,
lookup_collection.as_ref(),
using,
self.limit,
)
})
.transpose()?;
let prefetches = self
.prefetch
.into_iter()
.map(|prefetch| prefetch.try_into_shard_prefetch(ids_to_vectors))
.try_collect()?;
Ok(ShardPrefetch {
prefetches,
query,
filter: self.filter,
score_threshold: self.score_threshold,
limit: self.limit,
params: self.params,
})
}
pub fn flatten_resolver_requests(&self) -> Vec<CollectionQueryResolveRequest> {
let mut inner_queries = vec![];
// resolve ids for root query
let referenced_ids = self
.query
.as_ref()
.map(Query::get_referenced_ids)
.unwrap_or_default();
if !referenced_ids.is_empty() {
let resolve_root = CollectionQueryResolveRequest {
referenced_ids,
lookup_from: self.lookup_from.clone(),
using: self.using.clone(),
};
inner_queries.push(resolve_root);
}
// recurse on prefetches
for prefetch in &self.prefetch {
for flatten in prefetch.flatten_resolver_requests() {
inner_queries.push(flatten);
}
}
inner_queries
}
}
impl CollectionQueryRequest {
fn get_lookup_collection(&self) -> Option<&String> {
self.lookup_from.as_ref().map(|x| &x.collection)
}
fn get_lookup_vector_name(&self) -> VectorNameBuf {
self.lookup_from
.as_ref()
.and_then(|lookup_from| lookup_from.vector.as_ref())
.unwrap_or(&self.using)
.to_owned()
}
fn get_referenced_point_ids_on_collection(&self, collection: &str) -> Vec<PointIdType> {
let mut refs = Vec::new();
let mut lookup_other_collection = false;
if let Some(lookup_collection) = self.get_lookup_collection() {
lookup_other_collection = lookup_collection != collection
};
if !lookup_other_collection && let Some(Query::Vector(vector_query)) = &self.query {
if let VectorQuery::Nearest(VectorInputInternal::Id(id)) = vector_query {
refs.push(*id);
}
refs.extend(vector_query.get_referenced_ids())
};
for prefetch in &self.prefetch {
refs.extend(prefetch.get_referenced_point_ids_on_collection(collection))
}
refs
}
/// Substitutes all the point ids in the request with the actual vectors, as well as editing filters so that ids are not included in the response.
pub fn try_into_shard_request(
self,
collection_name: &str,
ids_to_vectors: &ReferencedVectors,
) -> CollectionResult<ShardQueryRequest> {
Self::validation(
&self.query,
&self.using,
&self.prefetch,
self.score_threshold,
)?;
let mut offset = self.offset;
if matches!(self.query, Some(Query::Sample(SampleInternal::Random)))
&& self.prefetch.is_empty()
{
// Shortcut: Ignore offset with random query, since output is not stable.
offset = 0;
}
let query_lookup_collection = self.get_lookup_collection().cloned();
let query_lookup_vector_name = self.get_lookup_vector_name();
let using = self.using.clone();
// Edit filter to exclude all referenced point ids (root and nested) on the searched collection
// We do not want to exclude vector ids from different collection via lookup_from.
let referenced_point_ids = self.get_referenced_point_ids_on_collection(collection_name);
let filter = exclude_referenced_ids(referenced_point_ids, self.filter);
let query = self
.query
.map(|query| {
query.try_into_scoring_query(
ids_to_vectors,
&query_lookup_vector_name,
query_lookup_collection.as_ref(),
using,
self.limit,
)
})
.transpose()?;
let prefetches = self
.prefetch
.into_iter()
.map(|prefetch| prefetch.try_into_shard_prefetch(ids_to_vectors))
.try_collect()?;
Ok(ShardQueryRequest {
prefetches,
query,
filter,
score_threshold: self.score_threshold.map(OrderedFloat),
limit: self.limit,
offset,
params: self.params,
with_vector: self.with_vector,
with_payload: self.with_payload,
})
}
pub fn validation(
query: &Option<Query>,
using: &VectorNameBuf,
prefetch: &[CollectionPrefetch],
score_threshold: Option<ScoreType>,
) -> CollectionResult<()> {
// Check no prefetches without a query
if !prefetch.is_empty() && query.is_none() {
return Err(CollectionError::bad_request(
"A query is needed to merge the prefetches. Can't have prefetches without defining a query.",
));
}
// Check no score_threshold without a vector query
if score_threshold.is_some() {
match query {
Some(Query::OrderBy(_)) => {
return Err(CollectionError::bad_request(
"Can't use score_threshold with an order_by query.",
));
}
None => {
return Err(CollectionError::bad_request(
"A query is needed to use the score_threshold. Can't have score_threshold without defining a query.",
));
}
_ => {}
}
}
// Check that fusion queries are not combined with a using vector name
if let Some(Query::Fusion(_)) = query
&& using != DEFAULT_VECTOR_NAME
{
return Err(CollectionError::bad_request(
"Fusion queries cannot be combined with the 'using' field.",
));
}
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/universal_query/mod.rs | lib/collection/src/operations/universal_query/mod.rs | //! ## Universal query request types
//!
//! Provides a common interface for querying points in a collection
//!
//! Pipeline of type conversion:
//!
//! 1. [`api::rest::QueryRequest`], [`api::grpc::qdrant::QueryPoints`]:
//! Rest or grpc request. Used in API.
//! 2. [`collection_query::CollectionQueryRequest`]:
//! Direct representation of the API request, but to be used as a single type. Created at API to enter ToC.
//! 3. [`ShardQueryRequest`]:
//! Same as the common request, but all point ids have been substituted with vectors. Created at Collection.
//! 4. [`api::grpc::qdrant::QueryShardPoints`]:
//! to be used in the internal service. Created for [`RemoteShard`], converts to and from [`ShardQueryRequest`].
//! 5. [`planned_query::PlannedQuery`]:
//! An easier-to-execute representation of a batch of [`ShardQueryRequest`]. Created in [`LocalShard`].
//!
//! [`LocalShard`]: crate::shards::local_shard::LocalShard
//! [`RemoteShard`]: crate::shards::remote_shard::RemoteShard
//! [`ShardQueryRequest`]: shard_query::ShardQueryRequest
//! [`QueryShardPoints`]: api::grpc::qdrant::QueryShardPoints
pub mod collection_query;
pub mod shard_query;
pub mod planned_query {
pub use shard::query::planned_query::*;
}
pub mod formula {
pub use shard::query::formula::*;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/universal_query/shard_query.rs | lib/collection/src/operations/universal_query/shard_query.rs | use segment::types::Order;
pub use shard::query::*;
use crate::config::CollectionParams;
use crate::operations::types::CollectionResult;
/// Returns the expected order of results, depending on the type of query
pub fn query_result_order(
    query: Option<&ScoringQuery>,
    collection_params: &CollectionParams,
) -> CollectionResult<Option<Order>> {
    let order = match query {
        Some(scoring_query) => match scoring_query {
            ScoringQuery::Vector(query_enum) => {
                // Distance-scored queries follow the order of the vector's
                // configured distance metric; other scores sort descending.
                if query_enum.is_distance_scored() {
                    Some(
                        collection_params
                            .get_distance(query_enum.get_vector_name())?
                            .distance_order(),
                    )
                } else {
                    Some(Order::LargeBetter)
                }
            }
            ScoringQuery::Fusion(fusion) => match fusion {
                FusionInternal::RrfK(_) | FusionInternal::Dbsf => Some(Order::LargeBetter),
            },
            // Score boosting formulas always have descending order;
            // Euclidean scores can be negated within the formula
            ScoringQuery::Formula(_formula) => Some(Order::LargeBetter),
            ScoringQuery::OrderBy(order_by) => Some(Order::from(order_by.direction())),
            // Random sample does not require ordering
            ScoringQuery::Sample(SampleInternal::Random) => None,
            // MMR cannot be reordered
            ScoringQuery::Mmr(_) => None,
        },
        None => {
            // Order by ID
            Some(Order::SmallBetter)
        }
    };
    Ok(order)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/count.rs | lib/collection/src/operations/generalizer/count.rs | use crate::operations::generalizer::Generalizer;
use crate::operations::types::CountRequestInternal;
impl Generalizer for CountRequestInternal {
    /// Count requests carry no vectors or payload values, so there is nothing
    /// to strip: this is a field-for-field copy. The exhaustive destructuring
    /// makes the impl fail to compile if a new field is added, forcing review.
    fn remove_details(&self) -> Self {
        let CountRequestInternal { filter, exact } = self;
        Self {
            filter: filter.clone(),
            exact: *exact,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/points.rs | lib/collection/src/operations/generalizer/points.rs | use crate::operations::generalizer::Generalizer;
use crate::operations::types::{PointRequestInternal, ScrollRequestInternal};
impl Generalizer for ScrollRequestInternal {
    /// Scroll requests carry no vector or payload data themselves, so this is
    /// a field-for-field copy. The exhaustive destructuring makes the impl
    /// fail to compile if a new field is added, forcing review.
    fn remove_details(&self) -> Self {
        let ScrollRequestInternal {
            offset,
            limit,
            filter,
            with_payload,
            with_vector,
            order_by,
        } = self;
        Self {
            offset: *offset,
            limit: *limit,
            filter: filter.clone(),
            with_payload: with_payload.clone(),
            with_vector: with_vector.clone(),
            order_by: order_by.clone(),
        }
    }
}
impl Generalizer for PointRequestInternal {
    /// Point-retrieval requests only reference ids, so this is a plain copy.
    /// The exhaustive destructuring guards against silently missing new fields.
    fn remove_details(&self) -> Self {
        let PointRequestInternal {
            ids,
            with_payload,
            with_vector,
        } = self;
        Self {
            ids: ids.clone(),
            with_payload: with_payload.clone(),
            with_vector: with_vector.clone(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/facet.rs | lib/collection/src/operations/generalizer/facet.rs | use api::rest::FacetRequestInternal;
use crate::operations::generalizer::Generalizer;
impl Generalizer for FacetRequestInternal {
    /// Facet requests carry no vectors or payload values, so this is a plain
    /// copy. The exhaustive destructuring guards against new fields being
    /// silently skipped.
    fn remove_details(&self) -> Self {
        let FacetRequestInternal {
            key,
            limit,
            filter,
            exact,
        } = self;
        Self {
            key: key.clone(),
            limit: *limit,
            filter: filter.clone(),
            exact: *exact,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/mod.rs | lib/collection/src/operations/generalizer/mod.rs | mod count;
mod facet;
mod matrix;
mod points;
mod query;
mod update_persisted;
/// A trait that provides an interface for removing vectors and payloads from a structure.
/// This is useful for generalizing requests by stripping out vector-specific and payload-specific details,
/// and essentially making the structure much lighter.
///
/// It creates a copy of the structure for all other fields except vectors.
/// Vectors are replaced with length indications, payloads are replaced with keys and length indications.
pub trait Generalizer {
    /// Returns a lightweight copy of `self` with bulky details (vector data, payload values) stripped.
    fn remove_details(&self) -> Self;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/update_persisted.rs | lib/collection/src/operations/generalizer/update_persisted.rs | use itertools::Itertools;
use segment::types::{Payload, PointIdType};
use serde_json::Value;
use shard::operations::payload_ops::{PayloadOps, SetPayloadOp};
use shard::operations::point_ops::{
BatchPersisted, BatchVectorStructPersisted, ConditionalInsertOperationInternal,
PointInsertOperationsInternal, PointOperations, PointStructPersisted, PointSyncOperation,
VectorPersisted, VectorStructPersisted,
};
use shard::operations::vector_ops::{PointVectorsPersisted, UpdateVectorsOp, VectorOperations};
use shard::operations::{CollectionUpdateOperations, FieldIndexOperations};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::DimId;
use crate::operations::generalizer::Generalizer;
impl Generalizer for Payload {
fn remove_details(&self) -> Self {
let mut stripped_payload = Payload::default();
stripped_payload.0.insert(
"keys".to_string(),
Value::Array(self.keys().cloned().sorted().map(Value::String).collect()),
);
stripped_payload
}
}
impl Generalizer for CollectionUpdateOperations {
fn remove_details(&self) -> Self {
match self {
CollectionUpdateOperations::PointOperation(point_operation) => {
CollectionUpdateOperations::PointOperation(point_operation.remove_details())
}
CollectionUpdateOperations::VectorOperation(vector_operation) => {
CollectionUpdateOperations::VectorOperation(vector_operation.remove_details())
}
CollectionUpdateOperations::PayloadOperation(payload_operation) => {
CollectionUpdateOperations::PayloadOperation(payload_operation.remove_details())
}
CollectionUpdateOperations::FieldIndexOperation(field_operation) => {
CollectionUpdateOperations::FieldIndexOperation(field_operation.remove_details())
}
}
}
}
impl Generalizer for PointOperations {
fn remove_details(&self) -> Self {
match self {
PointOperations::UpsertPoints(upsert_operation) => {
PointOperations::UpsertPoints(upsert_operation.remove_details())
}
PointOperations::UpsertPointsConditional(upsert_conditional_operation) => {
PointOperations::UpsertPointsConditional(
upsert_conditional_operation.remove_details(),
)
}
PointOperations::DeletePoints { ids } => {
PointOperations::DeletePoints { ids: ids.clone() }
}
PointOperations::DeletePointsByFilter(filter) => {
PointOperations::DeletePointsByFilter(filter.clone())
}
PointOperations::SyncPoints(sync_operation) => {
PointOperations::SyncPoints(sync_operation.remove_details())
}
#[cfg(feature = "staging")]
PointOperations::TestDelay(op) => PointOperations::TestDelay(op.clone()),
}
}
}
impl Generalizer for PointSyncOperation {
fn remove_details(&self) -> Self {
let Self {
from_id,
to_id,
points,
} = self;
Self {
from_id: *from_id,
to_id: *to_id,
points: points.iter().map(|point| point.remove_details()).collect(),
}
}
}
impl Generalizer for PointStructPersisted {
fn remove_details(&self) -> Self {
let Self {
id: _, // ignore actual id for generalization
vector,
payload,
} = self;
Self {
id: PointIdType::NumId(0),
vector: vector.remove_details(),
payload: payload.as_ref().map(|p| p.remove_details()),
}
}
}
impl Generalizer for ConditionalInsertOperationInternal {
    /// Generalize the inner points operation; the insert condition is kept verbatim.
    fn remove_details(&self) -> Self {
        Self {
            condition: self.condition.clone(),
            points_op: self.points_op.remove_details(),
        }
    }
}
impl Generalizer for PointInsertOperationsInternal {
    /// Generalize an insert operation, whether expressed as a columnar batch
    /// or as a list of individual points.
    fn remove_details(&self) -> Self {
        match self {
            PointInsertOperationsInternal::PointsBatch(batch) => {
                PointInsertOperationsInternal::PointsBatch(batch.remove_details())
            }
            PointInsertOperationsInternal::PointsList(list) => {
                PointInsertOperationsInternal::PointsList(
                    list.iter().map(|point| point.remove_details()).collect(),
                )
            }
        }
    }
}
impl Generalizer for BatchPersisted {
    /// Generalize a columnar batch: ids are dropped, and each vector is replaced
    /// by a tiny stand-in that encodes only its dimensions:
    /// - dense vector -> `[len]`
    /// - multi-dense vector -> `[[num_vectors, dim]]`
    /// Payload values are generalized recursively.
    fn remove_details(&self) -> Self {
        let Self {
            ids: _, // Remove ids for generalization
            vectors,
            payloads,
        } = self;
        let vectors = match vectors {
            // Each dense vector becomes a single-element vector holding its length.
            BatchVectorStructPersisted::Single(vectors) => BatchVectorStructPersisted::Single(
                vectors.iter().map(|v| vec![v.len() as f32]).collect(),
            ),
            // Each multi-vector becomes `[[count, dim]]`; dim is taken from the
            // first inner vector (0 if empty).
            BatchVectorStructPersisted::MultiDense(multi) => {
                BatchVectorStructPersisted::MultiDense(
                    multi
                        .iter()
                        .map(|v| {
                            let dim = if v.is_empty() { 0 } else { v[0].len() };
                            vec![vec![v.len() as f32, dim as f32]]
                        })
                        .collect(),
                )
            }
            // Named vectors keep their names; each vector value is generalized.
            BatchVectorStructPersisted::Named(named) => {
                let generalized_named = named
                    .iter()
                    .map(|(name, vectors)| {
                        let generalized_vectors = vectors
                            .iter()
                            .map(|vector| vector.remove_details())
                            .collect();
                        (name.clone(), generalized_vectors)
                    })
                    .collect();
                BatchVectorStructPersisted::Named(generalized_named)
            }
        };
        Self {
            ids: vec![], // Remove ids for generalization
            vectors,
            payloads: payloads.as_ref().map(|pls| {
                pls.iter()
                    .map(|payload| payload.as_ref().map(|pl| pl.remove_details()))
                    .collect()
            }),
        }
    }
}
impl Generalizer for VectorOperations {
fn remove_details(&self) -> Self {
match self {
VectorOperations::UpdateVectors(update_vectors) => {
VectorOperations::UpdateVectors(update_vectors.remove_details())
}
VectorOperations::DeleteVectors(_, _) => self.clone(),
VectorOperations::DeleteVectorsByFilter(_, _) => self.clone(),
}
}
}
impl Generalizer for UpdateVectorsOp {
    /// Generalize every updated point; the optional update filter is kept verbatim.
    fn remove_details(&self) -> Self {
        let generalized_points = self
            .points
            .iter()
            .map(|point| point.remove_details())
            .collect();
        Self {
            points: generalized_points,
            update_filter: self.update_filter.clone(),
        }
    }
}
impl Generalizer for PointVectorsPersisted {
    /// Zero out the point id and generalize the vector contents.
    fn remove_details(&self) -> Self {
        Self {
            id: PointIdType::NumId(0),
            vector: self.vector.remove_details(),
        }
    }
}
impl Generalizer for VectorStructPersisted {
    /// Replace vector data with a compact stand-in encoding only dimensions:
    /// dense -> `[len]`, multi-dense -> `[[count, dim]]`; named vectors are
    /// generalized per entry, keeping their names.
    fn remove_details(&self) -> Self {
        match self {
            VectorStructPersisted::Single(dense) => {
                VectorStructPersisted::Single(vec![dense.len() as f32])
            }
            VectorStructPersisted::MultiDense(multi) => {
                // dim taken from the first inner vector; 0 if there are none
                let dim = if multi.is_empty() { 0 } else { multi[0].len() };
                VectorStructPersisted::MultiDense(vec![vec![multi.len() as f32, dim as f32]])
            }
            VectorStructPersisted::Named(named) => {
                let generalized_named = named
                    .iter()
                    .map(|(name, vector)| (name.clone(), vector.remove_details()))
                    .collect();
                VectorStructPersisted::Named(generalized_named)
            }
        }
    }
}
impl Generalizer for VectorPersisted {
    /// Replace vector contents with a dimension-only stand-in:
    /// dense -> `[len]`; sparse -> one index equal to the original length with
    /// value 0.0; multi-dense -> `[[count, dim]]`.
    fn remove_details(&self) -> Self {
        match self {
            VectorPersisted::Dense(dense) => VectorPersisted::Dense(vec![dense.len() as f32]),
            VectorPersisted::Sparse(sparse) => VectorPersisted::Sparse(
                // Single-entry sparse vector is always valid, hence the unwrap.
                SparseVector::new(vec![sparse.len() as DimId], vec![0.0]).unwrap(),
            ),
            VectorPersisted::MultiDense(multi) => {
                let dim = if multi.is_empty() { 0 } else { multi[0].len() };
                VectorPersisted::MultiDense(vec![vec![multi.len() as f32, dim as f32]])
            }
        }
    }
}
impl Generalizer for PayloadOps {
fn remove_details(&self) -> Self {
match self {
PayloadOps::SetPayload(set_payload) => {
PayloadOps::SetPayload(set_payload.remove_details())
}
PayloadOps::DeletePayload(delete_payload) => {
PayloadOps::DeletePayload(delete_payload.clone())
}
PayloadOps::ClearPayload { points } => PayloadOps::ClearPayload {
points: points.clone(),
},
PayloadOps::ClearPayloadByFilter(filter) => {
PayloadOps::ClearPayloadByFilter(filter.clone())
}
PayloadOps::OverwritePayload(overwrite_payload) => {
PayloadOps::OverwritePayload(overwrite_payload.remove_details())
}
}
}
}
impl Generalizer for SetPayloadOp {
    /// Clone the operation and generalize only the payload values; point
    /// selectors, filter and key are preserved as-is.
    fn remove_details(&self) -> Self {
        let mut generalized = self.clone();
        generalized.payload = self.payload.remove_details();
        generalized
    }
}
impl Generalizer for FieldIndexOperations {
    /// Field-index operations contain no per-point data, so a plain clone
    /// is already fully generalized.
    fn remove_details(&self) -> Self {
        self.clone()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/query.rs | lib/collection/src/operations/generalizer/query.rs | use segment::data_types::vectors::{MultiDenseVectorInternal, NamedQuery, VectorInternal};
use segment::vector_storage::query::{
ContextPair, ContextQuery, DiscoveryQuery, FeedbackItem, NaiveFeedbackCoefficients,
NaiveFeedbackQuery, RecoQuery,
};
use shard::query::query_enum::QueryEnum;
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::DimId;
use crate::operations::generalizer::Generalizer;
use crate::operations::universal_query::collection_query::VectorInputInternal;
use crate::operations::universal_query::shard_query::{
MmrInternal, ScoringQuery, ShardPrefetch, ShardQueryRequest,
};
impl Generalizer for Vec<ShardQueryRequest> {
    /// Generalize a batch of shard query requests element-wise.
    fn remove_details(&self) -> Self {
        let mut generalized = Vec::with_capacity(self.len());
        for request in self {
            generalized.push(request.remove_details());
        }
        generalized
    }
}
impl Generalizer for ShardQueryRequest {
    /// Generalize a shard query request: prefetches and the scoring query are
    /// generalized recursively; pagination, params and selectors are kept.
    fn remove_details(&self) -> Self {
        // Exhaustive destructuring: adding a field to ShardQueryRequest will
        // fail compilation here, forcing this impl to be updated.
        let ShardQueryRequest {
            prefetches,
            query,
            filter,
            score_threshold,
            limit,
            offset,
            params,
            with_vector,
            with_payload,
        } = self;
        ShardQueryRequest {
            prefetches: prefetches.iter().map(|p| p.remove_details()).collect(),
            query: query.as_ref().map(|q| q.remove_details()),
            filter: filter.clone(),
            score_threshold: *score_threshold,
            limit: *limit,
            offset: *offset,
            params: *params,
            with_vector: with_vector.clone(),
            with_payload: with_payload.clone(),
        }
    }
}
impl Generalizer for ShardPrefetch {
    /// Generalize nested prefetches and the scoring query; limits, params and
    /// filters are carried over untouched.
    fn remove_details(&self) -> Self {
        Self {
            prefetches: self
                .prefetches
                .iter()
                .map(|prefetch| prefetch.remove_details())
                .collect(),
            query: self.query.as_ref().map(|query| query.remove_details()),
            filter: self.filter.clone(),
            score_threshold: self.score_threshold,
            limit: self.limit,
            params: self.params,
        }
    }
}
impl Generalizer for ScoringQuery {
    /// Only the vector-bearing variants (`Vector`, `Mmr`) need generalization;
    /// the remaining variants are cloned unchanged.
    fn remove_details(&self) -> Self {
        match self {
            ScoringQuery::Vector(query) => ScoringQuery::Vector(query.remove_details()),
            ScoringQuery::Mmr(mmr) => ScoringQuery::Mmr(mmr.remove_details()),
            ScoringQuery::Fusion(_)
            | ScoringQuery::OrderBy(_)
            | ScoringQuery::Formula(_)
            | ScoringQuery::Sample(_) => self.clone(),
        }
    }
}
impl Generalizer for MmrInternal {
    /// Generalize the query vector; lambda, `using` and the candidate limit
    /// are copied over unchanged.
    fn remove_details(&self) -> Self {
        Self {
            vector: self.vector.remove_details(),
            using: self.using.clone(),
            lambda: self.lambda,
            candidates_limit: self.candidates_limit,
        }
    }
}
impl Generalizer for QueryEnum {
    /// Generalize whichever concrete query variant is held, preserving the
    /// variant itself so the query "shape" survives generalization.
    fn remove_details(&self) -> Self {
        match self {
            QueryEnum::Nearest(nearest) => QueryEnum::Nearest(nearest.remove_details()),
            QueryEnum::RecommendBestScore(recommend) => {
                QueryEnum::RecommendBestScore(recommend.remove_details())
            }
            QueryEnum::RecommendSumScores(recommend) => {
                QueryEnum::RecommendSumScores(recommend.remove_details())
            }
            // (fixed binding-name typo: was `disocover`)
            QueryEnum::Discover(discover) => QueryEnum::Discover(discover.remove_details()),
            QueryEnum::Context(context) => QueryEnum::Context(context.remove_details()),
            QueryEnum::FeedbackNaive(feedback) => {
                QueryEnum::FeedbackNaive(feedback.remove_details())
            }
        }
    }
}
impl<T: Generalizer> Generalizer for NamedQuery<T> {
    /// Keep the vector name, generalize the inner query.
    fn remove_details(&self) -> Self {
        Self {
            using: self.using.clone(),
            query: self.query.remove_details(),
        }
    }
}
impl Generalizer for VectorInputInternal {
    /// Point ids are already opaque and pass through unchanged; raw vectors
    /// are generalized.
    fn remove_details(&self) -> Self {
        match self {
            VectorInputInternal::Id(id) => VectorInputInternal::Id(*id),
            VectorInputInternal::Vector(vector) => {
                VectorInputInternal::Vector(vector.remove_details())
            }
        }
    }
}
impl Generalizer for VectorInternal {
    /// Replace vector contents with a dimension-only stand-in:
    /// dense -> `[len]`; sparse -> a single index equal to the original length
    /// with value 0.0; multi-dense -> a 1x2 multi-vector `[count, dim]`.
    fn remove_details(&self) -> Self {
        match self {
            VectorInternal::Dense(dense) => VectorInternal::Dense(vec![dense.len() as f32]),
            VectorInternal::Sparse(sparse) => VectorInternal::Sparse(
                // Single-entry sparse vector is always valid, hence the unwrap.
                SparseVector::new(vec![sparse.len() as DimId], vec![0.0]).unwrap(),
            ),
            VectorInternal::MultiDense(multi) => {
                VectorInternal::MultiDense(MultiDenseVectorInternal::new(
                    vec![multi.num_vectors() as f32, multi.dim as f32],
                    2,
                ))
            }
        }
    }
}
impl<T: Generalizer> Generalizer for DiscoveryQuery<T> {
    /// Generalize the target and every positive/negative context pair.
    fn remove_details(&self) -> Self {
        let generalized_pairs = self.pairs.iter().map(|pair| pair.remove_details()).collect();
        Self {
            target: self.target.remove_details(),
            pairs: generalized_pairs,
        }
    }
}
impl<T: Generalizer> Generalizer for ContextQuery<T> {
    /// Generalize every context pair in the query.
    fn remove_details(&self) -> Self {
        let generalized_pairs = self.pairs.iter().map(|pair| pair.remove_details()).collect();
        Self {
            pairs: generalized_pairs,
        }
    }
}
impl<T: Generalizer> Generalizer for ContextPair<T> {
    /// Generalize both sides of the pair.
    fn remove_details(&self) -> Self {
        Self {
            positive: self.positive.remove_details(),
            negative: self.negative.remove_details(),
        }
    }
}
impl<T: Generalizer> Generalizer for RecoQuery<T> {
    /// Generalize every positive and negative example.
    fn remove_details(&self) -> Self {
        Self {
            positives: self
                .positives
                .iter()
                .map(|example| example.remove_details())
                .collect(),
            negatives: self
                .negatives
                .iter()
                .map(|example| example.remove_details())
                .collect(),
        }
    }
}
impl<T: Generalizer> Generalizer for NaiveFeedbackQuery<T> {
    /// Generalize the target, every feedback item and the coefficients.
    fn remove_details(&self) -> Self {
        Self {
            target: self.target.remove_details(),
            feedback: self
                .feedback
                .iter()
                .map(|item| item.remove_details())
                .collect(),
            coefficients: self.coefficients.remove_details(),
        }
    }
}
impl<T: Generalizer> Generalizer for FeedbackItem<T> {
    /// Generalize the vector and scrub the feedback score to zero.
    fn remove_details(&self) -> Self {
        let FeedbackItem { vector, score: _ } = self;
        Self {
            vector: vector.remove_details(),
            score: 0.0.into(),
        }
    }
}
impl Generalizer for NaiveFeedbackCoefficients {
    /// All coefficient values are scrubbed to zero; only the struct shape is kept.
    fn remove_details(&self) -> Self {
        Self {
            a: 0.0.into(),
            b: 0.0.into(),
            c: 0.0.into(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/operations/generalizer/matrix.rs | lib/collection/src/operations/generalizer/matrix.rs | use api::rest::SearchMatrixRequestInternal;
use crate::collection::distance_matrix::CollectionSearchMatrixRequest;
use crate::operations::generalizer::Generalizer;
impl Generalizer for SearchMatrixRequestInternal {
    /// A matrix request contains no raw vector data, so generalization is a
    /// field-by-field copy. The exhaustive destructuring ensures a compile
    /// error if new fields are added without updating this impl.
    fn remove_details(&self) -> Self {
        let SearchMatrixRequestInternal {
            filter,
            sample,
            limit,
            using,
        } = self;
        Self {
            filter: filter.clone(),
            sample: *sample,
            limit: *limit,
            using: using.clone(),
        }
    }
}
impl Generalizer for CollectionSearchMatrixRequest {
    /// No raw vector data is present in a matrix request — generalization is a
    /// straight field-by-field copy.
    fn remove_details(&self) -> Self {
        Self {
            sample_size: self.sample_size,
            limit_per_sample: self.limit_per_sample,
            filter: self.filter.clone(),
            using: self.using.clone(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/fetch_vectors.rs | lib/collection/src/common/fetch_vectors.rs | use std::fmt::Debug;
use std::time::Duration;
use ahash::{AHashMap, AHashSet};
use api::rest::ShardKeySelector;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::Future;
use futures::future::try_join_all;
use segment::data_types::vectors::{VectorInternal, VectorRef};
use segment::types::{PointIdType, VectorName, VectorNameBuf, WithPayloadInterface, WithVector};
use shard::retrieve::record_internal::RecordInternal;
use tokio::sync::RwLockReadGuard;
use crate::collection::Collection;
use crate::common::batching::batch_requests;
use crate::common::retrieve_request_trait::RetrieveRequest;
use crate::operations::consistency_params::ReadConsistency;
use crate::operations::shard_selector_internal::ShardSelectorInternal;
use crate::operations::types::{
CollectionError, CollectionResult, PointRequestInternal, RecommendExample,
};
use crate::operations::universal_query::collection_query::{
CollectionQueryRequest, CollectionQueryResolveRequest, Query, VectorInputInternal,
};
/// Fetch the named vectors for the given point ids from `collection`.
///
/// Payloads are explicitly disabled (`WithPayloadInterface::Bool(false)`) —
/// only vector data is needed by callers resolving vector references.
pub async fn retrieve_points(
    collection: &Collection,
    ids: Vec<PointIdType>,
    vector_names: Vec<VectorNameBuf>,
    read_consistency: Option<ReadConsistency>,
    shard_selector: &ShardSelectorInternal,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
    collection
        .retrieve(
            PointRequestInternal {
                ids,
                with_payload: Some(WithPayloadInterface::Bool(false)),
                with_vector: WithVector::Selector(vector_names),
            },
            read_consistency,
            shard_selector,
            timeout,
            hw_measurement_acc,
        )
        .await
}
/// Either a plain borrow of a collection or an owned read-lock guard over one;
/// lets callers pass collections obtained through different locking paths.
pub enum CollectionRefHolder<'a> {
    Ref(&'a Collection),
    Guard(RwLockReadGuard<'a, Collection>),
}
pub async fn retrieve_points_with_locked_collection(
collection_holder: CollectionRefHolder<'_>,
ids: Vec<PointIdType>,
vector_names: Vec<VectorNameBuf>,
read_consistency: Option<ReadConsistency>,
shard_selector: &ShardSelectorInternal,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
match collection_holder {
CollectionRefHolder::Ref(collection) => {
retrieve_points(
collection,
ids,
vector_names,
read_consistency,
shard_selector,
timeout,
hw_measurement_acc,
)
.await
}
CollectionRefHolder::Guard(guard) => {
retrieve_points(
&guard,
ids,
vector_names,
read_consistency,
shard_selector,
timeout,
hw_measurement_acc,
)
.await
}
}
}
pub type CollectionName = String;
/// This is a temporary structure, which holds resolved references to vectors,
/// mentioned in the query.
///
/// ┌──────────────┐
/// │ │ -> Batch request
/// │ request(+ids)├───────┐ to storage
/// │ │ │
/// └──────────────┘ │
/// │
/// │
/// Reference Vectors ▼
/// ┌──────────────────────────────┐
/// │ │
/// │ ┌───────┐ ┌──────────┐ │
/// │ │ │ │ │ │
/// │ │ IDs ├─────►│ Vectors │ │
/// │ │ │ │ │ │
/// │ └───────┘ └──────────┘ │
/// │ │
/// └──────────────────────────────┘
///
#[derive(Default, Debug)]
pub struct ReferencedVectors {
    // Records fetched from other ("lookup_from") collections, keyed by collection name.
    collection_mapping: AHashMap<CollectionName, AHashMap<PointIdType, RecordInternal>>,
    // Records fetched from the current collection.
    default_mapping: AHashMap<PointIdType, RecordInternal>,
}
impl ReferencedVectors {
    /// Insert resolved records, routed into `default_mapping` when
    /// `collection_name` is `None`, otherwise into that collection's map.
    pub fn extend(
        &mut self,
        collection_name: Option<CollectionName>,
        mapping: impl IntoIterator<Item = (PointIdType, RecordInternal)>,
    ) {
        match collection_name {
            None => self.default_mapping.extend(mapping),
            Some(collection) => {
                let entry = self.collection_mapping.entry(collection);
                let entry_internal: &mut AHashMap<_, _> = entry.or_default();
                entry_internal.extend(mapping);
            }
        }
    }
    /// Merge another `ReferencedVectors` into this one (per-collection union;
    /// on key collision `other`'s record wins, as with `HashMap::extend`).
    pub fn extend_from_other(&mut self, other: Self) {
        self.default_mapping.extend(other.default_mapping);
        for (collection_name, points) in other.collection_mapping {
            let entry = self.collection_mapping.entry(collection_name);
            let entry_internal: &mut AHashMap<_, _> = entry.or_default();
            entry_internal.extend(points);
        }
    }
    /// Look up a resolved record by point id, either in the default collection
    /// (`None`) or in a named lookup collection.
    pub fn get(
        &self,
        lookup_collection_name: Option<&CollectionName>,
        point_id: PointIdType,
    ) -> Option<&RecordInternal> {
        match lookup_collection_name {
            None => self.default_mapping.get(&point_id),
            Some(collection) => {
                let collection_mapping = self.collection_mapping.get(collection)?;
                collection_mapping.get(&point_id)
            }
        }
    }
    /// Convert potential reference to a vector (vector id) into actual vector,
    /// which was resolved by the request to the storage.
    ///
    /// Returns `None` if the id was not resolved or the record lacks the
    /// requested named vector.
    pub fn resolve_reference<'a>(
        &'a self,
        collection_name: Option<&'a String>,
        vector_name: &VectorName,
        vector_input: VectorInputInternal,
    ) -> Option<VectorInternal> {
        match vector_input {
            VectorInputInternal::Vector(vector) => Some(vector),
            VectorInputInternal::Id(vid) => {
                let rec = self.get(collection_name, vid)?;
                rec.get_vector_by_name(vector_name).map(|v| v.to_owned())
            }
        }
    }
}
#[derive(Default, Debug)]
pub struct ReferencedPoints<'coll_name> {
    // Point ids to fetch, grouped by (optional) lookup collection name.
    ids_per_collection: AHashMap<Option<&'coll_name String>, AHashSet<PointIdType>>,
    // Vector names to fetch, grouped by the same key as `ids_per_collection`.
    vector_names_per_collection: AHashMap<Option<&'coll_name String>, AHashSet<VectorNameBuf>>,
}
impl<'coll_name> ReferencedPoints<'coll_name> {
    /// True when no point ids and no vector names have been registered.
    pub fn is_empty(&self) -> bool {
        self.ids_per_collection.is_empty() && self.vector_names_per_collection.is_empty()
    }
    /// Register point ids and a vector name to fetch for the given (optional)
    /// lookup collection. Both maps are keyed identically so they stay in sync.
    pub fn add_from_iter(
        &mut self,
        point_ids: impl Iterator<Item = PointIdType>,
        vector_name: VectorNameBuf,
        collection_name: Option<&'coll_name String>,
    ) {
        let reference_vectors_ids = self.ids_per_collection.entry(collection_name).or_default();
        let vector_names = self
            .vector_names_per_collection
            .entry(collection_name)
            .or_default();
        vector_names.insert(vector_name);
        point_ids.for_each(|point_id| {
            reference_vectors_ids.insert(point_id);
        });
    }
    /// Issue one retrieval per lookup collection (concurrently, via
    /// `try_join_all`) and collect the results into a `ReferencedVectors` map.
    ///
    /// `collection_by_name` resolves other collections by name; a `None`
    /// result yields `CollectionError::NotFound`.
    pub async fn fetch_vectors<'a, F, Fut>(
        mut self,
        collection: &Collection,
        read_consistency: Option<ReadConsistency>,
        collection_by_name: &F,
        shard_selector: ShardSelectorInternal,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<ReferencedVectors>
    where
        F: Fn(String) -> Fut,
        Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
    {
        // Invariant from add_from_iter: both maps share the same key set.
        debug_assert!(self.ids_per_collection.len() == self.vector_names_per_collection.len());
        let mut collections_names = Vec::new();
        let mut vector_retrieves = Vec::new();
        for (collection_name, reference_vectors_ids) in self.ids_per_collection {
            // do not fetch vectors if there are no reference vectors
            if reference_vectors_ids.is_empty() {
                continue;
            }
            collections_names.push(collection_name);
            let points: Vec<_> = reference_vectors_ids.into_iter().collect();
            // Unwrap is safe: both maps are always populated together (see above).
            let vector_names: Vec<_> = self
                .vector_names_per_collection
                .remove(&collection_name)
                .unwrap()
                .into_iter()
                .collect();
            match collection_name {
                None => vector_retrieves.push(retrieve_points_with_locked_collection(
                    CollectionRefHolder::Ref(collection),
                    points,
                    vector_names,
                    read_consistency,
                    &shard_selector,
                    timeout,
                    hw_measurement_acc.clone(),
                )),
                Some(name) => {
                    let other_collection = collection_by_name(name.clone()).await;
                    match other_collection {
                        Some(other_collection) => {
                            vector_retrieves.push(retrieve_points_with_locked_collection(
                                CollectionRefHolder::Guard(other_collection),
                                points,
                                vector_names,
                                read_consistency,
                                &shard_selector,
                                timeout,
                                hw_measurement_acc.clone(),
                            ))
                        }
                        None => {
                            return Err(CollectionError::NotFound {
                                what: format!("Collection {name}"),
                            });
                        }
                    }
                }
            }
        }
        let all_reference_vectors: Vec<Vec<RecordInternal>> =
            try_join_all(vector_retrieves).await?;
        // Results arrive in the same order the futures were pushed, so zipping
        // against `collections_names` re-associates each batch with its source.
        let mut all_vectors_records_map: ReferencedVectors = Default::default();
        for (collection_name, reference_vectors) in
            collections_names.into_iter().zip(all_reference_vectors)
        {
            all_vectors_records_map.extend(
                collection_name.cloned(),
                reference_vectors
                    .into_iter()
                    .map(|record| (record.id, record)),
            );
        }
        Ok(all_vectors_records_map)
    }
}
/// Convert recommendation examples into owned vectors, resolving point-id
/// examples through the pre-fetched `all_vectors_records_map`.
///
/// NOTE(review): the `unwrap` panics if a referenced point id was not
/// pre-fetched into the map — callers must have resolved all ids beforehand.
/// Points lacking the requested named vector are silently skipped.
pub fn convert_to_vectors_owned(
    examples: Vec<RecommendExample>,
    all_vectors_records_map: &ReferencedVectors,
    vector_name: &VectorName,
    collection_name: Option<&String>,
) -> Vec<VectorInternal> {
    examples
        .into_iter()
        .filter_map(|example| match example {
            RecommendExample::Dense(vector) => Some(vector.into()),
            RecommendExample::Sparse(vector) => Some(vector.into()),
            RecommendExample::PointId(vid) => {
                let rec = all_vectors_records_map.get(collection_name, vid).unwrap();
                rec.get_vector_by_name(vector_name).map(|v| v.to_owned())
            }
        })
        .collect()
}
/// Borrowing counterpart of [`convert_to_vectors_owned`]: lazily yields vector
/// references instead of owned vectors.
///
/// NOTE(review): as in the owned variant, the `unwrap` panics on a point id
/// missing from the pre-fetched map.
pub fn convert_to_vectors<'a>(
    examples: impl Iterator<Item = &'a RecommendExample> + 'a,
    all_vectors_records_map: &'a ReferencedVectors,
    vector_name: &'a VectorName,
    collection_name: Option<&'a String>,
) -> impl Iterator<Item = VectorRef<'a>> + 'a {
    examples.filter_map(move |example| match example {
        RecommendExample::Dense(vector) => Some(vector.into()),
        RecommendExample::Sparse(vector) => Some(vector.into()),
        RecommendExample::PointId(vid) => {
            let rec = all_vectors_records_map.get(collection_name, *vid).unwrap();
            rec.get_vector_by_name(vector_name)
        }
    })
}
/// Resolve all point-id vector references for a batch of requests.
///
/// Requests are grouped into runs sharing the same lookup shard key (see
/// `batch_requests`); each run accumulates its referenced ids/vector names
/// into a `ReferencedPoints`, which is then fetched. All per-run fetches are
/// awaited concurrently and merged into a single `ReferencedVectors`.
pub async fn resolve_referenced_vectors_batch<'a, 'b, F, Fut, Req: RetrieveRequest>(
    requests: &'b [(Req, ShardSelectorInternal)],
    collection: &Collection,
    collection_by_name: F,
    read_consistency: Option<ReadConsistency>,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<ReferencedVectors>
where
    F: Fn(String) -> Fut,
    Fut: Future<Output = Option<RwLockReadGuard<'a, Collection>>>,
{
    let fetch_requests = batch_requests::<
        &(Req, ShardSelectorInternal),
        Option<ShardKeySelector>,
        ReferencedPoints,
        Vec<_>,
    >(
        requests,
        // group consecutive requests by their lookup shard key
        |(request, _)| request.get_lookup_shard_key(),
        // accumulate referenced ids + vector names for the current group
        |(request, _), referenced_points| {
            let collection_name = request.get_lookup_collection();
            let vector_name = request.get_lookup_vector_name();
            let point_ids_iter = request.get_referenced_point_ids();
            referenced_points.add_from_iter(
                point_ids_iter.into_iter(),
                vector_name,
                collection_name,
            );
            Ok(())
        },
        // per group: start a (not yet awaited) fetch future
        |shard_selector, referenced_points, requests| {
            let shard_selector = match shard_selector {
                None => ShardSelectorInternal::All,
                Some(shard_key_selector) => ShardSelectorInternal::from(shard_key_selector),
            };
            if referenced_points.is_empty() {
                return Ok(());
            }
            let fetch = referenced_points.fetch_vectors(
                collection,
                read_consistency,
                &collection_by_name,
                shard_selector,
                timeout,
                hw_measurement_acc.clone(),
            );
            requests.push(fetch);
            Ok(())
        },
    )?;
    let batch_reference_vectors: Vec<_> = try_join_all(fetch_requests).await?;
    // Common case: a single group — return it directly, skipping the merge.
    if batch_reference_vectors.len() == 1 {
        return Ok(batch_reference_vectors.into_iter().next().unwrap());
    }
    let mut all_vectors_records_map: ReferencedVectors = Default::default();
    for reference_vectors in batch_reference_vectors {
        all_vectors_records_map.extend_from_other(reference_vectors);
    }
    Ok(all_vectors_records_map)
}
/// This function is used to build a list of queries to resolve vectors for the given batch of query requests.
///
/// For each request, one query is issue for the root request and one query for each nested prefetch.
/// The resolver queries have no prefetches.
pub fn build_vector_resolver_queries(
    // `&[_]` over `&Vec<_>` (clippy::ptr_arg); callers passing `&vec` still
    // compile via deref coercion, so this is backward compatible.
    requests_batch: &[(CollectionQueryRequest, ShardSelectorInternal)],
) -> Vec<(CollectionQueryResolveRequest, ShardSelectorInternal)> {
    // One resolver request per root query plus one per nested prefetch;
    // `flat_map` replaces the previous manual push loop.
    requests_batch
        .iter()
        .flat_map(|(request, shard_selector)| build_vector_resolver_query(request, shard_selector))
        .collect()
}
/// Build the prefetch-free resolver requests for a single query request:
/// one for the root query (if it references point ids) and one per flattened
/// nested prefetch.
pub fn build_vector_resolver_query(
    request: &CollectionQueryRequest,
    shard_selector: &ShardSelectorInternal,
) -> Vec<(CollectionQueryResolveRequest, ShardSelectorInternal)> {
    let mut resolver_requests = Vec::new();
    // Root query: only emit a resolver request if it actually references ids.
    if let Some(query) = &request.query {
        let referenced_ids = query.get_referenced_ids();
        if !referenced_ids.is_empty() {
            resolver_requests.push((
                CollectionQueryResolveRequest {
                    referenced_ids,
                    lookup_from: request.lookup_from.clone(),
                    using: request.using.clone(),
                },
                shard_selector.clone(),
            ));
        }
    }
    // Nested prefetches are flattened into standalone resolver requests.
    resolver_requests.extend(
        request
            .prefetch
            .iter()
            .flat_map(|prefetch| prefetch.flatten_resolver_requests())
            .map(|flatten| (flatten, shard_selector.clone())),
    );
    resolver_requests
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/batching.rs | lib/collection/src/common/batching.rs | use crate::operations::types::CollectionResult;
/// The function performs batching processing of read requests, that some arbitrary key
///
/// Functions are split into sequential subgroups based on the shard key.
/// There are two customizable aggregation functions:
///
/// * `accumulate_local` - called for each request to form a subgroup
/// * `accumulate_global` - called for each subgroup accumulated by `accumulate_local`
///
/// The function returns the result of the last call of `accumulate_global` function.
///
///
/// Example usage (simplified):
///
/// ```python
/// requests = [
/// Recommend(positive=[1], shard_key="cats"),
/// Recommend(positive=[2], shard_key="cats"),
/// Recommend(positive=[3], shard_key="dogs"),
/// Recommend(positive=[3], shard_key="dogs"),
/// ]
/// ```
///
/// We want to:
/// 1. Group requests by shard_key into Acc1 (vector of requests)
/// 2. Execute each group of requests and push the result into Acc2 (vector of results)
///
/// How to do that:
///
/// ```rust,ignore
/// batch_requests::<
/// Recommend, // Type of request
/// String, // Type of shard_key
/// Vec<Recommend>, // Type of local accumulator
/// Vec<Vec<ScoredPoint>>, // Type of global accumulator,
/// >(
/// requests,
/// |request| &request.shard_key,
/// |request, local_accumulator| { // Accumulate requests
/// local_accumulator.push(request);
/// // Note: we can have more complex logic here
/// // E.g. extracting IDs from requests and de-duplicating them
/// Ok(())
/// },
/// |shard_key, local_accumulator, global_accumulator| { // Execute requests and accumulate results
/// let result = execute_recommendations(local_accumulator, shard_key);
/// global_accumulator.push(result);
/// Ok(())
/// }
/// )
/// ```
pub fn batch_requests<Req, Key: PartialEq + Clone, Acc1: Default, Acc2: Default>(
    requests: impl IntoIterator<Item = Req>,
    get_key: impl Fn(&Req) -> &Key,
    mut accumulate_local: impl FnMut(Req, &mut Acc1) -> CollectionResult<()>,
    mut accumulate_global: impl FnMut(Key, Acc1, &mut Acc2) -> CollectionResult<()>,
) -> CollectionResult<Acc2> {
    let mut local_accumulator = Acc1::default();
    let mut global_accumulator = Acc2::default();
    // Key of the run currently being accumulated; `None` until the first item.
    let mut prev_key = None;
    for request in requests {
        let request_key = get_key(&request);
        if let Some(ref pk) = prev_key {
            // Key changed: flush the finished run, then start a fresh one.
            // Note: only *consecutive* equal keys form a group — equal keys
            // separated by a different key produce separate groups.
            if request_key != pk {
                accumulate_global(pk.clone(), local_accumulator, &mut global_accumulator)?;
                prev_key = Some(request_key.clone());
                local_accumulator = Acc1::default();
            }
        } else {
            prev_key = Some(request_key.clone());
        }
        accumulate_local(request, &mut local_accumulator)?;
    }
    // Flush the trailing run (if the input was non-empty).
    if let Some(prev_key) = prev_key {
        accumulate_global(prev_key, local_accumulator, &mut global_accumulator)?;
    }
    Ok(global_accumulator)
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Drive `batch_requests` with (key, value) pairs, grouping values into
    /// per-key vectors — the simplest possible pair of accumulators.
    fn run_batch_requests(requests: &[(char, usize)]) -> Vec<(char, Vec<(char, usize)>)> {
        batch_requests::<(char, usize), char, Vec<(char, usize)>, Vec<(char, Vec<(char, usize)>)>>(
            requests.iter().copied(),
            |req| &req.0,
            |req, acc1| {
                acc1.push(req);
                Ok(())
            },
            |key, acc1, acc2| {
                acc2.push((key, acc1));
                Ok(())
            },
        )
        .unwrap()
    }
    #[test]
    fn test_batch_requests() {
        assert_eq!(
            run_batch_requests(&[('a', 1), ('b', 2), ('c', 3)]),
            vec![
                ('a', vec![('a', 1)]),
                ('b', vec![('b', 2)]),
                ('c', vec![('c', 3)]),
            ]
        );
        assert_eq!(
            run_batch_requests(&[('a', 1), ('a', 2), ('b', 3), ('b', 4), ('c', 5), ('c', 6)]),
            vec![
                ('a', vec![('a', 1), ('a', 2)]),
                ('b', vec![('b', 3), ('b', 4)]),
                ('c', vec![('c', 5), ('c', 6)]),
            ]
        );
        // Only consecutive runs are grouped: a repeated key after a different
        // key starts a new group rather than merging with the earlier one.
        assert_eq!(
            run_batch_requests(&[('a', 1), ('b', 2), ('a', 3)]),
            vec![
                ('a', vec![('a', 1)]),
                ('b', vec![('b', 2)]),
                ('a', vec![('a', 3)]),
            ]
        );
        assert!(run_batch_requests(&[]).is_empty());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/retrieve_request_trait.rs | lib/collection/src/common/retrieve_request_trait.rs | use api::rest::schema::ShardKeySelector;
use segment::data_types::vectors::DEFAULT_VECTOR_NAME;
use segment::types::{PointIdType, VectorNameBuf};
use crate::operations::types::{DiscoverRequestInternal, RecommendRequestInternal, UsingVector};
use crate::operations::universal_query::collection_query::{
CollectionQueryResolveRequest, VectorInputInternal, VectorQuery,
};
const EMPTY_SHARD_KEY_SELECTOR: Option<ShardKeySelector> = None;
/// Abstraction over read requests that may reference stored points by id,
/// whose vectors must be fetched before the request can be executed.
pub trait RetrieveRequest {
    /// Collection to look vectors up in, if different from the current one.
    fn get_lookup_collection(&self) -> Option<&String>;
    /// All point ids referenced by this request.
    fn get_referenced_point_ids(&self) -> Vec<PointIdType>;
    /// Name of the vector to fetch for each referenced point.
    fn get_lookup_vector_name(&self) -> VectorNameBuf;
    /// Shard key selector of the lookup collection, if any.
    fn get_lookup_shard_key(&self) -> &Option<ShardKeySelector>;
}
impl RetrieveRequest for RecommendRequestInternal {
    fn get_lookup_collection(&self) -> Option<&String> {
        self.lookup_from.as_ref().map(|x| &x.collection)
    }
    /// All point-id examples, positives first, then negatives.
    fn get_referenced_point_ids(&self) -> Vec<PointIdType> {
        self.positive
            .iter()
            .chain(self.negative.iter())
            .filter_map(|example| example.as_point_id())
            .collect()
    }
    /// Resolution order: `lookup_from.vector`, then `using`, then the default
    /// vector name.
    fn get_lookup_vector_name(&self) -> VectorNameBuf {
        match &self.lookup_from {
            None => match &self.using {
                None => DEFAULT_VECTOR_NAME.to_owned(),
                Some(UsingVector::Name(vector_name)) => vector_name.clone(),
            },
            Some(lookup_from) => match &lookup_from.vector {
                None => DEFAULT_VECTOR_NAME.to_owned(),
                Some(vector_name) => vector_name.clone(),
            },
        }
    }
    fn get_lookup_shard_key(&self) -> &Option<ShardKeySelector> {
        self.lookup_from
            .as_ref()
            .map(|x| &x.shard_key)
            .unwrap_or(&EMPTY_SHARD_KEY_SELECTOR)
    }
}
impl RetrieveRequest for DiscoverRequestInternal {
    fn get_lookup_collection(&self) -> Option<&String> {
        self.lookup_from.as_ref().map(|x| &x.collection)
    }
    /// Collect the point-id references: the optional target first, then both
    /// sides of every context pair.
    fn get_referenced_point_ids(&self) -> Vec<PointIdType> {
        let mut res = Vec::new();
        // `if let` replaces the former `match` with an empty `None` arm.
        if let Some(example) = &self.target {
            if let Some(point_id) = example.as_point_id() {
                res.push(point_id);
            }
        }
        if let Some(context) = &self.context {
            for pair in context {
                // `Option` is an iterator of 0 or 1 items, so `extend` keeps
                // only the ids that are actually present.
                res.extend(pair.positive.as_point_id());
                res.extend(pair.negative.as_point_id());
            }
        }
        res
    }
    /// Resolution order: `lookup_from.vector`, then `using`, then the default
    /// vector name.
    fn get_lookup_vector_name(&self) -> VectorNameBuf {
        match &self.lookup_from {
            None => match &self.using {
                None => DEFAULT_VECTOR_NAME.to_owned(),
                Some(UsingVector::Name(vector_name)) => vector_name.clone(),
            },
            Some(lookup_from) => match &lookup_from.vector {
                None => DEFAULT_VECTOR_NAME.to_owned(),
                Some(vector_name) => vector_name.clone(),
            },
        }
    }
    fn get_lookup_shard_key(&self) -> &Option<ShardKeySelector> {
        self.lookup_from
            .as_ref()
            .map(|x| &x.shard_key)
            .unwrap_or(&EMPTY_SHARD_KEY_SELECTOR)
    }
}
impl RetrieveRequest for CollectionQueryResolveRequest {
    fn get_lookup_collection(&self) -> Option<&String> {
        Some(&self.lookup_from.as_ref()?.collection)
    }
    fn get_referenced_point_ids(&self) -> Vec<PointIdType> {
        self.referenced_ids.to_vec()
    }
    /// `lookup_from.vector` wins when present; otherwise fall back to `using`.
    fn get_lookup_vector_name(&self) -> VectorNameBuf {
        self.lookup_from
            .as_ref()
            .and_then(|lookup_from| lookup_from.vector.clone())
            .unwrap_or_else(|| self.using.clone())
    }
    fn get_lookup_shard_key(&self) -> &Option<ShardKeySelector> {
        match &self.lookup_from {
            Some(lookup_from) => &lookup_from.shard_key,
            None => &EMPTY_SHARD_KEY_SELECTOR,
        }
    }
}
impl VectorQuery<VectorInputInternal> {
    /// Collect references to all point ids mentioned anywhere in the query
    /// (raw-vector inputs are skipped).
    pub fn get_referenced_ids(&self) -> Vec<&PointIdType> {
        self.flat_iter()
            .filter_map(VectorInputInternal::as_id)
            .collect()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/eta_calculator.rs | lib/collection/src/common/eta_calculator.rs | use std::time::{Duration, Instant};
use ringbuffer::{ConstGenericRingBuffer, RingBuffer as _};
/// A progress ETA calculator.
/// Calculates the ETA roughly based on the last ten seconds of measurements.
/// (Ring buffer of `SIZE = 16` samples of `DURATION = 625 ms` each = 10 s.)
pub struct EtaCalculator(ConstGenericRingBuffer<(Instant, usize), { Self::SIZE }>);
impl EtaCalculator {
    // Ring-buffer capacity; together with DURATION defines the ~10 s window.
    const SIZE: usize = 16;
    // Minimum age of the second-newest sample before a new slot is started.
    const DURATION: Duration = Duration::from_millis(625);
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self::new_raw(Instant::now())
    }
    /// Capture the current progress and time.
    pub fn set_progress(&mut self, current_progress: usize) {
        self.set_progress_raw(Instant::now(), current_progress);
    }
    /// Calculate the ETA to reach the target progress.
    pub fn estimate(&self, target_progress: usize) -> Option<Duration> {
        self.estimate_raw(Instant::now(), target_progress)
    }
    // Clock-injectable constructor backing `new`; used directly by tests.
    fn new_raw(now: Instant) -> Self {
        Self([(now, 0)].as_ref().into())
    }
    fn set_progress_raw(&mut self, now: Instant, current_progress: usize) {
        if self.0.back().is_some_and(|(_, l)| current_progress < *l) {
            // Progress went backwards, reset the state.
            // Use the caller-provided `now` (not `Instant::now()`) so the
            // `*_raw` API stays deterministic when tests inject timestamps.
            // (Previously called `Self::new()`, which read the real clock.)
            *self = Self::new_raw(now);
        }
        // Consider this progress history: `[recent, older, even_older, ..., oldest]`.
        // Based on the age of `older`, we decide whether to update the `recent` or push a new item.
        //
        // NOTE: When `len() == 1`, calling `get_signed(-2)` would return the same value as
        // `get_signed(-1)`, but this is not what we want. Thus, we explicitly check for length.
        // Unwraps are safe because the length is checked.
        if self.0.len() >= 2 && now - self.0.get_signed(-2).unwrap().0 < Self::DURATION {
            *self.0.back_mut().unwrap() = (now, current_progress);
        } else {
            self.0.enqueue((now, current_progress));
        }
    }
    fn estimate_raw(&self, now: Instant, target_progress: usize) -> Option<Duration> {
        let &(last_time, last_progress) = self.0.back()?;
        // Check if the progress is already reached.
        let value_diff = match target_progress.checked_sub(last_progress) {
            None | Some(0) => return Some(Duration::from_secs(0)),
            Some(value_diff) => value_diff,
        };
        // Find the oldest measurement that is not too old.
        let &(old_time, old_progress) = self
            .0
            .iter()
            .find(|(time, _)| now - *time <= Self::DURATION * Self::SIZE as u32)?;
        if last_progress == old_progress {
            // No progress, no rate.
            return None;
        }
        // Rate over the retained window, then extrapolate to the target,
        // crediting time already elapsed since the last sample.
        let rate = (last_progress - old_progress) as f64 / (last_time - old_time).as_secs_f64();
        let elapsed = (now - last_time).as_secs_f64();
        let eta = (value_diff as f64 / rate - elapsed).max(0.0);
        Duration::try_from_secs_f64(eta).ok()
    }
}
#[cfg(test)]
mod tests {
    use approx::assert_relative_eq;

    use super::*;

    #[test]
    fn test_eta_calculator() {
        let mut now = Instant::now();
        let mut eta = EtaCalculator::new();

        // Steady progress: one unit every 500 ms.
        let delta = Duration::from_millis(500);
        for i in 0..=40 {
            now += delta;
            eta.set_progress_raw(now, i);
        }
        // 60 units remain at 500 ms/unit; allow 2% tolerance for window effects.
        assert_relative_eq!(
            eta.estimate_raw(now, 100).unwrap().as_secs_f64(),
            ((100 - 40) * delta).as_secs_f64(),
            max_relative = 0.02,
        );

        // Emulate a stall.
        assert!(
            eta.estimate_raw(now + Duration::from_secs(20), 100)
                .is_none(),
        );

        // Change the speed.
        let delta = Duration::from_millis(5000);
        for i in 41..=60 {
            now += delta;
            eta.set_progress_raw(now, i);
        }
        // The estimate should follow the most recent (slower) rate.
        assert_relative_eq!(
            eta.estimate_raw(now, 100).unwrap().as_secs_f64(),
            ((100 - 60) * delta).as_secs_f64(),
            max_relative = 0.02,
        );

        // Should be 0 when the target progress is reached or overreached.
        assert_eq!(eta.estimate_raw(now, 60).unwrap(), Duration::from_secs(0));
        assert_eq!(eta.estimate_raw(now, 50).unwrap(), Duration::from_secs(0));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/snapshot_stream.rs | lib/collection/src/common/snapshot_stream.rs | use std::error::Error;
use std::path::PathBuf;
use std::pin::Pin;
use actix_files::NamedFile;
use actix_web::http::header::ContentDisposition;
use actix_web::{HttpResponse, Responder};
use bytes::Bytes;
use futures::{Stream, TryStreamExt};
/// Snapshot stream backed by a file on the local filesystem.
pub struct SnapShotStreamLocalFS {
    /// Path of the snapshot file to serve.
    pub snapshot_path: PathBuf,
}
/// Boxed, pinned stream of snapshot bytes with a type-erased error.
type ByteStream = Pin<Box<dyn Stream<Item = Result<Bytes, Box<dyn Error>>>>>;

/// Snapshot stream backed by a remote object store.
// NOTE(review): "Strage" looks like a typo for "Storage", but the name is
// public API of this module, so it is kept unchanged here.
pub struct SnapShotStreamCloudStrage {
    stream: ByteStream,
    // Optional file name used for the `Content-Disposition` header.
    filename: Option<String>,
}

/// A snapshot download stream: either a local file or a remote byte stream.
pub enum SnapshotStream {
    LocalFS(SnapShotStreamLocalFS),
    ByteStream(SnapShotStreamCloudStrage),
}
impl SnapshotStream {
/// Create a new snapshot stream from a byte stream.
///
/// The `filename` is used as the `Content-Disposition` header and only
/// makes sense when the snapshot needs to be saved under a different
/// name than the endpoint path.
pub fn new_stream<S, E>(stream: S, filename: Option<String>) -> Self
where
S: Stream<Item = Result<Bytes, E>> + 'static,
E: Into<Box<dyn Error>>,
{
SnapshotStream::ByteStream(SnapShotStreamCloudStrage {
stream: Box::pin(stream.map_err(|e| e.into())),
filename,
})
}
}
impl Responder for SnapshotStream {
    type Body = actix_web::body::BoxBody;

    fn respond_to(self, req: &actix_web::HttpRequest) -> HttpResponse<Self::Body> {
        match self {
            // Local snapshots are served straight from disk via actix-files.
            SnapshotStream::LocalFS(stream) => match NamedFile::open(stream.snapshot_path) {
                Ok(file) => file.into_response(req),
                Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                    HttpResponse::NotFound().body(format!("File not found: {e}"))
                }
                Err(e) => {
                    HttpResponse::InternalServerError().body(format!("Failed to open file: {e}"))
                }
            },
            // Remote snapshots are passed through as a raw byte stream.
            SnapshotStream::ByteStream(SnapShotStreamCloudStrage { stream, filename }) => {
                let mut response = HttpResponse::Ok();
                response.content_type("application/octet-stream");
                if let Some(filename) = filename {
                    response.insert_header(ContentDisposition::attachment(filename));
                }
                response.streaming(stream)
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/collection_size_stats.rs | lib/collection/src/common/collection_size_stats.rs | use std::future::Future;
use std::sync::atomic::{AtomicUsize, Ordering};
/// Amount of requests that have to be done until the cached data gets updated.
const UPDATE_INTERVAL: usize = 32;

/// A cache for `LocalDataStats` utilizing `AtomicUsize` for better performance.
#[derive(Default)]
pub(crate) struct CollectionSizeStatsCache {
    // Cached stats; `None` when constructed without initial values
    // (in that case `update` is a no-op and readers always get `None`).
    stats: Option<CollectionSizeAtomicStats>,
    // Read-request counter; every `UPDATE_INTERVAL`-th request triggers a refresh.
    request_counter: AtomicUsize,
}
impl CollectionSizeStatsCache {
    pub fn new_with_values(stats: Option<CollectionSizeStats>) -> Self {
        Self {
            stats: stats.map(CollectionSizeAtomicStats::new),
            // Start at 1 so the very first read does not immediately re-fetch
            // the data this cache was just initialized with.
            request_counter: AtomicUsize::new(1),
        }
    }

    /// Checks whether the cache needs to be updated.
    /// For performance reasons, this also assumes a cached value gets read afterwards and brings the
    /// update counter one tick closer to the next update.
    pub fn check_need_update_and_increment(&self) -> bool {
        self.request_counter
            .fetch_add(1, Ordering::Relaxed)
            .is_multiple_of(UPDATE_INTERVAL)
    }

    /// Returns the cached values. Automatically refreshes the cache every
    /// `UPDATE_INTERVAL` calls using the given `update_fn`.
    pub async fn get_or_update_cache<U>(
        &self,
        update_fn: impl FnOnce() -> U,
    ) -> Option<&CollectionSizeAtomicStats>
    where
        U: Future<Output = Option<CollectionSizeStats>>,
    {
        if self.check_need_update_and_increment() {
            // Refresh the cached values; bail out when no fresh stats are available.
            let fresh = update_fn().await?;
            self.update(fresh);
        }
        // Give caller access to the cached (inner) values.
        self.stats.as_ref()
    }

    /// Sets all cache values to `new_stats`.
    pub fn update(&self, new_stats: CollectionSizeStats) {
        if let Some(stats) = &self.stats {
            stats.update(new_stats)
        }
    }
}
/// Same as `LocalDataStats` but each value is atomic.
#[derive(Default)]
pub(crate) struct CollectionSizeAtomicStats {
    // Estimated amount of vector storage size.
    vector_storage_size: AtomicUsize,
    // Estimated amount of payload storage size.
    payload_storage_size: AtomicUsize,
    // Estimated amount of points.
    points_count: AtomicUsize,
}
impl CollectionSizeAtomicStats {
    /// Get the vector storage size.
    pub fn get_vector_storage_size(&self) -> usize {
        self.vector_storage_size.load(Ordering::Relaxed)
    }

    /// Get the payload storage size.
    pub fn get_payload_storage_size(&self) -> usize {
        self.payload_storage_size.load(Ordering::Relaxed)
    }

    /// Get the points count.
    pub fn get_points_count(&self) -> usize {
        self.points_count.load(Ordering::Relaxed)
    }

    /// Wrap plain stats into their atomic counterpart.
    fn new(data: CollectionSizeStats) -> Self {
        // Exhaustive destructuring on purpose: adding a field to
        // `CollectionSizeStats` becomes a compile error here until handled.
        let CollectionSizeStats {
            vector_storage_size,
            payload_storage_size,
            points_count,
        } = data;
        Self {
            vector_storage_size: AtomicUsize::new(vector_storage_size),
            payload_storage_size: AtomicUsize::new(payload_storage_size),
            points_count: AtomicUsize::new(points_count),
        }
    }

    /// Overwrite all values with `new_stats`.
    ///
    /// Fields are stored individually with `Relaxed` ordering, so a concurrent
    /// reader may observe a mix of old and new values across fields.
    fn update(&self, new_stats: CollectionSizeStats) {
        let CollectionSizeStats {
            vector_storage_size,
            payload_storage_size,
            points_count,
        } = new_stats;
        self.vector_storage_size
            .store(vector_storage_size, Ordering::Relaxed);
        self.payload_storage_size
            .store(payload_storage_size, Ordering::Relaxed);
        self.points_count.store(points_count, Ordering::Relaxed);
    }
}
/// Statistics for local data, like the size of vector storage.
#[derive(Clone, Copy, Default)]
pub struct CollectionSizeStats {
    /// Estimated amount of vector storage size.
    pub vector_storage_size: usize,
    /// Estimated amount of payload storage size.
    pub payload_storage_size: usize,
    /// Estimated amount of points.
    pub points_count: usize,
}

impl CollectionSizeStats {
    /// Add `other`'s values onto this instance, field by field.
    pub(crate) fn accumulate_metrics_from(&mut self, other: &Self) {
        self.vector_storage_size += other.vector_storage_size;
        self.payload_storage_size += other.payload_storage_size;
        self.points_count += other.points_count;
    }

    /// Return these stats with every value multiplied by `factor`.
    pub(crate) fn multiplied_with(self, factor: usize) -> Self {
        Self {
            vector_storage_size: self.vector_storage_size * factor,
            payload_storage_size: self.payload_storage_size * factor,
            points_count: self.points_count * factor,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/file_utils.rs | lib/collection/src/common/file_utils.rs | use std::path::{Path, PathBuf};
use fs_err::tokio as tokio_fs;
use fs_extra::dir::CopyOptions;
use crate::operations::types::{CollectionError, CollectionResult};
/// Move directory from one location to another
///
/// Handles the case when the source and destination are on different filesystems.
///
/// # Cancel safety
///
/// This function is cancel safe.
///
/// If the future is dropped, moving the directory will either fully complete or not start at all.
/// With the exception of file IO errors in which case data may be partially moved.
pub async fn move_dir(from: impl Into<PathBuf>, to: impl Into<PathBuf>) -> CollectionResult<()> {
    let from = from.into();
    let to = to.into();

    // Fast path: a plain rename succeeds when both paths live on the same filesystem.
    log::trace!("Renaming directory {} to {}", from.display(), to.display());
    let Err(err) = tokio_fs::rename(&from, &to).await else {
        return Ok(());
    };
    log::trace!(
        "Failed to rename directory {} to {}: {err}",
        from.display(),
        to.display(),
    );

    // TODO: Only retry to move directory, if error kind is `CrossesDevices` or `AlreadyExists`?
    //
    // match err.kind() {
    //     io::ErrorKind::AlreadyExists | io::ErrorKind::CrossesDevices => (),
    //     _ => {
    //         return Err(CollectionError::service_error(format!(
    //             "failed to rename directory {} to {}: {err}",
    //             from.display(),
    //             to.display(),
    //         )));
    //     }
    // }

    // Slow path: copy the contents across filesystems on a blocking thread.
    // `spawn_blocking` tasks keep running even if this future is dropped,
    // which is what makes the copy phase run to completion once started.
    tokio::task::spawn_blocking(move || {
        if !to.exists() {
            log::trace!("Creating destination directory {}", to.display());
            fs_err::create_dir(&to).map_err(|err| {
                CollectionError::service_error(format!(
                    "failed to move directory {} to {}: \
                     failed to create destination directory: \
                     {err}",
                    from.display(),
                    to.display(),
                ))
            })?;
        }

        log::trace!("Moving directory {} to {}", from.display(), to.display());
        // `content_only` moves the directory's contents into `to` rather than
        // nesting `from` inside it; existing files are overwritten.
        let opts = CopyOptions::new().content_only(true).overwrite(true);
        fs_extra::dir::move_dir(&from, &to, &opts).map_err(|err| {
            CollectionError::service_error(format!(
                "failed to move directory {} to {}: {err}",
                from.display(),
                to.display(),
            ))
        })
    })
    .await??;

    Ok(())
}
/// Move file from one location to another.
/// Handles the case when the source and destination are on different filesystems.
pub async fn move_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> CollectionResult<()> {
    let from = from.as_ref();
    let to = to.as_ref();

    // Try to rename first and fallback to copy to prevent TOCTOU.
    if tokio_fs::rename(from, to).await.is_ok() {
        return Ok(());
    }

    // If rename failed, try to copy.
    // It is possible that the source and destination are on different filesystems.
    match tokio_fs::copy(from, to).await {
        Ok(_) => {}
        Err(err) => {
            // Don't leave a partially copied destination file behind.
            cleanup_file(to).await;
            return Err(CollectionError::service_error(format!(
                "Can't move file from {} to {} due to {}",
                from.display(),
                to.display(),
                err
            )));
        }
    }

    // The copy succeeded; complete the "move" by removing the source.
    match tokio_fs::remove_file(from).await {
        Ok(()) => Ok(()),
        Err(err) => {
            cleanup_file(to).await;
            Err(CollectionError::service_error(format!(
                "Can't remove file {} due to {}",
                from.display(),
                err
            )))
        }
    }
}
/// Remove the file if it exists. Print a warning if the file can't be removed.
async fn cleanup_file(path: &Path) {
if let Err(err) = tokio_fs::remove_file(path).await
&& err.kind() != std::io::ErrorKind::NotFound
{
log::warn!("Failed to remove file {}: {err}", path.display());
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/sha_256.rs | lib/collection/src/common/sha_256.rs | use std::io;
use std::path::Path;
use bytes::BytesMut;
use fs_err::tokio::File;
use sha2::{Digest, Sha256};
use tokio::io::AsyncReadExt;
/// Compute sha256 hash for the given file
pub async fn hash_file(file_path: &Path) -> io::Result<String> {
    log::debug!("Computing checksum for file: {file_path:?}");

    // Read the file in chunks of up to 1 MiB to bound memory usage.
    const ONE_MB: usize = 1024 * 1024;

    let file = File::open(file_path).await?;
    let mut reader = tokio::io::BufReader::new(file);
    let mut hasher = Sha256::new();
    let mut chunk = BytesMut::with_capacity(ONE_MB);

    loop {
        chunk.clear();
        let read = reader.read_buf(&mut chunk).await?;
        if read == 0 {
            // EOF reached.
            break;
        }
        hasher.update(&chunk[0..read]);
    }

    Ok(format!("{:x}", hasher.finalize()))
}
/// Compare two hashes, ignoring whitespace and case
pub fn hashes_equal(a: &str, b: &str) -> bool {
    // Canonical view of a hash string: whitespace removed, lowercased.
    fn canonical(s: &str) -> impl Iterator<Item = char> + '_ {
        s.chars()
            .filter(|c| !c.is_whitespace())
            .map(|c| c.to_ascii_lowercase())
    }
    canonical(a).eq(canonical(b))
}
#[cfg(test)]
mod tests {
    use super::*;

    // Covers exact match, case-insensitivity, whitespace-insensitivity,
    // and a genuine mismatch.
    #[test]
    fn test_compare_hash() {
        assert!(hashes_equal("0123abc", "0123abc"));
        assert!(hashes_equal("0123abc", "0123ABC"));
        assert!(hashes_equal("0123abc", "0123abc "));
        assert!(!hashes_equal("0123abc", "0123abd"));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/mod.rs | lib/collection/src/common/mod.rs | pub mod batching;
pub mod collection_size_stats;
pub mod eta_calculator;
pub mod fetch_vectors;
pub mod file_utils;
pub mod is_ready;
pub mod retrieve_request_trait;
pub mod sha_256;
pub mod snapshot_stream;
pub mod snapshots_manager;
pub mod stoppable_task;
pub mod stoppable_task_async;
pub mod transpose_iterator;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/stoppable_task.rs | lib/collection/src/common/stoppable_task.rs | use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use tokio::task::JoinHandle;
use tokio_util::task::AbortOnDropHandle;
/// A task that can be asked to stop
///
/// If this future is dropped the blocking task may be aborted prematurely if it has not started
/// yet.
pub struct StoppableTaskHandle<T> {
    // Aborts the spawned task when the handle is dropped without detaching.
    pub join_handle: AbortOnDropHandle<Option<T>>,
    // Set by the task itself right before it invokes the user closure.
    started: Arc<AtomicBool>,
    // Weak: if the task already finished and dropped its flag, there is
    // nothing left to signal.
    stopped: Weak<AtomicBool>,
}
impl<T> StoppableTaskHandle<T> {
    /// Whether the task has begun executing the user closure.
    pub fn is_started(&self) -> bool {
        self.started.load(Ordering::Relaxed)
    }

    /// Whether the underlying tokio task has run to completion.
    pub fn is_finished(&self) -> bool {
        self.join_handle.is_finished()
    }

    /// Signal the task to stop; does not wait for it.
    pub fn ask_to_stop(&self) {
        if let Some(v) = self.stopped.upgrade() {
            v.store(true, Ordering::Relaxed);
        }
    }

    /// Ask the task to stop and, if it already started, hand back its join
    /// handle so the caller can await the result.
    ///
    /// Fix: the previous `self.is_started().then_some(self.join_handle.detach())`
    /// evaluated `detach()` eagerly, so a task that had NOT started was
    /// detached instead of aborted — contradicting the abort-on-drop behavior
    /// documented on the struct. The branch below only detaches when the task
    /// actually started; otherwise dropping `self.join_handle`
    /// (an `AbortOnDropHandle`) aborts the still-queued task.
    pub fn stop(self) -> Option<JoinHandle<Option<T>>> {
        self.ask_to_stop();
        if self.is_started() {
            Some(self.join_handle.detach())
        } else {
            None
        }
    }

    /// Join this stoppable task
    ///
    /// To call this, the task must already be finished. Otherwise it panics in development, or
    /// blocks in release.
    pub async fn join(self) {
        debug_assert!(
            self.join_handle.is_finished(),
            "Task must be finished, we cannot block here on awaiting the join handle",
        );
        match self.join_handle.await {
            Ok(_) => {}
            Err(err) if err.is_cancelled() => {}
            // Log error on unknown error
            Err(err) => {
                log::error!("Stoppable task handle error for unknown reason: {err}");
            }
        }
    }
}
/// Spawn stoppable task `f`
pub fn spawn_stoppable<F, T>(f: F) -> StoppableTaskHandle<T>
where
    F: FnOnce(&AtomicBool) -> T + Send + 'static,
    T: Send + 'static,
{
    let started = Arc::new(AtomicBool::new(false));
    let stopped = Arc::new(AtomicBool::new(false));
    // We are OK if original value is destroyed with the thread
    // Weak reference is sufficient
    let stopped_weak = Arc::downgrade(&stopped);

    let join_handle = tokio::task::spawn_blocking({
        let started = started.clone();
        move || {
            // TODO: Should we use `Ordering::Acquire` or `Ordering::SeqCst`? 🤔
            if stopped.load(Ordering::Relaxed) {
                return None;
            }
            // TODO: Should we use `Ordering::Release` or `Ordering::SeqCst`? 🤔
            started.store(true, Ordering::Relaxed);
            Some(f(&stopped))
        }
    });

    StoppableTaskHandle {
        join_handle: AbortOnDropHandle::new(join_handle),
        started,
        stopped: stopped_weak,
    }
}
#[cfg(test)]
mod tests {
    use std::thread;
    use std::time::{Duration, Instant};

    use tokio::time::sleep;

    use super::*;

    // One iteration of the counting task's busy loop.
    const STEP: Duration = Duration::from_millis(5);

    /// Simple stoppable task counting steps until stopped. Panics after 1 minute.
    fn counting_task(stop: &AtomicBool) -> usize {
        let mut count = 0;
        let start = Instant::now();
        while !stop.load(Ordering::SeqCst) {
            count += 1;
            if start.elapsed() > Duration::from_secs(60) {
                panic!("Task is not stopped within 60 seconds");
            }
            thread::sleep(STEP);
        }
        count
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_task_stop() {
        let handle = spawn_stoppable(counting_task);

        // Signal task to stop after ~20 steps
        sleep(STEP * 20).await;
        assert!(!handle.is_finished());
        handle.ask_to_stop();
        sleep(Duration::from_secs(1)).await;
        assert!(handle.is_finished());

        // Expect task counter to be between [5, 25], we cannot be exact on busy systems
        if let Some(handle) = handle.stop()
            && let Some(count) = handle.await.unwrap()
        {
            assert!(
                count < 25,
                "Stoppable task should have count should be less than 25, but it is {count}",
            );
        }
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_task_stop_many() {
        const TASKS: usize = 64;

        let handles = (0..TASKS)
            .map(|_| spawn_stoppable(counting_task))
            .collect::<Vec<_>>();

        // Signal tasks to stop after ~20 steps
        sleep(STEP * 20).await;
        for handle in &handles {
            assert!(!handle.is_finished());
            handle.ask_to_stop();
        }

        // Expect task counters to be between [5, 30], we cannot be exact on busy systems
        for handle in handles {
            if let Some(handle) = handle.stop()
                && let Some(count) = handle.await.unwrap()
            {
                assert!(
                    count < 30, // 10 extra steps to stop all tasks
                    "Stoppable task should have count should be less than 30, but it is {count}",
                );
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/stoppable_task_async.rs | lib/collection/src/common/stoppable_task_async.rs | use std::future::Future;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use parking_lot::Mutex;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
/// Handle to an async task that can be cooperatively cancelled via a
/// `CancellationToken` and whose result can be read after completion.
pub struct CancellableAsyncTaskHandle<T: Clone> {
    pub join_handle: JoinHandle<T>,
    // Filled with a clone of the task result right before `finished` is set.
    result_holder: Arc<Mutex<Option<T>>>,
    // Token handed to the task; cancelling it is only a request.
    cancelled: CancellationToken,
    // Set to `true` by the task after publishing its result.
    finished: Arc<AtomicBool>,
}
impl<T: Clone> CancellableAsyncTaskHandle<T> {
    /// Whether the task has run to completion and published its result.
    pub fn is_finished(&self) -> bool {
        self.finished.load(Ordering::Relaxed)
    }

    /// Request cancellation without waiting for the task.
    pub fn ask_to_cancel(&self) {
        self.cancelled.cancel();
    }

    /// Request cancellation and hand back the join handle for awaiting.
    pub fn cancel(self) -> JoinHandle<T> {
        self.ask_to_cancel();
        self.join_handle
    }

    /// Clone of the task result, if the task already finished.
    pub fn get_result(&self) -> Option<T> {
        self.result_holder.lock().as_ref().cloned()
    }
}
/// Spawn async task `f`, returning a handle that allows cooperative cancellation.
///
/// `f` receives a `CancellationToken` and is responsible for checking it —
/// cancellation is a request, not a forced abort.
pub fn spawn_async_cancellable<F, T>(f: F) -> CancellableAsyncTaskHandle<T::Output>
where
    F: FnOnce(CancellationToken) -> T,
    F: Send + 'static,
    T: Future + Send + 'static,
    T::Output: Clone + Send + 'static,
{
    let cancelled = CancellationToken::new();
    let finished = Arc::new(AtomicBool::new(false));
    let result_holder = Arc::new(Mutex::new(None));
    CancellableAsyncTaskHandle {
        join_handle: tokio::task::spawn({
            // Clones shared with the spawned task; the originals stay in the handle.
            let (cancel, finished, result_holder) =
                (cancelled.clone(), finished.clone(), result_holder.clone());
            async move {
                let res = f(cancel).await;
                // Publish the result before flagging completion, so a reader
                // observing `finished == true` also sees the result.
                let mut result_holder_w = result_holder.lock();
                result_holder_w.replace(res.clone());
                // We use `Release` ordering to ensure that `f` won't be moved after the `store`
                // by the compiler
                finished.store(true, Ordering::Release);
                res
            }
        }),
        result_holder,
        cancelled,
        finished,
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::time::sleep;

    use super::*;

    // One iteration of the test task's loop.
    const STEP_MILLIS: u64 = 5;

    // Runs up to 10 steps, stopping early once cancellation is requested.
    async fn long_task(cancel: CancellationToken) -> i32 {
        let mut n = 0;
        for i in 0..10 {
            n = i;
            if cancel.is_cancelled() {
                break;
            }
            sleep(Duration::from_millis(STEP_MILLIS)).await;
        }
        n
    }

    #[tokio::test]
    async fn test_task_stop() {
        let handle = spawn_async_cancellable(long_task);
        sleep(Duration::from_millis(STEP_MILLIS * 5)).await;
        assert!(!handle.is_finished());
        handle.ask_to_cancel();
        sleep(Duration::from_millis(STEP_MILLIS * 3)).await;

        // If windows, we need to wait a bit more
        #[cfg(windows)]
        sleep(Duration::from_millis(STEP_MILLIS * 10)).await;

        assert!(handle.is_finished());
        // Cancellation is cooperative: the task stops before counting to 10.
        let res = handle.cancel().await.unwrap();
        assert!(res < 10);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/transpose_iterator.rs | lib/collection/src/common/transpose_iterator.rs | /// Convert a shape [N, M] to [M, N].
///
/// # Example
///
/// ```text
/// [
///     [1, 2, 3],
///     [4, 5, 6],
/// ]
///
/// to
///
/// [
///     [1, 4],
///     [2, 5],
///     [3, 6],
/// ]
/// ```
///
/// # Panics
///
/// May panic if the input is not a rectangle.
pub fn transpose<T>(v: Vec<Vec<T>>) -> Vec<Vec<T>> {
    transposed_iter(v).collect()
}
/// Convert a shape [N, M] to an iterator which collects into [M, N].
///
/// # Example
///```text
/// [
///     [1, 2, 3],
///     [4, 5, 6],
/// ]
///
/// to
///
/// [1, 4] -> [2, 5] -> [3, 6]
///```
///
/// An empty input yields an empty iterator.
///
/// # Panics
///
/// May panic if the input is not a rectangle.
pub fn transposed_iter<T>(rectangle: Vec<Vec<T>>) -> impl Iterator<Item = Vec<T>> {
    // Number of output items == length of the inner vectors (0 for empty input).
    //
    // Fix: the previous version asserted `!rectangle.is_empty()`, which made
    // the `unwrap_or(0)` fallback below unreachable dead code and panicked on
    // a degenerate-but-valid empty input. An empty input now simply produces
    // an empty iterator.
    let len = rectangle.first().map(Vec::len).unwrap_or(0);
    let mut iters: Vec<_> = rectangle.into_iter().map(|n| n.into_iter()).collect();
    (0..len).map(move |_| {
        iters
            .iter_mut()
            .map(|n| n.next().expect("Input is rectangular"))
            .collect()
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    // Transposing a 2x3 rectangle yields three 2-element columns.
    #[test]
    fn test_transpose() {
        let v = vec![vec![1, 2, 3], vec![4, 5, 6]];
        let res = transpose(v);
        let expected = [vec![1, 4], vec![2, 5], vec![3, 6]];
        for (i, column) in res.iter().enumerate() {
            assert_eq!(column, &expected[i]);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/is_ready.rs | lib/collection/src/common/is_ready.rs | use std::time::Duration;
use parking_lot::{Condvar, Mutex};
/// A boolean "ready" flag that threads can block on.
///
/// Starts out not ready; `make_ready` flips it and wakes all waiters.
pub struct IsReady {
    // Signalled whenever `value` transitions to `true`.
    condvar: Condvar,
    // Current readiness flag, guarded by the mutex the condvar waits on.
    value: Mutex<bool>,
}

impl Default for IsReady {
    fn default() -> Self {
        Self {
            condvar: Condvar::new(),
            value: Mutex::new(false),
        }
    }
}
impl IsReady {
    /// Set the flag to ready and wake all threads blocked in `await_ready*`.
    pub fn make_ready(&self) {
        let mut is_ready = self.value.lock();
        if !*is_ready {
            *is_ready = true;
            self.condvar.notify_all();
        }
    }

    /// Reset the flag to not ready. Does not notify anyone.
    pub fn make_not_ready(&self) {
        *self.value.lock() = false;
    }

    /// Non-blocking check of the current flag value.
    pub fn check_ready(&self) -> bool {
        *self.value.lock()
    }

    /// Block until `make_ready` is called (returns immediately if already ready).
    ///
    /// NOTE(review): using a single `if` instead of a `while` loop relies on
    /// parking_lot's condvar not waking spuriously — confirm that guarantee
    /// still holds when upgrading parking_lot.
    pub fn await_ready(&self) {
        let mut is_ready = self.value.lock();
        if !*is_ready {
            self.condvar.wait(&mut is_ready);
        }
    }

    /// Return `true` if ready, `false` if timed out.
    pub fn await_ready_for_timeout(&self, timeout: Duration) -> bool {
        let mut is_ready = self.value.lock();
        if !*is_ready {
            !self.condvar.wait_for(&mut is_ready, timeout).timed_out()
        } else {
            true
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/common/snapshots_manager.rs | lib/collection/src/common/snapshots_manager.rs | use std::path::{Path, PathBuf};
use common::tempfile_ext::MaybeTempPath;
use fs_err as fs;
use fs_err::tokio as tokio_fs;
use object_store::aws::AmazonS3Builder;
use serde::Deserialize;
use tempfile::TempPath;
use tokio::io::AsyncWriteExt;
use super::snapshot_stream::{SnapShotStreamLocalFS, SnapshotStream};
use crate::common::file_utils::move_file;
use crate::common::sha_256::hash_file;
use crate::operations::snapshot_ops::{
SnapshotDescription, get_checksum_path, get_snapshot_description,
};
use crate::operations::snapshot_storage_ops;
use crate::operations::types::{CollectionError, CollectionResult};
/// Top-level snapshot storage configuration.
#[derive(Clone, Deserialize, Debug, Default)]
pub struct SnapshotsConfig {
    /// Which backend snapshots are stored in (local disk by default).
    pub snapshots_storage: SnapshotsStorageConfig,
    /// S3 settings; only consulted when `snapshots_storage` is `S3`.
    pub s3_config: Option<S3Config>,
}

#[derive(Clone, Debug, Default, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SnapshotsStorageConfig {
    /// Store snapshots on the local filesystem (default).
    #[default]
    Local,
    /// Store snapshots in an S3-compatible object store.
    S3,
}

/// Location and credentials of the S3 bucket used for snapshots.
/// Optional fields left unset fall back to values picked up from the
/// environment by `AmazonS3Builder::from_env`.
#[derive(Clone, Deserialize, Debug, Default)]
pub struct S3Config {
    pub bucket: String,
    pub region: Option<String>,
    pub access_key: Option<String>,
    pub secret_key: Option<String>,
    pub endpoint_url: Option<String>,
}
/// Snapshot storage backed by a generic object store client (e.g. S3).
pub struct SnapshotStorageCloud {
    client: Box<dyn object_store::ObjectStore>,
}

/// Snapshot storage backed by the local filesystem.
pub struct SnapshotStorageLocalFS;

/// Dispatches snapshot operations to the configured storage backend.
pub enum SnapshotStorageManager {
    LocalFS(SnapshotStorageLocalFS),
    // Assuming that we can have common operations for all cloud storages
    S3(SnapshotStorageCloud),
    // <TODO> : Implement other cloud storage
    // GCS(SnapshotStorageCloud),
    // AZURE(SnapshotStorageCloud),
}
impl SnapshotStorageManager {
    /// Build a storage manager for the backend selected in `snapshots_config`.
    ///
    /// For S3, explicit `s3_config` values override whatever
    /// `AmazonS3Builder::from_env` picked up from the environment.
    pub fn new(snapshots_config: &SnapshotsConfig) -> CollectionResult<Self> {
        match snapshots_config.snapshots_storage {
            SnapshotsStorageConfig::Local => {
                Ok(SnapshotStorageManager::LocalFS(SnapshotStorageLocalFS))
            }
            SnapshotsStorageConfig::S3 => {
                let mut builder = AmazonS3Builder::from_env();
                if let Some(s3_config) = &snapshots_config.s3_config {
                    builder = builder.with_bucket_name(&s3_config.bucket);
                    if let Some(access_key) = &s3_config.access_key {
                        builder = builder.with_access_key_id(access_key);
                    }
                    if let Some(secret_key) = &s3_config.secret_key {
                        builder = builder.with_secret_access_key(secret_key);
                    }
                    if let Some(region) = &s3_config.region {
                        builder = builder.with_region(region);
                    }
                    if let Some(endpoint_url) = &s3_config.endpoint_url {
                        builder = builder.with_endpoint(endpoint_url);
                        // Plain-http endpoints (e.g. a local MinIO) must be
                        // allowed explicitly by the object_store builder.
                        if endpoint_url.starts_with("http://") {
                            builder = builder.with_allow_http(true);
                        }
                    }
                }
                let client: Box<dyn object_store::ObjectStore> =
                    Box::new(builder.build().map_err(|e| {
                        CollectionError::service_error(format!("Failed to create S3 client: {e}"))
                    })?);
                Ok(SnapshotStorageManager::S3(SnapshotStorageCloud { client }))
            }
        }
    }

    /// Delete a snapshot from the configured backend.
    pub async fn delete_snapshot(&self, snapshot_name: &Path) -> CollectionResult<bool> {
        match self {
            SnapshotStorageManager::LocalFS(storage_impl) => {
                storage_impl.delete_snapshot(snapshot_name).await
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl.delete_snapshot(snapshot_name).await
            }
        }
    }

    /// List descriptions of all snapshots stored under `directory`.
    pub async fn list_snapshots(
        &self,
        directory: &Path,
    ) -> CollectionResult<Vec<SnapshotDescription>> {
        match self {
            SnapshotStorageManager::LocalFS(storage_impl) => {
                storage_impl.list_snapshots(directory).await
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl.list_snapshots(directory).await
            }
        }
    }

    /// Store file in the snapshot storage.
    /// On success, the `source_path` is deleted.
    pub async fn store_file(
        &self,
        source_path: &Path,
        target_path: &Path,
    ) -> CollectionResult<SnapshotDescription> {
        debug_assert_ne!(
            source_path, target_path,
            "Source and target paths must be different"
        );
        match self {
            SnapshotStorageManager::LocalFS(storage_impl) => {
                storage_impl.store_file(source_path, target_path).await
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl.store_file(source_path, target_path).await
            }
        }
    }

    /// Download/copy a stored snapshot file from storage to `local_path`.
    pub async fn get_stored_file(
        &self,
        storage_path: &Path,
        local_path: &Path,
    ) -> CollectionResult<()> {
        match self {
            SnapshotStorageManager::LocalFS(storage_impl) => {
                storage_impl.get_stored_file(storage_path, local_path).await
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl.get_stored_file(storage_path, local_path).await
            }
        }
    }

    /// Resolve the backend-specific path of a named snapshot.
    pub fn get_snapshot_path(
        &self,
        snapshots_path: &Path,
        snapshot_name: &str,
    ) -> CollectionResult<PathBuf> {
        match self {
            SnapshotStorageManager::LocalFS(_storage_impl) => {
                SnapshotStorageLocalFS::get_snapshot_path(snapshots_path, snapshot_name)
            }
            SnapshotStorageManager::S3(_storage_impl) => Ok(
                SnapshotStorageCloud::get_snapshot_path(snapshots_path, snapshot_name),
            ),
        }
    }

    /// Resolve the backend-specific path of a named full (storage-wide) snapshot.
    pub fn get_full_snapshot_path(
        &self,
        snapshots_path: &str,
        snapshot_name: &str,
    ) -> CollectionResult<PathBuf> {
        match self {
            SnapshotStorageManager::LocalFS(_storage_impl) => {
                SnapshotStorageLocalFS::get_full_snapshot_path(snapshots_path, snapshot_name)
            }
            SnapshotStorageManager::S3(_storage_impl) => Ok(
                SnapshotStorageCloud::get_full_snapshot_path(snapshots_path, snapshot_name),
            ),
        }
    }

    /// Materialize a snapshot as a local file, possibly a temporary one
    /// (e.g. downloaded from cloud storage into `temp_dir`).
    pub async fn get_snapshot_file(
        &self,
        snapshot_path: &Path,
        temp_dir: &Path,
    ) -> CollectionResult<MaybeTempPath> {
        match self {
            SnapshotStorageManager::LocalFS(_storage_impl) => {
                SnapshotStorageLocalFS::get_snapshot_file(snapshot_path, temp_dir)
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl
                    .get_snapshot_file(snapshot_path, temp_dir)
                    .await
            }
        }
    }

    /// Open a snapshot as a download stream for serving over HTTP.
    pub async fn get_snapshot_stream(
        &self,
        snapshot_path: &Path,
    ) -> CollectionResult<SnapshotStream> {
        match self {
            SnapshotStorageManager::LocalFS(_storage_impl) => {
                Ok(SnapshotStorageLocalFS::get_snapshot_stream(snapshot_path))
            }
            SnapshotStorageManager::S3(storage_impl) => {
                storage_impl.get_snapshot_stream(snapshot_path).await
            }
        }
    }
}
impl SnapshotStorageLocalFS {
async fn delete_snapshot(&self, snapshot_path: &Path) -> CollectionResult<bool> {
let checksum_path = get_checksum_path(snapshot_path);
let (delete_snapshot, delete_checksum) = tokio::join!(
tokio_fs::remove_file(snapshot_path),
tokio_fs::remove_file(checksum_path),
);
delete_snapshot.map_err(|e| match e.kind() {
std::io::ErrorKind::NotFound => {
CollectionError::not_found(format!("Snapshot {snapshot_path:?}"))
}
_ => e.into(),
})?;
// We might not have a checksum file for the snapshot, ignore deletion errors in that case
if let Err(err) = delete_checksum {
log::warn!("Failed to delete checksum file for snapshot, ignoring: {err}");
}
Ok(true)
}
async fn list_snapshots(&self, directory: &Path) -> CollectionResult<Vec<SnapshotDescription>> {
let mut entries = match tokio_fs::read_dir(directory).await {
Ok(entries) => entries,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
Err(e) => return Err(e.into()),
};
let mut snapshots = Vec::new();
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if !path.is_dir() && path.extension().is_some_and(|ext| ext == "snapshot") {
snapshots.push(get_snapshot_description(&path).await?);
}
}
Ok(snapshots)
}
    async fn store_file(
        &self,
        source_path: &Path,
        target_path: &Path,
    ) -> CollectionResult<SnapshotDescription> {
        // Steps:
        //
        // 1. Make sure that the target directory exists.
        // 2. Compute the checksum of the source file.
        // 3. Generate temporary file name, which should be used on the same file system as the target directory.
        // 4. Move or copy the source file to the temporary file. (move might not be possible if the source and target are on different file systems)
        // 5. Move the temporary file to the target file. (move is atomic, copy is not)
        if let Some(target_dir) = target_path.parent() {
            fs::create_dir_all(target_dir)?;
        }
        // Move snapshot to permanent location.
        // We can't move right away, because snapshot folder can be on another mounting point.
        // We can't copy to the target location directly, because copy is not atomic.
        // So we copy to the final location with a temporary name and then rename atomically.
        let target_path_tmp = TempPath::from_path(target_path.with_extension("tmp"));
        // compute and store the file's checksum before the final snapshot file is saved
        // to avoid making snapshot available without checksum
        let checksum_path = get_checksum_path(target_path);
        let checksum = hash_file(source_path).await?;
        // Both `TempPath`s remove their file on drop, so any early `?` return
        // below cleans up the partially written files automatically.
        let checksum_file = TempPath::from_path(&checksum_path);
        let mut file = tokio_fs::File::create(checksum_path.as_path()).await?;
        file.write_all(checksum.as_bytes()).await?;
        move_file(&source_path, &target_path_tmp).await?;
        // Atomic rename of the fully-written temp file into its final name.
        target_path_tmp.persist(target_path).map_err(|e| e.error)?;
        // The snapshot is in place; stop treating the checksum file as temporary.
        checksum_file.keep()?;
        get_snapshot_description(target_path).await
    }
async fn get_stored_file(
&self,
storage_path: &Path,
local_path: &Path,
) -> CollectionResult<()> {
if let Some(target_dir) = local_path.parent()
&& !target_dir.exists()
{
fs::create_dir_all(target_dir)?;
}
if storage_path != local_path {
move_file(&storage_path, &local_path).await?;
}
Ok(())
}
/// Get absolute file path for a full snapshot by name
///
/// This enforces the file to be inside the snapshots directory
fn get_full_snapshot_path(
snapshots_path: &str,
snapshot_name: &str,
) -> CollectionResult<PathBuf> {
let absolute_snapshot_dir = fs::canonicalize(snapshots_path).map_err(|_| {
CollectionError::not_found(format!("Snapshot directory: {snapshots_path}"))
})?;
let absolute_snapshot_path = fs::canonicalize(absolute_snapshot_dir.join(snapshot_name))
.map_err(|_| CollectionError::not_found(format!("Snapshot {snapshot_name}")))?;
if !absolute_snapshot_path.starts_with(absolute_snapshot_dir) {
return Err(CollectionError::not_found(format!(
"Snapshot {snapshot_name}"
)));
}
if !absolute_snapshot_path.is_file() {
return Err(CollectionError::not_found(format!(
"Snapshot {snapshot_name}"
)));
}
Ok(absolute_snapshot_path)
}
/// Get absolute file path for a collection snapshot by name
///
/// This enforces the file to be inside the snapshots directory
fn get_snapshot_path(snapshots_path: &Path, snapshot_name: &str) -> CollectionResult<PathBuf> {
let absolute_snapshot_dir = fs::canonicalize(snapshots_path).map_err(|_| {
CollectionError::not_found(format!("Snapshot directory: {}", snapshots_path.display()))
})?;
let absolute_snapshot_path = fs::canonicalize(absolute_snapshot_dir.join(snapshot_name))
.map_err(|_| CollectionError::not_found(format!("Snapshot {snapshot_name}")))?;
if !absolute_snapshot_path.starts_with(absolute_snapshot_dir) {
return Err(CollectionError::not_found(format!(
"Snapshot {snapshot_name}"
)));
}
if !absolute_snapshot_path.is_file() {
return Err(CollectionError::not_found(format!(
"Snapshot {snapshot_name}"
)));
}
Ok(absolute_snapshot_path)
}
fn get_snapshot_file(
snapshot_path: &Path,
_temp_dir: &Path,
) -> CollectionResult<MaybeTempPath> {
if !snapshot_path.exists() {
return Err(CollectionError::not_found(format!(
"Snapshot {snapshot_path:?}"
)));
}
Ok(MaybeTempPath::Persistent(snapshot_path.to_path_buf()))
}
fn get_snapshot_stream(snapshot_path: &Path) -> SnapshotStream {
SnapshotStream::LocalFS(SnapShotStreamLocalFS {
snapshot_path: snapshot_path.to_path_buf(),
})
}
}
impl SnapshotStorageCloud {
    /// Delete a snapshot object from cloud storage.
    async fn delete_snapshot(&self, snapshot_path: &Path) -> CollectionResult<bool> {
        snapshot_storage_ops::delete_snapshot(&self.client, snapshot_path).await
    }
    /// List descriptions of all snapshots stored under `directory`.
    async fn list_snapshots(&self, directory: &Path) -> CollectionResult<Vec<SnapshotDescription>> {
        snapshot_storage_ops::list_snapshot_descriptions(&self.client, directory).await
    }
    /// Upload a local snapshot file to cloud storage, remove the local staging
    /// copy, and return the stored snapshot's description.
    async fn store_file(
        &self,
        source_path: &Path,
        target_path: &Path,
    ) -> CollectionResult<SnapshotDescription> {
        snapshot_storage_ops::multipart_upload(&self.client, source_path, target_path).await?;
        // The local source file is only a staging copy; drop it after a successful upload.
        tokio_fs::remove_file(source_path).await?;
        snapshot_storage_ops::get_snapshot_description(&self.client, target_path).await
    }
    /// Download a stored snapshot into `local_path`, creating parent directories
    /// as needed. No-op when storage and local paths are equal.
    async fn get_stored_file(
        &self,
        storage_path: &Path,
        local_path: &Path,
    ) -> CollectionResult<()> {
        if let Some(target_dir) = local_path.parent()
            && !target_dir.exists()
        {
            fs::create_dir_all(target_dir)?;
        }
        if storage_path != local_path {
            // download snapshot from cloud storage to local path
            snapshot_storage_ops::download_snapshot(&self.client, storage_path, local_path).await?;
        }
        Ok(())
    }
    /// Object key for a collection snapshot by name.
    ///
    /// Unlike the local-FS variant, no canonicalization/containment check is
    /// performed here — the path is used as an object-store key.
    fn get_snapshot_path(snapshots_path: &Path, snapshot_name: &str) -> PathBuf {
        let absolute_snapshot_dir = snapshots_path;
        absolute_snapshot_dir.join(snapshot_name)
    }
    /// Object key for a full snapshot by name.
    fn get_full_snapshot_path(snapshots_path: &str, snapshot_name: &str) -> PathBuf {
        let absolute_snapshot_dir = PathBuf::from(snapshots_path);
        absolute_snapshot_dir.join(snapshot_name)
    }
    /// Download the snapshot into a uniquely named temporary file in `temp_dir`.
    ///
    /// The returned temporary path is removed when dropped.
    async fn get_snapshot_file(
        &self,
        snapshot_path: &Path,
        temp_dir: &Path,
    ) -> CollectionResult<MaybeTempPath> {
        let temp_path = tempfile::Builder::new()
            .prefix(
                snapshot_path
                    .file_stem()
                    .ok_or_else(|| CollectionError::bad_request("Invalid snapshot path"))?,
            )
            .suffix(".snapshot")
            .tempfile_in(temp_dir)?
            .into_temp_path();
        snapshot_storage_ops::download_snapshot(&self.client, snapshot_path, &temp_path).await?;
        Ok(MaybeTempPath::Temporary(temp_path))
    }
    /// Open a byte stream over the stored snapshot object.
    ///
    /// Maps a missing object to a `not_found` error; all other object-store
    /// failures become service errors.
    pub async fn get_snapshot_stream(
        &self,
        snapshot_path: &Path,
    ) -> CollectionResult<SnapshotStream> {
        let snapshot_path = snapshot_storage_ops::trim_dot_slash(snapshot_path)?;
        let download = self.client.get(&snapshot_path).await.map_err(|e| match e {
            object_store::Error::NotFound { path, source } => {
                CollectionError::not_found(format!("Snapshot {path} does not exist: {source}"))
            }
            _ => CollectionError::service_error(format!("Failed to get {snapshot_path}: {e}")),
        })?;
        Ok(SnapshotStream::new_stream(download.into_stream(), None))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/test.rs | lib/collection/src/shards/test.rs | use super::local_shard::clock_map::ClockMap;
use super::replica_set::clock_set::ClockSet;
use super::shard::PeerId;
use crate::operations::ClockTag;
#[test]
fn clock_set_clock_map_workflow() {
    // End-to-end walk through the interplay of a `ClockSet` (producer of clock
    // tags) and a `ClockMap` (validator that accepts, rejects or corrects tags).
    let mut helper = Helper::new();
    // `ClockSet` and `ClockMap` "stick" to tick `0`, until `ClockSet` is advanced at least once
    helper.tick_clock().assert(0);
    helper.advance_clock_map(false).assert(0, 0, false);
    helper.advance_clock_map(false).assert(0, 0, false);
    helper.advance_clock(false).assert(0, 0, false);
    // `ClockSet` and `ClockMap` tick sequentially and in sync after that
    for tick in 1..=10 {
        helper.advance_clock(false).assert(tick, tick, true);
    }
    // `ClockMap` advances to newer ticks
    for tick in 11..=50 {
        if tick % 10 != 0 {
            // Tick `ClockSet` few times, without advancing `ClockMap`...
            helper.tick_clock().assert(tick);
        } else {
            // ...then advance both `ClockMap` and `ClockSet`
            helper.advance_clock(false).assert(tick, tick, true);
        }
    }
    // `ClockMap` rejects tick `0` and advances `ClockSet`
    helper.clock_set = Default::default();
    helper.advance_clock(false).assert(0, 50, false);
    helper.tick_clock().assert(51);
    // `ClockMap` rejects older (or current) ticks...
    helper.clock_set = Default::default();
    helper.clock_set.get_clock().advance_to(0);
    for tick in 1..=50 {
        helper.advance_clock_map(false).assert(tick, 50, false);
    }
    // ...and advances `ClockSet`
    helper.clock_set = Default::default();
    helper.clock_set.get_clock().advance_to(42);
    helper.advance_clock(false).assert(43, 50, false);
    helper.tick_clock().assert(51);
    // `ClockMap` advances to newer ticks with `force = true`
    helper.clock_set = Default::default();
    helper.advance_clock(false).assert(0, 50, false);
    for tick in 51..=100 {
        helper.advance_clock(true).assert(tick, tick, true);
    }
    // `ClockMap` accepts older (or current) ticks with `force = true`...
    helper.clock_set = Default::default();
    for tick in 0..=100 {
        helper.advance_clock(true).assert(tick, tick, true);
    }
    // ...but it does not affect current tick of `ClockMap` in any way
    helper.clock_set = Default::default();
    helper.clock_set.get_clock().advance_to(42);
    helper.advance_clock(false).assert(43, 100, false);
    helper.tick_clock().assert(101);
}
#[derive(Clone, Debug)]
struct Helper {
    // Producer side under test: hands out clocks and generates ticks.
    clock_set: ClockSet,
    // Validator side under test: accepts/rejects/corrects incoming clock tags.
    clock_map: ClockMap,
}
// Arbitrary peer id used for all clock tags in this test.
const PEER_ID: PeerId = 1337;
impl Helper {
    /// Fresh helper with a default (empty) clock set and clock map.
    pub fn new() -> Self {
        Self {
            clock_set: ClockSet::default(),
            clock_map: ClockMap::default(),
        }
    }
    /// Tick the clock set once, without involving the clock map.
    pub fn tick_clock(&mut self) -> TickClockStatus {
        let mut clock = self.clock_set.get_clock();
        let clock_tag = ClockTag::new(PEER_ID, clock.id() as _, clock.tick_once());
        TickClockStatus { clock_tag }
    }
    /// Tick the clock set and feed the tag through the clock map, but do NOT
    /// apply the (possibly corrected) tick back to the clock set.
    pub fn advance_clock_map(&mut self, force: bool) -> AdvanceStatus {
        self.advance(force, false)
    }
    /// Tick the clock set, feed the tag through the clock map, and advance the
    /// clock set to the (possibly corrected) tick.
    pub fn advance_clock(&mut self, force: bool) -> AdvanceStatus {
        self.advance(force, true)
    }
    fn advance(&mut self, force: bool, advance_clock: bool) -> AdvanceStatus {
        let mut clock = self.clock_set.get_clock();
        let clock_tag = ClockTag::new(PEER_ID, clock.id() as _, clock.tick_once()).force(force);
        // Keep the original tag; the clock map may correct the copy's tick.
        let mut clock_map_tag = clock_tag;
        let accepted = self
            .clock_map
            .advance_clock_and_correct_tag(&mut clock_map_tag);
        // Only the tick may be corrected; peer and clock ids must pass through unchanged.
        assert_eq!(clock_tag.peer_id, clock_map_tag.peer_id);
        assert_eq!(clock_tag.clock_id, clock_map_tag.clock_id);
        if advance_clock {
            clock.advance_to(clock_map_tag.clock_tick);
        }
        AdvanceStatus {
            clock_tag,
            clock_map_tag,
            accepted,
        }
    }
}
#[derive(Copy, Clone, Debug)]
struct TickClockStatus {
    // Tag produced by ticking the clock set.
    clock_tag: ClockTag,
}
impl TickClockStatus {
    /// Assert the tick value produced by the clock set.
    pub fn assert(&self, expected_tick: u64) {
        assert_eq!(self.clock_tag.clock_tick, expected_tick)
    }
}
#[derive(Copy, Clone, Debug)]
struct AdvanceStatus {
    // Tag as produced by the clock set.
    clock_tag: ClockTag,
    // Same tag after the clock map had a chance to correct its tick.
    clock_map_tag: ClockTag,
    // Whether the clock map accepted the tag.
    accepted: bool,
}
impl AdvanceStatus {
    /// Assert the produced tick, the (possibly corrected) clock-map tick,
    /// and whether the tag was accepted.
    pub fn assert(&self, expected_tick: u64, expected_cm_tick: u64, expected_status: bool) {
        assert_eq!(expected_tick, self.clock_tag.clock_tick);
        assert_eq!(expected_cm_tick, self.clock_map_tag.clock_tick);
        assert_eq!(expected_status, self.accepted);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard_config.rs | lib/collection/src/shards/shard_config.rs | use std::path::{Path, PathBuf};
use common::tar_ext;
use io::file_operations::{atomic_save_json, read_json};
use serde::{Deserialize, Serialize};
use crate::operations::types::CollectionResult;
use crate::shards::shard::PeerId;
/// Name of the per-shard config file inside a shard directory.
pub const SHARD_CONFIG_FILE: &str = "shard_config.json";
/// Kind of shard recorded in a shard's config file.
///
/// Only `ReplicaSet` is constructed by current code (see
/// `ShardConfig::new_replica_set`); the other variants are kept so configs
/// written by older versions can still be deserialized.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub enum ShardType {
    Local, // Deprecated
    Remote { peer_id: PeerId }, // Deprecated
    Temporary, // Deprecated
    ReplicaSet,
}
/// Persisted per-shard configuration, stored as JSON in the shard directory
/// (see `SHARD_CONFIG_FILE`).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct ShardConfig {
    pub r#type: ShardType,
}
impl ShardConfig {
    /// Full path of the config file inside the given shard directory.
    pub fn get_config_path(shard_path: &Path) -> PathBuf {
        shard_path.join(SHARD_CONFIG_FILE)
    }

    /// Construct a config describing a replica-set shard.
    pub fn new_replica_set() -> Self {
        let r#type = ShardType::ReplicaSet;
        Self { r#type }
    }

    /// Read the config from a shard directory; `Ok(None)` when no config file exists.
    pub fn load(shard_path: &Path) -> CollectionResult<Option<Self>> {
        let path = Self::get_config_path(shard_path);
        if path.exists() {
            let config = read_json(&path)?;
            Ok(Some(config))
        } else {
            log::info!("Detected missing shard config file in {shard_path:?}");
            Ok(None)
        }
    }

    /// Atomically persist the config into the shard directory.
    pub fn save(&self, shard_path: &Path) -> CollectionResult<()> {
        let path = Self::get_config_path(shard_path);
        atomic_save_json(&path, self)?;
        Ok(())
    }

    /// Serialize the config as JSON and append it to a snapshot tar archive.
    pub async fn save_to_tar(&self, tar: &tar_ext::BuilderExt) -> CollectionResult<()> {
        let serialized = serde_json::to_vec(self)?;
        tar.append_data(serialized, Path::new(SHARD_CONFIG_FILE))
            .await?;
        Ok(())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/update_tracker.rs | lib/collection/src/shards/update_tracker.rs | use std::future::{self, Future};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::sync::watch;
/// Tracks how many update operations are currently in flight and lets
/// observers wait for the next update to start.
#[derive(Clone, Debug)]
pub struct UpdateTracker {
    // Number of in-flight update operations (guards not yet dropped).
    update_operations: Arc<AtomicUsize>,
    // Notifies subscribers when the in-flight count transitions from 0 to 1.
    update_notifier: Arc<watch::Sender<()>>,
}
impl Default for UpdateTracker {
fn default() -> Self {
let (update_notifier, _) = watch::channel(());
Self {
update_operations: Default::default(),
update_notifier: Arc::new(update_notifier),
}
}
}
impl UpdateTracker {
pub fn is_update_in_progress(&self) -> bool {
self.update_operations.load(Ordering::Relaxed) > 0
}
pub fn watch_for_update(&self) -> impl Future<Output = ()> {
let mut update_subscriber = self.update_notifier.subscribe();
async move {
match update_subscriber.changed().await {
Ok(()) => (),
Err(_) => future::pending().await,
}
}
}
pub fn update(&self) -> UpdateGuard {
if self.update_operations.fetch_add(1, Ordering::Relaxed) == 0 {
self.update_notifier.send_replace(());
}
UpdateGuard::new(self.update_operations.clone())
}
}
/// RAII marker for a single in-flight update operation.
#[derive(Debug)]
pub struct UpdateGuard {
    // Shared in-flight counter, decremented when this guard is dropped.
    update_operations: Arc<AtomicUsize>,
}
impl UpdateGuard {
    // Guards are only handed out by `UpdateTracker::update`, which increments
    // the counter before constructing the guard.
    fn new(update_operations: Arc<AtomicUsize>) -> Self {
        Self { update_operations }
    }
}
impl Drop for UpdateGuard {
    // Marks the end of the update operation tracked by this guard.
    fn drop(&mut self) {
        self.update_operations.fetch_sub(1, Ordering::Relaxed);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/telemetry.rs | lib/collection/src/shards/telemetry.rs | use std::collections::HashMap;
use schemars::JsonSchema;
use segment::common::anonymize::{Anonymize, anonymize_collection_values};
use segment::common::operation_time_statistics::OperationDurationStatistics;
use segment::telemetry::SegmentTelemetry;
use segment::types::ShardKey;
use serde::Serialize;
use crate::collection_manager::optimizers::TrackerTelemetry;
use crate::operations::types::{OptimizersStatus, ShardStatus};
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
/// Telemetry for one shard replica set: the local replica (if any), remote
/// replicas, and the replica state of each peer.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct ReplicaSetTelemetry {
    #[anonymize(false)]
    pub id: ShardId,
    pub key: Option<ShardKey>,
    pub local: Option<LocalShardTelemetry>,
    pub remote: Vec<RemoteShardTelemetry>,
    #[anonymize(with = anonymize_collection_values)]
    pub replicate_states: HashMap<PeerId, ReplicaState>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub partial_snapshot: Option<PartialSnapshotTelemetry>,
}
/// Telemetry for a remote shard replica, as observed from this peer.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct RemoteShardTelemetry {
    #[anonymize(false)]
    pub shard_id: ShardId,
    #[anonymize(false)]
    pub peer_id: Option<PeerId>,
    pub searches: OperationDurationStatistics,
    pub updates: OperationDurationStatistics,
}
/// Telemetry of a shard that lives on this node.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize, Default)]
pub struct LocalShardTelemetry {
    /// Shard variant ("local shard", "proxy shard", ...) — see `Shard::variant_name`.
    #[anonymize(false)]
    pub variant_name: Option<String>,
    /// Current shard status, when available.
    pub status: Option<ShardStatus>,
    /// Total number of optimized points since the last start.
    pub total_optimized_points: usize,
    /// An ESTIMATION of effective amount of bytes used for vectors
    /// Do NOT rely on this number unless you know what you are doing
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vectors_size_bytes: Option<usize>,
    /// An estimation of the effective amount of bytes used for payloads
    /// Do NOT rely on this number unless you know what you are doing
    #[serde(skip_serializing_if = "Option::is_none")]
    pub payloads_size_bytes: Option<usize>,
    /// Sum of segment points
    /// This is an approximate number
    /// Do NOT rely on this number unless you know what you are doing
    #[serde(skip_serializing_if = "Option::is_none")]
    pub num_points: Option<usize>,
    /// Sum of number of vectors in all segments
    /// This is an approximate number
    /// Do NOT rely on this number unless you know what you are doing
    #[serde(skip_serializing_if = "Option::is_none")]
    pub num_vectors: Option<usize>,
    /// Sum of number of vectors across all segments, grouped by their name.
    /// This is an approximate number.
    /// Do NOT rely on this number unless you know what you are doing
    #[serde(skip_serializing_if = "Option::is_none")]
    pub num_vectors_by_name: Option<HashMap<String, usize>>,
    /// Per-segment telemetry, when requested at a detailed telemetry level.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub segments: Option<Vec<SegmentTelemetry>>,
    /// Optimizer status and statistics for this shard.
    pub optimizations: OptimizerTelemetry,
    /// Whether the async scorer is enabled, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub async_scorer: Option<bool>,
    /// Number of vectors excluded from indexed-only search, keyed by vector name.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub indexed_only_excluded_vectors: Option<HashMap<String, usize>>,
}
/// Telemetry of the optimizer subsystem of a local shard.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize, Default)]
pub struct OptimizerTelemetry {
    pub status: OptimizersStatus,
    pub optimizations: OperationDurationStatistics,
    /// Optimizer tracker log, when requested at a detailed telemetry level.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub log: Option<Vec<TrackerTelemetry>>,
}
/// Telemetry of partial snapshot activity for a replica set.
#[derive(Copy, Clone, Debug, Serialize, JsonSchema, Anonymize)]
pub struct PartialSnapshotTelemetry {
    #[anonymize(false)]
    pub ongoing_create_snapshot_requests: usize,
    #[anonymize(false)]
    pub is_recovering: bool,
    #[anonymize(false)]
    pub recovery_timestamp: u64,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard.rs | lib/collection/src/shards/shard.rs | use core::marker::{Send, Sync};
use std::future::{self, Future};
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use common::types::TelemetryDetail;
use parking_lot::Mutex as ParkingMutex;
use segment::data_types::manifest::SnapshotManifest;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{Filter, SizeStats, SnapshotFormat};
use super::local_shard::clock_map::RecoveryPoint;
use super::update_tracker::UpdateTracker;
use crate::collection_manager::optimizers::TrackerLog;
use crate::operations::operation_effect::{EstimateOperationEffectArea, OperationEffectArea};
use crate::operations::types::{CollectionError, CollectionResult, OptimizersStatus};
use crate::shards::dummy_shard::DummyShard;
use crate::shards::forward_proxy_shard::ForwardProxyShard;
use crate::shards::local_shard::LocalShard;
use crate::shards::proxy_shard::ProxyShard;
use crate::shards::queue_proxy_shard::QueueProxyShard;
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::LocalShardTelemetry;
/// Numeric identifier of a shard within a collection.
pub type ShardId = u32;
/// Numeric identifier of a peer (node) in the cluster.
pub type PeerId = u64;
/// List of peers that should be used to place replicas of a shard
pub type ShardReplicasPlacement = Vec<PeerId>;
/// List of shards placements. Each element defines placements of replicas for a single shard.
///
/// Number of elements corresponds to the number of shards.
/// Example: [
///     [1, 2],
///     [2, 3],
///     [3, 4],
/// ] - 3 shards, each has 2 replicas
pub type ShardsPlacement = Vec<ShardReplicasPlacement>;
/// Shard
///
/// Contains a part of the collection's points
pub enum Shard {
    /// Shard whose data is stored on this node.
    Local(LocalShard),
    /// Proxy wrapping a local shard (see `ProxyShard`).
    Proxy(ProxyShard),
    /// Proxy that forwards operations while wrapping a local shard (see `ForwardProxyShard`).
    ForwardProxy(ForwardProxyShard),
    /// Proxy that queues operations (see `QueueProxyShard`).
    QueueProxy(QueueProxyShard),
    /// Placeholder shard that is not loaded (see `DummyShard`).
    Dummy(DummyShard),
}
impl Shard {
    /// Human-readable name of the shard variant, used in telemetry and error messages.
    pub fn variant_name(&self) -> &'static str {
        match self {
            Shard::Local(_) => "local shard",
            Shard::Proxy(_) => "proxy shard",
            Shard::ForwardProxy(_) => "forward proxy shard",
            Shard::QueueProxy(_) => "queue proxy shard",
            Shard::Dummy(_) => "dummy shard",
        }
    }
    /// Access the common shard operations of any variant through dynamic dispatch.
    pub fn get(&self) -> &(dyn ShardOperation + Sync + Send + '_) {
        match self {
            Shard::Local(local_shard) => local_shard,
            Shard::Proxy(proxy_shard) => proxy_shard,
            Shard::ForwardProxy(proxy_shard) => proxy_shard,
            Shard::QueueProxy(proxy_shard) => proxy_shard,
            Shard::Dummy(dummy_shard) => dummy_shard,
        }
    }
    /// Collect telemetry from the underlying shard and tag it with this variant's name.
    ///
    /// For local shards the shard status is fetched separately and filled in here.
    pub async fn get_telemetry_data(
        &self,
        detail: TelemetryDetail,
        timeout: Duration,
    ) -> CollectionResult<LocalShardTelemetry> {
        let mut telemetry = match self {
            Shard::Local(local_shard) => {
                let mut shard_telemetry = local_shard.get_telemetry_data(detail, timeout).await?;
                // can't take sync locks in async fn so local_shard_status() has to be
                // called outside get_telemetry_data()
                shard_telemetry.status = Some(local_shard.local_shard_status().await.0);
                shard_telemetry
            }
            Shard::Proxy(proxy_shard) => proxy_shard.get_telemetry_data(detail, timeout).await?,
            Shard::ForwardProxy(proxy_shard) => {
                proxy_shard.get_telemetry_data(detail, timeout).await?
            }
            Shard::QueueProxy(proxy_shard) => {
                proxy_shard.get_telemetry_data(detail, timeout).await?
            }
            Shard::Dummy(dummy_shard) => dummy_shard.get_telemetry_data(),
        };
        telemetry.variant_name = Some(self.variant_name().to_string());
        Ok(telemetry)
    }
    /// Current optimizer status of the underlying shard.
    pub async fn get_optimization_status(
        &self,
        timeout: Duration,
    ) -> CollectionResult<OptimizersStatus> {
        match self {
            Shard::Local(local_shard) => local_shard.get_optimization_status(timeout).await,
            Shard::Proxy(proxy_shard) => proxy_shard.get_optimization_status(timeout).await,
            Shard::ForwardProxy(proxy_shard) => proxy_shard.get_optimization_status(timeout).await,
            Shard::QueueProxy(queue_proxy_shard) => {
                queue_proxy_shard.get_optimization_status(timeout).await
            }
            Shard::Dummy(dummy_shard) => Ok(dummy_shard.get_optimization_status()),
        }
    }
    /// Size statistics (points/vectors/payload estimations) of the underlying shard.
    pub async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> {
        match self {
            Shard::Local(local_shard) => local_shard.get_size_stats(timeout).await,
            Shard::Proxy(proxy_shard) => proxy_shard.get_size_stats(timeout).await,
            Shard::ForwardProxy(proxy_shard) => proxy_shard.get_size_stats(timeout).await,
            Shard::QueueProxy(queue_proxy_shard) => queue_proxy_shard.get_size_stats(timeout).await,
            Shard::Dummy(dummy_shard) => Ok(dummy_shard.get_size_stats()),
        }
    }
    /// Create a snapshot of the underlying shard into the given tar builder.
    pub async fn create_snapshot(
        &self,
        temp_path: &Path,
        tar: &tar_ext::BuilderExt,
        format: SnapshotFormat,
        manifest: Option<SnapshotManifest>,
        save_wal: bool,
    ) -> CollectionResult<()> {
        match self {
            Shard::Local(local_shard) => {
                local_shard
                    .create_snapshot(temp_path, tar, format, manifest, save_wal)
                    .await
            }
            Shard::Proxy(proxy_shard) => {
                proxy_shard
                    .create_snapshot(temp_path, tar, format, manifest, save_wal)
                    .await
            }
            Shard::ForwardProxy(proxy_shard) => {
                proxy_shard
                    .create_snapshot(temp_path, tar, format, manifest, save_wal)
                    .await
            }
            Shard::QueueProxy(proxy_shard) => {
                proxy_shard
                    .create_snapshot(temp_path, tar, format, manifest, save_wal)
                    .await
            }
            Shard::Dummy(dummy_shard) => {
                dummy_shard
                    .create_snapshot(temp_path, tar, format, manifest, save_wal)
                    .await
            }
        }
    }
    /// Snapshot manifest of the underlying shard.
    pub async fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
        match self {
            Shard::Local(local_shard) => local_shard.snapshot_manifest().await,
            Shard::Proxy(proxy_shard) => proxy_shard.snapshot_manifest().await,
            Shard::ForwardProxy(proxy_shard) => proxy_shard.snapshot_manifest().await,
            Shard::QueueProxy(proxy_shard) => proxy_shard.snapshot_manifest().await,
            Shard::Dummy(dummy_shard) => dummy_shard.snapshot_manifest(),
        }
    }
    /// Propagate an optimizer configuration change to the underlying shard.
    ///
    /// ## Cancel safety
    ///
    /// This function is **not** cancel safe.
    pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
        match self {
            Shard::Local(local_shard) => local_shard.on_optimizer_config_update().await,
            Shard::Proxy(proxy_shard) => proxy_shard.on_optimizer_config_update().await,
            Shard::ForwardProxy(proxy_shard) => proxy_shard.on_optimizer_config_update().await,
            Shard::QueueProxy(proxy_shard) => proxy_shard.on_optimizer_config_update().await,
            Shard::Dummy(dummy_shard) => dummy_shard.on_optimizer_config_update().await,
        }
    }
    /// Propagate a strict-mode configuration change to the underlying shard.
    pub async fn on_strict_mode_config_update(&mut self) {
        match self {
            Shard::Local(local_shard) => local_shard.on_strict_mode_config_update().await,
            Shard::Proxy(proxy_shard) => proxy_shard.on_strict_mode_config_update().await,
            Shard::ForwardProxy(proxy_shard) => proxy_shard.on_strict_mode_config_update().await,
            Shard::QueueProxy(proxy_shard) => proxy_shard.on_strict_mode_config_update().await,
            Shard::Dummy(dummy_shard) => dummy_shard.on_strict_mode_config_update().await,
        }
    }
    /// Ask the underlying shard to run its optimizers; no-op for dummy shards.
    pub fn trigger_optimizers(&self) {
        match self {
            Shard::Local(local_shard) => local_shard.trigger_optimizers(),
            Shard::Proxy(proxy_shard) => proxy_shard.trigger_optimizers(),
            Shard::ForwardProxy(forward_proxy_shard) => {
                forward_proxy_shard.trigger_optimizers();
            }
            Shard::QueueProxy(queue_proxy_shard) => queue_proxy_shard.trigger_optimizers(),
            Shard::Dummy(_) => (),
        }
    }
    /// Whether an update operation is currently in flight; always false for dummy shards.
    pub fn is_update_in_progress(&self) -> bool {
        self.update_tracker()
            .is_some_and(UpdateTracker::is_update_in_progress)
    }
    /// Future that resolves when the next update starts.
    ///
    /// For dummy shards (no update tracker) the future never resolves.
    pub fn watch_for_update(&self) -> impl Future<Output = ()> {
        let update_watcher = self.update_tracker().map(UpdateTracker::watch_for_update);
        async move {
            match update_watcher {
                Some(update_watcher) => update_watcher.await,
                None => future::pending().await,
            }
        }
    }
    // Update tracker of the underlying shard; `None` for dummy shards.
    fn update_tracker(&self) -> Option<&UpdateTracker> {
        let update_tracker = match self {
            Self::Local(local_shard) => local_shard.update_tracker(),
            Self::Proxy(proxy_shard) => proxy_shard.update_tracker(),
            Self::ForwardProxy(proxy_shard) => proxy_shard.update_tracker(),
            Self::QueueProxy(proxy_shard) => proxy_shard.update_tracker(),
            Self::Dummy(_) => return None,
        };
        Some(update_tracker)
    }
    /// Optimizer tracker log of the underlying shard; `None` for dummy shards.
    pub fn optimizers_log(&self) -> Option<Arc<ParkingMutex<TrackerLog>>> {
        let optimizers_log = match self {
            Self::Local(local_shard) => local_shard.optimizers_log(),
            Self::Proxy(proxy_shard) => proxy_shard.optimizers_log(),
            Self::ForwardProxy(proxy_shard) => proxy_shard.optimizers_log(),
            Self::QueueProxy(proxy_shard) => proxy_shard.optimizers_log(),
            Self::Dummy(_) => return None,
        };
        Some(optimizers_log)
    }
    /// Recovery point of the shard; only supported for local and forward-proxy variants.
    pub async fn shard_recovery_point(&self) -> CollectionResult<RecoveryPoint> {
        match self {
            Self::Local(local_shard) => Ok(local_shard.recovery_point().await),
            Self::ForwardProxy(proxy_shard) => Ok(proxy_shard.wrapped_shard.recovery_point().await),
            Self::Proxy(_) | Self::QueueProxy(_) | Self::Dummy(_) => {
                Err(CollectionError::service_error(format!(
                    "Recovery point not supported on {}",
                    self.variant_name(),
                )))
            }
        }
    }
    /// Take a snapshot of the newest clocks on the wrapped local shard, if there is one.
    pub async fn take_newest_clocks_snapshot(&self) -> CollectionResult<()> {
        match self {
            Self::Local(local_shard) => local_shard.take_newest_clocks_snapshot().await,
            Self::Proxy(ProxyShard { wrapped_shard, .. })
            | Self::ForwardProxy(ForwardProxyShard { wrapped_shard, .. }) => {
                wrapped_shard.take_newest_clocks_snapshot().await
            }
            Self::QueueProxy(proxy) => {
                if let Some(local_shard) = proxy.wrapped_shard() {
                    local_shard.take_newest_clocks_snapshot().await
                } else {
                    Ok(())
                }
            }
            // Ignore dummy shard, it is not loaded
            Self::Dummy(_) => Ok(()),
        }
    }
    /// Clear the newest-clocks snapshot on the wrapped local shard, if there is one.
    pub async fn clear_newest_clocks_snapshot(&self) -> CollectionResult<()> {
        match self {
            Self::Local(local_shard) => local_shard.clear_newest_clocks_snapshot().await,
            Self::Proxy(ProxyShard { wrapped_shard, .. })
            | Self::ForwardProxy(ForwardProxyShard { wrapped_shard, .. }) => {
                wrapped_shard.clear_newest_clocks_snapshot().await
            }
            Self::QueueProxy(proxy) => {
                if let Some(local_shard) = proxy.wrapped_shard() {
                    local_shard.clear_newest_clocks_snapshot().await
                } else {
                    Ok(())
                }
            }
            // Ignore dummy shard, it is not loaded
            Self::Dummy(_) => Ok(()),
        }
    }
    /// Update the WAL cutoff point; only supported on local shards.
    pub async fn update_cutoff(&self, cutoff: &RecoveryPoint) -> CollectionResult<()> {
        match self {
            Self::Local(local_shard) => local_shard.update_cutoff(cutoff).await,
            Self::Proxy(_) | Self::ForwardProxy(_) | Self::QueueProxy(_) | Self::Dummy(_) => {
                return Err(CollectionError::service_error(format!(
                    "Setting cutoff point not supported on {}",
                    self.variant_name(),
                )));
            }
        }
        Ok(())
    }
    /// Resolve the WAL version to start a delta transfer from, given a remote
    /// recovery point; only supported on local shards.
    ///
    /// Returns `Ok(None)` when the resolved delta is empty.
    pub async fn resolve_wal_delta(
        &self,
        recovery_point: RecoveryPoint,
    ) -> CollectionResult<Option<u64>> {
        let wal = match self {
            Self::Local(local_shard) => &local_shard.wal,
            Self::Proxy(_) | Self::ForwardProxy(_) | Self::QueueProxy(_) | Self::Dummy(_) => {
                return Err(CollectionError::service_error(format!(
                    "Cannot resolve WAL delta on {}",
                    self.variant_name(),
                )));
            }
        };
        // Resolve WAL delta and report
        match wal.resolve_wal_delta(recovery_point).await {
            Ok(Some(version)) => {
                log::debug!(
                    "Resolved WAL delta from {version}, which counts {} records",
                    wal.wal.lock().await.last_index().saturating_sub(version),
                );
                Ok(Some(version))
            }
            Ok(None) => {
                log::debug!("Resolved WAL delta that is empty");
                Ok(None)
            }
            Err(err) => Err(CollectionError::service_error(format!(
                "Failed to resolve WAL delta on local shard: {err}"
            ))),
        }
    }
    /// Current WAL version; only supported on local shards.
    pub async fn wal_version(&self) -> CollectionResult<Option<u64>> {
        match self {
            Self::Local(local_shard) => local_shard.wal.wal_version().await.map_err(|err| {
                CollectionError::service_error(format!(
                    "Cannot get WAL version on {}: {err}",
                    self.variant_name(),
                ))
            }),
            Self::Proxy(_) | Self::ForwardProxy(_) | Self::QueueProxy(_) | Self::Dummy(_) => {
                Err(CollectionError::service_error(format!(
                    "Cannot get WAL version on {}",
                    self.variant_name(),
                )))
            }
        }
    }
    /// Estimate how many points match the given filter.
    pub async fn estimate_cardinality(
        &self,
        filter: Option<&Filter>,
        hw_measurement_acc: &HwMeasurementAcc,
    ) -> CollectionResult<CardinalityEstimation> {
        match self {
            Shard::Local(local_shard) => {
                local_shard
                    .estimate_cardinality(filter, hw_measurement_acc)
                    .await
            }
            Shard::Proxy(proxy_shard) => {
                proxy_shard
                    .estimate_cardinality(filter, hw_measurement_acc)
                    .await
            }
            Shard::ForwardProxy(forward_proxy_shard) => {
                forward_proxy_shard
                    .estimate_cardinality(filter, hw_measurement_acc)
                    .await
            }
            Shard::QueueProxy(queue_proxy_shard) => {
                queue_proxy_shard
                    .estimate_cardinality(filter, hw_measurement_acc)
                    .await
            }
            Shard::Dummy(dummy_shard) => dummy_shard.estimate_cardinality(filter),
        }
    }
    /// Estimate how many points an operation affects, based on its effect area.
    ///
    /// Point lists are exact; filters fall back to `estimate_cardinality`.
    pub async fn estimate_request_cardinality(
        &self,
        operation: &impl EstimateOperationEffectArea,
        hw_measurement_acc: &HwMeasurementAcc,
    ) -> CollectionResult<CardinalityEstimation> {
        match operation.estimate_effect_area() {
            OperationEffectArea::Empty => Ok(CardinalityEstimation::exact(0)),
            OperationEffectArea::Points(vec) => Ok(CardinalityEstimation::exact(vec.len())),
            OperationEffectArea::Filter(filter) => {
                self.estimate_cardinality(Some(filter), hw_measurement_acc)
                    .await
            }
        }
    }
    /// Stop background operations of the underlying shard and wait for them to finish.
    pub async fn stop_gracefully(self) {
        match self {
            Shard::Local(local_shard) => local_shard.stop_gracefully().await,
            Shard::Proxy(proxy_shard) => proxy_shard.stop_gracefully().await,
            Shard::ForwardProxy(forward_proxy_shard) => forward_proxy_shard.stop_gracefully().await,
            Shard::QueueProxy(queue_proxy_shard) => queue_proxy_shard.stop_gracefully().await,
            Shard::Dummy(_) => {}
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard_trait.rs | lib/collection/src/shards/shard_trait.rs | use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::types::*;
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use crate::operations::OperationWithClockTag;
use crate::operations::types::*;
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
/// Common interface implemented by every shard variant.
#[async_trait]
pub trait ShardOperation {
    /// Apply an update operation to the shard; `wait` blocks until the update is applied.
    async fn update(
        &self,
        operation: OperationWithClockTag,
        wait: bool,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult>;
    /// Scroll points according to the request parameters.
    async fn scroll_by(
        &self,
        request: Arc<ScrollRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>>;
    /// Scroll points ordered by their IDs.
    /// Intended for internal use only.
    /// This API is excluded from the rate limits and logging.
    #[allow(clippy::too_many_arguments)]
    async fn local_scroll_by_id(
        &self,
        offset: Option<ExtendedPointId>,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>>;
    /// Collection-level info as reported by this shard.
    async fn info(&self) -> CollectionResult<CollectionInfo>;
    /// Execute a batch of core search requests; one result list per request.
    async fn core_search(
        &self,
        request: Arc<CoreSearchRequestBatch>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>>;
    /// Count points matching the request.
    async fn count(
        &self,
        request: Arc<CountRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<CountResult>;
    /// Retrieve points by id with the requested payload and vector selectors.
    async fn retrieve(
        &self,
        request: Arc<PointRequestInternal>,
        with_payload: &WithPayload,
        with_vector: &WithVector,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hardware_accumulator: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>>;
    /// Execute a batch of universal-query requests; one response per request.
    async fn query_batch(
        &self,
        requests: Arc<Vec<ShardQueryRequest>>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>>;
    /// Compute facets (value counts) for the requested payload field.
    async fn facet(
        &self,
        request: Arc<FacetParams>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<FacetResponse>;
    /// Signal `Stop` to all background operations gracefully
    /// and wait till they are finished.
    async fn stop_gracefully(self);
}
/// Trait-object alias for shard operations shared across threads.
pub type ShardOperationSS = dyn ShardOperation + Send + Sync;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/proxy_shard.rs | lib/collection/src/shards/proxy_shard.rs | use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Duration;
use ahash::AHashSet;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use common::types::TelemetryDetail;
use parking_lot::Mutex as ParkingMutex;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::data_types::manifest::SnapshotManifest;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{
ExtendedPointId, Filter, PointIdType, ScoredPoint, SizeStats, SnapshotFormat, WithPayload,
WithPayloadInterface, WithVector,
};
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use tokio::sync::{RwLock, oneshot};
use tokio::time::timeout;
use super::update_tracker::UpdateTracker;
use crate::collection_manager::optimizers::TrackerLog;
use crate::operations::OperationWithClockTag;
use crate::operations::operation_effect::{
EstimateOperationEffectArea, OperationEffectArea, PointsOperationEffect,
};
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CountRequestInternal, CountResult,
OptimizersStatus, PointRequestInternal, ScrollRequestInternal, UpdateResult,
};
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::LocalShardTelemetry;
use crate::update_handler::UpdateSignal;
type ChangedPointsSet = Arc<RwLock<AHashSet<PointIdType>>>;
/// ProxyShard
///
/// ProxyShard is a wrapper type for a LocalShard.
///
/// It can be used to provide all read and write operations while the wrapped shard is being transferred to another node.
/// It keeps track of changed points during the shard transfer to assure consistency.
pub struct ProxyShard {
pub(super) wrapped_shard: LocalShard,
changed_points: ChangedPointsSet,
pub changed_alot: AtomicBool,
}
/// Max number of updates tracked to synchronize after the transfer.
const MAX_CHANGES_TRACKED_COUNT: usize = 10_000;
/// How much time can we wait for the update queue to be empty.
/// We don't want false positive here, so it should be large.
/// If the queue stuck - it means something wrong with application logic.
const UPDATE_QUEUE_CLEAR_TIMEOUT: Duration = Duration::from_secs(1);
const UPDATE_QUEUE_CLEAR_MAX_TIMEOUT: Duration = Duration::from_secs(128);
impl ProxyShard {
#[allow(unused)]
pub async fn new(wrapped_shard: LocalShard) -> Self {
let res = Self {
wrapped_shard,
changed_points: Default::default(),
changed_alot: Default::default(),
};
res.reinit_changelog().await;
res
}
/// Forward `create_snapshot` to `wrapped_shard`
pub async fn create_snapshot(
&self,
temp_path: &Path,
tar: &tar_ext::BuilderExt,
format: SnapshotFormat,
manifest: Option<SnapshotManifest>,
save_wal: bool,
) -> CollectionResult<()> {
self.wrapped_shard
.create_snapshot(temp_path, tar, format, manifest, save_wal)
.await
}
pub async fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
self.wrapped_shard.snapshot_manifest().await
}
pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
self.wrapped_shard.on_optimizer_config_update().await
}
pub async fn on_strict_mode_config_update(&mut self) {
self.wrapped_shard.on_strict_mode_config_update().await;
}
pub fn trigger_optimizers(&self) {
// TODO: we might want to defer this trigger until we unproxy
self.wrapped_shard.trigger_optimizers();
}
pub async fn reinit_changelog(&self) -> CollectionResult<()> {
// Blocks updates in the wrapped shard.
let mut changed_points_guard = self.changed_points.write().await;
// Clear the update queue
let mut attempt = 1;
loop {
let (tx, rx) = oneshot::channel();
let plunger = UpdateSignal::Plunger(tx);
self.wrapped_shard
.update_sender
.load()
.send(plunger)
.await?;
let attempt_timeout = UPDATE_QUEUE_CLEAR_TIMEOUT * 2_u32.pow(attempt);
// It is possible, that the queue is recreated while we are waiting for plunger.
// So we will timeout and try again
if timeout(attempt_timeout, rx).await.is_err() {
log::warn!(
"Timeout {attempt_timeout:?} while waiting for the wrapped shard to finish the update queue, retrying",
);
attempt += 1;
if attempt_timeout > UPDATE_QUEUE_CLEAR_MAX_TIMEOUT {
return Err(CollectionError::service_error(
"Timeout while waiting for the wrapped shard to finish the update queue"
.to_string(),
));
}
continue;
}
break;
}
// Update queue is clear now
// Clear the changed_points set
changed_points_guard.clear();
// Clear changed_alot flag
self.changed_alot
.store(false, std::sync::atomic::Ordering::Relaxed);
Ok(())
}
pub async fn get_telemetry_data(
&self,
detail: TelemetryDetail,
timeout: Duration,
) -> CollectionResult<LocalShardTelemetry> {
self.wrapped_shard.get_telemetry_data(detail, timeout).await
}
pub async fn get_optimization_status(
&self,
timeout: Duration,
) -> CollectionResult<OptimizersStatus> {
self.wrapped_shard.get_optimization_status(timeout).await
}
pub async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> {
self.wrapped_shard.get_size_stats(timeout).await
}
pub fn update_tracker(&self) -> &UpdateTracker {
self.wrapped_shard.update_tracker()
}
pub fn optimizers_log(&self) -> Arc<ParkingMutex<TrackerLog>> {
self.wrapped_shard.optimizers_log()
}
pub async fn estimate_cardinality(
&self,
filter: Option<&Filter>,
hw_measurement_acc: &HwMeasurementAcc,
) -> CollectionResult<CardinalityEstimation> {
self.wrapped_shard
.estimate_cardinality(filter, hw_measurement_acc)
.await
}
}
#[async_trait]
impl ShardOperation for ProxyShard {
/// Update `wrapped_shard` while keeping track of the changed points
///
/// # Cancel safety
///
/// This method is *not* cancel safe.
async fn update(
&self,
operation: OperationWithClockTag,
wait: bool,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
// If we modify `self.changed_points`, we *have to* (?) execute `local_shard` update
// to completion, so this method is not cancel safe.
let local_shard = &self.wrapped_shard;
let estimate_effect = operation.operation.estimate_effect_area();
let points_operation_effect: PointsOperationEffect = match estimate_effect {
OperationEffectArea::Empty => PointsOperationEffect::Empty,
OperationEffectArea::Points(points) => PointsOperationEffect::Some(Vec::from(points)),
OperationEffectArea::Filter(filter) => {
let cardinality = local_shard
.estimate_cardinality(Some(filter), &hw_measurement_acc)
.await?;
// validate the size of the change set before retrieving it
if cardinality.max > MAX_CHANGES_TRACKED_COUNT {
PointsOperationEffect::Many
} else {
let runtime_handle = self.wrapped_shard.search_runtime.clone();
let points = local_shard
.read_filtered(Some(filter), &runtime_handle, hw_measurement_acc.clone())
.await?;
PointsOperationEffect::Some(points.into_iter().collect())
}
}
};
{
let mut changed_points_guard = self.changed_points.write().await;
match points_operation_effect {
PointsOperationEffect::Empty => {}
PointsOperationEffect::Some(points) => {
for point in points {
// points updates are recorded but never trigger in `changed_alot`
changed_points_guard.insert(point);
}
}
PointsOperationEffect::Many => {
self.changed_alot
.store(true, std::sync::atomic::Ordering::Relaxed);
}
}
// Shard update is within a write lock scope, because we need a way to block the shard updates
// during the transfer restart and finalization.
local_shard
.update(operation, wait, hw_measurement_acc)
.await
}
}
/// Forward read-only `scroll_by` to `wrapped_shard`
async fn scroll_by(
&self,
request: Arc<ScrollRequestInternal>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.scroll_by(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
/// Forward read-only `local_scroll_by_id` to `wrapped_shard`
async fn local_scroll_by_id(
&self,
offset: Option<ExtendedPointId>,
limit: usize,
with_payload_interface: &WithPayloadInterface,
with_vector: &WithVector,
filter: Option<&Filter>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.local_scroll_by_id(
offset,
limit,
with_payload_interface,
with_vector,
filter,
search_runtime_handle,
timeout,
hw_measurement_acc,
)
.await
}
/// Forward read-only `info` to `wrapped_shard`
async fn info(&self) -> CollectionResult<CollectionInfo> {
let local_shard = &self.wrapped_shard;
local_shard.info().await
}
/// Forward read-only `search` to `wrapped_shard`
async fn core_search(
&self,
request: Arc<CoreSearchRequestBatch>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
let local_shard = &self.wrapped_shard;
local_shard
.core_search(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
/// Forward read-only `count` to `wrapped_shard`
async fn count(
&self,
request: Arc<CountRequestInternal>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<CountResult> {
let local_shard = &self.wrapped_shard;
local_shard
.count(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
/// Forward read-only `retrieve` to `wrapped_shard`
async fn retrieve(
&self,
request: Arc<PointRequestInternal>,
with_payload: &WithPayload,
with_vector: &WithVector,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.retrieve(
request,
with_payload,
with_vector,
search_runtime_handle,
timeout,
hw_measurement_acc,
)
.await
}
/// Forward read-only `query` to `wrapped_shard`
async fn query_batch(
&self,
request: Arc<Vec<ShardQueryRequest>>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ShardQueryResponse>> {
let local_shard = &self.wrapped_shard;
local_shard
.query_batch(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn facet(
&self,
request: Arc<FacetParams>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<FacetResponse> {
let local_shard = &self.wrapped_shard;
local_shard
.facet(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn stop_gracefully(self) {
let local_shard = self.wrapped_shard;
local_shard.stop_gracefully().await;
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/mod.rs | lib/collection/src/shards/mod.rs | pub mod channel_service;
pub mod collection_shard_distribution;
mod conversions;
pub mod dummy_shard;
pub mod forward_proxy_shard;
pub mod local_shard;
pub mod proxy_shard;
pub mod queue_proxy_shard;
pub mod remote_shard;
pub mod replica_set;
pub mod resharding;
pub mod resolve;
pub mod shard;
pub mod shard_config;
pub mod shard_holder;
pub mod shard_trait;
pub mod telemetry;
pub mod transfer;
pub mod update_tracker;
#[cfg(test)]
mod test;
use std::path::{Path, PathBuf};
use channel_service::ChannelService;
use common::defaults;
use fs_err::tokio as tokio_fs;
use shard::ShardId;
use tokio::time::{sleep_until, timeout_at};
use transfer::ShardTransferConsensus;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::shard_config::ShardConfig;
pub type CollectionId = String;
pub type ShardVersion = usize;
/// Path to a shard directory
pub fn shard_path(collection_path: &Path, shard_id: ShardId) -> PathBuf {
collection_path.join(format!("{shard_id}"))
}
/// Path to a shard directory
pub fn shard_initializing_flag_path(collection_path: &Path, shard_id: ShardId) -> PathBuf {
collection_path.join(format!("shard_{shard_id}.initializing"))
}
/// Verify that a shard exists by loading its configuration.
/// Returns the path to the shard if it exists.
pub async fn check_shard_path(
collection_path: &Path,
shard_id: ShardId,
) -> CollectionResult<PathBuf> {
let path = shard_path(collection_path, shard_id);
let shard_config_opt = ShardConfig::load(&path)?;
if shard_config_opt.is_some() {
Ok(path)
} else {
Err(CollectionError::service_error(format!(
"No shard found: {shard_id} at {collection_path}",
shard_id = shard_id,
collection_path = collection_path.display()
)))
}
}
pub async fn create_shard_dir(
collection_path: &Path,
shard_id: ShardId,
) -> CollectionResult<PathBuf> {
let shard_path = shard_path(collection_path, shard_id);
match tokio_fs::create_dir(&shard_path).await {
Ok(_) => Ok(shard_path),
// If the directory already exists, remove it and create it again
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
log::warn!("Shard path already exists, removing and creating again: {shard_path:?}");
tokio_fs::remove_dir_all(&shard_path)
.await
.map_err(CollectionError::from)?;
tokio_fs::create_dir(&shard_path)
.await
.map_err(CollectionError::from)?;
Ok(shard_path)
}
Err(e) => Err(CollectionError::from(e)),
}
}
/// Await for consensus to synchronize across all peers
///
/// This will take the current consensus state of this node. It then explicitly waits on all other
/// nodes to reach the same (or later) consensus.
///
/// If awaiting on other nodes fails for any reason, this simply continues after the consensus
/// timeout.
///
/// # Cancel safety
///
/// This function is cancel safe.
async fn await_consensus_sync(
consensus: &dyn ShardTransferConsensus,
channel_service: &ChannelService,
) {
let wait_until = tokio::time::Instant::now() + defaults::CONSENSUS_META_OP_WAIT;
let sync_consensus =
timeout_at(wait_until, consensus.await_consensus_sync(channel_service)).await;
match sync_consensus {
Ok(Ok(_)) => log::trace!("All peers reached consensus"),
// Failed to sync explicitly, waiting until timeout to assume synchronization
Ok(Err(err)) => {
log::warn!("All peers failed to synchronize consensus, waiting until timeout: {err}");
sleep_until(wait_until).await;
}
// Reached timeout, assume consensus is synchronized
Err(err) => {
log::warn!(
"All peers failed to synchronize consensus, continuing after timeout: {err}"
);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/channel_service.rs | lib/collection/src/shards/channel_service.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use api::grpc::qdrant::WaitOnConsensusCommitRequest;
use api::grpc::qdrant::qdrant_internal_client::QdrantInternalClient;
use api::grpc::transport_channel_pool::{AddTimeout, TransportChannelPool};
use futures::Future;
use futures::future::try_join_all;
use semver::Version;
use tonic::codegen::InterceptedService;
use tonic::transport::{Channel, Uri};
use tonic::{Request, Status};
use url::Url;
use crate::operations::types::{CollectionError, CollectionResult, PeerMetadata};
use crate::shards::shard::PeerId;
#[derive(Clone)]
pub struct ChannelService {
// Shared with consensus_state
pub id_to_address: Arc<parking_lot::RwLock<HashMap<PeerId, Uri>>>,
// Shared with consensus_state
pub id_to_metadata: Arc<parking_lot::RwLock<HashMap<PeerId, PeerMetadata>>>,
pub channel_pool: Arc<TransportChannelPool>,
/// Port at which the public REST API is exposed for the current peer.
pub current_rest_port: u16,
/// Instance wide API key if configured, must be used with care.
pub api_key: Option<String>,
}
impl ChannelService {
/// Construct a new channel service with the given REST port.
pub fn new(current_rest_port: u16, api_key: Option<String>) -> Self {
Self {
id_to_address: Default::default(),
id_to_metadata: Default::default(),
channel_pool: Default::default(),
current_rest_port,
api_key,
}
}
pub async fn remove_peer(&self, peer_id: PeerId) {
let removed = self.id_to_address.write().remove(&peer_id);
if let Some(uri) = removed {
self.channel_pool.drop_pool(&uri).await;
}
}
/// Wait until all other known peers reach the given commit
///
/// # Errors
///
/// This errors if:
/// - any of the peers is not on the same term
/// - waiting takes longer than the specified timeout
/// - any of the peers cannot be reached
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn await_commit_on_all_peers(
&self,
this_peer_id: PeerId,
commit: u64,
term: u64,
timeout: Duration,
) -> CollectionResult<()> {
let requests = self
.id_to_address
.read()
.keys()
.filter(|id| **id != this_peer_id)
// The collective timeout at the bottom of this function handles actually timing out.
// Since an explicit timeout must be given here as well, it is multiplied by two to
// give the collective timeout some space.
.map(|peer_id| self.await_commit_on_peer(*peer_id, commit, term, timeout * 2))
.collect::<Vec<_>>();
let responses = try_join_all(requests);
// Handle requests with timeout
tokio::time::timeout(timeout, responses)
.await
// Timeout error
.map_err(|_elapsed| CollectionError::Timeout {
description: "Failed to wait for consensus commit on all peers, timed out.".into(),
})?
// Await consensus error
.map_err(|err| {
CollectionError::service_error(format!(
"Failed to wait for consensus commit on peer: {err}"
))
})?;
Ok(())
}
/// Wait until the given peer reaches the given commit
///
/// # Errors
///
/// This errors if the given peer is on a different term. Also errors if the peer cannot be reached.
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn await_commit_on_peer(
&self,
peer_id: PeerId,
commit: u64,
term: u64,
timeout: Duration,
) -> CollectionResult<()> {
let response = self
.with_qdrant_client(peer_id, |mut client| async move {
let request = WaitOnConsensusCommitRequest {
commit: commit as i64,
term: term as i64,
timeout: timeout.as_secs() as i64,
};
client.wait_on_consensus_commit(Request::new(request)).await
})
.await
.map_err(|err| {
CollectionError::service_error(format!(
"Failed to wait for consensus commit on peer {peer_id}: {err}"
))
})?
.into_inner();
// Create error if wait request failed
if !response.ok {
return Err(CollectionError::service_error(format!(
"Failed to wait for consensus commit on peer {peer_id}, has diverged commit/term or timed out."
)));
}
Ok(())
}
async fn with_qdrant_client<T, O: Future<Output = Result<T, Status>>>(
&self,
peer_id: PeerId,
f: impl Fn(QdrantInternalClient<InterceptedService<Channel, AddTimeout>>) -> O,
) -> CollectionResult<T> {
let address = self
.id_to_address
.read()
.get(&peer_id)
.ok_or_else(|| CollectionError::service_error("Address for peer ID is not found."))?
.clone();
self.channel_pool
.with_channel(&address, |channel| {
let client = QdrantInternalClient::new(channel);
let client = client.max_decoding_message_size(usize::MAX);
f(client)
})
.await
.map_err(Into::into)
}
/// Check whether all peers are running at least the given version
///
/// If the version is not known for any peer, this returns `false`.
/// Peer versions are known since 1.9 and up.
pub fn all_peers_at_version(&self, version: &Version) -> bool {
let id_to_address = self.id_to_address.read();
let id_to_metadata = self.id_to_metadata.read();
// Ensure there aren't more peer addresses than metadata
if id_to_address.len() > id_to_metadata.len() {
return false;
}
id_to_metadata
.values()
.all(|metadata| &metadata.version >= version)
}
/// Check whether the specified peer is running at least the given version
///
/// If the version is not known for the peer, this returns `false`.
/// Peer versions are known since 1.9 and up.
pub fn peer_is_at_version(&self, peer_id: PeerId, version: &Version) -> bool {
self.id_to_metadata
.read()
.get(&peer_id)
.is_some_and(|metadata| &metadata.version >= version)
}
/// Get the REST address for the current peer.
pub fn current_rest_address(&self, this_peer_id: PeerId) -> CollectionResult<Url> {
// Get local peer URI
let local_peer_uri = self
.id_to_address
.read()
.get(&this_peer_id)
.cloned()
.ok_or_else(|| {
CollectionError::service_error(format!(
"Cannot determine REST address, this peer not found in cluster by ID {this_peer_id} ",
))
})?;
// Construct REST URL from URI
let mut url = Url::parse(&local_peer_uri.to_string()).expect("Malformed URL");
url.set_port(Some(self.current_rest_port))
.map_err(|()| {
CollectionError::service_error(format!(
"Cannot determine REST address, cannot specify port on address {url} for peer ID {this_peer_id}",
))
})?;
Ok(url)
}
}
#[cfg(test)]
impl Default for ChannelService {
fn default() -> Self {
Self {
id_to_address: Default::default(),
id_to_metadata: Default::default(),
channel_pool: Default::default(),
current_rest_port: 6333,
api_key: None,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/resolve.rs | lib/collection/src/shards/resolve.rs | use std::collections::{HashMap, HashSet};
use std::hash;
use std::iter::Peekable;
use std::rc::Rc;
use itertools::Itertools;
use segment::data_types::facets::{FacetResponse, FacetValue};
use segment::types::{Payload, ScoredPoint};
use shard::retrieve::record_internal::RecordInternal;
use tinyvec::TinyVec;
use crate::common::transpose_iterator::transposed_iter;
use crate::operations::types::CountResult;
use crate::operations::universal_query::shard_query::ShardQueryResponse;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ResolveCondition {
All,
Majority,
}
impl ResolveCondition {
fn resolution_count(&self, num_replicas: usize) -> usize {
match self {
Self::All => num_replicas,
Self::Majority => num_replicas / 2 + 1,
}
}
}
pub trait Resolve: Default + Sized {
fn resolve(responses: Vec<Self>, condition: ResolveCondition) -> Self;
}
impl Resolve for CountResult {
fn resolve(records: Vec<Self>, condition: ResolveCondition) -> Self {
match condition {
ResolveCondition::All => Self {
count: records
.iter()
.map(|result| result.count)
.min()
.unwrap_or_default(),
},
ResolveCondition::Majority => {
let mut counts = records
.iter()
.map(|result| result.count)
.collect::<Vec<_>>();
counts.sort_unstable();
let middle = counts.len() / 2;
Self {
count: counts.get(middle).copied().unwrap_or_default(),
}
}
}
}
}
impl Resolve for FacetResponse {
/// Resolve the counts for each value using the CountResult implementation
fn resolve(responses: Vec<Self>, condition: ResolveCondition) -> Self {
let num_replicas = responses.len();
let resolution_count = condition.resolution_count(num_replicas);
// Example responses:
// [
// {
// hits: [
// { value: "a", count: 20 },
// { value: "b": count: 15 }
// ]
// },
// {
// hits: [
// { value: "a", count: 21 },
// { value: "b": count: 13 }
// ]
// },
// ]
let resolved_counts: HashMap<_, _> = responses
.iter()
.flat_map(|FacetResponse { hits }| hits)
// Collect all hits into a Hashmap of {value -> Vec<CountResult>}
.fold(
HashMap::new(),
|mut map: HashMap<FacetValue, Vec<CountResult>>, hit| {
if let Some(counts) = map.get_mut(&hit.value) {
counts.push(CountResult { count: hit.count });
} else {
map.entry(hit.value.clone())
.or_insert(Vec::with_capacity(num_replicas))
.push(CountResult { count: hit.count });
};
map
},
)
.into_iter()
// Filter out values that don't appear in enough replicas
.filter(|(_, counts)| counts.len() >= resolution_count)
// Resolve the counts with the CountResult implementation
.map(|(value, counts)| {
let count = CountResult::resolve(counts, condition).count;
(value, count)
})
.collect();
let filtered_iters = responses.into_iter().map(|FacetResponse { hits }| {
hits.into_iter().filter_map(|mut hit| {
resolved_counts.get(&hit.value).map(|&count| {
// Use the resolved count
hit.count = count;
hit
})
})
});
// Retain the original order of the hits (instead of always sorting in the same direction).
let resolved_hits =
MergeInOrder::new(filtered_iters, |hit| hit.value.clone(), resolution_count).collect();
// resolved_hits for ResolveCondition::All:
// [
// { value: "a", count: 20 },
// { value: "b", count: 13 }
// ]
FacetResponse {
hits: resolved_hits,
}
}
}
impl Resolve for Vec<RecordInternal> {
fn resolve(records: Vec<Self>, condition: ResolveCondition) -> Self {
Resolver::resolve(records, |record| record.id, record_eq, condition)
}
}
impl Resolve for Vec<Vec<ScoredPoint>> {
fn resolve(batches: Vec<Self>, condition: ResolveCondition) -> Self {
// batches: <replica_id, <batch_id, ScoredPoints>>
// transpose to <batch_id, <replica_id, ScoredPoints>>
let batches = transposed_iter(batches);
batches
.map(|points| Resolver::resolve(points, |point| point.id, scored_point_eq, condition))
.collect()
}
}
impl Resolve for Vec<ShardQueryResponse> {
fn resolve(batches: Vec<Self>, condition: ResolveCondition) -> Self {
// batches: <replica_id, <batch_id, ShardQueryResponse>>
// transpose to <batch_id, <replica_id, ShardQueryResponse>>
let batches = transposed_iter(batches);
batches
.into_iter()
.map(|shard_responses| Resolve::resolve(shard_responses, condition))
.collect()
}
}
fn record_eq(this: &RecordInternal, other: &RecordInternal) -> bool {
this.id == other.id
&& this.order_value == other.order_value
&& this.vector == other.vector
&& payload_eq(&this.payload, &other.payload)
}
fn scored_point_eq(this: &ScoredPoint, other: &ScoredPoint) -> bool {
this.id == other.id
&& this.score == other.score
&& this.order_value == other.order_value
&& this.vector == other.vector
&& payload_eq(&this.payload, &other.payload)
}
fn payload_eq(this: &Option<Payload>, other: &Option<Payload>) -> bool {
match (this, other) {
(Some(payload), None) | (None, Some(payload)) => payload.is_empty(),
(this, other) => this == other,
}
}
/// Expected number of replicas
const EXPECTED_REPLICAS: usize = 5;
type ResolverRecords<'a, Item> = TinyVec<[ResolverRecord<'a, Item>; EXPECTED_REPLICAS]>;
struct Resolver<'a, Item, Id, Ident, Cmp> {
items: HashMap<Id, ResolverRecords<'a, Item>>,
identify: Ident,
compare: Cmp,
}
impl<'a, Item, Id, Ident, Cmp> Resolver<'a, Item, Id, Ident, Cmp>
where
Id: Eq + hash::Hash,
Ident: Fn(&Item) -> Id + Copy,
Cmp: Fn(&Item, &Item) -> bool,
{
pub fn resolve(
items: Vec<Vec<Item>>,
identify: Ident,
compare: Cmp,
condition: ResolveCondition,
) -> Vec<Item> {
let resolution_count = condition.resolution_count(items.len());
// Items:
// [
// [
// { id: 10, item: A, score: 0.9 },
// { id: 3, item: B, score: 0.8 },
// { id: 4, item: C, score: 0.7 }
// ],
// [
// { id: 10, item: A, score: 0.9 },
// { id: 3, item: B, score: 0.8 },
// { id: 4, item: C, score: 0.7 }
// ],
// [
// { id: 10, item: A, score: 0.9 },
// { id: 4, item: C, score: 0.7 },
// { id: 2, item: D, score: 0.6 }
// ]
// ]
let mut resolver = Resolver::new(items.first().map_or(0, Vec::len), identify, compare);
resolver.add_all(&items);
// resolver items:
// {
// 10: [ { item: A, count: 3, coordinates: [(0, 0), (1, 0), (2, 0)] } ],
// 3: [ { item: B, count: 2, coordinates: [(0, 1), (1, 1)] } ],
// 4: [ { item: C, count: 3, coordinates: [(0, 2), (1, 2), (2, 1)] } ],
// 2: [ { item: D, count: 1, coordinates: [(2, 2)] } ]
// }
// For majority, we need resolution_count = 2
// Select coordinates of accepted items, avoiding copying
let resolved_coords: HashSet<_> = resolver
.items
.into_iter()
.filter_map(|(_, points)| {
points
.into_iter()
.find(|point| point.count >= resolution_count)
.map(|point| point.coordinates.into_iter())
})
.flatten()
.collect();
// resolved coords:
// [
// (0, 0), (1, 0), (2, 0),
// (0, 1), (1, 1),
// (0, 2), (1, 2), (2, 1)
// ]
// Shortcut if everything is consistent: return first items, avoiding filtering
let all_items_len = items.iter().map(Vec::len).sum::<usize>();
let is_consistent = resolved_coords.len() == all_items_len;
if is_consistent {
// Return the first replica result as everything is consistent
return items.into_iter().next().unwrap_or_default();
}
// Items:
// [
// [
// { id: 3, item: B, score: 0.8 },
// { id: 4, item: C, score: 0.7 }
// ],
// [
// { id: 3, item: B, score: 0.8 },
// { id: 4, item: C, score: 0.7 }
// ],
// [
// { id: 4, item: C, score: 0.7 },
// ]
// ]
let resolved_coords = Rc::new(resolved_coords);
let resolved_iters = items
.into_iter()
.enumerate()
.map(|(replica_id, replica_response)| {
// replica_response:
// [
// { id: 10, item: A, score: 0.9 },
// { id: 4, item: C, score: 0.7 },
// { id: 2, item: D, score: 0.6 }
// ]
let resolved_coords = resolved_coords.clone();
replica_response
.into_iter()
.enumerate()
.filter_map(move |(index, item)| {
resolved_coords
.contains(&(replica_id, index))
.then_some(item)
})
// Iterator of filtered items:
// Iter<
// { id: 10, item: A, score: 0.9 },
// { id: 4, item: C, score: 0.7 },
// >
});
MergeInOrder::new(resolved_iters, identify, resolution_count).collect_vec()
}
fn new(capacity: usize, identify: Ident, compare: Cmp) -> Self {
Self {
items: HashMap::with_capacity(capacity),
identify,
compare,
}
}
fn add_all<I>(&mut self, items: I)
where
I: IntoIterator,
I::Item: IntoIterator<Item = &'a Item>,
{
for (row, items) in items.into_iter().enumerate() {
for (index, item) in items.into_iter().enumerate() {
self.add((self.identify)(item), item, row, index);
}
}
}
fn add(&mut self, id: Id, item: &'a Item, row: RowId, index: ColumnId) {
let points = self.items.entry(id).or_default();
for point in points.iter_mut() {
if (self.compare)(item, point.item.unwrap()) {
point.count += 1;
point.coordinates.push((row, index));
return;
}
}
points.push(ResolverRecord::new(item, row, index));
}
}
type RowId = usize;
type ColumnId = usize;
#[derive(Debug, Clone)]
struct ResolverRecord<'a, T> {
item: Option<&'a T>,
/// Store all coordinates of equal items in `(row, index)` tuples
coordinates: TinyVec<[(RowId, ColumnId); EXPECTED_REPLICAS]>,
/// Keeps track of the amount of times we see this same item
count: usize,
}
impl<T> Default for ResolverRecord<'_, T> {
fn default() -> Self {
Self {
item: None,
coordinates: Default::default(),
count: 0,
}
}
}
impl<'a, T> ResolverRecord<'a, T> {
    /// Creates a record for an item first seen at `(row, index)`, with a count of 1.
    fn new(item: &'a T, row: RowId, index: ColumnId) -> Self {
        let mut coordinates = TinyVec::new();
        coordinates.push((row, index));
        Self {
            item: Some(item),
            coordinates,
            count: 1,
        }
    }
}
/// Resolves multiple list of items by reading heads of all iterators on each step
/// and accepting the most common occurrence as the next resolved item.
///
/// For example, the three lists below merge into `[A, F, B, C]`,
/// resolved column by column as shown:
///
/// [
///     [A, F, B, C],
///     [A, B, C],
///     [F, B, C],
/// ]
///
///  1  2  3  4
/// [A, F, B, C]
/// [A,    B, C]
/// [   F, B, C]
struct MergeInOrder<I: Iterator, Ident> {
    /// One iterator per set of results, which outputs items that comply with resolution count, in their original order
    resolved_iters: Vec<Peekable<I>>,
    /// Closure which retrieves the item's ID
    ident: Ident,
    /// Only used to debug_assert correctness
    resolution_count: usize,
}
impl<Iter, Ident, Id, Item> MergeInOrder<Iter, Ident>
where
    Id: Eq + hash::Hash,
    Ident: Fn(&Item) -> Id,
    Iter: Iterator<Item = Item>,
{
    /// Wraps each input iterator in `Peekable` so heads can be inspected without consuming them.
    fn new(
        resolved_iters: impl Iterator<Item = Iter>,
        identify: Ident,
        resolution_count: usize,
    ) -> Self {
        let resolved_iters = resolved_iters.map(|iter| iter.peekable()).collect();
        Self {
            resolved_iters,
            ident: identify,
            resolution_count,
        }
    }

    /// An iterator over all current heads of the resolved iterators.
    ///
    /// Exhausted rows are skipped; each yielded pair is `(row, id-of-head-item)`.
    fn peek_heads(&mut self) -> impl Iterator<Item = (RowId, Id)> + '_ {
        self.resolved_iters
            .iter_mut()
            .enumerate()
            .filter_map(|(row, iter)| iter.peek().map(|peeked| (row, (self.ident)(peeked))))
    }

    /// Peeks each row, then maps IDs to the peeked rows in which each ID appears
    ///
    /// Example:
    ///
    /// ```text
    /// resolved_iters = [
    ///     <- (10, A) <- (4, B) <- (3, C)
    ///     <- (10, A) <- (4, B) <- (3, C)
    ///     <- (4, B) <- (3, C)
    /// ]
    /// ```
    ///
    /// output:
    /// ```text
    /// {
    ///     10: [0, 1],
    ///     4: [2],
    /// }
    /// ```
    fn heads_map(&mut self) -> HashMap<Id, TinyVec<[RowId; EXPECTED_REPLICAS]>> {
        let capacity = self.resolved_iters.len();
        self.peek_heads()
            .fold(HashMap::with_capacity(capacity), |mut map, (row, id)| {
                let entry = map.entry(id).or_default();
                entry.push(row);
                map
            })
    }

    /// Consumes the head of every listed row and returns the last one consumed.
    /// All listed rows share the same head ID (they come from one `heads_map` entry),
    /// so returning any single consumed head is equivalent.
    ///
    /// Minimum len of `row_ids` should be the resolution count.
    fn advance_rows(&mut self, row_ids: &[RowId]) -> Option<Item> {
        debug_assert!(row_ids.len() >= self.resolution_count);
        let mut merged_item = None;
        for row_id in row_ids {
            merged_item = self.resolved_iters[*row_id].next();
        }
        merged_item
    }
}
impl<Iter, Ident, Id, Item> Iterator for MergeInOrder<Iter, Ident>
where
    Id: Eq + hash::Hash,
    Ident: Fn(&Item) -> Id,
    Iter: Iterator<Item = Item>,
{
    type Item = Item;

    /// Yields the next resolved item, or `None` once all row iterators are exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        // Choose the item that appears the most times in the heads
        // heads_map: (id to source row_ids)
        // {
        //     10: [0, 1],
        //     4: [2],
        // }
        let heads_map = self.heads_map();
        // Most frequent row IDs - Assume most frequent item is the one to be resolved next
        // [0, 1]
        // NOTE(review): on a frequency tie, `max_by_key` over `HashMap::into_values`
        // picks an arbitrary winner (map iteration order) — confirm ties are acceptable here.
        let chosen_rows = heads_map.into_values().max_by_key(|kv| kv.len())?;
        // Pull the item from the chosen rows (return only one of them)
        self.advance_rows(&chosen_rows)
    }
}
#[cfg(test)]
mod test {
use std::fmt;
use common::types::ScoreType;
use super::*;
#[rustfmt::skip]
fn resolve_scored_points_batch_4_data() -> [Vec<ScoredPoint>; 3] {
[
vec![
point(14, 0.0), point(17, 0.1), point(15, 0.1),
point(13, 0.2), point(11, 0.2), point(12, 0.3),
point(18, 0.3), point(16, 0.4), point(10, 0.5),
],
vec![
point(23, 0.0), point(21, 0.1), point(25, 0.2),
point(22, 0.2), point(20, 0.3), point(24, 0.3),
],
vec![
point(30, 0.1), point(31, 0.1), point(32, 0.1),
point(33, 0.2), point(34, 0.2), point(35, 0.3),
],
]
}
/// Builds a minimal `ScoredPoint` test fixture: only `id` and `score` are set,
/// version is fixed to 1, everything else is `None`.
fn point(id: u64, score: ScoreType) -> ScoredPoint {
    ScoredPoint {
        id: id.into(),
        version: 1,
        score,
        payload: None,
        vector: None,
        shard_key: None,
        order_value: None,
    }
}
#[rustfmt::skip]
fn resolve_scored_points_batch_4_input() -> Vec<Vec<Vec<ScoredPoint>>> {
let [batch1, batch2, batch3] = resolve_scored_points_batch_4_data();
vec![
vec![
batch(&batch1, [remove(2), remove(3)]),
batch(&batch2, [remove(0), remove(3)]),
batch(&batch3, [remove(4), remove(5)]),
],
vec![
batch(&batch1, [remove(1), modify(3)]),
batch(&batch2, [modify(0), remove(2)]),
batch(&batch3, [remove(3), modify(5)]),
],
vec![
batch(&batch1, [remove(1), modify(4)]),
batch(&batch2, [modify(3), remove(5)]),
batch(&batch3, [remove(2), modify(5)]),
],
vec![
batch1,
batch2,
batch3,
],
]
}
/// Applies `actions` to a copy of `batch` and returns the result.
///
/// Action indices refer to positions in the *original* batch; removals shift later
/// elements left, so `offset` (the number of already-removed indices at or before the
/// current action's index) translates original indices into current positions.
fn batch<const N: usize>(batch: &[ScoredPoint], mut actions: [Action; N]) -> Vec<ScoredPoint> {
    let mut batch = batch.to_owned();
    // Process in ascending index order so the offset bookkeeping stays valid
    actions.sort_unstable_by_key(|action| action.index());
    let mut removed = Vec::new();
    for action in actions {
        let offset = removed
            .iter()
            .filter(|&&removed| removed <= action.index())
            .count();
        match action {
            Action::Remove(index) => {
                batch.remove(index - offset);
                removed.push(index);
            }
            Action::Modify(index) => {
                // Bump the score by a full point so the modified copy compares unequal
                batch[index - offset].score += 1.0;
            }
        }
    }
    batch
}
#[derive(Copy, Clone, Debug)]
enum Action {
Remove(usize),
Modify(usize),
}
impl Action {
pub fn index(self) -> usize {
match self {
Self::Remove(index) => index,
Self::Modify(index) => index,
}
}
}
fn remove(index: usize) -> Action {
Action::Remove(index)
}
fn modify(index: usize) -> Action {
Action::Modify(index)
}
#[test]
fn resolve_scored_points_batch_4_all() {
let [mut batch1, mut batch2, mut batch3] = resolve_scored_points_batch_4_data();
batch1.remove(4);
batch1.remove(3);
batch1.remove(2);
batch1.remove(1);
batch2.remove(5);
batch2.remove(3);
batch2.remove(2);
batch2.remove(0);
batch3.remove(5);
batch3.remove(4);
batch3.remove(3);
batch3.remove(2);
test_resolve(
resolve_scored_points_batch_4_input(),
[batch1, batch2, batch3],
ResolveCondition::All,
);
}
#[test]
fn resolve_scored_points_batch_4_majority() {
let [mut batch1, mut batch2, mut batch3] = resolve_scored_points_batch_4_data();
batch1.remove(3);
batch1.remove(1);
batch2.remove(3);
batch2.remove(0);
batch3.remove(5);
test_resolve(
resolve_scored_points_batch_4_input(),
[batch1, batch2, batch3],
ResolveCondition::Majority,
);
}
fn data_simple() -> [i32; 9] {
[1, 2, 3, 4, 5, 6, 7, 8, 9]
}
#[rustfmt::skip]
fn input_2() -> [Vec<i32>; 2] {
[
vec![1, 2, 3, 6, 7, 9, 11, 12, 13],
vec![ 3, 4, 5, 6, 8, 10, 11, ],
]
}
fn expected_2() -> [i32; 3] {
[3, 6, 11]
}
#[rustfmt::skip]
fn input_3() -> [Vec<i32>; 3] {
[
vec![1, 2, 6, 7, 8, 11, 13, 14, 15, ],
vec![ 2, 3, 4, 7, 9, 10, 13, 14, 16, ],
vec![ 4, 5, 6, 7, 9, 11, 12, 14, 17],
]
}
fn expected_3_all() -> [i32; 2] {
[7, 14]
}
fn expected_3_majority() -> [i32; 8] {
[2, 4, 6, 7, 9, 11, 13, 14]
}
#[rustfmt::skip]
fn input_4() -> [Vec<i32>; 4] {
[
vec![1, 2, 3, 9, 11, 12, 13, 14, 16, 19, 21, 22, 24, 27, 29],
vec![ 2, 3, 4, 5, 6, 12, 13, 15, 17, 19, 22, 24, 26, 27, 28, ],
vec![ 3, 5, 6, 7, 8, 9, 13, 15, 16, 18, 20, 22, 26, 27, 28, ],
vec![ 6, 8, 9, 10, 11, 12, 13, 16, 18, 19, 21, 23, 26, 27, 29],
]
}
fn expected_4_all() -> [i32; 2] {
[13, 27]
}
fn expected_4_majority() -> [i32; 10] {
[3, 6, 9, 12, 13, 16, 19, 22, 26, 27]
}
#[rustfmt::skip]
fn input_5() -> [Vec<i32>; 3] {
[
vec![1, 2, 3, 4, 5, 6, 7],
vec![ 6, 7],
vec![1, 2, 3, 4 ],
]
}
fn expected_5_majority() -> [i32; 6] {
[1, 2, 3, 4, 6, 7]
}
fn expected_5_all() -> [i32; 0] {
[]
}
#[test]
fn resolve_0_all() {
resolve_0(ResolveCondition::All);
}
#[test]
fn resolve_0_majority() {
resolve_0(ResolveCondition::Majority);
}
fn resolve_0(condition: ResolveCondition) {
test_resolve_simple(Vec::<Vec<i32>>::new(), Vec::new(), condition);
}
#[test]
fn resolve_simple_all() {
for replicas in 1..=5 {
resolve_simple(replicas, ResolveCondition::All);
}
}
/// All replicas return identical data, so a *majority* resolution must return it unchanged.
#[test]
fn resolve_simple_majority() {
    for replicas in 1..=5 {
        // Fixed: previously passed `ResolveCondition::All` (copy-paste from
        // `resolve_simple_all`), so the majority code path was never exercised.
        resolve_simple(replicas, ResolveCondition::Majority);
    }
}
fn resolve_simple(replicas: usize, condition: ResolveCondition) {
let input: Vec<_> = (0..replicas).map(|_| data_simple()).collect();
let expected = data_simple();
test_resolve_simple(input, expected, condition)
}
#[test]
fn resolve_2_all() {
test_resolve_simple(input_2(), expected_2(), ResolveCondition::All);
}
#[test]
fn resolve_2_majority() {
test_resolve_simple(input_2(), expected_2(), ResolveCondition::Majority);
}
#[test]
fn resolve_3_all() {
test_resolve_simple(input_3(), expected_3_all(), ResolveCondition::All);
}
#[test]
fn resolve_3_majority() {
test_resolve_simple(input_3(), expected_3_majority(), ResolveCondition::Majority);
}
#[test]
fn resolve_4_all() {
test_resolve_simple(input_4(), expected_4_all(), ResolveCondition::All);
}
#[test]
fn resolve_4_majority() {
test_resolve_simple(input_4(), expected_4_majority(), ResolveCondition::Majority);
}
#[test]
fn resolve_5_majority() {
test_resolve_simple(input_5(), expected_5_majority(), ResolveCondition::Majority);
}
#[test]
fn resolve_5_all() {
test_resolve_simple(input_5(), expected_5_all(), ResolveCondition::All);
}
fn test_resolve<T, E>(input: Vec<T>, expected: E, condition: ResolveCondition)
where
T: Resolve + Clone + PartialEq<E> + fmt::Debug,
E: fmt::Debug,
{
assert_eq!(T::resolve(input, condition), expected);
}
fn test_resolve_simple<I, E>(input: I, expected: E, condition: ResolveCondition)
where
I: IntoIterator,
I::Item: IntoIterator<Item = i32>,
E: IntoIterator<Item = i32>,
{
test_resolve(simple_input(input), simple_expected(expected), condition);
}
fn simple_input<I>(input: I) -> Vec<Vec<Val>>
where
I: IntoIterator,
I::Item: IntoIterator<Item = i32>,
{
input
.into_iter()
.map(|items| items.into_iter().map(Val).collect())
.collect()
}
fn simple_expected<E>(expected: E) -> Vec<Val>
where
E: IntoIterator<Item = i32>,
{
expected.into_iter().map(Val).collect()
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
struct Val(i32);
impl Resolve for Vec<Val> {
fn resolve(values: Vec<Self>, condition: ResolveCondition) -> Self {
Resolver::resolve(values, |val| val.0, PartialEq::eq, condition)
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/conversions.rs | lib/collection/src/shards/conversions.rs | use api::conversions::json::payload_to_proto;
use api::grpc::conversions::convert_shard_key_from_grpc_opt;
use api::grpc::qdrant::points_selector::PointsSelectorOneOf;
use api::grpc::qdrant::{
ClearPayloadPoints, ClearPayloadPointsInternal, CreateFieldIndexCollection,
CreateFieldIndexCollectionInternal, DeleteFieldIndexCollection,
DeleteFieldIndexCollectionInternal, DeletePayloadPoints, DeletePayloadPointsInternal,
DeletePointVectors, DeletePoints, DeletePointsInternal, DeleteVectorsInternal, PointVectors,
PointsIdsList, PointsSelector, SetPayloadPoints, SetPayloadPointsInternal, SyncPoints,
SyncPointsInternal, UpdatePointVectors, UpdateVectorsInternal, UpsertPoints,
UpsertPointsInternal, Vectors, VectorsSelector,
};
use segment::data_types::vectors::VectorStructInternal;
use segment::json_path::JsonPath;
use segment::types::{Filter, PayloadFieldSchema, PointIdType, ScoredPoint, VectorNameBuf};
use tonic::Status;
use crate::operations::conversions::write_ordering_to_proto;
use crate::operations::payload_ops::{DeletePayloadOp, SetPayloadOp};
use crate::operations::point_ops::{
ConditionalInsertOperationInternal, PointInsertOperationsInternal, PointSyncOperation,
WriteOrdering,
};
use crate::operations::types::CollectionResult;
use crate::operations::vector_ops::UpdateVectorsOp;
use crate::operations::{ClockTag, CreateIndex};
use crate::shards::shard::ShardId;
/// Builds an internal gRPC sync-points request from a `PointSyncOperation`.
///
/// # Errors
/// Fails if any point cannot be converted to its gRPC `PointStruct` representation.
pub fn internal_sync_points(
    shard_id: Option<ShardId>,
    clock_tag: Option<ClockTag>,
    collection_name: String,
    points_sync_operation: PointSyncOperation,
    wait: bool,
    ordering: Option<WriteOrdering>,
) -> CollectionResult<SyncPointsInternal> {
    let PointSyncOperation {
        points,
        from_id,
        to_id,
    } = points_sync_operation;
    Ok(SyncPointsInternal {
        shard_id,
        clock_tag: clock_tag.map(Into::into),
        sync_points: Some(SyncPoints {
            collection_name,
            wait: Some(wait),
            // Converting any point may fail; the first error aborts the whole request
            points: points
                .into_iter()
                .map(api::grpc::qdrant::PointStruct::try_from)
                .collect::<Result<Vec<_>, Status>>()?,
            from_id: from_id.map(|x| x.into()),
            to_id: to_id.map(|x| x.into()),
            ordering: ordering.map(write_ordering_to_proto),
            timeout: None,
        }),
    })
}
pub fn internal_upsert_points(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
point_insert_operations: PointInsertOperationsInternal,
wait: bool,
ordering: Option<WriteOrdering>,
) -> CollectionResult<UpsertPointsInternal> {
Ok(UpsertPointsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
upsert_points: Some(UpsertPoints {
collection_name,
wait: Some(wait),
points: match point_insert_operations {
PointInsertOperationsInternal::PointsBatch(batch) => TryFrom::try_from(batch)?,
PointInsertOperationsInternal::PointsList(list) => list
.into_iter()
.map(api::grpc::qdrant::PointStruct::try_from)
.collect::<Result<Vec<_>, Status>>()?,
},
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
update_filter: None,
timeout: None,
}),
})
}
pub fn internal_conditional_upsert_points(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
point_condition_upsert_operations: ConditionalInsertOperationInternal,
wait: bool,
ordering: Option<WriteOrdering>,
) -> CollectionResult<UpsertPointsInternal> {
let ConditionalInsertOperationInternal {
points_op: point_insert_operations,
condition,
} = point_condition_upsert_operations;
Ok(UpsertPointsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
upsert_points: Some(UpsertPoints {
collection_name,
wait: Some(wait),
points: match point_insert_operations {
PointInsertOperationsInternal::PointsBatch(batch) => TryFrom::try_from(batch)?,
PointInsertOperationsInternal::PointsList(list) => list
.into_iter()
.map(api::grpc::qdrant::PointStruct::try_from)
.collect::<Result<Vec<_>, Status>>()?,
},
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
update_filter: Some(api::grpc::Filter::from(condition)),
timeout: None,
}),
})
}
/// Builds an internal gRPC delete-points request selecting points by explicit IDs.
pub fn internal_delete_points(
    shard_id: Option<ShardId>,
    clock_tag: Option<ClockTag>,
    collection_name: String,
    ids: Vec<PointIdType>,
    wait: bool,
    ordering: Option<WriteOrdering>,
) -> DeletePointsInternal {
    // Selector listing exactly the requested point IDs
    let id_list = PointsIdsList {
        ids: ids.into_iter().map(Into::into).collect(),
    };
    let selector = PointsSelector {
        points_selector_one_of: Some(PointsSelectorOneOf::Points(id_list)),
    };
    let delete_points = DeletePoints {
        collection_name,
        wait: Some(wait),
        points: Some(selector),
        ordering: ordering.map(write_ordering_to_proto),
        shard_key_selector: None,
        timeout: None,
    };
    DeletePointsInternal {
        shard_id,
        clock_tag: clock_tag.map(Into::into),
        delete_points: Some(delete_points),
    }
}
pub fn internal_delete_points_by_filter(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
filter: Filter,
wait: bool,
ordering: Option<WriteOrdering>,
) -> DeletePointsInternal {
DeletePointsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
delete_points: Some(DeletePoints {
collection_name,
wait: Some(wait),
points: Some(PointsSelector {
points_selector_one_of: Some(PointsSelectorOneOf::Filter(filter.into())),
}),
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
timeout: None,
}),
}
}
pub fn internal_update_vectors(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
update_vectors: UpdateVectorsOp,
wait: bool,
ordering: Option<WriteOrdering>,
) -> CollectionResult<UpdateVectorsInternal> {
let UpdateVectorsOp {
points,
update_filter,
} = update_vectors;
let points: Result<Vec<_>, _> = points
.into_iter()
.map(|point| {
VectorStructInternal::try_from(point.vector).map(|vector_struct| PointVectors {
id: Some(point.id.into()),
vectors: Some(Vectors::from(vector_struct)),
})
})
.collect();
Ok(UpdateVectorsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
update_vectors: Some(UpdatePointVectors {
collection_name,
wait: Some(wait),
points: points?,
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
update_filter: update_filter.map(api::grpc::Filter::from),
timeout: None,
}),
})
}
pub fn internal_delete_vectors(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
ids: Vec<PointIdType>,
vector_names: Vec<VectorNameBuf>,
wait: bool,
ordering: Option<WriteOrdering>,
) -> DeleteVectorsInternal {
DeleteVectorsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
delete_vectors: Some(DeletePointVectors {
collection_name,
wait: Some(wait),
points_selector: Some(PointsSelector {
points_selector_one_of: Some(PointsSelectorOneOf::Points(PointsIdsList {
ids: ids.into_iter().map(|id| id.into()).collect(),
})),
}),
vectors: Some(VectorsSelector {
names: vector_names,
}),
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
timeout: None,
}),
}
}
pub fn internal_delete_vectors_by_filter(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
filter: Filter,
vector_names: Vec<VectorNameBuf>,
wait: bool,
ordering: Option<WriteOrdering>,
) -> DeleteVectorsInternal {
DeleteVectorsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
delete_vectors: Some(DeletePointVectors {
collection_name,
wait: Some(wait),
points_selector: Some(PointsSelector {
points_selector_one_of: Some(PointsSelectorOneOf::Filter(filter.into())),
}),
vectors: Some(VectorsSelector {
names: vector_names,
}),
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
timeout: None,
}),
}
}
/// Builds an internal gRPC set-payload request.
///
/// An explicit point-ID list takes precedence over a filter; if neither is
/// present, no selector is attached.
pub fn internal_set_payload(
    shard_id: Option<ShardId>,
    clock_tag: Option<ClockTag>,
    collection_name: String,
    set_payload: SetPayloadOp,
    wait: bool,
    ordering: Option<WriteOrdering>,
) -> SetPayloadPointsInternal {
    // Prefer the explicit ID list; fall back to the filter (if any).
    let points_selector = set_payload
        .points
        .map(|ids| PointsSelector {
            points_selector_one_of: Some(PointsSelectorOneOf::Points(PointsIdsList {
                ids: ids.into_iter().map(Into::into).collect(),
            })),
        })
        .or_else(|| {
            set_payload.filter.map(|filter| PointsSelector {
                points_selector_one_of: Some(PointsSelectorOneOf::Filter(filter.into())),
            })
        });
    SetPayloadPointsInternal {
        shard_id,
        clock_tag: clock_tag.map(Into::into),
        set_payload_points: Some(SetPayloadPoints {
            collection_name,
            wait: Some(wait),
            payload: payload_to_proto(set_payload.payload),
            points_selector,
            ordering: ordering.map(write_ordering_to_proto),
            shard_key_selector: None,
            key: set_payload.key.map(|key| key.to_string()),
            timeout: None,
        }),
    }
}
/// Builds an internal gRPC delete-payload request.
///
/// An explicit point-ID list takes precedence over a filter; if neither is
/// present, no selector is attached.
pub fn internal_delete_payload(
    shard_id: Option<ShardId>,
    clock_tag: Option<ClockTag>,
    collection_name: String,
    delete_payload: DeletePayloadOp,
    wait: bool,
    ordering: Option<WriteOrdering>,
) -> DeletePayloadPointsInternal {
    let points_selector = if let Some(points) = delete_payload.points {
        Some(PointsSelector {
            points_selector_one_of: Some(PointsSelectorOneOf::Points(PointsIdsList {
                ids: points.into_iter().map(|id| id.into()).collect(),
            })),
        })
    } else {
        delete_payload.filter.map(|filter| PointsSelector {
            points_selector_one_of: Some(PointsSelectorOneOf::Filter(filter.into())),
        })
    };
    DeletePayloadPointsInternal {
        shard_id,
        clock_tag: clock_tag.map(Into::into),
        delete_payload_points: Some(DeletePayloadPoints {
            collection_name,
            wait: Some(wait),
            // Keys are sent in their string representation
            keys: delete_payload
                .keys
                .into_iter()
                .map(|key| key.to_string())
                .collect(),
            points_selector,
            ordering: ordering.map(write_ordering_to_proto),
            shard_key_selector: None,
            timeout: None,
        }),
    }
}
pub fn internal_clear_payload(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
points: Vec<PointIdType>,
wait: bool,
ordering: Option<WriteOrdering>,
) -> ClearPayloadPointsInternal {
ClearPayloadPointsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
clear_payload_points: Some(ClearPayloadPoints {
collection_name,
wait: Some(wait),
points: Some(PointsSelector {
points_selector_one_of: Some(PointsSelectorOneOf::Points(PointsIdsList {
ids: points.into_iter().map(|id| id.into()).collect(),
})),
}),
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
timeout: None,
}),
}
}
pub fn internal_clear_payload_by_filter(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
filter: Filter,
wait: bool,
ordering: Option<WriteOrdering>,
) -> ClearPayloadPointsInternal {
ClearPayloadPointsInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
clear_payload_points: Some(ClearPayloadPoints {
collection_name,
wait: Some(wait),
points: Some(PointsSelector {
points_selector_one_of: Some(PointsSelectorOneOf::Filter(filter.into())),
}),
ordering: ordering.map(write_ordering_to_proto),
shard_key_selector: None,
timeout: None,
}),
}
}
/// Builds an internal gRPC create-field-index request.
///
/// The optional `field_schema` is split into a gRPC `FieldType` discriminant and
/// optional index parameters; when no schema is given, both stay `None`.
pub fn internal_create_index(
    shard_id: Option<ShardId>,
    clock_tag: Option<ClockTag>,
    collection_name: String,
    create_index: CreateIndex,
    wait: bool,
    ordering: Option<WriteOrdering>,
) -> CreateFieldIndexCollectionInternal {
    let (field_type, field_index_params) = create_index
        .field_schema
        .map(|field_schema| match field_schema {
            // Bare field type: discriminant only, no extra parameters
            PayloadFieldSchema::FieldType(field_type) => {
                (api::grpc::qdrant::FieldType::from(field_type) as i32, None)
            }
            // Parameterized schema: derive the type from its kind, keep the params
            PayloadFieldSchema::FieldParams(field_params) => (
                api::grpc::qdrant::FieldType::from(field_params.kind()) as i32,
                Some(field_params.into()),
            ),
        })
        .map(|(field_type, field_params)| (Some(field_type), field_params))
        .unwrap_or((None, None));
    CreateFieldIndexCollectionInternal {
        shard_id,
        clock_tag: clock_tag.map(Into::into),
        create_field_index_collection: Some(CreateFieldIndexCollection {
            collection_name,
            wait: Some(wait),
            field_name: create_index.field_name.to_string(),
            field_type,
            field_index_params,
            ordering: ordering.map(write_ordering_to_proto),
            timeout: None,
        }),
    }
}
pub fn internal_delete_index(
shard_id: Option<ShardId>,
clock_tag: Option<ClockTag>,
collection_name: String,
delete_index: JsonPath,
wait: bool,
ordering: Option<WriteOrdering>,
) -> DeleteFieldIndexCollectionInternal {
DeleteFieldIndexCollectionInternal {
shard_id,
clock_tag: clock_tag.map(Into::into),
delete_field_index_collection: Some(DeleteFieldIndexCollection {
collection_name,
wait: Some(wait),
field_name: delete_index.to_string(),
ordering: ordering.map(write_ordering_to_proto),
timeout: None,
}),
}
}
/// Converts a gRPC `ScoredPoint` into the internal `ScoredPoint` representation.
///
/// `with_payload` reflects whether the caller requested payloads: when `false`,
/// the incoming payload is expected to be empty (debug-asserted) and mapped to `None`.
///
/// # Errors
/// Returns `Status::invalid_argument` when the ID is missing, or when the payload,
/// vectors, or order value fail conversion.
pub fn try_scored_point_from_grpc(
    point: api::grpc::qdrant::ScoredPoint,
    with_payload: bool,
) -> Result<ScoredPoint, Status> {
    let api::grpc::qdrant::ScoredPoint {
        id,
        payload,
        score,
        version,
        vectors,
        shard_key,
        order_value,
    } = point;
    let id = id
        .ok_or_else(|| Status::invalid_argument("scored point does not have an ID"))?
        .try_into()?;
    let payload = if with_payload {
        Some(api::conversions::json::proto_to_payloads(payload)?)
    } else {
        // Caller did not ask for payloads; the peer should not have sent any
        debug_assert!(payload.is_empty());
        None
    };
    let vector = vectors
        .map(|vectors| vectors.try_into())
        .transpose()
        .map_err(|e| Status::invalid_argument(format!("Failed to parse vectors: {e}")))?;
    Ok(ScoredPoint {
        id,
        version,
        score,
        payload,
        vector,
        shard_key: convert_shard_key_from_grpc_opt(shard_key),
        order_value: order_value.map(TryFrom::try_from).transpose()?,
    })
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/dummy_shard.rs | lib/collection/src/shards/dummy_shard.rs | use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::data_types::manifest::SnapshotManifest;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{
ExtendedPointId, Filter, ScoredPoint, SizeStats, SnapshotFormat, WithPayload,
WithPayloadInterface, WithVector,
};
use shard::operations::CollectionUpdateOperations;
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use crate::operations::OperationWithClockTag;
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CountRequestInternal, CountResult,
OptimizersStatus, PointRequestInternal, ScrollRequestInternal, ShardStatus, UpdateResult,
UpdateStatus,
};
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::LocalShardTelemetry;
/// Stand-in shard whose operations report a service error built from `message`
/// (field-index updates are acknowledged instead — see the `ShardOperation` impl).
#[derive(Clone, Debug)]
pub struct DummyShard {
    // Error message attached to every rejected operation
    message: String,
}
impl DummyShard {
    pub fn new(message: impl Into<String>) -> Self {
        Self {
            message: message.into(),
        }
    }

    /// Snapshot creation is unsupported — always fails with the dummy error.
    pub async fn create_snapshot(
        &self,
        _temp_path: &Path,
        _tar: &tar_ext::BuilderExt,
        _format: SnapshotFormat,
        _manifest: Option<SnapshotManifest>,
        _save_wal: bool,
    ) -> CollectionResult<()> {
        self.dummy()
    }

    /// A dummy shard has no data to describe, so an empty default manifest is returned.
    pub fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
        Ok(SnapshotManifest::default())
    }

    pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
        self.dummy()
    }

    /// Strict-mode config changes are a no-op here — there is no state to update.
    pub async fn on_strict_mode_config_update(&mut self) {}

    /// Reports placeholder telemetry: green status with zeroed/absent statistics.
    pub fn get_telemetry_data(&self) -> LocalShardTelemetry {
        LocalShardTelemetry {
            variant_name: Some("dummy shard".into()),
            status: Some(ShardStatus::Green),
            total_optimized_points: 0,
            vectors_size_bytes: None,
            payloads_size_bytes: None,
            num_points: None,
            num_vectors: None,
            num_vectors_by_name: None,
            segments: None,
            optimizations: Default::default(),
            async_scorer: None,
            indexed_only_excluded_vectors: None,
        }
    }

    pub fn get_optimization_status(&self) -> OptimizersStatus {
        OptimizersStatus::Ok
    }

    pub fn get_size_stats(&self) -> SizeStats {
        SizeStats::default()
    }

    pub fn estimate_cardinality(
        &self,
        _: Option<&Filter>,
    ) -> CollectionResult<CardinalityEstimation> {
        self.dummy()
    }

    /// Shared failure path: a `service_error` carrying this shard's message.
    fn dummy<T>(&self) -> CollectionResult<T> {
        Err(CollectionError::service_error(self.message.clone()))
    }
}
#[async_trait]
impl ShardOperation for DummyShard {
    /// Rejects point/vector/payload updates with the dummy error; field-index
    /// operations are acknowledged without doing anything.
    async fn update(
        &self,
        op: OperationWithClockTag,
        _: bool,
        _: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        match &op.operation {
            CollectionUpdateOperations::PointOperation(_) => self.dummy(),
            CollectionUpdateOperations::VectorOperation(_) => self.dummy(),
            CollectionUpdateOperations::PayloadOperation(_) => self.dummy(),
            // Allow (and ignore) field index operations. Field index schema is stored in collection
            // config, and indices will be created (if needed) when dummy shard is recovered.
            CollectionUpdateOperations::FieldIndexOperation(_) => Ok(UpdateResult {
                operation_id: None,
                status: UpdateStatus::Acknowledged,
                clock_tag: None,
            }),
        }
    }

    // NOTE(review): a previous comment claimed this forwards to `wrapped_shard`;
    // that applies to proxy shards, not this dummy — every read below just fails.
    async fn scroll_by(
        &self,
        _: Arc<ScrollRequestInternal>,
        _: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.dummy()
    }

    async fn local_scroll_by_id(
        &self,
        _: Option<ExtendedPointId>,
        _: usize,
        _: &WithPayloadInterface,
        _: &WithVector,
        _: Option<&Filter>,
        _: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.dummy()
    }

    async fn info(&self) -> CollectionResult<CollectionInfo> {
        self.dummy()
    }

    async fn core_search(
        &self,
        _: Arc<CoreSearchRequestBatch>,
        _: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        self.dummy()
    }

    async fn count(
        &self,
        _: Arc<CountRequestInternal>,
        _: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<CountResult> {
        self.dummy()
    }

    async fn retrieve(
        &self,
        _: Arc<PointRequestInternal>,
        _: &WithPayload,
        _: &WithVector,
        _: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.dummy()
    }

    async fn query_batch(
        &self,
        _requests: Arc<Vec<ShardQueryRequest>>,
        _search_runtime_handle: &Handle,
        _timeout: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>> {
        self.dummy()
    }

    async fn facet(
        &self,
        _: Arc<FacetParams>,
        _search_runtime_handle: &Handle,
        _: Option<Duration>,
        _: HwMeasurementAcc,
    ) -> CollectionResult<FacetResponse> {
        self.dummy()
    }

    /// Nothing to shut down — a dummy shard holds no resources.
    async fn stop_gracefully(self) {}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/remote_shard.rs | lib/collection/src/shards/remote_shard.rs | use std::future::Future;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use api::grpc::qdrant::collections_internal_client::CollectionsInternalClient;
use api::grpc::qdrant::points_internal_client::PointsInternalClient;
use api::grpc::qdrant::qdrant_client::QdrantClient;
use api::grpc::qdrant::shard_snapshot_location::Location;
use api::grpc::qdrant::shard_snapshots_client::ShardSnapshotsClient;
use api::grpc::qdrant::{
CollectionOperationResponse, CoreSearchBatchPointsInternal, CountPoints, CountPointsInternal,
CountResponse, FacetCountsInternal, GetCollectionInfoRequest, GetCollectionInfoRequestInternal,
GetPoints, GetPointsInternal, GetShardRecoveryPointRequest, HealthCheckRequest,
InitiateShardTransferRequest, QueryBatchPointsInternal, QueryBatchResponseInternal,
QueryShardPoints, RecoverShardSnapshotRequest, RecoverSnapshotResponse, ScrollPoints,
ScrollPointsInternal, SearchBatchResponse, ShardSnapshotLocation,
UpdateShardCutoffPointRequest, WaitForShardStateRequest,
};
use api::grpc::transport_channel_pool::{AddTimeout, MAX_GRPC_CHANNEL_TIMEOUT};
use api::grpc::update_operation::Update;
use api::grpc::{UpdateBatchInternal, UpdateOperation, WithPayloadSelector};
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::types::TelemetryDetail;
use itertools::Itertools;
use parking_lot::Mutex;
use segment::common::operation_time_statistics::{
OperationDurationsAggregator, ScopeDurationMeasurer,
};
use segment::data_types::facets::{FacetParams, FacetResponse, FacetValueHit};
use segment::data_types::order_by::OrderBy;
use segment::types::{
ExtendedPointId, Filter, ScoredPoint, WithPayload, WithPayloadInterface, WithVector,
};
use semver::Version;
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use tonic::Status;
use tonic::codegen::InterceptedService;
use tonic::transport::{Channel, Uri};
use url::Url;
use super::conversions::{
internal_conditional_upsert_points, internal_delete_vectors, internal_delete_vectors_by_filter,
internal_update_vectors,
};
use super::local_shard::clock_map::RecoveryPoint;
use crate::operations::conversions::try_record_from_grpc;
use crate::operations::payload_ops::PayloadOps;
use crate::operations::point_ops::{PointOperations, WriteOrdering};
use crate::operations::snapshot_ops::SnapshotPriority;
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CoreSearchRequest, CountRequestInternal,
CountResult, PointRequestInternal, ScrollRequestInternal, UpdateResult,
};
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::operations::vector_ops::VectorOperations;
use crate::operations::{CollectionUpdateOperations, FieldIndexOperations, OperationWithClockTag};
use crate::shards::CollectionId;
use crate::shards::channel_service::ChannelService;
use crate::shards::conversions::{
internal_clear_payload, internal_clear_payload_by_filter, internal_create_index,
internal_delete_index, internal_delete_payload, internal_delete_points,
internal_delete_points_by_filter, internal_set_payload, internal_sync_points,
internal_upsert_points, try_scored_point_from_grpc,
};
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::RemoteShardTelemetry;
/// Timeout for transferring and recovering a shard snapshot on a remote peer.
const SHARD_SNAPSHOT_TRANSFER_RECOVER_TIMEOUT: Duration = MAX_GRPC_CHANNEL_TIMEOUT;
/// RemoteShard
///
/// Remote Shard is a representation of a shard that is located on a remote peer.
/// Operations are forwarded to the peer over gRPC through the shared `channel_service`.
#[derive(Clone)]
pub struct RemoteShard {
    pub(crate) id: ShardId,
    pub(crate) collection_id: CollectionId,
    pub peer_id: PeerId,
    pub channel_service: ChannelService,
    /// Duration aggregation for search requests issued through this remote
    telemetry_search_durations: Arc<Mutex<OperationDurationsAggregator>>,
    /// Duration aggregation for update requests issued through this remote
    telemetry_update_durations: Arc<Mutex<OperationDurationsAggregator>>,
}
impl RemoteShard {
/// Instantiate a new remote shard in memory
pub fn new(
id: ShardId,
collection_id: CollectionId,
peer_id: PeerId,
channel_service: ChannelService,
) -> Self {
Self {
id,
collection_id,
peer_id,
channel_service,
telemetry_search_durations: OperationDurationsAggregator::new(),
telemetry_update_durations: OperationDurationsAggregator::new(),
}
}
/// Checks that remote shard is at least at the given version
/// - Returns `true` if we know that the peer is at least at the given version
/// - Returns `false` if we know that the peer not at the given version or version is unknown
pub fn check_version(&self, version: &Version) -> bool {
self.channel_service
.peer_is_at_version(self.peer_id, version)
}
pub fn restore_snapshot(_snapshot_path: &Path) {
// NO extra actions needed for remote shards
}
fn current_address(&self) -> CollectionResult<Uri> {
let guard_peer_address = self.channel_service.id_to_address.read();
let peer_address = guard_peer_address.get(&self.peer_id).cloned();
match peer_address {
None => Err(CollectionError::service_error(format!(
"no address found for peer {}",
self.peer_id
))),
Some(peer_address) => Ok(peer_address),
}
}
async fn with_points_client<T, O: Future<Output = Result<T, Status>>>(
&self,
f: impl Fn(PointsInternalClient<InterceptedService<Channel, AddTimeout>>) -> O,
) -> CollectionResult<T> {
let current_address = self.current_address()?;
self.channel_service
.channel_pool
.with_channel(¤t_address, |channel| {
let client = PointsInternalClient::new(channel);
let client = client.max_decoding_message_size(usize::MAX);
f(client)
})
.await
.map_err(|err| err.into())
}
async fn with_collections_client<T, O: Future<Output = Result<T, Status>>>(
&self,
f: impl Fn(CollectionsInternalClient<InterceptedService<Channel, AddTimeout>>) -> O,
) -> CollectionResult<T> {
let current_address = self.current_address()?;
self.channel_service
.channel_pool
.with_channel(¤t_address, |channel| {
let client = CollectionsInternalClient::new(channel);
let client = client.max_decoding_message_size(usize::MAX);
f(client)
})
.await
.map_err(|err| err.into())
}
async fn with_shard_snapshots_client_timeout<T, O: Future<Output = Result<T, Status>>>(
&self,
f: impl Fn(ShardSnapshotsClient<InterceptedService<Channel, AddTimeout>>) -> O,
timeout: Option<Duration>,
retries: usize,
) -> CollectionResult<T> {
let current_address = self.current_address()?;
self.channel_service
.channel_pool
.with_channel_timeout(
¤t_address,
|channel| {
let client = ShardSnapshotsClient::new(channel);
let client = client.max_decoding_message_size(usize::MAX);
f(client)
},
timeout,
retries,
)
.await
.map_err(|err| err.into())
}
async fn with_qdrant_client<T, Fut: Future<Output = Result<T, Status>>>(
&self,
f: impl Fn(QdrantClient<InterceptedService<Channel, AddTimeout>>) -> Fut,
) -> CollectionResult<T> {
let current_address = self.current_address()?;
self.channel_service
.channel_pool
.with_channel(¤t_address, |channel| {
let client = QdrantClient::new(channel);
f(client)
})
.await
.map_err(|err| err.into())
}
pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> RemoteShardTelemetry {
RemoteShardTelemetry {
shard_id: self.id,
peer_id: Some(self.peer_id),
searches: self
.telemetry_search_durations
.lock()
.get_statistics(detail),
updates: self
.telemetry_update_durations
.lock()
.get_statistics(detail),
}
}
pub async fn initiate_transfer(&self) -> CollectionResult<CollectionOperationResponse> {
let res = self
.with_collections_client(|mut client| async move {
client
.initiate(InitiateShardTransferRequest {
collection_name: self.collection_id.clone(),
shard_id: self.id,
})
.await
})
.await?
.into_inner();
Ok(res)
}
    /// Convert a batch of update operations into the matching internal gRPC
    /// requests and send them to the remote peer in a single `update_batch` call.
    ///
    /// Returns the single `UpdateResult` reported by the remote for the batch,
    /// after accumulating any hardware usage it reported.
    pub async fn forward_update_batch(
        &self,
        operations: Vec<OperationWithClockTag>,
        wait: bool,
        ordering: WriteOrdering,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        let mut updates = Vec::with_capacity(operations.len());
        // All operations in the batch target the same shard/collection with the same ordering.
        let shard_id = Some(self.id);
        let collection_name = &self.collection_id;
        let ordering = Some(ordering);
        for operation in operations {
            // Map each collection-level operation onto its internal gRPC request variant.
            let update_op = match operation.operation {
                CollectionUpdateOperations::PointOperation(point_ops) => match point_ops {
                    PointOperations::UpsertPoints(point_insert_operations) => {
                        let request = internal_upsert_points(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            point_insert_operations,
                            wait,
                            ordering,
                        )?;
                        Update::Upsert(request)
                    }
                    PointOperations::UpsertPointsConditional(conditional_upsert) => {
                        let request = internal_conditional_upsert_points(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            conditional_upsert,
                            wait,
                            ordering,
                        )?;
                        Update::Upsert(request)
                    }
                    PointOperations::DeletePoints { ids } => {
                        let request = internal_delete_points(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            ids,
                            wait,
                            ordering,
                        );
                        Update::Delete(request)
                    }
                    PointOperations::DeletePointsByFilter(filter) => {
                        let request = internal_delete_points_by_filter(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            filter,
                            wait,
                            ordering,
                        );
                        Update::Delete(request)
                    }
                    PointOperations::SyncPoints(operation) => {
                        let request = internal_sync_points(
                            shard_id,
                            None, // TODO!?
                            collection_name.clone(),
                            operation,
                            wait,
                            ordering,
                        )?;
                        Update::Sync(request)
                    }
                    #[cfg(feature = "staging")]
                    PointOperations::TestDelay(_) => {
                        // Staging test delay operations should not be forwarded to remote shards
                        continue;
                    }
                },
                CollectionUpdateOperations::VectorOperation(vector_ops) => match vector_ops {
                    VectorOperations::UpdateVectors(update_operation) => {
                        let request = internal_update_vectors(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            update_operation,
                            wait,
                            ordering,
                        )?;
                        Update::UpdateVectors(request)
                    }
                    VectorOperations::DeleteVectors(ids, vector_names) => {
                        // NOTE(review): `vector_names` is owned and not used afterwards,
                        // so the `.clone()` below looks redundant — confirm and simplify.
                        let request = internal_delete_vectors(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            ids.points,
                            vector_names.clone(),
                            wait,
                            ordering,
                        );
                        Update::DeleteVectors(request)
                    }
                    VectorOperations::DeleteVectorsByFilter(filter, vector_names) => {
                        let request = internal_delete_vectors_by_filter(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            filter,
                            vector_names.clone(),
                            wait,
                            ordering,
                        );
                        Update::DeleteVectors(request)
                    }
                },
                CollectionUpdateOperations::PayloadOperation(payload_ops) => match payload_ops {
                    PayloadOps::SetPayload(set_payload) => {
                        let request = internal_set_payload(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            set_payload,
                            wait,
                            ordering,
                        );
                        Update::SetPayload(request)
                    }
                    PayloadOps::DeletePayload(delete_payload) => {
                        let request = internal_delete_payload(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            delete_payload,
                            wait,
                            ordering,
                        );
                        Update::DeletePayload(request)
                    }
                    PayloadOps::ClearPayload { points } => {
                        let request = internal_clear_payload(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            points,
                            wait,
                            ordering,
                        );
                        Update::ClearPayload(request)
                    }
                    PayloadOps::ClearPayloadByFilter(filter) => {
                        let request = internal_clear_payload_by_filter(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            filter,
                            wait,
                            ordering,
                        );
                        Update::ClearPayload(request)
                    }
                    PayloadOps::OverwritePayload(set_payload) => {
                        // Overwrite reuses the set-payload request type; the
                        // `OverwritePayload` variant selects the overwriting endpoint.
                        let request = internal_set_payload(
                            shard_id,
                            operation.clock_tag,
                            collection_name.clone(),
                            set_payload,
                            wait,
                            ordering,
                        );
                        Update::OverwritePayload(request)
                    }
                },
                CollectionUpdateOperations::FieldIndexOperation(field_index_op) => {
                    match field_index_op {
                        FieldIndexOperations::CreateIndex(create_index) => {
                            let request = internal_create_index(
                                shard_id,
                                operation.clock_tag,
                                collection_name.clone(),
                                create_index,
                                wait,
                                ordering,
                            );
                            Update::CreateFieldIndex(request)
                        }
                        FieldIndexOperations::DeleteIndex(delete_index) => {
                            let request = internal_delete_index(
                                shard_id,
                                operation.clock_tag,
                                collection_name.clone(),
                                delete_index,
                                wait,
                                ordering,
                            );
                            Update::DeleteFieldIndex(request)
                        }
                    }
                }
            };
            updates.push(UpdateOperation {
                update: Some(update_op),
            });
        }
        let batch_request = &UpdateBatchInternal {
            operations: updates,
        };
        let point_operation_response = self
            .with_points_client(|mut client| async move {
                client
                    .update_batch(tonic::Request::new(batch_request.clone()))
                    .await
            })
            .await?
            .into_inner();
        // Account hardware usage reported by the remote peer, if any.
        if let Some(hw_usage) = point_operation_response.hardware_usage {
            hw_measurement_acc.accumulate_request(hw_usage);
        }
        match point_operation_response.result {
            None => Err(CollectionError::service_error(
                "Malformed UpdateResult type".to_string(),
            )),
            Some(update_result) => update_result.try_into().map_err(|e: Status| e.into()),
        }
    }
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn forward_update(
&self,
operation: OperationWithClockTag,
wait: bool,
ordering: WriteOrdering,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
// `RemoteShard::execute_update_operation` is cancel safe, so this method is cancel safe.
self.execute_update_operation(
Some(self.id),
self.collection_id.clone(),
operation,
wait,
Some(ordering),
hw_measurement_acc,
)
.await
}
    /// Translate a single update operation into the matching internal gRPC call
    /// and execute it against the remote peer, recording update telemetry.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn execute_update_operation(
        &self,
        shard_id: Option<ShardId>,
        collection_name: String,
        operation: OperationWithClockTag,
        wait: bool,
        ordering: Option<WriteOrdering>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        // Cancelling remote request should always be safe on the client side and update API
        // *should be* cancel safe on the server side, so this method is cancel safe.
        let mut timer = ScopeDurationMeasurer::new(&self.telemetry_update_durations);
        // NOTE(review): within this block the flag is only flipped to `true` on the
        // staging `TestDelay` path — confirm the normal success path is not meant to
        // call `timer.set_success(true)` before returning.
        timer.set_success(false);
        // Dispatch on the operation variant; each arm builds the internal request
        // and calls the matching points-internal gRPC endpoint.
        let point_operation_response = match operation.operation {
            CollectionUpdateOperations::PointOperation(point_ops) => match point_ops {
                PointOperations::UpsertPoints(point_insert_operations) => {
                    let request = &internal_upsert_points(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        point_insert_operations,
                        wait,
                        ordering,
                    )?;
                    self.with_points_client(|mut client| async move {
                        client.upsert(tonic::Request::new(request.clone())).await
                    })
                    .await?
                    .into_inner()
                }
                PointOperations::UpsertPointsConditional(conditional_upsert) => {
                    let request = &internal_conditional_upsert_points(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        conditional_upsert,
                        wait,
                        ordering,
                    )?;
                    self.with_points_client(|mut client| async move {
                        client.upsert(tonic::Request::new(request.clone())).await
                    })
                    .await?
                    .into_inner()
                }
                PointOperations::DeletePoints { ids } => {
                    let request = &internal_delete_points(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        ids,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client.delete(tonic::Request::new(request.clone())).await
                    })
                    .await?
                    .into_inner()
                }
                PointOperations::DeletePointsByFilter(filter) => {
                    let request = &internal_delete_points_by_filter(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        filter,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client.delete(tonic::Request::new(request.clone())).await
                    })
                    .await?
                    .into_inner()
                }
                PointOperations::SyncPoints(operation) => {
                    let request = &internal_sync_points(
                        shard_id,
                        None, // TODO!?
                        collection_name,
                        operation,
                        wait,
                        ordering,
                    )?;
                    self.with_points_client(|mut client| async move {
                        client.sync(tonic::Request::new(request.clone())).await
                    })
                    .await?
                    .into_inner()
                }
                #[cfg(feature = "staging")]
                PointOperations::TestDelay(op) => {
                    // TODO: Add gRPC support to forward staging operations to remote shards
                    // For now, staging test delay operations only execute on local shards
                    let delay = std::time::Duration::from_secs_f64(op.duration.into_inner());
                    log::debug!(
                        "TestDelay: skipping remote shard {} (duration: {delay:?})",
                        self.id
                    );
                    timer.set_success(true);
                    return Ok(UpdateResult {
                        operation_id: None,
                        status: crate::operations::types::UpdateStatus::Completed,
                        clock_tag: operation.clock_tag,
                    });
                }
            },
            CollectionUpdateOperations::VectorOperation(vector_ops) => match vector_ops {
                VectorOperations::UpdateVectors(update_operation) => {
                    let request = &internal_update_vectors(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        update_operation,
                        wait,
                        ordering,
                    )?;
                    self.with_points_client(|mut client| async move {
                        client
                            .update_vectors(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                VectorOperations::DeleteVectors(ids, vector_names) => {
                    // NOTE(review): `vector_names` is owned and not used afterwards,
                    // so the `.clone()` below looks redundant — confirm and simplify.
                    let request = &internal_delete_vectors(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        ids.points,
                        vector_names.clone(),
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .delete_vectors(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                VectorOperations::DeleteVectorsByFilter(filter, vector_names) => {
                    let request = &internal_delete_vectors_by_filter(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        filter,
                        vector_names.clone(),
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .delete_vectors(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
            },
            CollectionUpdateOperations::PayloadOperation(payload_ops) => match payload_ops {
                PayloadOps::SetPayload(set_payload) => {
                    let request = &internal_set_payload(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        set_payload,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .set_payload(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                PayloadOps::DeletePayload(delete_payload) => {
                    let request = &internal_delete_payload(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        delete_payload,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .delete_payload(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                PayloadOps::ClearPayload { points } => {
                    let request = &internal_clear_payload(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        points,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .clear_payload(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                PayloadOps::ClearPayloadByFilter(filter) => {
                    let request = &internal_clear_payload_by_filter(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        filter,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .clear_payload(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                PayloadOps::OverwritePayload(set_payload) => {
                    // Overwrite reuses the set-payload request type but hits the
                    // overwriting endpoint.
                    let request = &internal_set_payload(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        set_payload,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .overwrite_payload(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
            },
            CollectionUpdateOperations::FieldIndexOperation(field_index_op) => match field_index_op
            {
                FieldIndexOperations::CreateIndex(create_index) => {
                    let request = &internal_create_index(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        create_index,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .create_field_index(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
                FieldIndexOperations::DeleteIndex(delete_index) => {
                    let request = &internal_delete_index(
                        shard_id,
                        operation.clock_tag,
                        collection_name,
                        delete_index,
                        wait,
                        ordering,
                    );
                    self.with_points_client(|mut client| async move {
                        client
                            .delete_field_index(tonic::Request::new(request.clone()))
                            .await
                    })
                    .await?
                    .into_inner()
                }
            },
        };
        // Account hardware usage reported by the remote peer, if any.
        if let Some(hw_usage) = point_operation_response.hardware_usage {
            hw_measurement_acc.accumulate_request(hw_usage);
        }
        match point_operation_response.result {
            None => Err(CollectionError::service_error(
                "Malformed UpdateResult type".to_string(),
            )),
            Some(update_result) => update_result.try_into().map_err(|e: Status| e.into()),
        }
    }
/// Recover a shard at the remote from the given public `url`.
///
/// # Warning
///
/// This method specifies a timeout of 24 hours.
///
/// Setting an API key may leak when requesting a snapshot file from a malicious server.
/// This is potentially dangerous if a user has control over what URL is accessed.
///
/// # Cancel safety
///
/// This method is cancel safe.
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/forward_proxy_shard.rs | lib/collection/src/shards/forward_proxy_shard.rs | use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use ahash::HashSet;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use common::types::TelemetryDetail;
use parking_lot::Mutex as ParkingMutex;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::data_types::manifest::SnapshotManifest;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{
ExtendedPointId, Filter, PointIdType, ScoredPoint, SizeStats, SnapshotFormat, WithPayload,
WithPayloadInterface, WithVector,
};
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use tokio::sync::Mutex;
use super::shard::ShardId;
use super::update_tracker::UpdateTracker;
use crate::collection_manager::optimizers::TrackerLog;
use crate::hash_ring::HashRingRouter;
use crate::operations::point_ops::{
PointInsertOperationsInternal, PointOperations, PointStructPersisted, PointSyncOperation,
};
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CountRequestInternal, CountResult,
OptimizersStatus, PointRequestInternal, ScrollRequestInternal, UpdateResult, UpdateStatus,
};
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::operations::{
CollectionUpdateOperations, CreateIndex, FieldIndexOperations, OperationToShard,
OperationWithClockTag, SplitByShard as _,
};
use crate::shards::local_shard::LocalShard;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::LocalShardTelemetry;
/// ForwardProxyShard
///
/// ForwardProxyShard is a wrapper type for a LocalShard.
///
/// It can be used to provide all read and write operations while the wrapped shard is being transferred to another node.
/// Proxy forwards all operations to remote shards.
pub struct ForwardProxyShard {
    // ID of the wrapped (local) shard; may differ from `remote_shard.id` during resharding
    shard_id: ShardId,
    // Local shard that serves reads and receives every write first
    pub(crate) wrapped_shard: LocalShard,
    // Remote destination that writes are mirrored to
    pub(crate) remote_shard: RemoteShard,
    // Set for resharding transfers: selects the part of each operation that
    // belongs to the remote shard (mutually exclusive with `filter`)
    resharding_hash_ring: Option<HashRingRouter>,
    // Set when only a filtered subset of points is replicated (mutually
    // exclusive with `resharding_hash_ring`)
    filter: Option<Box<Filter>>,
    /// Lock required to protect transfer-in-progress updates.
    /// It should block data updating operations while the batch is being transferred.
    update_lock: Mutex<()>,
}
impl ForwardProxyShard {
#[allow(clippy::result_large_err)]
pub fn new(
shard_id: ShardId,
wrapped_shard: LocalShard,
remote_shard: RemoteShard,
resharding_hash_ring: Option<HashRingRouter>,
filter: Option<Filter>,
) -> Result<Self, (CollectionError, LocalShard)> {
// Validate that `ForwardProxyShard` initialized correctly
if resharding_hash_ring.is_some() && filter.is_some() {
return Err((CollectionError::forward_proxy_error(
remote_shard.peer_id,
"ForwardProxyShard cannot have both resharding_hash_ring and filter set at the same time".to_string(),
), wrapped_shard));
}
debug_assert!({
let is_regular = shard_id == remote_shard.id && resharding_hash_ring.is_none();
let is_resharding = shard_id != remote_shard.id && resharding_hash_ring.is_some();
let is_replicating_points = shard_id != remote_shard.id && filter.is_some();
is_regular || is_resharding || is_replicating_points
});
if shard_id == remote_shard.id && resharding_hash_ring.is_some() {
log::warn!(
"ForwardProxyShard initialized with resharding hashring, \
but wrapped shard id and remote shard id are the same",
);
}
Ok(Self {
shard_id,
wrapped_shard,
remote_shard,
resharding_hash_ring,
filter: filter.map(Box::new),
update_lock: Mutex::new(()),
})
}
/// Create payload indexes in the remote shard same as in the wrapped shard.
///
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn transfer_indexes(&self) -> CollectionResult<()> {
let _update_lock = self.update_lock.lock().await;
for (index_key, index_type) in self.wrapped_shard.info().await?.payload_schema {
// TODO: Is cancelling `RemoteShard::update` safe for *receiver*?
self.remote_shard
.update(
// TODO: Assign clock tag!? 🤔
OperationWithClockTag::from(CollectionUpdateOperations::FieldIndexOperation(
FieldIndexOperations::CreateIndex(CreateIndex {
field_name: index_key,
field_schema: Some(index_type.try_into()?),
}),
)),
false,
HwMeasurementAcc::disposable(), // Internal operation
)
.await?;
}
Ok(())
}
    /// Move batch of points to the remote shard
    ///
    /// Returns new point offset and actual number of transferred points. The new point offset can
    /// be used to start the next batch from.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn transfer_batch(
        &self,
        offset: Option<PointIdType>,
        batch_size: usize,
        hashring_filter: Option<&HashRingRouter>,
        merge_points: bool,
        runtime_handle: &Handle,
    ) -> CollectionResult<(Option<PointIdType>, usize)> {
        debug_assert!(batch_size > 0);
        // Hold the update lock so concurrent writes cannot interleave with this
        // read-then-forward sequence.
        let _update_lock = self.update_lock.lock().await;
        let (points, next_page_offset) = match hashring_filter {
            Some(hashring_filter) => {
                self.read_batch_with_hashring(offset, batch_size, hashring_filter, runtime_handle)
                    .await?
            }
            None => {
                self.read_batch(offset, batch_size, self.filter.as_deref(), runtime_handle)
                    .await?
            }
        };
        // Only wait on last batch
        let wait = next_page_offset.is_none();
        let count = points.len();
        // Use sync API to leverage potentially existing points
        // Normally use SyncPoints, to completely replace everything in the target shard
        // For resharding we need to merge points from multiple transfers, requiring a different operation
        // Same when there is a filter, as we are only transferring a subset of points
        let point_operation = if !merge_points {
            PointOperations::SyncPoints(PointSyncOperation {
                from_id: offset,
                to_id: next_page_offset,
                points,
            })
        } else {
            PointOperations::UpsertPoints(PointInsertOperationsInternal::PointsList(points))
        };
        let insert_points_operation = CollectionUpdateOperations::PointOperation(point_operation);
        self.remote_shard
            .update(
                OperationWithClockTag::from(insert_points_operation),
                wait,
                HwMeasurementAcc::disposable(), // Internal operation
            ) // TODO: Assign clock tag!? 🤔
            .await?;
        Ok((next_page_offset, count))
    }
/// Read a batch of points to transfer to the remote shard
///
/// This function is optimized for reading and transferring 100% of the points in this shard
/// without filtering. If you need to filter by hash ring, use [`read_batch_with_hashring`]
/// instead.
///
/// Returns batch of points and new point offset. The new point offset can be used to start the
/// next batch from.
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn read_batch(
&self,
offset: Option<PointIdType>,
batch_size: usize,
filter: Option<&Filter>,
runtime_handle: &Handle,
) -> CollectionResult<(Vec<PointStructPersisted>, Option<PointIdType>)> {
let limit = batch_size + 1;
let mut batch = self
.wrapped_shard
.local_scroll_by_id(
offset,
limit,
&WithPayloadInterface::Bool(true),
&WithVector::Bool(true),
filter,
runtime_handle,
None, // No timeout
HwMeasurementAcc::disposable(), // Internal operation, no need to measure hardware here.
)
.await?;
let next_page_offset = (batch.len() >= limit).then(|| batch.pop().unwrap().id);
let points = batch
.into_iter()
.map(PointStructPersisted::try_from)
.collect::<Result<Vec<PointStructPersisted>, String>>()?;
Ok((points, next_page_offset))
}
    /// Read a batch of points using a hash ring to transfer to the remote shard
    ///
    /// Only the points that satisfy the hash ring filter will be transferred.
    ///
    /// This applies oversampling in case of resharding to account for points that will be filtered
    /// out by the hash ring. Each batch of points should therefore be roughly `batch_size`, but it
    /// may be a bit smaller or larger.
    ///
    /// It is optimized for reading and transferring only a fraction of the points in this shard by
    /// using a hash ring. If you need to read and transfer 100% of the points, use [`read_batch`]
    /// instead.
    ///
    /// Returns batch of points and new point offset. The new point offset can be used to start the
    /// next batch from.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    async fn read_batch_with_hashring(
        &self,
        offset: Option<PointIdType>,
        batch_size: usize,
        hashring_filter: &HashRingRouter,
        runtime_handle: &Handle,
    ) -> CollectionResult<(Vec<PointStructPersisted>, Option<PointIdType>)> {
        // Oversample batch size to account for points that will be filtered out by the hash ring
        let oversample_factor = match &hashring_filter {
            HashRingRouter::Single(_) => 1,
            // - resharding: 1 -> 2, transfer 50%, factor 2
            // - resharding: 2 -> 3, transfer 33%, factor 3
            // - resharding: 3 -> 4, transfer 25%, factor 4
            // - resharding: 2 -> 1, transfer 100%, factor 1
            // - resharding: 3 -> 2, transfer 50%, factor 2
            // - resharding: 4 -> 3, transfer 33%, factor 3
            HashRingRouter::Resharding { old: _, new } => new.len().max(1),
        };
        let limit = (batch_size * oversample_factor) + 1;
        // Read only point IDs without point data
        // We first make a preselection of those point IDs by applying the hash ring filter, and
        // then we read the actual point data in a separate request. It prevents reading a lot of
        // data we immediately discard due to the hash ring. That is much more efficient,
        // especially on large deployments when only a small fraction of points needs to be
        // transferred.
        let mut batch = self
            .wrapped_shard
            .local_scroll_by_id(
                offset,
                limit,
                &WithPayloadInterface::Bool(false),
                &WithVector::Bool(false),
                None,
                runtime_handle,
                None, // No timeout
                HwMeasurementAcc::disposable(), // Internal operation, no need to measure hardware here.
            )
            .await?;
        // The extra record (if read) means another page exists; its ID becomes the next offset.
        let next_page_offset = (batch.len() >= limit).then(|| batch.pop().unwrap().id);
        // Make preselection of point IDs by hash ring
        let ids = batch
            .into_iter()
            .map(|point| point.id)
            .filter(|point_id| hashring_filter.is_in_shard(point_id, self.remote_shard.id))
            .collect();
        // Read actual vectors and payloads for preselection of points
        let request = PointRequestInternal {
            ids,
            with_payload: Some(WithPayloadInterface::Bool(true)),
            with_vector: WithVector::Bool(true),
        };
        let batch = self
            .wrapped_shard
            .retrieve(
                Arc::new(request),
                &WithPayload::from(true),
                &WithVector::Bool(true),
                runtime_handle,
                None, // No timeout
                HwMeasurementAcc::disposable(), // Internal operation, no need to measure hardware here.
            )
            .await?;
        let points = batch
            .into_iter()
            .map(PointStructPersisted::try_from)
            .collect::<Result<Vec<PointStructPersisted>, String>>()?;
        Ok((points, next_page_offset))
    }
pub fn deconstruct(self) -> (LocalShard, RemoteShard) {
(self.wrapped_shard, self.remote_shard)
}
/// Forward `create_snapshot` to `wrapped_shard`
pub async fn create_snapshot(
&self,
temp_path: &Path,
tar: &tar_ext::BuilderExt,
format: SnapshotFormat,
manifest: Option<SnapshotManifest>,
save_wal: bool,
) -> CollectionResult<()> {
self.wrapped_shard
.create_snapshot(temp_path, tar, format, manifest, save_wal)
.await
}
pub async fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
self.wrapped_shard.snapshot_manifest().await
}
pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
self.wrapped_shard.on_optimizer_config_update().await
}
pub async fn on_strict_mode_config_update(&mut self) {
self.wrapped_shard.on_strict_mode_config_update().await
}
pub fn trigger_optimizers(&self) {
self.wrapped_shard.trigger_optimizers();
}
pub async fn get_telemetry_data(
&self,
detail: TelemetryDetail,
timeout: Duration,
) -> CollectionResult<LocalShardTelemetry> {
self.wrapped_shard.get_telemetry_data(detail, timeout).await
}
pub async fn get_optimization_status(
&self,
timeout: Duration,
) -> CollectionResult<OptimizersStatus> {
self.wrapped_shard.get_optimization_status(timeout).await
}
pub async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> {
self.wrapped_shard.get_size_stats(timeout).await
}
pub fn update_tracker(&self) -> &UpdateTracker {
self.wrapped_shard.update_tracker()
}
pub fn optimizers_log(&self) -> Arc<ParkingMutex<TrackerLog>> {
self.wrapped_shard.optimizers_log()
}
pub async fn estimate_cardinality(
&self,
filter: Option<&Filter>,
hw_measurement_acc: &HwMeasurementAcc,
) -> CollectionResult<CardinalityEstimation> {
self.wrapped_shard
.estimate_cardinality(filter, hw_measurement_acc)
.await
}
}
#[async_trait]
impl ShardOperation for ForwardProxyShard {
    /// Update `wrapped_shard` while keeping track of the changed points
    ///
    /// Applies the operation locally first (always waiting for the result), then
    /// forwards it — possibly split or filtered — to the remote shard, and merges
    /// the two `UpdateResult`s.
    ///
    /// # Cancel safety
    ///
    /// This method is *not* cancel safe.
    async fn update(
        &self,
        operation: OperationWithClockTag,
        _wait: bool,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        // If we apply `local_shard` update, we *have to* execute `remote_shard` update to completion
        // (or we *might* introduce an inconsistency between shards?), so this method is not cancel
        // safe.
        let _update_lock = self.update_lock.lock().await;
        // Shard update is within a write lock scope, because we need a way to block the shard updates
        // during the transfer restart and finalization.
        // We always have to wait for the result of the update, cause after we release the lock,
        // the transfer needs to have access to the latest version of points.
        let mut result = self
            .wrapped_shard
            .update(operation.clone(), true, hw_measurement_acc.clone())
            .await?;
        // In filtered-replication mode, determine which of the upserted points
        // actually match the configured filter after the local update applied.
        let points_matching_filter_before = {
            if let Some(filter) = &self.filter
                && let Some(point_ids) = operation.operation.upsert_point_ids()
            {
                let filter = filter.clone();
                let affected_points: HashSet<_> = self
                    .wrapped_shard
                    .local_scroll_by_id(
                        None,
                        point_ids.len(),
                        &WithPayloadInterface::Bool(false),
                        &WithVector::Bool(false),
                        Some(&filter.with_point_ids(point_ids)),
                        &Handle::current(),
                        None, // No timeout
                        HwMeasurementAcc::disposable(), // Internal operation, no need to measure hardware here?
                    )
                    .await?
                    .into_iter()
                    .map(|record| record.id)
                    .collect();
                // Operation is applicable to a subset of points, only forward those
                Some(affected_points)
            } else {
                // If operation doesn't create new points, we can safely forward it as-is
                // Worst-case, it will error out with "point not found" on the remote side
                // Which we will ignore anyway
                None
            }
        };
        let forward_operation = if let Some(ring) = &self.resharding_hash_ring {
            // If `ForwardProxyShard::resharding_hash_ring` is `Some`, we assume that proxy is used
            // during *resharding* shard transfer, which forwards points to a remote shard with
            // *different* shard ID.
            debug_assert_ne!(self.shard_id, self.remote_shard.id);
            // Only forward a *part* of the operation that belongs to remote shard.
            let op = match operation.operation.split_by_shard(ring) {
                OperationToShard::ToAll(op) => Some(op),
                OperationToShard::ByShard(by_shard) => by_shard
                    .into_iter()
                    .find(|&(shard_id, _)| shard_id == self.remote_shard.id)
                    .map(|(_, op)| op),
            };
            op.map(|op| OperationWithClockTag::new(op, operation.clock_tag))
        } else if let Some(point_ids) = points_matching_filter_before {
            // Filtered replication: forward only the points that matched the filter.
            let mut modified_operation = operation.clone();
            modified_operation
                .operation
                .retain_point_ids(|point_id| point_ids.contains(point_id));
            Some(modified_operation)
        } else {
            #[cfg(debug_assertions)]
            if self.filter.is_none() {
                // If `ForwardProxyShard` `resharding_hash_ring` and `filter` are `None`, we assume that proxy is used
                // during *regular* shard transfer, so operation can be forwarded as-is, without any
                // additional handling.
                debug_assert_eq!(self.shard_id, self.remote_shard.id);
            }
            Some(operation)
        };
        // Nothing to forward (e.g. no part of the operation maps to the remote shard).
        let Some(mut operation) = forward_operation else {
            return Ok(result);
        };
        // Strip the clock tag from the operation, because clock tags are incompatible between different shards.
        if self.shard_id != self.remote_shard.id {
            operation.clock_tag = None;
        };
        let remote_result = self
            .remote_shard
            .update(operation, false, hw_measurement_acc)
            .await
            .map_err(|err| CollectionError::forward_proxy_error(self.remote_shard.peer_id, err))?;
        // Merge `result` and `remote_result`:
        //
        // - Pick `clock_tag` with *newer* `clock_tick`
        let tick = result.clock_tag.map(|tag| tag.clock_tick);
        let remote_tick = remote_result.clock_tag.map(|tag| tag.clock_tick);
        if remote_tick > tick || tick.is_none() {
            result.clock_tag = remote_result.clock_tag;
        }
        // - If any node *rejected* the operation, propagate `UpdateStatus::ClockRejected`
        if remote_result.status == UpdateStatus::ClockRejected {
            result.status = UpdateStatus::ClockRejected;
        }
        Ok(result)
    }
/// Forward read-only `scroll_by` to `wrapped_shard`
async fn scroll_by(
&self,
request: Arc<ScrollRequestInternal>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.scroll_by(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn local_scroll_by_id(
&self,
offset: Option<ExtendedPointId>,
limit: usize,
with_payload_interface: &WithPayloadInterface,
with_vector: &WithVector,
filter: Option<&Filter>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.local_scroll_by_id(
offset,
limit,
with_payload_interface,
with_vector,
filter,
search_runtime_handle,
timeout,
hw_measurement_acc,
)
.await
}
async fn info(&self) -> CollectionResult<CollectionInfo> {
let local_shard = &self.wrapped_shard;
local_shard.info().await
}
async fn core_search(
&self,
request: Arc<CoreSearchRequestBatch>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
let local_shard = &self.wrapped_shard;
local_shard
.core_search(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn count(
&self,
request: Arc<CountRequestInternal>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<CountResult> {
let local_shard = &self.wrapped_shard;
local_shard
.count(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn retrieve(
&self,
request: Arc<PointRequestInternal>,
with_payload: &WithPayload,
with_vector: &WithVector,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
let local_shard = &self.wrapped_shard;
local_shard
.retrieve(
request,
with_payload,
with_vector,
search_runtime_handle,
timeout,
hw_measurement_acc,
)
.await
}
async fn query_batch(
&self,
requests: Arc<Vec<ShardQueryRequest>>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ShardQueryResponse>> {
let local_shard = &self.wrapped_shard;
local_shard
.query_batch(requests, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
async fn facet(
&self,
request: Arc<FacetParams>,
search_runtime_handle: &Handle,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<FacetResponse> {
let local_shard = &self.wrapped_shard;
local_shard
.facet(request, search_runtime_handle, timeout, hw_measurement_acc)
.await
}
    /// Consume the proxy and stop the wrapped local shard gracefully.
    async fn stop_gracefully(self) {
        self.wrapped_shard.stop_gracefully().await
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/collection_shard_distribution.rs | lib/collection/src/shards/collection_shard_distribution.rs | use std::collections::HashSet;
use ahash::AHashMap;
use crate::collection_state::ShardInfo;
use crate::shards::shard::{PeerId, ShardId};
/// Describes where each shard of a collection lives in the cluster.
#[derive(Debug, Clone)]
pub struct CollectionShardDistribution {
    /// For every shard ID, the set of peers that host a replica of it.
    pub shards: AHashMap<ShardId, HashSet<PeerId>>,
}
impl CollectionShardDistribution {
pub fn all_local(shard_number: Option<u32>, this_peer_id: PeerId) -> Self {
Self {
shards: (0..shard_number.unwrap_or(1))
.map(|shard_id| (shard_id, HashSet::from([this_peer_id])))
.collect(),
}
}
pub fn from_shards_info(shards_info: AHashMap<ShardId, ShardInfo>) -> Self {
Self {
shards: shards_info
.into_iter()
.map(|(shard_id, info)| (shard_id, info.replicas.into_keys().collect()))
.collect(),
}
}
pub fn shard_count(&self) -> usize {
self.shards.len()
}
pub fn shard_replica_count(&self) -> usize {
self.shards.values().map(|shard| shard.len()).sum()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/resharding.rs | lib/collection/src/shards/resharding.rs | use std::fmt;
use schemars::JsonSchema;
use segment::types::ShardKey;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use super::shard::{PeerId, ShardId};
use crate::operations::cluster_ops::ReshardingDirection;
/// Persisted state of a single resharding task.
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct ReshardState {
    /// Unique identifier of the resharding task.
    pub uuid: Uuid,
    /// Peer associated with this resharding task.
    pub peer_id: PeerId,
    /// Shard affected by this resharding task.
    pub shard_id: ShardId,
    /// Shard key of the affected shard, if any.
    pub shard_key: Option<ShardKey>,
    /// Whether the collection is being scaled up or down.
    pub direction: ReshardingDirection,
    /// Current stage of the resharding process (see `ReshardStage` ordering).
    pub stage: ReshardStage,
}
impl ReshardState {
    /// Start tracking a new resharding task at the initial `MigratingPoints` stage.
    pub fn new(
        uuid: Uuid,
        direction: ReshardingDirection,
        peer_id: PeerId,
        shard_id: ShardId,
        shard_key: Option<ShardKey>,
    ) -> Self {
        Self {
            uuid,
            direction,
            peer_id,
            shard_id,
            shard_key,
            stage: ReshardStage::MigratingPoints,
        }
    }

    /// Whether this state describes the resharding task identified by `key`.
    pub fn matches(&self, key: &ReshardKey) -> bool {
        // A state matches exactly when its own key equals the given key
        self.key() == *key
    }

    /// Unique identifier of this resharding task.
    pub fn key(&self) -> ReshardKey {
        ReshardKey {
            uuid: self.uuid,
            direction: self.direction,
            peer_id: self.peer_id,
            shard_id: self.shard_id,
            shard_key: self.shard_key.clone(),
        }
    }
}
/// Reshard stages
///
/// # Warning
///
/// This enum is ordered! Later variants compare greater than earlier ones, so stage
/// progression can be checked with `<`/`>=`.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ReshardStage {
    /// Initial stage: points are being migrated.
    #[default]
    MigratingPoints,
    /// The hash ring used for reads has been committed.
    ReadHashRingCommitted,
    /// The hash ring used for writes has been committed.
    WriteHashRingCommitted,
}
/// Unique identifier of a resharding task
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize, JsonSchema)]
pub struct ReshardKey {
    // Hidden from the JSON schema: internal task identifier
    #[schemars(skip)]
    pub uuid: Uuid,
    /// Whether the collection is being scaled up or down.
    #[serde(default)]
    pub direction: ReshardingDirection,
    /// Peer associated with the resharding task.
    pub peer_id: PeerId,
    /// Shard affected by the resharding task.
    pub shard_id: ShardId,
    /// Shard key of the affected shard, if any.
    pub shard_key: Option<ShardKey>,
}
impl fmt::Display for ReshardKey {
    // Renders as `{peer_id}/{shard_id}/{shard_key:?}`; the UUID and direction are omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}/{:?}", self.peer_id, self.shard_id, self.shard_key)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/queue_proxy_shard.rs | lib/collection/src/shards/queue_proxy_shard.rs | use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use common::types::TelemetryDetail;
use parking_lot::Mutex as ParkingMutex;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::data_types::manifest::SnapshotManifest;
use segment::index::field_index::CardinalityEstimation;
use segment::types::{
ExtendedPointId, Filter, ScoredPoint, SizeStats, SnapshotFormat, WithPayload,
WithPayloadInterface, WithVector,
};
use semver::Version;
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use tokio::sync::Mutex;
use super::remote_shard::RemoteShard;
use super::transfer::driver::MAX_RETRY_COUNT;
use super::transfer::transfer_tasks_pool::TransferTaskProgress;
use super::update_tracker::UpdateTracker;
use crate::collection_manager::optimizers::TrackerLog;
use crate::operations::OperationWithClockTag;
use crate::operations::point_ops::WriteOrdering;
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CountRequestInternal, CountResult,
OptimizersStatus, PointRequestInternal, ScrollRequestInternal, UpdateResult,
};
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::shards::telemetry::LocalShardTelemetry;
/// Number of operations in batch when syncing
const BATCH_SIZE: usize = 10;
/// Number of times to retry transferring updates batch
const BATCH_RETRIES: usize = MAX_RETRY_COUNT;
/// Minimal remote peer version that accepts a whole batch of WAL updates in a single request;
/// older peers are sent operations one by one (see `transfer_operations_batch`).
const MINIMAL_VERSION_FOR_BATCH_WAL_TRANSFER: Version = Version::new(1, 14, 1);
/// QueueProxyShard shard
///
/// QueueProxyShard is a wrapper type for a LocalShard.
///
/// It can be used to provide all read and write operations while the wrapped shard is being
/// snapshotted and transferred to another node. It keeps track of all collection updates since its
/// creation, and allows to transfer these updates to a remote shard at a given time to assure
/// consistency.
///
/// This keeps track of all updates through the WAL of the wrapped shard. It therefore doesn't have
/// any memory overhead while updates are accumulated. This type is called 'queue' even though it
/// doesn't use a real queue, just so it is easy to understand its purpose.
pub struct QueueProxyShard {
    /// Inner queue proxy shard.
    ///
    /// This is always `Some` until `finalize()` is called. This architecture is used to allow
    /// taking out the queue proxy shard for destructing when finalizing. Destructing the current
    /// type directly is not possible because it implements `Drop`.
    ///
    /// Accessors panic (`inner_unchecked`) if used after finalization.
    inner: Option<Inner>,
}
impl QueueProxyShard {
    /// Queue proxy the given local shard and point to the remote shard.
    ///
    /// This starts queueing all new updates on the local shard at the point of creation.
    pub async fn new(
        wrapped_shard: LocalShard,
        remote_shard: RemoteShard,
        wal_keep_from: Arc<AtomicU64>,
        progress: Arc<ParkingMutex<TransferTaskProgress>>,
    ) -> Self {
        Self {
            inner: Some(Inner::new(wrapped_shard, remote_shard, wal_keep_from, progress).await),
        }
    }

    /// Queue proxy the given local shard and point to the remote shard, from a specific WAL version.
    ///
    /// This queues all (existing) updates from a specific WAL `version` and onwards. In other
    /// words, this will ensure we transfer updates we already have and all new updates from a
    /// specific point in our WAL. The `version` may be in the past, but must always be within
    /// range of the current WAL.
    ///
    /// # Errors
    ///
    /// This fails if the given `version` is not in bounds of our current WAL. If the given
    /// `version` is too old or too new, queue proxy creation is rejected. On failure the wrapped
    /// local shard is returned to the caller alongside the error.
    pub async fn new_from_version(
        wrapped_shard: LocalShard,
        remote_shard: RemoteShard,
        wal_keep_from: Arc<AtomicU64>,
        version: u64,
        progress: Arc<ParkingMutex<TransferTaskProgress>>,
    ) -> Result<Self, (LocalShard, CollectionError)> {
        // Lock WAL until we've successfully created the queue proxy shard
        let wal = wrapped_shard.wal.wal.clone();
        let wal_lock = wal.lock().await;
        // If start version is not in current WAL bounds [first_idx, last_idx + 1], we cannot reliably transfer WAL
        // Allow it to be one higher than the last index to only send new updates
        let (first_idx, last_idx) = (wal_lock.first_closed_index(), wal_lock.last_index());
        if !(first_idx..=last_idx + 1).contains(&version) {
            return Err((
                wrapped_shard,
                CollectionError::service_error(format!(
                    "Cannot create queue proxy shard from version {version} because it is out of WAL bounds ({first_idx}..={last_idx})",
                )),
            ));
        }
        Ok(Self {
            inner: Some(Inner::new_from_version(
                wrapped_shard,
                remote_shard,
                wal_keep_from,
                version,
                progress,
            )),
        })
    }

    /// Get the wrapped local shard, or `None` if the proxy has been finalized.
    pub(super) fn wrapped_shard(&self) -> Option<&LocalShard> {
        self.inner.as_ref().map(|inner| &inner.wrapped_shard)
    }

    /// Get inner queue proxy shard. Will panic if the queue proxy has been finalized.
    fn inner_unchecked(&self) -> &Inner {
        self.inner.as_ref().expect("Queue proxy has been finalized")
    }

    /// Get mutable inner queue proxy shard. Will panic if the queue proxy has been finalized.
    fn inner_mut_unchecked(&mut self) -> &mut Inner {
        self.inner.as_mut().expect("Queue proxy has been finalized")
    }

    /// Forward snapshot creation to the wrapped local shard.
    pub async fn create_snapshot(
        &self,
        temp_path: &Path,
        tar: &tar_ext::BuilderExt,
        format: SnapshotFormat,
        manifest: Option<SnapshotManifest>,
        save_wal: bool,
    ) -> CollectionResult<()> {
        self.inner_unchecked()
            .wrapped_shard
            .create_snapshot(temp_path, tar, format, manifest, save_wal)
            .await
    }

    /// Forward snapshot manifest retrieval to the wrapped local shard.
    pub async fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
        self.inner_unchecked()
            .wrapped_shard
            .snapshot_manifest()
            .await
    }

    /// Transfer all updates that the remote missed from WAL
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    ///
    /// If cancelled - none, some or all operations may be transmitted to the remote.
    ///
    /// The internal field keeping track of the last transfer and maximum acknowledged WAL version
    /// likely won't be updated. In the worst case this might cause double sending operations.
    /// This should be fine as operations are idempotent.
    pub async fn transfer_all_missed_updates(&self) -> CollectionResult<()> {
        self.inner_unchecked().transfer_all_missed_updates().await
    }

    /// Forward optimizer config update to the wrapped local shard.
    pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
        self.inner_unchecked()
            .wrapped_shard
            .on_optimizer_config_update()
            .await
    }

    /// Forward strict mode config update to the wrapped local shard.
    pub async fn on_strict_mode_config_update(&mut self) {
        self.inner_mut_unchecked()
            .wrapped_shard
            .on_strict_mode_config_update()
            .await
    }

    /// Trigger optimizers on the wrapped local shard.
    pub fn trigger_optimizers(&self) {
        self.inner_unchecked().wrapped_shard.trigger_optimizers();
    }

    /// Forward telemetry collection to the wrapped local shard.
    pub async fn get_telemetry_data(
        &self,
        detail: TelemetryDetail,
        timeout: Duration,
    ) -> CollectionResult<LocalShardTelemetry> {
        self.inner_unchecked()
            .wrapped_shard
            .get_telemetry_data(detail, timeout)
            .await
    }

    /// Forward optimization status retrieval to the wrapped local shard.
    pub async fn get_optimization_status(
        &self,
        timeout: Duration,
    ) -> CollectionResult<OptimizersStatus> {
        self.inner_unchecked()
            .wrapped_shard
            .get_optimization_status(timeout)
            .await
    }

    /// Forward size statistics retrieval to the wrapped local shard.
    pub async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> {
        self.inner_unchecked()
            .wrapped_shard
            .get_size_stats(timeout)
            .await
    }

    /// Update tracker of the wrapped local shard.
    pub fn update_tracker(&self) -> &UpdateTracker {
        self.inner_unchecked().wrapped_shard.update_tracker()
    }

    /// Optimizer tracker log of the wrapped local shard.
    pub fn optimizers_log(&self) -> Arc<ParkingMutex<TrackerLog>> {
        self.inner_unchecked().wrapped_shard.optimizers_log()
    }

    /// Check if the queue proxy shard is already finalized
    #[cfg(debug_assertions)]
    fn is_finalized(&self) -> bool {
        self.inner.is_none()
    }

    /// Forget all updates and finalize.
    ///
    /// Forget all missed updates since creation of this queue proxy shard and finalize. This
    /// unwraps the inner wrapped and remote shard.
    ///
    /// It also releases the max acknowledged WAL version.
    ///
    /// # Warning
    ///
    /// This intentionally forgets and drops updates pending to be transferred to the remote shard.
    /// The remote shard is therefore left in an inconsistent state, which should be resolved
    /// separately.
    pub fn forget_updates_and_finalize(mut self) -> (LocalShard, RemoteShard) {
        // Unwrap queue proxy shards and release max acknowledged version for WAL
        let queue_proxy = self
            .inner
            .take()
            .expect("Queue proxy has already been finalized");
        queue_proxy.set_wal_keep_from(None);
        (queue_proxy.wrapped_shard, queue_proxy.remote_shard)
    }

    /// Forward cardinality estimation to the wrapped local shard.
    pub async fn estimate_cardinality(
        &self,
        filter: Option<&Filter>,
        hw_measurement_acc: &HwMeasurementAcc,
    ) -> CollectionResult<CardinalityEstimation> {
        self.inner_unchecked()
            .wrapped_shard
            .estimate_cardinality(filter, hw_measurement_acc)
            .await
    }
}
#[async_trait]
impl ShardOperation for QueueProxyShard {
    /// Update `wrapped_shard` while keeping track of operations
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    async fn update(
        &self,
        operation: OperationWithClockTag,
        wait: bool,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        // `Inner::update` is cancel safe, so this is also cancel safe.
        self.inner_unchecked()
            .update(operation, wait, hw_measurement_acc)
            .await
    }

    /// Forward read-only `scroll_by` to `wrapped_shard`
    async fn scroll_by(
        &self,
        request: Arc<ScrollRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.inner_unchecked()
            .scroll_by(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `local_scroll_by_id` to `wrapped_shard`
    async fn local_scroll_by_id(
        &self,
        offset: Option<ExtendedPointId>,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.inner_unchecked()
            .local_scroll_by_id(
                offset,
                limit,
                with_payload_interface,
                with_vector,
                filter,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            )
            .await
    }

    /// Forward read-only `info` to `wrapped_shard`
    async fn info(&self) -> CollectionResult<CollectionInfo> {
        self.inner_unchecked().info().await
    }

    /// Forward read-only `core_search` to `wrapped_shard`
    async fn core_search(
        &self,
        request: Arc<CoreSearchRequestBatch>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        self.inner_unchecked()
            .core_search(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `count` to `wrapped_shard`
    async fn count(
        &self,
        request: Arc<CountRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<CountResult> {
        self.inner_unchecked()
            .count(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `retrieve` to `wrapped_shard`
    async fn retrieve(
        &self,
        request: Arc<PointRequestInternal>,
        with_payload: &WithPayload,
        with_vector: &WithVector,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        self.inner_unchecked()
            .retrieve(
                request,
                with_payload,
                with_vector,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            )
            .await
    }

    /// Forward read-only `query` to `wrapped_shard`
    async fn query_batch(
        &self,
        requests: Arc<Vec<ShardQueryRequest>>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>> {
        self.inner_unchecked()
            .wrapped_shard
            .query_batch(requests, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `facet` to `wrapped_shard`
    async fn facet(
        &self,
        request: Arc<FacetParams>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<FacetResponse> {
        self.inner_unchecked()
            .wrapped_shard
            .facet(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Stop the wrapped shard gracefully.
    ///
    /// A queue proxy is expected to be finalized (un-proxied) before being stopped; reaching
    /// this path is flagged as a bug in debug builds, but the wrapped shard is still stopped
    /// so it is not leaked.
    async fn stop_gracefully(mut self) {
        if let Some(inner) = self.inner.take() {
            debug_assert!(
                false,
                "QueueProxyShard should be finalized before stopping gracefully"
            );
            inner.wrapped_shard.stop_gracefully().await;
        }
    }
}
// Safe guard in debug mode to ensure that `finalize()` is called before dropping.
// Skipped while panicking, so an unrelated panic doesn't cascade into a double panic.
#[cfg(debug_assertions)]
impl Drop for QueueProxyShard {
    fn drop(&mut self) {
        if !self.is_finalized() && !std::thread::panicking() {
            panic!("To drop a queue proxy shard, finalize() must be used");
        }
    }
}
/// Actual state of a queue proxy shard; taken out of `QueueProxyShard` on finalization.
struct Inner {
    /// Wrapped local shard to operate on.
    pub(super) wrapped_shard: LocalShard,
    /// Wrapped remote shard, to transfer operations to.
    pub(super) remote_shard: RemoteShard,
    /// WAL record at which we started the transfer.
    started_at: u64,
    /// ID of the WAL operation we should transfer next. We consider everything before it to be
    /// transferred.
    transfer_from: AtomicU64,
    /// Lock required to protect transfer-in-progress updates.
    /// It should block data updating operations while the batch is being transferred.
    update_lock: Mutex<()>,
    /// Always keep this WAL version and later and prevent acknowledgment/truncation from the WAL.
    /// We keep it here for access in `set_wal_keep_from()` without needing async locks.
    /// See `set_wal_keep_from()` and `UpdateHandler::wal_keep_from` for more details.
    /// Defaults to `u64::MAX` to allow acknowledging all confirmed versions.
    wal_keep_from: Arc<AtomicU64>,
    /// Progression tracker.
    progress: Arc<ParkingMutex<TransferTaskProgress>>,
}
impl Inner {
    /// Create a queue proxy that forwards only updates arriving after creation time.
    pub async fn new(
        wrapped_shard: LocalShard,
        remote_shard: RemoteShard,
        wal_keep_from: Arc<AtomicU64>,
        progress: Arc<ParkingMutex<TransferTaskProgress>>,
    ) -> Self {
        // Start right after the current WAL head, so only new updates are queued
        let start_from = wrapped_shard.wal.wal.lock().await.last_index() + 1;
        Self::new_from_version(
            wrapped_shard,
            remote_shard,
            wal_keep_from,
            start_from,
            progress,
        )
    }

    /// Create a queue proxy that transfers all WAL operations from `version` and onwards.
    ///
    /// The caller is responsible for validating that `version` is within WAL bounds
    /// (see `QueueProxyShard::new_from_version`).
    pub fn new_from_version(
        wrapped_shard: LocalShard,
        remote_shard: RemoteShard,
        wal_keep_from: Arc<AtomicU64>,
        version: u64,
        progress: Arc<ParkingMutex<TransferTaskProgress>>,
    ) -> Self {
        let shard = Self {
            wrapped_shard,
            remote_shard,
            transfer_from: version.into(),
            started_at: version,
            update_lock: Default::default(),
            wal_keep_from,
            progress,
        };
        // Keep all WAL entries from `version` so we don't truncate them off when we still need to transfer
        shard.set_wal_keep_from(Some(version));
        shard
    }

    /// Transfer all updates that the remote missed from WAL
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    ///
    /// If cancelled - none, some or all operations of that batch may be transmitted to the remote.
    ///
    /// The internal field keeping track of the last transfer and maximum acknowledged WAL version
    /// likely won't be updated. In the worst case this might cause double sending operations.
    /// This should be fine as operations are idempotent.
    pub async fn transfer_all_missed_updates(&self) -> CollectionResult<()> {
        // Keep sending batches until `transfer_wal_batch` reports the last batch is done
        while !self.transfer_wal_batch().await? {}
        // Set the WAL version to keep to the next item we should transfer
        let transfer_from = self.transfer_from.load(Ordering::Relaxed);
        self.set_wal_keep_from(Some(transfer_from));
        Ok(())
    }

    /// Grab and transfer single new batch of updates from the WAL
    ///
    /// Returns `true` if this was the last batch and we're now done. `false` if more batches must
    /// be sent.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    ///
    /// If cancelled - none, some or all operations may be transmitted to the remote.
    ///
    /// The internal field keeping track of the last transfer likely won't be updated. In the worst
    /// case this might cause double sending operations. This should be fine as operations are
    /// idempotent.
    async fn transfer_wal_batch(&self) -> CollectionResult<bool> {
        // Held in an Option so we can release it early for all but the last batch
        let mut update_lock = Some(self.update_lock.lock().await);
        let transfer_from = self.transfer_from.load(Ordering::Relaxed);

        // Lock WAL, count pending items to transfer, grab batch
        let (pending_count, total, batch) = {
            let wal = self.wrapped_shard.wal.wal.lock().await;
            let items_left = (wal.last_index() + 1).saturating_sub(transfer_from);
            let items_total = (transfer_from - self.started_at) + items_left;
            let batch = wal.read(transfer_from).take(BATCH_SIZE).collect::<Vec<_>>();
            debug_assert!(
                batch.len() <= items_left as usize,
                "batch cannot be larger than items_left",
            );
            (items_left, items_total, batch)
        };

        log::trace!(
            "Queue proxy transferring batch of {} updates to peer {}",
            batch.len(),
            self.remote_shard.peer_id,
        );

        // Normally, we immediately release the update lock to allow new updates.
        // On the last batch we keep the lock to prevent accumulating more updates on the WAL,
        // so we can finalize the transfer after this batch, before accepting new updates.
        let last_batch = pending_count <= BATCH_SIZE as u64 || batch.is_empty();
        if !last_batch {
            drop(update_lock.take());
        }

        // If we are transferring the last batch, we need to wait for it to be applied.
        // - Why can we not wait? Assuming that order of operations is still enforced by the WAL,
        //   we should end up in exactly the same state with or without waiting.
        // - Why do we need to wait on the last batch? If we switch to ready state before
        //   updates are actually applied, we might create an inconsistency for read operations.
        let wait = last_batch;

        // Set initial progress on the first batch
        let is_first = transfer_from == self.started_at;
        if is_first {
            self.progress.lock().set(0, total as usize);
        }

        // Transfer batch with retries and store last transferred ID
        let last_idx = batch.last().map(|(idx, _)| *idx);
        for remaining_attempts in (0..BATCH_RETRIES).rev() {
            let disposed_hw = HwMeasurementAcc::disposable(); // Internal operation
            match transfer_operations_batch(&batch, &self.remote_shard, wait, disposed_hw).await {
                Ok(()) => {
                    if let Some(idx) = last_idx {
                        self.transfer_from.store(idx + 1, Ordering::Relaxed);
                        let transferred = (idx + 1 - self.started_at) as usize;
                        self.progress.lock().set(transferred, total as usize);
                    }
                    break;
                }
                Err(err) if remaining_attempts > 0 => {
                    log::error!(
                        "Failed to transfer batch of updates to peer {}, retrying: {err}",
                        self.remote_shard.peer_id,
                    );
                }
                Err(err) => return Err(err),
            }
        }

        Ok(last_batch)
    }

    /// Set or release what WAL versions to keep preventing acknowledgment/truncation.
    ///
    /// Because this proxy shard relies on the WAL to obtain operations in the past, it cannot be
    /// truncated before all these update operations have been flushed.
    /// Using this function we set the WAL not to acknowledge and truncate from a specific point.
    ///
    /// Providing `None` will release this limitation.
    fn set_wal_keep_from(&self, version: Option<u64>) {
        log::trace!("set_wal_keep_from {version:?}");
        // `u64::MAX` means "no restriction": everything confirmed may be acknowledged
        let version = version.unwrap_or(u64::MAX);
        self.wal_keep_from.store(version, Ordering::Relaxed);
    }
}
#[async_trait]
impl ShardOperation for Inner {
    /// Update `wrapped_shard` while keeping track of operations
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    async fn update(
        &self,
        operation: OperationWithClockTag,
        wait: bool,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<UpdateResult> {
        // `LocalShard::update` is cancel safe, so this is also cancel safe.
        let _update_lock = self.update_lock.lock().await;
        let local_shard = &self.wrapped_shard;
        // Shard update is within a write lock scope, because we need a way to block the shard updates
        // during the transfer restart and finalization.
        local_shard
            .update(operation.clone(), wait, hw_measurement_acc)
            .await
    }

    /// Forward read-only `scroll_by` to `wrapped_shard`
    async fn scroll_by(
        &self,
        request: Arc<ScrollRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .scroll_by(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `local_scroll_by_id` to `wrapped_shard`
    async fn local_scroll_by_id(
        &self,
        offset: Option<ExtendedPointId>,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .local_scroll_by_id(
                offset,
                limit,
                with_payload_interface,
                with_vector,
                filter,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            )
            .await
    }

    /// Forward read-only `info` to `wrapped_shard`
    async fn info(&self) -> CollectionResult<CollectionInfo> {
        let local_shard = &self.wrapped_shard;
        local_shard.info().await
    }

    /// Forward read-only `search` to `wrapped_shard`
    async fn core_search(
        &self,
        request: Arc<CoreSearchRequestBatch>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .core_search(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `count` to `wrapped_shard`
    async fn count(
        &self,
        request: Arc<CountRequestInternal>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<CountResult> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .count(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `retrieve` to `wrapped_shard`
    async fn retrieve(
        &self,
        request: Arc<PointRequestInternal>,
        with_payload: &WithPayload,
        with_vector: &WithVector,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .retrieve(
                request,
                with_payload,
                with_vector,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            )
            .await
    }

    /// Forward read-only `query` to `wrapped_shard`
    async fn query_batch(
        &self,
        request: Arc<Vec<ShardQueryRequest>>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .query_batch(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Forward read-only `facet` to `wrapped_shard`
    async fn facet(
        &self,
        request: Arc<FacetParams>,
        search_runtime_handle: &Handle,
        timeout: Option<Duration>,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<FacetResponse> {
        let local_shard = &self.wrapped_shard;
        local_shard
            .facet(request, search_runtime_handle, timeout, hw_measurement_acc)
            .await
    }

    /// Consume the proxy state and stop the wrapped local shard gracefully.
    async fn stop_gracefully(self) {
        self.wrapped_shard.stop_gracefully().await
    }
}
/// Transfer batch of operations without retries
///
/// Uses a single batched request when the remote peer is new enough
/// (`MINIMAL_VERSION_FOR_BATCH_WAL_TRANSFER`), falling back to one-by-one forwarding otherwise.
///
/// # Cancel safety
///
/// This method is cancel safe.
///
/// If cancelled - none, some or all operations of the batch may be transmitted to the remote.
async fn transfer_operations_batch(
    batch: &[(u64, OperationWithClockTag)],
    remote_shard: &RemoteShard,
    wait: bool,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<()> {
    if batch.is_empty() {
        return Ok(());
    }

    // Set force flag because operations from WAL may be unordered if another node is sending
    // new operations at the same time. Lazy iterator shared by both transfer paths below, so
    // the clone-and-force logic is not duplicated.
    let forced_operations = batch.iter().map(|(_idx, operation)| {
        let mut operation = operation.clone();
        if let Some(clock_tag) = &mut operation.clock_tag {
            clock_tag.force = true;
        }
        operation
    });

    let supports_update_batching =
        remote_shard.check_version(&MINIMAL_VERSION_FOR_BATCH_WAL_TRANSFER);

    if supports_update_batching {
        // Whole batch in a single request; this is the only use of the accumulator here,
        // so it is moved rather than cloned
        remote_shard
            .forward_update_batch(
                forced_operations.collect(),
                wait,
                WriteOrdering::Weak,
                hw_measurement_acc,
            )
            .await?;
        return Ok(());
    }

    // Fallback to one-by-one transfer, in case the remote shard doesn't support batch updates
    for operation in forced_operations {
        remote_shard
            .forward_update(
                operation,
                wait,
                WriteOrdering::Weak,
                hw_measurement_acc.clone(),
            )
            .await?;
    }
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/helpers.rs | lib/collection/src/shards/transfer/helpers.rs | use std::collections::{HashMap, HashSet};
use super::{ShardTransfer, ShardTransferKey, ShardTransferMethod};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::{PeerId, ShardId};
use crate::shards::shard_holder::shard_mapping::ShardKeyMapping;
/// Ensure that a transfer matching `transfer_key` is currently registered.
///
/// Returns a `bad_request` error naming the shard and peers when no such transfer exists.
pub fn validate_transfer_exists(
    transfer_key: &ShardTransferKey,
    current_transfers: &HashSet<ShardTransfer>,
) -> CollectionResult<()> {
    let exists = current_transfers
        .iter()
        .any(|transfer| &transfer.key() == transfer_key);
    if exists {
        Ok(())
    } else {
        Err(CollectionError::bad_request(format!(
            "There is no transfer for shard {} from {} to {}",
            transfer_key.shard_id, transfer_key.from, transfer_key.to,
        )))
    }
}
/// Look up the registered transfer identified by `transfer_key`, if any.
pub fn get_transfer(
    transfer_key: &ShardTransferKey,
    current_transfers: &HashSet<ShardTransfer>,
) -> Option<ShardTransfer> {
    current_transfers
        .iter()
        .find_map(|transfer| (&transfer.key() == transfer_key).then(|| transfer.clone()))
}
/// Confirms that the transfer does not conflict with any other active transfers
///
/// returns `None` if there is no conflicts, otherwise returns conflicting transfer
pub fn check_transfer_conflicts<'a, I>(
    transfer: &ShardTransfer,
    current_transfers: I,
) -> Option<ShardTransfer>
where
    I: Iterator<Item = &'a ShardTransfer>,
{
    current_transfers
        // Only transfers of the same shard can conflict
        .filter(|other| other.shard_id == transfer.shard_id)
        // Conflict if either endpoint of the other transfer is an endpoint of ours
        .find(|other| {
            let endpoints = [other.from, other.to];
            endpoints.contains(&transfer.from) || endpoints.contains(&transfer.to)
        })
        .cloned()
}
/// Same as `check_transfer_conflicts` but doesn't allow transfers to/from the same peer
/// more than once for the whole collection
pub fn check_transfer_conflicts_strict<'a, I>(
    transfer: &ShardTransfer,
    mut current_transfers: I,
) -> Option<ShardTransfer>
where
    I: Iterator<Item = &'a ShardTransfer>,
{
    // Unlike the non-strict variant, no shard-ID filter: any transfer sharing a peer
    // with ours (as source or target) is a conflict.
    current_transfers
        .find(|other| {
            let endpoints = [other.from, other.to];
            endpoints.contains(&transfer.from) || endpoints.contains(&transfer.to)
        })
        .cloned()
}
/// Confirms that the transfer makes sense with the current state cluster
///
/// Checks:
/// 1. If `from` and `to` exists
/// 2. If `from` have local shard and it is active
/// 3. If there is no active transfers which involve `from` or `to`
/// 4. If a target shard is only set for resharding transfers
///
/// For resharding transfers this also checks:
/// 1. If the source and target shards are different
/// 2. If the source and target shards share the same shard key
///
/// If validation fails, return `BadRequest` error.
pub fn validate_transfer(
    transfer: &ShardTransfer,
    all_peers: &HashSet<PeerId>,
    source_replicas: Option<&HashMap<PeerId, ReplicaState>>,
    destination_replicas: Option<&HashMap<PeerId, ReplicaState>>,
    current_transfers: &HashSet<ShardTransfer>,
    shards_key_mapping: &ShardKeyMapping,
) -> CollectionResult<()> {
    // `None` replicas means the source shard itself does not exist
    let Some(source_replicas) = source_replicas else {
        return Err(CollectionError::service_error(format!(
            "Shard {} does not exist",
            transfer.shard_id,
        )));
    };

    if !all_peers.contains(&transfer.from) {
        return Err(CollectionError::bad_request(format!(
            "Peer {} does not exist",
            transfer.from,
        )));
    }

    if !all_peers.contains(&transfer.to) {
        return Err(CollectionError::bad_request(format!(
            "Peer {} does not exist",
            transfer.to,
        )));
    }

    // We allow transfers *from* `ReshardingScaleDown` replicas, because they contain a *superset*
    // of points in a regular replica
    let is_active = matches!(
        source_replicas.get(&transfer.from),
        Some(ReplicaState::Active | ReplicaState::ReshardingScaleDown),
    );
    if !is_active {
        return Err(CollectionError::bad_request(format!(
            "Shard {} is not active on peer {}",
            transfer.shard_id, transfer.from,
        )));
    }

    if let Some(existing_transfer) = check_transfer_conflicts(transfer, current_transfers.iter()) {
        return Err(CollectionError::bad_request(format!(
            "Shard {} is already involved in transfer {} -> {}",
            transfer.shard_id, existing_transfer.from, existing_transfer.to,
        )));
    }

    if transfer.method == Some(ShardTransferMethod::ReshardingStreamRecords) {
        let Some(destination_replicas) = destination_replicas else {
            return Err(CollectionError::service_error(format!(
                "Destination shard {} does not exist",
                transfer.shard_id,
            )));
        };

        let Some(to_shard_id) = transfer.to_shard_id else {
            return Err(CollectionError::bad_request(
                "Target shard is not set for resharding transfer",
            ));
        };

        if transfer.shard_id == to_shard_id {
            return Err(CollectionError::bad_request(format!(
                "Source and target shard must be different for resharding transfer, both are {to_shard_id}",
            )));
        }

        if let Some(ReplicaState::Dead) = destination_replicas.get(&transfer.to) {
            return Err(CollectionError::bad_request(format!(
                "Resharding shard transfer can't be started, \
                 because destination shard {}/{to_shard_id} is dead",
                transfer.to,
            )));
        }

        // Both shard IDs must share the same shard key
        //
        // Bug fix: the source lookup previously searched for `to_shard_id` as well, making
        // `source_shard_key` always equal `target_shard_key` and this check vacuous.
        // The source key must be derived from the *source* shard ID.
        let source_shard_key = shards_key_mapping
            .iter()
            .find(|(_, shard_ids)| shard_ids.contains(&transfer.shard_id))
            .map(|(key, _)| key);
        let target_shard_key = shards_key_mapping
            .iter()
            .find(|(_, shard_ids)| shard_ids.contains(&to_shard_id))
            .map(|(key, _)| key);
        if source_shard_key != target_shard_key {
            return Err(CollectionError::bad_request(format!(
                "Source and target shard must have the same shard key, but they have {source_shard_key:?} and {target_shard_key:?}",
            )));
        }
    } else if transfer.filter.is_some() {
        let Some(destination_replicas) = destination_replicas else {
            return Err(CollectionError::service_error(format!(
                "Destination shard {} does not exist",
                transfer.shard_id,
            )));
        };

        let Some(to_shard_id) = transfer.to_shard_id else {
            return Err(CollectionError::bad_request(
                "Target shard is not set for filtered points transfer",
            ));
        };

        if transfer.shard_id == to_shard_id {
            return Err(CollectionError::bad_request(format!(
                "Source and target shard must be different for filtered points transfer, both are {to_shard_id}",
            )));
        }

        if let Some(ReplicaState::Dead) = destination_replicas.get(&transfer.to) {
            return Err(CollectionError::bad_request(format!(
                "Filtered shard transfer can't be started, \
                 because destination shard {}/{to_shard_id} is dead",
                transfer.to,
            )));
        }
    } else if let Some(to_shard_id) = transfer.to_shard_id {
        // A target shard ID only makes sense for the two transfer kinds handled above
        return Err(CollectionError::bad_request(format!(
            "Target shard {to_shard_id} can only be set for {:?} or filtered streaming records transfers",
            ShardTransferMethod::ReshardingStreamRecords,
        )));
    }

    Ok(())
}
/// Selects a best peer to transfer shard from.
///
/// Requirements:
/// 1. Peer should have an active replica of the shard
/// 2. There should be no active transfers from this peer with the same shard
/// 3. Prefer peer with the lowest number of active transfers
///
/// If there are no peers that satisfy the requirements, returns `None`.
pub fn suggest_transfer_source(
    shard_id: ShardId,
    target_peer: PeerId,
    current_transfers: &[ShardTransfer],
    shard_peers: &HashMap<PeerId, ReplicaState>,
) -> Option<PeerId> {
    let mut candidates = HashSet::new();
    for (&peer_id, &state) in shard_peers {
        // We allow transfers *from* `ReshardingScaleDown` replicas, because they contain a *superset*
        // of points in a regular replica
        let is_active = matches!(
            state,
            ReplicaState::Active | ReplicaState::ReshardingScaleDown
        );
        if is_active && peer_id != target_peer {
            candidates.insert(peer_id);
        }
    }

    // Exclude peers already involved (as source or target) in a transfer of this shard.
    // Fix: restore the `&` borrow that was garbled into a `¤` entity in the original.
    let currently_transferring = current_transfers
        .iter()
        .filter(|transfer| transfer.shard_id == shard_id)
        .flat_map(|transfer| [transfer.from, transfer.to])
        .collect::<HashSet<PeerId>>();
    candidates = candidates
        .difference(&currently_transferring)
        .copied()
        .collect();

    // Count active outgoing transfers per peer, so the least busy source wins
    let transfer_counts = current_transfers
        .iter()
        .fold(HashMap::new(), |mut counts, transfer| {
            *counts.entry(transfer.from).or_insert(0_usize) += 1;
            counts
        });

    // Sort candidates by the number of active transfers
    let mut candidates = candidates
        .into_iter()
        .map(|peer_id| (peer_id, transfer_counts.get(&peer_id).unwrap_or(&0)))
        .collect::<Vec<(PeerId, &usize)>>();
    candidates.sort_unstable_by_key(|(_, count)| **count);

    candidates.first().map(|(peer_id, _)| *peer_id)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/transfer_tasks_pool.rs | lib/collection/src/shards/transfer/transfer_tasks_pool.rs | use std::cmp::max;
use std::collections::HashMap;
use std::fmt::Write as _;
use std::sync::Arc;
use parking_lot::Mutex;
use crate::common::eta_calculator::EtaCalculator;
use crate::common::stoppable_task_async::CancellableAsyncTaskHandle;
use crate::shards::CollectionId;
use crate::shards::transfer::{ShardTransfer, ShardTransferKey};
/// Tracks all shard transfer tasks for a single collection.
pub struct TransferTasksPool {
    collection_id: CollectionId,
    // Tasks keyed by their method-agnostic transfer key
    tasks: HashMap<ShardTransferKey, TransferTaskItem>,
}

/// A single transfer task together with its bookkeeping.
pub struct TransferTaskItem {
    // Handle to the cancellable async task; resolves to `true` on success
    pub task: CancellableAsyncTaskHandle<bool>,
    // When this transfer task was started
    pub started_at: chrono::DateTime<chrono::Utc>,
    // Shared progress counters, updated by the running transfer
    pub progress: Arc<Mutex<TransferTaskProgress>>,
}

/// Progress counters and ETA estimation for a transfer task.
pub struct TransferTaskProgress {
    // Points transferred so far
    points_transferred: usize,
    // Estimated total points to transfer (kept >= points_transferred by `add`)
    points_total: usize,
    pub eta: EtaCalculator,
}

/// Coarse outcome of a transfer task.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum TaskResult {
    Running,
    Finished,
    Failed,
}

/// Status of a transfer task: outcome plus a human-readable progress comment.
pub struct TransferTaskStatus {
    pub result: TaskResult,
    pub comment: String,
}
impl TransferTaskProgress {
    /// Fresh progress tracker with zeroed counters.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self {
            points_transferred: 0,
            points_total: 0,
            eta: EtaCalculator::new(),
        }
    }

    /// Record `delta` newly transferred points, bumping the total up if it
    /// would otherwise fall behind the transferred count.
    pub fn add(&mut self, delta: usize) {
        self.points_transferred += delta;
        if self.points_total < self.points_transferred {
            self.points_total = self.points_transferred;
        }
        self.eta.set_progress(self.points_transferred);
    }

    /// Overwrite both counters with absolute values.
    pub fn set(&mut self, transferred: usize, total: usize) {
        self.points_transferred = transferred;
        self.points_total = total;
        self.eta.set_progress(transferred);
    }
}
impl TransferTasksPool {
    /// Create an empty task pool for the given collection.
    pub fn new(collection_id: CollectionId) -> Self {
        Self {
            collection_id,
            tasks: HashMap::new(),
        }
    }

    /// Get the status of the task. If the task is not found, return None.
    pub fn get_task_status(&self, transfer_key: &ShardTransferKey) -> Option<TransferTaskStatus> {
        let task = self.tasks.get(transfer_key)?;
        // A task that finished without reporting a result is treated as failed
        let result = match task.task.get_result() {
            Some(true) => TaskResult::Finished,
            Some(false) => TaskResult::Failed,
            None if task.task.is_finished() => TaskResult::Failed,
            None => TaskResult::Running,
        };
        let progress = task.progress.lock();
        // Transferred may overtake the estimated total; clamp the displayed total up
        let total = max(progress.points_transferred, progress.points_total);
        let mut comment = format!(
            "Transferring records ({}/{}), started {}s ago, ETA: ",
            progress.points_transferred,
            total,
            chrono::Utc::now()
                .signed_duration_since(task.started_at)
                .num_seconds(),
        );
        if let Some(eta) = progress.eta.estimate(total) {
            write!(comment, "{:.2}s", eta.as_secs_f64()).unwrap();
        } else {
            // No ETA available yet
            comment.push('-');
        }
        Some(TransferTaskStatus { result, comment })
    }

    /// Stop the task and return the result. If the task is not found, return None.
    ///
    /// The task is removed from the pool regardless of how cancellation turns out.
    pub async fn stop_task(&mut self, transfer_key: &ShardTransferKey) -> Option<TaskResult> {
        let task = self.tasks.remove(transfer_key)?;
        Some(match task.task.cancel().await {
            Ok(true) => {
                log::info!(
                    "Transfer of shard {}:{} -> {} finished",
                    self.collection_id,
                    transfer_key.shard_id,
                    transfer_key.to,
                );
                TaskResult::Finished
            }
            Ok(false) => {
                log::info!(
                    "Transfer of shard {}:{} -> {} stopped",
                    self.collection_id,
                    transfer_key.shard_id,
                    transfer_key.to,
                );
                TaskResult::Failed
            }
            Err(err) => {
                log::warn!(
                    "Transfer task for shard {}:{} -> {} failed: {err}",
                    self.collection_id,
                    transfer_key.shard_id,
                    transfer_key.to,
                );
                TaskResult::Failed
            }
        })
    }

    /// Register a transfer task under its key, replacing any existing entry for that key.
    pub fn add_task(&mut self, shard_transfer: &ShardTransfer, item: TransferTaskItem) {
        self.tasks.insert(shard_transfer.key(), item);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/resharding_stream_records.rs | lib/collection/src/shards/transfer/resharding_stream_records.rs | use std::sync::Arc;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use parking_lot::Mutex;
use super::transfer_tasks_pool::TransferTaskProgress;
use crate::hash_ring::HashRingRouter;
use crate::operations::types::{CollectionError, CollectionResult, CountRequestInternal};
use crate::shards::CollectionId;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::LockedShardHolder;
use crate::shards::transfer::stream_records::TRANSFER_BATCH_SIZE;
/// Orchestrate shard transfer by streaming records, but only the points that fall into the new
/// shard.
///
/// This is called on the sender and will arrange all that is needed for the shard transfer
/// process.
///
/// This first transfers configured indices. Then it transfers all point records in batches.
/// Updates to the local shard are forwarded to the remote concurrently.
///
/// # Cancel safety
///
/// This function is cancel safe.
pub(crate) async fn transfer_resharding_stream_records(
    shard_holder: Arc<LockedShardHolder>,
    progress: Arc<Mutex<TransferTaskProgress>>,
    shard_id: ShardId,
    remote_shard: RemoteShard,
    collection_id: &CollectionId,
) -> CollectionResult<()> {
    let remote_peer_id = remote_shard.peer_id;
    // Both are assigned while holding the shard holder lock in the block below
    let cutoff;
    let hashring;

    log::debug!(
        "Starting shard {shard_id} transfer to peer {remote_peer_id} by reshard streaming records"
    );

    // Proxify local shard and create payload indexes on remote shard
    {
        let shard_holder = shard_holder.read().await;

        let Some(replica_set) = shard_holder.get_shard(shard_id) else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} cannot be proxied because it does not exist"
            )));
        };

        // Derive shard key scope for this transfer from the shard ID, get the hash ring
        // (`shard_key` is `None` when the shard is not part of any custom shard key)
        let shard_key = shard_holder
            .get_shard_id_to_key_mapping()
            .get(&shard_id)
            .cloned();
        hashring = shard_holder.rings.get(&shard_key).cloned().ok_or_else(|| {
            CollectionError::service_error(format!(
                "Shard {shard_id} cannot be transferred for resharding, failed to get shard hash ring"
            ))
        })?;

        replica_set
            .proxify_local(remote_shard.clone(), Some(hashring.clone()), None)
            .await?;

        // Approximate count is enough here: it is only used for progress/ETA reporting
        let hw_acc = HwMeasurementAcc::disposable();
        let Some(count_result) = replica_set
            .count_local(
                Arc::new(CountRequestInternal {
                    filter: None,
                    exact: false,
                }),
                None,
                hw_acc,
            )
            .await?
        else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} not found"
            )));
        };

        // Resharding up:
        //
        // - shards: 1 -> 2
        //   points: 100 -> 50/50
        //   transfer points of each shard: 50/1 = 50 -> 50/100 = 50%
        //   transfer fraction to each shard: 1/new_shard_count = 1/2 = 0.5
        // - shards: 2 -> 3
        //   points: 50/50 -> 33/33/33
        //   transfer points of each shard: 33/2 = 16.5 -> 16.5/50 = 33%
        //   transfer fraction to each shard: 1/new_shard_count = 1/3 = 0.33
        // - shards: 3 -> 4
        //   points: 33/33/33 -> 25/25/25/25
        //   transfer points of each shard: 25/3 = 8.3 -> 8.3/33 = 25%
        //   transfer fraction to each shard: 1/new_shard_count = 1/4 = 0.25
        //
        // Resharding down:
        //
        // - shards: 2 -> 1
        //   points: 50/50 -> 100
        //   transfer points of each shard: 50/1 = 50 -> 50/50 = 100%
        //   transfer fraction to each shard: 1/new_shard_count = 1/1 = 1.0
        // - shards: 3 -> 2
        //   points: 33/33/33 -> 50/50
        //   transfer points of each shard: 33/2 = 16.5 -> 16.5/33 = 50%
        //   transfer fraction to each shard: 1/new_shard_count = 1/2 = 0.5
        // - shards: 4 -> 3
        //   points: 25/25/25/25 -> 33/33/33
        //   transfer points of each shard: 25/3 = 8.3 -> 8.3/25 = 33%
        //   transfer fraction to each shard: 1/new_shard_count = 1/3 = 0.33
        let new_shard_count = match &hashring {
            HashRingRouter::Single(_) => {
                return Err(CollectionError::service_error(format!(
                    "Failed to do resharding transfer, hash ring for shard {shard_id} not in resharding state",
                )));
            }
            HashRingRouter::Resharding { old, new } => {
                debug_assert!(
                    old.len().abs_diff(new.len()) <= 1,
                    "expects resharding to only move up or down by one shard",
                );
                new.len()
            }
        };
        // Estimated fraction of local points that fall into the target shard (see table above)
        let transfer_size = count_result.count / new_shard_count;
        progress.lock().set(0, transfer_size);

        replica_set.transfer_indexes().await?;

        // Take our last seen clocks as cutoff point right before doing content batch transfers
        cutoff = replica_set.shard_recovery_point().await?;
    }

    // Transfer contents batch by batch
    log::trace!("Transferring points to shard {shard_id} by reshard streaming records");
    let mut offset = None;

    loop {
        // Reacquire the lock per batch so it is not held across the whole transfer
        let shard_holder = shard_holder.read().await;

        let Some(replica_set) = shard_holder.get_shard(shard_id) else {
            // Forward proxy gone?!
            // That would be a programming error.
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} is not found"
            )));
        };

        let (new_offset, count) = replica_set
            .transfer_batch(offset, TRANSFER_BATCH_SIZE, Some(&hashring), true)
            .await?;
        offset = new_offset;
        progress.lock().add(count);

        // If this is the last batch, finalize
        if offset.is_none() {
            break;
        }
    }

    // Update cutoff point on remote shard, disallow recovery before it
    //
    // We provide it our last seen clocks from just before transferring the content batches, and
    // not our current last seen clocks. We're sure that after the transfer the remote must have
    // seen all point data for those clocks. While we cannot guarantee the remote has all point
    // data for our current last seen clocks because some operations may still be in flight.
    // This is a trade-off between being conservative and being too conservative.
    //
    // We must send a cutoff point to the remote so it can learn about all the clocks that exist.
    // If we don't do this it is possible the remote will never see a clock, breaking all future
    // WAL delta transfers.
    remote_shard
        .update_shard_cutoff_point(collection_id, remote_shard.id, &cutoff)
        .await?;

    log::debug!(
        "Ending shard {shard_id} transfer to peer {remote_peer_id} by reshard streaming records"
    );

    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/mod.rs | lib/collection/src/shards/transfer/mod.rs | use std::time::Duration;
use async_trait::async_trait;
use common::defaults::{self, CONSENSUS_CONFIRM_RETRIES};
use schemars::JsonSchema;
use segment::types::Filter;
use serde::{Deserialize, Serialize};
use tokio::time::sleep;
use super::CollectionId;
use super::channel_service::ChannelService;
use super::remote_shard::RemoteShard;
use super::resharding::ReshardKey;
use super::shard::{PeerId, ShardId};
use crate::operations::cluster_ops::ReshardingDirection;
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::replica_set::replica_set_state::ReplicaState;
pub mod driver;
pub mod helpers;
pub mod resharding_stream_records;
pub mod snapshot;
pub mod stream_records;
pub mod transfer_tasks_pool;
pub mod wal_delta;
/// Time between consensus confirmation retries.
const CONSENSUS_CONFIRM_RETRY_DELAY: Duration = Duration::from_secs(1);
/// Time after which confirming a consensus operation times out.
const CONSENSUS_CONFIRM_TIMEOUT: Duration = defaults::CONSENSUS_META_OP_WAIT;
/// A requested or in-flight transfer of one shard between two peers.
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct ShardTransfer {
    /// Source shard ID
    pub shard_id: ShardId,
    /// Target shard ID if different than source shard ID
    ///
    /// Used exclusively with `ReshardStreamRecords` transfer method.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub to_shard_id: Option<ShardId>,
    /// Peer the shard is transferred from
    pub from: PeerId,
    /// Peer the shard is transferred to
    pub to: PeerId,
    /// If this flag is true, this is a replication related transfer of shard from 1 peer to another
    /// Shard on original peer will not be deleted in this case
    pub sync: bool,
    /// Method to transfer shard with. `None` to choose automatically.
    #[serde(default)]
    pub method: Option<ShardTransferMethod>,
    /// Optional filter to apply when transferring points
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub filter: Option<Filter>,
}
impl ShardTransfer {
    /// Method-agnostic identifier of this transfer.
    pub fn key(&self) -> ShardTransferKey {
        let &Self {
            shard_id,
            to_shard_id,
            from,
            to,
            ..
        } = self;
        ShardTransferKey {
            shard_id,
            to_shard_id,
            from,
            to,
        }
    }

    /// Whether this transfer uses a resharding method.
    pub fn is_resharding(&self) -> bool {
        matches!(self.method, Some(method) if method.is_resharding())
    }

    /// Checks whether this peer and shard ID pair is the source or target of this transfer
    #[inline]
    pub fn is_source_or_target(&self, peer_id: PeerId, shard_id: ShardId) -> bool {
        self.is_source(peer_id, shard_id) || self.is_target(peer_id, shard_id)
    }

    /// Checks whether this peer and shard ID pair is the source of this transfer
    #[inline]
    pub fn is_source(&self, peer_id: PeerId, shard_id: ShardId) -> bool {
        peer_id == self.from && shard_id == self.shard_id
    }

    /// Checks whether this peer and shard ID pair is the target of this transfer
    #[inline]
    pub fn is_target(&self, peer_id: PeerId, shard_id: ShardId) -> bool {
        // Target shard defaults to the source shard ID when not set explicitly
        let target_shard_id = self.to_shard_id.unwrap_or(self.shard_id);
        peer_id == self.to && shard_id == target_shard_id
    }

    /// Check if this transfer is related to a specific resharding operation
    pub fn is_related_to_resharding(&self, key: &ReshardKey) -> bool {
        // Must be a resharding transfer
        if !self.is_resharding() {
            return false;
        }

        match key.direction {
            // Resharding up: all related transfers target the resharding shard ID
            ReshardingDirection::Up => self.to_shard_id == Some(key.shard_id),
            // Resharding down: all related transfers are sourced from the resharding shard ID
            ReshardingDirection::Down => self.shard_id == key.shard_id,
        }
    }
}
/// Parameters for restarting a shard transfer.
///
/// Unlike [`ShardTransfer`], the transfer method is required here.
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct ShardTransferRestart {
    pub shard_id: ShardId,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub to_shard_id: Option<ShardId>,
    pub from: PeerId,
    pub to: PeerId,
    /// Method to restart the transfer with
    pub method: ShardTransferMethod,
}
impl ShardTransferRestart {
    /// Method-agnostic identifier of this transfer.
    pub fn key(&self) -> ShardTransferKey {
        let &Self {
            shard_id,
            to_shard_id,
            from,
            to,
            method: _,
        } = self;
        ShardTransferKey {
            shard_id,
            to_shard_id,
            from,
            to,
        }
    }
}
impl From<ShardTransfer> for ShardTransferRestart {
fn from(transfer: ShardTransfer) -> Self {
Self {
shard_id: transfer.shard_id,
to_shard_id: transfer.to_shard_id,
from: transfer.from,
to: transfer.to,
method: transfer.method.unwrap_or_default(),
}
}
}
/// Unique identifier of a transfer, agnostic of transfer method
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct ShardTransferKey {
    pub shard_id: ShardId,
    /// Target shard ID, only set when it differs from the source shard ID
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub to_shard_id: Option<ShardId>,
    pub from: PeerId,
    pub to: PeerId,
}
impl ShardTransferKey {
    /// Whether this key identifies the given `transfer`.
    pub fn check(self, transfer: &ShardTransfer) -> bool {
        transfer.key() == self
    }
}
/// Methods for transferring a shard from one node to another.
///
/// - `stream_records` - Stream all shard records in batches until the whole shard is transferred.
///
/// - `snapshot` - Snapshot the shard, transfer and restore it on the receiver.
///
/// - `wal_delta` - Attempt to transfer shard difference by WAL delta.
///
/// - `resharding_stream_records` - Shard transfer for resharding: stream all records in batches until all points are transferred.
// Serialized in snake_case for the API; variant comments below intentionally use `//` so
// they do not end up in the generated JSON schema.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Default, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ShardTransferMethod {
    // Stream all shard records in batches until the whole shard is transferred.
    #[default]
    StreamRecords,
    // Snapshot the shard, transfer and restore it on the receiver.
    Snapshot,
    // Attempt to transfer shard difference by WAL delta.
    WalDelta,
    // Shard transfer for resharding: stream all records in batches until all points are
    // transferred.
    ReshardingStreamRecords,
}
impl ShardTransferMethod {
    /// Whether this method performs a resharding transfer.
    pub fn is_resharding(&self) -> bool {
        // Exhaustive match so adding a variant forces a decision here
        match self {
            Self::ReshardingStreamRecords => true,
            Self::StreamRecords | Self::Snapshot | Self::WalDelta => false,
        }
    }
}
/// Interface to consensus for shard transfer operations.
#[async_trait]
pub trait ShardTransferConsensus: Send + Sync {
/// Get the peer ID for the current node.
fn this_peer_id(&self) -> PeerId;
/// Get all peer IDs, including that of the current node.
fn peers(&self) -> Vec<PeerId>;
/// Get the current consensus commit and term state.
///
/// Returns `(commit, term)`.
fn consensus_commit_term(&self) -> (u64, u64);
/// After snapshot or WAL delta recovery, propose to switch shard to `Partial`
///
/// This is called after shard snapshot or WAL delta recovery has been completed on the remote.
/// It submits a proposal to consensus to switch the shard state from `Recovery` to `Partial`.
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
fn recovered_switch_to_partial(
&self,
transfer_config: &ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()>;
/// After snapshot or WAL delta recovery, propose to switch shard to `Partial` and confirm on
/// remote shard
///
/// This is called after shard snapshot or WAL delta recovery has been completed on the remote.
/// It submits a proposal to consensus to switch the shard state from `Recovery` to `Partial`.
///
/// This method also confirms consensus applied the operation before returning by asserting the
/// change is propagated on a remote shard. For the next stage only the remote needs to be in
/// `Partial` to accept updates, we therefore assert the state on the remote explicitly rather
/// than asserting locally. If it fails, it will be retried for up to
/// `CONSENSUS_CONFIRM_RETRIES` times.
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn recovered_switch_to_partial_confirm_remote(
&self,
transfer_config: &ShardTransfer,
collection_id: &CollectionId,
remote_shard: &RemoteShard,
) -> CollectionResult<()> {
let mut result = Err(CollectionError::service_error(
"`recovered_switch_to_partial_confirm_remote` exit without attempting any work, \
this is a programming error",
));
for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
if attempt > 0 {
sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
}
result = self.recovered_switch_to_partial(transfer_config, collection_id.clone());
if let Err(err) = &result {
log::error!("Failed to propose recovered operation to consensus: {err}");
continue;
}
log::trace!("Wait for remote shard to reach `Partial` state");
result = remote_shard
.wait_for_shard_state(
collection_id,
transfer_config.shard_id,
ReplicaState::Partial,
CONSENSUS_CONFIRM_TIMEOUT,
)
.await
.map(|_| ());
match &result {
Ok(()) => break,
Err(err) => {
log::error!("Failed to confirm recovered operation on consensus: {err}");
}
}
}
result.map_err(|err| {
CollectionError::service_error(format!(
"Failed to confirm recovered operation on consensus after {CONSENSUS_CONFIRM_RETRIES} retries: {err}",
))
})
}
/// After a stream records transfer between different shard IDs, propose to switch shard to
/// `ActiveRead` and confirm on remote shard
///
/// This is called after shard stream records has been completed on the remote.
/// It submits a proposal to consensus to switch the shard state from `Partial` to `ActiveRead`.
///
/// This method also confirms consensus applied the operation on ALL peers before returning. If
/// it fails, it will be retried for up to `CONSENSUS_CONFIRM_RETRIES` times.
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn switch_partial_to_read_active_confirm_peers(
&self,
channel_service: &ChannelService,
collection_id: &CollectionId,
remote_shard: &RemoteShard,
) -> CollectionResult<()> {
let mut result = Err(CollectionError::service_error(
"`switch_partial_to_read_active_confirm_peers` exit without attempting any work, \
this is a programming error",
));
for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
if attempt > 0 {
sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
}
log::trace!(
"Propose and confirm to switch peer from `Partial` into `ActiveRead` state"
);
result = self
.set_shard_replica_set_state(
Some(remote_shard.peer_id),
collection_id.clone(),
remote_shard.id,
ReplicaState::ActiveRead,
Some(ReplicaState::Partial),
)
.await;
if let Err(err) = &result {
log::error!("Failed to propose state switch operation to consensus: {err}");
continue;
}
log::trace!("Wait for all peers to reach `ActiveRead` state");
result = self.await_consensus_sync(channel_service).await;
match &result {
Ok(()) => break,
Err(err) => {
log::error!("Failed to confirm state switch operation on consensus: {err}");
}
}
}
result.map_err(|err| {
CollectionError::service_error(format!(
"Failed to confirm state switch operation on consensus after {CONSENSUS_CONFIRM_RETRIES} retries: {err}",
))
})
}
/// Propose to start a shard transfer
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn start_shard_transfer(
&self,
transfer_config: ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()>;
/// Propose to start a shard transfer
///
/// This internally confirms and retries a few times if needed to ensure consensus picks up the
/// operation.
async fn start_shard_transfer_confirm_and_retry(
&self,
transfer_config: &ShardTransfer,
collection_id: &str,
) -> CollectionResult<()> {
let mut result = Err(CollectionError::service_error(
"`start_shard_transfer_confirm_and_retry` exit without attempting any work, \
this is a programming error",
));
for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
if attempt > 0 {
sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
}
log::trace!("Propose and confirm shard transfer start operation");
result = self
.start_shard_transfer(transfer_config.clone(), collection_id.into())
.await;
match &result {
Ok(()) => break,
Err(err) => {
log::error!(
"Failed to confirm start shard transfer operation on consensus: {err}"
);
}
}
}
result.map_err(|err| {
CollectionError::service_error(format!(
"Failed to start shard transfer through consensus \
after {CONSENSUS_CONFIRM_RETRIES} retries: {err}"
))
})
}
/// Propose to restart a shard transfer with a different given configuration
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn restart_shard_transfer(
&self,
transfer_config: ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()>;
/// Propose to restart a shard transfer with a different given configuration
///
/// This internally confirms and retries a few times if needed to ensure consensus picks up the
/// operation.
async fn restart_shard_transfer_confirm_and_retry(
&self,
transfer_config: &ShardTransfer,
collection_id: &CollectionId,
) -> CollectionResult<()> {
let mut result = Err(CollectionError::service_error(
"`restart_shard_transfer_confirm_and_retry` exit without attempting any work, \
this is a programming error",
));
for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
if attempt > 0 {
sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
}
log::trace!("Propose and confirm shard transfer restart operation");
result = self
.restart_shard_transfer(transfer_config.clone(), collection_id.into())
.await;
match &result {
Ok(()) => break,
Err(err) => {
log::error!(
"Failed to confirm restart shard transfer operation on consensus: {err}"
);
}
}
}
result.map_err(|err| {
CollectionError::service_error(format!(
"Failed to restart shard transfer through consensus \
after {CONSENSUS_CONFIRM_RETRIES} retries: {err}"
))
})
}
/// Propose to abort a shard transfer
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn abort_shard_transfer(
&self,
transfer: ShardTransferKey,
collection_id: CollectionId,
reason: &str,
) -> CollectionResult<()>;
/// Propose to abort a shard transfer
///
/// This internally confirms and retries a few times if needed to ensure consensus picks up the
/// operation.
async fn abort_shard_transfer_confirm_and_retry(
&self,
transfer: ShardTransferKey,
collection_id: &CollectionId,
reason: &str,
) -> CollectionResult<()> {
let mut result = Err(CollectionError::service_error(
"`abort_shard_transfer_confirm_and_retry` exit without attempting any work, \
this is a programming error",
));
for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
if attempt > 0 {
sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
}
log::trace!("Propose and confirm shard transfer abort operation");
result = self
.abort_shard_transfer(transfer, collection_id.into(), reason)
.await;
match &result {
Ok(()) => break,
Err(err) => {
log::error!(
"Failed to confirm abort shard transfer operation on consensus: {err}"
);
}
}
}
result.map_err(|err| {
CollectionError::service_error(format!(
"Failed to abort shard transfer through consensus \
after {CONSENSUS_CONFIRM_RETRIES} retries: {err}"
))
})
}
/// Set the shard replica state on this peer through consensus
///
/// If the peer ID is not provided, this will set the replica state for the current peer.
///
/// `from_state`, when provided, presumably restricts the transition to replicas currently in
/// that state — TODO confirm against the consensus implementation.
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn set_shard_replica_set_state(
    &self,
    peer_id: Option<PeerId>,
    collection_id: CollectionId,
    shard_id: ShardId,
    state: ReplicaState,
    from_state: Option<ReplicaState>,
) -> CollectionResult<()>;
/// Propose to commit the read hash ring.
///
/// Use [`Self::commit_read_hashring_confirm_and_retry`] to also confirm the proposal and retry
/// on failure.
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn commit_read_hashring(
    &self,
    collection_id: CollectionId,
    reshard_key: ReshardKey,
) -> CollectionResult<()>;
/// Propose to commit the read hash ring, then confirm or retry.
///
/// This internally confirms and retries a few times if needed to ensure consensus picks up the
/// operation.
async fn commit_read_hashring_confirm_and_retry(
    &self,
    collection_id: &CollectionId,
    reshard_key: &ReshardKey,
) -> CollectionResult<()> {
    // Keep the most recent proposal error around for the final report
    let mut last_err = None;
    for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
        // Delay retries, but not the initial attempt
        if attempt > 0 {
            sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
        }
        log::trace!("Propose and confirm commit read hashring operation");
        match self
            .commit_read_hashring(collection_id.into(), reshard_key.clone())
            .await
        {
            // Consensus confirmed the commit
            Ok(()) => return Ok(()),
            Err(err) => {
                log::error!(
                    "Failed to confirm commit read hashring operation on consensus: {err}"
                );
                last_err = Some(err);
            }
        }
    }
    // All attempts failed; a zero retry count means the loop never ran at all
    let err = last_err.unwrap_or_else(|| {
        CollectionError::service_error(
            "`commit_read_hashring_confirm_and_retry` exit without attempting any work, \
             this is a programming error",
        )
    });
    Err(CollectionError::service_error(format!(
        "Failed to commit read hashring through consensus \
         after {CONSENSUS_CONFIRM_RETRIES} retries: {err}"
    )))
}
/// Propose to commit the write hash ring.
///
/// Use [`Self::commit_write_hashring_confirm_and_retry`] to also confirm the proposal and retry
/// on failure.
///
/// # Warning
///
/// This only submits a proposal to consensus. Calling this does not guarantee that consensus
/// will actually apply the operation across the cluster.
async fn commit_write_hashring(
    &self,
    collection_id: CollectionId,
    reshard_key: ReshardKey,
) -> CollectionResult<()>;
/// Propose to commit the write hash ring, then confirm or retry.
///
/// This internally confirms and retries a few times if needed to ensure consensus picks up the
/// operation.
async fn commit_write_hashring_confirm_and_retry(
    &self,
    collection_id: &CollectionId,
    reshard_key: &ReshardKey,
) -> CollectionResult<()> {
    // Keep the most recent proposal error around for the final report
    let mut last_err = None;
    for attempt in 0..CONSENSUS_CONFIRM_RETRIES {
        // Delay retries, but not the initial attempt
        if attempt > 0 {
            sleep(CONSENSUS_CONFIRM_RETRY_DELAY).await;
        }
        log::trace!("Propose and confirm commit write hashring operation");
        match self
            .commit_write_hashring(collection_id.into(), reshard_key.clone())
            .await
        {
            // Consensus confirmed the commit
            Ok(()) => return Ok(()),
            Err(err) => {
                log::error!(
                    "Failed to confirm commit write hashring operation on consensus: {err}"
                );
                last_err = Some(err);
            }
        }
    }
    // All attempts failed; a zero retry count means the loop never ran at all
    let err = last_err.unwrap_or_else(|| {
        CollectionError::service_error(
            "`commit_write_hashring_confirm_and_retry` exit without attempting any work, \
             this is a programming error",
        )
    });
    Err(CollectionError::service_error(format!(
        "Failed to commit write hashring through consensus \
         after {CONSENSUS_CONFIRM_RETRIES} retries: {err}"
    )))
}
/// Wait for all other peers to reach the current consensus
///
/// This will take the current consensus state of this node. It then explicitly awaits on all
/// other nodes to reach this consensus state.
///
/// # Errors
///
/// This errors if:
/// - any of the peers is not on the same term
/// - waiting takes longer than the specified timeout
/// - any of the peers cannot be reached
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn await_consensus_sync(&self, channel_service: &ChannelService) -> CollectionResult<()> {
    // Everyone except ourselves needs to catch up; saturating_sub keeps a lone node at zero
    let known_peers = channel_service.id_to_address.read().len();
    let other_peer_count = known_peers.saturating_sub(1);
    // A single-node cluster has nobody to wait for
    if other_peer_count == 0 {
        log::warn!("There are no other peers, skipped synchronizing consensus");
        return Ok(());
    }
    let (commit, term) = self.consensus_commit_term();
    log::trace!(
        "Waiting on {other_peer_count} peer(s) to reach consensus (commit: {commit}, term: {term}) before finalizing shard transfer"
    );
    let this_peer = self.this_peer_id();
    channel_service
        .await_commit_on_all_peers(this_peer, commit, term, defaults::CONSENSUS_META_OP_WAIT)
        .await
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/stream_records.rs | lib/collection/src/shards/transfer/stream_records.rs | use std::sync::Arc;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use parking_lot::Mutex;
use semver::Version;
use super::transfer_tasks_pool::TransferTaskProgress;
use crate::operations::types::{CollectionError, CollectionResult, CountRequestInternal};
use crate::shards::CollectionId;
use crate::shards::channel_service::ChannelService;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::LockedShardHolder;
use crate::shards::transfer::{ShardTransfer, ShardTransferConsensus};
pub(super) const TRANSFER_BATCH_SIZE: usize = 100;
/// Minimum version all peers need to be to use the intermediate `ActiveRead` state during transfer
const STATE_ACTIVE_READ_MIN_VERSION: Version = Version::new(1, 16, 0);
/// Orchestrate shard transfer by streaming records
///
/// This is called on the sender and will arrange all that is needed for the shard transfer
/// process.
///
/// This first transfers configured indices. Then it transfers all point records in batches.
/// Updates to the local shard are forwarded to the remote concurrently.
///
/// # Cancel safety
///
/// This function is cancel safe.
#[allow(clippy::too_many_arguments)]
pub(super) async fn transfer_stream_records(
    transfer_config: ShardTransfer,
    shard_holder: Arc<LockedShardHolder>,
    progress: Arc<Mutex<TransferTaskProgress>>,
    shard_id: ShardId,
    remote_shard: RemoteShard,
    channel_service: &ChannelService,
    consensus: &dyn ShardTransferConsensus,
    collection_id: &CollectionId,
) -> CollectionResult<()> {
    let remote_peer_id = remote_shard.peer_id;
    // Assigned inside the proxify scope below, consumed at the very end to update the remote
    let cutoff;
    let filter = transfer_config.filter;
    // NOTE(review): when a filter is set the batches are transferred in merge mode — presumably
    // because the target shard may already hold other points; confirm in `transfer_batch`
    let merge_points = filter.is_some();
    #[cfg(feature = "staging")]
    let staging_delay = std::env::var("QDRANT_STAGING_SHARD_TRANSFER_DELAY_SEC")
        .ok()
        .map(|val| {
            std::time::Duration::from_secs_f64(
                val.parse::<f64>()
                    .expect("invalid QDRANT_STAGING_SHARD_TRANSFER_DELAY_SEC value"),
            )
        });
    // Whether we need an intermediate replica state (ActiveRead) during transfer to sync nodes
    // We use this when transferring between different shard IDs to ensure data consistency, this
    // way all readers can be switched to the new shard before any writers
    let sync_intermediate_state = transfer_config
        .to_shard_id
        .is_some_and(|id| transfer_config.shard_id != id);
    // If syncing peers with intermediate replica state, all nodes must have a certain version
    if sync_intermediate_state
        && !channel_service.all_peers_at_version(&STATE_ACTIVE_READ_MIN_VERSION)
    {
        return Err(CollectionError::service_error(format!(
            "Cannot perform shard transfer between different shards using streaming records because not all peers are version {STATE_ACTIVE_READ_MIN_VERSION} or higher"
        )));
    }
    log::debug!("Starting shard {shard_id} transfer to peer {remote_peer_id} by streaming records");
    // Proxify local shard and create payload indexes on remote shard
    {
        let shard_holder = shard_holder.read().await;
        let Some(replica_set) = shard_holder.get_shard(shard_id) else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} cannot be proxied because it does not exist"
            )));
        };
        replica_set
            .proxify_local(remote_shard.clone(), None, filter)
            .await?;
        // Don't increment hardware usage for internal operations
        let hw_acc = HwMeasurementAcc::disposable();
        // Approximate (`exact: false`) point count, only used to seed progress reporting
        let Some(count_result) = replica_set
            .count_local(
                Arc::new(CountRequestInternal {
                    filter: None,
                    exact: false,
                }),
                None, // no timeout
                hw_acc,
            )
            .await?
        else {
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} not found"
            )));
        };
        progress.lock().set(0, count_result.count);
        replica_set.transfer_indexes().await?;
        // Take our last seen clocks as cutoff point right before doing content batch transfers
        cutoff = replica_set.shard_recovery_point().await?;
    }
    // Transfer contents batch by batch
    log::trace!("Transferring points to shard {shard_id} by streaming records");
    let mut offset = None;
    loop {
        // Re-acquire the shard holder per batch rather than holding the lock for the whole loop
        let shard_holder = shard_holder.read().await;
        let Some(replica_set) = shard_holder.get_shard(shard_id) else {
            // Forward proxy gone?!
            // That would be a programming error.
            return Err(CollectionError::service_error(format!(
                "Shard {shard_id} is not found"
            )));
        };
        let (new_offset, count) = replica_set
            .transfer_batch(offset, TRANSFER_BATCH_SIZE, None, merge_points)
            .await?;
        offset = new_offset;
        progress.lock().add(count);
        #[cfg(feature = "staging")]
        if let Some(delay) = staging_delay {
            tokio::time::sleep(delay).await;
        }
        // If this is the last batch, finalize
        if offset.is_none() {
            break;
        }
    }
    // Sync all peers with intermediate replica state, switch to ActiveRead and sync all peers
    if sync_intermediate_state {
        log::trace!(
            "Shard {shard_id} recovered on {remote_peer_id} for stream records transfer, switching into next stage through consensus",
        );
        consensus
            .switch_partial_to_read_active_confirm_peers(
                channel_service,
                collection_id,
                &remote_shard,
            )
            .await
            .map_err(|err| {
                CollectionError::service_error(format!(
                    "Can't switch shard {shard_id} to ActiveRead state after stream records transfer: {err}"
                ))
            })?;
    }
    // Update cutoff point on remote shard, disallow recovery before it
    //
    // We provide it our last seen clocks from just before transferring the content batches, and
    // not our current last seen clocks. We're sure that after the transfer the remote must have
    // seen all point data for those clocks. While we cannot guarantee the remote has all point
    // data for our current last seen clocks because some operations may still be in flight.
    // This is a trade-off between being conservative and being too conservative.
    //
    // We must send a cutoff point to the remote so it can learn about all the clocks that exist.
    // If we don't do this it is possible the remote will never see a clock, breaking all future
    // WAL delta transfers.
    remote_shard
        .update_shard_cutoff_point(collection_id, remote_shard.id, &cutoff)
        .await?;
    log::debug!("Ending shard {shard_id} transfer to peer {remote_peer_id} by streaming records");
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/wal_delta.rs | lib/collection/src/shards/transfer/wal_delta.rs | use std::sync::Arc;
use common::defaults;
use parking_lot::Mutex;
use super::transfer_tasks_pool::TransferTaskProgress;
use super::{ShardTransfer, ShardTransferConsensus};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::CollectionId;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::LockedShardHolder;
/// Orchestrate shard diff transfer
///
/// This is called on the sender and will arrange all that is needed for the shard diff transfer
/// process to a receiver.
///
/// The order of operations here is critical for correctness. Explicit synchronization across nodes
/// is used to ensure data consistency.
///
/// Before this function, this has happened:
///
/// - The existing shard is kept on the remote
/// - Set the remote shard state to `Recovery`
/// In `Recovery` state, the remote shard will ignore all operations by default and other nodes
/// will prevent sending operations to it. Only operations that are forced will be accepted. This
/// is critical not to mess with the order of operations while recovery is happening.
///
/// During this function, this happens in order:
///
/// - Request recovery point on remote shard
/// We use the recovery point to try and resolve a WAL delta to transfer to the remote.
/// - Resolve WAL delta locally
/// Find a point in our current WAL to transfer all operations from to the remote. If we cannot
/// resolve a WAL delta, the transfer is aborted. If the resolved delta is empty, we start from
/// our last WAL entry to ensure the remote does not miss any new updates.
/// - Queue proxy local shard
/// We queue all operations from the WAL delta point for the remote.
/// - Transfer queued updates to remote, transform into forward proxy
/// We transfer all accumulated updates in the queue proxy to the remote. This ensures all
/// operations reach the recovered shard on the remote to make it consistent again. When all
/// updates are transferred, we transform the queue proxy into a forward proxy to start
/// forwarding new updates to the remote right away. We transfer the queue and transform into a
/// forward proxy right now so that we can catch any errors as early as possible. The forward
/// proxy shard we end up with will not error again once we un-proxify.
/// - Set shard state to `Partial`
/// After recovery, we set the shard state from `Recovery` to `Partial`. We propose an operation
/// to consensus for this. Our logic explicitly confirms that the remote reaches the `Partial`
/// state.
/// - Wait for Partial state in our replica set
/// Wait for the remote shard to be set to `Partial` in our local replica set. That way we
/// confirm consensus has also propagated on this node.
/// - Synchronize all nodes
/// After confirming consensus propagation on this node, synchronize all nodes to reach the same
/// consensus state before finalizing the transfer. That way, we ensure we have a consistent
/// replica set state across all nodes. All nodes will have the `Partial` state, which makes the
/// shard participate on all nodes.
///
/// After this function, the following will happen:
///
/// - The local shard is un-proxified
/// - The shard transfer is finished
/// - The remote shard state is set to `Active` through consensus
///
/// # Cancel safety
///
/// This function is cancel safe.
///
/// If cancelled - the remote shard may only be partially recovered/transferred and the local shard
/// may be left in an unexpected state. This must be resolved manually in case of cancellation.
#[allow(clippy::too_many_arguments)]
pub(super) async fn transfer_wal_delta(
    transfer_config: ShardTransfer,
    shard_holder: Arc<LockedShardHolder>,
    progress: Arc<Mutex<TransferTaskProgress>>,
    shard_id: ShardId,
    remote_shard: RemoteShard,
    consensus: &dyn ShardTransferConsensus,
    collection_id: &CollectionId,
) -> CollectionResult<()> {
    let remote_peer_id = remote_shard.peer_id;
    log::debug!("Starting shard {shard_id} transfer to peer {remote_peer_id} using diff transfer");
    // Ask remote shard on failed node for recovery point
    let recovery_point = remote_shard
        .shard_recovery_point(collection_id, shard_id)
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Failed to request recovery point from remote shard: {err}"
            ))
        })?;
    let shard_holder_read = shard_holder.read().await;
    let transferring_shard = shard_holder_read.get_shard(shard_id);
    let Some(replica_set) = transferring_shard else {
        return Err(CollectionError::service_error(format!(
            "Shard {shard_id} cannot be queue proxied because it does not exist"
        )));
    };
    // Resolve WAL delta, get the version to start the diff from
    // `next_wal_version` is one past our latest WAL entry; used as fallback when the diff is empty
    let next_wal_version = replica_set.wal_version().await?.map(|n| n + 1);
    let wal_delta_version = replica_set
        .resolve_wal_delta(recovery_point)
        .await
        .map_err(|err| {
            CollectionError::service_error(format!("Failed to resolve shard diff: {err}"))
        })?
        // If diff is empty, queue and forward from our version to prevent losing new updates
        // See: <https://github.com/qdrant/qdrant/pull/5271>
        .or_else(|| {
            log::trace!("Remote shard is up-to-date and WAL diff is empty, queueing newly incoming updates (version: {next_wal_version:?})");
            next_wal_version
        });
    // Queue proxy local shard, start flushing updates to remote
    replica_set
        .queue_proxify_local(remote_shard.clone(), wal_delta_version, progress)
        .await?;
    debug_assert!(
        replica_set.is_queue_proxy().await,
        "Local shard must be a queue proxy",
    );
    log::trace!("Transfer WAL diff by transferring all current queue proxy updates");
    replica_set.queue_proxy_flush().await?;
    // Set shard state to Partial
    // Proposed through consensus; the helper confirms the remote actually reaches Partial
    log::trace!(
        "Shard {shard_id} diff transferred to {remote_peer_id} for diff transfer, switching into next stage through consensus",
    );
    consensus
        .recovered_switch_to_partial_confirm_remote(&transfer_config, collection_id, &remote_shard)
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Can't switch shard {shard_id} to Partial state after diff transfer: {err}"
            ))
        })?;
    // Transform queue proxy into forward proxy, transfer any remaining updates that just came in
    // After this returns, the complete WAL diff is transferred
    log::trace!("Transform queue proxy into forward proxy, transferring any remaining records");
    replica_set.queue_proxy_into_forward_proxy().await?;
    // Wait for Partial state in our replica set
    // Consensus sync is done right after this function
    let partial_state = ReplicaState::Partial;
    log::trace!("Wait for local shard to reach {partial_state:?} state");
    replica_set
        .wait_for_state(
            transfer_config.to,
            partial_state,
            defaults::CONSENSUS_META_OP_WAIT,
        )
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Shard being transferred did not reach {partial_state:?} state in time: {err}",
            ))
        })?;
    log::debug!("Ending shard {shard_id} transfer to peer {remote_peer_id} using diff transfer");
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/snapshot.rs | lib/collection/src/shards/transfer/snapshot.rs | use std::path::Path;
use std::sync::Arc;
use common::defaults;
use parking_lot::Mutex;
use semver::Version;
use tempfile::TempPath;
use super::transfer_tasks_pool::TransferTaskProgress;
use super::{ShardTransfer, ShardTransferConsensus};
use crate::operations::snapshot_ops::{SnapshotPriority, get_checksum_path};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::CollectionId;
use crate::shards::channel_service::ChannelService;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::replica_set::replica_set_state::ReplicaState;
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::LockedShardHolder;
/// Orchestrate shard snapshot transfer
///
/// This is called on the sender and will arrange all that is needed for the shard snapshot
/// transfer process to a receiver.
///
/// The order of operations here is critical for correctness. Explicit synchronization across nodes
/// is used to ensure data consistency.
///
/// Before this function, this has happened:
///
/// - An empty shard is initialized on the remote
/// - Set the remote shard state to `PartialSnapshot`
/// In `PartialSnapshot` state, the remote shard will ignore all operations and other nodes will
/// prevent sending operations to it. This is critical not to modify the shard while it is being
/// recovered from the snapshot.
///
/// During this function, this happens in order:
///
/// - Queue proxy local shard
/// We queue all new operations to the shard for the remote. Once the remote is ready, we can
/// transfer all these operations to it.
/// - Create shard snapshot
/// Snapshot the shard after the queue proxy is initialized. This snapshot will be used to get
/// the shard into the same state on the remote.
/// - Recover shard snapshot on remote
/// Instruct the remote to download the snapshot from this node over HTTP, then recover it.
/// - Set shard state to `Partial`
/// After recovery, we set the shard state from `PartialSnapshot` to `Partial`. We propose an
/// operation to consensus for this. Our logic explicitly confirms that the remote reaches the
/// `Partial` state. That is critical for the remote to accept incoming operations, that also
/// confirms consensus has accepted our proposal. If this fails it will be retried up to
/// three times.
/// - Transfer queued updates to remote, transform into forward proxy
/// Once the remote is in `Partial` state we can transfer all accumulated updates in the queue
/// proxy to the remote. This ensures all operations reach the recovered shard on the remote to
/// make it consistent again. When all updates are transferred, we transform the queue proxy into
/// a forward proxy to start forwarding new updates to the remote right away.
/// We transfer the queue and transform into a forward proxy right now so that we can catch any
/// errors as early as possible. The forward proxy shard we end up with will not error again once
/// we un-proxify.
/// - Wait for Partial state in our replica set
/// Wait for the remote shard to be set to `Partial` in our local replica set. That way we
/// confirm consensus has also propagated on this node.
/// - Synchronize all nodes
/// After confirming consensus propagation on this node, synchronize all nodes to reach the same
/// consensus state before finalizing the transfer. That way, we ensure we have a consistent
/// replica set state across all nodes. All nodes will have the `Partial` state, which makes the
/// shard participate on all nodes.
///
/// After this function, the following will happen:
///
/// - The local shard is un-proxified
/// - The shard transfer is finished
/// - The remote shard state is set to `Active` through consensus
///
/// # Diagram
///
/// Here's a rough sequence diagram for the shard snapshot transfer process with the consensus,
/// sender and receiver actors:
///
/// ┌───────────┐ ┌───────────┐ ┌───────────┐
/// │ Consensus │ │ Sender │ │ Receiver │
/// └───────────┘ └───────────┘ └───────────┘
/// | | |
/// | start transfer | |
/// ────►┌─┬──────────────────────|────────────────────────►|──┐
/// │ │ | | │ shard state:
/// │ │ start transfer | init transfer | │ Dead→PartialSnapshot
/// └─┴────────────────────►┬─┬──────────────────────►┌─┐◄┘
/// | │X│ │ │
/// | │X│ │ ├─┐
/// | │X│ ready │ │ │ init local shard
/// | │X├───────────────────────┴─┘◄┘
/// | │ │ |
/// | │ ├─┐ |
/// | │ │ │ qproxy + snapshot |
/// | │ │◄┘ |
/// | │ │ |
/// | │ │ recover shard by URL |
/// | │X├───────────────────────┬─┐
/// | │X│ │ │
/// | │X│ │ │
/// | ┌─┐◄─·│X│·──────────────────────┤ │
/// | │ │ │X│ download snapshot │ │
/// | └─┴──·│X│·─────────────────────►│ ├─┐
/// | │X│ │ │ │ apply snapshot
/// | │X│ done recovery │ │ │ delete snapshot
/// | │X│◄──────────────────────┴─┘◄┘
/// | snapshot recovered │ │ |
/// ┌─┐◄────────────────────┤ │ |
/// │ │ │ │ |
/// │ │ ┌─┤X│ |
/// │ │ wait consensus │ │X│ |
/// │ │ or retry │ │X│ |
/// │ │ │ │X│ |
/// │ │ continue transfer │ │X│ |
/// │ ├──────────────────·│ │X│·─────────────────────►┌─┬─┐
/// │ │ continue transfer │ │X│ │ │ │ shard state:
/// └─┴───────────────────┤►│X├─┐ │ │ │ PartialSnapshot→Partial
/// | │ │X│ │ shard state: └─┘◄┘
/// | │ │X│ │ PartialSnpst→Partial |
/// | └►│X│◄┘ |
/// | │ │ |
/// | │ │ transfer queue ops |
/// | ┌►│X├──────────────────────►┌─┬─┐
/// | send batches │ │X│ │ │ │ apply operations
/// | └─┤X│◄──────────────────────┴─┘◄┘
/// | │ │ |
/// | │ ├─┐ |
/// | │ │ │ qproxy→fwd proxy |
/// | │ │◄┘ |
/// | │ │ |
/// | │ │ sync all nodes |
/// | │X├──────────────────────►┌─┬─┐
/// | │X│ │ │ │ wait consensus
/// | │X│ node synced │ │ │ commit+term
/// | │X│◄──────────────────────┴─┘◄┘
/// | │ │ |
/// | │ ├─┐ |
/// | finish transfer │ │ │ unproxify |
/// ┌─┐◄────────────────────┴─┘◄┘ |
/// │ │ transfer finished | |
/// │ ├──────────────────────|───────────────────────►┌─┬─┐
/// │ │ transfer finished | │ │ │ shard state:
/// └─┴────────────────────►┌─┬─┐ │ │ │ Partial→Active
/// | │ │ │ shard state: └─┘◄┘
/// | │ │ │ Partial→Active |
/// | └─┘◄┘ |
/// | | |
///
/// # Cancel safety
///
/// This function is cancel safe.
///
/// If cancelled - the remote shard may only be partially recovered/transferred and the local shard
/// may be left in an unexpected state. This must be resolved manually in case of cancellation.
#[allow(clippy::too_many_arguments)]
pub(super) async fn transfer_snapshot(
    transfer_config: ShardTransfer,
    shard_holder: Arc<LockedShardHolder>,
    progress: Arc<Mutex<TransferTaskProgress>>,
    shard_id: ShardId,
    remote_shard: RemoteShard,
    channel_service: &ChannelService,
    consensus: &dyn ShardTransferConsensus,
    snapshots_path: &Path,
    collection_id: &CollectionId,
    temp_dir: &Path,
) -> CollectionResult<()> {
    let remote_peer_id = remote_shard.peer_id;
    log::debug!(
        "Starting shard {shard_id} transfer to peer {remote_peer_id} using snapshot transfer"
    );
    let shard_holder_read = shard_holder.read().await;
    let local_rest_address = channel_service.current_rest_address(transfer_config.from)?;
    let transferring_shard = shard_holder_read.get_shard(shard_id);
    let Some(replica_set) = transferring_shard else {
        return Err(CollectionError::service_error(format!(
            "Shard {shard_id} cannot be queue proxied because it does not exist"
        )));
    };
    // Queue proxy local shard
    replica_set
        .queue_proxify_local(remote_shard.clone(), None, progress)
        .await?;
    debug_assert!(
        replica_set.is_queue_proxy().await,
        "Local shard must be a queue proxy",
    );
    // The ability to read streaming snapshot format is introduced in 1.12 (#5179).
    let use_streaming_endpoint =
        channel_service.peer_is_at_version(remote_peer_id, &Version::new(1, 12, 0));
    // Temporary files to delete after recovery; only populated on the non-streaming path
    let mut snapshot_temp_paths = Vec::new();
    let mut shard_download_url = local_rest_address;
    let encoded_collection_name = urlencoding::encode(collection_id);
    if use_streaming_endpoint {
        log::trace!("Using streaming endpoint for shard snapshot transfer");
        shard_download_url.set_path(&format!(
            "/collections/{encoded_collection_name}/shards/{shard_id}/snapshot",
        ));
    } else {
        // Create shard snapshot
        log::trace!("Creating snapshot of shard {shard_id} for shard snapshot transfer");
        let snapshot_description = shard_holder_read
            .create_shard_snapshot(snapshots_path, collection_id, shard_id, temp_dir)
            .await?;
        // TODO: If future is cancelled until `get_shard_snapshot_path` resolves, shard snapshot may not be cleaned up...
        let snapshot_temp_path = shard_holder_read
            .get_shard_snapshot_path(snapshots_path, shard_id, &snapshot_description.name)
            .await
            .map(TempPath::from_path)
            .map_err(|err| {
                CollectionError::service_error(format!(
                    "Failed to determine snapshot path, cannot continue with shard snapshot recovery: {err}",
                ))
            })?;
        let snapshot_checksum_temp_path =
            TempPath::from_path(get_checksum_path(&snapshot_temp_path));
        snapshot_temp_paths.push(snapshot_temp_path);
        snapshot_temp_paths.push(snapshot_checksum_temp_path);
        let encoded_snapshot_name = urlencoding::encode(&snapshot_description.name);
        shard_download_url.set_path(&format!(
            "/collections/{encoded_collection_name}/shards/{shard_id}/snapshots/{encoded_snapshot_name}"
        ));
    };
    // Recover shard snapshot on remote
    log::trace!("Transferring and recovering shard {shard_id} snapshot on peer {remote_peer_id}");
    remote_shard
        .recover_shard_snapshot_from_url(
            collection_id,
            shard_id,
            &shard_download_url,
            SnapshotPriority::ShardTransfer,
            // Provide API key here so the remote can access our snapshot
            channel_service.api_key.as_deref(),
        )
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Failed to recover shard snapshot on remote: {err}"
            ))
        })?;
    // Best-effort cleanup of local snapshot files; failure only leaks disk space, so warn only
    for snapshot_temp_path in snapshot_temp_paths {
        if let Err(err) = snapshot_temp_path.close() {
            log::warn!(
                "Failed to delete shard transfer snapshot after recovery, \
                 snapshot file may be left behind: {err}"
            );
        }
    }
    // Set shard state to Partial
    // Proposed through consensus; the helper confirms the remote actually reaches Partial
    log::trace!(
        "Shard {shard_id} snapshot recovered on {remote_peer_id} for snapshot transfer, switching into next stage through consensus",
    );
    consensus
        .recovered_switch_to_partial_confirm_remote(&transfer_config, collection_id, &remote_shard)
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Can't switch shard {shard_id} to Partial state after snapshot transfer: {err}"
            ))
        })?;
    // Transfer queued updates to remote, transform into forward proxy
    log::trace!("Transfer all queue proxy updates and transform into forward proxy");
    replica_set.queue_proxy_into_forward_proxy().await?;
    // Wait for Partial state in our replica set
    // Consensus sync is done right after this function
    let partial_state = ReplicaState::Partial;
    log::trace!("Wait for local shard to reach {partial_state:?} state");
    replica_set
        .wait_for_state(
            transfer_config.to,
            partial_state,
            defaults::CONSENSUS_META_OP_WAIT,
        )
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Shard being transferred did not reach {partial_state:?} state in time: {err}",
            ))
        })?;
    log::debug!(
        "Ending shard {shard_id} transfer to peer {remote_peer_id} using snapshot transfer"
    );
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/transfer/driver.rs | lib/collection/src/shards/transfer/driver.rs | use std::future::Future;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use parking_lot::Mutex;
use tokio::time::sleep;
use super::resharding_stream_records::transfer_resharding_stream_records;
use super::snapshot::transfer_snapshot;
use super::stream_records::transfer_stream_records;
use super::transfer_tasks_pool::TransferTaskProgress;
use super::wal_delta::transfer_wal_delta;
use super::{ShardTransfer, ShardTransferConsensus, ShardTransferMethod};
use crate::common::stoppable_task_async::{CancellableAsyncTaskHandle, spawn_async_cancellable};
use crate::operations::types::CollectionResult;
use crate::shards::channel_service::ChannelService;
use crate::shards::remote_shard::RemoteShard;
use crate::shards::shard::ShardId;
use crate::shards::shard_holder::{LockedShardHolder, ShardHolder};
use crate::shards::{CollectionId, await_consensus_sync};
/// Delay between shard transfer retry attempts — presumably consumed by the retry driver
/// outside this view; confirm at the use site.
const RETRY_DELAY: Duration = Duration::from_secs(1);
/// Maximum number of attempts for a shard transfer — presumably consumed by the retry driver
/// outside this view; confirm at the use site.
pub(crate) const MAX_RETRY_COUNT: usize = 3;
/// Drive the shard transfer on the source node based on the given transfer configuration
///
/// Returns `true` if we should finalize the shard transfer. Returns `false` if we should silently
/// drop it, because it is being restarted.
///
/// # Cancel safety
///
/// This function is cancel safe.
#[allow(clippy::too_many_arguments)]
pub async fn transfer_shard(
    transfer_config: ShardTransfer,
    progress: Arc<Mutex<TransferTaskProgress>>,
    shard_holder: Arc<LockedShardHolder>,
    consensus: &dyn ShardTransferConsensus,
    collection_id: CollectionId,
    channel_service: ChannelService,
    snapshots_path: &Path,
    temp_dir: &Path,
) -> CollectionResult<bool> {
    // The remote might target a different shard ID depending on the shard transfer type
    let local_shard_id = transfer_config.shard_id;
    let remote_shard_id = transfer_config.to_shard_id.unwrap_or(local_shard_id);
    // Initiate shard on a remote peer
    let remote_shard = RemoteShard::new(
        remote_shard_id,
        collection_id.clone(),
        transfer_config.to,
        channel_service.clone(),
    );
    // Prepare the remote for receiving the shard, waits for the correct state on the remote
    remote_shard.initiate_transfer().await?;
    // Dispatch on the configured transfer method; each branch drives the transfer to completion
    match transfer_config.method.unwrap_or_default() {
        // Transfer shard record in batches
        ShardTransferMethod::StreamRecords => {
            transfer_stream_records(
                transfer_config,
                shard_holder.clone(),
                progress,
                local_shard_id,
                remote_shard,
                &channel_service,
                consensus,
                &collection_id,
            )
            .await?;
        }
        // Transfer shard record in batches for resharding
        ShardTransferMethod::ReshardingStreamRecords => {
            transfer_resharding_stream_records(
                shard_holder.clone(),
                progress,
                local_shard_id,
                remote_shard,
                &collection_id,
            )
            .await?;
        }
        // Transfer shard as snapshot
        ShardTransferMethod::Snapshot => {
            transfer_snapshot(
                transfer_config,
                shard_holder,
                progress,
                local_shard_id,
                remote_shard,
                &channel_service,
                consensus,
                snapshots_path,
                &collection_id,
                temp_dir,
            )
            .await?;
        }
        // Attempt to transfer WAL delta
        ShardTransferMethod::WalDelta => {
            let result = transfer_wal_delta(
                transfer_config.clone(),
                shard_holder,
                progress,
                local_shard_id,
                remote_shard,
                consensus,
                &collection_id,
            )
            .await;
            // Handle failure, fall back to default transfer method or propagate error
            if let Err(err) = result {
                let fallback_shard_transfer_method = ShardTransferMethod::default();
                log::warn!(
                    "Failed to do shard diff transfer, falling back to default method {fallback_shard_transfer_method:?}: {err}",
                );
                let did_fall_back = transfer_shard_fallback_default(
                    transfer_config,
                    consensus,
                    &collection_id,
                    fallback_shard_transfer_method,
                )
                .await?;
                // When the fallback was arranged this transfer is dropped silently (`Ok(false)`),
                // because a restarted transfer takes over; otherwise the original error surfaces
                return if did_fall_back { Ok(false) } else { Err(err) };
            }
        }
    }
    // Synchronize all nodes
    // Ensure all peers have reached a state where they'll start sending incoming updates to the
    // remote shard. A lagging peer must not still have the target shard in dead/recovery state.
    // Only then can we destruct the forward proxy.
    await_consensus_sync(consensus, &channel_service).await;
    Ok(true)
}
/// While in a shard transfer, fall back to the default shard transfer method
///
/// Returns true if we arranged falling back. Returns false if we could not fall back.
pub async fn transfer_shard_fallback_default(
    mut transfer_config: ShardTransfer,
    consensus: &dyn ShardTransferConsensus,
    collection_id: &CollectionId,
    fallback_method: ShardTransferMethod,
) -> CollectionResult<bool> {
    // Falling back to the method that just failed would be pointless
    if transfer_config.method == Some(fallback_method) {
        log::warn!(
            "Failed shard transfer fallback, because it would use the same transfer method: {fallback_method:?}",
        );
        return Ok(false);
    }
    // Ask consensus to restart this transfer with the fallback method instead
    transfer_config.method = Some(fallback_method);
    consensus
        .restart_shard_transfer_confirm_and_retry(&transfer_config, collection_id)
        .await?;
    Ok(false)
}
/// Return local shard back from the forward proxy
///
/// Returns `false` when the shard is not present in the holder, `true` otherwise.
///
/// # Cancel safety
///
/// This function is cancel safe.
pub async fn revert_proxy_shard_to_local(
    shard_holder: &ShardHolder,
    shard_id: ShardId,
) -> CollectionResult<bool> {
    match shard_holder.get_shard(shard_id) {
        None => Ok(false),
        Some(replica_set) => {
            // Drop any queue proxy first, discarding all updates collected so far
            replica_set.revert_queue_proxy_local().await;
            // Then swap the proxy back for the plain local shard
            replica_set.un_proxify_local().await?;
            Ok(true)
        }
    }
}
/// Spawn a cancellable background task that drives a shard transfer on the source node.
///
/// The transfer is attempted up to `MAX_RETRY_COUNT` times, sleeping a growing multiple of
/// `RETRY_DELAY` before each retry. On final success `on_finish` is awaited, on a final error
/// `on_error` is awaited; neither runs when the task is cancelled or the transfer is silently
/// dropped because it is being restarted.
///
/// The returned handle resolves to `true` only if the transfer completed and must be finalized.
#[allow(clippy::too_many_arguments)]
pub fn spawn_transfer_task<T, F>(
    shards_holder: Arc<LockedShardHolder>,
    progress: Arc<Mutex<TransferTaskProgress>>,
    transfer: ShardTransfer,
    consensus: Box<dyn ShardTransferConsensus>,
    collection_id: CollectionId,
    channel_service: ChannelService,
    snapshots_path: PathBuf,
    temp_dir: PathBuf,
    on_finish: T,
    on_error: F,
) -> CancellableAsyncTaskHandle<bool>
where
    T: Future<Output = ()> + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    spawn_async_cancellable(move |cancel| async move {
        let mut result = Err(cancel::Error::Cancelled);
        for attempt in 0..MAX_RETRY_COUNT {
            let future = async {
                if attempt > 0 {
                    // Back off a little longer on every retry
                    sleep(RETRY_DELAY * attempt as u32).await;
                    log::warn!(
                        "Retrying shard transfer {collection_id}:{} -> {} (retry {attempt})",
                        transfer.shard_id,
                        transfer.to,
                    );
                }
                transfer_shard(
                    transfer.clone(),
                    progress.clone(),
                    shards_holder.clone(),
                    consensus.as_ref(),
                    collection_id.clone(),
                    channel_service.clone(),
                    &snapshots_path,
                    &temp_dir,
                )
                .await
            };
            result = cancel::future::cancel_on_token(cancel.clone(), future).await;
            // Outcomes:
            // - `Ok(Ok(true))`: transfer finished, must be finalized
            // - `Ok(Ok(false))`: transfer is being restarted, silently drop it
            // - `Ok(Err(_))`: transfer failed, may retry
            // - `Err(_)`: task was cancelled
            let is_ok = matches!(result, Ok(Ok(true)));
            let is_err = matches!(result, Ok(Err(_)));
            let is_cancelled = result.is_err() || matches!(result, Ok(Ok(false)));
            if let Ok(Err(err)) = &result {
                log::error!(
                    "Failed to transfer shard {collection_id}:{} -> {}: {err}",
                    transfer.shard_id,
                    transfer.to,
                );
            }
            if is_err || is_cancelled {
                // Revert queue proxy if we still have any to prepare for the next attempt
                if let Some(shard) = shards_holder.read().await.get_shard(transfer.shard_id) {
                    shard.revert_queue_proxy_local().await;
                }
            }
            if is_ok || is_cancelled {
                break;
            }
        }
        match &result {
            Ok(Ok(true)) => on_finish.await,
            Ok(Ok(false)) => (), // do nothing, we should not finish the task
            Ok(Err(_)) => on_error.await,
            Err(_) => (), // do nothing, if task was cancelled
        }
        // Finalize only if the transfer fully completed
        matches!(result, Ok(Ok(true)))
    })
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/updaters.rs | lib/collection/src/shards/local_shard/updaters.rs | use std::sync::Arc;
use tokio::sync::mpsc;
use crate::operations::types::CollectionResult;
use crate::optimizers_builder::build_optimizers;
use crate::shards::local_shard::LocalShard;
use crate::update_handler::UpdateSignal;
impl LocalShard {
    /// Nudge the update worker so it re-evaluates optimizer conditions.
    pub fn trigger_optimizers(&self) {
        // Send a trigger signal and ignore errors because all error cases are acceptable:
        // - If receiver is already dead - we do not care
        // - If channel is full - optimization will be triggered by some other signal
        let _ = self.update_sender.load().try_send(UpdateSignal::Nop);
    }
    /// Stop the background flush worker of the update handler.
    pub async fn stop_flush_worker(&self) {
        let mut update_handler = self.update_handler.lock().await;
        update_handler.stop_flush_worker()
    }
    /// Wait until all update workers have fully stopped.
    pub async fn wait_update_workers_stop(&self) -> CollectionResult<()> {
        let mut update_handler = self.update_handler.lock().await;
        update_handler.wait_workers_stops().await
    }
    /// Handles updates to the optimizer configuration by rebuilding optimizers
    /// and restarting the update handler's workers with the new configuration.
    ///
    /// ## Cancel safety
    ///
    /// This function is **not** cancel safe.
    pub async fn on_optimizer_config_update(&self) -> CollectionResult<()> {
        let config = self.collection_config.read().await;
        let mut update_handler = self.update_handler.lock().await;
        // Fresh channel for the restarted workers
        let (update_sender, update_receiver) =
            mpsc::channel(self.shared_storage_config.update_queue_size);
        // makes sure that the Stop signal is the last one in this channel
        let old_sender = self.update_sender.swap(Arc::new(update_sender));
        old_sender.send(UpdateSignal::Stop).await?;
        update_handler.stop_flush_worker();
        update_handler.wait_workers_stops().await?;
        // Rebuild optimizers from the (possibly changed) collection config
        let new_optimizers = build_optimizers(
            &self.path,
            &config.params,
            &config.optimizer_config,
            &config.hnsw_config,
            &self.shared_storage_config.hnsw_global_config,
            &config.quantization_config,
        );
        update_handler.optimizers = new_optimizers;
        update_handler.flush_interval_sec = config.optimizer_config.flush_interval_sec;
        update_handler.max_optimization_threads = config.optimizer_config.max_optimization_threads;
        update_handler.run_workers(update_receiver);
        // Kick the new workers once so they pick up any pending work
        self.update_sender.load().send(UpdateSignal::Nop).await?;
        Ok(())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/scroll.rs | lib/collection/src/shards/local_shard/scroll.rs | use std::collections::HashSet;
use std::sync::Arc;
use std::time::{Duration, Instant};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use futures::future::try_join_all;
use itertools::Itertools as _;
use rand::distr::weighted::WeightedIndex;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::order_by::{Direction, OrderBy};
use segment::types::{
ExtendedPointId, Filter, ScoredPoint, WithPayload, WithPayloadInterface, WithVector,
};
use shard::common::stopping_guard::StoppingGuard;
use shard::retrieve::record_internal::RecordInternal;
use tokio::runtime::Handle;
use tokio_util::task::AbortOnDropHandle;
use super::LocalShard;
use crate::collection_manager::holders::segment_holder::LockedSegment;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::operations::types::{
CollectionError, CollectionResult, QueryScrollRequestInternal, ScrollOrder,
};
impl LocalShard {
    /// Basic parallel batching, it is conveniently used for the universal query API.
    pub(super) async fn query_scroll_batch(
        &self,
        batch: Arc<Vec<QueryScrollRequestInternal>>,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
        if batch.is_empty() {
            return Ok(vec![]);
        }
        let scrolls = batch.iter().map(|request| {
            self.query_scroll(
                request,
                search_runtime_handle,
                timeout,
                hw_measurement_acc.clone(),
            )
        });
        // execute all the scrolls concurrently
        let all_scroll_results = try_join_all(scrolls);
        // The same timeout bounds the whole batch, not each scroll individually
        tokio::time::timeout(timeout, all_scroll_results)
            .await
            .map_err(|_| {
                log::debug!("Query scroll timeout reached: {timeout:?}");
                CollectionError::timeout(timeout, "Query scroll")
            })?
    }
    /// Scroll a single page, to be used for the universal query API only.
    ///
    /// Dispatches to by-id, by-field, or random scrolling based on the request, and converts
    /// the resulting records to `ScoredPoint`s with a fixed score of 1.0 and version 0.
    async fn query_scroll(
        &self,
        request: &QueryScrollRequestInternal,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ScoredPoint>> {
        let QueryScrollRequestInternal {
            limit,
            with_vector,
            filter,
            scroll_order,
            with_payload,
        } = request;
        let limit = *limit;
        // Universal query scrolls always start from the beginning (no offset cursor)
        let offset_id = None;
        let record_results = match scroll_order {
            ScrollOrder::ById => {
                self.internal_scroll_by_id(
                    offset_id,
                    limit,
                    with_payload,
                    with_vector,
                    filter.as_ref(),
                    search_runtime_handle,
                    timeout,
                    hw_measurement_acc,
                )
                .await?
            }
            ScrollOrder::ByField(order_by) => {
                self.internal_scroll_by_field(
                    limit,
                    with_payload,
                    with_vector,
                    filter.as_ref(),
                    search_runtime_handle,
                    order_by,
                    timeout,
                    hw_measurement_acc,
                )
                .await?
            }
            ScrollOrder::Random => {
                self.scroll_randomly(
                    limit,
                    with_payload,
                    with_vector,
                    filter.as_ref(),
                    search_runtime_handle,
                    timeout,
                    hw_measurement_acc,
                )
                .await?
            }
        };
        let point_results = record_results
            .into_iter()
            .map(|record| ScoredPoint {
                id: record.id,
                version: 0,
                score: 1.0,
                payload: record.payload,
                vector: record.vector,
                shard_key: record.shard_key,
                order_value: record.order_value,
            })
            .collect();
        Ok(point_results)
    }
    /// Scroll up to `limit` records in point-ID order, starting at `offset` (inclusive).
    ///
    /// Reads all segments concurrently on the search runtime, merges and dedups the IDs,
    /// then retrieves payload/vector data for the first `limit` of them.
    #[allow(clippy::too_many_arguments)]
    pub async fn internal_scroll_by_id(
        &self,
        offset: Option<ExtendedPointId>,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let start = Instant::now();
        let stopping_guard = StoppingGuard::new();
        let segments = self.segments.clone();
        let update_operation_lock = self.update_operation_lock.read().await;
        let (non_appendable, appendable) = segments.read().split_segments();
        let read_filtered = |segment: LockedSegment, hw_counter: HardwareCounterCell| {
            let filter = filter.cloned();
            let is_stopped = stopping_guard.get_is_stopped();
            let task = search_runtime_handle.spawn_blocking(move || {
                segment.get().read().read_filtered(
                    offset,
                    Some(limit),
                    filter.as_ref(),
                    &is_stopped,
                    &hw_counter,
                )
            });
            AbortOnDropHandle::new(task)
        };
        let hw_counter = hw_measurement_acc.get_counter_cell();
        let all_reads = tokio::time::timeout(
            timeout,
            try_join_all(
                non_appendable
                    .into_iter()
                    .chain(appendable)
                    .map(|segment| read_filtered(segment, hw_counter.fork())),
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "scroll_by_id"))??;
        // Points may live in multiple segments; sort + dedup before applying the limit
        let point_ids = all_reads
            .into_iter()
            .flatten()
            .sorted()
            .dedup()
            .take(limit)
            .collect_vec();
        let with_payload = WithPayload::from(with_payload_interface);
        // update timeout
        let timeout = timeout.saturating_sub(start.elapsed());
        let mut records_map = tokio::time::timeout(
            timeout,
            SegmentsSearcher::retrieve(
                segments,
                &point_ids,
                &with_payload,
                with_vector,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "retrieve"))??;
        drop(update_operation_lock);
        let ordered_records = point_ids
            .iter()
            // Use remove to avoid cloning, we take each point ID only once
            .filter_map(|point_id| records_map.remove(point_id))
            .collect();
        Ok(ordered_records)
    }
    /// Scroll up to `limit` records ordered by a payload field (`order_by`).
    ///
    /// Each segment produces its own ordered stream; streams are k-way merged in the requested
    /// direction, deduped, limited, and then the records are retrieved and annotated with their
    /// order value.
    #[allow(clippy::too_many_arguments)]
    pub async fn internal_scroll_by_field(
        &self,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        order_by: &OrderBy,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let start = Instant::now();
        let stopping_guard = StoppingGuard::new();
        let segments = self.segments.clone();
        let update_operation_lock = self.update_operation_lock.read().await;
        let (non_appendable, appendable) = segments.read().split_segments();
        let read_ordered_filtered = |segment: LockedSegment, hw_counter: &HardwareCounterCell| {
            let is_stopped = stopping_guard.get_is_stopped();
            let filter = filter.cloned();
            let order_by = order_by.clone();
            let hw_counter = hw_counter.fork();
            let task = search_runtime_handle.spawn_blocking(move || {
                segment.get().read().read_ordered_filtered(
                    Some(limit),
                    filter.as_ref(),
                    &order_by,
                    &is_stopped,
                    &hw_counter,
                )
            });
            AbortOnDropHandle::new(task)
        };
        let hw_counter = hw_measurement_acc.get_counter_cell();
        let all_reads = tokio::time::timeout(
            timeout,
            try_join_all(
                non_appendable
                    .into_iter()
                    .chain(appendable)
                    .map(|segment| read_ordered_filtered(segment, &hw_counter)),
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "scroll_by_field"))??;
        // Per-segment reads are fallible here; surface the first segment error
        let all_reads = all_reads.into_iter().collect::<Result<Vec<_>, _>>()?;
        let (values, point_ids): (Vec<_>, Vec<_>) = all_reads
            .into_iter()
            .kmerge_by(|a, b| match order_by.direction() {
                Direction::Asc => a <= b,
                Direction::Desc => a >= b,
            })
            .dedup()
            .take(limit)
            .unzip();
        let with_payload = WithPayload::from(with_payload_interface);
        // update timeout
        let timeout = timeout.saturating_sub(start.elapsed());
        // Fetch with the requested vector and payload
        let records_map = tokio::time::timeout(
            timeout,
            SegmentsSearcher::retrieve(
                segments,
                &point_ids,
                &with_payload,
                with_vector,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "retrieve"))??;
        drop(update_operation_lock);
        let ordered_records = point_ids
            .iter()
            .zip(values)
            .filter_map(|(point_id, value)| {
                let mut record = records_map.get(point_id).cloned()?;
                record.order_value = Some(value);
                Some(record)
            })
            .collect();
        Ok(ordered_records)
    }
    /// Scroll up to `limit` records selected randomly across segments, weighted by each
    /// segment's available point count.
    #[allow(clippy::too_many_arguments)]
    async fn scroll_randomly(
        &self,
        limit: usize,
        with_payload_interface: &WithPayloadInterface,
        with_vector: &WithVector,
        filter: Option<&Filter>,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<RecordInternal>> {
        let start = Instant::now();
        let stopping_guard = StoppingGuard::new();
        let segments = self.segments.clone();
        let update_operation_lock = self.update_operation_lock.read().await;
        let (non_appendable, appendable) = segments.read().split_segments();
        let read_filtered = |segment: LockedSegment, hw_counter: &HardwareCounterCell| {
            let is_stopped = stopping_guard.get_is_stopped();
            let filter = filter.cloned();
            let hw_counter = hw_counter.fork();
            let task = search_runtime_handle.spawn_blocking(move || {
                let get_segment = segment.get();
                let read_segment = get_segment.read();
                (
                    read_segment.available_point_count(),
                    read_segment.read_random_filtered(
                        limit,
                        filter.as_ref(),
                        &is_stopped,
                        &hw_counter,
                    ),
                )
            });
            AbortOnDropHandle::new(task)
        };
        let hw_counter = hw_measurement_acc.get_counter_cell();
        let all_reads = tokio::time::timeout(
            timeout,
            try_join_all(
                non_appendable
                    .into_iter()
                    .chain(appendable)
                    .map(|segment| read_filtered(segment, &hw_counter)),
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "scroll_randomly"))??;
        let (availability, mut segments_reads): (Vec<_>, Vec<_>) = all_reads.into_iter().unzip();
        // Shortcut if all segments are empty
        if availability.iter().all(|&count| count == 0) {
            return Ok(Vec::new());
        }
        // Select points in a weighted fashion from each segment, depending on how many points each segment has.
        let distribution = WeightedIndex::new(availability).map_err(|err| {
            CollectionError::service_error(format!(
                "Failed to create weighted index for random scroll: {err:?}"
            ))
        })?;
        let mut rng = StdRng::from_os_rng();
        let mut random_points = HashSet::with_capacity(limit);
        // Randomly sample points in two stages
        //
        // 1. This loop iterates <= LIMIT times, and either breaks early if we
        // have enough points, or if some of the segments are exhausted.
        //
        // 2. If the segments are exhausted, we will fill up the rest of the
        // points from other segments. In total, the complexity is guaranteed to
        // be O(limit).
        while random_points.len() < limit {
            let segment_offset = rng.sample(&distribution);
            let points = segments_reads.get_mut(segment_offset).unwrap();
            if let Some(point) = points.pop() {
                random_points.insert(point);
            } else {
                // It seems that some segments are empty early,
                // so distribution does not make sense anymore.
                // This is only possible if segments size < limit.
                break;
            }
        }
        // If we still need more points, we will get them from the rest of the segments.
        // This is a rare case, as it seems we don't have enough points in individual segments.
        // Therefore, we can ignore "proper" distribution, as it won't be accurate anyway.
        if random_points.len() < limit {
            let rest_points = segments_reads.into_iter().flatten();
            for point in rest_points {
                random_points.insert(point);
                if random_points.len() >= limit {
                    break;
                }
            }
        }
        let selected_points: Vec<_> = random_points.into_iter().collect();
        let with_payload = WithPayload::from(with_payload_interface);
        // update timeout
        let timeout = timeout.saturating_sub(start.elapsed());
        let records_map = tokio::time::timeout(
            timeout,
            SegmentsSearcher::retrieve(
                segments,
                &selected_points,
                &with_payload,
                with_vector,
                search_runtime_handle,
                timeout,
                hw_measurement_acc,
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "retrieve"))??;
        drop(update_operation_lock);
        Ok(records_map.into_values().collect())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/clock_map.rs | lib/collection/src/shards/local_shard/clock_map.rs | use std::collections::{HashMap, hash_map};
use std::fmt;
use std::path::Path;
use api::grpc::qdrant::RecoveryPointClockTag;
use io::file_operations;
use serde::{Deserialize, Serialize};
use tonic::Status;
use crate::operations::types::CollectionError;
use crate::operations::{ClockTag, ClockToken};
use crate::shards::shard::PeerId;
/// Tracks the highest seen tick for every distributed clock, keyed by `(peer_id, clock_id)`.
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
#[serde(from = "ClockMapHelper", into = "ClockMapHelper")]
pub struct ClockMap {
    /// Current highest tick (and its token) for each tracked clock
    clocks: HashMap<Key, Clock>,
    /// Optional snapshot with earlier version of clocks
    snapshot: Option<HashMap<Key, Clock>>,
    /// Whether this clock map has changed since the last time it was persisted
    changed: bool,
}
impl ClockMap {
    /// Load the clock map from `path`, or return an empty default if the file does not exist.
    pub fn load_or_default(path: &Path) -> Result<Self> {
        let result = Self::load(path);
        if let Err(Error::Io(err)) = &result
            && err.kind() == std::io::ErrorKind::NotFound
        {
            return Ok(Self::default());
        }
        result
    }
    /// Load the clock map from the JSON file at `path`.
    pub fn load(path: &Path) -> Result<Self> {
        let clock_map = file_operations::read_json(path)?;
        Ok(clock_map)
    }
    /// Atomically persist the clock map as JSON at `path`, clearing the changed flag on success.
    pub fn store(&mut self, path: &Path) -> Result<()> {
        file_operations::atomic_save_json(path, self)?;
        self.changed = false;
        Ok(())
    }
    /// Persist the clock map only if it changed since the last successful store.
    pub fn store_if_changed(&mut self, path: &Path) -> Result<()> {
        if self.changed {
            self.store(path)?;
        }
        Ok(())
    }
    /// Advance clock referenced by `clock_tag` to `clock_tick`, if it's newer than current tick.
    /// Update `clock_tick` to current tick, if it's older.
    ///
    /// Returns whether operation should be accepted by the local shard and written into the WAL
    /// and applied to the storage, or rejected.
    #[must_use = "operation accept status must be used"]
    pub fn advance_clock_and_correct_tag(&mut self, clock_tag: &mut ClockTag) -> bool {
        let (clock_accepted, current_tick) = self.advance_clock_impl(*clock_tag);
        // We *accept* an operation, if its `clock_tick` is *newer* than `current_tick`.
        //
        // If we *reject* an operation, we have to update its `clock_tick` to `current_tick`,
        // so that we can return updated clock tag to the sender node, so that the node can
        // correct its clock.
        //
        // There are two special cases:
        // - we always *accept* operations with `force = true`
        //   - (*currently*, this is *stronger* than `clock_tick = 0` condition!)
        // - we always *reject* operations with `clock_tick = 0`
        //   - (this is handled by `advance_clock_impl`, so we don't need to check for `clock_tick = 0` explicitly)
        //
        // TODO: Should we *reject* operations with `force = true`, *if* `clock_tick = 0`!?
        let operation_accepted = clock_accepted || clock_tag.force;
        if !operation_accepted {
            clock_tag.clock_tick = current_tick;
        }
        operation_accepted
    }
    /// Advance clock referenced by `clock_tag` to `clock_tick`, if it's newer than current tick.
    ///
    /// If the clock is not yet tracked by the `ClockMap`, it is initialized to
    /// the `clock_tick` and added to the `ClockMap`.
    pub fn advance_clock(&mut self, clock_tag: ClockTag) {
        let _ = self.advance_clock_impl(clock_tag);
    }
    /// Advance clock referenced by `clock_tag` to `clock_tick`, if it's newer than current tick.
    ///
    /// If the clock is not yet tracked by the `ClockMap`, it is initialized to
    /// the `clock_tick` and added to the `ClockMap`.
    ///
    /// Returns whether the clock was accepted (or initialized) and the current tick.
    #[must_use = "clock update status and current tick must be used"]
    fn advance_clock_impl(&mut self, clock_tag: ClockTag) -> (bool, u64) {
        let key = Key::from_tag(clock_tag);
        let new_tick = clock_tag.clock_tick;
        let new_token = clock_tag.token;
        let (is_accepted, new_tick) = match self.clocks.entry(key) {
            hash_map::Entry::Occupied(mut entry) => entry.get_mut().advance_to(new_tick, new_token),
            hash_map::Entry::Vacant(entry) => {
                // Initialize new clock and accept the operation if `new_tick > 0`.
                // Reject the operation if `new_tick = 0`.
                let is_non_zero_tick = new_tick > 0;
                if is_non_zero_tick {
                    entry.insert(Clock::new(new_tick, new_token));
                }
                (is_non_zero_tick, new_tick)
            }
        };
        // Assume the state changed when the clock tag was accepted
        if is_accepted {
            self.changed = true;
        }
        (is_accepted, new_tick)
    }
    /// Take a snapshot of clocks
    ///
    /// Does nothing if a snapshot already exists. Returns `true` if a snapshot was taken.
    pub fn take_snapshot(&mut self) -> bool {
        if self.snapshot.is_some() {
            return false;
        }
        self.snapshot.replace(self.clocks.clone());
        self.changed = true;
        true
    }
    /// Clear any snapshot of clocks
    ///
    /// Returns `true` if a snapshot was cleared.
    pub fn clear_snapshot(&mut self) -> bool {
        if self.snapshot.is_none() {
            return false;
        }
        self.snapshot.take();
        self.changed = true;
        true
    }
    /// Create a recovery point based on the current clock map state, so that we can recover any
    /// new operations with new clock values
    ///
    /// The recovery point will be derived from a clocks snapshot if it exists. Otherwise the
    /// current clocks are used.
    ///
    /// The recovery point contains every clock that is in this clock map. So, it represents all
    /// the clock ticks we have.
    pub fn to_recovery_point(&self) -> RecoveryPoint {
        let clocks = self.snapshot.as_ref().unwrap_or(&self.clocks);
        RecoveryPoint {
            clocks: clocks
                .iter()
                .map(|(&key, clock)| (key, (clock.current_tick, clock.token)))
                .collect(),
        }
    }
    #[cfg(test)]
    pub fn current_tick(&self, peer_id: PeerId, clock_id: u32) -> Option<u64> {
        self.clocks
            .get(&Key::new(peer_id, clock_id))
            .map(Clock::current_tick)
    }
}
/// Identifies a single distributed clock: the peer it belongs to plus that peer's clock ID.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub struct Key {
    peer_id: PeerId,
    clock_id: u32,
}
impl Key {
    fn new(peer_id: PeerId, clock_id: u32) -> Self {
        Self { peer_id, clock_id }
    }
    /// Extract the clock key from a clock tag.
    fn from_tag(clock_tag: ClockTag) -> Self {
        Self::new(clock_tag.peer_id, clock_tag.clock_id)
    }
}
/// A single tracked clock: its highest seen tick plus the unique token of that tick's operation.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
struct Clock {
    current_tick: u64,
    token: ClockToken,
}
impl Clock {
    fn new(current_tick: u64, token: ClockToken) -> Self {
        Self {
            current_tick,
            token,
        }
    }
    /// Advance clock to `new_tick`, if `new_tick` is newer than current tick.
    ///
    /// Returns whether the clock was accepted and the current tick.
    ///
    /// The clock is updated when:
    /// - the given `new_tick` is newer than the current tick
    /// - the given `new_tick` and `new_token` are equal to the current tick and token
    #[must_use = "clock update status and current tick must be used"]
    fn advance_to(&mut self, new_tick: u64, new_token: ClockToken) -> (bool, u64) {
        if self.current_tick < new_tick {
            self.current_tick = new_tick;
            self.token = new_token;
        }
        // Accept exactly when the tag matches our current tick AND token: this accepts a newer
        // tick (just stored above) and re-accepts duplicates of the last accepted operation,
        // but rejects older ticks and same-tick operations carrying a different token.
        let operation_accepted = self.current_tick == new_tick && self.token == new_token;
        (operation_accepted, self.current_tick)
    }
    #[cfg(test)]
    fn current_tick(&self) -> u64 {
        self.current_tick
    }
}
/// A recovery point, being a list of distributed clocks with their tick value and unique token
///
/// The recovery point describes from what point we want to get operations from another node in
/// case of recovery. In other words, the recovery point has the first clock tick values the
/// recovering node has not seen yet.
#[derive(Clone, Debug, Default)]
pub struct RecoveryPoint {
    /// For each tracked clock: its tick value and the unique token of that tick
    clocks: HashMap<Key, (u64, ClockToken)>,
}
impl RecoveryPoint {
    /// Check whether this recovery point tracks no clocks at all.
    pub fn is_empty(&self) -> bool {
        self.clocks.is_empty()
    }
    /// Iterate over all recovery point entries as clock tags.
    pub fn iter_as_clock_tags(&self) -> impl Iterator<Item = ClockTag> + '_ {
        self.clocks.iter().map(|(key, &(tick, token))| {
            ClockTag::new_with_token(key.peer_id, key.clock_id, tick, token)
        })
    }
    /// Increase all existing clocks in this recovery point by the given amount
    pub fn increase_all_clocks_by(&mut self, ticks: u64) {
        for (current_tick, _) in self.clocks.values_mut() {
            *current_tick += ticks;
        }
    }
    /// Check whether this recovery point has any clocks that are not in `other`
    pub fn has_clocks_not_in(&self, other: &Self) -> bool {
        self.clocks
            .keys()
            .any(|key| !other.clocks.contains_key(key))
    }
    /// Check if this recovery point has any clock that is newer than the one in the `other`.
    ///
    /// A clock that is present in this recovery point, but not in the `other`,
    /// is always considered to be *newer*.
    // Takes `&self`: this is a pure read-only check, consistent with
    // `has_any_older_clocks_than` below (previously took `&mut self` without mutating).
    pub fn has_any_newer_clocks_than(&self, other: &Self) -> bool {
        self.clocks.iter().any(|(key, &(tick, _token))| {
            other
                .clocks
                .get(key)
                .is_none_or(|&(other_tick, _token)| tick > other_tick)
        })
    }
    /// Check if this recovery point has any clock that is older than the one in the `other`.
    ///
    /// A clock that is present in this recovery point, but not in the `other`,
    /// is always considered to be *newer*.
    pub fn has_any_older_clocks_than(&self, other: &Self) -> bool {
        self.clocks.iter().any(|(key, &(tick, _token))| {
            other
                .clocks
                .get(key)
                .is_some_and(|&(other_tick, _token)| tick < other_tick)
        })
    }
    /// Extend this recovery point with clocks that are only present in the `other`.
    ///
    /// Clocks that are not present in this recovery point are initialized to the tick 1,
    /// because we must recover all operations for them.
    ///
    /// Clocks that are already present in this recovery point are not updated.
    pub fn initialize_clocks_missing_from(&mut self, other: &Self) {
        // Clocks known on our node, that are not in the recovery point, are unknown on the
        // recovering node. Add them here with tick 1, so that we include all records for it.
        let random_token = rand::random::<ClockToken>();
        for &key in other.clocks.keys() {
            self.clocks.entry(key).or_insert_with(|| (1, random_token));
        }
    }
    /// Remove clocks from this recovery point, that are equal to the clocks in the `other`.
    pub fn remove_clocks_equal_to(&mut self, other: &Self) {
        for (key, (other_tick, _)) in &other.clocks {
            if let Some((tick, _)) = self.clocks.get(key)
                && tick == other_tick
            {
                self.clocks.remove(key);
            }
        }
    }
    /// Remove a clock referenced by the clock tag from this recovery point, if the clock is
    /// *newer or equal* to the tick in the tag.
    ///
    /// Returns `true` if removed clock was *equal* to the tick in the tag, or `false` otherwise.
    pub fn remove_clock_if_newer_or_equal_to_tag(&mut self, tag: ClockTag) -> bool {
        let key = Key::from_tag(tag);
        let mut is_equal = false;
        if let Some(&(tick, _)) = self.clocks.get(&key)
            && tick >= tag.clock_tick
        {
            self.clocks.remove(&key);
            is_equal = tick == tag.clock_tick;
        }
        is_equal
    }
    #[cfg(test)]
    pub(crate) fn insert(&mut self, peer_id: PeerId, clock_id: u32, clock_tick: u64) {
        let random_token = rand::random::<ClockToken>();
        self.clocks
            .insert(Key::new(peer_id, clock_id), (clock_tick, random_token));
    }
}
impl fmt::Display for RecoveryPoint {
    /// Format as `RecoveryPoint[peer(clock): tick(token), ...]`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("RecoveryPoint[")?;
        for (index, (key, (current_tick, token))) in self.clocks.iter().enumerate() {
            // Comma-separate every entry after the first
            if index > 0 {
                f.write_str(", ")?;
            }
            write!(
                f,
                "{}({}): {current_tick}({token})",
                key.peer_id, key.clock_id,
            )?;
        }
        f.write_str("]")?;
        Ok(())
    }
}
impl From<&RecoveryPoint> for api::grpc::qdrant::RecoveryPoint {
    /// Convert each tracked clock into a gRPC clock tag.
    fn from(rp: &RecoveryPoint) -> Self {
        let mut clocks = Vec::with_capacity(rp.clocks.len());
        for (key, &(clock_tick, token)) in &rp.clocks {
            clocks.push(RecoveryPointClockTag {
                peer_id: key.peer_id,
                clock_id: key.clock_id,
                clock_tick,
                token,
            });
        }
        Self { clocks }
    }
}
impl From<RecoveryPoint> for api::grpc::qdrant::RecoveryPoint {
    /// Owned conversion; delegates to the borrowed implementation.
    fn from(rp: RecoveryPoint) -> Self {
        Self::from(&rp)
    }
}
impl TryFrom<api::grpc::qdrant::RecoveryPoint> for RecoveryPoint {
type Error = Status;
fn try_from(rp: api::grpc::qdrant::RecoveryPoint) -> Result<Self, Self::Error> {
let api::grpc::qdrant::RecoveryPoint { clocks } = rp;
let clocks = clocks
.into_iter()
.map(|tag| {
(
Key::new(tag.peer_id, tag.clock_id),
(tag.clock_tick, tag.token),
)
})
.collect();
Ok(Self { clocks })
}
}
/// Serde helper: serialized form of `ClockMap`, storing clocks as a list instead of a map.
#[derive(Clone, Debug, Deserialize, Serialize)]
struct ClockMapHelper {
    clocks: Vec<KeyClockHelper>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    snapshot: Option<Vec<KeyClockHelper>>,
}
impl From<ClockMap> for ClockMapHelper {
fn from(clock_map: ClockMap) -> Self {
Self {
clocks: clock_map.clocks.into_iter().map(Into::into).collect(),
snapshot: clock_map
.snapshot
.map(|clocks| clocks.into_iter().map(Into::into).collect()),
}
}
}
impl From<ClockMapHelper> for ClockMap {
fn from(helper: ClockMapHelper) -> Self {
Self {
clocks: helper.clocks.into_iter().map(Into::into).collect(),
snapshot: helper
.snapshot
.map(|clocks| clocks.into_iter().map(Into::into).collect()),
changed: false,
}
}
}
/// Serde helper: a single `(Key, Clock)` entry with both parts flattened into one JSON object.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
struct KeyClockHelper {
    #[serde(flatten)]
    key: Key,
    #[serde(flatten)]
    clock: Clock,
}
impl From<(Key, Clock)> for KeyClockHelper {
fn from((key, clock): (Key, Clock)) -> Self {
Self { key, clock }
}
}
impl From<KeyClockHelper> for (Key, Clock) {
fn from(helper: KeyClockHelper) -> Self {
(helper.key, helper.clock)
}
}
/// Result alias for clock map load/store operations.
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// Errors that can occur while loading or storing the clock map file.
#[derive(Debug, thiserror::Error)]
#[error("failed to load/store the clock map: {0}")]
pub enum Error {
    Io(#[from] std::io::Error),
    SerdeJson(#[from] serde_json::Error),
}
impl From<file_operations::Error> for Error {
    fn from(err: file_operations::Error) -> Self {
        match err {
            file_operations::Error::Io(err) => err.into(),
            file_operations::Error::SerdeJson(err) => err.into(),
            // NOTE(review): assumes `file_operations::Error` can only be Io or SerdeJson in this
            // context — confirm against that crate's variant set before relying on this.
            _ => unreachable!(),
        }
    }
}
impl From<Error> for CollectionError {
fn from(err: Error) -> Self {
match err {
Error::Io(err) => err.into(),
Error::SerdeJson(err) => err.into(),
}
}
}
#[cfg(test)]
mod test {
use proptest::prelude::*;
use super::*;
#[test]
fn clock_map_serde_empty() {
let input = ClockMap::default();
let json = serde_json::to_value(&input).unwrap();
let output = serde_json::from_value(json).unwrap();
assert_eq!(input, output);
}
#[test]
fn clock_map_serde() {
let mut input = ClockMap::default();
input.advance_clock(ClockTag::new(1, 1, 1));
input.advance_clock(ClockTag::new(1, 2, 8));
input.advance_clock(ClockTag::new(2, 1, 42));
input.advance_clock(ClockTag::new(2, 2, 12345));
let json = serde_json::to_value(&input).unwrap();
let mut output: ClockMap = serde_json::from_value(json).unwrap();
// Propagate changed flag to allow comparison
// Normally we would not need to do this, but we bypass the regular load/store functions
output.changed = input.changed;
assert_eq!(input, output);
}
#[test]
fn clock_map_accept_last_operation_multiple_times() {
let mut helper = Helper::empty();
helper.advance(tag(1)).assert(true, 1);
// Accept same operation multiple times if it is the last
// We might send it multiple times due to a forward proxy
let duplicate = tag(2);
helper.advance(duplicate).assert(true, 2);
helper.advance(duplicate).assert(true, 2);
helper.advance(duplicate).assert(true, 2);
// Reject same clock tag with different unique token
helper.advance(tag(2)).assert(false, 2);
// Still accept the same operation
helper.advance(duplicate).assert(true, 2);
// Accept newer operation
helper.advance(tag(3)).assert(true, 3);
// Reject duplicated operation now, because a newer one was accepted
helper.advance(duplicate).assert(false, 3);
}
#[test]
fn clock_map_advance_to_next_tick() {
let mut helper = Helper::empty();
// Advance to the next tick
for tick in 1..10 {
helper.advance(tag(tick)).assert(true, tick);
}
}
#[test]
fn clock_map_advance_to_newer_tick() {
let mut helper = Helper::empty();
// Advance to a newer tick
for tick in [10, 20, 30, 40, 50] {
helper.advance(tag(tick)).assert(true, tick);
}
}
#[test]
fn clock_map_reject_older_or_current_tick() {
let mut helper = Helper::default();
// Advance to a newer tick (already tested in `clock_map_advance_to_newer_tick`)
helper.advance(tag(10));
// Reject older tick
for older_tick in 0..10 {
helper.advance(tag(older_tick)).assert(false, 10);
}
// Reject current tick
for current_tick in [10, 10, 10, 10, 10] {
helper.advance(tag(current_tick)).assert(false, 10);
}
}
#[test]
fn clock_map_reject_tick_0() {
let mut helper = Helper::empty();
// Reject tick 0, if clock map is empty
for _ in 0..5 {
helper.advance(tag(0)).assert(false, 0);
}
// Advance to a newer tick (already tested in `clock_map_advance_to_newer_tick`)
helper.advance(tag(10));
// Reject tick 0, if clock map is non-empty
for _ in 0..5 {
helper.advance(tag(0)).assert(false, 10);
}
}
#[test]
fn clock_map_advance_to_newer_tick_with_force_true() {
let mut helper = Helper::empty();
// Advance to a newer tick with `force = true`
for tick in [10, 20, 30, 40, 50] {
helper.advance(tag(tick).force(true)).assert(true, tick);
assert_eq!(helper.clock_map.current_tick(PEER_ID, CLOCK_ID), Some(tick));
}
}
#[test]
fn clock_map_accept_older_or_current_tick_with_force_true() {
let mut helper = Helper::default();
// Advance to a newer tick (already tested in `clock_map_advance_to_newer_tick`)
helper.advance(tag(10));
// Accept older tick with `force = true`
for older_tick in 0..10 {
helper
.advance(tag(older_tick).force(true))
.assert(true, older_tick);
}
// Accept current tick with `force = true`
for current_tick in [10, 10, 10, 10, 10] {
helper
.advance(tag(current_tick).force(true))
.assert(true, current_tick);
}
}
proptest! {
#[test]
fn clock_map_workflow(execution in proptest::collection::vec(clock_tag(), 0..4096)) {
let mut helper = Helper::default();
for clock_tag in execution {
let current_tick = helper.clock_map.current_tick(clock_tag.peer_id, clock_tag.clock_id);
assert_ne!(current_tick, Some(0));
let expected_status =
clock_tag.clock_tick > current_tick.unwrap_or(0) || clock_tag.force;
let expected_tick = if expected_status {
clock_tag.clock_tick
} else {
current_tick.unwrap_or(0)
};
helper.advance(clock_tag).assert(expected_status, expected_tick);
}
}
#[ignore]
#[test]
fn clock_map_clocks_isolation(execution in proptest::collection::vec(clock_tag(), 4096)) {
let mut helper = Helper::default();
for clock_tag in execution {
// Back-up current state
let backup = helper.clone();
// Advance the clock map
helper.advance(clock_tag);
// Ensure that no more than a single entry in the clock map was updated during advance
let helper_len = helper.clock_map.clocks.len();
let backup_len = backup.clock_map.clocks.len();
assert!(helper_len == backup_len || helper_len == backup_len + 1);
for (key, clock) in backup.clock_map.clocks {
let current_tick = helper.clock_map.current_tick(key.peer_id, key.clock_id);
if clock_tag.peer_id == key.peer_id && clock_tag.clock_id == key.clock_id {
assert!(current_tick.is_some() || clock_tag.clock_tick == 0);
} else {
assert_eq!(current_tick, Some(clock.current_tick));
}
}
}
}
}
prop_compose! {
fn clock_tag() (
peer_id in 0..128_u64,
clock_id in 0..64_u32,
clock_tick in any::<u64>(),
force in any::<bool>(),
) -> ClockTag {
ClockTag::new(peer_id, clock_id, clock_tick).force(force)
}
}
#[derive(Clone, Debug, Default)]
struct Helper {
clock_map: ClockMap,
}
impl Helper {
pub fn empty() -> Self {
Self::default()
}
pub fn advance(&mut self, mut clock_tag: ClockTag) -> Status {
let peer_id = clock_tag.peer_id;
let clock_id = clock_tag.clock_id;
let token = clock_tag.token;
let accepted = self.clock_map.advance_clock_and_correct_tag(&mut clock_tag);
assert_eq!(clock_tag.peer_id, peer_id);
assert_eq!(clock_tag.clock_id, clock_id);
assert_eq!(clock_tag.token, token);
Status {
accepted,
clock_tag,
}
}
}
const PEER_ID: PeerId = 1337;
const CLOCK_ID: u32 = 42;
fn tag(tick: u64) -> ClockTag {
ClockTag::new(PEER_ID, CLOCK_ID, tick)
}
#[derive(Copy, Clone, Debug)]
struct Status {
accepted: bool,
clock_tag: ClockTag,
}
impl Status {
pub fn assert(&self, expected_status: bool, expected_tick: u64) {
assert_eq!(self.accepted, expected_status);
assert_eq!(self.clock_tag.clock_tick, expected_tick);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/telemetry.rs | lib/collection/src/shards/local_shard/telemetry.rs | use std::collections::HashMap;
use std::sync::atomic::Ordering;
use std::time::Duration;
use common::types::{DetailsLevel, TelemetryDetail};
use segment::common::BYTES_IN_KB;
use segment::common::operation_time_statistics::OperationDurationStatistics;
use segment::types::{SizeStats, VectorNameBuf};
use segment::vector_storage::common::get_async_scorer;
use shard::common::stopping_guard::StoppingGuard;
use shard::segment_holder::SegmentHolder;
use tokio_util::task::AbortOnDropHandle;
use crate::config::CollectionConfigInternal;
use crate::operations::types::{CollectionError, CollectionResult, OptimizersStatus};
use crate::optimizers_builder::DEFAULT_INDEXING_THRESHOLD_KB;
use crate::shards::local_shard::LocalShard;
use crate::shards::telemetry::{LocalShardTelemetry, OptimizerTelemetry};
impl LocalShard {
pub async fn get_telemetry_data(
&self,
detail: TelemetryDetail,
timeout: Duration,
) -> CollectionResult<LocalShardTelemetry> {
let start = std::time::Instant::now();
let segments = self.segments.clone();
let segments_data = if detail.level < DetailsLevel::Level4 {
Ok((vec![], HashMap::default()))
} else {
let locked_collection_config = self.collection_config.clone();
let is_stopped_guard = StoppingGuard::new();
let is_stopped = is_stopped_guard.get_is_stopped();
let handle = tokio::task::spawn_blocking(move || {
// blocking sync lock
let Some(segments_guard) = segments.try_read_for(timeout) else {
return Err(CollectionError::timeout(timeout, "shard telemetry"));
};
let mut segments_telemetry = Vec::with_capacity(segments_guard.len());
for (_id, segment) in segments_guard.iter() {
if is_stopped.load(Ordering::Relaxed) {
return Ok((vec![], HashMap::default()));
}
// blocking sync lock
let Some(segment_guard) = segment.get().try_read_for(timeout) else {
return Err(CollectionError::timeout(timeout, "shard telemetry"));
};
segments_telemetry.push(segment_guard.get_telemetry_data(detail))
}
let collection_config = locked_collection_config.blocking_read();
let indexed_only_excluded_vectors =
get_index_only_excluded_vectors(&segments_guard, &collection_config);
Ok((segments_telemetry, indexed_only_excluded_vectors))
});
AbortOnDropHandle::new(handle).await?
};
let (segments, index_only_excluded_vectors) = segments_data?;
let total_optimized_points = self.total_optimized_points.load(Ordering::Relaxed);
let optimizations: OperationDurationStatistics = self
.optimizers
.iter()
.map(|optimizer| {
optimizer
.get_telemetry_counter()
.lock()
.get_statistics(detail)
})
.fold(Default::default(), |total, stats| total + stats);
let status = self
.get_optimization_status(timeout.saturating_sub(start.elapsed()))
.await?;
let SizeStats {
num_vectors,
num_vectors_by_name,
vectors_size_bytes,
payloads_size_bytes,
num_points,
} = self
.get_size_stats(timeout.saturating_sub(start.elapsed()))
.await?;
Ok(LocalShardTelemetry {
variant_name: None,
status: None,
total_optimized_points,
vectors_size_bytes: Some(vectors_size_bytes),
payloads_size_bytes: Some(payloads_size_bytes),
num_points: Some(num_points),
num_vectors: Some(num_vectors),
num_vectors_by_name: Some(HashMap::from(num_vectors_by_name)),
segments: if segments.is_empty() {
None
} else {
Some(segments)
},
optimizations: OptimizerTelemetry {
status,
optimizations,
log: (detail.level >= DetailsLevel::Level4)
.then(|| self.optimizers_log.lock().to_telemetry()),
},
async_scorer: Some(get_async_scorer()),
indexed_only_excluded_vectors: (!index_only_excluded_vectors.is_empty())
.then_some(index_only_excluded_vectors),
})
}
pub async fn get_optimization_status(
&self,
timeout: Duration,
) -> CollectionResult<OptimizersStatus> {
let segments = self.segments.clone();
let status = tokio::task::spawn_blocking(move || {
// blocking sync lock
let Some(segments) = segments.try_read_for(timeout) else {
return Err(CollectionError::timeout(timeout, "optimization status"));
};
match &segments.optimizer_errors {
None => Ok(OptimizersStatus::Ok),
Some(err) => Ok(OptimizersStatus::Error(err.clone())),
}
});
AbortOnDropHandle::new(status).await?
}
pub async fn get_size_stats(&self, timeout: Duration) -> CollectionResult<SizeStats> {
let segments = self.segments.clone();
let stats = tokio::task::spawn_blocking(move || {
// blocking sync lock
let Some(segments) = segments.try_read_for(timeout) else {
return Err(CollectionError::timeout(timeout, "get size stats"));
};
let SizeStats {
mut num_points,
mut num_vectors,
mut num_vectors_by_name,
mut vectors_size_bytes,
mut payloads_size_bytes,
} = SizeStats::default();
for (_, segment) in segments.iter() {
let info = segment.get().read().info();
num_points += info.num_points;
num_vectors += info.num_vectors;
vectors_size_bytes += info.vectors_size_bytes;
payloads_size_bytes += info.payloads_size_bytes;
for (vector_name, vector_data) in info.vector_data.iter() {
*num_vectors_by_name.get_or_insert_default(vector_name) +=
vector_data.num_vectors;
}
}
Ok(SizeStats {
num_vectors,
num_vectors_by_name,
vectors_size_bytes,
payloads_size_bytes,
num_points,
})
});
AbortOnDropHandle::new(stats).await?
}
}
/// Returns the number of vectors which will be excluded from requests with `indexed_only` enabled.
/// Note: For vectors names without any excluded vectors, we return `0` instead of skipping them in the output.
///
/// This effectively counts vectors in large unindexed segments.
fn get_index_only_excluded_vectors(
segment_holder: &SegmentHolder,
collection_config: &CollectionConfigInternal,
) -> HashMap<VectorNameBuf, usize> {
let indexing_threshold = collection_config
.optimizer_config
.indexing_threshold
.unwrap_or(DEFAULT_INDEXING_THRESHOLD_KB);
// Threshold in kilobytes below which we allow full-search.
let search_optimized_threshold_bytes = indexing_threshold.max(collection_config.hnsw_config.full_scan_threshold)
// convert KB to bytes
* BYTES_IN_KB;
let mut index_only_excluded: HashMap<VectorNameBuf, usize> = HashMap::with_capacity(1);
segment_holder
.iter()
.flat_map(|(_, segment)| {
let segment_guard = segment.get().read();
// Get a map of vector-name=>vector-storage-size for unindexed vectors in this segment.
segment_guard
.vector_names()
.into_iter()
.filter_map(move |vector_name| {
let segment_config = segment_guard.config().vector_data.get(&vector_name)?;
let points = segment_guard.available_point_count();
// Skip segments that have an index.
if segment_config.index.is_indexed() {
return Some((vector_name, None, points));
}
let vector_storage_size =
segment_guard.available_vectors_size_in_bytes(&vector_name);
if let Err(err) = vector_storage_size {
log::error!("Failed to get vector size from segment: {err:?}");
return Some((vector_name, None, points));
}
Some((vector_name, Some(vector_storage_size.unwrap()), points))
})
})
.for_each(|(name, vector_size_bytes, point_count)| {
let entry = index_only_excluded.entry(name).or_insert(0);
// Filter out only large segments that do not support full-scan, as smaller segments can
// be searched quickly without using an index and are included in index-only searches.
let is_excluded = vector_size_bytes.is_some_and(|vector_size_bytes| {
vector_size_bytes > search_optimized_threshold_bytes
});
if is_excluded {
*entry += point_count;
}
});
index_only_excluded
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/search.rs | lib/collection/src/shards/local_shard/search.rs | use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::ScoredPoint;
use shard::common::stopping_guard::StoppingGuard;
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use super::LocalShard;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::operations::types::{CollectionError, CollectionResult};
// Chunk requests for parallelism in certain scenarios
//
// Deeper down, each segment gets its own dedicated search thread. If this shard has just
// one segment, all requests will be executed on a single thread.
//
// To prevent this from being a bottleneck if we have a lot of requests, we can chunk the
// requests into multiple searches to allow more parallelism.
//
// For simplicity, we use a fixed chunk size. Using chunks helps to ensure our 'filter
// reuse optimization' is still properly utilized.
// See: <https://github.com/qdrant/qdrant/pull/813>
// See: <https://github.com/qdrant/qdrant/pull/6326>
const CHUNK_SIZE: usize = 16;
impl LocalShard {
pub async fn do_search(
&self,
core_request: Arc<CoreSearchRequestBatch>,
search_runtime_handle: &Handle,
timeout: Duration,
hw_counter_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
if core_request.searches.is_empty() {
return Ok(vec![]);
}
let skip_batching = if core_request.searches.len() <= CHUNK_SIZE {
// Don't batch if we have few searches, prevents cloning request
true
} else if self.segments.read().len() > self.shared_storage_config.search_thread_count {
// Don't batch if we have more segments than search threads
// Not a perfect condition, but it helps to prevent consuming a lot of search threads
// if the number of segments is large
// Note: search threads are shared with all other search threads on this Qdrant
// instance, and other shards also have segments. For simplicity this only considers
// the global search thread count and local segment count.
// See: <https://github.com/qdrant/qdrant/pull/6478>
true
} else {
false
};
let is_stopped_guard = StoppingGuard::new();
if skip_batching {
return self
.do_search_impl(
core_request,
search_runtime_handle,
timeout,
hw_counter_acc,
&is_stopped_guard,
)
.await;
}
// Batch if we have many searches, allows for more parallelism
let CoreSearchRequestBatch { searches } = core_request.as_ref();
let chunk_futures = searches
.chunks(CHUNK_SIZE)
.map(|chunk| {
let core_request = CoreSearchRequestBatch {
searches: chunk.to_vec(),
};
self.do_search_impl(
Arc::new(core_request),
search_runtime_handle,
timeout,
hw_counter_acc.clone(),
&is_stopped_guard,
)
})
.collect::<Vec<_>>();
let results = futures::future::try_join_all(chunk_futures)
.await?
.into_iter()
.flatten()
.collect();
Ok(results)
}
async fn do_search_impl(
&self,
core_request: Arc<CoreSearchRequestBatch>,
search_runtime_handle: &Handle,
timeout: Duration,
hw_counter_acc: HwMeasurementAcc,
is_stopped_guard: &StoppingGuard,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
let start = std::time::Instant::now();
let (query_context, collection_params) = {
let collection_config = self.collection_config.read().await;
let query_context_opt = SegmentsSearcher::prepare_query_context(
self.segments.clone(),
&core_request,
&collection_config,
timeout,
search_runtime_handle,
is_stopped_guard,
hw_counter_acc.clone(),
)
.await?;
let Some(query_context) = query_context_opt else {
// No segments to search
return Ok(vec![]);
};
(query_context, collection_config.params.clone())
};
// update timeout
let timeout = timeout.saturating_sub(start.elapsed());
let search_request = SegmentsSearcher::search(
Arc::clone(&self.segments),
Arc::clone(&core_request),
search_runtime_handle,
true,
query_context,
timeout,
);
let res = tokio::time::timeout(timeout, search_request)
.await
.map_err(|_| {
log::debug!("Search timeout reached: {timeout:?}");
// StoppingGuard takes care of setting is_stopped to true
CollectionError::timeout(timeout, "Search")
})??;
let top_results = res
.into_iter()
.zip(core_request.searches.iter())
.map(|(vector_res, req)| {
let vector_name = req.query.get_vector_name();
let distance = collection_params.get_distance(vector_name).unwrap();
let processed_res = vector_res.into_iter().map(|mut scored_point| {
match req.query {
QueryEnum::Nearest(_) => {
scored_point.score = distance.postprocess_score(scored_point.score);
}
// Don't post-process if we are dealing with custom scoring
QueryEnum::RecommendBestScore(_)
| QueryEnum::RecommendSumScores(_)
| QueryEnum::Discover(_)
| QueryEnum::Context(_)
| QueryEnum::FeedbackNaive(_) => {}
};
scored_point
});
if let Some(threshold) = req.score_threshold {
processed_res
.take_while(|scored_point| {
distance.check_threshold(scored_point.score, threshold)
})
.collect()
} else {
processed_res.collect()
}
})
.collect();
Ok(top_results)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/formula_rescore.rs | lib/collection/src/shards/local_shard/formula_rescore.rs | use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::query_context::FormulaContext;
use segment::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula;
use segment::types::ScoredPoint;
use shard::common::stopping_guard::StoppingGuard;
use super::LocalShard;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::operations::types::{CollectionError, CollectionResult};
impl LocalShard {
pub async fn rescore_with_formula(
&self,
formula: ParsedFormula,
prefetches_results: Vec<Vec<ScoredPoint>>,
limit: usize,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
let stopping_guard = StoppingGuard::new();
let ctx = FormulaContext {
formula,
prefetches_results,
limit,
is_stopped: stopping_guard.get_is_stopped(),
};
let arc_ctx = Arc::new(ctx);
let future = SegmentsSearcher::rescore_with_formula(
self.segments.clone(),
arc_ctx,
&self.search_runtime,
hw_measurement_acc,
);
let res = tokio::time::timeout(timeout, future)
.await
.map_err(|_elapsed| CollectionError::timeout(timeout, "rescore_with_formula"))??;
Ok(res)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/facet.rs | lib/collection/src/shards/local_shard/facet.rs | use std::collections::BTreeSet;
use std::sync::Arc;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use futures::future;
use futures::future::try_join_all;
use itertools::{Itertools, process_results};
use segment::data_types::facets::{FacetParams, FacetValue, FacetValueHit};
use segment::types::{Condition, FieldCondition, Filter, Match};
use shard::common::stopping_guard::StoppingGuard;
use tokio::runtime::Handle;
use tokio::time::error::Elapsed;
use tokio_util::task::AbortOnDropHandle;
use super::LocalShard;
use crate::collection_manager::holders::segment_holder::LockedSegment;
use crate::operations::types::{CollectionError, CollectionResult};
impl LocalShard {
/// Returns values with approximate counts for the given facet request.
pub async fn approx_facet(
&self,
request: Arc<FacetParams>,
search_runtime_handle: &Handle,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<FacetValueHit>> {
let stopping_guard = StoppingGuard::new();
let spawn_read = |segment: LockedSegment, hw_counter: &HardwareCounterCell| {
let request = Arc::clone(&request);
let is_stopped = stopping_guard.get_is_stopped();
let hw_counter = hw_counter.fork();
let task = search_runtime_handle.spawn_blocking(move || {
let get_segment = segment.get();
let read_segment = get_segment.read();
read_segment.facet(&request, &is_stopped, &hw_counter)
});
AbortOnDropHandle::new(task)
};
let all_reads = {
let segments_lock = self.segments().read();
let hw_counter = hw_measurement_acc.get_counter_cell();
tokio::time::timeout(
timeout,
try_join_all(
segments_lock
.non_appendable_then_appendable_segments()
.map(|segment| spawn_read(segment, &hw_counter)),
),
)
}
.await
.map_err(|_: Elapsed| CollectionError::timeout(timeout, "facet"))??;
let merged_hits = process_results(all_reads, |reads| {
reads.reduce(|mut acc, map| {
map.into_iter()
.for_each(|(value, count)| *acc.entry(value).or_insert(0) += count);
acc
})
})?;
// We can't just select top values, because we need to aggregate across segments,
// which we can't assume to select the same best top.
//
// We need all values to be able to aggregate correctly across segments
let top_hits = merged_hits
.map(|map| {
map.iter()
.map(|(value, count)| FacetValueHit {
value: value.to_owned(),
count: *count,
})
.collect_vec()
})
.unwrap_or_default();
Ok(top_hits)
}
/// Returns values with exact counts for a given facet request.
pub async fn exact_facet(
&self,
request: Arc<FacetParams>,
search_runtime_handle: &Handle,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<FacetValueHit>> {
// To return exact counts we need to consider that the same point can be in different segments if it has different versions.
// So, we need to consider all point ids for a given filter in all segments to do an accurate count.
//
// To do this we will perform exact counts for each of the values in the field.
let instant = std::time::Instant::now();
// Get unique values for the field
let unique_values = self
.unique_values(
Arc::clone(&request),
search_runtime_handle,
timeout,
hw_measurement_acc.clone(),
)
.await?;
// Make an exact count for each value
let hits_futures = unique_values.into_iter().map(|value| {
let match_value = Filter::new_must(Condition::Field(FieldCondition::new_match(
request.key.clone(),
Match::new_value(From::from(value.clone())),
)));
let filter = Filter::merge_opts(request.filter.clone(), Some(match_value));
let hw_acc = hw_measurement_acc.clone();
async move {
let count = self
.read_filtered(filter.as_ref(), search_runtime_handle, hw_acc)
.await?
.len();
CollectionResult::Ok(FacetValueHit { value, count })
}
});
let hits = tokio::time::timeout(
timeout.saturating_sub(instant.elapsed()),
future::try_join_all(hits_futures),
)
.await
.map_err(|_: Elapsed| CollectionError::timeout(timeout, "facet"))??;
Ok(hits)
}
async fn unique_values(
&self,
request: Arc<FacetParams>,
handle: &Handle,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<BTreeSet<FacetValue>> {
let stopping_guard = StoppingGuard::new();
let spawn_read = |segment: LockedSegment, hw_counter: &HardwareCounterCell| {
let request = Arc::clone(&request);
let is_stopped = stopping_guard.get_is_stopped();
let hw_counter = hw_counter.fork();
let task = handle.spawn_blocking(move || {
let get_segment = segment.get();
let read_segment = get_segment.read();
read_segment.unique_values(
&request.key,
request.filter.as_ref(),
&is_stopped,
&hw_counter,
)
});
AbortOnDropHandle::new(task)
};
let hw_counter = hw_measurement_acc.get_counter_cell();
let all_reads = {
let segments_lock = self.segments().read();
tokio::time::timeout(
timeout,
try_join_all(
segments_lock
.non_appendable_then_appendable_segments()
.map(|segment| spawn_read(segment, &hw_counter)),
),
)
}
.await
.map_err(|_: Elapsed| CollectionError::timeout(timeout, "facet"))??;
let all_values =
process_results(all_reads, |reads| reads.flatten().collect::<BTreeSet<_>>())?;
Ok(all_values)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/testing.rs | lib/collection/src/shards/local_shard/testing.rs | use shard::locked_segment::LockedSegment;
use crate::shards::local_shard::LocalShard;
impl LocalShard {
// Testing helper: performs partial flush of the segments
pub fn partial_flush(&self) {
let segments = self.segments.read();
for (_segment_id, segment) in segments.iter() {
match segment {
LockedSegment::Original(raw_segment_lock) => {
let raw_segment = raw_segment_lock.read();
raw_segment.id_tracker.borrow().mapping_flusher()().unwrap();
}
LockedSegment::Proxy(_) => {} // Skipping
}
}
}
pub fn full_flush(&self) {
let segments = self.segments.read();
segments.flush_all(true, true).unwrap();
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/mod.rs | lib/collection/src/shards/local_shard/mod.rs | pub mod clock_map;
pub mod disk_usage_watcher;
pub(super) mod facet;
pub(super) mod formula_rescore;
pub(super) mod query;
pub(super) mod scroll;
pub(super) mod search;
pub(super) mod shard_ops;
mod snapshot;
mod telemetry;
pub(super) mod updaters;
#[cfg(test)]
mod snapshot_tests;
mod drop;
#[cfg(feature = "testing")]
pub mod testing;
use std::collections::{BTreeSet, HashMap};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::thread;
use std::time::{Duration, Instant};
use arc_swap::ArcSwap;
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::defaults::MAX_CONCURRENT_SEGMENT_LOADS;
use common::rate_limiting::RateLimiter;
use common::save_on_disk::SaveOnDisk;
use common::{panic, tar_ext};
use fs_err as fs;
use fs_err::tokio as tokio_fs;
use futures::StreamExt as _;
use futures::stream::FuturesUnordered;
use indicatif::{ProgressBar, ProgressStyle};
use itertools::Itertools;
use parking_lot::{Mutex as ParkingMutex, RwLock};
use segment::entry::entry_point::SegmentEntry as _;
use segment::index::field_index::CardinalityEstimation;
use segment::segment_constructor::{build_segment, load_segment};
use segment::types::{
Filter, PayloadIndexInfo, PayloadKeyType, PointIdType, SegmentConfig, SegmentType,
};
use shard::operations::CollectionUpdateOperations;
use shard::operations::point_ops::{PointInsertOperationsInternal, PointOperations};
use shard::wal::SerdeWal;
use tokio::runtime::Handle;
use tokio::sync::mpsc::Sender;
use tokio::sync::{Mutex, RwLock as TokioRwLock, mpsc};
use tokio_util::task::AbortOnDropHandle;
use self::clock_map::{ClockMap, RecoveryPoint};
use self::disk_usage_watcher::DiskUsageWatcher;
use super::update_tracker::UpdateTracker;
use crate::collection::payload_index_schema::PayloadIndexSchema;
use crate::collection_manager::collection_updater::CollectionUpdater;
use crate::collection_manager::holders::segment_holder::{
LockedSegment, LockedSegmentHolder, SegmentHolder,
};
use crate::collection_manager::optimizers::TrackerLog;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::common::file_utils::{move_dir, move_file};
use crate::config::CollectionConfigInternal;
use crate::operations::OperationWithClockTag;
use crate::operations::shared_storage_config::SharedStorageConfig;
use crate::operations::types::{
CollectionError, CollectionResult, OptimizersStatus, ShardInfoInternal, ShardStatus,
check_sparse_compatible_with_segment_config,
};
use crate::optimizers_builder::{OptimizersConfig, build_optimizers, clear_temp_segments};
use crate::shards::CollectionId;
use crate::shards::shard::ShardId;
use crate::shards::shard_config::ShardConfig;
use crate::update_handler::{Optimizer, UpdateHandler, UpdateSignal};
use crate::wal_delta::RecoverableWal;
/// If rendering WAL load progression in basic text form, report progression every 60 seconds.
const WAL_LOAD_REPORT_EVERY: Duration = Duration::from_secs(60);
const WAL_PATH: &str = "wal";
const SEGMENTS_PATH: &str = "segments";
const NEWEST_CLOCKS_PATH: &str = "newest_clocks.json";
const OLDEST_CLOCKS_PATH: &str = "oldest_clocks.json";
/// LocalShard
///
/// LocalShard is an entity that can be moved between peers and contains some part of one collections data.
///
/// Holds all object, required for collection functioning
#[must_use = "Local Shard must be explicitly handled"]
pub struct LocalShard {
collection_name: CollectionId,
pub(super) segments: LockedSegmentHolder,
pub(super) collection_config: Arc<TokioRwLock<CollectionConfigInternal>>,
pub(super) shared_storage_config: Arc<SharedStorageConfig>,
pub(crate) payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
pub(super) wal: RecoverableWal,
pub(super) update_handler: Arc<Mutex<UpdateHandler>>,
pub(super) update_sender: ArcSwap<Sender<UpdateSignal>>,
pub(super) update_tracker: UpdateTracker,
pub(super) path: PathBuf,
pub(super) optimizers: Arc<Vec<Arc<Optimizer>>>,
pub(super) optimizers_log: Arc<ParkingMutex<TrackerLog>>,
pub(super) total_optimized_points: Arc<AtomicUsize>,
pub(super) search_runtime: Handle,
disk_usage_watcher: DiskUsageWatcher,
read_rate_limiter: Option<ParkingMutex<RateLimiter>>,
is_gracefully_stopped: bool,
/// Update operation lock
/// The lock, which must prevent updates critical sections of other operations, which
/// are not compatible with updates.
///
/// Currently used for:
///
/// * Blocking updates during scroll + retrieve operations
/// Consistency of scroll operations is especially important for internal processes like
/// re-sharding and shard transfer, so explicit lock for those operations is required.
///
/// * Blocking updates during some parts of snapshot creation
/// Snapshotting process wraps and unwraps proxy segments, which might
/// create inconsistencies if updates are applied concurrently.
///
/// Write lock must be held for updates, while read lock must be held for critical sections
pub(super) update_operation_lock: Arc<tokio::sync::RwLock<()>>,
}
/// Shard holds information about segments and WAL.
impl LocalShard {
/// Moves `wal`, `segments` and `clocks` data from one path to another.
pub async fn move_data(from: &Path, to: &Path) -> CollectionResult<()> {
    log::debug!(
        "Moving local shard from {} to {}",
        from.display(),
        to.display()
    );
    // Relocate the WAL first, then the segments, then the clock maps.
    move_dir(Self::wal_path(from), Self::wal_path(to)).await?;
    move_dir(Self::segments_path(from), Self::segments_path(to)).await?;
    LocalShardClocks::move_data(from, to).await?;
    Ok(())
}
/// Checks if path have local shard data present
pub fn check_data(shard_path: &Path) -> bool {
    // A shard is present on disk when both its WAL directory and its
    // segments directory exist.
    Self::wal_path(shard_path).exists() && Self::segments_path(shard_path).exists()
}
/// Clear local shard related data.
///
/// Do NOT remove config file.
pub async fn clear(shard_path: &Path) -> CollectionResult<()> {
    // Remove the WAL directory first, then the segments directory.
    for dir in [Self::wal_path(shard_path), Self::segments_path(shard_path)] {
        if dir.exists() {
            tokio_fs::remove_dir_all(dir).await?;
        }
    }
    // Drop the persisted clock maps as well.
    LocalShardClocks::delete_data(shard_path).await?;
    Ok(())
}
/// Create a new local shard from already-initialized parts.
///
/// Spawns the update workers, sets up the disk usage watcher, wires the WAL
/// together with the provided clock maps, and installs the optional
/// strict-mode read rate limiter.
#[allow(clippy::too_many_arguments)]
pub async fn new(
    collection_name: String,
    segment_holder: SegmentHolder,
    collection_config: Arc<TokioRwLock<CollectionConfigInternal>>,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    wal: SerdeWal<OperationWithClockTag>,
    optimizers: Arc<Vec<Arc<Optimizer>>>,
    optimizer_resource_budget: ResourceBudget,
    shard_path: &Path,
    clocks: LocalShardClocks,
    update_runtime: Handle,
    search_runtime: Handle,
) -> Self {
    let segment_holder = Arc::new(RwLock::new(segment_holder));
    let config = collection_config.read().await;
    let locked_wal = Arc::new(Mutex::new(wal));
    let optimizers_log = Arc::new(ParkingMutex::new(Default::default()));
    let total_optimized_points = Arc::new(AtomicUsize::new(0));

    // default to 2x the WAL capacity
    //
    // Fix: read the capacity from the `config` guard acquired above instead of
    // taking a second, redundant read lock on `collection_config`.
    let disk_buffer_threshold_mb = 2 * config.wal_config.wal_capacity_mb;
    let disk_usage_watcher = disk_usage_watcher::DiskUsageWatcher::new(
        shard_path.to_owned(),
        disk_buffer_threshold_mb,
    )
    .await;

    // Lock shared between updates (write side) and operations that must not
    // run concurrently with updates, e.g. scrolls and snapshots (read side).
    // Stored in `Self::update_operation_lock` below.
    let scroll_read_lock = Arc::new(tokio::sync::RwLock::new(()));

    let update_tracker = UpdateTracker::default();

    let mut update_handler = UpdateHandler::new(
        collection_name.clone(),
        shared_storage_config.clone(),
        payload_index_schema.clone(),
        optimizers.clone(),
        optimizers_log.clone(),
        total_optimized_points.clone(),
        optimizer_resource_budget.clone(),
        update_runtime.clone(),
        segment_holder.clone(),
        locked_wal.clone(),
        config.optimizer_config.flush_interval_sec,
        config.optimizer_config.max_optimization_threads,
        clocks.clone(),
        shard_path.into(),
        scroll_read_lock.clone(),
        update_tracker.clone(),
    );

    let (update_sender, update_receiver) =
        mpsc::channel(shared_storage_config.update_queue_size);
    update_handler.run_workers(update_receiver);

    // The read rate limiter is only installed when strict mode configures one.
    let read_rate_limiter = config.strict_mode_config.as_ref().and_then(|strict_mode| {
        strict_mode
            .read_rate_limit
            .map(RateLimiter::new_per_minute)
            .map(ParkingMutex::new)
    });

    drop(config); // release `shared_config` from borrow checker

    Self {
        collection_name,
        segments: segment_holder,
        collection_config,
        shared_storage_config,
        payload_index_schema,
        wal: RecoverableWal::new(locked_wal, clocks.newest_clocks, clocks.oldest_clocks),
        update_handler: Arc::new(Mutex::new(update_handler)),
        update_sender: ArcSwap::from_pointee(update_sender),
        update_tracker,
        path: shard_path.to_owned(),
        search_runtime,
        optimizers,
        optimizers_log,
        total_optimized_points,
        disk_usage_watcher,
        read_rate_limiter,
        is_gracefully_stopped: false,
        update_operation_lock: scroll_read_lock,
    }
}
/// Borrow the lock protecting this shard's segment holder.
pub fn segments(&self) -> &RwLock<SegmentHolder> {
    // Deref-coerce through the shared pointer to the inner lock.
    &self.segments
}
/// Recovers shard from disk.
///
/// Loads all segments from the shard directory (concurrently, on blocking
/// threads), deduplicates points, replays outstanding WAL operations on top,
/// and returns a fully initialized `LocalShard`.
#[allow(clippy::too_many_arguments)]
pub async fn load(
    _id: ShardId,
    collection_id: CollectionId,
    shard_path: &Path,
    collection_config: Arc<TokioRwLock<CollectionConfigInternal>>,
    effective_optimizers_config: OptimizersConfig,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    rebuild_payload_index: bool,
    update_runtime: Handle,
    search_runtime: Handle,
    optimizer_resource_budget: ResourceBudget,
) -> CollectionResult<LocalShard> {
    let collection_config_read = collection_config.read().await;

    let wal_path = Self::wal_path(shard_path);
    let segments_path = Self::segments_path(shard_path);

    // Open the existing WAL; failure here means the shard cannot be recovered.
    let wal: SerdeWal<OperationWithClockTag> =
        SerdeWal::new(&wal_path, (&collection_config_read.wal_config).into())
            .map_err(|e| CollectionError::service_error(format!("Wal error: {e}")))?;

    // Walk over segments directory and collect all directory entries now
    // Collect now and error early to prevent errors while we've already spawned load threads
    let segment_paths = fs::read_dir(&segments_path)
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Can't read segments directory due to {err}\nat {}",
                segments_path.display(),
            ))
        })?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Failed to read segment path in segment directory: {err}",
            ))
        })?;

    // Grab segment paths, filter out hidden entries and non-directories
    let segment_paths = segment_paths
        .into_iter()
        .filter(|entry| {
            let is_hidden = entry
                .file_name()
                .to_str()
                .is_some_and(|s| s.starts_with('.'));
            if is_hidden {
                log::debug!(
                    "Segments path entry prefixed with a period, ignoring: {}",
                    entry.path().display(),
                );
            }
            !is_hidden
        })
        .filter(|entry| {
            let is_dir = entry.path().is_dir();
            if !is_dir {
                log::warn!(
                    "Segments path entry is not a directory, skipping: {}",
                    entry.path().display(),
                );
            }
            is_dir
        })
        .map(|entry| entry.path());

    // Load segments on blocking threads, at most MAX_CONCURRENT_SEGMENT_LOADS
    // at a time; each handle is aborted if this future is dropped.
    let mut segment_stream = futures::stream::iter(segment_paths)
        .map(|segment_path| {
            let payload_index_schema = Arc::clone(&payload_index_schema);
            let handle = tokio::task::spawn_blocking(move || {
                let segment = load_segment(&segment_path, &AtomicBool::new(false))?;
                // `None` means a leftover of an unfinished operation: remove
                // its directory and skip it.
                let Some(mut segment) = segment else {
                    fs::remove_dir_all(&segment_path).map_err(|err| {
                        CollectionError::service_error(format!(
                            "failed to remove leftover segment {}: {err}",
                            segment_path.display(),
                        ))
                    })?;
                    return Ok(None);
                };
                segment.check_consistency_and_repair()?;
                if rebuild_payload_index {
                    segment.update_all_field_indices(
                        &payload_index_schema.read().schema.clone(),
                    )?;
                }
                CollectionResult::Ok(Some(segment))
            });
            AbortOnDropHandle::new(handle)
        })
        .buffer_unordered(MAX_CONCURRENT_SEGMENT_LOADS);

    let mut segment_holder = SegmentHolder::default();
    while let Some(result) = segment_stream.next().await {
        // First `?` propagates a join/abort error, second a load error.
        let segment = result??;
        let Some(segment) = segment else {
            continue;
        };

        // Reject segments whose dense/sparse vector configuration is not
        // compatible with the current collection config.
        collection_config_read
            .params
            .vectors
            .check_compatible_with_segment_config(&segment.config().vector_data, true)?;
        collection_config_read
            .params
            .sparse_vectors
            .as_ref()
            .map(|sparse_vectors| {
                check_sparse_compatible_with_segment_config(
                    sparse_vectors,
                    &segment.config().sparse_vector_data,
                    true,
                )
            })
            .unwrap_or(Ok(()))?;

        segment_holder.add_new(segment);
    }
    drop(segment_stream); // release `payload_index_schema` from borrow checker

    let res = deduplicate_points_async(&segment_holder).await?;
    if res > 0 {
        log::debug!("Deduplicated {res} points");
    }

    clear_temp_segments(shard_path);
    let optimizers = build_optimizers(
        shard_path,
        &collection_config_read.params,
        &effective_optimizers_config,
        &collection_config_read.hnsw_config,
        &shared_storage_config.hnsw_global_config,
        &collection_config_read.quantization_config,
    );

    drop(collection_config_read); // release `shared_config` from borrow checker

    let clocks = LocalShardClocks::load(shard_path)?;

    // Always make sure we have any appendable segments, needed for update operations
    if !segment_holder.has_appendable_segment() {
        debug_assert!(
            false,
            "Shard has no appendable segments, this should never happen",
        );
        log::warn!(
            "Shard has no appendable segments, this should never happen. Creating new appendable segment now",
        );
        let segments_path = LocalShard::segments_path(shard_path);
        let segment_config = collection_config.read().await.to_base_segment_config()?;
        segment_holder.create_appendable_segment(
            &segments_path,
            segment_config,
            payload_index_schema.clone(),
        )?;
    }

    let local_shard = LocalShard::new(
        collection_id.clone(),
        segment_holder,
        collection_config,
        shared_storage_config,
        payload_index_schema,
        wal,
        optimizers,
        optimizer_resource_budget,
        shard_path,
        clocks,
        update_runtime,
        search_runtime,
    )
    .await;

    // Apply outstanding operations from WAL
    local_shard.load_from_wal(collection_id).await?;

    Ok(local_shard)
}
/// Owned copy of this shard's directory path.
pub fn shard_path(&self) -> PathBuf {
    self.path.to_path_buf()
}
/// Location of the WAL directory inside a shard directory.
pub fn wal_path(shard_path: &Path) -> PathBuf {
    let mut wal_path = shard_path.to_path_buf();
    wal_path.push(WAL_PATH);
    wal_path
}
/// Location of the segments directory inside a shard directory.
pub fn segments_path(shard_path: &Path) -> PathBuf {
    let mut segments_path = shard_path.to_path_buf();
    segments_path.push(SEGMENTS_PATH);
    segments_path
}
/// Build a brand new local shard and write its replica-set config file.
#[allow(clippy::too_many_arguments)]
pub async fn build_local(
    id: ShardId,
    collection_id: CollectionId,
    shard_path: &Path,
    collection_config: Arc<TokioRwLock<CollectionConfigInternal>>,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    update_runtime: Handle,
    search_runtime: Handle,
    optimizer_resource_budget: ResourceBudget,
    effective_optimizers_config: OptimizersConfig,
) -> CollectionResult<LocalShard> {
    // initialize local shard config file
    let replica_set_config = ShardConfig::new_replica_set();
    let shard = Self::build(
        id,
        collection_id,
        shard_path,
        collection_config,
        shared_storage_config,
        payload_index_schema,
        update_runtime,
        search_runtime,
        optimizer_resource_budget,
        effective_optimizers_config,
    )
    .await?;
    // Persist the config only after the shard itself was built successfully.
    replica_set_config.save(shard_path)?;
    Ok(shard)
}
/// Creates new empty shard with given configuration, initializing all storages, optimizers and directories.
#[allow(clippy::too_many_arguments)]
pub async fn build(
    id: ShardId,
    collection_id: CollectionId,
    shard_path: &Path,
    collection_config: Arc<TokioRwLock<CollectionConfigInternal>>,
    shared_storage_config: Arc<SharedStorageConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    update_runtime: Handle,
    search_runtime: Handle,
    optimizer_resource_budget: ResourceBudget,
    effective_optimizers_config: OptimizersConfig,
) -> CollectionResult<LocalShard> {
    let config = collection_config.read().await;

    // Create the WAL and segments directories on disk.
    let wal_path = Self::wal_path(shard_path);
    tokio_fs::create_dir_all(&wal_path).await.map_err(|err| {
        CollectionError::service_error(format!(
            "Can't create shard wal directory. Error: {err}"
        ))
    })?;

    let segments_path = Self::segments_path(shard_path);
    tokio_fs::create_dir_all(&segments_path)
        .await
        .map_err(|err| {
            CollectionError::service_error(format!(
                "Can't create shard segments directory. Error: {err}"
            ))
        })?;

    let mut segment_holder = SegmentHolder::default();
    let mut build_handlers = vec![];

    let vector_params = config
        .params
        .to_base_vector_data(config.quantization_config.as_ref())?;
    let sparse_vector_params = config.params.to_sparse_vector_data()?;
    let segment_number = config.optimizer_config.get_number_segments();

    // Build the initial empty segments, each on its own dedicated thread.
    for _sid in 0..segment_number {
        let path_clone = segments_path.clone();
        let segment_config = SegmentConfig {
            vector_data: vector_params.clone(),
            sparse_vector_data: sparse_vector_params.clone(),
            payload_storage_type: config.params.payload_storage_type(),
        };
        let segment = thread::Builder::new()
            .name(format!("shard-build-{collection_id}-{id}"))
            .spawn(move || build_segment(&path_clone, &segment_config, true))
            .unwrap();
        build_handlers.push(segment);
    }

    // Join all builder threads; a panicked thread is converted into a
    // service error carrying the panic message if one can be extracted.
    let join_results = build_handlers
        .into_iter()
        .map(|handler| handler.join())
        .collect_vec();

    for join_result in join_results {
        let segment = join_result.map_err(|err| {
            let message = panic::downcast_str(&err).unwrap_or("");
            let separator = if !message.is_empty() { "with:\n" } else { "" };
            CollectionError::service_error(format!(
                "Segment DB create panicked{separator}{message}",
            ))
        })??;
        segment_holder.add_new(segment);
    }

    let wal: SerdeWal<OperationWithClockTag> =
        SerdeWal::new(&wal_path, (&config.wal_config).into())?;

    let optimizers = build_optimizers(
        shard_path,
        &config.params,
        &effective_optimizers_config,
        &config.hnsw_config,
        &shared_storage_config.hnsw_global_config,
        &config.quantization_config,
    );

    drop(config); // release `shared_config` from borrow checker

    let local_shard = LocalShard::new(
        collection_id,
        segment_holder,
        collection_config,
        shared_storage_config,
        payload_index_schema,
        wal,
        optimizers,
        optimizer_resource_budget,
        shard_path,
        LocalShardClocks::default(),
        update_runtime,
        search_runtime,
    )
    .await;

    // Ensure the WAL does not start with operation number zero.
    local_shard.insert_fake_operation().await?;

    Ok(local_shard)
}
/// This operation inserts an empty operation into WAL.
/// Operation does nothing, but takes a spot in WAL.
/// We need it mostly to force WAL to start with something besides zero as a first operation number.
pub async fn insert_fake_operation(&self) -> CollectionResult<()> {
    // An upsert of zero points is a no-op that still occupies a WAL slot.
    let empty_upsert = CollectionUpdateOperations::PointOperation(
        PointOperations::UpsertPoints(PointInsertOperationsInternal::from(vec![])),
    );
    let mut operation = OperationWithClockTag {
        operation: empty_upsert,
        clock_tag: None,
    };
    self.wal.lock_and_write(&mut operation).await?;
    Ok(())
}
/// Loads latest collection operations from WAL
///
/// Replays every not-yet-truncated WAL entry against the segments, advancing
/// the newest-clocks map along the way, then force-flushes all segments.
pub async fn load_from_wal(&self, collection_id: CollectionId) -> CollectionResult<()> {
    // Hold both the clock map and the WAL for the whole replay so clock
    // advancement stays consistent with operation application.
    let mut newest_clocks = self.wal.newest_clocks.lock().await;
    let wal = self.wal.wal.lock().await;
    let bar = ProgressBar::new(wal.len(false));

    let progress_style = ProgressStyle::default_bar()
        .template("{msg} [{elapsed_precise}] {wide_bar} {pos}/{len} (eta:{eta})")
        .expect("Failed to create progress style");
    bar.set_style(progress_style);

    log::debug!(
        "Recovering shard {} starting reading WAL from {} up to {}",
        self.path.display(),
        wal.first_index(),
        wal.last_index(),
    );

    bar.set_message(format!("Recovering collection {collection_id}"));
    let segments = self.segments();

    // Fall back to basic text output if the progress bar is hidden (e.g. not a tty)
    let show_progress_bar = !bar.is_hidden();
    let mut last_progress_report = Instant::now();
    if !show_progress_bar {
        log::info!(
            "Recovering shard {}: 0/{} (0%)",
            self.path.display(),
            wal.len(false),
        );
    }

    // When `Segment`s are flushed, WAL is truncated up to the index of the last operation
    // that has been applied and flushed.
    //
    // `SerdeWal` wrapper persists/keeps track of this index (in addition to any handling
    // in the `wal` crate itself).
    //
    // `SerdeWal::read_all` starts reading WAL from the first "un-truncated" index,
    // so no additional handling required to "skip" any potentially applied entries.
    //
    // Note, that it's not guaranteed that some operation won't be re-applied to the storage.
    // (`SerdeWal::read_all` may even start reading WAL from some already truncated
    // index *occasionally*), but the storage can handle it.
    for (op_num, update) in wal.read_all(false) {
        if let Some(clock_tag) = update.clock_tag {
            newest_clocks.advance_clock(clock_tag);
        }

        // Propagate `CollectionError::ServiceError`, but skip other error types.
        match &CollectionUpdater::update(
            segments,
            op_num,
            update.operation,
            self.update_operation_lock.clone(),
            self.update_tracker.clone(),
            &HardwareCounterCell::disposable(), // Internal operation, no measurement needed.
        ) {
            Err(err @ CollectionError::ServiceError { error, backtrace }) => {
                let path = self.path.display();

                log::error!(
                    "Can't apply WAL operation: {error}, \
                     collection: {collection_id}, \
                     shard: {path}, \
                     op_num: {op_num}"
                );

                if let Some(backtrace) = &backtrace {
                    log::error!("Backtrace: {backtrace}");
                }

                return Err(err.clone());
            }
            Err(err @ CollectionError::OutOfMemory { .. }) => {
                log::error!("{err}");
                return Err(err.clone());
            }
            // Missing points/fields are expected on replay; log and continue.
            Err(err @ CollectionError::NotFound { .. }) => log::warn!("{err}"),
            Err(err) => log::error!("{err}"),
            Ok(_) => (),
        }

        // Update progress bar or show text progress every WAL_LOAD_REPORT_EVERY
        bar.inc(1);
        if !show_progress_bar && last_progress_report.elapsed() >= WAL_LOAD_REPORT_EVERY {
            let progress = bar.position();
            log::info!(
                "{progress}/{} ({}%)",
                wal.len(false),
                (progress as f32 / wal.len(false) as f32 * 100.0) as usize,
            );
            last_progress_report = Instant::now();
        }
    }

    {
        let segments = self.segments.read();

        // Force a flush after re-applying WAL operations, to ensure we maintain on-disk data
        // consistency, if we happened to only apply *past* operations to a segment with newer
        // version.
        segments.flush_all(true, true)?;
    }

    bar.finish();
    if !show_progress_bar {
        log::info!(
            "Recovered collection {collection_id}: {0}/{0} (100%)",
            wal.len(false),
        );
    }

    // The storage is expected to be consistent after WAL recovery
    #[cfg(feature = "data-consistency-check")]
    self.check_data_consistency()?;

    Ok(())
}
/// Check data consistency for all segments
///
/// Returns an error at the first inconsistent segment
pub fn check_data_consistency(&self) -> CollectionResult<()> {
    log::info!("Checking data consistency for shard {:?}", self.path);
    for (_idx, segment) in self.segments.read().iter() {
        match segment {
            LockedSegment::Original(raw_segment) => {
                let guard = raw_segment.read();
                if let Err(err) = guard.check_data_consistency() {
                    log::error!(
                        "Segment {:?} is inconsistent: {}",
                        guard.current_path,
                        err
                    );
                    return Err(err.into());
                }
            }
            // Proxies must be unwrapped before a consistency check makes sense.
            LockedSegment::Proxy(_) => {
                return Err(CollectionError::service_error(
                    "Proxy segment found in check_data_consistency",
                ));
            }
        }
    }
    Ok(())
}
/// Apply shard's strict mode configuration update
/// - Update read rate limiter
pub async fn on_strict_mode_config_update(&mut self) {
let config = self.collection_config.read().await;
if let Some(strict_mode_config) = &config.strict_mode_config
&& strict_mode_config.enabled == Some(true)
{
// update read rate limiter
if let Some(read_rate_limit_per_min) = strict_mode_config.read_rate_limit {
let new_read_rate_limiter = RateLimiter::new_per_minute(read_rate_limit_per_min);
self.read_rate_limiter
.replace(parking_lot::Mutex::new(new_read_rate_limiter));
return;
}
}
// remove read rate limiter for all other situations
self.read_rate_limiter.take();
}
/// Estimate how many points match `filter`, summed over all segments.
pub async fn estimate_cardinality<'a>(
    &'a self,
    filter: Option<&'a Filter>,
    hw_measurement_acc: &HwMeasurementAcc,
) -> CollectionResult<CardinalityEstimation> {
    let segments = self.segments.clone();
    let hw_counter = hw_measurement_acc.get_counter_cell();

    // Owned copy of the filter so it can move into the blocking task.
    let filter = filter.cloned();

    let task = tokio::task::spawn_blocking(move || {
        let segments = segments.read(); // blocking sync lock
        let mut total = CardinalityEstimation::exact(0);
        for (_id, segment) in segments.iter() {
            let estimation = segment
                .get()
                .read() // blocking sync lock
                .estimate_point_count(filter.as_ref(), &hw_counter);
            // Accumulate per-segment min/exp/max bounds.
            total = CardinalityEstimation {
                primary_clauses: vec![],
                min: total.min + estimation.min,
                exp: total.exp + estimation.exp,
                max: total.max + estimation.max,
            };
        }
        total
    });

    Ok(AbortOnDropHandle::new(task).await?)
}
/// Collect the ids of all points matching `filter`.
pub async fn read_filtered<'a>(
    &'a self,
    filter: Option<&'a Filter>,
    runtime_handle: &Handle,
    hw_counter: HwMeasurementAcc,
) -> CollectionResult<BTreeSet<PointIdType>> {
    // Delegate to the shared segment searcher over a clone of the holder.
    SegmentsSearcher::read_filtered(self.segments.clone(), filter, runtime_handle, hw_counter)
        .await
}
pub async fn local_shard_status(&self) -> (ShardStatus, OptimizersStatus) {
{
let segments = self.segments.clone();
let has_errored_optimizers = tokio::task::spawn_blocking(move || {
let segments = segments.read(); // blocking sync lock
// Red status on failed operation or optimizer error
if !segments.failed_operation.is_empty() || segments.optimizer_errors.is_some() {
let optimizer_status = segments
.optimizer_errors
.as_ref()
.map_or(OptimizersStatus::Ok, |err| {
OptimizersStatus::Error(err.clone())
});
return Some((ShardStatus::Red, optimizer_status));
}
// Yellow status if we have a special segment, indicates a proxy segment used during optimization
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/drop.rs | lib/collection/src/shards/local_shard/drop.rs | use std::thread;
use crate::shards::local_shard::LocalShard;
use crate::update_handler::UpdateSignal;
impl Drop for LocalShard {
    fn drop(&mut self) {
        // A graceful shutdown already stopped the workers; nothing left to do.
        if self.is_gracefully_stopped {
            return;
        }

        log::debug!(
            "Local shard {} is not explicitly stopped before drop. Attempting to stop background workers.",
            self.shard_path().display()
        );

        // To investigate non-graceful shard drops, enable this backtrace capture:
        // #[cfg(feature = "staging")]
        // {
        //     log::debug!("{}", std::backtrace::Backtrace::force_capture());
        // }

        // Normally `LocalShard` should be explicitly stopped in the async runtime
        // before being dropped, because:
        // - We don't want background tasks to conflict.
        // - We might want to delete files, and can't have other threads using them.
        //
        // However, not every drop can be caught explicitly. On an interrupted
        // shutdown we may not control the shutdown sequence, or a graceful stop
        // may simply have been forgotten. So a drop without explicit shutdown has
        // to be tolerated here, while removes and internal operations still call
        // the explicit shutdown path.
        let update_handler = self.update_handler.clone();
        let update_sender = self.update_sender.load().clone();

        // Stopping the workers blocks, and drop may run inside the runtime,
        // so do it on a dedicated thread.
        let spawn_result = thread::Builder::new()
            .name("drop-shard".to_string())
            .spawn(move || {
                {
                    let mut handler = update_handler.blocking_lock();
                    if handler.is_stopped() {
                        return true;
                    }
                    handler.stop_flush_worker();
                    handler.notify_optimization_handles_to_stop();
                }

                // This can block longer, if the channel is full
                // If channel is closed, assume it is already stopped
                if let Err(err) = update_sender.blocking_send(UpdateSignal::Stop) {
                    log::trace!("Error sending update signal to update handler: {err}");
                }
                false
            });

        // On success the detached thread keeps running; `Drop` cannot wait for it.
        if let Err(err) = spawn_result {
            log::warn!("Failed to background ask workers to stop: {err:?}");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/shard_ops.rs | lib/collection/src/shards/local_shard/shard_ops.rs | use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::data_types::order_by::OrderBy;
use segment::types::{
ExtendedPointId, Filter, ScoredPoint, WithPayload, WithPayloadInterface, WithVector,
};
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use tokio::sync::oneshot;
use tokio::time::Instant;
use tokio::time::error::Elapsed;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::operations::OperationWithClockTag;
use crate::operations::generalizer::Generalizer;
use crate::operations::types::{
CollectionError, CollectionInfo, CollectionResult, CountRequestInternal, CountResult,
PointRequestInternal, ScrollRequestInternal, UpdateResult, UpdateStatus,
};
use crate::operations::universal_query::planned_query::PlannedQuery;
use crate::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use crate::operations::verification::operation_rate_cost::{BASE_COST, filter_rate_cost};
use crate::profiling::interface::log_request_to_collector;
use crate::shards::local_shard::LocalShard;
use crate::shards::shard_trait::ShardOperation;
use crate::update_handler::{OperationData, UpdateSignal};
#[async_trait]
impl ShardOperation for LocalShard {
/// Imply interior mutability.
/// Performs update operation on this collection asynchronously.
/// Explicitly waits for result to be updated.
///
/// # Cancel safety
///
/// This method is cancel safe.
async fn update(
    &self,
    mut operation: OperationWithClockTag,
    wait: bool,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<UpdateResult> {
    // `LocalShard::update` only has a single cancel safe `await`, WAL operations are blocking,
    // and update is applied by a separate task, so, surprisingly, this method is cancel safe. :D

    // When waiting, the update worker reports completion through this oneshot channel.
    let (callback_sender, callback_receiver) = if wait {
        let (tx, rx) = oneshot::channel();
        (Some(tx), Some(rx))
    } else {
        (None, None)
    };

    // Refuse updates when the WAL buffer no longer fits on disk.
    if self
        .disk_usage_watcher
        .is_disk_full()
        .await?
        .unwrap_or(false)
    {
        return Err(CollectionError::service_error(
            "No space left on device: WAL buffer size exceeds available disk space".to_string(),
        ));
    }

    let operation_id = {
        let update_sender = self.update_sender.load();
        // Reserve a channel slot *before* writing to the WAL, so the send below cannot fail.
        let channel_permit = update_sender.reserve().await?;

        // It is *critical* to hold `_wal_lock` while sending operation to the update handler!
        //
        // TODO: Refactor `lock_and_write`, so this is less terrible? :/
        let (operation_id, _wal_lock) = match self.wal.lock_and_write(&mut operation).await {
            Ok(id_and_lock) => id_and_lock,
            Err(shard::wal::WalError::ClockRejected) => {
                // Propagate clock rejection to operation sender
                return Ok(UpdateResult {
                    operation_id: None,
                    status: UpdateStatus::ClockRejected,
                    clock_tag: operation.clock_tag,
                });
            }
            Err(err) => return Err(err.into()),
        };

        channel_permit.send(UpdateSignal::Operation(OperationData {
            op_num: operation_id,
            operation: operation.operation,
            sender: callback_sender,
            wait,
            hw_measurements: hw_measurement_acc.clone(),
        }));

        operation_id
    };

    if let Some(receiver) = callback_receiver {
        // Wait until the update worker has actually applied the operation.
        let _res = receiver.await??;
        Ok(UpdateResult {
            operation_id: Some(operation_id),
            status: UpdateStatus::Completed,
            clock_tag: operation.clock_tag,
        })
    } else {
        Ok(UpdateResult {
            operation_id: Some(operation_id),
            status: UpdateStatus::Acknowledged,
            clock_tag: operation.clock_tag,
        })
    }
}
/// This call is rate limited by the read rate limiter.
async fn scroll_by(
    &self,
    request: Arc<ScrollRequestInternal>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
    let ScrollRequestInternal {
        offset,
        limit,
        filter,
        with_payload,
        with_vector,
        order_by,
    } = request.as_ref();

    // Paging by id offset and paging by `order_by` are mutually exclusive.
    if order_by.is_some() && offset.is_some() {
        return Err(CollectionError::bad_input("Cannot use an `offset` when using `order_by`. The alternative for paging is to use `order_by.start_from` and a filter to exclude the IDs that you've already seen for the `order_by.start_from` value".to_string()));
    }

    // Rate-limit cost: base plus the cost of any filter.
    self.check_read_rate_limiter(&hw_measurement_acc, "scroll_by", || {
        filter
            .as_ref()
            .map_or(BASE_COST, |filter| BASE_COST + filter_rate_cost(filter))
    })?;

    let start_time = Instant::now();

    let default_with_payload = ScrollRequestInternal::default_with_payload();
    let with_payload = with_payload.as_ref().unwrap_or(&default_with_payload);
    let limit = limit.unwrap_or(ScrollRequestInternal::default_limit());
    let timeout = self.timeout_or_default_search_timeout(timeout);

    let result = if let Some(order_by) = order_by.clone().map(OrderBy::from) {
        self.internal_scroll_by_field(
            limit,
            with_payload,
            with_vector,
            filter.as_ref(),
            search_runtime_handle,
            &order_by,
            timeout,
            hw_measurement_acc,
        )
        .await?
    } else {
        self.internal_scroll_by_id(
            *offset,
            limit,
            with_payload,
            with_vector,
            filter.as_ref(),
            search_runtime_handle,
            timeout,
            hw_measurement_acc,
        )
        .await?
    };

    log_request_to_collector(&self.collection_name, start_time.elapsed(), || request);

    Ok(result)
}
/// Id-ordered scroll with the shard's default search timeout applied.
async fn local_scroll_by_id(
    &self,
    offset: Option<ExtendedPointId>,
    limit: usize,
    with_payload_interface: &WithPayloadInterface,
    with_vector: &WithVector,
    filter: Option<&Filter>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
    self.internal_scroll_by_id(
        offset,
        limit,
        with_payload_interface,
        with_vector,
        filter,
        search_runtime_handle,
        self.timeout_or_default_search_timeout(timeout),
        hw_measurement_acc,
    )
    .await
}
/// Collect overview information about the shard
async fn info(&self) -> CollectionResult<CollectionInfo> {
Ok(CollectionInfo::from(self.local_shard_info().await))
}
/// This call is rate limited by the read rate limiter.
async fn core_search(
    &self,
    request: Arc<CoreSearchRequestBatch>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
    // Rate-limit cost: sum over all sub-searches of the batch.
    self.check_read_rate_limiter(&hw_measurement_acc, "core_search", || {
        request
            .searches
            .iter()
            .map(|search| search.search_rate_cost())
            .sum()
    })?;

    let timeout = self.timeout_or_default_search_timeout(timeout);
    self.do_search(request, search_runtime_handle, timeout, hw_measurement_acc)
        .await
}
/// This call is rate limited by the read rate limiter.
async fn count(
    &self,
    request: Arc<CountRequestInternal>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<CountResult> {
    // Rate-limit cost: base plus the cost of any filter.
    self.check_read_rate_limiter(&hw_measurement_acc, "count", || {
        request
            .filter
            .as_ref()
            .map_or(BASE_COST, |filter| BASE_COST + filter_rate_cost(filter))
    })?;

    let start_time = Instant::now();

    let count = if request.exact {
        // Exact count: materialize all matching ids, bounded by the timeout.
        let timeout = self.timeout_or_default_search_timeout(timeout);
        let read_future = self.read_filtered(
            request.filter.as_ref(),
            search_runtime_handle,
            hw_measurement_acc,
        );
        let all_points = tokio::time::timeout(timeout, read_future)
            .await
            .map_err(|_: Elapsed| CollectionError::timeout(timeout, "count"))??;
        all_points.len()
    } else {
        // Approximate count from the cardinality estimator.
        self.estimate_cardinality(request.filter.as_ref(), &hw_measurement_acc)
            .await?
            .exp
    };

    log_request_to_collector(&self.collection_name, start_time.elapsed(), || request);

    Ok(CountResult { count })
}
/// This call is rate limited by the read rate limiter.
async fn retrieve(
    &self,
    request: Arc<PointRequestInternal>,
    with_payload: &WithPayload,
    with_vector: &WithVector,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<RecordInternal>> {
    // Rate-limit cost: one unit per requested point id.
    self.check_read_rate_limiter(&hw_measurement_acc, "retrieve", || request.ids.len())?;

    let timeout = self.timeout_or_default_search_timeout(timeout);
    let start_time = Instant::now();

    let retrieve_future = SegmentsSearcher::retrieve(
        self.segments.clone(),
        &request.ids,
        with_payload,
        with_vector,
        search_runtime_handle,
        timeout,
        hw_measurement_acc,
    );
    let records_map = tokio::time::timeout(timeout, retrieve_future)
        .await
        .map_err(|_: Elapsed| CollectionError::timeout(timeout, "retrieve"))??;

    // Return records in the order the ids were requested; missing ids are dropped.
    let ordered_records: Vec<_> = request
        .ids
        .iter()
        .filter_map(|point_id| records_map.get(point_id).cloned())
        .collect();

    log_request_to_collector(&self.collection_name, start_time.elapsed(), || request);

    Ok(ordered_records)
}
/// This call is rate limited by the read rate limiter.
async fn query_batch(
    &self,
    requests: Arc<Vec<ShardQueryRequest>>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ShardQueryResponse>> {
    let start_time = Instant::now();
    let planned_query = PlannedQuery::try_from(requests.as_ref().to_owned())?;

    // Rate-limit cost: every planned search plus every planned scroll.
    self.check_read_rate_limiter(&hw_measurement_acc, "query_batch", || {
        let search_cost = planned_query.searches.iter().map(|s| s.search_rate_cost());
        let scroll_cost = planned_query.scrolls.iter().map(|s| s.scroll_rate_cost());
        search_cost.chain(scroll_cost).sum()
    })?;

    let timeout = self.timeout_or_default_search_timeout(timeout);
    let result = self
        .do_planned_query(
            planned_query,
            search_runtime_handle,
            timeout,
            hw_measurement_acc,
        )
        .await;

    log_request_to_collector(&self.collection_name, start_time.elapsed(), || {
        requests.remove_details()
    });

    result
}
/// This call is rate limited by the read rate limiter.
async fn facet(
    &self,
    request: Arc<FacetParams>,
    search_runtime_handle: &Handle,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<FacetResponse> {
    // Rate-limit cost: base plus the cost of any filter.
    self.check_read_rate_limiter(&hw_measurement_acc, "facet", || {
        request
            .filter
            .as_ref()
            .map_or(BASE_COST, |filter| BASE_COST + filter_rate_cost(filter))
    })?;

    let start_time = Instant::now();
    let timeout = self.timeout_or_default_search_timeout(timeout);

    // Exact faceting scans matching points; approximate uses index statistics.
    let hits = if request.exact {
        self.exact_facet(
            request.clone(),
            search_runtime_handle,
            timeout,
            hw_measurement_acc,
        )
        .await?
    } else {
        self.approx_facet(
            request.clone(),
            search_runtime_handle,
            timeout,
            hw_measurement_acc,
        )
        .await?
    };

    log_request_to_collector(&self.collection_name, start_time.elapsed(), || request);

    Ok(FacetResponse { hits })
}
/// Finishes ongoing update tasks
async fn stop_gracefully(mut self) {
if let Err(err) = self.update_sender.load().send(UpdateSignal::Stop).await {
log::warn!("Error sending stop signal to update handler: {err}");
}
self.stop_flush_worker().await;
if let Err(err) = self.wait_update_workers_stop().await {
log::warn!("Update workers failed with: {err}");
}
self.is_gracefully_stopped = true;
drop(self);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/query.rs | lib/collection/src/shards/local_shard/query.rs | use std::mem;
use std::sync::Arc;
use std::time::{Duration, Instant};
use ahash::AHashSet;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::FutureExt;
use futures::future::BoxFuture;
use ordered_float::OrderedFloat;
use parking_lot::Mutex;
use segment::common::reciprocal_rank_fusion::rrf_scoring;
use segment::common::score_fusion::{ScoreFusion, score_fusion};
use segment::types::{Filter, HasIdCondition, ScoredPoint, WithPayloadInterface, WithVector};
use shard::query::planned_query::RescoreStages;
use shard::search::CoreSearchRequestBatch;
use tokio::runtime::Handle;
use super::LocalShard;
use crate::collection::mmr::mmr_from_points_with_vector;
use crate::collection_manager::segments_searcher::SegmentsSearcher;
use crate::operations::types::{
CollectionError, CollectionResult, CoreSearchRequest, QueryScrollRequestInternal, ScrollOrder,
};
use crate::operations::universal_query::planned_query::{
MergePlan, PlannedQuery, RescoreParams, RootPlan, Source,
};
use crate::operations::universal_query::shard_query::{
FusionInternal, MmrInternal, SampleInternal, ScoringQuery, ShardQueryResponse,
};
/// Identifies one prefetched result set of a planned query by its position.
pub enum FetchedSource {
    /// Index into the planned query's batched search results.
    Search(usize),
    /// Index into the planned query's batched scroll results.
    Scroll(usize),
}
/// Holds the intermediate search and scroll results of a planned query.
///
/// Wrapped in mutexes so individual result sets can be moved out by index
/// from a shared reference while root plans are resolved concurrently.
struct PrefetchResults {
    search_results: Mutex<Vec<Vec<ScoredPoint>>>,
    scroll_results: Mutex<Vec<Vec<ScoredPoint>>>,
}
impl PrefetchResults {
    /// Wraps the already-computed search and scroll result batches.
    fn new(search_results: Vec<Vec<ScoredPoint>>, scroll_results: Vec<Vec<ScoredPoint>>) -> Self {
        Self {
            search_results: Mutex::new(search_results),
            scroll_results: Mutex::new(scroll_results),
        }
    }

    /// Moves the result set identified by `element` out of the holder,
    /// leaving an empty vector in its place.
    ///
    /// Errors with a service error if the index does not exist.
    fn get(&self, element: FetchedSource) -> CollectionResult<Vec<ScoredPoint>> {
        let (results, idx) = match element {
            FetchedSource::Search(idx) => (&self.search_results, idx),
            FetchedSource::Scroll(idx) => (&self.scroll_results, idx),
        };
        let mut guard = results.lock();
        guard
            .get_mut(idx)
            .map(mem::take)
            .ok_or_else(|| CollectionError::service_error("Expected a prefetched source to exist"))
    }
}
impl LocalShard {
    /// Execute a planned query: run all prefetch searches and scrolls
    /// concurrently, then resolve every root plan against those results.
    ///
    /// Returns one `ShardQueryResponse` per root plan in `request`.
    pub async fn do_planned_query(
        &self,
        request: PlannedQuery,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_counter_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ShardQueryResponse>> {
        let start_time = std::time::Instant::now();
        let searches_f = self.do_search(
            Arc::new(CoreSearchRequestBatch {
                searches: request.searches,
            }),
            search_runtime_handle,
            timeout,
            hw_counter_acc.clone(),
        );

        let scrolls_f = self.query_scroll_batch(
            Arc::new(request.scrolls),
            search_runtime_handle,
            timeout,
            hw_counter_acc.clone(),
        );

        // execute both searches and scrolls concurrently
        let (search_results, scroll_results) = tokio::try_join!(searches_f, scrolls_f)?;
        let prefetch_holder = PrefetchResults::new(search_results, scroll_results);

        // decrease timeout by the time spent so far
        let timeout = timeout.saturating_sub(start_time.elapsed());

        // Resolve all root plans concurrently; each plan consumes its own
        // sources from the shared prefetch holder.
        let plans_futures = request.root_plans.into_iter().map(|root_plan| {
            self.resolve_plan(
                root_plan,
                &prefetch_holder,
                search_runtime_handle,
                timeout,
                hw_counter_acc.clone(),
            )
        });

        let batched_scored_points = futures::future::try_join_all(plans_futures).await?;

        Ok(batched_scored_points)
    }
    /// Fetches the payload and/or vector if required. This will filter out points if they are deleted between search and retrieve.
    ///
    /// Returns the response unchanged when neither payloads nor vectors are
    /// requested.
    async fn fill_with_payload_or_vectors(
        &self,
        query_response: ShardQueryResponse,
        with_payload: WithPayloadInterface,
        with_vector: WithVector,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<ShardQueryResponse> {
        if !with_payload.is_required() && !with_vector.is_enabled() {
            return Ok(query_response);
        }

        // ids to retrieve (deduplication happens in the searcher)
        let point_ids: Vec<_> = query_response
            .iter()
            .flatten()
            .map(|scored_point| scored_point.id)
            .collect();

        // Collect retrieved records into a hashmap for fast lookup
        let records_map = tokio::time::timeout(
            timeout,
            SegmentsSearcher::retrieve(
                self.segments.clone(),
                &point_ids,
                &(&with_payload).into(),
                &with_vector,
                &self.search_runtime,
                timeout,
                hw_measurement_acc,
            ),
        )
        .await
        .map_err(|_| CollectionError::timeout(timeout, "retrieve"))??;

        // It might be possible, that we won't find all records,
        // so we need to re-collect the results
        // (points whose record disappeared are dropped, not errored).
        let query_response: ShardQueryResponse = query_response
            .into_iter()
            .map(|points| {
                points
                    .into_iter()
                    .filter_map(|mut point| {
                        records_map.get(&point.id).map(|record| {
                            // Copy payload/vector from the freshly retrieved record.
                            point.payload.clone_from(&record.payload);
                            point.vector.clone_from(&record.vector);
                            point
                        })
                    })
                    .collect()
            })
            .collect();

        Ok(query_response)
    }
async fn resolve_plan(
&self,
root_plan: RootPlan,
prefetch_holder: &PrefetchResults,
search_runtime_handle: &Handle,
timeout: Duration,
hw_measurement_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<Vec<ScoredPoint>>> {
let RootPlan {
merge_plan,
with_payload,
with_vector,
} = root_plan;
// resolve merging plan
let results = self
.recurse_prefetch(
merge_plan,
prefetch_holder,
search_runtime_handle,
timeout,
0,
hw_measurement_acc.clone(),
)
.await?;
// fetch payloads and vectors if required
self.fill_with_payload_or_vectors(
results,
with_payload,
with_vector,
timeout,
hw_measurement_acc,
)
.await
}
    /// Recursively resolve a merge plan into batches of scored points.
    ///
    /// Gathers all sources (prefetched search/scroll results and nested
    /// prefetch plans) in order, then applies the shard-level rescore stage
    /// when present. Returns a boxed future because the function is recursive.
    fn recurse_prefetch<'a>(
        &'a self,
        merge_plan: MergePlan,
        prefetch_holder: &'a PrefetchResults,
        search_runtime_handle: &'a Handle,
        timeout: Duration,
        depth: usize,
        hw_counter_acc: HwMeasurementAcc,
    ) -> BoxFuture<'a, CollectionResult<Vec<Vec<ScoredPoint>>>> {
        async move {
            let MergePlan {
                sources: plan_sources,
                rescore_stages,
            } = merge_plan;

            let start_time = std::time::Instant::now();
            let max_len = plan_sources.len();
            let mut sources = Vec::with_capacity(max_len);

            // We need to preserve the order of the sources for some fusion strategies
            for source in plan_sources {
                match source {
                    Source::SearchesIdx(idx) => {
                        sources.push(prefetch_holder.get(FetchedSource::Search(idx))?)
                    }
                    Source::ScrollsIdx(idx) => {
                        sources.push(prefetch_holder.get(FetchedSource::Scroll(idx))?)
                    }
                    Source::Prefetch(prefetch) => {
                        // Nested plan: resolve it first, then splice its batches in.
                        let merged = self
                            .recurse_prefetch(
                                *prefetch,
                                prefetch_holder,
                                search_runtime_handle,
                                timeout,
                                depth + 1,
                                hw_counter_acc.clone(),
                            )
                            .await?
                            .into_iter();
                        sources.extend(merged);
                    }
                }
            }

            // decrease timeout by the time spent so far (recursive calls)
            let timeout = timeout.saturating_sub(start_time.elapsed());

            if let Some(rescore_stages) = rescore_stages {
                let RescoreStages {
                    shard_level,
                    collection_level: _, // We can ignore collection level here
                } = rescore_stages;

                let rescored = if let Some(rescore_params) = shard_level {
                    let rescored = self
                        .rescore(
                            sources,
                            rescore_params,
                            search_runtime_handle,
                            timeout,
                            hw_counter_acc,
                        )
                        .await?;
                    vec![rescored]
                } else {
                    // This re-scoring method requires full knowledge of all sources across all shards,
                    // so we just pass the sources up to the collection level.
                    debug_assert_eq!(depth, 0);
                    sources
                };

                Ok(rescored)
            } else {
                // The sources here are passed to the next layer without any extra processing.
                // It should be a query without prefetches.
                debug_assert_eq!(depth, 0);
                debug_assert_eq!(sources.len(), 1);
                Ok(sources)
            }
        }
        .boxed()
    }
/// Rescore list of scored points
async fn rescore(
&self,
sources: Vec<Vec<ScoredPoint>>,
rescore_params: RescoreParams,
search_runtime_handle: &Handle,
timeout: Duration,
hw_counter_acc: HwMeasurementAcc,
) -> CollectionResult<Vec<ScoredPoint>> {
let RescoreParams {
rescore,
score_threshold,
limit,
params,
} = rescore_params;
match rescore {
ScoringQuery::Fusion(fusion) => {
self.fusion_rescore(
sources.into_iter(),
fusion,
score_threshold.map(OrderedFloat::into_inner),
limit,
)
.await
}
ScoringQuery::OrderBy(order_by) => {
// create single scroll request for rescoring query
let filter = filter_with_sources_ids(sources.into_iter());
// Note: score_threshold is not used in this case, as all results will have same score,
// but different order_value
let scroll_request = QueryScrollRequestInternal {
limit,
filter: Some(filter),
with_payload: false.into(),
with_vector: false.into(),
scroll_order: ScrollOrder::ByField(order_by),
};
self.query_scroll_batch(
Arc::new(vec![scroll_request]),
search_runtime_handle,
timeout,
hw_counter_acc.clone(),
)
.await?
.pop()
.ok_or_else(|| {
CollectionError::service_error(
"Rescoring with order-by query didn't return expected batch of results",
)
})
}
ScoringQuery::Vector(query_enum) => {
// create single search request for rescoring query
let filter = filter_with_sources_ids(sources.into_iter());
let search_request = CoreSearchRequest {
query: query_enum,
filter: Some(filter),
params,
limit,
offset: 0,
with_payload: None,
with_vector: None,
score_threshold: score_threshold.map(OrderedFloat::into_inner),
};
let rescoring_core_search_request = CoreSearchRequestBatch {
searches: vec![search_request],
};
self.do_search(
Arc::new(rescoring_core_search_request),
search_runtime_handle,
timeout,
hw_counter_acc,
)
.await?
// One search request is sent. We expect only one result
.pop()
.ok_or_else(|| {
CollectionError::service_error(
"Rescoring with vector(s) query didn't return expected batch of results",
)
})
}
ScoringQuery::Formula(formula) => {
self.rescore_with_formula(formula, sources, limit, timeout, hw_counter_acc)
.await
}
ScoringQuery::Sample(sample) => match sample {
SampleInternal::Random => {
// create single scroll request for rescoring query
let filter = filter_with_sources_ids(sources.into_iter());
// Note: score_threshold is not used in this case, as all results will have same score and order_value
let scroll_request = QueryScrollRequestInternal {
limit,
filter: Some(filter),
with_payload: false.into(),
with_vector: false.into(),
scroll_order: ScrollOrder::Random,
};
self.query_scroll_batch(
Arc::new(vec![scroll_request]),
search_runtime_handle,
timeout,
hw_counter_acc.clone(),
)
.await?
.pop()
.ok_or_else(|| {
CollectionError::service_error(
"Rescoring with order-by query didn't return expected batch of results",
)
})
}
},
ScoringQuery::Mmr(mmr) => {
self.mmr_rescore(
sources,
mmr,
limit,
search_runtime_handle,
timeout,
hw_counter_acc,
)
.await
}
}
}
#[allow(clippy::too_many_arguments)]
async fn fusion_rescore(
&self,
sources: impl Iterator<Item = Vec<ScoredPoint>>,
fusion: FusionInternal,
score_threshold: Option<f32>,
limit: usize,
) -> CollectionResult<Vec<ScoredPoint>> {
let fused = match fusion {
FusionInternal::RrfK(k) => rrf_scoring(sources, k),
FusionInternal::Dbsf => score_fusion(sources, ScoreFusion::dbsf()),
};
let top_fused: Vec<_> = if let Some(score_threshold) = score_threshold {
fused
.into_iter()
.take_while(|point| point.score >= score_threshold)
.take(limit)
.collect()
} else {
fused.into_iter().take(limit).collect()
};
Ok(top_fused)
}
    /// Maximal Marginal Relevance rescoring
    ///
    /// Fetches the vector named by `mmr.using` for all source points, runs
    /// MMR selection over them, and strips the fetched vectors from the
    /// output again (user-requested vectors are filled at the root level).
    async fn mmr_rescore(
        &self,
        sources: Vec<Vec<ScoredPoint>>,
        mmr: MmrInternal,
        limit: usize,
        search_runtime_handle: &Handle,
        timeout: Duration,
        hw_measurement_acc: HwMeasurementAcc,
    ) -> CollectionResult<Vec<ScoredPoint>> {
        let start = Instant::now();

        // Fetch only the vector needed for MMR, no payloads.
        let points_with_vector = self
            .fill_with_payload_or_vectors(
                sources,
                false.into(),
                WithVector::from(mmr.using.clone()),
                timeout,
                hw_measurement_acc.clone(),
            )
            .await?
            .into_iter()
            .flatten();

        // Spend the remaining time budget on the MMR computation itself.
        let timeout = timeout.saturating_sub(start.elapsed());

        let collection_params = &self.collection_config.read().await.params;

        // Even if we have fewer points than requested, still calculate MMR.
        let mut top_mmr = mmr_from_points_with_vector(
            collection_params,
            points_with_vector,
            mmr,
            limit,
            search_runtime_handle,
            timeout,
            hw_measurement_acc,
        )
        .await?;

        // strip mmr vector. We will handle user-requested vectors at root level of request.
        for p in &mut top_mmr {
            p.vector = None;
        }

        Ok(top_mmr)
    }
}
/// Extracts point ids from sources, and creates a filter to only include those ids.
fn filter_with_sources_ids(sources: impl Iterator<Item = Vec<ScoredPoint>>) -> Filter {
let mut point_ids = AHashSet::new();
for source in sources {
for point in source.iter() {
point_ids.insert(point.id);
}
}
// create filter for target point ids
Filter::new_must(segment::types::Condition::HasId(HasIdCondition::from(
point_ids,
)))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/snapshot.rs | lib/collection/src/shards/local_shard/snapshot.rs | use std::num::NonZeroUsize;
use std::path::Path;
use std::sync::Arc;
use common::save_on_disk::SaveOnDisk;
use common::tar_ext;
use fs_err as fs;
use parking_lot::RwLock;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::manifest::SnapshotManifest;
use segment::entry::SegmentEntry;
use segment::segment::Segment;
use segment::types::{SegmentConfig, SnapshotFormat};
use shard::locked_segment::LockedSegment;
use shard::payload_index_schema::PayloadIndexSchema;
use shard::segment_holder::{LockedSegmentHolder, SegmentHolder};
use tokio::sync::oneshot;
use tokio_util::task::AbortOnDropHandle;
use wal::{Wal, WalOptions};
use crate::operations::types::{CollectionError, CollectionResult};
use crate::shards::local_shard::{LocalShard, LocalShardClocks, SEGMENTS_PATH, WAL_PATH};
use crate::update_handler::UpdateSignal;
use crate::wal_delta::LockedWal;
impl LocalShard {
    /// Collect a snapshot manifest for all segments of this shard.
    ///
    /// The potentially blocking manifest collection runs on a task spawned
    /// inside the search runtime via `spawn_cancel_on_drop`, so it is
    /// cancelled if the returned future is dropped.
    pub async fn snapshot_manifest(&self) -> CollectionResult<SnapshotManifest> {
        let task = {
            // Enter the search runtime so the spawned task belongs to it.
            let _runtime = self.search_runtime.enter();
            let segments = self.segments.clone();
            cancel::blocking::spawn_cancel_on_drop(move |_| segments.read().snapshot_manifest())
        };
        // Double `?`: join/cancellation error, then the manifest result itself.
        Ok(task.await??)
    }
pub fn restore_snapshot(snapshot_path: &Path) -> CollectionResult<()> {
log::info!("Restoring shard snapshot {}", snapshot_path.display());
// Read dir first as the directory contents would change during restore
let entries = fs::read_dir(LocalShard::segments_path(snapshot_path))?
.collect::<Result<Vec<_>, _>>()?;
// Filter out hidden entries
let entries = entries.into_iter().filter(|entry| {
let is_hidden = entry
.file_name()
.to_str()
.is_some_and(|s| s.starts_with('.'));
if is_hidden {
log::debug!(
"Ignoring hidden segment in local shard during snapshot recovery: {}",
entry.path().display(),
);
}
!is_hidden
});
for entry in entries {
Segment::restore_snapshot_in_place(&entry.path())?;
}
Ok(())
}
    /// Create snapshot for local shard, appending it to the provided `tar` builder
    ///
    /// `temp_path` is used for intermediate files (e.g. a generated empty WAL).
    /// When `save_wal` is false, all updates submitted so far are first applied
    /// to the segments and an empty-but-compatible WAL is archived instead of
    /// the real one.
    pub async fn create_snapshot(
        &self,
        temp_path: &Path,
        tar: &tar_ext::BuilderExt,
        format: SnapshotFormat,
        manifest: Option<SnapshotManifest>,
        save_wal: bool,
    ) -> CollectionResult<()> {
        let segments = self.segments.clone();
        let wal = self.wal.wal.clone();

        if !save_wal {
            // If we are not saving WAL, we still need to make sure that all submitted by this point
            // updates have made it to the segments. So we use the Plunger to achieve that.
            // It will notify us when all submitted updates so far have been processed.
            let (tx, rx) = oneshot::channel();
            let plunger = UpdateSignal::Plunger(tx);
            self.update_sender.load().send(plunger).await?;
            rx.await?;
        }

        let segments_path = Self::segments_path(&self.path);
        let segment_config = self
            .collection_config
            .read()
            .await
            .to_base_segment_config()?;
        let payload_index_schema = self.payload_index_schema.clone();
        let temp_path = temp_path.to_owned();
        let tar_c = tar.clone();
        let update_lock = self.update_operation_lock.clone();

        // Segment and WAL archiving is blocking work; run it off the async runtime.
        let handle = tokio::task::spawn_blocking(move || {
            // Do not change segments while snapshotting
            snapshot_all_segments(
                segments.clone(),
                &segments_path,
                Some(segment_config),
                payload_index_schema.clone(),
                &temp_path,
                &tar_c.descend(Path::new(SEGMENTS_PATH))?,
                format,
                manifest.as_ref(),
                update_lock,
            )?;

            if save_wal {
                // snapshot all shard's WAL
                Self::snapshot_wal(wal, &tar_c)
            } else {
                Self::snapshot_empty_wal(wal, &temp_path, &tar_c)
            }
        });
        // Tie the blocking task's handle to this future (aborted on drop).
        AbortOnDropHandle::new(handle).await??;

        // Archive the shard clock data alongside the snapshot.
        LocalShardClocks::archive_data(&self.path, tar).await?;

        Ok(())
    }
    /// Create empty WAL which is compatible with currently stored data
    ///
    /// The generated WAL keeps the source WAL's segment capacity and starts at
    /// its latest operation number, so a shard restored from the snapshot can
    /// append new operations seamlessly.
    ///
    /// # Panics
    ///
    /// This function panics if called within an asynchronous execution context.
    fn snapshot_empty_wal(
        wal: LockedWal,
        temp_path: &Path,
        tar: &tar_ext::BuilderExt,
    ) -> CollectionResult<()> {
        // Read capacity and latest op number under the lock, then release it.
        let (segment_capacity, latest_op_num) = {
            let wal_guard = wal.blocking_lock();
            (wal_guard.segment_capacity(), wal_guard.last_index())
        };

        let temp_dir = tempfile::tempdir_in(temp_path).map_err(|err| {
            CollectionError::service_error(format!(
                "Can not create temporary directory for WAL: {err}",
            ))
        })?;

        Wal::generate_empty_wal_starting_at_index(
            temp_dir.path(),
            &WalOptions {
                segment_capacity,
                segment_queue_len: 0,
                retain_closed: NonZeroUsize::new(1).unwrap(),
            },
            latest_op_num,
        )
        .map_err(|err| {
            CollectionError::service_error(format!("Error while create empty WAL: {err}"))
        })?;

        // Archive the freshly generated WAL under the shard's WAL directory.
        tar.blocking_append_dir_all(temp_dir.path(), Path::new(WAL_PATH))
            .map_err(|err| {
                CollectionError::service_error(format!("Error while archiving WAL: {err}"))
            })
    }
    /// snapshot WAL
    ///
    /// Flushes the WAL and appends every file of its directory to `tar`,
    /// holding the WAL lock for the whole duration.
    ///
    /// # Panics
    ///
    /// This function panics if called within an asynchronous execution context.
    fn snapshot_wal(wal: LockedWal, tar: &tar_ext::BuilderExt) -> CollectionResult<()> {
        // lock wal during snapshot
        let mut wal_guard = wal.blocking_lock();
        wal_guard.flush()?;
        let source_wal_path = wal_guard.path();

        let tar = tar.descend(Path::new(WAL_PATH))?;
        for entry in fs::read_dir(source_wal_path).map_err(|err| {
            CollectionError::service_error(format!("Can't read WAL directory: {err}",))
        })? {
            let entry = entry.map_err(|err| {
                CollectionError::service_error(format!("Can't read WAL directory: {err}",))
            })?;

            if entry.file_name() == ".wal" {
                // This sentinel file is used for WAL locking. Trying to archive
                // or open it will cause the following error on Windows:
                // > The process cannot access the file because another process
                // > has locked a portion of the file. (os error 33)
                // https://github.com/qdrant/wal/blob/7c9202d0874/src/lib.rs#L125-L145
                continue;
            }

            tar.blocking_append_file(&entry.path(), Path::new(&entry.file_name()))
                .map_err(|err| {
                    CollectionError::service_error(format!("Error while archiving WAL: {err}"))
                })?;
        }
        Ok(())
    }
}
/// Take a snapshot of all segments, appending them to `tar`
///
/// It is recommended to provide collection parameters. This function internally creates a
/// temporary segment, which will source the configuration from it.
///
/// Segments are snapshotted through temporary proxies (see
/// `proxy_all_segments_and_apply`) so incoming writes keep flowing while
/// long-running read locks are held.
///
/// Shortcuts at the first failing segment snapshot.
#[expect(clippy::too_many_arguments)]
pub fn snapshot_all_segments(
    segments: LockedSegmentHolder,
    segments_path: &Path,
    segment_config: Option<SegmentConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    temp_dir: &Path,
    tar: &tar_ext::BuilderExt,
    format: SnapshotFormat,
    manifest: Option<&SnapshotManifest>,
    // Update lock prevents segment operations during update.
    // For instance, we can't unproxy segments while update operation is in progress.
    update_lock: Arc<tokio::sync::RwLock<()>>,
) -> OperationResult<()> {
    // Snapshotting may take long-running read locks on segments blocking incoming writes, do
    // this through proxied segments to allow writes to continue.
    proxy_all_segments_and_apply(
        segments,
        segments_path,
        segment_config,
        payload_index_schema,
        |segment| {
            let read_segment = segment.read();
            read_segment.take_snapshot(temp_dir, tar, format, manifest)?;
            Ok(())
        },
        update_lock,
    )
}
/// Temporarily proxify all segments and apply function `f` to it.
///
/// Intended to smoothly accept writes while performing long-running read operations on each
/// segment, such as during snapshotting. It should prevent blocking reads on segments for any
/// significant amount of time.
///
/// This calls function `f` on all segments, but each segment is temporarily proxified while
/// the function is called.
///
/// All segments are proxified at the same time on start. That ensures each wrapped (proxied)
/// segment is kept at the same point in time. Each segment is unproxied one by one, right
/// after function `f` has been applied. That helps keeping proxies as shortlived as possible.
///
/// A read lock is kept during the whole process to prevent external actors from messing with
/// the segment holder while segments are in proxified state. That means no other actors can
/// take a write lock while this operation is running.
///
/// As part of this process, a new segment is created. All proxies direct their writes to this
/// segment. The segment is added to the collection if it has any operations, otherwise it is
/// deleted when all segments are unproxied again.
///
/// It is recommended to provide collection parameters. The segment configuration will be
/// sourced from it.
///
/// Before snapshotting all segments are forcefully flushed to ensure all data is persisted.
pub fn proxy_all_segments_and_apply<F>(
    segments: LockedSegmentHolder,
    segments_path: &Path,
    segment_config: Option<SegmentConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    mut operation: F,
    update_lock: Arc<tokio::sync::RwLock<()>>,
) -> OperationResult<()>
where
    F: FnMut(&RwLock<dyn SegmentEntry>) -> OperationResult<()>,
{
    let segments_lock = segments.upgradable_read();

    // Proxy all segments
    // Proxied segments are sorted by flush ordering
    log::trace!("Proxying all shard segments to apply function");
    let (mut proxies, tmp_segment_id, mut segments_lock) = SegmentHolder::proxy_all_segments(
        segments_lock,
        segments_path,
        segment_config,
        payload_index_schema,
    )?;

    // Flush all pending changes of each segment, now wrapped segments won't change anymore
    segments_lock.flush_all(true, true)?;

    // Apply provided function
    log::trace!("Applying function on all proxied shard segments");
    let mut result = Ok(());
    let mut unproxied_segment_ids = Vec::with_capacity(proxies.len());

    // Proxied segments are sorted by flush ordering, important because the operation we apply
    // might explicitly flush segments
    for (segment_id, proxy_segment) in &proxies {
        // Get segment to snapshot
        let op_result = match proxy_segment {
            LockedSegment::Proxy(proxy_segment) => {
                let guard = proxy_segment.read();
                let segment = guard.wrapped_segment.get();
                // Call provided function on wrapped segment while holding guard to parent segment
                operation(segment)
            }
            // All segments to snapshot should be proxy, warn if this is not the case
            LockedSegment::Original(segment) => {
                debug_assert!(
                    false,
                    "Reached non-proxy segment while applying function to proxies, this should not happen, ignoring",
                );
                // Call provided function on segment
                operation(segment.as_ref())
            }
        };
        // On the first failure, stop applying but still fall through to unproxy below.
        if let Err(err) = op_result {
            result = Err(OperationError::service_error(format!(
                "Applying function to a proxied shard segment {segment_id} failed: {err}"
            )));
            break;
        }

        // Try to unproxy/release this segment since we don't use it anymore
        // Unproxying now lets us release the segment earlier, prevent unnecessary writes to the temporary segment.
        // Make sure to keep at least one proxy segment to maintain access to the points in the shared write segment.
        // The last proxy and the shared write segment will be promoted into the segment_holder atomically
        // by `Self::unproxy_all_segments` afterwards to maintain the read consistency.
        let remaining = proxies.len() - unproxied_segment_ids.len();
        if remaining > 1 {
            // Block concurrent updates while swapping the proxy out.
            let _update_guard = update_lock.blocking_write();
            match SegmentHolder::try_unproxy_segment(
                segments_lock,
                *segment_id,
                proxy_segment.clone(),
            ) {
                Ok(lock) => {
                    segments_lock = lock;
                    unproxied_segment_ids.push(*segment_id);
                }
                // Unproxying can fail non-fatally; keep the proxy for the final pass.
                Err(lock) => segments_lock = lock,
            }
        }
    }
    proxies.retain(|(id, _)| !unproxied_segment_ids.contains(id));

    // Unproxy all segments
    // Always do this to prevent leaving proxy segments behind
    log::trace!("Unproxying all shard segments after function is applied");
    let _update_guard = update_lock.blocking_write();
    SegmentHolder::unproxy_all_segments(segments_lock, proxies, tmp_segment_id)?;

    result
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/disk_usage_watcher.rs | lib/collection/src/shards/local_shard/disk_usage_watcher.rs | use std::path::PathBuf;
use tokio::sync::Mutex;
use tokio::time::Instant;
use tokio_util::task::AbortOnDropHandle;
use crate::operations::types::{CollectionError, CollectionResult};
/// Defines how often the disk usage should be checked if the disk is far from being full
///
/// I.e. every `DEFAULT_FREQUENCY`-th call to `is_disk_full` performs a real check.
const DEFAULT_FREQUENCY: usize = 128;
/// Defines how frequent the check of the disk usage should be, depending on the free space
///
/// The idea is that if we have a lot of disk space, it is unlikely that it will be out of space soon,
/// so we can check it less frequently. But if we have a little space, we should check it more often
/// and at some point, we should check it every time
///
/// Entries are `(free-space threshold in MB, calls to skip between checks)`,
/// matched in ascending order against the current free space.
// NOTE(review): the last threshold is 8096 MB — possibly a typo for 8192 (8 GiB); confirm intent.
const FREE_SPACE_TO_CHECK_FREQUENCY_HEURISTIC_MB: &[(usize, usize); 5] =
    &[(512, 0), (1024, 8), (2048, 16), (4096, 32), (8096, 64)];
/// Even if there were no many updates, we still want to force the check of the disk usage
/// because some external process could have consumed the disk space
const MIN_DISK_CHECK_INTERVAL_MILLIS: usize = 2000;
/// Book-keeping state for throttling disk checks.
#[derive(Default)]
struct LastCheck {
    // When the disk was last actually queried; `None` before the first check.
    last_check_time: Option<Instant>,
    // How many more `is_disk_full` calls to skip before the next real check.
    next_check_count: usize,
}
/// Watches free disk space for a storage path, throttling the filesystem queries.
pub struct DiskUsageWatcher {
    // Path whose filesystem is queried for available space.
    disk_path: PathBuf,
    // Set when free space cannot be determined; all checks then return `None`.
    disabled: bool,
    // Threshold below which the disk is reported as full.
    min_free_disk_size_mb: usize,
    // Throttling state, guarded by an async mutex.
    last_check: Mutex<LastCheck>,
}
impl DiskUsageWatcher {
pub async fn new(disk_path: PathBuf, min_free_disk_size_mb: usize) -> Self {
let mut watcher = Self {
disk_path,
disabled: false,
min_free_disk_size_mb,
last_check: Default::default(),
};
match watcher.is_disk_full().await {
Ok(Some(_)) => {} // do nothing
Ok(None) => watcher.disabled = true,
Err(_) => {
watcher.disabled = true;
}
};
watcher
}
    /// Returns true if the disk free space is less than the `disk_buffer_threshold_mb`
    /// As the side effect, it updates the disk usage every `update_count_threshold` calls
    ///
    /// Returns `Ok(None)` when the watcher is disabled, when this call was
    /// throttled (no real check performed), or when the free space could not
    /// be determined.
    pub async fn is_disk_full(&self) -> CollectionResult<Option<bool>> {
        if self.disabled {
            return Ok(None);
        }
        let mut last_check_guard = self.last_check.lock().await;
        // Milliseconds since the last real check; MAX forces a check on the first call.
        let since_last_check = last_check_guard
            .last_check_time
            .map(|x| x.elapsed().as_millis() as usize)
            .unwrap_or(usize::MAX);

        // Check either when the countdown expired or when enough wall-clock
        // time passed (external processes may also consume disk space).
        if last_check_guard.next_check_count == 0
            || since_last_check >= MIN_DISK_CHECK_INTERVAL_MILLIS
        {
            let free_space = self.get_free_space_bytes().await?;
            last_check_guard.last_check_time = Some(Instant::now());
            let is_full = match free_space {
                Some(free_space) => {
                    let free_space = free_space as usize;
                    // Pick the next countdown from the heuristic table: the
                    // less space is left, the sooner we re-check.
                    let mut next_check = DEFAULT_FREQUENCY;
                    for (threshold_mb, interval) in FREE_SPACE_TO_CHECK_FREQUENCY_HEURISTIC_MB {
                        if free_space < (*threshold_mb * 1024 * 1024) {
                            next_check = *interval;
                            break;
                        }
                    }
                    last_check_guard.next_check_count = next_check;
                    Some(free_space < self.min_free_disk_size_mb * 1024 * 1024)
                }
                None => {
                    // Free space unknown: force a real check on the next call.
                    last_check_guard.next_check_count = 0;
                    None
                }
            };
            Ok(is_full)
        } else {
            // Throttled: consume one tick of the countdown, report "unknown".
            last_check_guard.next_check_count = last_check_guard.next_check_count.saturating_sub(1);
            Ok(None)
        }
    }
/// Return current disk usage in bytes, if available
pub async fn get_free_space_bytes(&self) -> CollectionResult<Option<u64>> {
if self.disabled {
return Ok(None);
}
let path = self.disk_path.clone();
let result = AbortOnDropHandle::new(tokio::task::spawn_blocking(move || {
fs4::available_space(path.as_path())
}))
.await
.map_err(|e| CollectionError::service_error(format!("Failed to join async task: {e}")))?;
let result = match result {
Ok(result) => Some(result),
Err(err) => {
log::debug!(
"Failed to get free space for path: {} due to: {}",
self.disk_path.as_path().display(),
err
);
None
}
};
Ok(result)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/local_shard/snapshot_tests.rs | lib/collection/src/shards/local_shard/snapshot_tests.rs | use std::collections::HashSet;
use std::sync::Arc;
use common::save_on_disk::SaveOnDisk;
use common::tar_ext;
use fs_err::File;
use parking_lot::RwLock;
use segment::types::SnapshotFormat;
use shard::fixtures::{build_segment_1, build_segment_2};
use shard::payload_index_schema::PayloadIndexSchema;
use shard::segment_holder::SegmentHolder;
use tempfile::Builder;
use crate::shards::local_shard::snapshot::snapshot_all_segments;
#[test]
// Snapshots two segments via `snapshot_all_segments` and verifies that the
// segment holder is left unchanged and the archive contains one entry per segment.
fn test_snapshot_all() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let segment1 = build_segment_1(dir.path());
    let segment2 = build_segment_2(dir.path());

    let mut holder = SegmentHolder::default();

    let sid1 = holder.add_new(segment1);
    let sid2 = holder.add_new(segment2);
    assert_ne!(sid1, sid2);

    let holder = Arc::new(RwLock::new(holder));

    // Remember the segment IDs to compare against after snapshotting.
    let before_ids = holder
        .read()
        .iter()
        .map(|(id, _)| *id)
        .collect::<HashSet<_>>();

    let segments_dir = Builder::new().prefix("segments_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap();
    let snapshot_file = Builder::new().suffix(".snapshot.tar").tempfile().unwrap();
    let tar = tar_ext::BuilderExt::new_seekable_owned(File::create(snapshot_file.path()).unwrap());

    let payload_schema_file = dir.path().join("payload.schema");
    let schema: Arc<SaveOnDisk<PayloadIndexSchema>> =
        Arc::new(SaveOnDisk::load_or_init_default(payload_schema_file).unwrap());

    let update_lock = Arc::new(tokio::sync::RwLock::new(()));

    snapshot_all_segments(
        holder.clone(),
        segments_dir.path(),
        None,
        schema,
        temp_dir.path(),
        &tar,
        SnapshotFormat::Regular,
        None,
        update_lock,
    )
    .unwrap();

    // Snapshotting must not add or remove segments from the holder.
    let after_ids = holder
        .read()
        .iter()
        .map(|(id, _)| *id)
        .collect::<HashSet<_>>();
    assert_eq!(
        before_ids, after_ids,
        "segment holder IDs before and after snapshotting must be equal",
    );

    let mut tar = tar::Archive::new(File::open(snapshot_file.path()).unwrap());
    let archive_count = tar.entries_with_seek().unwrap().count();
    // one archive produced per concrete segment in the SegmentHolder
    assert_eq!(archive_count, 2);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/collection/src/shards/shard_holder/shard_mapping.rs | lib/collection/src/shards/shard_holder/shard_mapping.rs | use std::collections::{HashMap, HashSet};
use std::ops;
use ahash::AHashMap;
use itertools::Itertools;
use segment::types::ShardKey;
use serde::{Deserialize, Serialize};
use crate::shards::shard::ShardId;
/// Shard key mapping type
///
/// Maps each shard key to the set of shard IDs that hold its data.
/// (De)serialized through `SerdeHelper`, which accepts both the current
/// pair-list format and the legacy map format.
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
#[serde(from = "SerdeHelper", into = "SerdeHelper")]
pub struct ShardKeyMapping {
    shard_key_to_shard_ids: HashMap<ShardKey, HashSet<ShardId>>,

    /// `true` if the ShardKeyMapping was specified in the old format.
    // TODO(1.17.0): Remove once all keys are migrated.
    #[serde(skip)]
    pub(crate) was_old_format: bool,
}
// Allow a `ShardKeyMapping` to be used directly as the underlying
// `HashMap<ShardKey, HashSet<ShardId>>` for reads (e.g. `.is_empty()`, `.iter()`)...
impl ops::Deref for ShardKeyMapping {
    type Target = HashMap<ShardKey, HashSet<ShardId>>;

    fn deref(&self) -> &Self::Target {
        &self.shard_key_to_shard_ids
    }
}

// ...and for in-place mutation of the mapping.
impl ops::DerefMut for ShardKeyMapping {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.shard_key_to_shard_ids
    }
}
impl ShardKeyMapping {
/// Get an inverse mapping, all shard IDs and their key
pub fn shard_id_to_shard_key(&self) -> AHashMap<ShardId, ShardKey> {
self.shard_key_to_shard_ids
.iter()
.flat_map(|(shard_key, shard_ids)| {
shard_ids
.iter()
.map(|shard_id| (*shard_id, shard_key.clone()))
})
.collect()
}
/// Return all shard IDs from the mappings
pub fn shard_ids(&self) -> Vec<ShardId> {
let ids = self.iter_shard_ids().collect::<Vec<_>>();
debug_assert!(
ids.iter().all_unique(),
"shard mapping contains duplicate shard IDs",
);
ids
}
/// Iterate over all shard IDs from the mappings
pub fn iter_shard_ids(&self) -> impl Iterator<Item = ShardId> {
self.shard_key_to_shard_ids
.values()
.flat_map(|shard_ids| shard_ids.iter().copied())
}
/// Iterate over all shard keys
pub fn iter_shard_keys(&self) -> impl Iterator<Item = &ShardKey> {
self.shard_key_to_shard_ids.keys()
}
/// Get the shard key for a given shard ID
///
/// `None` is returned if the shard ID has no key, or if the shard ID is unknown
pub fn shard_key(&self, shard_id: ShardId) -> Option<ShardKey> {
self.shard_key_to_shard_ids
.iter()
.find(|(_, shard_ids)| shard_ids.contains(&shard_id))
.map(|(key, _)| key.clone())
}
}
impl From<SerdeHelper> for ShardKeyMapping {
    /// Reconstruct the in-memory mapping from either persisted format,
    /// remembering whether the old on-disk layout was encountered.
    fn from(helper: SerdeHelper) -> Self {
        match helper {
            // New format: list of key/IDs pairs, key type preserved.
            SerdeHelper::New(key_ids_pairs) => Self {
                shard_key_to_shard_ids: key_ids_pairs
                    .into_iter()
                    .map(KeyIdsPair::into_parts)
                    .collect(),
                was_old_format: false,
            },
            // Old format: plain map; flag it so callers can migrate.
            SerdeHelper::Old(key_ids_map) => Self {
                shard_key_to_shard_ids: key_ids_map,
                was_old_format: true,
            },
        }
    }
}
/// Helper structure for persisting shard key mapping
///
/// The original format of persisting shard key mappings as hash map is broken. It forgets type
/// information for the shard key, which resulted in shard key numbers to be converted into
/// strings.
///
/// Bug: <https://github.com/qdrant/qdrant/pull/5838>
#[derive(Deserialize, Serialize)]
#[serde(untagged)]
enum SerdeHelper {
    /// Current format: a list of key/IDs pairs, which keeps the shard key type intact.
    New(Vec<KeyIdsPair>),

    /// Legacy format: a plain map, which loses the shard key's number-vs-string type.
    // TODO(1.15): remove this old format, deployment should exclusively be using new format
    Old(HashMap<ShardKey, HashSet<ShardId>>),
}
impl From<ShardKeyMapping> for SerdeHelper {
fn from(mapping: ShardKeyMapping) -> Self {
let key_ids_pairs = mapping
.shard_key_to_shard_ids
.into_iter()
.map(KeyIdsPair::from)
.collect();
Self::New(key_ids_pairs)
}
}
/// A shard key together with the shard IDs it maps to
///
/// The unit of the new persisted format (`SerdeHelper::New`).
#[derive(Clone, Debug, Deserialize, Serialize)]
struct KeyIdsPair {
    /// Shard key
    ///
    /// The key is persisted as untagged variant. The JSON value gives us enough information
    /// however to distinguish between a shard key number and string.
    key: ShardKey,

    /// Associated shard IDs.
    shard_ids: HashSet<ShardId>,
}
impl KeyIdsPair {
    /// Decompose the pair into its shard key and associated shard-ID set.
    fn into_parts(self) -> (ShardKey, HashSet<ShardId>) {
        (self.key, self.shard_ids)
    }
}
impl From<(ShardKey, HashSet<ShardId>)> for KeyIdsPair {
fn from((key, shard_ids): (ShardKey, HashSet<ShardId>)) -> Self {
Self { key, shard_ids }
}
}
#[cfg(test)]
mod test {
    use std::sync::Arc;

    use common::budget::ResourceBudget;
    use common::counter::hardware_accumulator::HwMeasurementAcc;
    use fs_err::File;
    use segment::types::{PayloadFieldSchema, PayloadSchemaType};
    use tempfile::{Builder, TempDir};

    use super::*;
    use crate::collection::{Collection, RequestShardTransfer};
    use crate::config::{CollectionConfigInternal, CollectionParams, ShardingMethod, WalConfig};
    use crate::operations::shared_storage_config::SharedStorageConfig;
    use crate::optimizers_builder::OptimizersConfig;
    use crate::shards::channel_service::ChannelService;
    use crate::shards::collection_shard_distribution::CollectionShardDistribution;
    use crate::shards::replica_set::replica_set_state::ReplicaState;
    use crate::shards::replica_set::{AbortShardTransfer, ChangePeerFromState};
    use crate::shards::shard_holder::SHARD_KEY_MAPPING_FILE;

    const COLLECTION_TEST_NAME: &str = "shard_key_test";

    /// Build a minimal collection fixture in `collection_dir`, configured for
    /// custom sharding (required so shard keys can be created explicitly) and
    /// with a single integer payload index created on it.
    async fn make_collection(collection_name: &str, collection_dir: &TempDir) -> Collection {
        let wal_config = WalConfig::default();
        let mut collection_params = CollectionParams::empty();
        collection_params.sharding_method = Some(ShardingMethod::Custom);
        let config = CollectionConfigInternal {
            params: collection_params,
            optimizer_config: OptimizersConfig::fixture(),
            wal_config,
            hnsw_config: Default::default(),
            quantization_config: Default::default(),
            strict_mode_config: None,
            uuid: None,
            metadata: None,
        };
        let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
        let collection = Collection::new(
            collection_name.to_string(),
            0,
            collection_dir.path(),
            snapshots_path.path(),
            &config,
            Arc::new(SharedStorageConfig::default()),
            CollectionShardDistribution::all_local(None, 0),
            None,
            ChannelService::default(),
            dummy_on_replica_failure(),
            dummy_request_shard_transfer(),
            dummy_abort_shard_transfer(),
            None,
            None,
            ResourceBudget::default(),
            None,
        )
        .await
        .expect("Failed to create new fixture collection");
        collection
            .create_payload_index(
                "field".parse().unwrap(),
                PayloadFieldSchema::FieldType(PayloadSchemaType::Integer),
                HwMeasurementAcc::new(),
            )
            .await
            .expect("failed to create payload index");
        collection
    }

    // No-op callback stand-ins for the collection's cluster hooks.
    pub fn dummy_on_replica_failure() -> ChangePeerFromState {
        Arc::new(move |_peer_id, _shard_id, _from_state| {})
    }

    pub fn dummy_request_shard_transfer() -> RequestShardTransfer {
        Arc::new(move |_transfer| {})
    }

    pub fn dummy_abort_shard_transfer() -> AbortShardTransfer {
        Arc::new(|_transfer, _reason| {})
    }

    /// End-to-end check that a shard-key mapping persisted in the legacy map
    /// format is rewritten in the new pair-list format when the collection is
    /// loaded.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_shard_key_migration() {
        let collection_dir = Builder::new().prefix("test_collection").tempdir().unwrap();
        // Create a collection with one shard key; the scope ends so the
        // collection is dropped and its state is left on disk.
        {
            let collection = make_collection(COLLECTION_TEST_NAME, &collection_dir).await;
            collection
                .create_shard_key(
                    ShardKey::Keyword("helloworld".into()),
                    vec![vec![]],
                    ReplicaState::Active,
                )
                .await
                .unwrap();
        }
        let shard_mapping_file = collection_dir.path().join(SHARD_KEY_MAPPING_FILE);
        let shard_key_data: SerdeHelper = {
            let file = File::open(&shard_mapping_file).unwrap();
            serde_json::from_reader(file).unwrap()
        };
        let shard_key_data = ShardKeyMapping::from(shard_key_data);
        // Ensure we have at least one shard key.
        assert!(!shard_key_data.is_empty());
        // Convert to old shard key and overwrite file on disk.
        {
            let old_shard_key_data = SerdeHelper::Old(shard_key_data.shard_key_to_shard_ids);
            let mut writer = File::create(&shard_mapping_file).unwrap();
            serde_json::to_writer(&mut writer, &old_shard_key_data).unwrap();
        }
        // Ensure on disk is now the old version.
        {
            let shard_key_data: SerdeHelper =
                serde_json::from_reader(File::open(&shard_mapping_file).unwrap()).unwrap();
            assert!(matches!(shard_key_data, SerdeHelper::Old(..)));
        }
        let snapshots_path = Builder::new().prefix("test_snapshots").tempdir().unwrap();
        // Load collection once to trigger migration to the new shard-key format.
        {
            Collection::load(
                COLLECTION_TEST_NAME.to_string(),
                0,
                collection_dir.path(),
                snapshots_path.path(),
                Default::default(),
                ChannelService::default(),
                dummy_on_replica_failure(),
                dummy_request_shard_transfer(),
                dummy_abort_shard_transfer(),
                None,
                None,
                ResourceBudget::default(),
                None,
            )
            .await;
        }
        let shard_key_data: SerdeHelper =
            { serde_json::from_reader(File::open(&shard_mapping_file).unwrap()).unwrap() };
        // Now we have the new key on disk!
        assert!(matches!(shard_key_data, SerdeHelper::New(..)));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.