| repo (string, 6–65 chars) | file_url (string, 81–311 chars) | file_path (string, 6–227 chars) | content (string, 0–32.8k chars) | language (string, 1 class) | license (string, 7 classes) | commit_sha (string, 40 chars) | retrieved_at (date, 2026-01-04 15:31:58 – 2026-01-04 20:25:31) | truncated (bool, 2 values) |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/top_k.rs | lib/common/common/src/top_k.rs | use std::cmp::Reverse;
use ordered_float::Float;
use crate::types::{ScoreType, ScoredPointOffset};
/// TopK implementation following the median algorithm described in
/// <https://quickwit.io/blog/top-k-complexity>.
///
/// Keeps the largest `k` ScoredPointOffset.
/// Accumulator for the `k` largest `ScoredPointOffset`s.
///
/// Buffers up to `2k` candidates, then prunes back down to `k` with a
/// median-selection pass (see algorithm link in the doc comment above).
///
/// NOTE(review): `#[derive(Default)]` yields `k = 0` and `threshold = 0.0`,
/// which differs from `new`'s `ScoreType::min_value()` — confirm the derived
/// default is never used for actual collection.
#[derive(Default)]
pub struct TopK {
    // Number of results to keep.
    k: usize,
    // Candidate buffer; pruned whenever it reaches `2k` entries.
    // `Reverse` wrapper: ascending order of `Reverse` == descending score.
    elements: Vec<Reverse<ScoredPointOffset>>,
    // Lower bound for admission: scores <= threshold can never be in the
    // top `k`. Starts at `ScoreType::min_value()`, raised on every prune.
    threshold: ScoreType,
}
impl TopK {
    /// Creates a collector for the `k` largest elements.
    ///
    /// Capacity is `2k` up front so `push` never reallocates.
    pub fn new(k: usize) -> Self {
        TopK {
            k,
            elements: Vec::with_capacity(2 * k),
            threshold: ScoreType::min_value(),
        }
    }

    /// Number of buffered candidates (may exceed `k` between prunes).
    pub fn len(&self) -> usize {
        self.elements.len()
    }

    pub fn is_empty(&self) -> bool {
        self.elements.is_empty()
    }

    /// Returns the minimum score of the top k elements.
    ///
    /// Updated every 2k elements.
    /// Initially set to `ScoreType::MIN`.
    pub fn threshold(&self) -> ScoreType {
        self.threshold
    }

    /// Offers `element`; it is discarded immediately unless its score
    /// strictly beats the current threshold.
    pub fn push(&mut self, element: ScoredPointOffset) {
        if element.score > self.threshold {
            self.elements.push(Reverse(element));
            // check if full
            if self.elements.len() == self.k * 2 {
                // Partition so the k highest-scoring elements land in
                // `elements[..k]`. Index `k - 1` in `Reverse` order is the
                // k-th largest overall, i.e. the smallest score that still
                // belongs to the top k — the new admission threshold.
                let (_, median_el, _) = self.elements.select_nth_unstable(self.k - 1);
                self.threshold = median_el.0.score;
                self.elements.truncate(self.k);
            }
        }
    }

    /// Consumes the collector, returning at most `k` elements sorted by
    /// descending score.
    pub fn into_vec(mut self) -> Vec<ScoredPointOffset> {
        // Ascending sort of `Reverse` wrappers == descending by score.
        self.elements.sort_unstable();
        self.elements.truncate(self.k);
        self.elements.into_iter().map(|Reverse(x)| x).collect()
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// A fresh collector is empty, pre-allocates `2k`, threshold is MIN.
    #[test]
    fn empty_with_double_capacity() {
        let top_k = TopK::new(3);
        assert_eq!(top_k.len(), 0);
        assert_eq!(top_k.elements.capacity(), 2 * 3);
        assert_eq!(top_k.threshold(), ScoreType::MIN);
    }

    /// Fewer than `k` pushes: no prune, threshold stays MIN, output sorted.
    #[test]
    fn test_top_k_under() {
        let mut top_k = TopK::new(3);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 1 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 1);
        top_k.push(ScoredPointOffset { score: 2.0, idx: 2 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 2);
        let res = top_k.into_vec();
        assert_eq!(res.len(), 2);
        assert_eq!(res[0].score, 2.0);
        assert_eq!(res[1].score, 1.0);
    }

    /// More than `k` but fewer than `2k` pushes: still no prune; `into_vec`
    /// trims to `k`.
    #[test]
    fn test_top_k_over() {
        let mut top_k = TopK::new(3);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 1 });
        assert_eq!(top_k.len(), 1);
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        top_k.push(ScoredPointOffset { score: 3.0, idx: 3 });
        assert_eq!(top_k.len(), 2);
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        top_k.push(ScoredPointOffset { score: 2.0, idx: 2 });
        assert_eq!(top_k.len(), 3);
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        top_k.push(ScoredPointOffset { score: 4.0, idx: 4 });
        assert_eq!(top_k.len(), 4);
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        let res = top_k.into_vec();
        assert_eq!(res.len(), 3);
        assert_eq!(res[0].score, 4.0);
        assert_eq!(res[1].score, 3.0);
        assert_eq!(res[2].score, 2.0);
    }

    /// The `2k`-th push triggers a prune: threshold rises to the k-th
    /// largest score and the buffer shrinks back to `k` (capacity retained).
    #[test]
    fn test_top_k_pruned() {
        let mut top_k = TopK::new(3);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 1 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 1);
        top_k.push(ScoredPointOffset { score: 4.0, idx: 4 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 2);
        top_k.push(ScoredPointOffset { score: 2.0, idx: 2 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 3);
        top_k.push(ScoredPointOffset { score: 5.0, idx: 5 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 4);
        top_k.push(ScoredPointOffset { score: 3.0, idx: 3 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 5);
        top_k.push(ScoredPointOffset { score: 6.0, idx: 6 });
        assert_eq!(top_k.threshold(), 4.0);
        assert_eq!(top_k.len(), 3);
        assert_eq!(top_k.elements.capacity(), 6);
        let res = top_k.into_vec();
        assert_eq!(res.len(), 3);
        assert_eq!(res[0].score, 6.0);
        assert_eq!(res[1].score, 5.0);
        assert_eq!(res[2].score, 4.0);
    }

    /// Score ties: prune keeps a deterministic pair of equal-scored entries.
    /// NOTE(review): which tied entries survive depends on
    /// `ScoredPointOffset`'s `Ord` (defined in types.rs) — confirm there.
    #[test]
    fn test_top_same_scores() {
        let mut top_k = TopK::new(3);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 1 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 1);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 4 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 2);
        top_k.push(ScoredPointOffset { score: 2.0, idx: 2 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 3);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 5 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 4);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 3 });
        assert_eq!(top_k.threshold(), ScoreType::MIN);
        assert_eq!(top_k.len(), 5);
        top_k.push(ScoredPointOffset { score: 1.0, idx: 6 });
        assert_eq!(top_k.threshold(), 1.0);
        assert_eq!(top_k.len(), 3);
        assert_eq!(top_k.elements.capacity(), 6);
        let res = top_k.into_vec();
        assert_eq!(res.len(), 3);
        assert_eq!(res[0], ScoredPointOffset { score: 2.0, idx: 2 });
        assert_eq!(res[1], ScoredPointOffset { score: 1.0, idx: 1 });
        assert_eq!(res[2], ScoredPointOffset { score: 1.0, idx: 4 });
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/save_on_disk.rs | lib/common/common/src/save_on_disk.rs | use std::io::{BufReader, BufWriter, Write};
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::Duration;
use atomicwrites::OverwriteBehavior::AllowOverwrite;
use atomicwrites::{AtomicFile, Error as AtomicWriteError};
use fs_err::{File, tokio as tokio_fs};
use parking_lot::{Condvar, Mutex, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard};
use serde::{Deserialize, Serialize};
use crate::tar_ext;
/// Functions as a smart pointer which gives a write guard and saves data on disk
/// when write guard is dropped.
#[derive(Debug, Default)]
pub struct SaveOnDisk<T> {
    // Signaled by `write`/`write_optional` after the in-memory value is
    // replaced; `wait_for` blocks on it.
    change_notification: Condvar,
    // Dedicated mutex for `change_notification`, kept separate from `data`
    // so waiters don't park while holding the RwLock.
    notification_lock: Mutex<()>,
    // In-memory value; the file is rewritten *before* this is updated.
    data: RwLock<T>,
    // Path of the JSON file backing `data`.
    path: PathBuf,
}
/// Errors produced by [`SaveOnDisk`] operations.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    // Atomic write-and-rename to the target path failed.
    #[error("Failed to save structure on disk with error: {0}")]
    AtomicWrite(#[from] AtomicWriteError<serde_json::Error>),
    #[error("Failed to perform io operation: {0}")]
    IoError(#[from] std::io::Error),
    #[error("Failed to (de)serialize from/to json: {0}")]
    JsonError(#[from] serde_json::Error),
    // Propagated from a caller-supplied closure (see `write`-style APIs).
    #[error("Error in write closure: {0}")]
    FromClosure(Box<dyn std::error::Error>),
}
impl<T: Serialize + for<'de> Deserialize<'de> + Clone> SaveOnDisk<T> {
    /// Load data from disk at the given path, or initialize the default if it doesn't exist.
    pub fn load_or_init_default(path: impl Into<PathBuf>) -> Result<Self, Error>
    where
        T: Default,
    {
        Self::load_or_init(path, T::default)
    }

    /// Load data from disk at the given path, or initialize it with `init` if it doesn't exist.
    ///
    /// Note: a freshly `init`ed value is not persisted until the first save/write.
    pub fn load_or_init(path: impl Into<PathBuf>, init: impl FnOnce() -> T) -> Result<Self, Error> {
        let path: PathBuf = path.into();
        let data = if path.exists() {
            // Existing file: deserialize JSON through a buffered reader.
            let file = BufReader::new(File::open(&path)?);
            serde_json::from_reader(file)?
        } else {
            init()
        };
        Ok(Self {
            change_notification: Condvar::new(),
            notification_lock: Default::default(),
            data: RwLock::new(data),
            path,
        })
    }

    /// Initialize new data, even if it already exists on disk at the given path.
    ///
    /// If data already exists on disk, it will be immediately overwritten.
    pub fn new(path: impl Into<PathBuf>, data: T) -> Result<Self, Error> {
        let data = Self {
            change_notification: Condvar::new(),
            notification_lock: Default::default(),
            data: RwLock::new(data),
            path: path.into(),
        };
        // Only overwrite a pre-existing file; otherwise defer the first write
        // until an explicit save/write call.
        if data.path.exists() {
            data.save()?;
        }
        Ok(data)
    }

    /// Wait for a condition on data to be true.
    ///
    /// Returns `true` if condition is true, `false` if timed out.
    ///
    /// NOTE(review): each inner condvar wait uses the full `timeout`, so a
    /// wakeup near the deadline can stretch the total wait toward 2x
    /// `timeout` — confirm that is acceptable for callers.
    #[must_use]
    pub fn wait_for<F>(&self, check: F, timeout: Duration) -> bool
    where
        F: Fn(&T) -> bool,
    {
        let start = std::time::Instant::now();
        while start.elapsed() < timeout {
            let mut data_read_guard = self.data.read();
            if check(&data_read_guard) {
                return true;
            }
            let notification_guard = self.notification_lock.lock();
            // Based on https://github.com/Amanieu/parking_lot/issues/165
            RwLockReadGuard::unlocked(&mut data_read_guard, || {
                // Move the guard in so it gets unlocked before we re-lock the RwLock read guard
                let mut guard = notification_guard;
                self.change_notification.wait_for(&mut guard, timeout);
            });
        }
        false
    }

    /// Perform an operation over the stored data,
    /// persisting the result to disk if the operation returns `Some`.
    ///
    /// If the operation returns `None`, assumes that data has not changed
    pub fn write_optional(&self, f: impl FnOnce(&T) -> Option<T>) -> Result<bool, Error> {
        // Upgradable read: other writers are excluded while `f` runs, but
        // plain readers are only blocked during the final upgrade.
        let read_data = self.data.upgradable_read();
        let output_opt = f(&read_data);
        if let Some(output) = output_opt {
            // Persist first; the in-memory value is only replaced if the
            // disk write succeeded.
            Self::save_data_to(&self.path, &output)?;
            let mut write_data = RwLockUpgradableReadGuard::upgrade(read_data);
            *write_data = output;
            self.change_notification.notify_all();
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Mutate a clone of the data via `f`, persist it, then swap it in.
    ///
    /// On any persistence error the in-memory value remains unchanged.
    pub fn write<O>(&self, f: impl FnOnce(&mut T) -> O) -> Result<O, Error> {
        let read_data = self.data.upgradable_read();
        let mut data_copy = (*read_data).clone();
        let output = f(&mut data_copy);
        Self::save_data_to(&self.path, &data_copy)?;
        let mut write_data = RwLockUpgradableReadGuard::upgrade(read_data);
        *write_data = data_copy;
        self.change_notification.notify_all();
        Ok(output)
    }

    /// Serialize `data` as JSON and atomically replace the file at `path`
    /// (write-to-temp + rename, via the `atomicwrites` crate).
    fn save_data_to(path: impl Into<PathBuf>, data: &T) -> Result<(), Error> {
        let path: PathBuf = path.into();
        AtomicFile::new(path, AllowOverwrite).write(|file| {
            let mut writer = BufWriter::new(file);
            serde_json::to_writer(&mut writer, data)?;
            // Flush explicitly; the closure's error type is serde_json's.
            writer.flush().map_err(serde_json::Error::io)
        })?;
        Ok(())
    }

    /// Persist the current in-memory value to this instance's own path.
    pub fn save(&self) -> Result<(), Error> {
        self.save_to(&self.path)
    }

    /// Persist the current in-memory value to an arbitrary path.
    pub fn save_to(&self, path: impl Into<PathBuf>) -> Result<(), Error> {
        Self::save_data_to(path, &self.data.read())
    }

    /// Serialize the current value and append it to a tar archive at `path`.
    pub async fn save_to_tar(
        &self,
        tar: &tar_ext::BuilderExt,
        path: impl AsRef<Path>,
    ) -> Result<(), Error> {
        let data_bytes = serde_json::to_vec(self.data.read().deref())?;
        tar.append_data(data_bytes, path.as_ref()).await?;
        Ok(())
    }

    /// Consume this instance and remove its backing file from disk.
    pub async fn delete(self) -> std::io::Result<()> {
        tokio_fs::remove_file(self.path).await
    }
}
// Expose the inner `RwLock` directly so callers can `.read()` without a
// wrapper method. Note: writing through the lock bypasses persistence;
// use `write`/`write_optional` to save changes.
impl<T> Deref for SaveOnDisk<T> {
    type Target = RwLock<T>;
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}
// Mutable access to the lock itself (requires exclusive ownership of the
// wrapper); changes made this way are not persisted automatically.
impl<T> DerefMut for SaveOnDisk<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.data
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::thread;
    use std::thread::sleep;
    use std::time::Duration;

    use fs_err as fs;
    use tempfile::Builder;

    use super::SaveOnDisk;

    /// Every `write` immediately persists the new value as JSON on disk.
    #[test]
    fn saves_data() {
        let dir = Builder::new().prefix("test").tempdir().unwrap();
        let counter_file = dir.path().join("counter");
        let counter: SaveOnDisk<u32> = SaveOnDisk::load_or_init_default(&counter_file).unwrap();
        counter.write(|counter| *counter += 1).unwrap();
        assert_eq!(*counter.read(), 1);
        assert_eq!(
            counter.read().to_string(),
            fs::read_to_string(&counter_file).unwrap()
        );
        counter.write(|counter| *counter += 1).unwrap();
        assert_eq!(*counter.read(), 2);
        assert_eq!(
            counter.read().to_string(),
            fs::read_to_string(&counter_file).unwrap()
        );
    }

    /// A second instance pointed at the same file picks up saved state.
    #[test]
    fn loads_data() {
        let dir = Builder::new().prefix("test").tempdir().unwrap();
        let counter_file = dir.path().join("counter");
        let counter: SaveOnDisk<u32> = SaveOnDisk::load_or_init_default(&counter_file).unwrap();
        counter.write(|counter| *counter += 1).unwrap();
        let counter: SaveOnDisk<u32> = SaveOnDisk::load_or_init_default(&counter_file).unwrap();
        let value = *counter.read();
        assert_eq!(value, 1)
    }

    /// `wait_for` returns true once a concurrent writer satisfies the predicate.
    #[test]
    fn test_wait_for_condition_change() {
        let dir = Builder::new().prefix("test").tempdir().unwrap();
        let counter_file = dir.path().join("counter");
        let counter: Arc<SaveOnDisk<u32>> =
            Arc::new(SaveOnDisk::load_or_init_default(counter_file).unwrap());
        let counter_copy = counter.clone();
        let handle = thread::spawn(move || {
            sleep(Duration::from_millis(200));
            counter_copy.write(|counter| *counter += 3).unwrap();
            sleep(Duration::from_millis(200));
            counter_copy.write(|counter| *counter += 7).unwrap();
            sleep(Duration::from_millis(200));
        });
        assert!(counter.wait_for(|counter| *counter > 5, Duration::from_secs(2)));
        handle.join().unwrap();
    }

    /// `wait_for` returns false when the predicate isn't satisfied in time
    /// (the second increment lands after the 300ms deadline).
    #[test]
    fn test_wait_for_condition_change_timeout() {
        let dir = Builder::new().prefix("test").tempdir().unwrap();
        let counter_file = dir.path().join("counter");
        let counter: Arc<SaveOnDisk<u32>> =
            Arc::new(SaveOnDisk::load_or_init_default(counter_file).unwrap());
        let counter_copy = counter.clone();
        let handle = thread::spawn(move || {
            sleep(Duration::from_millis(200));
            counter_copy.write(|counter| *counter += 3).unwrap();
            sleep(Duration::from_millis(200));
            counter_copy.write(|counter| *counter += 7).unwrap();
            sleep(Duration::from_millis(200));
        });
        assert!(!counter.wait_for(|counter| *counter > 5, Duration::from_millis(300)));
        handle.join().unwrap();
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/progress_tracker.rs | lib/common/common/src/progress_tracker.rs | //! Hierarchical progress tracker.
//!
//! # Example
//!
//! ```text
//! now
//! βββββββββββββββββββββββββββ Time βββββββββββββββββββββββββββββ΄βΆβΆβΆβΆβΆβΆβΆ
//!
//! ββββββββββββββββββββββ Segment Indexing βββββββββββββββββββββββΆβΆβΆβΆβΆβΆβΆ
//! ββββββββ Quantization ββββββββ€βββββ HNSW Index Building ββββββΆβΆβΆβΆβΆβΆβΆ
//! ββ Vector A ββ€ββ Vector B ββ€ ββ Vector A ββ€ββ Vector B βββββΆβΆβΆβΆβΆβΆβΆ
//! ```
//!
//! # Errors and Panic Safety
//!
//! Most of methods are infallible (not returning `Result`/`Option`).
//! On debug builds they might panic, on release builds they will fallback to
//! some placeholder behavior.
//!
//! Why? Because progress tracking is a non-critical feature, and we don't want
//! to abort segment building just because of a bug in progress tracking code.
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use chrono::{DateTime, Utc};
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::Serialize;
/// Read-only view of a root progress node.
///
/// Keep it around to observe the progress from another thread.
#[derive(Clone, Debug)]
pub struct ProgressView {
    // Shared with every `ProgressTracker` derived from the same root.
    root: Arc<Mutex<ProgressNode>>,
    /// This field is redundant, but kept outside of the lock for faster access.
    started_at: DateTime<Utc>,
}
/// Serializable point-in-time snapshot of a progress (sub)tree,
/// produced by [`ProgressView::snapshot`].
#[derive(Clone, Debug, Serialize, JsonSchema)]
pub struct ProgressTree {
    /// Name of the operation.
    pub name: String,
    /// When the operation started.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub started_at: Option<DateTime<Utc>>,
    /// When the operation finished.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finished_at: Option<DateTime<Utc>>,
    /// For finished operations, how long they took, in seconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration_sec: Option<f64>,
    /// Number of completed units of work, if applicable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub done: Option<u64>,
    /// Total number of units of work, if applicable and known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total: Option<u64>,
    /// Child operations.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub children: Vec<ProgressTree>,
}
impl ProgressView {
    /// Renders the whole tree into a serializable [`ProgressTree`],
    /// using `root` as the display name of the root node.
    pub fn snapshot(&self, root: impl Into<String>) -> ProgressTree {
        let root_name = root.into();
        let guard = self.root.lock();
        guard.render(root_name)
    }

    /// When the root operation started.
    ///
    /// The same as `self.snapshot("").started_at().unwrap()`.
    pub fn started_at(&self) -> DateTime<Utc> {
        self.started_at
    }
}
/// Write-only handle to report the progress of operation.
///
/// Might be root node or a sub-task.
pub struct ProgressTracker {
    // Shared tree; all trackers derived from the same root point here.
    root: Arc<Mutex<ProgressNode>>,
    // Child indices from the root down to this tracker's node;
    // empty for the root tracker itself.
    path: Vec<usize>,
}
/// One node of the progress tree; internal representation behind the lock.
#[derive(Debug)]
pub struct ProgressNode {
    /// Sub-tasks of this task, with their names, in order of creation.
    children: Vec<(String, ProgressNode)>,
    // `Some` once `track_progress` has been called on this node.
    progress: Option<NodeProgress>,
    state: ProgressState,
}
/// Unit-of-work counter for a node.
#[derive(Debug)]
struct NodeProgress {
    // Shared with the caller of `track_progress`, who increments it.
    current: Arc<AtomicU64>,
    // Total units of work, if known.
    total: Option<u64>,
}
#[derive(Debug)]
enum ProgressState {
    /// Created via `subtask` but not yet started.
    Pending,
    /// Currently running. Wall-clock start plus a monotonic instant
    /// so the eventual duration is immune to clock adjustments.
    InProgress {
        started_at: DateTime<Utc>,
        started_instant: Instant,
    },
    /// Completed (tracker dropped while in progress).
    Finished {
        started_at: DateTime<Utc>,
        finished_at: DateTime<Utc>,
        duration: Duration,
    },
    /// Tracker dropped without ever being started.
    Skipped,
}
/// Create a new root progress tracker.
///
/// Returns a read-only [`ProgressView`] to observe the progress,
/// and a write-only [`ProgressTracker`] to signal progress updates.
pub fn new_progress_tracker() -> (ProgressView, ProgressTracker) {
    // Capture both clocks once so the view and the root node agree.
    let started_at = Utc::now();
    let started_instant = Instant::now();
    let root_node = ProgressNode {
        children: Vec::new(),
        progress: None,
        state: ProgressState::InProgress {
            started_at,
            started_instant,
        },
    };
    let root = Arc::new(Mutex::new(root_node));
    let view = ProgressView {
        root: Arc::clone(&root),
        started_at,
    };
    let tracker = ProgressTracker {
        root,
        path: Vec::new(),
    };
    (view, tracker)
}
impl ProgressTracker {
    #[cfg(any(test, feature = "testing"))]
    pub fn new_for_test() -> Self {
        new_progress_tracker().1
    }

    /// Create a pending subtask.
    pub fn subtask(&self, name: impl Into<String>) -> ProgressTracker {
        self.subtask_impl(name.into(), true)
    }

    /// Similar to creating a [`Self::subtask()`], then immediately calling
    /// [`Self::start()`] on it.
    pub fn running_subtask(&self, name: impl Into<String>) -> ProgressTracker {
        self.subtask_impl(name.into(), false)
    }

    /// Registers a new child under this tracker's node and returns a tracker
    /// pointing at it; `pending` selects the initial state.
    fn subtask_impl(&self, name: String, pending: bool) -> ProgressTracker {
        let mut root = self.root.lock();
        if let Some(parent) = root.get_mut(&self.path) {
            // Child path = this tracker's path + index of the slot being filled.
            let mut path = Vec::with_capacity(self.path.len() + 1);
            path.extend_from_slice(&self.path);
            path.push(parent.children.len());
            parent.children.push((
                name,
                ProgressNode {
                    children: Vec::new(),
                    progress: None,
                    state: if pending {
                        ProgressState::Pending
                    } else {
                        ProgressState::InProgress {
                            started_at: Utc::now(),
                            started_instant: Instant::now(),
                        }
                    },
                },
            ));
            ProgressTracker {
                root: self.root.clone(),
                path,
            }
        } else {
            // Should never happen. But if it does, return an obviously invalid
            // path to avoid a panic.
            debug_assert!(false, "bug: invalid path when creating subtask");
            ProgressTracker {
                root: self.root.clone(),
                path: vec![usize::MAX, usize::MAX],
            }
        }
    }

    /// Enable progress tracking for this task.
    ///
    /// Accepts the total number of units of work, if known.
    /// Returns a counter that the caller should increment to report progress.
    /// Before entering hot loops, don't forget to call `Arc::deref` on it.
    pub fn track_progress(&self, total: Option<u64>) -> Arc<AtomicU64> {
        let progress = Arc::new(AtomicU64::new(0));
        let mut root = self.root.lock();
        if let Some(node) = root.get_mut(&self.path) {
            debug_assert!(
                node.progress.is_none(),
                "usage error: track_progress called multiple times on the same node",
            );
            node.progress = Some(NodeProgress {
                current: progress.clone(),
                total,
            });
        } else {
            debug_assert!(false, "bug: invalid path when adding progress tracking");
        }
        progress
    }

    /// For tasks created using [`Self::subtask`], mark them as in-progress.
    pub fn start(&self) {
        let mut root = self.root.lock();
        if let Some(node) = root.get_mut(&self.path) {
            match node.state {
                // Restarting a skipped task is allowed; it simply begins anew.
                ProgressState::Pending | ProgressState::Skipped => {
                    node.state = ProgressState::InProgress {
                        started_at: Utc::now(),
                        started_instant: Instant::now(),
                    };
                }
                // Starting twice (or after finish) is a no-op.
                ProgressState::InProgress { .. } | ProgressState::Finished { .. } => (),
            }
        } else {
            debug_assert!(false, "bug: invalid path when starting a task");
        }
    }
}
// Dropping a tracker finalizes its node: in-progress becomes finished,
// never-started (pending) becomes skipped.
impl Drop for ProgressTracker {
    fn drop(&mut self) {
        ProgressNode::finish(&self.root, &self.path);
    }
}
impl ProgressNode {
    /// Walks `path` (child indices) down from this node.
    /// Returns `None` if any index is out of bounds.
    fn get_mut(&mut self, path: &[usize]) -> Option<&mut ProgressNode> {
        let mut current = &mut *self;
        for &idx in path {
            current = &mut current.children.get_mut(idx)?.1;
        }
        Some(current)
    }

    /// Recursively converts this subtree into a serializable [`ProgressTree`],
    /// labeling this node `name`.
    fn render(&self, name: String) -> ProgressTree {
        let Self {
            children,
            progress,
            state,
        } = self;
        let (done, total) = match progress {
            Some(NodeProgress { current, total }) => {
                (Some(current.load(Ordering::Relaxed)), *total)
            }
            None => (None, None),
        };
        let (started_at, finished_at, duration_sec) = match state {
            ProgressState::Pending | ProgressState::Skipped => (None, None, None),
            ProgressState::InProgress { started_at, .. } => (Some(*started_at), None, None),
            ProgressState::Finished {
                started_at,
                finished_at,
                duration,
            } => (
                Some(*started_at),
                Some(*finished_at),
                Some(duration.as_secs_f64()),
            ),
        };
        ProgressTree {
            name,
            started_at,
            finished_at,
            duration_sec,
            done,
            total,
            children: children
                .iter()
                .map(|(child_name, child_node)| child_node.render(child_name.clone()))
                .collect(),
        }
    }

    /// Finalizes the node at `path`: in-progress -> finished (duration taken
    /// from the monotonic clock), pending -> skipped, otherwise a no-op.
    /// Called from `ProgressTracker::drop`.
    fn finish(root: &Arc<Mutex<ProgressNode>>, path: &[usize]) {
        let mut root = root.lock();
        if let Some(node) = root.get_mut(path) {
            match &node.state {
                ProgressState::Pending => node.state = ProgressState::Skipped,
                ProgressState::InProgress {
                    started_at,
                    started_instant,
                } => {
                    let finished_instant = Instant::now();
                    node.state = ProgressState::Finished {
                        started_at: *started_at,
                        finished_at: Utc::now(),
                        duration: finished_instant.duration_since(*started_instant),
                    };
                }
                ProgressState::Finished { .. } => (),
                ProgressState::Skipped => (),
            }
        } else {
            // Should never happen.
            debug_assert!(false, "bug: invalid path when finishing a task");
        }
    }
}
#[cfg(test)]
mod tests {
    use std::fmt::Write;
    use std::sync::atomic::Ordering;

    use super::*;

    /// End-to-end walk through the tracker lifecycle: subtasks, start,
    /// progress counters, and drop-driven finish/skip transitions.
    #[test]
    #[expect(unused_variables, reason = "testing drop behavior")]
    fn test_progress_tracker() {
        let (view, p) = new_progress_tracker();
        let p_foo = p.subtask("foo");
        let p_bar = p.subtask("bar");
        let p_baz = p.subtask("baz");
        let p_foo_x = p_foo.subtask("x");
        let p_foo_y = p_foo.subtask("y");
        let p_foo_z = p_foo.subtask("z");
        p_foo.start();
        p_foo_x.start();
        {
            let p_foo_x_a = p_foo_x.subtask("a");
            let p_foo_x_b = p_foo_x.subtask("b");
            let p_foo_x_c = p_foo_x.subtask("c");
            p_foo_x_a.start();
            p_foo_x_b.start();
            // c is not started; becomes skipped on drop
            p_foo_x_a
                .track_progress(Some(7))
                .store(5, Ordering::Relaxed);
        }
        drop(p_foo_x);
        p_foo_y.start();
        {
            let p_foo_y_a = p_foo_y.subtask("a");
            let p_foo_y_b = p_foo_y.subtask("b");
            let p_foo_y_c = p_foo_y.subtask("c");
            p_foo_y_a.start();
            p_foo_y_a.track_progress(None).store(3, Ordering::Relaxed);
            check_state(
                &view,
                "
                :in-progress {
                    foo:in-progress {
                        x:finished {
                            a:finished[5/7] {}
                            b:finished {}
                            c:skipped {}
                        }
                        y:in-progress {
                            a:in-progress[3/?] {}
                            b:pending {}
                            c:pending {}
                        }
                        z:pending {}
                    }
                    bar:pending {}
                    baz:pending {}
                }
                ",
            );
        }
    }

    /// Renders a node as a compact `name:state[done/total] { children }`
    /// string for comparison in `check_state`.
    fn test_render(node: &ProgressNode, output: &mut String) {
        output.push(':');
        match &node.state {
            ProgressState::Pending => output.push_str("pending"),
            ProgressState::InProgress { .. } => output.push_str("in-progress"),
            ProgressState::Finished { .. } => output.push_str("finished"),
            ProgressState::Skipped => output.push_str("skipped"),
        }
        if let Some(progress) = &node.progress {
            write!(output, "[{}/", progress.current.load(Ordering::Relaxed)).unwrap();
            if let Some(total) = progress.total {
                write!(output, "{total}]").unwrap();
            } else {
                output.push_str("?]");
            }
        }
        output.push('{');
        for (child_name, child_node) in &node.children {
            output.push_str(child_name);
            test_render(child_node, output);
        }
        output.push('}');
    }

    /// Compares the rendered tree with `expected`, ignoring all whitespace.
    fn check_state(view: &ProgressView, expected: &str) {
        let mut rendered = String::new();
        test_render(&view.root.lock(), &mut rendered);
        assert_eq!(rendered, expected.replace(&[' ', '\n'][..], ""));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/fixed_length_priority_queue.rs | lib/common/common/src/fixed_length_priority_queue.rs | use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::num::NonZeroUsize;
use std::vec::IntoIter as VecIntoIter;
use bytemuck::{TransparentWrapper as _, TransparentWrapperAlloc as _};
use serde::{Deserialize, Serialize};
/// To avoid excessive memory allocation, FixedLengthPriorityQueue
/// imposes a reasonable limit on the allocation size. If the limit
/// is extremely large, we treat it as if no limit was set and
/// delay allocation, assuming that the results will fit within a
/// predefined threshold.
const LARGEST_REASONABLE_ALLOCATION_SIZE: usize = 1_048_576; // 2^20 elements (not bytes)
/// A container that forgets all but the top N elements
///
/// This is a MinHeap by default - it will keep the largest elements, pop smallest
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct FixedLengthPriorityQueue<T: Ord> {
    // Min-heap (via `Reverse`): the root is the *smallest* retained element,
    // i.e. the one evicted first when a larger element arrives.
    heap: BinaryHeap<Reverse<T>>,
    // Maximum number of retained elements; non-zero by construction.
    length: NonZeroUsize,
}
impl<T: Ord> Default for FixedLengthPriorityQueue<T> {
    // Capacity-1 queue: "keep only the single largest element".
    fn default() -> Self {
        Self::new(1)
    }
}
impl<T: Ord> FixedLengthPriorityQueue<T> {
    /// Creates a new queue with the given length
    /// Panics if length is 0
    pub fn new(length: usize) -> Self {
        // Pre-allocation is capped so absurd `length` values don't reserve
        // huge buffers up front; beyond the cap the heap grows on demand.
        // NOTE(review): the `+ 1` slot appears to be headroom for `push`;
        // with the current `peek_mut` implementation the heap never exceeds
        // `length`, so it is harmless either way.
        let heap = BinaryHeap::with_capacity(
            length
                .saturating_add(1)
                .min(LARGEST_REASONABLE_ALLOCATION_SIZE),
        );
        let length = NonZeroUsize::new(length).expect("length must be greater than zero");
        FixedLengthPriorityQueue::<T> { heap, length }
    }

    /// Pushes a value into the priority queue.
    ///
    /// If the queue if full, replaces the smallest value and returns it.
    pub fn push(&mut self, value: T) -> Option<T> {
        if !self.is_full() {
            self.heap.push(Reverse(value));
            return None;
        }
        // Full: the heap root (`peek_mut`) is the smallest retained element.
        // If the new value is larger, swap it in — `PeekMut`'s drop re-sifts
        // the heap. Either way, the smaller of the two is returned.
        let mut x = self.heap.peek_mut().unwrap();
        let mut value = Reverse(value);
        if x.0 < value.0 {
            std::mem::swap(&mut *x, &mut value);
        }
        Some(value.0)
    }

    /// Consumes the [`FixedLengthPriorityQueue`] and returns a vector
    /// in sorted (descending) order.
    pub fn into_sorted_vec(self) -> Vec<T> {
        // `peel_vec` strips the `Reverse` wrappers at zero cost (bytemuck
        // TransparentWrapper); ascending `Reverse` order == descending `T`.
        Reverse::peel_vec(self.heap.into_sorted_vec())
    }

    /// Returns an iterator over the elements in the queue, in arbitrary order.
    pub fn iter_unsorted(&self) -> std::slice::Iter<'_, T> {
        Reverse::peel_slice(self.heap.as_slice()).iter()
    }

    /// Returns an iterator over the elements in the queue
    /// in sorted (descending) order.
    pub fn into_iter_sorted(self) -> VecIntoIter<T> {
        self.into_sorted_vec().into_iter()
    }

    /// Returns the smallest element of the queue,
    /// if there is any.
    pub fn top(&self) -> Option<&T> {
        self.heap.peek().map(|x| &x.0)
    }

    /// Returns actual length of the queue
    pub fn len(&self) -> usize {
        self.heap.len()
    }

    /// Checks if the queue is empty
    pub fn is_empty(&self) -> bool {
        self.heap.is_empty()
    }

    /// Checks if the queue is full
    pub fn is_full(&self) -> bool {
        self.heap.len() >= self.length.into()
    }

    /// Keeps only the elements for which `f` returns `true`.
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        self.heap.retain(|x| f(&x.0));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/flags.rs | lib/common/common/src/flags.rs | use std::sync::OnceLock;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Global feature flags, normally initialized when starting Qdrant.
///
/// Set at most once via [`init_feature_flags`]; read via [`feature_flags`].
static FEATURE_FLAGS: OnceLock<FeatureFlags> = OnceLock::new();
/// Runtime feature toggles; see the individual field docs for history
/// and default-on versions. Missing fields deserialize to the defaults
/// thanks to `#[serde(default)]`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, JsonSchema)]
#[serde(default)]
pub struct FeatureFlags {
    /// Magic feature flag that enables all features.
    ///
    /// Note that this will only be applied to all flags when passed into [`init_feature_flags`].
    all: bool,
    /// Skip usage of RocksDB in new immutable payload indices.
    ///
    /// First implemented in Qdrant 1.13.5.
    /// Enabled by default in Qdrant 1.14.1.
    pub payload_index_skip_rocksdb: bool,
    /// Skip usage of RocksDB in new mutable payload indices.
    ///
    /// First implemented in Qdrant 1.15.0.
    /// Enabled by default in Qdrant 1.16.0.
    pub payload_index_skip_mutable_rocksdb: bool,
    /// Skip usage of RocksDB in new payload storages.
    ///
    /// On-disk payload storages never use Gridstore.
    ///
    /// First implemented in Qdrant 1.15.0.
    /// Enabled by default in Qdrant 1.16.0.
    pub payload_storage_skip_rocksdb: bool,
    /// Use incremental HNSW building.
    ///
    /// Enabled by default in Qdrant 1.14.1.
    pub incremental_hnsw_building: bool,
    /// Migrate RocksDB based ID trackers into file based ID tracker on start.
    ///
    /// Enabled by default in Qdrant 1.15.0.
    pub migrate_rocksdb_id_tracker: bool,
    /// Migrate RocksDB based vector storages into new format on start.
    ///
    /// Enabled by default in Qdrant 1.16.1.
    pub migrate_rocksdb_vector_storage: bool,
    /// Migrate RocksDB based payload storages into new format on start.
    ///
    /// Enabled by default in Qdrant 1.16.1.
    pub migrate_rocksdb_payload_storage: bool,
    /// Migrate RocksDB based payload indices into new format on start.
    ///
    /// Rebuilds a new payload index from scratch.
    ///
    /// Enabled by default in Qdrant 1.16.1.
    pub migrate_rocksdb_payload_indices: bool,
    /// Use appendable quantization in appendable plain segments.
    ///
    /// Enabled by default in Qdrant 1.16.0.
    pub appendable_quantization: bool,
}
impl Default for FeatureFlags {
    fn default() -> FeatureFlags {
        FeatureFlags {
            // The "enable everything" escape hatch is opt-in.
            all: false,
            // All individual flags below ship enabled by default
            // (see the version notes on each field).
            payload_index_skip_rocksdb: true,
            payload_index_skip_mutable_rocksdb: true,
            payload_storage_skip_rocksdb: true,
            incremental_hnsw_building: true,
            migrate_rocksdb_id_tracker: true,
            migrate_rocksdb_vector_storage: true,
            migrate_rocksdb_payload_storage: true,
            migrate_rocksdb_payload_indices: true,
            appendable_quantization: true,
        }
    }
}
impl FeatureFlags {
/// Check if the feature flags are set to default values.
pub fn is_default(self) -> bool {
self == FeatureFlags::default()
}
}
/// Initializes the global feature flags with `flags`. Must only be called once at
/// startup or otherwise throws a warning and discards the values.
pub fn init_feature_flags(mut flags: FeatureFlags) {
    // Exhaustive destructuring (no `..`): adding a field to `FeatureFlags`
    // fails to compile here, forcing the new flag to be wired into `all`.
    let FeatureFlags {
        all,
        payload_index_skip_rocksdb,
        payload_index_skip_mutable_rocksdb,
        payload_storage_skip_rocksdb,
        incremental_hnsw_building,
        migrate_rocksdb_id_tracker,
        migrate_rocksdb_vector_storage,
        migrate_rocksdb_payload_storage,
        migrate_rocksdb_payload_indices,
        appendable_quantization,
    } = &mut flags;
    // If all is set, explicitly set all feature flags
    if *all {
        *payload_index_skip_rocksdb = true;
        *payload_index_skip_mutable_rocksdb = true;
        *payload_storage_skip_rocksdb = true;
        *incremental_hnsw_building = true;
        *migrate_rocksdb_id_tracker = true;
        *migrate_rocksdb_vector_storage = true;
        *migrate_rocksdb_payload_storage = true;
        *migrate_rocksdb_payload_indices = true;
        *appendable_quantization = true;
    }
    // `OnceLock::set` fails if a value was already stored; in that case the
    // first value wins and we only warn.
    let res = FEATURE_FLAGS.set(flags);
    if res.is_err() {
        log::warn!("Feature flags already initialized!");
    }
}
/// Returns the configured global feature flags.
pub fn feature_flags() -> FeatureFlags {
    match FEATURE_FLAGS.get() {
        Some(flags) => *flags,
        None => {
            // They should always be initialized by startup code.
            log::warn!("Feature flags not initialized!");
            FeatureFlags::default()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_defaults() {
        // Ensure we properly deserialize and don't crash on empty state
        let empty: FeatureFlags = serde_json::from_str("{}").unwrap();
        assert!(empty.is_default());
        // NOTE(review): relies on `feature_flags()` falling back to defaults
        // because the global is never initialized in the test binary.
        assert!(feature_flags().is_default());
        assert!(FeatureFlags::default().is_default());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/either_variant.rs | lib/common/common/src/either_variant.rs | use std::iter;
/// Works like `Either`, but with four alternatives instead of two.
/// See the `Iterator` impl below for the main use case: returning one of
/// several distinct iterator types from a single function.
pub enum EitherVariant<A, B, C, D> {
    A(A),
    B(B),
    C(C),
    D(D),
}
/// Evaluates `$result` against whichever variant `$value` currently holds,
/// binding the payload as `$pattern` in every arm.
macro_rules! for_all {
    ($value:expr, $pattern:pat => $result:expr) => {
        match $value {
            $crate::either_variant::EitherVariant::A($pattern) => $result,
            $crate::either_variant::EitherVariant::B($pattern) => $result,
            $crate::either_variant::EitherVariant::C($pattern) => $result,
            $crate::either_variant::EitherVariant::D($pattern) => $result,
        }
    };
}
// Delegate each method to the active variant via `for_all!`. Methods beyond
// `next` are forwarded explicitly so the wrapped iterators' own specialized
// implementations (`size_hint`, `fold`, `nth`, ...) are used instead of the
// trait's generic defaults.
impl<A, B, C, D> Iterator for EitherVariant<A, B, C, D>
where
    A: Iterator,
    B: Iterator<Item = A::Item>,
    C: Iterator<Item = A::Item>,
    D: Iterator<Item = A::Item>,
{
    type Item = A::Item;

    fn next(&mut self) -> Option<Self::Item> {
        for_all!(*self, ref mut inner => inner.next())
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        for_all!(*self, ref inner => inner.size_hint())
    }

    fn count(self) -> usize {
        for_all!(self, inner => inner.count())
    }

    fn last(self) -> Option<Self::Item> {
        for_all!(self, inner => inner.last())
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        for_all!(*self, ref mut inner => inner.nth(n))
    }

    fn for_each<F>(self, f: F)
    where
        F: FnMut(Self::Item),
    {
        for_all!(self, inner => inner.for_each(f))
    }

    fn collect<X>(self) -> X
    where
        X: iter::FromIterator<Self::Item>,
    {
        for_all!(self, inner => inner.collect())
    }

    fn partition<X, F>(self, f: F) -> (X, X)
    where
        X: Default + Extend<Self::Item>,
        F: FnMut(&Self::Item) -> bool,
    {
        for_all!(self, inner => inner.partition(f))
    }

    fn fold<Acc, G>(self, init: Acc, f: G) -> Acc
    where
        G: FnMut(Acc, Self::Item) -> Acc,
    {
        for_all!(self, inner => inner.fold(init, f))
    }

    fn all<F>(&mut self, f: F) -> bool
    where
        F: FnMut(Self::Item) -> bool,
    {
        for_all!(*self, ref mut inner => inner.all(f))
    }

    fn any<F>(&mut self, f: F) -> bool
    where
        F: FnMut(Self::Item) -> bool,
    {
        for_all!(*self, ref mut inner => inner.any(f))
    }

    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        P: FnMut(&Self::Item) -> bool,
    {
        for_all!(*self, ref mut inner => inner.find(predicate))
    }

    fn find_map<X, F>(&mut self, f: F) -> Option<X>
    where
        F: FnMut(Self::Item) -> Option<X>,
    {
        for_all!(*self, ref mut inner => inner.find_map(f))
    }

    fn position<P>(&mut self, predicate: P) -> Option<usize>
    where
        P: FnMut(Self::Item) -> bool,
    {
        for_all!(*self, ref mut inner => inner.position(predicate))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/types.rs | lib/common/common/src/types.rs | use std::cmp::Ordering;
use ordered_float::OrderedFloat;
use strum::EnumIter;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
/// Type of vector matching score
pub type ScoreType = f32;
/// Type of point index inside a segment
pub type PointOffsetType = u32;
#[derive(Copy, Clone, PartialEq, Debug, Default, FromBytes, IntoBytes, KnownLayout, Immutable)]
#[repr(C)]
pub struct ScoredPointOffset {
pub idx: PointOffsetType,
pub score: ScoreType,
}
impl Eq for ScoredPointOffset {}
impl Ord for ScoredPointOffset {
fn cmp(&self, other: &Self) -> Ordering {
OrderedFloat(self.score).cmp(&OrderedFloat(other.score))
}
}
impl PartialOrd for ScoredPointOffset {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
#[derive(Copy, Clone, Debug)]
pub struct TelemetryDetail {
pub level: DetailsLevel,
pub histograms: bool,
}
impl TelemetryDetail {
pub fn new(level: DetailsLevel, histograms: bool) -> Self {
Self { level, histograms }
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, EnumIter)]
pub enum DetailsLevel {
/// Minimal information level
/// - app info
/// - minimal telemetry by endpoint
/// - cluster status
Level0,
/// Detailed common info level
/// - app info details
/// - system info
/// - hardware flags
/// - hardware usage per collection
/// - RAM usage
/// - cluster basic details
/// - collections basic info
Level1,
/// Detailed consensus info - peers info
/// Collections:
/// - detailed config
/// - Shards - basic config
Level2,
/// Shards:
/// - detailed config
/// - Optimizers info
Level3,
/// Segment level telemetry
Level4,
}
impl Default for TelemetryDetail {
fn default() -> Self {
TelemetryDetail {
level: DetailsLevel::Level0,
histograms: false,
}
}
}
impl From<usize> for DetailsLevel {
fn from(value: usize) -> Self {
match value {
0 => DetailsLevel::Level0,
1 => DetailsLevel::Level1,
2 => DetailsLevel::Level2,
3 => DetailsLevel::Level3,
4 => DetailsLevel::Level4,
_ => DetailsLevel::Level4,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/cow.rs | lib/common/common/src/cow.rs | //! [`std::borrow::Cow`]-like enums.
//!
//! # Comparison table
//!
//! | Type | Borrowed | Owned |
//! | -------------------- | -------- | ----------------------- |
//! | [`Cow<'a, T>`] | `&'a T` | `<T as ToOwned>::Owned` |
//! | [`BoxCow<'a, T>`] | `&'a T` | `Box<T>` |
//! | [`SimpleCow<'a, T>`] | `&'a T` | `T` |
//!
//! [`Cow<'a, T>`]: std::borrow::Cow
use std::ops::Deref;
pub enum BoxCow<'a, T: ?Sized> {
Borrowed(&'a T),
Owned(Box<T>),
}
impl<'a, T: ?Sized> BoxCow<'a, T> {
pub fn as_borrowed(&'a self) -> Self {
match self {
BoxCow::Borrowed(v) => BoxCow::Borrowed(v),
BoxCow::Owned(v) => BoxCow::Borrowed(v.as_ref()),
}
}
}
impl<T: ?Sized> Deref for BoxCow<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match self {
BoxCow::Borrowed(t) => t,
BoxCow::Owned(t) => t,
}
}
}
pub enum SimpleCow<'a, T> {
Borrowed(&'a T),
Owned(T),
}
impl<T> Deref for SimpleCow<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match self {
SimpleCow::Borrowed(v) => v,
SimpleCow::Owned(v) => v,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/zeros.rs | lib/common/common/src/zeros.rs | use std::io::{Result, Write};
static ZEROS: [u8; 8096] = [0u8; 8096];
pub trait WriteZerosExt {
/// Write `len` zeros to the writer.
fn write_zeros(&mut self, len: usize) -> Result<()>;
}
impl<W: Write> WriteZerosExt for W {
fn write_zeros(&mut self, mut len: usize) -> Result<()> {
while len > 0 {
len -= self.write(&ZEROS[..ZEROS.len().min(len)])?;
}
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/cpu.rs | lib/common/common/src/cpu.rs | use std::cmp::Ordering;
#[cfg(target_os = "linux")]
use thiserror::Error;
#[cfg(target_os = "linux")]
use thread_priority::{ThreadPriority, ThreadPriorityValue, set_current_thread_priority};
use crate::defaults::default_cpu_budget_unallocated;
/// Try to read number of CPUs from environment variable `QDRANT_NUM_CPUS`.
/// If it is not set, use `num_cpus::get()`.
pub fn get_num_cpus() -> usize {
match std::env::var("QDRANT_NUM_CPUS") {
Ok(val) => {
let num_cpus = val.parse::<usize>().unwrap_or(0);
if num_cpus > 0 {
num_cpus
} else {
num_cpus::get()
}
}
Err(_) => num_cpus::get(),
}
}
/// Get available CPU budget to use for optimizations as number of CPUs (threads).
///
/// This is user configurable via `cpu_budget` parameter in settings:
/// If 0 - auto selection, keep at least one CPU free when possible.
/// If negative - subtract this number of CPUs from the available CPUs.
/// If positive - use this exact number of CPUs.
///
/// The returned value will always be at least 1.
pub fn get_cpu_budget(cpu_budget_param: isize) -> usize {
match cpu_budget_param.cmp(&0) {
// If less than zero, subtract from available CPUs
Ordering::Less => get_num_cpus()
.saturating_sub(-cpu_budget_param as usize)
.max(1),
// If zero, use automatic selection
Ordering::Equal => {
let num_cpus = get_num_cpus();
num_cpus
.saturating_sub(-default_cpu_budget_unallocated(num_cpus) as usize)
.max(1)
}
// If greater than zero, use exact number
Ordering::Greater => cpu_budget_param as usize,
}
}
#[derive(Error, Debug)]
#[cfg(target_os = "linux")]
pub enum ThreadPriorityError {
#[error("Failed to set thread priority: {0:?}")]
SetThreadPriority(thread_priority::Error),
#[error("Failed to parse thread priority value: {0}")]
ParseNice(String),
}
/// On Linux, make current thread lower priority (nice: 10).
#[cfg(target_os = "linux")]
pub fn linux_low_thread_priority() -> Result<(), ThreadPriorityError> {
// 25% corresponds to a nice value of 10
set_linux_thread_priority(25)
}
/// On Linux, make current thread high priority (nice: -10).
///
/// # Warning
///
/// This is very likely to fail because decreasing the nice value requires special privileges. It
/// is therefore recommended to soft-fail.
/// See: <https://manned.org/renice.1#head6>
#[cfg(target_os = "linux")]
pub fn linux_high_thread_priority() -> Result<(), ThreadPriorityError> {
// 75% corresponds to a nice value of -10
set_linux_thread_priority(75)
}
/// On Linux, update priority of current thread.
///
/// Only works on Linux because POSIX threads share their priority/nice value with all process
/// threads. Linux breaks this behaviour though and uses a per-thread priority/nice value.
/// - <https://linux.die.net/man/7/pthreads>
/// - <https://linux.die.net/man/2/setpriority>
#[cfg(target_os = "linux")]
fn set_linux_thread_priority(priority: u8) -> Result<(), ThreadPriorityError> {
let new_priority = ThreadPriority::Crossplatform(
ThreadPriorityValue::try_from(priority).map_err(ThreadPriorityError::ParseNice)?,
);
set_current_thread_priority(new_priority).map_err(ThreadPriorityError::SetThreadPriority)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/maybe_uninit.rs | lib/common/common/src/maybe_uninit.rs | use std::mem::{MaybeUninit, transmute};
/// [`MaybeUninit::fill_from`] backported to stable.
///
/// Unlike the standard library version, this function does not support [`Drop`]
/// types, for simplicity of implementation.
///
/// TODO: remove in favor of [`MaybeUninit::fill_from`] once stabilized.
/// <https://github.com/rust-lang/rust/issues/117428>
pub fn maybe_uninit_fill_from<I: IntoIterator>(
this: &mut [MaybeUninit<I::Item>],
it: I,
) -> (&mut [I::Item], &mut [MaybeUninit<I::Item>]) {
const { assert!(!std::mem::needs_drop::<I::Item>(), "Not supported") };
let iter = it.into_iter();
let mut initialized_len = 0;
for (element, val) in this.iter_mut().zip(iter) {
element.write(val);
initialized_len += 1;
}
// SAFETY: guard.initialized <= this.len()
let (initted, remainder) = unsafe { this.split_at_mut_unchecked(initialized_len) };
// SAFETY: Valid elements have just been written into `init`, so that portion
// of `this` is initialized.
(
unsafe { transmute::<&mut [MaybeUninit<I::Item>], &mut [I::Item]>(initted) },
remainder,
)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/tar_ext.rs | lib/common/common/src/tar_ext.rs | //! Extensions for the `tar` crate.
use std::io::{self, Seek, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use tap::Tap;
use tokio::sync::Mutex;
use tokio::task::JoinError;
/// A wrapper around [`tar::Builder`] that:
/// 1. Usable both in sync and async contexts.
/// 2. Provides the [`BuilderExt::descend`] method.
/// 3. Supports both seekable (i.e. file) and streaming (i.e. sockets) outputs.
pub struct BuilderExt<W: Write + Seek = OwnedOutput> {
tar: Arc<Mutex<BlowFuseOnDrop<W>>>,
path: PathBuf,
}
type OwnedOutput = Box<dyn WriteSeek + Send + 'static>;
type BorrowedOutput<'a> = Box<dyn WriteSeek + 'a>;
pub trait WriteSeek: Write + Seek {}
impl<T: Write + Seek> WriteSeek for T {}
/// A wrapper around [`tar::Builder<FusedWriteSeek>`] that disables
/// [`FusedWriteSeek`] when it is dropped.
///
/// Disabling the [`FusedWriteSeek`] is a workaround for the inconvenient
/// [`tar::Builder`] behavior: dropping a [`tar::Builder`] might cause a final
/// write of archive footer.
/// This behavior is problematic for [`Write`] implementations that could panic
/// when used in an async context, such as [`SyncIoBridge`].
///
/// [`SyncIoBridge`]: https://docs.rs/tokio-util/0.7.12/tokio_util/io/struct.SyncIoBridge.html#method.new
struct BlowFuseOnDrop<W: Write + Seek> {
tar: Option<tar::Builder<FusedWriteSeek<W>>>,
enabled: Arc<AtomicBool>,
}
/// A wrapper around [`WriteSeek`] that could be disabled by [`BlowFuseOnDrop`].
struct FusedWriteSeek<W> {
output: W,
enabled: Arc<AtomicBool>,
}
impl<W: Write + Seek> BlowFuseOnDrop<W> {
fn tar(&mut self) -> &mut tar::Builder<FusedWriteSeek<W>> {
self.tar.as_mut().unwrap()
}
}
impl<W: Write + Seek> Drop for BlowFuseOnDrop<W> {
fn drop(&mut self) {
// Blow the fuse.
self.enabled.store(false, Ordering::Release);
}
}
impl<W: Write> Write for FusedWriteSeek<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if !self.enabled.load(Ordering::Acquire) {
// This error shouldn't be observable. It might appear only in
// `tar::Builder::drop`, and will be ignored there.
return Err(io::Error::other("Using WriteBox after it is disabled"));
}
self.output.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
// This method is never called by `tar::Builder`.
self.output.flush()
}
}
impl<W: Seek> Seek for FusedWriteSeek<W> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.output.seek(pos)
}
}
impl BuilderExt<OwnedOutput> {
pub fn new_seekable_owned(output: impl Write + Seek + Send + 'static) -> Self {
Self::new(Box::new(output))
}
pub fn new_streaming_owned(output: impl Write + Send + 'static) -> Self {
Self::new(Box::new(SeekWrapper(output)))
}
}
impl<'a> BuilderExt<BorrowedOutput<'a>> {
pub fn new_seekable_borrowed(output: impl Write + Seek + 'a) -> Self {
Self::new(Box::new(output))
}
pub fn new_streaming_borrowed(output: impl Write + 'a) -> Self {
Self::new(Box::new(SeekWrapper(output)))
}
}
impl<W: Write + Seek> Clone for BuilderExt<W> {
fn clone(&self) -> Self {
Self {
tar: Arc::clone(&self.tar),
path: self.path.clone(),
}
}
}
impl<W: Write + Seek> BuilderExt<W> {
fn new(output: W) -> Self {
let enabled = Arc::new(AtomicBool::new(true));
Self {
tar: Arc::new(Mutex::new(BlowFuseOnDrop {
tar: Some(
tar::Builder::new(FusedWriteSeek {
output,
enabled: Arc::clone(&enabled),
})
.tap_mut(|tar| tar.sparse(true)),
),
enabled,
})),
path: PathBuf::new(),
}
}
/// Create a new [`BuilderExt`] that writes to a subdirectory of the current
/// path. I.e. the following two lines are equivalent:
/// ```rust,ignore
/// builder.append_data(data, Path::new("foo/bar/baz")).await?;
/// builder.descend(Path::new("foo/bar"))?.append_data(data, Path::new("baz")).await?;
/// ```
pub fn descend(&self, subdir: &Path) -> io::Result<Self> {
Ok(Self {
tar: Arc::clone(&self.tar),
path: join_relative(&self.path, subdir)?,
})
}
/// Write an entry to the tar archive. Takes a closure that takes an
/// `impl Write` and writes the entry contents into it.
///
/// Require the underlying writer to be [`Seek`]. Returns an error for
/// non-seekable aka streaming writers.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
/// There are no async counterpart.
pub fn blocking_write_fn<T>(
&self,
dst: &Path,
f: impl FnOnce(&mut tar::EntryWriter) -> T,
) -> io::Result<T> {
let dst = join_relative(&self.path, dst)?;
let mut header = tar::Header::new_gnu();
header.set_mode(0o644);
let mut tar = self.tar.blocking_lock();
let mut writer = tar.tar().append_writer(&mut header, dst)?;
let result = f(&mut writer);
writer.finish()?;
Ok(result)
}
/// Append a file to the tar archive.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
/// Use [`BuilderExt::append_file`] instead.
pub fn blocking_append_file(&self, src: &Path, dst: &Path) -> io::Result<()> {
let dst = join_relative(&self.path, dst)?;
self.tar
.blocking_lock()
.tar()
.append_path_with_name(src, dst)
}
/// Append a directory to the tar archive.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
pub fn blocking_append_dir_all(&self, src: &Path, dst: &Path) -> io::Result<()> {
let dst = join_relative(&self.path, dst)?;
self.tar.blocking_lock().tar().append_dir_all(dst, src)
}
/// Append a new entry to the tar archive with the given file contents.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
/// Use [`BuilderExt::append_data`] instead.
pub fn blocking_append_data(&self, src: &[u8], dst: &Path) -> io::Result<()> {
let dst = join_relative(&self.path, dst)?;
let mut header = tar::Header::new_gnu();
header.set_mode(0o644);
header.set_size(src.len() as u64);
self.tar
.blocking_lock()
.tar()
.append_data(&mut header, dst, src)
}
/// Finish writing the tar archive. For async counterpart, see
/// [`BuilderExt::finish`].
pub fn blocking_finish(self) -> io::Result<()> {
let mut bb: BlowFuseOnDrop<_> = Arc::try_unwrap(self.tar)
.map_err(|_| {
io::Error::other("finish called with multiple references to the tar builder")
})?
.into_inner();
// Extract the builder out of bb.
let tar: tar::Builder<FusedWriteSeek<_>> = bb.tar.take().unwrap();
// Finish and flush before BuilderBox is dropped.
let mut wb: FusedWriteSeek<_> = tar.into_inner()?; // calls finish()
wb.flush()?;
Ok(())
}
}
impl<W: Send + Write + Seek + 'static> BuilderExt<W> {
/// Append a file to the tar archive.
pub async fn append_file(&self, src: &Path, dst: &Path) -> io::Result<()> {
let src = src.to_path_buf();
let dst = join_relative(&self.path, dst)?;
self.run_async(move |tar| tar.append_path_with_name(src, dst))
.await
}
/// Append a new entry to the tar archive with the given file contents.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
pub async fn append_data(&self, src: Vec<u8>, dst: &Path) -> io::Result<()> {
let dst = join_relative(&self.path, dst)?;
let mut header = tar::Header::new_gnu();
header.set_mode(0o644);
header.set_size(src.len() as u64);
self.run_async(move |tar| tar.append_data(&mut header, dst, src.as_slice()))
.await
}
/// Finish writing the tar archive.
pub async fn finish(self) -> io::Result<()> {
tokio::task::spawn_blocking(move || self.blocking_finish()).await?
}
async fn run_async<T, E>(
&self,
f: impl FnOnce(&mut tar::Builder<FusedWriteSeek<W>>) -> Result<T, E> + Send + 'static,
) -> Result<T, E>
where
T: Send + 'static,
E: Send + 'static + From<JoinError>,
{
let tar = Arc::clone(&self.tar);
tokio::task::spawn_blocking(move || f(tar.blocking_lock().tar())).await?
}
}
fn join_relative(base: &Path, rel_path: &Path) -> io::Result<PathBuf> {
if rel_path.is_absolute() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("path must be relative, but got {rel_path:?}"),
));
}
Ok(base.join(rel_path))
}
/// A wrapper that provides "dummy" [`io::Seek`] implementation to [`io::Write`] stream.
struct SeekWrapper<T>(T);
impl<T: Write> io::Write for SeekWrapper<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
impl<T: Write> io::Seek for SeekWrapper<T> {
fn seek(&mut self, _: io::SeekFrom) -> io::Result<u64> {
Err(io::ErrorKind::NotSeekable.into())
}
}
#[cfg(test)]
mod tests {
use super::*;
// -------------------------------------------------------------------------
// ------------------------------ Dummy tests ------------------------------
// -------------------------------------------------------------------------
struct DummyBridgeWriter(bool, Arc<Mutex<Vec<u8>>>);
impl Write for DummyBridgeWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.0 {
return Err(io::Error::other("Forced error in write"));
}
self.1.blocking_lock().extend_from_slice(buf); // panics in async
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
if self.0 {
return Err(io::Error::other("Forced error in flush"));
}
let _ = self.1.blocking_lock(); // panics in async
Ok(())
}
}
impl Seek for DummyBridgeWriter {
fn seek(&mut self, _: io::SeekFrom) -> io::Result<u64> {
unimplemented!()
}
}
#[tokio::test]
async fn test_dummy_finish_ok() {
let data = Arc::new(Mutex::new(Vec::new()));
let tar = BuilderExt::new_seekable_owned(DummyBridgeWriter(false, Arc::clone(&data)));
assert!(tar.finish().await.is_ok());
assert_eq!(data.lock().await.len(), 1024);
}
#[tokio::test]
async fn test_dummy_finish_fail() {
let data = Arc::new(Mutex::new(Vec::new()));
let tar = BuilderExt::new_seekable_owned(DummyBridgeWriter(true, Arc::clone(&data)));
assert!(tar.finish().await.is_err());
assert_eq!(data.lock().await.len(), 0);
}
#[tokio::test]
async fn test_dummy_drop_fail() {
let data = Arc::new(Mutex::new(Vec::new()));
let tar = BuilderExt::new_seekable_owned(DummyBridgeWriter(true, Arc::clone(&data)));
drop(tar);
assert_eq!(data.lock().await.len(), 0);
}
#[tokio::test]
async fn test_dummy_drop_ok() {
let data = Arc::new(Mutex::new(Vec::new()));
let tar = BuilderExt::new_seekable_owned(DummyBridgeWriter(false, Arc::clone(&data)));
drop(tar);
assert_eq!(data.lock().await.len(), 0);
}
// -------------------------------------------------------------------------
// ------------------------- Write/WriteSeek tests -------------------------
// -------------------------------------------------------------------------
#[test]
fn test_write_ok() {
let tar = BuilderExt::new_streaming_borrowed(Vec::new());
tar.blocking_append_data(b"foo", Path::new("foo")).unwrap();
tar.blocking_finish().unwrap();
}
#[test]
fn test_write_fail() {
let tar = BuilderExt::new_streaming_borrowed(Vec::new());
tar.blocking_append_data(b"foo", Path::new("foo")).unwrap();
let result = tar.blocking_write_fn(Path::new("foo"), |writer| writer.write_all(b"bar"));
assert_eq!(result.unwrap_err().kind(), io::ErrorKind::NotSeekable);
}
#[test]
fn test_writeseek_ok() {
let tar = BuilderExt::new_seekable_borrowed(io::Cursor::new(Vec::new()));
tar.blocking_append_data(b"foo", Path::new("foo")).unwrap();
tar.blocking_write_fn(Path::new("foo"), |writer| writer.write_all(b"bar"))
.unwrap()
.unwrap();
tar.blocking_finish().unwrap();
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/defaults.rs | lib/common/common/src/defaults.rs | use std::time::Duration;
use lazy_static::lazy_static;
use semver::Version;
use crate::cpu;
/// Current Qdrant version string
pub const QDRANT_VERSION_STRING: &str = "1.16.3";
lazy_static! {
/// Current Qdrant semver version
pub static ref QDRANT_VERSION: Version = Version::parse(QDRANT_VERSION_STRING).expect("malformed version string");
/// User-agent string to use in HTTP clients
pub static ref APP_USER_AGENT: String = format!("Qdrant/{QDRANT_VERSION_STRING}");
}
/// Maximum number of segments to load concurrently when loading a collection.
pub const MAX_CONCURRENT_SEGMENT_LOADS: usize = 8;
/// Number of retries for confirming a consensus operation.
pub const CONSENSUS_CONFIRM_RETRIES: usize = 3;
/// Default timeout for consensus meta operations.
pub const CONSENSUS_META_OP_WAIT: Duration = Duration::from_secs(10);
lazy_static! {
/// Max number of pooled elements to preserve in memory.
/// Scaled according to the number of logical CPU cores to account for concurrent operations.
pub static ref POOL_KEEP_LIMIT: usize = cpu::get_num_cpus().clamp(16, 128);
}
/// Default value of CPU budget parameter.
///
/// Dynamic based on CPU size.
///
/// On low CPU systems, we want to reserve the minimal amount of CPUs for other tasks to allow
/// efficient optimization. On high CPU systems we want to reserve more CPUs.
#[inline(always)]
pub fn default_cpu_budget_unallocated(num_cpu: usize) -> isize {
match num_cpu {
0..=2 => 0,
3..=32 => -1,
33..=48 => -2,
49..=64 => -3,
65..=96 => -4,
97..=128 => -6,
num_cpu @ 129.. => -(num_cpu as isize / 16),
}
}
/// Default number of CPUs for HNSW graph building and optimization tasks in general.
///
/// Dynamic based on CPU size.
///
/// Even on high-CPU systems, a value higher than 16 is discouraged. It will most likely not
/// improve performance and is more likely to cause disconnected HNSW graphs.
/// Will be less if currently available CPU budget is lower.
#[inline(always)]
pub fn thread_count_for_hnsw(num_cpu: usize) -> usize {
match num_cpu {
0..=48 => 8.min(num_cpu).max(1),
49..=64 => 12,
65.. => 16,
}
}
/// Number of search threads to use in the search runtime.
///
/// Dynamic based on CPU size.
#[inline(always)]
pub fn search_thread_count(max_search_threads: usize) -> usize {
if max_search_threads != 0 {
return max_search_threads;
}
// At least one thread, but not more than number of CPUs - 1 if there are more than 2 CPU
// Example:
// Num CPU = 1 -> 1 thread
// Num CPU = 2 -> 2 thread - if we use one thread with 2 cpus, its too much un-utilized resources
// Num CPU = 3 -> 2 thread
// Num CPU = 4 -> 3 thread
// Num CPU = 5 -> 4 thread
match cpu::get_num_cpus() {
0..=1 => 1,
2 => 2,
num_cpu @ 3.. => num_cpu - 1,
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/disk.rs | lib/common/common/src/disk.rs | use std::path::{Path, PathBuf};
use fs_err as fs;
use walkdir::WalkDir;
/// How many bytes a directory takes on disk.
///
/// Note: on non-unix systems, this function returns the apparent/logical
/// directory size rather than actual disk usage.
pub fn dir_disk_size(path: impl Into<PathBuf>) -> std::io::Result<u64> {
fn dir_disk_size(mut dir: fs::ReadDir) -> std::io::Result<u64> {
dir.try_fold(0, |acc, file| {
let file = file?;
let metadata = file.metadata()?;
let size = if metadata.is_dir() {
dir_disk_size(fs::read_dir(file.path())?)?
} else {
#[cfg(unix)]
{
const BLOCK_SIZE: u64 = 512; // aka DEV_BSIZE
use std::os::unix::fs::MetadataExt;
metadata.blocks() * BLOCK_SIZE
}
#[cfg(not(unix))]
{
metadata.len()
}
};
Ok(acc + size)
})
}
dir_disk_size(fs::read_dir(path.into())?)
}
/// List all files in the given directory recursively.
///
/// Notes:
/// - a directory must be given
/// - symlinks are considered to be files
pub fn list_files(dir: impl AsRef<Path>) -> std::io::Result<Vec<PathBuf>> {
let dir = dir.as_ref();
if !dir.is_dir() {
return Ok(vec![]);
}
let mut files = Vec::new();
for entry in WalkDir::new(dir).min_depth(1).follow_links(true) {
let entry = entry?;
let file_type = entry.file_type();
if file_type.is_file() || file_type.is_symlink() {
files.push(entry.into_path());
}
}
Ok(files)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/delta_pack.rs | lib/common/common/src/delta_pack.rs | use crate::bitpacking::{BitReader, BitWriter, packed_bits};
/// To simplify value counting, each value should be at least one byte.
/// Otherwise, the count would be ambiguous, e.g., a 2-byte slice of 5-bit
/// values could contain either 2 or 3 values.
const MIN_BITS_PER_VALUE: u8 = u8::BITS as u8;
/// How many bits required to store a value in range
/// `MIN_BITS_PER_VALUE..=u64::BITS`.
const HEADER_BITS: u8 = 6;
/// Pack sequence of u64 into a delta encoded byte array, and then bitpack it
///
/// Max length of the input: 2**32
/// Assume that the input is sorted
/// Output contains 4 bytes of the length of the input, followed by the packed data.
pub fn delta_pack(data: &[u64]) -> Vec<u8> {
let mut deltas = Vec::with_capacity(data.len());
let mut prev = 0;
for &value in data {
deltas.push(value - prev);
prev = value;
}
compress_sequence(&deltas)
}
/// Pack sequence of u64 into a delta encoded byte array, and then bitpack it
///
/// Max length of the input: 2**32
/// DO NOT Assume that the input is sorted
/// Output contains 4 bytes of the length of the input, followed by the packed data.
pub fn compress_sequence(data: &[u64]) -> Vec<u8> {
let length = data.len() as u32;
let mut output = Vec::with_capacity(4);
if length == 0 {
return output;
}
let max_value = *data.iter().max().unwrap();
let bits_per_value = packed_bits(max_value).max(MIN_BITS_PER_VALUE);
let mut writer = BitWriter::new(&mut output);
writer.write(bits_per_value - MIN_BITS_PER_VALUE, HEADER_BITS);
for &value in data {
writer.write(value, bits_per_value);
}
writer.finish();
output
}
pub fn decompress_sequence(data: &[u8]) -> Vec<u64> {
if data.is_empty() {
return Vec::new();
}
let mut result = Vec::new();
let mut reader = BitReader::new(data);
let mut remaining_bits = data.len() * u8::BITS as usize;
reader.set_bits(HEADER_BITS);
let bits_per_value = reader.read::<u8>() + MIN_BITS_PER_VALUE;
remaining_bits -= HEADER_BITS as usize;
reader.set_bits(bits_per_value);
// It might be possible, that some bits are left after reading the header,
// but it is always less than 1 byte and less than bits_per_value
while remaining_bits >= bits_per_value as usize {
result.push(reader.read::<u64>());
remaining_bits -= bits_per_value as usize;
}
result
}
pub fn delta_unpack(data: &[u8]) -> Vec<u64> {
let mut sequence = decompress_sequence(data);
let mut prev = 0;
for value in sequence.iter_mut() {
*value += prev;
prev = *value;
}
sequence
}
#[cfg(test)]
mod tests {
use rand::Rng;
use super::*;
#[test]
fn pack_and_unpack_sorted_data() {
let data = vec![1, 2, 3, 4, 5];
let packed = delta_pack(&data);
let unpacked = delta_unpack(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_unsorted_data() {
let data = vec![5, 3, 1, 4, 2];
let packed = compress_sequence(&data);
let unpacked = decompress_sequence(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_empty_data() {
let data: Vec<u64> = Vec::new();
let packed = delta_pack(&data);
let unpacked = delta_unpack(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_single_element() {
let data = vec![42];
let packed = delta_pack(&data);
let unpacked = delta_unpack(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_large_numbers() {
let data = vec![u64::MAX - 2, u64::MAX - 1, u64::MAX, u64::MAX];
let packed = delta_pack(&data);
let unpacked = delta_unpack(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_large_numbers_unsorted() {
let data = vec![u64::MAX - 2, u64::MAX, u64::MAX, u64::MAX - 1];
let packed = compress_sequence(&data);
let unpacked = decompress_sequence(&packed);
assert_eq!(data, unpacked);
}
#[test]
fn pack_and_unpack_random_data() {
let num_iterations = 100;
let max_length = 100;
let mut rng = rand::rng();
for _ in 0..num_iterations {
let length = rng.random_range(0..max_length);
let mut data = (0..length)
.map(|_| rng.random_range(0..u64::MAX))
.collect::<Vec<_>>();
data.sort();
let packed = delta_pack(&data);
let unpacked = delta_unpack(&packed);
assert_eq!(data, unpacked);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/bitpacking_links.rs | lib/common/common/src/bitpacking_links.rs | use crate::bitpacking::{BitReader, BitWriter, make_bitmask, packed_bits};
/// To simplify value counting, each value should be at least one byte.
/// Otherwise the count could would be ambiguous, e.g., a 2-byte slice of 5-bit
/// values could contain either 2 or 3 values.
pub const MIN_BITS_PER_VALUE: u8 = u8::BITS as u8;
/// How many bits required to store a value in range
/// `MIN_BITS_PER_VALUE..=u32::BITS`.
const HEADER_BITS: u8 = 5;
/// A specialized packer to pack HNSW graph links.
///
/// It assumes that the first `m` (or `m0`) values could be re-ordered for better
/// compression.
///
/// Parameters:
/// - `links` is the output buffer; the packed representation is appended to it.
/// - `bits_per_unsorted` should be enough to store the maximum point ID
///   (it should be the same for all nodes/links within a segment).
/// - `sorted_count` is `m` (or `m0`) for this layer.
/// - `raw_links` is in/out parameter. Input: links to pack, output: same links,
///   but re-ordered.
pub fn pack_links(
    links: &mut Vec<u8>,
    raw_links: &mut [u32],
    bits_per_unsorted: u8,
    sorted_count: usize,
) {
    if raw_links.is_empty() {
        return;
    }
    // Sort and delta-encode the first `sorted_count` links.
    let sorted_count = raw_links.len().min(sorted_count);
    raw_links[..sorted_count].sort_unstable();
    // Walk backwards so each slot is replaced by its delta to the predecessor
    // without clobbering values that are still needed.
    for i in (1..sorted_count).rev() {
        raw_links[i] -= raw_links[i - 1];
    }
    let mut w = BitWriter::new(links);
    if sorted_count != 0 {
        // 1. Header.
        // The widest delta determines the fixed bit width of the sorted section.
        let bits_per_sorted =
            packed_bits(*raw_links[..sorted_count].iter().max().unwrap()).max(MIN_BITS_PER_VALUE);
        w.write(u32::from(bits_per_sorted - MIN_BITS_PER_VALUE), HEADER_BITS);
        // 2. First `sorted_count` values, sorted and delta-encoded.
        // The bit width is determined by the header.
        for &value in &raw_links[..sorted_count] {
            w.write(value, bits_per_sorted);
        }
    }
    // 3. The rest of the values, unsorted.
    for &value in &raw_links[sorted_count..] {
        w.write(value, bits_per_unsorted);
    }
    w.finish();
    // Undo delta-encoding so `raw_links` holds the (re-ordered) original values.
    for i in 1..sorted_count {
        raw_links[i] += raw_links[i - 1];
    }
}
/// Returns an iterator over packed links.
/// See [`pack_links`] for parameter descriptions.
#[inline]
pub fn iterate_packed_links(
    links: &[u8],
    bits_per_unsorted: u8,
    sorted_count: usize,
) -> PackedLinksIterator<'_> {
    let mut reader = BitReader::new(links);
    // Total number of bits in the buffer; counted down as values are read.
    let mut remaining_bits = links.len() * u8::BITS as usize;
    let mut remaining_bits_target = remaining_bits;
    if sorted_count != 0 && !links.is_empty() {
        // 1. Header.
        reader.set_bits(HEADER_BITS);
        let bits_per_sorted = reader.read::<u8>() + MIN_BITS_PER_VALUE;
        remaining_bits -= HEADER_BITS as usize;
        // Prepare for reading sorted values.
        reader.set_bits(bits_per_sorted);
        // `remaining_bits_target` marks the bit position where the sorted
        // section ends. Cap by `max_sorted` since the node may hold fewer
        // than `sorted_count` links in total.
        let max_sorted = remaining_bits / bits_per_sorted as usize;
        remaining_bits_target -= sorted_count.min(max_sorted) * bits_per_sorted as usize;
    } else {
        // Prepare for reading unsorted values.
        reader.set_bits(bits_per_unsorted);
    }
    PackedLinksIterator {
        reader,
        bits_per_unsorted,
        remaining_bits,
        remaining_bits_target,
        current_delta: 0,
    }
}
/// Returns the size in bytes of packed links.
///
/// Used to separate the `data` into `links` (for [`iterate_packed_links()`])
/// and trailing bytes.
pub fn packed_links_size(
    data: &[u8],
    bits_per_unsorted: u8,
    sorted_count: usize,
    total_count: usize,
) -> usize {
    if total_count == 0 {
        return 0;
    }
    let Some(first_byte) = data.first() else {
        return 0;
    };
    let mut total_bits = 0;
    let actual_sorted_count = total_count.min(sorted_count);
    if actual_sorted_count > 0 {
        // Header plus the fixed-width sorted section. The sorted bit width is
        // stored in the low `HEADER_BITS` bits of the first byte.
        total_bits += HEADER_BITS as usize;
        let bits_per_sorted = (first_byte & make_bitmask::<u8>(HEADER_BITS)) + MIN_BITS_PER_VALUE;
        total_bits += actual_sorted_count * bits_per_sorted as usize;
    }
    let unsorted_count = total_count - actual_sorted_count;
    total_bits += unsorted_count * bits_per_unsorted as usize;
    // Packed data occupies a whole number of bytes (the tail is padding).
    total_bits.div_ceil(u8::BITS as usize)
}
/// Iterator over links packed with [`pack_links`].
/// Created by [`iterate_packed_links`].
pub struct PackedLinksIterator<'a> {
    reader: BitReader<'a>,
    /// Fixed bit width of values in the unsorted section.
    bits_per_unsorted: u8,
    /// Bits not yet consumed from the underlying buffer.
    remaining_bits: usize,
    /// Value of `remaining_bits` at which the sorted section ends and
    /// unsorted values begin.
    remaining_bits_target: usize,
    /// Running prefix sum used to undo delta-encoding of the sorted section.
    current_delta: u32,
}
impl PackedLinksIterator<'_> {
    /// Reads the next delta from the sorted section and returns the
    /// reconstructed absolute value.
    #[inline]
    fn next_sorted(&mut self) -> u32 {
        self.current_delta = self.current_delta.wrapping_add(self.reader.read::<u32>());
        self.remaining_bits -= self.reader.bits() as usize;
        self.current_delta
    }
    /// Reads the next value from the unsorted section, or `None` when fewer
    /// than one full value's worth of bits remain (trailing padding).
    #[inline]
    fn next_unsorted(&mut self) -> Option<u32> {
        if let Some(rb) = self.remaining_bits.checked_sub(self.reader.bits() as usize) {
            self.remaining_bits = rb;
            Some(self.reader.read::<u32>())
        } else {
            None
        }
    }
}
impl Iterator for PackedLinksIterator<'_> {
    type Item = u32;
    #[inline]
    fn next(&mut self) -> Option<u32> {
        // While above the target we are still inside the sorted section.
        if self.remaining_bits > self.remaining_bits_target {
            let value = self.next_sorted();
            if self.remaining_bits <= self.remaining_bits_target {
                // It was the last sorted value.
                self.reader.set_bits(self.bits_per_unsorted);
            }
            return Some(value);
        }
        self.next_unsorted()
    }
    /// Optimized [`Iterator::fold()`]. Should be faster than calling
    /// [`Iterator::next()`] in a loop.
    ///
    /// It is used in a hot loop during HNSW search, so performance is critical.
    #[inline]
    fn fold<Acc, F: FnMut(Acc, u32) -> Acc>(mut self, mut acc: Acc, mut f: F) -> Acc {
        while self.remaining_bits > self.remaining_bits_target {
            acc = f(acc, self.next_sorted());
        }
        // Safe even without a sorted section: the width is then already set.
        self.reader.set_bits(self.bits_per_unsorted);
        while let Some(value) = self.next_unsorted() {
            acc = f(acc, value);
        }
        acc
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (sorted, unsorted);
        if let Some(sorted_bits) = self.remaining_bits.checked_sub(self.remaining_bits_target) {
            // `sorted_bits` should already be a multiple of the value width;
            // round up defensively before dividing.
            let sorted_bits = sorted_bits.next_multiple_of(self.reader.bits() as usize);
            sorted = sorted_bits / self.reader.bits() as usize;
            unsorted = (self.remaining_bits - sorted_bits) / self.bits_per_unsorted as usize;
        } else {
            sorted = 0;
            unsorted = self.remaining_bits / self.reader.bits() as usize;
        }
        // Exact bounds; this is what allows the `ExactSizeIterator` impl.
        (sorted + unsorted, Some(sorted + unsorted))
    }
}
impl ExactSizeIterator for PackedLinksIterator<'_> {}
#[cfg(test)]
mod tests {
    use itertools::Itertools as _;
    use rand::rngs::StdRng;
    use rand::{Rng as _, SeedableRng as _};
    use rstest::rstest;
    use super::*;
    use crate::iterator_ext::{check_exact_size_iterator_len, check_iterator_fold};
    /// Shapes of packed-links layouts exercised by the round-trip test.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Cases {
        OnlyUnsorted = 0,
        OnlySorted = 1,
        OnlySortedExact = 2,
        Empty = 3,
        Both = 4,
    }
    #[rstest]
    #[case::only_unsorted(Cases::OnlyUnsorted)]
    #[case::only_sorted(Cases::OnlySorted)]
    #[case::only_sorted_exact(Cases::OnlySortedExact)]
    #[case::empty(Cases::Empty)]
    #[case::both(Cases::Both)]
    fn test_random(#[case] case: Cases) {
        // Distinct seed per case so cases don't share random sequences.
        let mut rng = StdRng::seed_from_u64(42u64.wrapping_add(case as u64));
        for _ in 0..1_000 {
            let (sorted_count, total_count);
            match case {
                Cases::OnlyUnsorted => {
                    sorted_count = 0;
                    total_count = rng.random_range(1..100);
                }
                Cases::OnlySorted => {
                    sorted_count = rng.random_range(2..100);
                    total_count = rng.random_range(1..sorted_count);
                }
                Cases::OnlySortedExact => {
                    sorted_count = rng.random_range(1..100);
                    total_count = sorted_count;
                }
                Cases::Empty => {
                    // NOTE(review): the original comment said "intentionally
                    // not 0", but the range below includes 0 — confirm the
                    // intended lower bound.
                    sorted_count = rng.random_range(0..100);
                    total_count = 0;
                }
                Cases::Both => {
                    sorted_count = rng.random_range(0..100);
                    total_count = rng.random_range(sorted_count..sorted_count + 100);
                }
            }
            let bits_per_unsorted = rng.random_range(MIN_BITS_PER_VALUE..=32);
            let mut raw_links_orig = gen_unique_values(&mut rng, total_count, bits_per_unsorted);
            let mut raw_links_updated = raw_links_orig.clone();
            let mut links = Vec::new();
            pack_links(
                &mut links,
                &mut raw_links_updated,
                bits_per_unsorted,
                sorted_count,
            );
            let packed_len = links.len();
            let mut unpacked = Vec::new();
            let iter = iterate_packed_links(&links, bits_per_unsorted, sorted_count);
            iter.for_each(|value| unpacked.push(value));
            // `pack_links` sorts the sorted prefix in place; mirror that here
            // before comparing against the unpacked output.
            raw_links_orig[..sorted_count.min(total_count)].sort_unstable();
            assert_eq!(raw_links_orig, unpacked);
            assert_eq!(raw_links_updated, unpacked);
            check_iterator_fold(|| iterate_packed_links(&links, bits_per_unsorted, sorted_count));
            check_exact_size_iterator_len(iterate_packed_links(
                &links,
                bits_per_unsorted,
                sorted_count,
            ));
            // `packed_links_size` must report the same length regardless of
            // garbage bytes appended after the packed section.
            for _ in 0..10 {
                let len = packed_links_size(&links, bits_per_unsorted, sorted_count, total_count);
                assert_eq!(len, packed_len);
                links.push(rng.random());
            }
        }
    }
    /// Generate `count` unique values in range `[0, 2^bits)`.
    fn gen_unique_values(rng: &mut StdRng, count: usize, bits: u8) -> Vec<u32> {
        assert!(count <= 1 << bits);
        std::iter::repeat_with(|| rng.random_range(0..1u64 << bits) as u32)
            .unique()
            .take(count)
            .collect::<Vec<u32>>()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/budget.rs | lib/common/common/src/budget.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Duration;
use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};
use tokio::time;
use crate::cpu;
/// Get IO budget to use for optimizations as number of parallel IO operations.
///
/// A zero `io_budget` means "unset": fall back to the CPU budget, so that one
/// IO task can be allocated ahead of each CPU task.
pub fn get_io_budget(io_budget: usize, cpu_budget: usize) -> usize {
    match io_budget {
        0 => cpu_budget,
        explicit => explicit,
    }
}
/// Structure managing global CPU/IO/... budget for optimization tasks.
///
/// Assigns CPU/IO/... permits to tasks to limit overall resource utilization, making optimization
/// workloads more predictable and efficient.
///
/// Cloning is cheap: clones share the same underlying semaphores.
#[derive(Debug, Clone)]
pub struct ResourceBudget {
    /// Semaphore tracking the CPU permits currently available for lease.
    cpu_semaphore: Arc<Semaphore>,
    /// Total CPU budget, available and leased out.
    cpu_budget: usize,
    /// Semaphore tracking the IO permits currently available for lease.
    io_semaphore: Arc<Semaphore>,
    /// Total IO budget, available and leased out.
    io_budget: usize,
}
impl ResourceBudget {
    /// Create a budget with the given total CPU and IO permit counts.
    pub fn new(cpu_budget: usize, io_budget: usize) -> Self {
        Self {
            cpu_semaphore: Arc::new(Semaphore::new(cpu_budget)),
            cpu_budget,
            io_semaphore: Arc::new(Semaphore::new(io_budget)),
            io_budget,
        }
    }
    /// Returns the total CPU budget.
    pub fn available_cpu_budget(&self) -> usize {
        self.cpu_budget
    }
    /// Returns the total IO budget.
    pub fn available_io_budget(&self) -> usize {
        self.io_budget
    }
    /// For the given desired number of CPUs, return the minimum number of required CPUs.
    ///
    /// This is half (rounded up) of the desired amount, capped by the total budget.
    fn min_cpu_permits(&self, desired_cpus: usize) -> usize {
        desired_cpus.min(self.cpu_budget).div_ceil(2)
    }
    /// For the given desired IO count, return the minimum number of required IO permits.
    fn min_io_permits(&self, desired_io: usize) -> usize {
        desired_io.min(self.io_budget).div_ceil(2)
    }
    /// Try to take up to `desired_cpus` CPU permits without waiting.
    ///
    /// Returns `None` if fewer than the minimum required permits are free.
    /// Otherwise returns `(count, permit)`; `permit` is `None` when `count` is 0.
    fn try_acquire_cpu(
        &self,
        desired_cpus: usize,
    ) -> Option<(usize, Option<OwnedSemaphorePermit>)> {
        let min_required_cpus = self.min_cpu_permits(desired_cpus) as u32;
        // Greedily take as many permits as currently free, up to the desired amount.
        let num_cpus = self.cpu_semaphore.available_permits().min(desired_cpus) as u32;
        if num_cpus < min_required_cpus {
            return None;
        }
        let cpu_permit = if num_cpus > 0 {
            let cpu_result =
                Semaphore::try_acquire_many_owned(self.cpu_semaphore.clone(), num_cpus);
            match cpu_result {
                Ok(permit) => Some(permit),
                // Another thread may have taken permits between the
                // `available_permits` check above and this acquisition.
                Err(TryAcquireError::NoPermits) => return None,
                Err(TryAcquireError::Closed) => unreachable!(
                    "Cannot acquire CPU permit because CPU budget semaphore is closed, this should never happen",
                ),
            }
        } else {
            None
        };
        Some((num_cpus as usize, cpu_permit))
    }
    /// Try to take up to `desired_io` IO permits without waiting.
    /// Same semantics as [`Self::try_acquire_cpu`].
    fn try_acquire_io(&self, desired_io: usize) -> Option<(usize, Option<OwnedSemaphorePermit>)> {
        let min_required_io = self.min_io_permits(desired_io) as u32;
        let num_io = self.io_semaphore.available_permits().min(desired_io) as u32;
        if num_io < min_required_io {
            return None;
        }
        let io_permit = if num_io > 0 {
            let io_result = Semaphore::try_acquire_many_owned(self.io_semaphore.clone(), num_io);
            match io_result {
                Ok(permit) => Some(permit),
                Err(TryAcquireError::NoPermits) => return None,
                Err(TryAcquireError::Closed) => unreachable!(
                    "Cannot acquire IO permit because IO budget semaphore is closed, this should never happen",
                ),
            }
        } else {
            None
        };
        Some((num_io as usize, io_permit))
    }
    /// Try to acquire Resources permit for optimization task from global Resource budget.
    ///
    /// The given `desired_cpus` is not exact, but rather a hint on what we'd like to acquire.
    /// - it will prefer to acquire the maximum number of CPUs
    /// - it will never be higher than the total CPU budget
    /// - it will never be lower than `min_permits(desired_cpus)`
    ///
    /// Warn: only one Resource Permit per thread is allowed. Otherwise, it might lead to deadlocks.
    ///
    pub fn try_acquire(&self, desired_cpus: usize, desired_io: usize) -> Option<ResourcePermit> {
        let (num_cpus, cpu_permit) = self.try_acquire_cpu(desired_cpus)?;
        // If IO acquisition fails, `cpu_permit` is dropped by the `?` below,
        // returning the just-acquired CPU permits to the semaphore.
        let (num_io, io_permit) = self.try_acquire_io(desired_io)?;
        Some(ResourcePermit::new(
            num_cpus as u32,
            cpu_permit,
            num_io as u32,
            io_permit,
        ))
    }
    /// Acquire Resources permit for optimization task from global Resource budget.
    ///
    /// This will wait until the required number of permits are available.
    /// This function is blocking.
    pub fn acquire(
        &self,
        desired_cpus: usize,
        desired_io: usize,
        stopped: &AtomicBool,
    ) -> Option<ResourcePermit> {
        // Exponential backoff between attempts, capped at 2 seconds.
        let mut delay = Duration::from_micros(100);
        while !stopped.load(std::sync::atomic::Ordering::Relaxed) {
            if let Some(permit) = self.try_acquire(desired_cpus, desired_io) {
                return Some(permit);
            } else {
                std::thread::sleep(delay);
                delay = (delay * 2).min(Duration::from_secs(2));
            }
        }
        None
    }
    /// Resize an existing permit to the new desired CPU/IO counts.
    ///
    /// Extra permits are acquired (blocking) *before* excess ones are released.
    /// On failure (`stopped` set while waiting), the original permit is
    /// returned unchanged in the `Err` variant.
    pub fn replace_with(
        &self,
        mut permit: ResourcePermit,
        new_desired_cpus: usize,
        new_desired_io: usize,
        stopped: &AtomicBool,
    ) -> Result<ResourcePermit, ResourcePermit> {
        // Make sure we don't exceed the budget, otherwise we might deadlock
        let new_desired_cpus = new_desired_cpus.min(self.cpu_budget);
        let new_desired_io = new_desired_io.min(self.io_budget);
        // Acquire extra resources we don't have yet
        let Some(extra_acquired) = self.acquire(
            new_desired_cpus.saturating_sub(permit.num_cpus as usize),
            new_desired_io.saturating_sub(permit.num_io as usize),
            stopped,
        ) else {
            return Err(permit);
        };
        permit.merge(extra_acquired);
        // Release excess resources we now have
        permit.release(
            permit.num_cpus.saturating_sub(new_desired_cpus as u32),
            permit.num_io.saturating_sub(new_desired_io as u32),
        );
        Ok(permit)
    }
    /// Check if there is enough CPU budget available for the given `desired_cpus`.
    ///
    /// This checks for the minimum number of required permits based on the given desired CPUs,
    /// based on `min_permits`. To check for an exact number, use `has_budget_exact` instead.
    ///
    /// A desired CPU count of `0` will always return `true`.
    pub fn has_budget(&self, desired_cpus: usize, desired_io: usize) -> bool {
        self.has_budget_exact(
            self.min_cpu_permits(desired_cpus),
            self.min_io_permits(desired_io),
        )
    }
    /// Check if there are at least `budget` available CPUs in this budget.
    ///
    /// A budget of `0` will always return `true`.
    pub fn has_budget_exact(&self, cpu_budget: usize, io_budget: usize) -> bool {
        self.cpu_semaphore.available_permits() >= cpu_budget
            && self.io_semaphore.available_permits() >= io_budget
    }
    /// Notify when we have CPU budget available for the given number of desired CPUs.
    ///
    /// This will not resolve until the above condition is met.
    ///
    /// Waits for at least the minimum number of permits based on the given desired CPUs. For
    /// example, if `desired_cpus` is 8, this will wait for at least 4 to be available. See
    /// [`Self::min_cpu_permits`].
    ///
    /// - `1` to wait for any CPU budget to be available.
    /// - `0` will always return immediately.
    ///
    /// Uses an exponential backoff strategy up to 10 seconds to avoid busy polling.
    pub async fn notify_on_budget_available(&self, desired_cpus: usize, desired_io: usize) {
        let min_cpu_required = self.min_cpu_permits(desired_cpus);
        let min_io_required = self.min_io_permits(desired_io);
        if self.has_budget_exact(min_cpu_required, min_io_required) {
            return;
        }
        // Wait for CPU budget to be available with exponential backoff
        // TODO: find better way, don't busy wait
        let mut delay = Duration::from_micros(100);
        while !self.has_budget_exact(min_cpu_required, min_io_required) {
            time::sleep(delay).await;
            delay = (delay * 2).min(Duration::from_secs(10));
        }
    }
}
impl Default for ResourceBudget {
fn default() -> Self {
let cpu_budget = cpu::get_cpu_budget(0);
let io_budget = get_io_budget(0, cpu_budget);
Self::new(cpu_budget, io_budget)
}
}
/// Resource permit, used to limit number of concurrent resource-intensive operations.
/// For example HNSW indexing (which is CPU-bound) can be limited to a certain number of CPUs.
/// Or an I/O-bound operations like segment moving can be limited by I/O permits.
///
/// This permit represents the number of Resources allocated for an operation, so that the operation can
/// respect other parallel workloads. When dropped or `release()`-ed, the Resources are given back for
/// other tasks to acquire.
///
/// These Resource permits are used to better balance and saturate resource utilization.
pub struct ResourcePermit {
    /// Number of CPUs acquired in this permit.
    pub num_cpus: u32,
    /// Semaphore permit backing `num_cpus`; `None` when `num_cpus` is 0.
    cpu_permit: Option<OwnedSemaphorePermit>,
    /// Number of IO permits acquired in this permit.
    pub num_io: u32,
    /// Semaphore permit backing `num_io`; `None` when `num_io` is 0.
    io_permit: Option<OwnedSemaphorePermit>,
    /// A callback, which should be called when the permit is changed manually.
    /// Originally used to notify the task manager that a permit is available
    /// and schedule more optimization tasks.
    ///
    /// WARN: is not called on drop, only when `release()` is called.
    on_manual_release: Option<Box<dyn Fn() + Send + Sync>>,
}
impl ResourcePermit {
    /// New CPU permit with given CPU count and permit semaphore.
    pub fn new(
        cpu_count: u32,
        cpu_permit: Option<OwnedSemaphorePermit>,
        io_count: u32,
        io_permit: Option<OwnedSemaphorePermit>,
    ) -> Self {
        // Debug assert that cpu/io count and permit counts match
        debug_assert!(cpu_permit.as_ref().map_or(0, |p| p.num_permits()) == cpu_count as usize);
        debug_assert!(io_permit.as_ref().map_or(0, |p| p.num_permits()) == io_count as usize);
        Self {
            num_cpus: cpu_count,
            cpu_permit,
            num_io: io_count,
            io_permit,
            on_manual_release: None,
        }
    }
    /// Register the callback invoked by [`Self::release`] (not by drop).
    pub fn set_on_manual_release(&mut self, on_release: impl Fn() + Send + Sync + 'static) {
        self.on_manual_release = Some(Box::new(on_release));
    }
    /// Merge the other resource permit into this one
    pub fn merge(&mut self, mut other: Self) {
        self.num_cpus += other.num_cpus;
        self.num_io += other.num_io;
        // Merge optional semaphore permits
        // NOTE(review): tokio's `OwnedSemaphorePermit::merge` assumes both
        // permits come from the same semaphore — confirm callers only merge
        // permits from the same budget.
        self.cpu_permit = match (self.cpu_permit.take(), other.cpu_permit.take()) {
            (Some(mut permit), Some(other_permit)) => {
                permit.merge(other_permit);
                Some(permit)
            }
            (permit @ Some(_), None) | (None, permit @ Some(_)) => permit,
            (None, None) => None,
        };
        self.io_permit = match (self.io_permit.take(), other.io_permit.take()) {
            (Some(mut permit), Some(other_permit)) => {
                permit.merge(other_permit);
                Some(permit)
            }
            (permit @ Some(_), None) | (None, permit @ Some(_)) => permit,
            (None, None) => None,
        };
        // Debug assert that cpu/io count and permit counts match
        debug_assert!(
            self.cpu_permit.as_ref().map_or(0, |p| p.num_permits()) == self.num_cpus as usize,
        );
        debug_assert!(
            self.io_permit.as_ref().map_or(0, |p| p.num_permits()) == self.num_io as usize,
        );
    }
    /// New CPU permit with given CPU count without a backing semaphore for a shared pool.
    #[cfg(feature = "testing")]
    pub fn dummy(count: u32) -> Self {
        Self {
            num_cpus: count,
            cpu_permit: None,
            num_io: 0,
            io_permit: None,
            on_manual_release: None,
        }
    }
    /// Release CPU permit, giving them back to the semaphore.
    fn release_cpu(&mut self) {
        self.num_cpus = 0;
        // Dropping the taken permit returns it to the semaphore.
        self.cpu_permit.take();
    }
    /// Release IO permit, giving them back to the semaphore.
    fn release_io(&mut self) {
        self.num_io = 0;
        self.io_permit.take();
    }
    /// Partial release CPU permit, giving them back to the semaphore.
    fn release_cpu_count(&mut self, release_count: u32) {
        if release_count == 0 {
            return;
        }
        if self.num_cpus > release_count {
            self.num_cpus -= release_count;
            // Keep `num_cpus` permits via `split`; the remainder stays in the
            // local `permit` binding and is dropped here, which returns it to
            // the semaphore.
            let permit = self.cpu_permit.take();
            self.cpu_permit = permit.and_then(|mut permit| permit.split(self.num_cpus as usize));
        } else {
            self.release_cpu();
        }
    }
    /// Partial release IO permit, giving them back to the semaphore.
    fn release_io_count(&mut self, release_count: u32) {
        if release_count == 0 {
            return;
        }
        if self.num_io > release_count {
            self.num_io -= release_count;
            let permit = self.io_permit.take();
            self.io_permit = permit.and_then(|mut permit| permit.split(self.num_io as usize));
        } else {
            self.release_io();
        }
    }
    /// Release the given number of CPU and IO permits back to their semaphores
    /// and invoke the manual-release callback, if one is set.
    pub fn release(&mut self, cpu: u32, io: u32) {
        self.release_cpu_count(cpu);
        self.release_io_count(io);
        if let Some(on_release) = &self.on_manual_release {
            on_release();
        }
    }
}
impl Drop for ResourcePermit {
    fn drop(&mut self) {
        // Destructure so adding a field to `ResourcePermit` forces this Drop
        // impl to be revisited.
        let Self {
            num_cpus: _,
            cpu_permit,
            num_io: _,
            io_permit,
            on_manual_release: _, // Only explicit release() should call the callback
        } = self;
        // Dropping the permits returns them to their semaphores.
        let _ = cpu_permit.take();
        let _ = io_permit.take();
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
/// A 3-byte little-endian unsigned integer, representing `0..=0xFF_FFFF`.
///
/// NOTE(review): the derived `Ord`/`PartialOrd` compare the little-endian
/// bytes lexicographically, which differs from numeric order — confirm no
/// caller relies on numeric ordering.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct U24([u8; 3]);

impl U24 {
    /// Largest value representable in 24 bits.
    pub const MAX: u32 = 0xFFFFFF;

    /// Construct from a `u32`, silently discarding the high byte.
    #[inline]
    pub const fn new_wrapped(value: u32) -> Self {
        Self([value as u8, (value >> 8) as u8, (value >> 16) as u8])
    }

    /// Widen back to a `u32`.
    #[inline]
    pub const fn get(self) -> u32 {
        (self.0[0] as u32) | ((self.0[1] as u32) << 8) | ((self.0[2] as u32) << 16)
    }
}
impl TryFrom<u32> for U24 {
    type Error = ();

    /// Fallible narrowing; errors when `value` exceeds [`U24::MAX`].
    #[inline]
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        if value > Self::MAX {
            return Err(());
        }
        let [b0, b1, b2, _] = value.to_le_bytes();
        Ok(Self([b0, b1, b2]))
    }
}
impl From<U24> for u32 {
    /// Infallible widening conversion (zero-extends the three bytes).
    #[inline]
    fn from(value: U24) -> Self {
        u32::from_le_bytes([value.0[0], value.0[1], value.0[2], 0])
    }
}
#[cfg(test)]
mod tests {
    pub use super::*;

    #[test]
    fn test_u24() {
        // Wrapping construction drops the high byte.
        let wrapped = U24::new_wrapped(0x13_57_9A_BC);
        assert_eq!(wrapped.get(), 0x57_9A_BC);
        // Checked conversion rejects values above 24 bits...
        assert!(U24::try_from(0x13_57_9A_BC).is_err());
        // ...and round-trips values that fit.
        assert_eq!(U24::try_from(0x57_9A_BC).map(U24::get), Ok(0x57_9A_BC));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/hardware_accumulator.rs | lib/common/common/src/counter/hardware_accumulator.rs | use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use super::hardware_counter::HardwareCounterCell;
use super::hardware_data::HardwareData;
/// Data structure, that routes hardware measurement counters to specific location.
/// Shared drain MUST NOT create its own counters, but only hold a reference to the existing one,
/// as it doesn't provide any checks on drop.
#[derive(Debug)]
pub struct HwSharedDrain {
    /// Accumulated CPU measurement.
    pub(crate) cpu_counter: Arc<AtomicUsize>,
    /// Accumulated payload IO read measurement.
    pub(crate) payload_io_read_counter: Arc<AtomicUsize>,
    /// Accumulated payload IO write measurement.
    pub(crate) payload_io_write_counter: Arc<AtomicUsize>,
    /// Accumulated payload-index IO read measurement.
    pub(crate) payload_index_io_read_counter: Arc<AtomicUsize>,
    /// Accumulated payload-index IO write measurement.
    pub(crate) payload_index_io_write_counter: Arc<AtomicUsize>,
    /// Accumulated vector IO read measurement.
    pub(crate) vector_io_read_counter: Arc<AtomicUsize>,
    /// Accumulated vector IO write measurement.
    pub(crate) vector_io_write_counter: Arc<AtomicUsize>,
}
impl HwSharedDrain {
    // All getters read the current accumulated value; all accesses use
    // `Ordering::Relaxed` — no ordering between counters is enforced.
    pub fn get_cpu(&self) -> usize {
        self.cpu_counter.load(Ordering::Relaxed)
    }
    pub fn get_payload_io_read(&self) -> usize {
        self.payload_io_read_counter.load(Ordering::Relaxed)
    }
    pub fn get_payload_io_write(&self) -> usize {
        self.payload_io_write_counter.load(Ordering::Relaxed)
    }
    pub fn get_payload_index_io_read(&self) -> usize {
        self.payload_index_io_read_counter.load(Ordering::Relaxed)
    }
    pub fn get_payload_index_io_write(&self) -> usize {
        self.payload_index_io_write_counter.load(Ordering::Relaxed)
    }
    pub fn get_vector_io_write(&self) -> usize {
        self.vector_io_write_counter.load(Ordering::Relaxed)
    }
    pub fn get_vector_io_read(&self) -> usize {
        self.vector_io_read_counter.load(Ordering::Relaxed)
    }
    /// Accumulates all values from `src` into this HwSharedDrain.
    fn accumulate_from_hw_data(&self, src: HardwareData) {
        // Destructure so a newly added counter field cannot be forgotten here.
        let HwSharedDrain {
            cpu_counter,
            payload_io_read_counter,
            payload_io_write_counter,
            payload_index_io_read_counter,
            payload_index_io_write_counter,
            vector_io_read_counter,
            vector_io_write_counter,
        } = self;
        cpu_counter.fetch_add(src.cpu, Ordering::Relaxed);
        payload_io_read_counter.fetch_add(src.payload_io_read, Ordering::Relaxed);
        payload_io_write_counter.fetch_add(src.payload_io_write, Ordering::Relaxed);
        payload_index_io_read_counter.fetch_add(src.payload_index_io_read, Ordering::Relaxed);
        payload_index_io_write_counter.fetch_add(src.payload_index_io_write, Ordering::Relaxed);
        vector_io_read_counter.fetch_add(src.vector_io_read, Ordering::Relaxed);
        vector_io_write_counter.fetch_add(src.vector_io_write, Ordering::Relaxed);
    }
}
impl Clone for HwSharedDrain {
fn clone(&self) -> Self {
HwSharedDrain {
cpu_counter: self.cpu_counter.clone(),
payload_io_read_counter: self.payload_io_read_counter.clone(),
payload_io_write_counter: self.payload_io_write_counter.clone(),
payload_index_io_read_counter: self.payload_index_io_read_counter.clone(),
payload_index_io_write_counter: self.payload_index_io_write_counter.clone(),
vector_io_read_counter: self.vector_io_read_counter.clone(),
vector_io_write_counter: self.vector_io_write_counter.clone(),
}
}
}
impl Default for HwSharedDrain {
    /// A fresh drain with every counter starting at zero.
    fn default() -> Self {
        let zero = || Arc::new(AtomicUsize::new(0));
        Self {
            cpu_counter: zero(),
            payload_io_read_counter: zero(),
            payload_io_write_counter: zero(),
            payload_index_io_read_counter: zero(),
            payload_index_io_write_counter: zero(),
            vector_io_read_counter: zero(),
            vector_io_write_counter: zero(),
        }
    }
}
/// A "slow" but thread-safe accumulator for measurement results of `HardwareCounterCell` values.
/// This type is completely reference counted and clones of this type will read/write the same values as their origin structure.
#[derive(Debug)]
pub struct HwMeasurementAcc {
request_drain: HwSharedDrain,
metrics_drain: HwSharedDrain,
/// If this is set to true, the accumulator will not accumulate any values.
disposable: bool,
}
impl HwMeasurementAcc {
    /// Fresh accumulator with private drains (testing only).
    #[cfg(feature = "testing")]
    pub fn new() -> Self {
        Self {
            request_drain: HwSharedDrain::default(),
            metrics_drain: HwSharedDrain::default(),
            disposable: false,
        }
    }
    /// Create a disposable accumulator, which will not accumulate any values.
    /// WARNING: This is intended for specific internal use-cases only.
    /// DO NOT use it in tests or if you don't know what you're doing.
    pub fn disposable() -> Self {
        Self {
            request_drain: HwSharedDrain::default(),
            metrics_drain: HwSharedDrain::default(),
            disposable: true,
        }
    }
    pub fn is_disposable(&self) -> bool {
        self.disposable
    }
    /// Returns a new `HardwareCounterCell` that accumulates its measurements to the same parent as this `HwMeasurementAcc`.
    pub fn get_counter_cell(&self) -> HardwareCounterCell {
        HardwareCounterCell::new_with_accumulator(self.clone())
    }
    /// New accumulator draining into the given metrics drain; the request
    /// drain starts fresh.
    pub fn new_with_metrics_drain(metrics_drain: HwSharedDrain) -> Self {
        Self {
            request_drain: HwSharedDrain::default(),
            metrics_drain,
            disposable: false,
        }
    }
    /// Accumulate `src` into both the request drain and the metrics drain.
    pub fn accumulate<T: Into<HardwareData>>(&self, src: T) {
        let src = src.into();
        self.request_drain.accumulate_from_hw_data(src);
        self.metrics_drain.accumulate_from_hw_data(src);
    }
    /// Accumulate usage values for request drain only.
    /// This is useful if we want to report usage, which happened on another machine,
    /// so we don't want to accumulate the same usage on the current machine a second time.
    pub fn accumulate_request<T: Into<HardwareData>>(&self, src: T) {
        let src = src.into();
        self.request_drain.accumulate_from_hw_data(src);
    }
    // The getters below all report values from the request drain.
    pub fn get_cpu(&self) -> usize {
        self.request_drain.get_cpu()
    }
    pub fn get_payload_io_read(&self) -> usize {
        self.request_drain.get_payload_io_read()
    }
    pub fn get_payload_io_write(&self) -> usize {
        self.request_drain.get_payload_io_write()
    }
    pub fn get_payload_index_io_read(&self) -> usize {
        self.request_drain.get_payload_index_io_read()
    }
    pub fn get_payload_index_io_write(&self) -> usize {
        self.request_drain.get_payload_index_io_write()
    }
    pub fn get_vector_io_read(&self) -> usize {
        self.request_drain.get_vector_io_read()
    }
    pub fn get_vector_io_write(&self) -> usize {
        self.request_drain.get_vector_io_write()
    }
    /// Snapshot of all request-drain counters as plain values.
    pub fn hw_data(&self) -> HardwareData {
        let HwSharedDrain {
            cpu_counter,
            payload_io_read_counter,
            payload_io_write_counter,
            payload_index_io_read_counter,
            payload_index_io_write_counter,
            vector_io_read_counter,
            vector_io_write_counter,
        } = &self.request_drain;
        HardwareData {
            cpu: cpu_counter.load(Ordering::Relaxed),
            payload_io_read: payload_io_read_counter.load(Ordering::Relaxed),
            payload_io_write: payload_io_write_counter.load(Ordering::Relaxed),
            vector_io_read: vector_io_read_counter.load(Ordering::Relaxed),
            vector_io_write: vector_io_write_counter.load(Ordering::Relaxed),
            payload_index_io_read: payload_index_io_read_counter.load(Ordering::Relaxed),
            payload_index_io_write: payload_index_io_write_counter.load(Ordering::Relaxed),
        }
    }
}
#[cfg(feature = "testing")]
impl Default for HwMeasurementAcc {
    // Defaults to a fresh, non-disposable accumulator (testing builds only).
    fn default() -> Self {
        Self::new()
    }
}
impl Clone for HwMeasurementAcc {
    /// Clones observe and update the same underlying drains as the original.
    fn clone(&self) -> Self {
        let Self {
            request_drain,
            metrics_drain,
            disposable,
        } = self;
        Self {
            request_drain: request_drain.clone(),
            metrics_drain: metrics_drain.clone(),
            disposable: *disposable,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/iterator_hw_measurement.rs | lib/common/common/src/counter/iterator_hw_measurement.rs | use super::conditioned_counter::ConditionedCounter;
use super::counter_cell::{CounterCell, OptionalCounterCell};
use super::hardware_accumulator::HwMeasurementAcc;
use super::hardware_counter::HardwareCounterCell;
use crate::iterator_ext::on_final_count::OnFinalCount;
pub trait HwMeasurementIteratorExt: Iterator {
    /// Measures the hardware usage of an iterator.
    ///
    /// The counter is incremented once, with the final iteration count,
    /// via the [`OnFinalCount`] adapter.
    ///
    /// # Arguments
    /// - `hw_acc`: accumulator holding a counter cell
    /// - `multiplier`: multiplies the number of iterations by this factor.
    /// - `f`: Closure to get the specific counter to increase from the cell inside the accumulator.
    fn measure_hw_with_acc<R>(
        self,
        hw_acc: HwMeasurementAcc,
        multiplier: usize,
        mut f: R,
    ) -> OnFinalCount<Self, impl FnMut(usize)>
    where
        Self: Sized,
        R: FnMut(&HardwareCounterCell) -> &CounterCell,
    {
        OnFinalCount::new(self, move |total_count| {
            let hw_counter = hw_acc.get_counter_cell();
            f(&hw_counter).incr_delta(total_count * multiplier);
        })
    }
    /// Measures the hardware usage of an iterator.
    ///
    /// # Arguments
    /// - `cc`: Condition counter to write the measurements into.
    /// - `multiplier`: multiplies the number of iterations by this factor.
    /// - `f`: Closure to get the specific counter to increase from the cell inside the accumulator.
    fn measure_hw_with_condition_cell<R>(
        self,
        cc: ConditionedCounter,
        multiplier: usize,
        mut f: R,
    ) -> OnFinalCount<Self, impl FnMut(usize)>
    where
        Self: Sized,
        R: for<'a> FnMut(&'a ConditionedCounter<'a>) -> OptionalCounterCell<'a>,
    {
        OnFinalCount::new(self, move |total_count| {
            f(&cc).incr_delta(total_count * multiplier);
        })
    }
    /// Measures the hardware usage of an iterator.
    ///
    /// # Arguments
    /// - `hw_cell`: counter cell
    /// - `multiplier`: multiplies the number of iterations by this factor.
    /// - `f`: Closure to get the specific counter to increase from `hw_cell`.
    fn measure_hw_with_cell<R>(
        self,
        hw_cell: &HardwareCounterCell,
        multiplier: usize,
        mut f: R,
    ) -> OnFinalCount<Self, impl FnMut(usize)>
    where
        Self: Sized,
        R: FnMut(&HardwareCounterCell) -> &CounterCell,
    {
        OnFinalCount::new(self, move |total_count| {
            f(hw_cell).incr_delta(total_count * multiplier);
        })
    }
    /// Measures the hardware usage of an iterator with the size of a single value being represented as a fraction.
    ///
    /// Note: integer division — totals below `fraction` round down to zero.
    fn measure_hw_with_acc_and_fraction<R>(
        self,
        hw_acc: HwMeasurementAcc,
        fraction: usize,
        mut f: R,
    ) -> OnFinalCount<Self, impl FnMut(usize)>
    where
        Self: Sized,
        R: FnMut(&HardwareCounterCell) -> &CounterCell,
    {
        OnFinalCount::new(self, move |total_count| {
            let hw_counter = hw_acc.get_counter_cell();
            f(&hw_counter).incr_delta(total_count / fraction);
        })
    }
    /// Measures the hardware usage of an iterator with the size of a single value being represented as a fraction.
    ///
    /// Note: integer division — totals below `fraction` round down to zero.
    fn measure_hw_with_cell_and_fraction<R>(
        self,
        hw_cell: &HardwareCounterCell,
        fraction: usize,
        mut f: R,
    ) -> OnFinalCount<Self, impl FnMut(usize)>
    where
        Self: Sized,
        R: FnMut(&HardwareCounterCell) -> &CounterCell,
    {
        OnFinalCount::new(self, move |total_count| {
            f(hw_cell).incr_delta(total_count / fraction);
        })
    }
}
impl<I: Iterator> HwMeasurementIteratorExt for I {}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/conditioned_counter.rs | lib/common/common/src/counter/conditioned_counter.rs | use super::counter_cell::{CounterCell, OptionalCounterCell};
use super::hardware_accumulator::HwMeasurementAcc;
use super::hardware_counter::HardwareCounterCell;
/// A counter that measures or disposes measurements based on a condition.
/// This is needed in places where we need to decide at runtime whether to measure or not.
#[derive(Copy, Clone)]
pub struct ConditionedCounter<'a> {
    // `Some` => forward measurements to this cell; `None` => silently drop them.
    parent: Option<&'a HardwareCounterCell>,
}
impl<'a> ConditionedCounter<'a> {
    /// Measures into `parent` when `condition` is true, otherwise discards all measurements.
    pub fn new(condition: bool, parent: &'a HardwareCounterCell) -> Self {
        Self {
            parent: condition.then_some(parent),
        }
    }

    /// Never measure hardware.
    pub fn never() -> Self {
        Self { parent: None }
    }

    /// Always measure hardware.
    pub fn always(parent: &'a HardwareCounterCell) -> Self {
        Self {
            parent: Some(parent),
        }
    }

    #[inline]
    pub fn cpu_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.cpu_counter()))
    }

    #[inline]
    pub fn payload_io_read_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.payload_io_read_counter()))
    }

    #[inline]
    pub fn payload_index_io_read_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.payload_index_io_read_counter()))
    }

    #[inline]
    pub fn payload_index_io_write_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.payload_index_io_write_counter()))
    }

    #[inline]
    pub fn payload_io_write_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.payload_io_write_counter()))
    }

    #[inline]
    pub fn vector_io_read(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.vector_io_read()))
    }

    #[inline]
    pub fn vector_io_write_counter(&self) -> OptionalCounterCell<'_> {
        OptionalCounterCell::new(self.parent.map(|p| p.vector_io_write_counter()))
    }

    /// A new accumulator tied to the parent cell, or a disposable one when measuring is off.
    #[inline]
    pub fn new_accumulator(&self) -> HwMeasurementAcc {
        self.parent
            .map(|p| p.new_accumulator())
            .unwrap_or_else(HwMeasurementAcc::disposable)
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_conditioned_counter_empty() {
        let parent = HardwareCounterCell::new();
        {
            let condition = false;
            let cc = ConditionedCounter::new(condition, &parent);
            // Disabled condition: the increment must be disposed of entirely.
            cc.cpu_counter().incr_delta(5);
        }
        assert_eq!(parent.cpu_counter().get(), 0);
        assert_eq!(parent.accumulator.as_ref().unwrap().get_cpu(), 0);
    }

    #[test]
    fn test_conditioned_counter_enabled() {
        let parent = HardwareCounterCell::new();
        {
            let cc = ConditionedCounter::always(&parent);
            cc.cpu_counter().incr_delta(5);
        }
        assert_eq!(parent.cpu_counter().get(), 5);
        let parent_acc = parent.new_accumulator();
        drop(parent); // Parents accumulator gets written after `parent` drops.
        assert_eq!(parent_acc.get_cpu(), 5);
    }

    #[test]
    fn test_conditioned_counter_enabled_into_hw_acc() {
        let parent = HardwareCounterCell::new();
        {
            let cc = ConditionedCounter::always(&parent);
            // Indirect counting, a possible scenario.
            cc.new_accumulator() // Cell->Acc
                .get_counter_cell() // Acc->Cell
                .cpu_counter()
                .incr_delta(5);
        }
        assert_eq!(parent.cpu_counter().get(), 0); // Parents accumulator gets written, not the counter cell!
        assert_eq!(parent.accumulator.as_ref().unwrap().get_cpu(), 5);
    }

    #[test]
    fn test_conditioned_counter_enabled_parent_hw_acc() {
        let parent = HwMeasurementAcc::new();
        {
            let cell = parent.get_counter_cell();
            let cc = ConditionedCounter::always(&cell);
            // Indirect counting, a possible scenario.
            cc.new_accumulator() // Cell->Acc
                .get_counter_cell() // Acc->Cell
                .cpu_counter()
                .incr_delta(5);
        }
        assert_eq!(parent.get_cpu(), 5);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/counter_cell.rs | lib/common/common/src/counter/counter_cell.rs | use std::cell::Cell;
/// A cheap counter that can be incremented through a shared reference.
///
/// Backed by a [`Cell`], so it is neither `Send` nor `Sync` and must stay on one thread.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CounterCell {
    counter: Cell<usize>,
}

impl CounterCell {
    /// Creates a counter starting at 0.
    pub fn new() -> Self {
        Self::new_with(0)
    }

    /// Creates a counter starting at `init`.
    pub fn new_with(init: usize) -> Self {
        Self {
            counter: Cell::new(init),
        }
    }

    /// Current value of the counter.
    #[inline]
    pub fn get(&self) -> usize {
        self.counter.get()
    }

    /// Overwrites the counter with `new_value`.
    #[inline]
    pub fn set(&self, new_value: usize) {
        self.counter.set(new_value);
    }

    /// Increments the counter by one.
    /// If you have mutable access to the counter, prefer incrementing directly.
    #[inline]
    pub fn incr(&self) {
        self.incr_delta(1);
    }

    /// Increments the counter by `delta`.
    #[inline]
    pub fn incr_delta(&self, delta: usize) {
        let current = self.counter.get();
        self.counter.set(current + delta);
    }

    /// Scales the counter value by `amount`.
    #[inline]
    pub fn multiplied(&self, amount: usize) {
        let current = self.counter.get();
        self.counter.set(current * amount);
    }

    /// Resets the counter to 0.
    pub fn clear(&self) {
        self.set(0);
    }

    /// Returns the current value and leaves 0 in its place.
    pub fn take(&self) -> usize {
        self.counter.take()
    }

    /// Creates a write-back counter for best performance possible.
    /// For more information on when and why to use, see [`WritebackCounterCell`].
    #[inline]
    pub fn write_back_counter(&self) -> WritebackCounterCell<'_> {
        WritebackCounterCell::new(self)
    }
}

/// A counter that may be absent; every operation on a missing counter is a no-op.
pub struct OptionalCounterCell<'a> {
    counter: Option<&'a CounterCell>,
}

impl<'a> OptionalCounterCell<'a> {
    #[inline]
    pub fn new(counter: Option<&'a CounterCell>) -> Self {
        Self { counter }
    }

    /// Current value of the counter, or 0 when no counter is attached.
    #[inline]
    pub fn get(&self) -> usize {
        match self.counter {
            Some(cell) => cell.get(),
            None => 0,
        }
    }

    /// Overwrites the underlying counter, if any.
    #[inline]
    pub fn set(&self, new_value: usize) {
        if let Some(cell) = self.counter {
            cell.set(new_value);
        }
    }

    /// Increments by one (no-op when absent).
    #[inline]
    pub fn incr(&self) {
        self.incr_delta(1);
    }

    /// Increments by `delta` (no-op when absent).
    #[inline]
    pub fn incr_delta(&self, delta: usize) {
        self.set(self.get() + delta);
    }
}

/// Accumulates increments in a plain `usize` and writes the sum back into the
/// originating [`CounterCell`] when dropped.
///
/// Use this in loops and hot paths: bumping a local integer is roughly twice as
/// cheap as going through the `Cell` (read, add, write) on every iteration, and
/// the drop-based flush still guarantees that early returns or errors do not
/// lose measurements.
///
/// Avoid it when the same scope needs to read the original cell directly, since
/// the accumulated value only becomes visible after the write-back on drop;
/// readings should normally happen via the initial `HwMeasurementAcc` anyway.
pub struct WritebackCounterCell<'a> {
    cell: &'a CounterCell,
    counter: usize,
}

impl Drop for WritebackCounterCell<'_> {
    #[inline]
    fn drop(&mut self) {
        // Flush the locally accumulated delta into the backing cell.
        self.cell.incr_delta(self.counter);
    }
}

impl<'a> WritebackCounterCell<'a> {
    #[inline]
    fn new(cell: &'a CounterCell) -> Self {
        Self { cell, counter: 0 }
    }

    #[inline]
    pub fn incr_delta(&mut self, delta: usize) {
        self.counter += delta;
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_write_back_counter() {
        let cell = CounterCell::new();
        {
            let mut wb_counter = cell.write_back_counter();
            wb_counter.incr_delta(4);
            // Not flushed yet: the write-back only happens when the wrapper drops.
            assert_eq!(cell.get(), 0);
        }
        assert_eq!(cell.get(), 4);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/hardware_data.rs | lib/common/common/src/counter/hardware_data.rs | use std::ops::Add;
/// Plain value bag for all hardware metrics. Only serves as value holding
/// structure without any semantics beyond element-wise addition.
#[derive(Copy, Clone, Default)]
pub struct HardwareData {
    pub cpu: usize,
    pub payload_io_read: usize,
    pub payload_io_write: usize,
    pub vector_io_read: usize,
    pub vector_io_write: usize,
    pub payload_index_io_read: usize,
    pub payload_index_io_write: usize,
}

impl Add for HardwareData {
    type Output = HardwareData;

    /// Element-wise sum of two measurement snapshots.
    fn add(self, rhs: Self) -> Self::Output {
        // Exhaustive destructuring: adding a field without summing it here
        // becomes a compile error instead of a silently dropped metric.
        let HardwareData {
            cpu,
            payload_io_read,
            payload_io_write,
            vector_io_read,
            vector_io_write,
            payload_index_io_read,
            payload_index_io_write,
        } = rhs;
        Self {
            cpu: self.cpu + cpu,
            payload_io_read: self.payload_io_read + payload_io_read,
            payload_io_write: self.payload_io_write + payload_io_write,
            vector_io_read: self.vector_io_read + vector_io_read,
            vector_io_write: self.vector_io_write + vector_io_write,
            payload_index_io_read: self.payload_index_io_read + payload_index_io_read,
            payload_index_io_write: self.payload_index_io_write + payload_index_io_write,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/referenced_counter.rs | lib/common/common/src/counter/referenced_counter.rs | use std::ops::Deref;
use super::counter_cell::CounterCell;
use super::hardware_counter::HardwareCounterCell;
/// Referenced hw counter for a single metric of a `HardwareCounterCell`. Can be used to pass a single metric type (eg. only cpu)
/// to a function that needs measurement but depending on the context might measure a different metric.
/// This is currently the case in GridStore as it is used for payloads and sparse vectors.
#[derive(Copy, Clone)]
pub struct HwMetricRefCounter<'a> {
    counter: &'a CounterCell,
}

impl<'a> HwMetricRefCounter<'a> {
    // Private on purpose: construction goes through the `ref_*` methods on
    // `HardwareCounterCell` below, which select the concrete metric.
    fn new(counter: &'a CounterCell) -> Self {
        Self { counter }
    }
}

impl Deref for HwMetricRefCounter<'_> {
    type Target = CounterCell;

    // Expose the full `CounterCell` API through the wrapper.
    fn deref(&self) -> &Self::Target {
        self.counter
    }
}
// Implement referenced functions here to prevent exposing `HwMetricRefCounter::new()`.
// NOTE(review): only the metrics needed so far have `ref_*` accessors; add more on demand.
impl HardwareCounterCell {
    #[inline]
    pub fn ref_payload_io_write_counter(&self) -> HwMetricRefCounter<'_> {
        HwMetricRefCounter::new(&self.payload_io_write_counter)
    }

    #[inline]
    pub fn ref_payload_io_read_counter(&self) -> HwMetricRefCounter<'_> {
        HwMetricRefCounter::new(&self.payload_io_read_counter)
    }

    #[inline]
    pub fn ref_vector_io_write_counter(&self) -> HwMetricRefCounter<'_> {
        HwMetricRefCounter::new(&self.vector_io_write_counter)
    }

    #[inline]
    pub fn ref_payload_index_io_write_counter(&self) -> HwMetricRefCounter<'_> {
        HwMetricRefCounter::new(&self.payload_index_io_write_counter)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/mod.rs | lib/common/common/src/counter/mod.rs | pub mod conditioned_counter;
pub mod counter_cell;
pub mod hardware_accumulator;
pub mod hardware_counter;
pub mod hardware_data;
pub mod iterator_hw_measurement;
pub mod referenced_counter;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/counter/hardware_counter.rs | lib/common/common/src/counter/hardware_counter.rs | use super::counter_cell::CounterCell;
use super::hardware_accumulator::HwMeasurementAcc;
use super::hardware_data::HardwareData;
/// Collection of different types of hardware measurements.
///
/// To ensure we don't miss consuming measurements, this struct will cause a panic on drop in tests and debug mode
/// if it still holds values and checking is not disabled using eg. `unchecked()`.
/// In release mode it'll only log a warning in this case.
// NOTE(review): the `Drop` impl visible in this file only merges into the
// accumulator; confirm the panic-on-drop behavior described above still exists.
#[derive(Debug)]
pub struct HardwareCounterCell {
    // Factor applied to raw vector IO reads when snapshotting (see `get_hw_data`).
    vector_io_read_multiplier: usize,
    // Factor applied to the raw CPU counter when snapshotting (see `get_hw_data`).
    cpu_multiplier: usize,
    cpu_counter: CounterCell,
    pub(super) payload_io_read_counter: CounterCell,
    pub(super) payload_io_write_counter: CounterCell,
    pub(super) payload_index_io_read_counter: CounterCell,
    pub(super) payload_index_io_write_counter: CounterCell,
    pub(super) vector_io_read_counter: CounterCell,
    pub(super) vector_io_write_counter: CounterCell,
    // Where counters get merged on drop; `None` means measurements are discarded.
    pub(super) accumulator: Option<HwMeasurementAcc>,
}
#[cfg(feature = "testing")]
impl std::fmt::Display for HardwareCounterCell {
    /// Human-readable dump of all raw counter values (testing builds only).
    ///
    /// Fix: the original output omitted `payload_index_io_write`, so one of the
    /// seven tracked counters was invisible in debug output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "HardwareCounterCell {{ cpu: {}, payload_io_read: {}, payload_io_write: {}, payload_index_io_read: {}, payload_index_io_write: {}, vector_io_read: {}, vector_io_write: {} }}",
            self.cpu_counter.get(),
            self.payload_io_read_counter.get(),
            self.payload_io_write_counter.get(),
            self.payload_index_io_read_counter.get(),
            self.payload_index_io_write_counter.get(),
            self.vector_io_read_counter.get(),
            self.vector_io_write_counter.get()
        )
    }
}
impl HardwareCounterCell {
#[cfg(feature = "testing")]
pub fn new() -> Self {
Self {
vector_io_read_multiplier: 1,
cpu_multiplier: 1,
cpu_counter: CounterCell::new(),
payload_io_read_counter: CounterCell::new(),
payload_io_write_counter: CounterCell::new(),
payload_index_io_read_counter: CounterCell::new(),
payload_index_io_write_counter: CounterCell::new(),
vector_io_read_counter: CounterCell::new(),
vector_io_write_counter: CounterCell::new(),
accumulator: Some(HwMeasurementAcc::new()),
}
}
/// Create a new `HardwareCounterCell` that doesn't report usage anywhere.
/// WARNING: This is intended for specific internal operations only.
/// Do not use it tests or if you don't know what you're doing.
pub fn disposable() -> Self {
Self {
vector_io_read_multiplier: 1,
cpu_multiplier: 1,
cpu_counter: CounterCell::new(),
payload_io_read_counter: CounterCell::new(),
payload_io_write_counter: CounterCell::new(),
payload_index_io_read_counter: CounterCell::new(),
payload_index_io_write_counter: CounterCell::new(),
vector_io_read_counter: CounterCell::new(),
vector_io_write_counter: CounterCell::new(),
accumulator: None,
}
}
pub fn new_with_accumulator(accumulator: HwMeasurementAcc) -> Self {
Self {
vector_io_read_multiplier: 1,
cpu_multiplier: 1,
cpu_counter: CounterCell::new(),
payload_io_read_counter: CounterCell::new(),
payload_io_write_counter: CounterCell::new(),
payload_index_io_read_counter: CounterCell::new(),
payload_index_io_write_counter: CounterCell::new(),
vector_io_read_counter: CounterCell::new(),
vector_io_write_counter: CounterCell::new(),
accumulator: Some(accumulator),
}
}
pub fn new_accumulator(&self) -> HwMeasurementAcc {
self.accumulator
.clone()
.unwrap_or_else(HwMeasurementAcc::disposable)
}
/// Create a copy of the current counter cell with the same accumulator and config,
/// but with empty counter.
/// Allows independent counting within different segments.
pub fn fork(&self) -> Self {
Self {
vector_io_read_multiplier: self.vector_io_read_multiplier,
cpu_multiplier: self.cpu_multiplier,
cpu_counter: CounterCell::new(),
payload_io_read_counter: CounterCell::new(),
payload_io_write_counter: CounterCell::new(),
payload_index_io_read_counter: CounterCell::new(),
payload_index_io_write_counter: CounterCell::new(),
vector_io_read_counter: CounterCell::new(),
vector_io_write_counter: CounterCell::new(),
accumulator: self.accumulator.clone(),
}
}
pub fn set_cpu_multiplier(&mut self, multiplier: usize) {
self.cpu_multiplier = multiplier;
}
pub fn set_vector_io_read_multiplier(&mut self, multiplier: usize) {
self.vector_io_read_multiplier = multiplier;
}
/// Returns the CPU counter that can be used for counting.
/// Should *never* be used for reading CPU measurements! Use `.get_cpu()` for this.
#[inline]
pub fn cpu_counter(&self) -> &CounterCell {
&self.cpu_counter
}
#[inline]
pub fn payload_io_read_counter(&self) -> &CounterCell {
&self.payload_io_read_counter
}
#[inline]
pub fn payload_index_io_read_counter(&self) -> &CounterCell {
&self.payload_index_io_read_counter
}
#[inline]
pub fn payload_index_io_write_counter(&self) -> &CounterCell {
&self.payload_index_io_write_counter
}
#[inline]
pub fn payload_io_write_counter(&self) -> &CounterCell {
&self.payload_io_write_counter
}
#[inline]
pub fn vector_io_read(&self) -> &CounterCell {
&self.vector_io_read_counter
}
#[inline]
pub fn vector_io_write_counter(&self) -> &CounterCell {
&self.vector_io_write_counter
}
/// Returns a copy of the current measurements made by this counter. Ignores all values from the parent accumulator.
pub fn get_hw_data(&self) -> HardwareData {
let HardwareCounterCell {
vector_io_read_multiplier,
cpu_multiplier,
cpu_counter, // We use .get_cpu() to calculate the real CPU value.
payload_io_read_counter,
payload_io_write_counter,
payload_index_io_read_counter,
payload_index_io_write_counter,
vector_io_read_counter,
vector_io_write_counter,
accumulator: _,
} = self;
HardwareData {
cpu: cpu_counter.get() * cpu_multiplier,
payload_io_read: payload_io_read_counter.get(),
payload_io_write: payload_io_write_counter.get(),
payload_index_io_read: payload_index_io_read_counter.get(),
payload_index_io_write: payload_index_io_write_counter.get(),
vector_io_read: vector_io_read_counter.get() * vector_io_read_multiplier,
vector_io_write: vector_io_write_counter.get(),
}
}
fn merge_to_accumulator(&self) {
if let Some(accumulator) = &self.accumulator {
accumulator.accumulate(self.get_hw_data());
}
}
}
#[cfg(feature = "testing")]
impl Default for HardwareCounterCell {
    /// Same as [`HardwareCounterCell::new`]: fresh counters with a new accumulator.
    fn default() -> Self {
        Self::new()
    }
}
impl Drop for HardwareCounterCell {
    fn drop(&mut self) {
        // Flush collected measurements into the attached accumulator (if any),
        // so counts survive the cell going out of scope.
        self.merge_to_accumulator();
    }
}
impl From<&HardwareCounterCell> for HardwareData {
fn from(value: &HardwareCounterCell) -> Self {
let counter_values = value.get_hw_data();
let acc_values = value
.accumulator
.as_ref()
.map(|i| i.hw_data())
.unwrap_or_default();
counter_values + acc_values
}
}
#[cfg(test)]
mod test {
    use crate::counter::hardware_accumulator::HwMeasurementAcc;

    #[test]
    fn test_hw_counter_drain() {
        let accumulator = HwMeasurementAcc::new();
        {
            let draining_cell = accumulator.get_counter_cell();
            draining_cell.cpu_counter().incr(); // Dropping here means we drain the values to `atomic` instead of panicking
            {
                let mut hw_cell_wb = draining_cell.cpu_counter().write_back_counter();
                hw_cell_wb.incr_delta(1);
            }
        }
        // 1 direct increment + 1 flushed by the write-back counter.
        assert_eq!(accumulator.get_cpu(), 2);
    }

    #[test]
    fn test_hw_counter_new_accumulator() {
        let accumulator = HwMeasurementAcc::new();
        {
            let counter = accumulator.get_counter_cell();
            {
                let acc = counter.new_accumulator();
                {
                    let cell = acc.get_counter_cell();
                    cell.cpu_counter().incr_delta(42);
                }
            }
            let mut wb_counter = counter.cpu_counter().write_back_counter();
            wb_counter.incr_delta(1);
            counter.cpu_counter().incr_delta(26);
        }
        // 42 (via the nested accumulator) + 1 (write-back) + 26 (direct) = 69.
        assert_eq!(accumulator.get_cpu(), 69);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/iterator_ext/on_final_count.rs | lib/common/common/src/iterator_ext/on_final_count.rs | pub struct OnFinalCount<I, F>
where
F: FnMut(usize),
{
wrapped_iter: I,
callback: F,
counter: usize,
}
impl<I, F> OnFinalCount<I, F>
where
    F: FnMut(usize),
{
    /// Wraps `iter`; `f` receives the number of yielded items when the wrapper drops.
    pub fn new(iter: I, f: F) -> Self {
        OnFinalCount {
            wrapped_iter: iter,
            callback: f,
            counter: 0,
        }
    }
}
impl<I, F> Drop for OnFinalCount<I, F>
where
    F: FnMut(usize),
{
    fn drop(&mut self) {
        // Report the final count exactly once, when the iterator goes away —
        // this also covers early returns and partial consumption.
        (self.callback)(self.counter);
    }
}
impl<I, F> Iterator for OnFinalCount<I, F>
where
    I: Iterator,
    F: FnMut(usize),
{
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        let item = self.wrapped_iter.next();
        // Count only calls that yielded an item; exhausted calls add 0,
        // so repeated `next()` after exhaustion cannot inflate the count.
        self.counter += usize::from(item.is_some());
        item
    }
}
#[cfg(test)]
mod tests {
    use crate::iterator_ext::IteratorExt;

    #[test]
    fn test_on_final_count() {
        let mut iter_counter = 0;
        let count = (0..10).on_final_count(|c| iter_counter = c).count();
        assert_eq!(count, 10);
        assert_eq!(iter_counter, 10);
    }

    #[test]
    fn test_on_final_count_half_full() {
        let mut iter_counter = 0;
        let mut iter = (0..10).on_final_count(|c| iter_counter = c);
        let _item1 = iter.next();
        let _item2 = iter.next();
        let _item3 = iter.next();
        drop(iter);
        // Only the three consumed items are reported.
        assert_eq!(iter_counter, 3);
    }

    #[test]
    fn test_on_final_count_half_full_insist_on_empty() {
        let mut iter_counter = 0;
        let mut iter = (0..3).on_final_count(|c| iter_counter = c);
        let _item = iter.next();
        let _item = iter.next();
        let _item = iter.next();
        let _item = iter.next();
        let _item = iter.next();
        let _item = iter.next();
        let _item = iter.next();
        drop(iter);
        // Extra `next()` calls after exhaustion do not inflate the count.
        assert_eq!(iter_counter, 3);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/iterator_ext/stoppable_iter.rs | lib/common/common/src/iterator_ext/stoppable_iter.rs | use std::sync::atomic::{AtomicBool, Ordering};
/// Iterator adapter that yields items from the inner iterator until the shared
/// `is_stopped` flag is raised; after that, every `next()` returns `None`
/// without advancing the inner iterator.
pub struct StoppableIter<'a, I> {
    iter: I,
    is_stopped: &'a AtomicBool,
}

impl<'a, I> StoppableIter<'a, I> {
    /// Wraps `iter` so that iteration halts once `is_stopped` becomes `true`.
    pub fn new(iter: I, is_stopped: &'a AtomicBool) -> Self {
        Self { iter, is_stopped }
    }
}

impl<I> Iterator for StoppableIter<'_, I>
where
    I: Iterator,
{
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        // A relaxed load of a rarely-written atomic is cheap enough to afford
        // on every single iteration.
        if self.is_stopped.load(Ordering::Relaxed) {
            None
        } else {
            self.iter.next()
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/iterator_ext/mod.rs | lib/common/common/src/iterator_ext/mod.rs | #[cfg(any(test, feature = "testing"))]
use std::fmt::Debug;
use std::sync::atomic::AtomicBool;
use check_stopped::CheckStopped;
use on_final_count::OnFinalCount;
use crate::iterator_ext::stoppable_iter::StoppableIter;
pub(super) mod on_final_count;
mod check_stopped;
pub mod stoppable_iter;
pub trait IteratorExt: Iterator {
    /// Periodically check if the iteration should be stopped.
    /// The closure `f` is called every `every` iterations, and should return `true` if the iteration should be stopped.
    fn check_stop_every<F>(self, every: usize, f: F) -> CheckStopped<Self, F>
    where
        F: FnMut() -> bool,
        Self: Sized,
    {
        CheckStopped::new(self, every, f)
    }

    /// Stops the iterator if `is_stopped` is set to true
    #[inline]
    fn stop_if<'a>(self, is_stopped: &'a AtomicBool) -> StoppableIter<'a, Self>
    where
        Self: Sized,
    {
        StoppableIter::new(self, is_stopped)
    }

    /// Will execute the callback when the iterator is dropped.
    ///
    /// The callback receives the number of items the iterator yielded, i.e. the
    /// number of `next()` calls that returned `Some`. Calls returning `None`
    /// (including the final exhausting one) are not counted — see
    /// `OnFinalCount::next`, which adds `usize::from(item.is_some())`.
    fn on_final_count<F>(self, f: F) -> OnFinalCount<Self, F>
    where
        F: FnMut(usize),
        Self: Sized,
    {
        OnFinalCount::new(self, f)
    }

    /// Consume the iterator and call `black_box` on each item, for benchmarking purposes.
    fn black_box(self)
    where
        Self: Sized,
    {
        self.for_each(|p| {
            std::hint::black_box(p);
        });
    }
}
impl<I: Iterator> IteratorExt for I {}
/// Checks that [`Iterator::fold()`] yields same values as [`Iterator::next()`].
/// Panics if it is not.
#[cfg(any(test, feature = "testing"))]
pub fn check_iterator_fold<I: Iterator, F: Fn() -> I>(mk_iter: F)
where
    I::Item: PartialEq + Debug,
{
    const EXTRA_COUNT: usize = 3;
    // Treat values returned by `next()` as reference.
    let mut reference_values = Vec::new();
    let mut iter = mk_iter();
    #[expect(
        clippy::while_let_on_iterator,
        reason = "Reference implementation: call bare-bones `next()` explicitly"
    )]
    while let Some(value) = iter.next() {
        reference_values.push(value);
    }
    // Check that `next()` after exhaustion returns None.
    for _ in 0..EXTRA_COUNT {
        assert!(
            iter.next().is_none(),
            "Iterator returns values after it's exhausted",
        );
    }
    drop(iter);
    // Check `fold()` yields same values as `next()`.
    // Exercise every split point: consume `split_at` items via `next()` first,
    // then fold the rest, so `fold()` is also tested on partially (and fully)
    // consumed iterators.
    let mut values_for_fold = Vec::new();
    for split_at in 0..reference_values.len() + EXTRA_COUNT {
        let mut iter = mk_iter();
        values_for_fold.clear();
        for _ in 0..split_at.min(reference_values.len()) {
            values_for_fold.push(iter.next().expect("not enough values"));
        }
        // Call `next()` a few times to check that these extra calls won't break
        // `fold()`.
        for _ in 0..split_at.saturating_sub(reference_values.len()) {
            assert!(iter.next().is_none());
        }
        let acc = iter.fold(values_for_fold.len(), |acc, value| {
            assert_eq!(acc, values_for_fold.len());
            values_for_fold.push(value);
            acc + 1
        });
        assert_eq!(reference_values, values_for_fold);
        assert_eq!(acc, values_for_fold.len());
    }
}
/// Checks that [`ExactSizeIterator::len()`] stays consistent while the iterator
/// is consumed, and that it reports 0 once the iterator is exhausted.
/// Panics if the invariant is violated.
#[cfg(any(test, feature = "testing"))]
pub fn check_exact_size_iterator_len<I: ExactSizeIterator>(mut iter: I) {
    let mut remaining = iter.len();
    while remaining > 0 {
        iter.next();
        remaining -= 1;
        // After each consumed element the reported length must shrink by one.
        assert_eq!(iter.len(), remaining);
    }
    assert!(iter.next().is_none());
    assert_eq!(iter.len(), 0);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/src/iterator_ext/check_stopped.rs | lib/common/common/src/iterator_ext/check_stopped.rs | pub struct CheckStopped<I, F> {
iter: I,
f: F,
every: usize,
counter: usize,
done: bool,
}
impl<I, F> CheckStopped<I, F> {
    /// Wraps `iter`, invoking `f` once per `every` calls to `next()`.
    pub fn new(iter: I, every: usize, f: F) -> Self {
        CheckStopped {
            iter,
            f,
            every,
            done: false,
            counter: 0,
        }
    }
}
impl<I, F> Iterator for CheckStopped<I, F>
where
    I: Iterator,
    F: FnMut() -> bool,
{
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        // Once stopped, stay stopped, even if `next()` is called again.
        if self.done {
            return None;
        }
        self.counter += 1;
        if self.counter == self.every {
            self.counter = 0;
            // The stop check runs *before* pulling the next item: when it
            // fires, the underlying iterator is not advanced, so the item
            // that would have been yielded on this call is never produced.
            if (self.f)() {
                self.done = true;
                return None;
            }
        }
        self.iter.next()
    }
}
#[cfg(test)]
mod tests {
    use crate::iterator_ext::IteratorExt;

    #[test]
    fn test_it_only_acts_periodically() {
        let mut num_checks = 0;
        // The 5th check fires on the 100th `next()` call and stops *before*
        // yielding, so only 99 items come out of an infinite source.
        let ninety_nine = (0..)
            .check_stop_every(20, || {
                num_checks += 1;
                // stop after 5 checks
                num_checks == 5
            })
            .count();
        assert_eq!(ninety_nine, 99);
        assert_eq!(num_checks, 5);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/benches/bitpacking.rs | lib/common/common/benches/bitpacking.rs | use std::hint::black_box;
use common::bitpacking::{BitReader, BitWriter};
use common::bitpacking_links::{iterate_packed_links, pack_links};
use common::bitpacking_ordered;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use itertools::Itertools as _;
use rand::rngs::StdRng;
use rand::{Rng as _, SeedableRng as _};
use zerocopy::IntoBytes;
/// Benchmarks raw `BitReader`/`BitWriter` throughput over random data with
/// random bit widths (1..=32) and small random windows.
pub fn bench_bitpacking(c: &mut Criterion) {
    let mut group = c.benchmark_group("bitpacking");
    let mut rng = StdRng::seed_from_u64(42);
    let data8 = (0..64_000_000).map(|_| rng.random()).collect::<Vec<u8>>();
    let data32 = (0..4_000_000).map(|_| rng.random()).collect::<Vec<u32>>();
    let mut rng = StdRng::seed_from_u64(42);
    group.bench_function("read", |b| {
        b.iter_batched(
            || {
                // Setup (not timed): pick a bit width and a random slice of up to 16 bytes.
                let bits = rng.random_range(1..=32);
                let bytes = rng.random_range(0..=16);
                let start = rng.random_range(0..data8.len() - bytes);
                (bits, &data8[start..start + bytes])
            },
            |(bits, data)| {
                let mut r = BitReader::new(data);
                r.set_bits(bits);
                for _ in 0..(data.len() * u8::BITS as usize / bits as usize) {
                    black_box(r.read::<u32>());
                }
            },
            BatchSize::SmallInput,
        )
    });
    let mut rng = StdRng::seed_from_u64(42);
    // Output buffer reused across samples to avoid measuring allocation.
    let mut out = Vec::new();
    group.bench_function("write", |b| {
        b.iter_batched(
            || {
                let bits = rng.random_range(1..=32);
                let values = rng.random_range(0..=16);
                let start = rng.random_range(0..data32.len() - values);
                (bits, &data32[start..start + values])
            },
            |(bits, data)| {
                out.clear();
                let mut w = BitWriter::new(&mut out);
                for &x in data {
                    w.write(x, bits);
                }
                w.finish();
                black_box(&mut out);
            },
            BatchSize::SmallInput,
        )
    });
}
/// Benchmarks decoding (`iterate_packed_links`) of randomly generated packed
/// link lists; `pos` records the (end offset, bits, sorted count) of each list.
pub fn bench_bitpacking_links(c: &mut Criterion) {
    let mut group = c.benchmark_group("bitpacking_links");
    let mut rng = StdRng::seed_from_u64(42);
    let mut links = Vec::new();
    let mut pos = vec![(0, 0, 0)];
    while links.len() <= 64_000_000 {
        let bits_per_unsorted = rng.random_range(7..=32);
        let sorted_count = rng.random_range(0..100);
        let unsorted_count = rng.random_range(0..100);
        // Skip impossible combinations: not enough distinct values are
        // representable in `bits_per_unsorted` bits for the requested counts.
        if 1 << bits_per_unsorted < sorted_count + unsorted_count {
            continue;
        }
        pack_links(
            &mut links,
            &mut std::iter::repeat_with(|| rng.random_range(0..1u64 << bits_per_unsorted) as u32)
                .unique()
                .take(sorted_count + unsorted_count)
                .collect_vec(),
            bits_per_unsorted,
            sorted_count,
        );
        pos.push((links.len(), bits_per_unsorted, sorted_count));
    }
    let mut rng = StdRng::seed_from_u64(42);
    group.bench_function("read", |b| {
        b.iter_batched(
            || {
                // Setup (not timed): pick one packed list at random.
                let idx = rng.random_range(1..pos.len());
                (&links[pos[idx - 1].0..pos[idx].0], pos[idx].1, pos[idx].2)
            },
            |(links, bits_per_unsorted, sorted_count)| {
                iterate_packed_links(links, bits_per_unsorted, sorted_count).for_each(|x| {
                    black_box(x);
                });
            },
            BatchSize::SmallInput,
        )
    });
}
/// Benchmarks compression and random access for the ordered bitpacking codec.
pub fn bench_bitpacking_ordered(c: &mut Criterion) {
    let mut group = c.benchmark_group("bitpacking_ordered");
    let values = bitpacking_ordered::gen_test_sequence(&mut StdRng::seed_from_u64(42), 32, 1 << 22);
    // Compression is slow; keep the sample count low for this one benchmark.
    group.sample_size(10);
    group.bench_function("compress", |b| {
        b.iter(|| bitpacking_ordered::compress(&values))
    });
    // Recreate the group to reset the sample size.
    drop(group);
    let mut group = c.benchmark_group("bitpacking_ordered");
    let (compressed, parameters) = bitpacking_ordered::compress(&values);
    let (decompressor, _) = bitpacking_ordered::Reader::new(parameters, &compressed).unwrap();
    println!(
        "Original size: {:.1} MB, compressed size: {:.1} MB, {:?}",
        values.as_bytes().len() as f64 / 1e6,
        compressed.len() as f64 / 1e6,
        decompressor.parameters(),
    );
    let mut rng = StdRng::seed_from_u64(42);
    // Baseline: raw indexed access into the compressed byte buffer.
    group.bench_function("get_raw", |b| {
        b.iter_batched(
            || rng.random_range(0..values.len()),
            |i| {
                black_box(compressed[i]);
            },
            BatchSize::SmallInput,
        )
    });
    let mut rng = StdRng::seed_from_u64(42);
    group.bench_function("get", |b| {
        b.iter_batched(
            || rng.random_range(0..values.len()),
            |i| {
                black_box(decompressor.get(i));
            },
            BatchSize::SmallInput,
        )
    });
    let mut rng = StdRng::seed_from_u64(42);
    // Two adjacent lookups per sample.
    group.bench_function("get2", |b| {
        b.iter_batched(
            || rng.random_range(0..values.len() - 1),
            |i| {
                let a = decompressor.get(i);
                let b = decompressor.get(i + 1);
                black_box((a, b));
            },
            BatchSize::SmallInput,
        )
    });
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = bench_bitpacking, bench_bitpacking_links, bench_bitpacking_ordered,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/benches/mmap_hashmap.rs | lib/common/common/benches/mmap_hashmap.rs | use common::mmap_hashmap::{MmapHashMap, gen_ident, gen_map};
use criterion::{Criterion, criterion_group, criterion_main};
use rand::SeedableRng;
use rand::rngs::StdRng;
/// Benchmarks `MmapHashMap` lookups over a generated map of 100k string keys.
fn bench_mmap_hashmap(c: &mut Criterion) {
    let mut rng = StdRng::seed_from_u64(42);
    let map = gen_map(&mut rng, gen_ident, 100_000);
    let tmpdir = tempfile::Builder::new().tempdir().unwrap();
    let mmap_path = tmpdir.path().join("data");
    let mut keys = map.keys().cloned().collect::<Vec<_>>();
    keys.sort_unstable();
    MmapHashMap::<str, u32>::create(
        &mmap_path,
        map.iter().map(|(k, v)| (k.as_str(), v.iter().copied())),
    )
    .unwrap();
    let mmap = MmapHashMap::<str, u32>::open(&mmap_path, true).unwrap();
    // Cycle through all keys so successive samples query different entries.
    let mut it = keys.iter().cycle();
    c.bench_function("get", |b| {
        b.iter(|| mmap.get(it.next().unwrap()).iter().copied().max())
    });
    // Keep the tempdir alive until the benchmark is done, then clean up.
    drop(tmpdir);
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = bench_mmap_hashmap,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/benches/bitpacking_tango.rs | lib/common/common/benches/bitpacking_tango.rs | use std::cell::LazyCell;
use std::hint::black_box;
use std::rc::Rc;
use common::bitpacking::{BitReader, BitWriter};
use common::bitpacking_links::iterate_packed_links;
use common::bitpacking_ordered;
use itertools::Itertools as _;
use rand::rngs::StdRng;
use rand::{Rng as _, SeedableRng as _};
use tango_bench::{
Bencher, Benchmark, ErasedSampler, IntoBenchmarks, benchmark_fn, tango_benchmarks, tango_main,
};
use zerocopy::IntoBytes;
/// Tango benchmarks for raw `BitReader`/`BitWriter` throughput on random data.
pub fn benchmarks_bitpacking() -> impl IntoBenchmarks {
    // Lazily-built shared inputs (fixed seed for reproducibility):
    // 64 MB of random bytes for reading, 4M random u32 values for writing.
    let data8 = StateBencher::new(move || {
        let mut rng = StdRng::seed_from_u64(42);
        (0..64_000_000).map(|_| rng.random()).collect::<Vec<u8>>()
    });
    let data32 = StateBencher::new(move || {
        let mut rng = StdRng::seed_from_u64(42);
        (0..4_000_000).map(|_| rng.random()).collect::<Vec<u32>>()
    });
    [
        data8.benchmark_fn("bitpacking/read", move |b, data8| {
            let mut rng = StdRng::seed_from_u64(42);
            b.iter(move || {
                // Decode a short random slice with a random bit width each iteration.
                let bits = rng.random_range(1..=32);
                let bytes = rng.random_range(0..=16);
                let start = rng.random_range(0..data8.len() - bytes);
                let data = &data8[start..start + bytes];
                let mut r = BitReader::new(data);
                r.set_bits(bits);
                for _ in 0..(data.len() * u8::BITS as usize / bits as usize) {
                    black_box(r.read::<u32>());
                }
            })
        }),
        data32.benchmark_fn("bitpacking/write", move |b, data32| {
            let mut rng = StdRng::seed_from_u64(42);
            // Reused output buffer so allocation cost is not measured.
            let mut out = Vec::new();
            b.iter(move || {
                // Pack a short random slice with a random bit width each iteration.
                let bits = rng.random_range(1..=32);
                let values = rng.random_range(0..=16);
                let start = rng.random_range(0..data32.len() - values);
                let data = &data32[start..start + values];
                out.clear();
                let mut w = BitWriter::new(&mut out);
                for &x in data {
                    w.write(x, bits);
                }
                w.finish();
                black_box(&mut out);
            })
        }),
    ]
}
/// Tango benchmark for `iterate_packed_links` over a large buffer of randomly
/// generated packed link lists.
fn benchmarks_bitpacking_links() -> impl IntoBenchmarks {
    // End offset and packing parameters of one packed list inside `State::links`.
    struct Item {
        offset: usize,
        bits_per_unsorted: u8,
        sorted_count: usize,
    }
    struct State {
        links: Vec<u8>,
        items: Vec<Item>,
    }
    let b = StateBencher::new(move || {
        Rc::new({
            let mut rng = StdRng::seed_from_u64(42);
            let mut links = Vec::new();
            // Sentinel first item so list `idx` spans items[idx-1].offset..items[idx].offset.
            let mut pos = vec![Item {
                offset: 0,
                bits_per_unsorted: 0,
                sorted_count: 0,
            }];
            // Keep appending random link lists until we have >64 MB of packed data.
            while links.len() <= 64_000_000 {
                let bits_per_unsorted = rng.random_range(7..=32);
                let sorted_count = rng.random_range(0..100);
                let unsorted_count = rng.random_range(0..100);
                // Not enough distinct values representable at this bit width; retry.
                if 1 << bits_per_unsorted < sorted_count + unsorted_count {
                    continue;
                }
                common::bitpacking_links::pack_links(
                    &mut links,
                    &mut std::iter::repeat_with(|| {
                        rng.random_range(0..1u64 << bits_per_unsorted) as u32
                    })
                    .unique()
                    .take(sorted_count + unsorted_count)
                    .collect_vec(),
                    bits_per_unsorted,
                    sorted_count,
                );
                pos.push(Item {
                    offset: links.len(),
                    bits_per_unsorted,
                    sorted_count,
                });
            }
            State { links, items: pos }
        })
    });
    [b.benchmark_fn("bitpacking_links/read", move |b, state| {
        let mut rng = rand::rng();
        b.iter(move || {
            // Pick a random list (skipping the index-0 sentinel) and decode it fully.
            let idx = rng.random_range(1..state.items.len());
            iterate_packed_links(
                &state.links[state.items[idx - 1].offset..state.items[idx].offset],
                state.items[idx].bits_per_unsorted,
                state.items[idx].sorted_count,
            )
            .for_each(|x| {
                black_box(x);
            });
        })
    })]
}
/// Tango benchmarks for random access into a `bitpacking_ordered` compressed
/// sequence (single `get` and two adjacent `get`s).
fn benchmarks_ordered() -> impl IntoBenchmarks {
    struct StateOwner {
        values: Vec<u64>,
        compressed: Vec<u8>,
    }
    struct StateDependent<'a> {
        decompressor: bitpacking_ordered::Reader<'a>,
    }
    // `Reader` borrows the compressed bytes it reads from, so a self-referential
    // cell is used to bundle the owner and the borrowing reader in one value.
    self_cell::self_cell! {
        struct State {
            owner: StateOwner,
            #[covariant]
            dependent: StateDependent,
        }
    }
    let b = StateBencher::new(move || {
        let values =
            bitpacking_ordered::gen_test_sequence(&mut StdRng::seed_from_u64(42), 32, 1 << 22);
        let (compressed, parameters) = bitpacking_ordered::compress(&values);
        State::new(StateOwner { values, compressed }, |owner| {
            let (decompressor, _) =
                bitpacking_ordered::Reader::new(parameters, &owner.compressed).unwrap();
            // One-off report of the achieved compression ratio.
            println!(
                "Original size: {:.1} MB, compressed size: {:.1} MB, {:?}",
                owner.values.as_bytes().len() as f64 / 1e6,
                owner.compressed.len() as f64 / 1e6,
                decompressor.parameters(),
            );
            StateDependent { decompressor }
        })
    });
    [
        b.benchmark_fn("ordered/get", {
            move |b, state| {
                let mut rng = rand::rng();
                // `len - 1` keeps the index range shared with the "get2" variant below.
                let len = state.borrow_owner().values.len() - 1;
                b.iter(move || {
                    let i = rng.random_range(0..len);
                    black_box(state.borrow_dependent().decompressor.get(i));
                })
            }
        }),
        b.benchmark_fn("ordered/get2", {
            move |b, state| {
                let mut rng = rand::rng();
                let len = state.borrow_owner().values.len() - 1;
                b.iter(move || {
                    // Fetch two adjacent values to measure locality benefits.
                    let i = rng.random_range(0..len);
                    let a = state.borrow_dependent().decompressor.get(i);
                    let b = state.borrow_dependent().decompressor.get(i + 1);
                    black_box((a, b));
                })
            }
        }),
    ]
}
/// Shares one lazily-initialized state value between several benchmark
/// functions so that the (possibly expensive) setup runs at most once.
#[expect(clippy::type_complexity)]
struct StateBencher<T>(Rc<LazyCell<Rc<T>, Box<dyn FnOnce() -> Rc<T>>>>);
impl<T: 'static> StateBencher<T> {
    fn new<F: FnOnce() -> T + 'static>(f: F) -> Self {
        Self(Rc::new(LazyCell::new(Box::new(move || Rc::new(f())))))
    }
    /// Build a tango `Benchmark` whose sampler factory receives the shared
    /// state; the state is constructed on first use via `LazyCell::force`.
    pub fn benchmark_fn<F: FnMut(Bencher, Rc<T>) -> Box<dyn ErasedSampler> + 'static>(
        &self,
        name: impl Into<String>,
        mut sampler_factory: F,
    ) -> Benchmark {
        let state = Rc::clone(&self.0);
        benchmark_fn(name, move |b| {
            let state = Rc::clone(LazyCell::force(&state));
            sampler_factory(b, state)
        })
    }
}
// Register all benchmark groups and generate the tango-bench entry point.
tango_benchmarks!(
    benchmarks_bitpacking(),
    benchmarks_bitpacking_links(),
    benchmarks_ordered()
);
tango_main!();
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/benches/atomic_stop.rs | lib/common/common/benches/atomic_stop.rs | use std::hint::black_box;
use std::sync::atomic::Ordering;
use common::iterator_ext::IteratorExt;
use common::iterator_ext::stoppable_iter::StoppableIter;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::Rng;
/// Compares the overhead of stop-flag checking strategies on a summation
/// workload: no checks, a check every 100 elements, and `StoppableIter`.
fn bench_atomic_stop(c: &mut Criterion) {
    // Input size is re-randomized per iteration (1M..=2M elements) so the
    // compiler cannot constant-fold the sums.
    let mut rng = rand::rng();
    c.bench_function("Sum regular iterator", |b| {
        b.iter(|| {
            let size = rng.random_range(1_000_000..=2_000_000);
            // Baseline: sum without any stop checking.
            let sum = (0..size).map(|x| black_box(x + 1)).sum::<u64>();
            black_box(sum);
        });
    });
    // The flag is never set: only the cost of *checking* it is measured.
    let stop_flag = std::sync::atomic::AtomicBool::new(false);
    c.bench_function("Sum with check every 100", |b| {
        b.iter(|| {
            let size = rng.random_range(1_000_000..=2_000_000);
            let sum = (0..size)
                .check_stop_every(100, || stop_flag.load(Ordering::Relaxed))
                .map(|x| black_box(x + 1))
                .sum::<u64>();
            black_box(sum);
        });
    });
    c.bench_function("Sum with stoppable", |b| {
        b.iter(|| {
            let size = rng.random_range(1_000_000..=2_000_000);
            let sum = StoppableIter::new(0..size, &stop_flag)
                .map(|x| black_box(x + 1))
                .sum::<u64>();
            black_box(sum);
        });
    });
}
criterion_group!(atomic_stop, bench_atomic_stop);
criterion_main!(atomic_stop);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/common/benches/hw_counter.rs | lib/common/common/benches/hw_counter.rs | use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
/// Measures how cheap it is to construct (and immediately drop) the
/// disposable hardware measurement primitives: a counter cell and an
/// accumulator.
fn bench_hw_counter(c: &mut Criterion) {
    c.bench_function("Disposable Hw Cell", |bencher| {
        // `drop` keeps destruction inside the timed closure, like the original.
        bencher.iter(|| drop(HardwareCounterCell::disposable()));
    });
    c.bench_function("Disposable Hw Acc", |bencher| {
        bencher.iter(|| drop(HwMeasurementAcc::disposable()));
    });
}
criterion_group!(hw_counter, bench_hw_counter);
criterion_main!(hw_counter);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/dataset/src/lib.rs | lib/common/dataset/src/lib.rs | use std::path::{Path, PathBuf};
use anyhow::{Context, Result, anyhow};
use flate2::read::GzDecoder;
use fs_err as fs;
use fs_err::File;
use indicatif::{ProgressBar, ProgressDrawTarget};
/// Datasets used by benchmarks/tests; each variant maps to a remote gzipped
/// file that can be fetched and cached via [`Dataset::download`].
pub enum Dataset {
    // https://github.com/qdrant/sparse-vectors-experiments
    SpladeWikiMovies,
    // https://github.com/qdrant/sparse-vectors-benchmark
    NeurIps2023Full,
    NeurIps2023_1M,
    NeurIps2023Small,
    NeurIps2023Queries,
    // payload csv dataset
    HMArticles,
}
impl Dataset {
    /// Ensure the dataset is present in the local cache, downloading and
    /// decompressing it if necessary; returns the cached file path.
    pub fn download(&self) -> Result<PathBuf> {
        download_cached(&self.url())
    }

    /// Remote URL of the gzipped dataset file.
    fn url(&self) -> String {
        const NEUR_IPS_2023_BASE: &str =
            "https://storage.googleapis.com/ann-challenge-sparse-vectors/csr";
        match self {
            Dataset::SpladeWikiMovies => String::from(
                "https://storage.googleapis.com/dataset-sparse-vectors/sparse-vectors.jsonl.gz",
            ),
            Dataset::NeurIps2023Full => format!("{NEUR_IPS_2023_BASE}/base_full.csr.gz"),
            Dataset::NeurIps2023_1M => format!("{NEUR_IPS_2023_BASE}/base_1M.csr.gz"),
            Dataset::NeurIps2023Small => format!("{NEUR_IPS_2023_BASE}/base_small.csr.gz"),
            Dataset::NeurIps2023Queries => format!("{NEUR_IPS_2023_BASE}/queries.dev.csr.gz"),
            Dataset::HMArticles => String::from(
                "https://storage.googleapis.com/qdrant-tests/h%26m-articles.csv.gz",
            ),
        }
    }
}
/// Download `url` into the shared on-disk cache (decompressing gzip on the
/// fly) and return the cached file path. An already-cached file is returned
/// immediately without touching the network.
fn download_cached(url: &str) -> Result<PathBuf> {
    // Filename without an ".gz" extension, e.g. "base_full.csr".
    let basename = {
        let path = Path::new(url);
        match path.extension() {
            Some(gz) if gz == "gz" => path.file_stem(),
            _ => path.file_name(),
        }
        .ok_or_else(|| anyhow!("Failed to extract basename from {url}"))?
    };
    // Cache directory, e.g. "target/datasets".
    let cache_dir = workspace_dir()
        .join(std::env::var_os("CARGO_TARGET_DIR").unwrap_or_else(|| "target".into()))
        .join("datasets");
    // Cache file path, e.g. "target/datasets/base_full.csr".
    let cache_path = cache_dir.join(basename);
    if cache_path.exists() {
        return Ok(cache_path);
    }
    eprintln!("Downloading {url} to {cache_path:?}...");
    fs::create_dir_all(cache_dir)?;
    let resp = reqwest::blocking::get(url)?;
    if !resp.status().is_success() {
        anyhow::bail!("Failed to download {url}, status: {}", resp.status());
    }
    // `None` when the server does not report Content-Length; the progress bar
    // then runs without a total.
    let total_size = resp.content_length();
    // Download to a temporary file, e.g. "target/datasets/base_full.csr.tmp", to avoid
    // incomplete files.
    let mut tmp_fname = cache_path.clone().into_os_string();
    tmp_fname.push(".tmp");
    // Progress bar.
    let pb = ProgressBar::with_draw_target(total_size, ProgressDrawTarget::stderr_with_hz(12));
    pb.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("{msg} {wide_bar} {bytes}/{total_bytes} (eta:{eta})")
            .expect("failed to set style"),
    );
    // Download + decompress.
    std::io::copy(
        &mut GzDecoder::new(pb.wrap_read(resp)),
        &mut File::create(&tmp_fname)?,
    )?;
    // Atomically publish the finished download under its final name.
    fs::rename(&tmp_fname, &cache_path)
        .with_context(|| format!("Failed to rename {tmp_fname:?} to {cache_path:?}"))?;
    Ok(cache_path)
}
/// Root directory of the cargo workspace, resolved by asking cargo for the
/// workspace manifest path and taking its parent directory.
///
/// Panics if cargo cannot be spawned or produces non-UTF-8 output (both are
/// environment bugs for this tooling crate).
fn workspace_dir() -> PathBuf {
    let output = std::process::Command::new(env!("CARGO"))
        .args(["locate-project", "--workspace", "--message-format=plain"])
        .output()
        .unwrap()
        .stdout;
    // Plain output is the absolute path of the workspace Cargo.toml.
    let manifest_path = std::str::from_utf8(&output).unwrap().trim();
    Path::new(manifest_path).parent().unwrap().to_path_buf()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/io/src/storage_version.rs | lib/common/io/src/storage_version.rs | use std::io::{Read, Write};
use std::path::Path;
use atomicwrites::{AllowOverwrite, AtomicFile};
use fs_err::File;
use semver::Version;
use crate::file_operations::{FileOperationResult, FileStorageError};
/// Name of the file, inside a storage directory, that records the version.
pub const VERSION_FILE: &str = "version.info";
/// Structure to save and load the version with which the storage was created
pub trait StorageVersion {
    // Current crate version needs to be defined in each crate separately,
    // since the package version is provided at compile time
    fn current_raw() -> &'static str;
    /// `current_raw` parsed as a semver [`Version`].
    fn current() -> Version {
        // Panic safety: assuming `current_raw` is a valid semver
        Self::current_raw().parse().expect("Can't parse version")
    }
    /// Loads and parses the version from the given directory.
    /// Returns `None` if the version file is not found.
    fn load(dir_path: &Path) -> FileOperationResult<Option<Version>> {
        let version_file = dir_path.join(VERSION_FILE);
        let mut contents = String::new();
        let mut file = match File::open(&version_file) {
            Ok(file) => file,
            // A missing file is not an error: the storage simply has no
            // recorded version yet.
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
                return Ok(None);
            }
            Err(err) => return Err(err.into()),
        };
        file.read_to_string(&mut contents)?;
        let version = contents.parse().map_err(|err| {
            FileStorageError::generic(format!(
                "Can't parse version from {version_file:?}, error: {err}"
            ))
        })?;
        Ok(Some(version))
    }
    /// Atomically writes `current_raw` into the version file of `dir_path`,
    /// overwriting any previous version.
    fn save(dir_path: &Path) -> FileOperationResult<()> {
        let version_file = dir_path.join(VERSION_FILE);
        let af = AtomicFile::new(&version_file, AllowOverwrite);
        let current_version = Self::current_raw();
        af.write(|f| f.write_all(current_version.as_bytes()))
            .map_err(|err| {
                FileStorageError::generic(format!("Can't write {version_file:?}, error: {err}"))
            })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/io/src/file_operations.rs | lib/common/io/src/file_operations.rs | use std::io::{self, BufReader, BufWriter, Write};
use std::path::Path;
use std::result;
use atomicwrites::{AtomicFile, OverwriteBehavior};
use fs_err::File;
use serde::Serialize;
use serde::de::DeserializeOwned;
/// Atomically write `path`: `write` fills a buffered temporary file which is
/// flushed and then published over `path` (existing files are overwritten).
#[allow(
    clippy::disallowed_types,
    reason = "can't use `fs_err::File` since `atomicwrites` only provides `&mut std::fs::File`"
)]
pub fn atomic_save<E, F>(path: &Path, write: F) -> Result<(), E>
where
    E: From<io::Error>,
    F: FnOnce(&mut BufWriter<&mut std::fs::File>) -> Result<(), E>,
{
    let af = AtomicFile::new(path, OverwriteBehavior::AllowOverwrite);
    af.write(|f| {
        let mut writer = BufWriter::new(f);
        write(&mut writer)?;
        // Flush explicitly: errors on drop would be silently swallowed.
        writer.flush()?;
        Ok(())
    })
    .map_err(|e| match e {
        // I/O failure inside `atomicwrites` itself (temp file / rename).
        atomicwrites::Error::Internal(err) => E::from(err),
        // Error returned by the user-supplied `write` closure.
        atomicwrites::Error::User(err) => err,
    })
}
/// Atomically serialize `object` into `path` using bincode.
pub fn atomic_save_bin<T: Serialize>(path: &Path, object: &T) -> Result<()> {
    atomic_save(path, |writer| Ok(bincode::serialize_into(writer, object)?))
}
/// Atomically serialize `object` into `path` as JSON.
pub fn atomic_save_json<T: Serialize>(path: &Path, object: &T) -> Result<()> {
    atomic_save(path, |writer| Ok(serde_json::to_writer(writer, object)?))
}
/// Read a bincode-encoded value of type `T` from `path`.
pub fn read_bin<T: DeserializeOwned>(path: &Path) -> Result<T> {
    let reader = BufReader::new(File::open(path)?);
    Ok(bincode::deserialize_from(reader)?)
}
/// Read a JSON value of type `T` from `path`.
pub fn read_json<T: DeserializeOwned>(path: &Path) -> Result<T> {
    let reader = BufReader::new(File::open(path)?);
    Ok(serde_json::from_reader(reader)?)
}
/// Alias of [`Result`].
pub type FileOperationResult<T> = Result<T>;
/// Alias of [`Error`].
pub type FileStorageError = Error;
pub type Result<T, E = Error> = result::Result<T, E>;
/// Error type for the file (de)serialization helpers in this module.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("{0}")]
    Io(#[from] io::Error),
    // Unboxed variant of `bincode::Error`; see the `From<bincode::Error>` impl.
    #[error("{0}")]
    Bincode(#[from] bincode::ErrorKind),
    #[error("{0}")]
    SerdeJson(#[from] serde_json::Error),
    // Free-form message, constructed via `Error::generic`.
    #[error("{0}")]
    Generic(String),
}
impl Error {
    /// Build an [`Error::Generic`] from any string-like message.
    pub fn generic(msg: impl Into<String>) -> Self {
        Self::Generic(msg.into())
    }
}
/// Flatten `atomicwrites::Error` into our `Error`, regardless of whether it
/// originated inside `atomicwrites` or in the user-supplied closure.
impl<E> From<atomicwrites::Error<E>> for Error
where
    Self: From<E>,
{
    fn from(err: atomicwrites::Error<E>) -> Self {
        match err {
            atomicwrites::Error::Internal(err) => err.into(),
            atomicwrites::Error::User(err) => err.into(),
        }
    }
}
// `bincode::Error` is a boxed `ErrorKind`: dereference it so the enum stores
// the kind directly.
impl From<bincode::Error> for Error {
    fn from(err: bincode::Error) -> Self {
        Self::Bincode(*err)
    }
}
impl From<Error> for io::Error {
    fn from(err: Error) -> Self {
        match err {
            // Already an I/O error: pass it through unchanged.
            Error::Io(err) => err,
            Error::Bincode(err) => Self::other(err),
            Error::SerdeJson(err) => Self::other(err),
            Error::Generic(msg) => Self::other(msg),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/common/io/src/lib.rs | lib/common/io/src/lib.rs | pub mod file_operations;
pub mod storage_version;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/search_result_aggregator.rs | lib/shard/src/search_result_aggregator.rs | use std::cmp::max;
use ahash::{AHashMap, AHashSet};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use common::types::ScoreType;
use segment::types::{PointIdType, ScoredPoint, SeqNumberType};
/// Avoid excessive memory allocation and allocation failures on huge limits
const LARGEST_REASONABLE_ALLOCATION_SIZE: usize = 1_048_576;

/// Collects up to `limit` scored points for one request, de-duplicating by
/// point id: only the first occurrence of an id is considered.
pub struct SearchResultAggregator {
    queue: FixedLengthPriorityQueue<ScoredPoint>,
    seen: AHashSet<PointIdType>, // Point ids seen
}

impl SearchResultAggregator {
    pub fn new(limit: usize) -> Self {
        // Cap the pre-allocation of the de-duplication set: `limit` may be huge.
        let seen_capacity = limit.min(LARGEST_REASONABLE_ALLOCATION_SIZE);
        SearchResultAggregator {
            queue: FixedLengthPriorityQueue::new(limit),
            seen: AHashSet::with_capacity(seen_capacity),
        }
    }

    /// Offer `point` to the queue unless its id was pushed before.
    pub fn push(&mut self, point: ScoredPoint) {
        let first_occurrence = self.seen.insert(point.id);
        if first_occurrence {
            self.queue.push(point);
        }
    }

    /// Consume the aggregator and return the retained points, sorted.
    pub fn into_vec(self) -> Vec<ScoredPoint> {
        self.queue.into_sorted_vec()
    }

    /// Current top of the fixed-length queue, if any point was retained.
    pub fn lowest(&self) -> Option<&ScoredPoint> {
        self.queue.top()
    }
}
/// Merges search results coming from multiple segments for a whole batch of
/// requests, filtering out outdated point versions.
pub struct BatchResultAggregator {
    // result aggregators for each batched request
    batch_aggregators: Vec<SearchResultAggregator>,
    // Store max version for each point id to exclude outdated points from the result
    point_versions: AHashMap<PointIdType, SeqNumberType>,
}
impl BatchResultAggregator {
    /// One aggregator per batched request, sized by that request's `top` limit.
    pub fn new(tops: impl IntoIterator<Item = usize>) -> Self {
        let batch_aggregators = tops
            .into_iter()
            .map(SearchResultAggregator::new)
            .collect();
        BatchResultAggregator {
            batch_aggregators,
            point_versions: AHashMap::new(),
        }
    }

    /// Record the highest version observed for every point id in the results.
    pub fn update_point_versions<'a>(
        &mut self,
        all_searches_results: impl IntoIterator<Item = &'a ScoredPoint>,
    ) {
        for point in all_searches_results {
            let version = self.point_versions.entry(point.id).or_insert(point.version);
            *version = max(*version, point.version);
        }
    }

    /// Updates the specific batch result aggregator with the new points
    /// Point must be:
    /// - not seen before
    /// - not outdated (not less than the version stored in point_versions)
    ///
    /// WARN: Must be called after `update_point_versions`, so that `point_versions` is up to date
    pub fn update_batch_results(
        &mut self,
        batch_id: usize,
        search_results: impl IntoIterator<Item = ScoredPoint>,
    ) {
        let aggregator = &mut self.batch_aggregators[batch_id];
        for scored_point in search_results {
            debug_assert!(self.point_versions.contains_key(&scored_point.id));
            let up_to_date = self
                .point_versions
                .get(&scored_point.id)
                .is_some_and(|max_version| scored_point.version >= *max_version);
            if up_to_date {
                aggregator.push(scored_point);
            }
        }
    }

    /// Return lowest acceptable score for given batch id
    pub fn batch_lowest_scores(&self, batch_id: usize) -> Option<ScoreType> {
        self.batch_aggregators[batch_id]
            .lowest()
            .map(|point| point.score)
    }

    /// Consume the aggregator, yielding the merged top-k list per request.
    pub fn into_topk(self) -> Vec<Vec<ScoredPoint>> {
        self.batch_aggregators
            .into_iter()
            .map(SearchResultAggregator::into_vec)
            .collect()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/lib.rs | lib/shard/src/lib.rs | pub mod locked_segment;
pub mod operation_rate_cost;
pub mod operations;
pub mod payload_index_schema;
pub mod proxy_segment;
pub mod query;
pub mod retrieve;
pub mod search;
pub mod search_result_aggregator;
pub mod segment_holder;
pub mod update;
pub mod wal;
pub mod common;
// Test fixtures, compiled only when the "testing" feature is enabled.
#[cfg(feature = "testing")]
pub mod fixtures;
/// Numeric identifier of a peer.
pub type PeerId = u64;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/update.rs | lib/shard/src/update.rs | //! A collection of functions for updating points and payloads stored in segments
use std::sync::atomic::AtomicBool;
use ahash::{AHashMap, AHashSet};
use common::counter::hardware_counter::HardwareCounterCell;
use itertools::iproduct;
use parking_lot::{RwLock, RwLockWriteGuard};
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::build_index_result::BuildFieldIndexResult;
use segment::data_types::named_vectors::NamedVectors;
use segment::entry::entry_point::SegmentEntry;
use segment::json_path::JsonPath;
use segment::types::{
Condition, Filter, Payload, PayloadFieldSchema, PayloadKeyType, PayloadKeyTypeRef, PointIdType,
SeqNumberType, VectorNameBuf,
};
use crate::operations::FieldIndexOperations;
use crate::operations::payload_ops::PayloadOps;
use crate::operations::point_ops::{
ConditionalInsertOperationInternal, PointOperations, PointStructPersisted,
};
use crate::operations::vector_ops::{PointVectorsPersisted, UpdateVectorsOp, VectorOperations};
use crate::segment_holder::SegmentHolder;
/// Dispatch a [`PointOperations`] (upsert/delete/sync) to the segment holder.
///
/// Returns the number of affected points.
pub fn process_point_operation(
    segments: &RwLock<SegmentHolder>,
    op_num: SeqNumberType,
    point_operation: PointOperations,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    match point_operation {
        PointOperations::UpsertPoints(operation) => {
            let points = operation.into_point_vec();
            let res = upsert_points(&segments.read(), op_num, points.iter(), hw_counter)?;
            Ok(res)
        }
        PointOperations::UpsertPointsConditional(operation) => {
            conditional_upsert(&segments.read(), op_num, operation, hw_counter)
        }
        PointOperations::DeletePoints { ids } => {
            delete_points(&segments.read(), op_num, &ids, hw_counter)
        }
        PointOperations::DeletePointsByFilter(filter) => {
            delete_points_by_filter(&segments.read(), op_num, &filter, hw_counter)
        }
        PointOperations::SyncPoints(operation) => {
            let (deleted, new, updated) = sync_points(
                &segments.read(),
                op_num,
                operation.from_id,
                operation.to_id,
                &operation.points,
                hw_counter,
            )?;
            Ok(deleted + new + updated)
        }
        #[cfg(feature = "staging")]
        PointOperations::TestDelay(operation) => {
            operation.execute();
            // This operation doesn't directly affect segment/point versions, so we bump it here
            segments.read().bump_max_segment_version_overwrite(op_num);
            Ok(0)
        }
    }
}
/// Dispatch a [`VectorOperations`] (update/delete named vectors) to the
/// segment holder. Returns the number of affected points.
pub fn process_vector_operation(
    segments: &RwLock<SegmentHolder>,
    op_num: SeqNumberType,
    vector_operation: VectorOperations,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    match vector_operation {
        VectorOperations::UpdateVectors(update_vectors) => {
            update_vectors_conditional(&segments.read(), op_num, update_vectors, hw_counter)
        }
        VectorOperations::DeleteVectors(ids, vector_names) => delete_vectors(
            &segments.read(),
            op_num,
            &ids.points,
            &vector_names,
            hw_counter,
        ),
        VectorOperations::DeleteVectorsByFilter(filter, vector_names) => {
            delete_vectors_by_filter(&segments.read(), op_num, &filter, &vector_names, hw_counter)
        }
    }
}
/// Dispatch a [`PayloadOps`] to the segment holder. Set/delete/overwrite
/// operations require either an explicit point list or a filter; providing
/// neither is a validation error. Returns the number of affected points.
pub fn process_payload_operation(
    segments: &RwLock<SegmentHolder>,
    op_num: SeqNumberType,
    payload_operation: PayloadOps,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    match payload_operation {
        PayloadOps::SetPayload(sp) => {
            let payload: Payload = sp.payload;
            // Explicit point list takes precedence over a filter.
            if let Some(points) = sp.points {
                set_payload(
                    &segments.read(),
                    op_num,
                    &payload,
                    &points,
                    &sp.key,
                    hw_counter,
                )
            } else if let Some(filter) = sp.filter {
                set_payload_by_filter(
                    &segments.read(),
                    op_num,
                    &payload,
                    &filter,
                    &sp.key,
                    hw_counter,
                )
            } else {
                // TODO: BadRequest (prev) vs BadInput (current)!?
                Err(OperationError::ValidationError {
                    description: "No points or filter specified".to_string(),
                })
            }
        }
        PayloadOps::DeletePayload(dp) => {
            if let Some(points) = dp.points {
                delete_payload(&segments.read(), op_num, &points, &dp.keys, hw_counter)
            } else if let Some(filter) = dp.filter {
                delete_payload_by_filter(&segments.read(), op_num, &filter, &dp.keys, hw_counter)
            } else {
                // TODO: BadRequest (prev) vs BadInput (current)!?
                Err(OperationError::ValidationError {
                    description: "No points or filter specified".to_string(),
                })
            }
        }
        PayloadOps::ClearPayload { ref points, .. } => {
            clear_payload(&segments.read(), op_num, points, hw_counter)
        }
        PayloadOps::ClearPayloadByFilter(ref filter) => {
            clear_payload_by_filter(&segments.read(), op_num, filter, hw_counter)
        }
        PayloadOps::OverwritePayload(sp) => {
            let payload: Payload = sp.payload;
            if let Some(points) = sp.points {
                overwrite_payload(&segments.read(), op_num, &payload, &points, hw_counter)
            } else if let Some(filter) = sp.filter {
                overwrite_payload_by_filter(&segments.read(), op_num, &payload, &filter, hw_counter)
            } else {
                // TODO: BadRequest (prev) vs BadInput (current)!?
                Err(OperationError::ValidationError {
                    description: "No points or filter specified".to_string(),
                })
            }
        }
    }
}
/// Dispatch a payload field index create/delete operation to the segment
/// holder, forwarding the count reported by the underlying operation.
pub fn process_field_index_operation(
    segments: &RwLock<SegmentHolder>,
    op_num: SeqNumberType,
    field_index_operation: &FieldIndexOperations,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    match field_index_operation {
        FieldIndexOperations::CreateIndex(index_data) => create_field_index(
            &segments.read(),
            op_num,
            &index_data.field_name,
            index_data.field_schema.as_ref(),
            hw_counter,
        ),
        FieldIndexOperations::DeleteIndex(field_name) => {
            delete_field_index(&segments.read(), op_num, field_name)
        }
    }
}
/// Do not insert more than this number of points in a single update operation chunk
/// This is needed to avoid locking segments for too long, so that
/// parallel read operations are not starved.
const UPDATE_OP_CHUNK_SIZE: usize = 32;
/// Checks point id in each segment, update point if found.
/// All not found points are inserted into random segment.
/// Returns: number of updated points.
pub fn upsert_points<'a, T>(
    segments: &SegmentHolder,
    op_num: SeqNumberType,
    points: T,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize>
where
    T: IntoIterator<Item = &'a PointStructPersisted>,
{
    // Index the incoming points by id for O(1) lookup from the closures below.
    let points_map: AHashMap<PointIdType, _> = points.into_iter().map(|p| (p.id, p)).collect();
    let ids: Vec<PointIdType> = points_map.keys().copied().collect();
    let mut res = 0;
    // Process in chunks so segment locks are held only briefly (see
    // `UPDATE_OP_CHUNK_SIZE`).
    for ids_chunk in ids.chunks(UPDATE_OP_CHUNK_SIZE) {
        // Update points in writable segments
        let updated_points = segments.apply_points_with_conditional_move(
            op_num,
            ids_chunk,
            |id, write_segment| {
                let point = points_map[&id];
                upsert_with_payload(
                    write_segment,
                    op_num,
                    id,
                    point.get_vectors(),
                    point.payload.as_ref(),
                    hw_counter,
                )
            },
            // Applied when a point is moved between segments: merge the new
            // vectors in and replace the payload if one was provided.
            |id, vectors, old_payload| {
                let point = points_map[&id];
                for (name, vec) in point.get_vectors() {
                    vectors.insert(name.into(), vec.to_owned());
                }
                if let Some(payload) = &point.payload {
                    *old_payload = payload.clone();
                }
            },
            |_| false,
            hw_counter,
        )?;
        res += updated_points.len();
        // Insert new points, which was not updated or existed
        let new_point_ids = ids_chunk
            .iter()
            .copied()
            .filter(|x| !updated_points.contains(x));
        {
            let default_write_segment =
                segments.smallest_appendable_segment().ok_or_else(|| {
                    OperationError::service_error(
                        "No appendable segments exist, expected at least one",
                    )
                })?;
            let segment_arc = default_write_segment.get();
            let mut write_segment = segment_arc.write();
            for point_id in new_point_ids {
                let point = points_map[&point_id];
                res += usize::from(upsert_with_payload(
                    &mut write_segment,
                    op_num,
                    point_id,
                    point.get_vectors(),
                    point.payload.as_ref(),
                    hw_counter,
                )?);
            }
            // Fair unlock gives queued readers a chance between chunks.
            RwLockWriteGuard::unlock_fair(write_segment);
        };
    }
    Ok(res)
}
/// Upsert points, skipping those that already exist but do NOT match the
/// operation's condition. Returns the number of upserted points.
pub fn conditional_upsert(
    segments: &SegmentHolder,
    op_num: SeqNumberType,
    operation: ConditionalInsertOperationInternal,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    // Find points, which do exist, but don't match the condition.
    // Exclude those points from the upsert operation.
    let ConditionalInsertOperationInternal {
        mut points_op,
        condition,
    } = operation;
    let point_ids = points_op.point_ids();
    let points_to_exclude =
        select_excluded_by_filter_ids(segments, point_ids, condition, hw_counter)?;
    points_op.retain_point_ids(|idx| !points_to_exclude.contains(idx));
    let points = points_op.into_point_vec();
    let upserted_points = upsert_points(segments, op_num, points.iter(), hw_counter)?;
    if upserted_points == 0 {
        // In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
        // If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
        segments.bump_max_segment_version_overwrite(op_num);
    }
    Ok(upserted_points)
}
/// Upsert to a point ID with the specified vectors and payload in the given segment.
///
/// If the payload is None, the existing payload will be cleared.
///
/// Returns
/// - Ok(true) if the operation was successful and point replaced existing value
/// - Ok(false) if the operation was successful and point was inserted
/// - Err if the operation failed
fn upsert_with_payload(
    segment: &mut RwLockWriteGuard<dyn SegmentEntry>,
    op_num: SeqNumberType,
    point_id: PointIdType,
    vectors: NamedVectors,
    payload: Option<&Payload>,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
    let mut res = segment.upsert_point(op_num, point_id, vectors, hw_counter)?;
    // `res` stays true only if every sub-operation reported a replacement.
    if let Some(full_payload) = payload {
        res &= segment.set_full_payload(op_num, point_id, full_payload, hw_counter)?;
    } else {
        res &= segment.clear_payload(op_num, point_id, hw_counter)?;
    }
    debug_assert!(
        segment.has_point(point_id),
        "the point {point_id} should be present immediately after the upsert"
    );
    Ok(res)
}
/// Max amount of points to delete in a batched deletion iteration
const DELETION_BATCH_SIZE: usize = 512;
/// Tries to delete points from all segments, returns number of actually deleted points
pub fn delete_points(
    segments: &SegmentHolder,
    op_num: SeqNumberType,
    ids: &[PointIdType],
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    let mut total_deleted_points = 0;
    // Batched so segment locks are released between chunks.
    for batch in ids.chunks(DELETION_BATCH_SIZE) {
        let deleted_points = segments.apply_points(
            batch,
            |_| (),
            |id, _idx, write_segment, ()| write_segment.delete_point(op_num, id, hw_counter),
        )?;
        total_deleted_points += deleted_points;
    }
    Ok(total_deleted_points)
}
/// Deletes points from all segments matching the given filter
pub fn delete_points_by_filter(
    segments: &SegmentHolder,
    op_num: SeqNumberType,
    filter: &Filter,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
    let mut total_deleted = 0;
    // we don't want to cancel this filtered read
    let is_stopped = AtomicBool::new(false);
    // First collect, per segment, the ids of all points matching the filter...
    let mut points_to_delete: AHashMap<_, _> = segments
        .iter()
        .map(|(segment_id, segment)| {
            (
                *segment_id,
                segment.get().read().read_filtered(
                    None,
                    None,
                    Some(filter),
                    &is_stopped,
                    hw_counter,
                ),
            )
        })
        .collect();
    // ...then delete them in batches of `DELETION_BATCH_SIZE` per segment
    // visit, so write locks are not held for the whole deletion.
    segments.apply_segments_batched(|s, segment_id| {
        let Some(curr_points) = points_to_delete.get_mut(&segment_id) else {
            return Ok(false);
        };
        if curr_points.is_empty() {
            return Ok(false);
        }
        let mut deleted_in_batch = 0;
        while let Some(point_id) = curr_points.pop() {
            if s.delete_point(op_num, point_id, hw_counter)? {
                total_deleted += 1;
                deleted_in_batch += 1;
            }
            if deleted_in_batch >= DELETION_BATCH_SIZE {
                break;
            }
        }
        Ok(true)
    })?;
    if total_deleted == 0 {
        // In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
        // If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
        segments.bump_max_segment_version_overwrite(op_num);
    }
    Ok(total_deleted)
}
/// Sync points within a given [from_id; to_id) range.
///
/// 1. Retrieve existing points for a range
/// 2. Remove points, which are not present in the sync operation
/// 3. Retrieve overlapping points, detect which one of them are changed
/// 4. Select new points
/// 5. Upsert points which differ from the stored ones
///
/// Returns:
/// (number of deleted points, number of new points, number of updated points)
pub fn sync_points(
segments: &SegmentHolder,
op_num: SeqNumberType,
from_id: Option<PointIdType>,
to_id: Option<PointIdType>,
points: &[PointStructPersisted],
hw_counter: &HardwareCounterCell,
) -> OperationResult<(usize, usize, usize)> {
let id_to_point: AHashMap<PointIdType, _> = points.iter().map(|p| (p.id, p)).collect();
let sync_points: AHashSet<_> = points.iter().map(|p| p.id).collect();
// 1. Retrieve existing points for a range
let stored_point_ids: AHashSet<_> = segments
.iter()
.flat_map(|(_, segment)| segment.get().read().read_range(from_id, to_id))
.collect();
// 2. Remove points, which are not present in the sync operation
let points_to_remove: Vec<_> = stored_point_ids.difference(&sync_points).copied().collect();
let deleted = delete_points(segments, op_num, points_to_remove.as_slice(), hw_counter)?;
// 3. Retrieve overlapping points, detect which one of them are changed
let existing_point_ids: Vec<_> = stored_point_ids
.intersection(&sync_points)
.copied()
.collect();
let mut points_to_update: Vec<_> = Vec::new();
// we donβt want to cancel this filtered read
let is_stopped = AtomicBool::new(false);
let _num_updated =
segments.read_points(existing_point_ids.as_slice(), &is_stopped, |id, segment| {
let all_vectors = match segment.all_vectors(id, hw_counter) {
Ok(v) => v,
Err(OperationError::InconsistentStorage { .. }) => NamedVectors::default(),
Err(e) => return Err(e),
};
let payload = segment.payload(id, hw_counter)?;
let point = id_to_point.get(&id).unwrap();
if point.get_vectors() != all_vectors {
points_to_update.push(*point);
Ok(true)
} else {
let payload_match = match point.payload {
Some(ref p) => p == &payload,
None => Payload::default() == payload,
};
if !payload_match {
points_to_update.push(*point);
Ok(true)
} else {
Ok(false)
}
}
})?;
// 4. Select new points
let num_updated = points_to_update.len();
let mut num_new = 0;
sync_points.difference(&stored_point_ids).for_each(|id| {
num_new += 1;
points_to_update.push(*id_to_point.get(id).unwrap());
});
// 5. Upsert points which differ from the stored ones
let num_replaced = upsert_points(segments, op_num, points_to_update, hw_counter)?;
debug_assert!(
num_replaced <= num_updated,
"number of replaced points cannot be greater than points to update ({num_replaced} <= {num_updated})",
);
Ok((deleted, num_new, num_updated))
}
/// Batch size when modifying vector
const VECTOR_OP_BATCH_SIZE: usize = 32;
pub fn update_vectors_conditional(
segments: &SegmentHolder,
op_num: SeqNumberType,
points: UpdateVectorsOp,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let UpdateVectorsOp {
mut points,
update_filter,
} = points;
let Some(filter_condition) = update_filter else {
return update_vectors(segments, op_num, points, hw_counter);
};
let point_ids: Vec<_> = points.iter().map(|point| point.id).collect();
let points_to_exclude =
select_excluded_by_filter_ids(segments, point_ids, filter_condition, hw_counter)?;
points.retain(|p| !points_to_exclude.contains(&p.id));
update_vectors(segments, op_num, points, hw_counter)
}
/// Update the specified named vectors of a point, keeping unspecified vectors intact.
fn update_vectors(
segments: &SegmentHolder,
op_num: SeqNumberType,
points: Vec<PointVectorsPersisted>,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
// Build a map of vectors to update per point, merge updates on same point ID
let mut points_map: AHashMap<PointIdType, NamedVectors> = AHashMap::new();
for point in points {
let PointVectorsPersisted { id, vector } = point;
let named_vector = NamedVectors::from(vector);
let entry = points_map.entry(id).or_default();
entry.merge(named_vector);
}
let ids: Vec<PointIdType> = points_map.keys().copied().collect();
let mut total_updated_points = 0;
for batch in ids.chunks(VECTOR_OP_BATCH_SIZE) {
let updated_points = segments.apply_points_with_conditional_move(
op_num,
batch,
|id, write_segment| {
let vectors = points_map[&id].clone();
write_segment.update_vectors(op_num, id, vectors, hw_counter)
},
|id, owned_vectors, _| {
for (vector_name, vector_ref) in points_map[&id].iter() {
owned_vectors.insert(vector_name.to_owned(), vector_ref.to_owned());
}
},
|_| false,
hw_counter,
)?;
check_unprocessed_points(batch, &updated_points)?;
total_updated_points += updated_points.len();
}
Ok(total_updated_points)
}
/// Delete the given named vectors for the given points, keeping other vectors intact.
pub fn delete_vectors(
segments: &SegmentHolder,
op_num: SeqNumberType,
points: &[PointIdType],
vector_names: &[VectorNameBuf],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let mut total_deleted_points = 0;
for batch in points.chunks(VECTOR_OP_BATCH_SIZE) {
let modified_points = segments.apply_points_with_conditional_move(
op_num,
batch,
|id, write_segment| {
let mut res = true;
for name in vector_names {
res &= write_segment.delete_vector(op_num, id, name)?;
}
Ok(res)
},
|_, owned_vectors, _| {
for name in vector_names {
owned_vectors.remove_ref(name);
}
},
|_| false,
hw_counter,
)?;
check_unprocessed_points(batch, &modified_points)?;
total_deleted_points += modified_points.len();
}
Ok(total_deleted_points)
}
/// Delete the given named vectors for points matching the given filter, keeping other vectors intact.
pub fn delete_vectors_by_filter(
segments: &SegmentHolder,
op_num: SeqNumberType,
filter: &Filter,
vector_names: &[VectorNameBuf],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let affected_points = points_by_filter(segments, filter, hw_counter)?;
let vectors_deleted =
delete_vectors(segments, op_num, &affected_points, vector_names, hw_counter)?;
if vectors_deleted == 0 {
// In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
// If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
segments.bump_max_segment_version_overwrite(op_num);
}
Ok(vectors_deleted)
}
/// Batch size when modifying payload
const PAYLOAD_OP_BATCH_SIZE: usize = 32;
pub fn set_payload(
segments: &SegmentHolder,
op_num: SeqNumberType,
payload: &Payload,
points: &[PointIdType],
key: &Option<JsonPath>,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let mut total_updated_points = 0;
for chunk in points.chunks(PAYLOAD_OP_BATCH_SIZE) {
let updated_points = segments.apply_points_with_conditional_move(
op_num,
chunk,
|id, write_segment| write_segment.set_payload(op_num, id, payload, key, hw_counter),
|_, _, old_payload| match key {
Some(key) => old_payload.merge_by_key(payload, key),
None => old_payload.merge(payload),
},
|segment| {
segment.get_indexed_fields().keys().all(|indexed_path| {
!indexed_path.is_affected_by_value_set(&payload.0, key.as_ref())
})
},
hw_counter,
)?;
check_unprocessed_points(chunk, &updated_points)?;
total_updated_points += updated_points.len();
}
Ok(total_updated_points)
}
pub fn set_payload_by_filter(
segments: &SegmentHolder,
op_num: SeqNumberType,
payload: &Payload,
filter: &Filter,
key: &Option<JsonPath>,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let affected_points = points_by_filter(segments, filter, hw_counter)?;
let points_updated = set_payload(segments, op_num, payload, &affected_points, key, hw_counter)?;
if points_updated == 0 {
// In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
// If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
segments.bump_max_segment_version_overwrite(op_num);
}
Ok(points_updated)
}
pub fn delete_payload(
segments: &SegmentHolder,
op_num: SeqNumberType,
points: &[PointIdType],
keys: &[PayloadKeyType],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let mut total_deleted_points = 0;
for batch in points.chunks(PAYLOAD_OP_BATCH_SIZE) {
let updated_points = segments.apply_points_with_conditional_move(
op_num,
batch,
|id, write_segment| {
let mut res = true;
for key in keys {
res &= write_segment.delete_payload(op_num, id, key, hw_counter)?;
}
Ok(res)
},
|_, _, payload| {
for key in keys {
payload.remove(key);
}
},
|segment| {
iproduct!(segment.get_indexed_fields().keys(), keys).all(
|(indexed_path, path_to_delete)| {
!indexed_path.is_affected_by_value_remove(path_to_delete)
},
)
},
hw_counter,
)?;
check_unprocessed_points(batch, &updated_points)?;
total_deleted_points += updated_points.len();
}
Ok(total_deleted_points)
}
pub fn delete_payload_by_filter(
segments: &SegmentHolder,
op_num: SeqNumberType,
filter: &Filter,
keys: &[PayloadKeyType],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let affected_points = points_by_filter(segments, filter, hw_counter)?;
let points_updated = delete_payload(segments, op_num, &affected_points, keys, hw_counter)?;
if points_updated == 0 {
// In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
// If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
segments.bump_max_segment_version_overwrite(op_num);
}
Ok(points_updated)
}
pub fn clear_payload(
segments: &SegmentHolder,
op_num: SeqNumberType,
points: &[PointIdType],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let mut total_updated_points = 0;
for batch in points.chunks(PAYLOAD_OP_BATCH_SIZE) {
let updated_points = segments.apply_points_with_conditional_move(
op_num,
batch,
|id, write_segment| write_segment.clear_payload(op_num, id, hw_counter),
|_, _, payload| payload.0.clear(),
|segment| segment.get_indexed_fields().is_empty(),
hw_counter,
)?;
check_unprocessed_points(batch, &updated_points)?;
total_updated_points += updated_points.len();
}
Ok(total_updated_points)
}
/// Clear Payloads from all segments matching the given filter
pub fn clear_payload_by_filter(
segments: &SegmentHolder,
op_num: SeqNumberType,
filter: &Filter,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let points_to_clear = points_by_filter(segments, filter, hw_counter)?;
let points_cleared = clear_payload(segments, op_num, &points_to_clear, hw_counter)?;
if points_cleared == 0 {
// In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
// If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
segments.bump_max_segment_version_overwrite(op_num);
}
Ok(points_cleared)
}
pub fn overwrite_payload(
segments: &SegmentHolder,
op_num: SeqNumberType,
payload: &Payload,
points: &[PointIdType],
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let mut total_updated_points = 0;
for batch in points.chunks(PAYLOAD_OP_BATCH_SIZE) {
let updated_points = segments.apply_points_with_conditional_move(
op_num,
batch,
|id, write_segment| write_segment.set_full_payload(op_num, id, payload, hw_counter),
|_, _, old_payload| {
*old_payload = payload.clone();
},
|segment| segment.get_indexed_fields().is_empty(),
hw_counter,
)?;
total_updated_points += updated_points.len();
check_unprocessed_points(batch, &updated_points)?;
}
Ok(total_updated_points)
}
pub fn overwrite_payload_by_filter(
segments: &SegmentHolder,
op_num: SeqNumberType,
payload: &Payload,
filter: &Filter,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let affected_points = points_by_filter(segments, filter, hw_counter)?;
let points_updated =
overwrite_payload(segments, op_num, payload, &affected_points, hw_counter)?;
if points_updated == 0 {
// In case we didn't hit any points, we suggest this op_num to the segment-holder to make WAL acknowledge this operation.
// If we don't do this, startup might take up a lot of time in some scenarios because of recovering these no-op operations.
segments.bump_max_segment_version_overwrite(op_num);
}
Ok(points_updated)
}
pub fn create_field_index(
segments: &SegmentHolder,
op_num: SeqNumberType,
field_name: PayloadKeyTypeRef,
field_schema: Option<&PayloadFieldSchema>,
hw_counter: &HardwareCounterCell,
) -> OperationResult<usize> {
let Some(field_schema) = field_schema else {
return Err(OperationError::TypeInferenceError {
field_name: field_name.to_owned(),
});
};
segments.apply_segments(|write_segment| {
write_segment.with_upgraded(|segment| {
segment.delete_field_index_if_incompatible(op_num, field_name, field_schema)
})?;
let (schema, indexes) =
match write_segment.build_field_index(op_num, field_name, field_schema, hw_counter)? {
BuildFieldIndexResult::SkippedByVersion => {
return Ok(false);
}
BuildFieldIndexResult::AlreadyExists => {
return Ok(false);
}
BuildFieldIndexResult::IncompatibleSchema => {
// This is a service error, as we should have just removed the old index
// So it should not be possible to get this error
return Err(OperationError::service_error(format!(
"Incompatible schema for field index on field {field_name}",
)));
}
BuildFieldIndexResult::Built { schema, indexes } => (schema, indexes),
};
write_segment.with_upgraded(|segment| {
segment.apply_field_index(op_num, field_name.to_owned(), schema, indexes)
})
})
}
pub fn delete_field_index(
segments: &SegmentHolder,
op_num: SeqNumberType,
field_name: PayloadKeyTypeRef,
) -> OperationResult<usize> {
segments.apply_segments(|write_segment| {
write_segment.with_upgraded(|segment| segment.delete_field_index(op_num, field_name))
})
}
fn select_excluded_by_filter_ids(
segments: &SegmentHolder,
point_ids: impl IntoIterator<Item = PointIdType>,
filter: Filter,
hw_counter: &HardwareCounterCell,
) -> OperationResult<AHashSet<PointIdType>> {
// Filter for points that doesn't match the condition, and have matching
let non_match_filter =
Filter::new_must_not(Condition::Filter(filter)).with_point_ids(point_ids);
Ok(points_by_filter(segments, &non_match_filter, hw_counter)?
.into_iter()
.collect())
}
fn points_by_filter(
segments: &SegmentHolder,
filter: &Filter,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Vec<PointIdType>> {
let mut affected_points: Vec<PointIdType> = Vec::new();
// we donβt want to cancel this filtered read
let is_stopped = AtomicBool::new(false);
segments.for_each_segment(|s| {
let points = s.read_filtered(None, None, Some(filter), &is_stopped, hw_counter);
affected_points.extend_from_slice(points.as_slice());
Ok(true)
})?;
Ok(affected_points)
}
fn check_unprocessed_points(
points: &[PointIdType],
processed: &AHashSet<PointIdType>,
) -> OperationResult<usize> {
let first_missed_point = points.iter().copied().find(|p| !processed.contains(p));
match first_missed_point {
None => Ok(processed.len()),
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/locked_segment.rs | lib/shard/src/locked_segment.rs | use std::sync::Arc;
use std::thread::sleep;
use std::time::{Duration, Instant};
use parking_lot::RwLock;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::entry::entry_point::SegmentEntry;
use segment::segment::Segment;
use crate::proxy_segment::ProxySegment;
const DROP_SPIN_TIMEOUT: Duration = Duration::from_millis(10);
const DROP_DATA_TIMEOUT: Duration = Duration::from_secs(60 * 60);
/// Object, which unifies the access to different types of segments, but still allows to
/// access the original type of the segment if it is required for more efficient operations.
#[derive(Clone, Debug)]
pub enum LockedSegment {
Original(Arc<RwLock<Segment>>),
Proxy(Arc<RwLock<ProxySegment>>),
}
fn try_unwrap_with_timeout<T>(
mut arc: Arc<T>,
spin: Duration,
timeout: Duration,
) -> Result<T, Arc<T>> {
let start = Instant::now();
loop {
arc = match Arc::try_unwrap(arc) {
Ok(unwrapped) => return Ok(unwrapped),
Err(arc) => arc,
};
if start.elapsed() >= timeout {
return Err(arc);
}
sleep(spin);
}
}
impl LockedSegment {
pub fn new<T>(segment: T) -> Self
where
T: Into<LockedSegment>,
{
segment.into()
}
/// Get reference to the locked segment
pub fn get(&self) -> &RwLock<dyn SegmentEntry> {
match self {
LockedSegment::Original(segment) => segment.as_ref(),
LockedSegment::Proxy(proxy) => proxy.as_ref(),
}
}
pub fn is_original(&self) -> bool {
match self {
LockedSegment::Original(_) => true,
LockedSegment::Proxy(_) => false,
}
}
/// Consume the LockedSegment and drop the underlying segment data.
/// Operation fails if the segment is used by other thread for longer than `timeout`.
pub fn drop_data(self) -> OperationResult<()> {
match self {
LockedSegment::Original(segment) => {
match try_unwrap_with_timeout(segment, DROP_SPIN_TIMEOUT, DROP_DATA_TIMEOUT) {
Ok(raw_locked_segment) => raw_locked_segment.into_inner().drop_data(),
Err(locked_segment) => Err(OperationError::service_error(format!(
"Removing segment which is still in use: {:?}",
locked_segment.read().data_path(),
))),
}
}
LockedSegment::Proxy(proxy) => {
match try_unwrap_with_timeout(proxy, DROP_SPIN_TIMEOUT, DROP_DATA_TIMEOUT) {
Ok(raw_locked_segment) => raw_locked_segment.into_inner().drop_data(),
Err(locked_segment) => Err(OperationError::service_error(format!(
"Removing proxy segment which is still in use: {:?}",
locked_segment.read().data_path(),
))),
}
}
}
}
}
impl From<Segment> for LockedSegment {
fn from(s: Segment) -> Self {
LockedSegment::Original(Arc::new(RwLock::new(s)))
}
}
impl From<ProxySegment> for LockedSegment {
fn from(s: ProxySegment) -> Self {
LockedSegment::Proxy(Arc::new(RwLock::new(s)))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operation_rate_cost.rs | lib/shard/src/operation_rate_cost.rs | use segment::types::Filter;
/// Base cost for a read operation
pub const BASE_COST: usize = 1;
pub fn filter_rate_cost(filter: &Filter) -> usize {
filter.total_conditions_count()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/search.rs | lib/shard/src/search.rs | use api::rest::SearchRequestInternal;
use common::types::ScoreType;
use itertools::Itertools as _;
use segment::data_types::vectors::{NamedQuery, NamedVectorStruct, VectorInternal};
use segment::types::{Filter, SearchParams, WithPayloadInterface, WithVector};
use segment::vector_storage::query::{ContextPair, ContextQuery, DiscoveryQuery, RecoQuery};
use sparse::common::sparse_vector::validate_sparse_vector_impl;
use crate::query::query_enum::QueryEnum;
/// DEPRECATED: Search method should be removed and replaced with `ShardQueryRequest`
#[derive(Clone, Debug, PartialEq)]
pub struct CoreSearchRequest {
/// Every kind of query that can be performed on segment level
pub query: QueryEnum,
/// Look only for points which satisfies this conditions
pub filter: Option<Filter>,
/// Additional search params
pub params: Option<SearchParams>,
/// Max number of result to return
pub limit: usize,
/// Offset of the first result to return.
/// May be used to paginate results.
/// Note: large offset values may cause performance issues.
pub offset: usize,
/// Select which payload to return with the response. Default is false.
pub with_payload: Option<WithPayloadInterface>,
/// Options for specifying which vectors to include into response. Default is false.
pub with_vector: Option<WithVector>,
pub score_threshold: Option<ScoreType>,
}
impl CoreSearchRequest {
pub fn search_rate_cost(&self) -> usize {
let mut cost = self.query.search_cost();
if let Some(filter) = &self.filter {
cost += filter.total_conditions_count();
}
cost
}
}
impl From<SearchRequestInternal> for CoreSearchRequest {
fn from(request: SearchRequestInternal) -> Self {
let SearchRequestInternal {
vector,
filter,
score_threshold,
limit,
offset,
params,
with_vector,
with_payload,
} = request;
Self {
query: QueryEnum::Nearest(NamedQuery::from(NamedVectorStruct::from(vector))),
filter,
params,
limit,
offset: offset.unwrap_or_default(),
with_payload,
with_vector,
score_threshold,
}
}
}
impl TryFrom<api::grpc::qdrant::CoreSearchPoints> for CoreSearchRequest {
type Error = tonic::Status;
fn try_from(value: api::grpc::qdrant::CoreSearchPoints) -> Result<Self, Self::Error> {
let query = value
.query
.and_then(|query| query.query)
.map(|query| {
Ok(match query {
api::grpc::qdrant::query_enum::Query::NearestNeighbors(vector) => {
let vector_internal = VectorInternal::try_from(vector)?;
QueryEnum::Nearest(NamedQuery::from(
api::grpc::conversions::into_named_vector_struct(
value.vector_name,
vector_internal,
)?,
))
}
api::grpc::qdrant::query_enum::Query::RecommendBestScore(query) => {
QueryEnum::RecommendBestScore(NamedQuery {
query: RecoQuery::try_from(query)?,
using: value.vector_name,
})
}
api::grpc::qdrant::query_enum::Query::RecommendSumScores(query) => {
QueryEnum::RecommendSumScores(NamedQuery {
query: RecoQuery::try_from(query)?,
using: value.vector_name,
})
}
api::grpc::qdrant::query_enum::Query::Discover(query) => {
let Some(target) = query.target else {
return Err(tonic::Status::invalid_argument("Target is not specified"));
};
let pairs = query
.context
.into_iter()
.map(try_context_pair_from_grpc)
.try_collect()?;
QueryEnum::Discover(NamedQuery {
query: DiscoveryQuery::new(target.try_into()?, pairs),
using: value.vector_name,
})
}
api::grpc::qdrant::query_enum::Query::Context(query) => {
let pairs = query
.context
.into_iter()
.map(try_context_pair_from_grpc)
.try_collect()?;
QueryEnum::Context(NamedQuery {
query: ContextQuery::new(pairs),
using: value.vector_name,
})
}
})
})
.transpose()?
.ok_or_else(|| tonic::Status::invalid_argument("Query is not specified"))?;
Ok(Self {
query,
filter: value.filter.map(|f| f.try_into()).transpose()?,
params: value.params.map(|p| p.into()),
limit: value.limit as usize,
offset: value.offset.unwrap_or_default() as usize,
with_payload: value.with_payload.map(|wp| wp.try_into()).transpose()?,
with_vector: Some(
value
.with_vectors
.map(|with_vectors| with_vectors.into())
.unwrap_or_default(),
),
score_threshold: value.score_threshold,
})
}
}
fn try_context_pair_from_grpc(
pair: api::grpc::qdrant::ContextPair,
) -> Result<ContextPair<VectorInternal>, tonic::Status> {
let api::grpc::qdrant::ContextPair { positive, negative } = pair;
match (positive, negative) {
(Some(positive), Some(negative)) => Ok(ContextPair {
positive: positive.try_into()?,
negative: negative.try_into()?,
}),
_ => Err(tonic::Status::invalid_argument(
"All context pairs must have both positive and negative parts",
)),
}
}
impl TryFrom<api::grpc::qdrant::SearchPoints> for CoreSearchRequest {
type Error = tonic::Status;
fn try_from(value: api::grpc::qdrant::SearchPoints) -> Result<Self, Self::Error> {
let api::grpc::qdrant::SearchPoints {
collection_name: _,
vector,
filter,
limit,
with_payload,
params,
score_threshold,
offset,
vector_name,
with_vectors,
read_consistency: _,
timeout: _,
shard_key_selector: _,
sparse_indices,
} = value;
if let Some(sparse_indices) = &sparse_indices {
let api::grpc::qdrant::SparseIndices { data } = sparse_indices;
validate_sparse_vector_impl(data, &vector).map_err(|e| {
tonic::Status::invalid_argument(format!(
"Sparse indices does not match sparse vector conditions: {e}"
))
})?;
}
let vector_internal =
VectorInternal::from_vector_and_indices(vector, sparse_indices.map(|v| v.data));
let vector_struct =
api::grpc::conversions::into_named_vector_struct(vector_name, vector_internal)?;
Ok(Self {
query: QueryEnum::Nearest(NamedQuery::from(vector_struct)),
filter: filter.map(Filter::try_from).transpose()?,
params: params.map(SearchParams::from),
limit: limit as usize,
offset: offset.map(|v| v as usize).unwrap_or_default(),
with_payload: with_payload
.map(WithPayloadInterface::try_from)
.transpose()?,
with_vector: with_vectors.map(WithVector::from),
score_threshold: score_threshold.map(|s| s as ScoreType),
})
}
}
#[derive(Debug, Clone)]
pub struct CoreSearchRequestBatch {
pub searches: Vec<CoreSearchRequest>,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/fixtures.rs | lib/shard/src/fixtures.rs | use std::collections::HashSet;
use std::path::Path;
use common::counter::hardware_counter::HardwareCounterCell;
use rand::Rng;
use rand::rngs::ThreadRng;
use segment::data_types::vectors::only_default_vector;
use segment::entry::entry_point::SegmentEntry;
use segment::payload_json;
use segment::segment::Segment;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{Distance, Payload, PointIdType, SeqNumberType};
/// A generator for random point IDs
#[derive(Default)]
pub(crate) struct PointIdGenerator {
thread_rng: ThreadRng,
used: HashSet<u64>,
}
impl PointIdGenerator {
#[inline]
pub fn random(&mut self) -> PointIdType {
self.thread_rng.random_range(1..u64::MAX).into()
}
#[inline]
pub fn unique(&mut self) -> PointIdType {
for _ in 0..100_000 {
let id = self.random();
if let PointIdType::NumId(num) = id
&& self.used.insert(num)
{
return id;
}
}
panic!("failed to generate unique point ID after 100000 attempts");
}
}
pub fn empty_segment(path: &Path) -> Segment {
build_simple_segment(path, 4, Distance::Dot).unwrap()
}
pub fn random_segment(path: &Path, opnum: SeqNumberType, num_vectors: u64, dim: usize) -> Segment {
let mut id_gen = PointIdGenerator::default();
let mut segment = build_simple_segment(path, dim, Distance::Dot).unwrap();
let mut rnd = rand::rng();
let payload_key = "number";
let hw_counter = HardwareCounterCell::new();
for _ in 0..num_vectors {
let random_vector: Vec<_> = (0..dim).map(|_| rnd.random_range(0.0..1.0)).collect();
let point_id: PointIdType = id_gen.unique();
let payload_value = rnd.random_range(1..1_000);
let payload: Payload = payload_json! {payload_key: vec![payload_value]};
segment
.upsert_point(
opnum,
point_id,
only_default_vector(&random_vector),
&hw_counter,
)
.unwrap();
segment
.set_payload(opnum, point_id, &payload, &None, &hw_counter)
.unwrap();
}
segment
}
pub fn build_segment_1(path: &Path) -> Segment {
let mut segment1 = empty_segment(path);
let vec1 = vec![1.0, 0.0, 1.0, 1.0];
let vec2 = vec![1.0, 0.0, 1.0, 0.0];
let vec3 = vec![1.0, 1.0, 1.0, 1.0];
let vec4 = vec![1.0, 1.0, 0.0, 1.0];
let vec5 = vec![1.0, 0.0, 0.0, 0.0];
let hw_counter = HardwareCounterCell::new();
segment1
.upsert_point(1, 1.into(), only_default_vector(&vec1), &hw_counter)
.unwrap();
segment1
.upsert_point(2, 2.into(), only_default_vector(&vec2), &hw_counter)
.unwrap();
segment1
.upsert_point(3, 3.into(), only_default_vector(&vec3), &hw_counter)
.unwrap();
segment1
.upsert_point(4, 4.into(), only_default_vector(&vec4), &hw_counter)
.unwrap();
segment1
.upsert_point(5, 5.into(), only_default_vector(&vec5), &hw_counter)
.unwrap();
let payload_key = "color";
let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};
segment1
.set_payload(6, 1.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 2.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 3.into(), &payload_option3, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 4.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 5.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment1
}
pub fn build_segment_2(path: &Path) -> Segment {
let mut segment2 = empty_segment(path);
let vec4 = vec![1.0, 1.0, 0.0, 1.0];
let vec5 = vec![1.0, 0.0, 0.0, 0.0];
let vec11 = vec![1.0, 1.0, 1.0, 1.0];
let vec12 = vec![1.0, 1.0, 1.0, 0.0];
let vec13 = vec![1.0, 0.0, 1.0, 1.0];
let vec14 = vec![1.0, 0.0, 0.0, 1.0];
let vec15 = vec![1.0, 1.0, 0.0, 0.0];
let hw_counter = HardwareCounterCell::new();
segment2
.upsert_point(7, 4.into(), only_default_vector(&vec4), &hw_counter)
.unwrap();
segment2
.upsert_point(8, 5.into(), only_default_vector(&vec5), &hw_counter)
.unwrap();
segment2
.upsert_point(11, 11.into(), only_default_vector(&vec11), &hw_counter)
.unwrap();
segment2
.upsert_point(12, 12.into(), only_default_vector(&vec12), &hw_counter)
.unwrap();
segment2
.upsert_point(13, 13.into(), only_default_vector(&vec13), &hw_counter)
.unwrap();
segment2
.upsert_point(14, 14.into(), only_default_vector(&vec14), &hw_counter)
.unwrap();
segment2
.upsert_point(15, 15.into(), only_default_vector(&vec15), &hw_counter)
.unwrap();
segment2
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/wal.rs | lib/shard/src/wal.rs | use std::marker::PhantomData;
use std::path::Path;
use std::result;
use std::thread::JoinHandle;
use io::file_operations::{atomic_save_json, read_json};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use wal::{Wal, WalOptions};
/// Write-Ahead-Log wrapper with built-in type parsing.
/// Stores sequences of records of type `R` in binary files.
///
/// Each stored record is enumerated with sequential number.
/// Sequential number can be used to read stored records starting from some IDs,
/// for removing old, no longer required, records.
#[derive(Debug)]
pub struct SerdeWal<R> {
wal: Wal,
options: WalOptions,
/// First index of our logical WAL.
first_index: Option<u64>,
_record: PhantomData<R>,
}
const FIRST_INDEX_FILE: &str = "first-index";
impl<R: DeserializeOwned + Serialize> SerdeWal<R> {
pub fn new(dir: &Path, wal_options: WalOptions) -> Result<SerdeWal<R>> {
let wal = Wal::with_options(dir, &wal_options)
.map_err(|err| WalError::InitWalError(format!("{err:?}")))?;
let first_index_path = dir.join(FIRST_INDEX_FILE);
let first_index = if first_index_path.exists() {
let wal_state: WalState = read_json(&first_index_path).map_err(|err| {
WalError::InitWalError(format!("failed to read first-index file: {err}"))
})?;
let first_index = wal_state
.ack_index
.max(wal.first_index())
.min(wal.last_index());
Some(first_index)
} else {
None
};
Ok(SerdeWal {
wal,
options: wal_options,
first_index,
_record: PhantomData,
})
}
/// Write a record to the WAL but does guarantee durability.
pub fn write(&mut self, entity: &R) -> Result<u64> {
// ToDo: Replace back to faster rmp, once this https://github.com/serde-rs/serde/issues/2055 solved
let binary_entity = serde_cbor::to_vec(&entity).unwrap();
self.wal
.append(&binary_entity)
.map_err(|err| WalError::WriteWalError(format!("{err:?}")))
}
pub fn read_all(
&self,
with_acknowledged: bool,
) -> impl DoubleEndedIterator<Item = (u64, R)> + '_ {
if with_acknowledged {
self.read(self.first_closed_index())
} else {
self.read(self.first_index())
}
}
pub fn read(&self, from: u64) -> impl DoubleEndedIterator<Item = (u64, R)> + '_ {
// We have to explicitly do `from..self.first_index() + self.len(false)`, instead of more
// concise `from..=self.last_index()`, because if the WAL is empty, `Wal::last_index`
// returns `Wal::first_index`, so we end up with `1..=1` instead of an empty range. π
let to = self.first_index() + self.len(false);
(from..to).map(move |idx| {
let record_bin = self.wal.entry(idx).expect("Can't read entry from WAL");
let record: R = serde_cbor::from_slice(&record_bin)
.or_else(|_err| rmp_serde::from_slice(&record_bin))
.expect("Can't deserialize entry, probably corrupted WAL or version mismatch");
(idx, record)
})
}
pub fn is_empty(&self) -> bool {
self.len(false) == 0
}
pub fn len(&self, with_acknowledged: bool) -> u64 {
if with_acknowledged {
self.wal.num_entries()
} else {
self.wal
.num_entries()
.saturating_sub(self.truncated_prefix_entries_num())
}
}
// WAL operates in *segments*, so when `Wal::prefix_truncate` is called (during `SerdeWal::ack`),
// WAL is not truncated precisely up to the `until_index`, but up to the nearest segment with
// `last_index` that is less-or-equal than `until_index`.
//
// Consider the pseudo-graphic illustration of the WAL that was truncated up to index 35:
//
// | -------- | -------- | ===='++++ | ++++++++ | ++++++++ | ++++++++ |
// 10 20 30 35 40 50 60 70
//
// - ' marks the index 35 that has been truncated-to
// - --- marks segments 10-30 that has been physically deleted
// - +++ marks segments 35-70 that are still valid
// - and === marks part of segment 30-35, that is still physically present on disk,
// but that is "logically" deleted
//
// `truncated_prefix_entries_num` returns the length of the "logically deleted" part of the WAL.
fn truncated_prefix_entries_num(&self) -> u64 {
self.first_index().saturating_sub(self.wal.first_index())
}
/// Inform WAL, that records older than `until_index` are no longer required.
/// If it is possible, WAL will remove unused files.
///
/// # Arguments
///
/// * `until_index` - the newest no longer required record sequence number
pub fn ack(&mut self, until_index: u64) -> Result<()> {
// Truncate WAL
self.wal
.prefix_truncate(until_index)
.map_err(|err| WalError::TruncateWalError(format!("{err:?}")))?;
// Acknowledge index should not decrease
let minimal_first_index = self.first_index.unwrap_or_else(|| self.wal.first_index());
let new_first_index = Some(
until_index
.max(minimal_first_index)
.min(self.wal.last_index()),
);
// Update current `first_index`
if self.first_index != new_first_index {
self.first_index = new_first_index;
// Persist current `first_index` value on disk
// TODO: Should we log this error and continue instead of failing?
self.flush_first_index()?;
}
Ok(())
}
fn flush_first_index(&self) -> Result<()> {
let Some(first_index) = self.first_index else {
return Ok(());
};
atomic_save_json(
&self.path().join(FIRST_INDEX_FILE),
&WalState::new(first_index),
)
.map_err(|err| {
WalError::TruncateWalError(format!("failed to write first-index file: {err:?}"))
})?;
Ok(())
}
pub fn flush(&mut self) -> Result<()> {
self.wal
.flush_open_segment()
.map_err(|err| WalError::WriteWalError(format!("{err:?}")))
}
pub fn flush_async(&mut self) -> JoinHandle<std::io::Result<()>> {
self.wal.flush_open_segment_async()
}
pub fn path(&self) -> &Path {
self.wal.path()
}
/// First index that we still have in the first closed segment.
///
/// If the index is lower than `first_index`, it means we have already acknowledged it but we
/// are still holding it in a closed segment until it gets truncated.
pub fn first_closed_index(&self) -> u64 {
self.wal.first_index()
}
/// First index that is in our logical WAL, right after the last acknowledged operation.
pub fn first_index(&self) -> u64 {
self.first_index
.unwrap_or_else(|| self.first_closed_index())
}
/// Last index that is still available in logical WAL.
pub fn last_index(&self) -> u64 {
self.wal.last_index()
}
pub fn segment_capacity(&self) -> usize {
self.options.segment_capacity
}
}
#[derive(Debug, Deserialize, Serialize)]
struct WalState {
pub ack_index: u64,
}
impl WalState {
pub fn new(ack_index: u64) -> Self {
Self { ack_index }
}
}
pub type Result<T, E = WalError> = result::Result<T, E>;
#[derive(Debug, Error)]
#[error("{0}")]
pub enum WalError {
#[error("Can't init WAL: {0}")]
InitWalError(String),
#[error("Can't write WAL: {0}")]
WriteWalError(String),
#[error("Can't truncate WAL: {0}")]
TruncateWalError(String),
#[error("Operation rejected by WAL for old clock")]
ClockRejected,
}
#[cfg(test)]
mod tests {
use std::num::NonZeroUsize;
#[cfg(not(target_os = "windows"))]
use std::os::unix::fs::MetadataExt;
#[cfg(not(target_os = "windows"))]
use fs_err as fs;
use tempfile::Builder;
use super::*;
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
enum TestRecord {
Struct1(TestInternalStruct1),
Struct2(TestInternalStruct2),
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
struct TestInternalStruct1 {
data: usize,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
struct TestInternalStruct2 {
a: i32,
b: i32,
}
#[test]
fn test_wal() {
let dir = Builder::new().prefix("wal_test").tempdir().unwrap();
let capacity = 32 * 1024 * 1024;
let wal_options = WalOptions {
segment_capacity: capacity,
segment_queue_len: 0,
retain_closed: NonZeroUsize::new(1).unwrap(),
};
let mut serde_wal: SerdeWal<TestRecord> = SerdeWal::new(dir.path(), wal_options).unwrap();
let record = TestRecord::Struct1(TestInternalStruct1 { data: 10 });
serde_wal.write(&record).expect("Can't write");
#[cfg(not(target_os = "windows"))]
{
let metadata = fs::metadata(dir.path().join("open-1").to_str().unwrap()).unwrap();
println!("file size: {}", metadata.size());
assert_eq!(metadata.size() as usize, capacity);
};
for (_idx, rec) in serde_wal.read(0) {
println!("{rec:?}");
}
let record = TestRecord::Struct2(TestInternalStruct2 { a: 12, b: 13 });
serde_wal.write(&record).expect("Can't write");
let mut read_iterator = serde_wal.read(0);
let (idx1, record1) = read_iterator.next().unwrap();
let (idx2, record2) = read_iterator.next().unwrap();
assert_eq!(idx1, 0);
assert_eq!(idx2, 1);
match record1 {
TestRecord::Struct1(x) => assert_eq!(x.data, 10),
TestRecord::Struct2(_) => panic!("Wrong structure"),
}
match record2 {
TestRecord::Struct1(_) => panic!("Wrong structure"),
TestRecord::Struct2(x) => {
assert_eq!(x.a, 12);
assert_eq!(x.b, 13);
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/payload_index_schema.rs | lib/shard/src/payload_index_schema.rs | use std::collections::HashMap;
use segment::types::{PayloadFieldSchema, PayloadKeyType};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct PayloadIndexSchema {
pub schema: HashMap<PayloadKeyType, PayloadFieldSchema>,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/retrieve/retrieve_blocking.rs | lib/shard/src/retrieve/retrieve_blocking.rs | use std::collections::hash_map::Entry;
use std::sync::atomic::AtomicBool;
use std::time::Duration;
use ahash::AHashMap;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::VectorStructInternal;
use segment::types::{PointIdType, SeqNumberType, WithPayload, WithVector};
use crate::retrieve::record_internal::RecordInternal;
use crate::segment_holder::LockedSegmentHolder;
pub fn retrieve_blocking(
segments: LockedSegmentHolder,
points: &[PointIdType],
with_payload: &WithPayload,
with_vector: &WithVector,
timeout: Duration,
is_stopped: &AtomicBool,
hw_measurement_acc: HwMeasurementAcc,
) -> OperationResult<AHashMap<PointIdType, RecordInternal>> {
let mut point_version: AHashMap<PointIdType, SeqNumberType> = Default::default();
let mut point_records: AHashMap<PointIdType, RecordInternal> = Default::default();
let hw_counter = hw_measurement_acc.get_counter_cell();
let Some(segments_guard) = segments.try_read_for(timeout) else {
return Err(OperationError::timeout(timeout, "retrieve points"));
};
segments_guard.read_points(points, is_stopped, |id, segment| {
let version = segment
.point_version(id)
.ok_or_else(|| OperationError::service_error(format!("No version for point {id}")))?;
// If we already have the latest point version, keep that and continue
let version_entry = point_version.entry(id);
if matches!(&version_entry, Entry::Occupied(entry) if *entry.get() >= version) {
return Ok(true);
}
point_records.insert(
id,
RecordInternal {
id,
payload: if with_payload.enable {
if let Some(selector) = &with_payload.payload_selector {
Some(selector.process(segment.payload(id, &hw_counter)?))
} else {
Some(segment.payload(id, &hw_counter)?)
}
} else {
None
},
vector: {
match with_vector {
WithVector::Bool(true) => {
let vectors = segment.all_vectors(id, &hw_counter)?;
Some(VectorStructInternal::from(vectors))
}
WithVector::Bool(false) => None,
WithVector::Selector(vector_names) => {
let mut selected_vectors = NamedVectors::default();
for vector_name in vector_names {
if let Some(vector) =
segment.vector(vector_name, id, &hw_counter)?
{
selected_vectors.insert(vector_name.clone(), vector);
}
}
Some(VectorStructInternal::from(selected_vectors))
}
}
},
shard_key: None,
order_value: None,
},
);
*version_entry.or_default() = version;
Ok(true)
})?;
Ok(point_records)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/retrieve/mod.rs | lib/shard/src/retrieve/mod.rs | pub mod record_internal;
pub mod retrieve_blocking;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/retrieve/record_internal.rs | lib/shard/src/retrieve/record_internal.rs | use api::conversions::json::payload_to_proto;
use api::grpc::conversions::convert_shard_key_to_grpc;
use segment::data_types::order_by::OrderValue;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, VectorRef, VectorStructInternal};
use segment::types::{Payload, PointIdType, ShardKey, VectorName};
use crate::operations::point_ops::{PointStructPersisted, VectorStructPersisted};
/// Point data
#[derive(Clone, Debug, PartialEq)]
pub struct RecordInternal {
/// Id of the point
pub id: PointIdType,
/// Payload - values assigned to the point
pub payload: Option<Payload>,
/// Vector of the point
pub vector: Option<VectorStructInternal>,
/// Shard Key
pub shard_key: Option<ShardKey>,
/// Order value, if used for order_by
pub order_value: Option<OrderValue>,
}
impl RecordInternal {
pub fn get_vector_by_name(&self, name: &VectorName) -> Option<VectorRef<'_>> {
match &self.vector {
Some(VectorStructInternal::Single(vector)) => {
(name == DEFAULT_VECTOR_NAME).then_some(VectorRef::from(vector))
}
Some(VectorStructInternal::MultiDense(vectors)) => {
(name == DEFAULT_VECTOR_NAME).then_some(VectorRef::from(vectors))
}
Some(VectorStructInternal::Named(vectors)) => vectors.get(name).map(VectorRef::from),
None => None,
}
}
}
/// Warn: panics if the vector is empty
impl TryFrom<RecordInternal> for PointStructPersisted {
type Error = String;
fn try_from(record: RecordInternal) -> Result<Self, Self::Error> {
let RecordInternal {
id,
payload,
vector,
shard_key: _,
order_value: _,
} = record;
if vector.is_none() {
return Err("Vector is empty".to_string());
}
Ok(Self {
id,
payload,
vector: VectorStructPersisted::from(vector.unwrap()),
})
}
}
impl From<RecordInternal> for api::grpc::qdrant::RetrievedPoint {
fn from(record: RecordInternal) -> Self {
let RecordInternal {
id,
payload,
vector,
shard_key,
order_value,
} = record;
Self {
id: Some(id.into()),
payload: payload.map(payload_to_proto).unwrap_or_default(),
vectors: vector.map(api::grpc::qdrant::VectorsOutput::from),
shard_key: shard_key.map(convert_shard_key_to_grpc),
order_value: order_value.map(From::from),
}
}
}
impl From<RecordInternal> for api::rest::Record {
fn from(value: RecordInternal) -> Self {
let RecordInternal {
id,
payload,
vector,
shard_key,
order_value,
} = value;
Self {
id,
payload,
vector: vector.map(api::rest::VectorStructOutput::from),
shard_key,
order_value,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/proxy_segment/tests.rs | lib/shard/src/proxy_segment/tests.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::tar_ext;
use fs_err::File;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::query_context::QueryContext;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, QueryVector, only_default_vector};
use segment::entry::{SegmentEntry, SnapshotEntry as _};
use segment::types::{FieldCondition, PayloadSchemaType};
use tempfile::Builder;
use super::*;
use crate::fixtures::*;
impl ProxySegment {
/// This function is a simplified version of `search_batch` intended for testing purposes.
#[allow(clippy::too_many_arguments)]
pub fn search(
&self,
vector_name: &VectorName,
vector: &QueryVector,
with_payload: &WithPayload,
with_vector: &WithVector,
filter: Option<&Filter>,
top: usize,
params: Option<&SearchParams>,
) -> OperationResult<Vec<ScoredPoint>> {
use segment::data_types::query_context::QueryContext;
let query_context = QueryContext::default();
let segment_query_context = query_context.get_segment_query_context();
let result = self.search_batch(
vector_name,
&[vector],
with_payload,
with_vector,
filter,
top,
params,
&segment_query_context,
)?;
Ok(result.into_iter().next().unwrap())
}
}
#[test]
fn test_search_batch_equivalence_single() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let hw_counter = HardwareCounterCell::new();
let vec4 = vec![1.1, 1.0, 0.0, 1.0];
original_segment
.get()
.write()
.upsert_point(100, 4.into(), only_default_vector(&vec4), &hw_counter)
.unwrap();
let vec6 = vec![1.0, 1.0, 0.5, 1.0];
original_segment
.get()
.write()
.upsert_point(101, 6.into(), only_default_vector(&vec6), &hw_counter)
.unwrap();
let mut proxy_segment = ProxySegment::new(original_segment);
proxy_segment
.delete_point(102, 1.into(), &hw_counter)
.unwrap();
let query_vector = [1.0, 1.0, 1.0, 1.0].into();
let search_result = proxy_segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
None,
10,
None,
)
.unwrap();
eprintln!("search_result = {search_result:#?}");
let hardware_accumulator = HwMeasurementAcc::new();
let query_context = QueryContext::new(10000, hardware_accumulator.clone());
let segment_query_context = query_context.get_segment_query_context();
let search_batch_result = proxy_segment
.search_batch(
DEFAULT_VECTOR_NAME,
&[&query_vector],
&WithPayload::default(),
&false.into(),
None,
10,
None,
&segment_query_context,
)
.unwrap();
eprintln!("search_batch_result = {search_batch_result:#?}");
assert!(!search_result.is_empty());
assert_eq!(search_result, search_batch_result[0].clone());
assert!(hardware_accumulator.get_cpu() > 0);
}
#[test]
fn test_search_batch_equivalence_single_random() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(random_segment(dir.path(), 100, 200, 4));
let proxy_segment = ProxySegment::new(original_segment);
let query_vector = [1.0, 1.0, 1.0, 1.0].into();
let search_result = proxy_segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
None,
10,
None,
)
.unwrap();
eprintln!("search_result = {search_result:#?}");
let query_context = QueryContext::default();
let segment_query_context = query_context.get_segment_query_context();
let search_batch_result = proxy_segment
.search_batch(
DEFAULT_VECTOR_NAME,
&[&query_vector],
&WithPayload::default(),
&false.into(),
None,
10,
None,
&segment_query_context,
)
.unwrap();
eprintln!("search_batch_result = {search_batch_result:#?}");
assert!(!search_result.is_empty());
assert_eq!(search_result, search_batch_result[0].clone())
}
#[test]
fn test_search_batch_equivalence_multi_random() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(random_segment(dir.path(), 100, 200, 4));
let proxy_segment = ProxySegment::new(original_segment);
let q1 = [1.0, 1.0, 1.0, 0.1];
let q2 = [1.0, 1.0, 0.1, 0.1];
let q3 = [1.0, 0.1, 1.0, 0.1];
let q4 = [0.1, 1.0, 1.0, 0.1];
let query_vectors: &[&QueryVector] = &[&q1.into(), &q2.into(), &q3.into(), &q4.into()];
let mut all_single_results = Vec::with_capacity(query_vectors.len());
for query_vector in query_vectors {
let res = proxy_segment
.search(
DEFAULT_VECTOR_NAME,
query_vector,
&WithPayload::default(),
&false.into(),
None,
10,
None,
)
.unwrap();
all_single_results.push(res);
}
eprintln!("search_result = {all_single_results:#?}");
let query_context = QueryContext::default();
let segment_query_context = query_context.get_segment_query_context();
let search_batch_result = proxy_segment
.search_batch(
DEFAULT_VECTOR_NAME,
query_vectors,
&WithPayload::default(),
&false.into(),
None,
10,
None,
&segment_query_context,
)
.unwrap();
eprintln!("search_batch_result = {search_batch_result:#?}");
assert_eq!(all_single_results, search_batch_result)
}
fn wrap_proxy(original_segment: LockedSegment) -> ProxySegment {
ProxySegment::new(original_segment)
}
#[test]
fn test_read_filter() {
let is_stopped = AtomicBool::new(false);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let hw_counter = HardwareCounterCell::new();
let filter = Filter::new_must_not(Condition::Field(FieldCondition::new_match(
"color".parse().unwrap(),
"blue".to_string().into(),
)));
let original_points = original_segment.get().read().read_filtered(
None,
Some(100),
None,
&is_stopped,
&hw_counter,
);
let original_points_filtered = original_segment.get().read().read_filtered(
None,
Some(100),
Some(&filter),
&is_stopped,
&hw_counter,
);
let mut proxy_segment = wrap_proxy(original_segment);
let hw_counter = HardwareCounterCell::new();
proxy_segment
.delete_point(100, 2.into(), &hw_counter)
.unwrap();
let proxy_res = proxy_segment.read_filtered(None, Some(100), None, &is_stopped, &hw_counter);
let proxy_res_filtered =
proxy_segment.read_filtered(None, Some(100), Some(&filter), &is_stopped, &hw_counter);
assert_eq!(original_points_filtered.len() - 1, proxy_res_filtered.len());
assert_eq!(original_points.len() - 1, proxy_res.len());
}
#[test]
fn test_read_range() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let original_points = original_segment
.get()
.read()
.read_range(None, Some(10.into()));
let mut proxy_segment = wrap_proxy(original_segment);
let hw_cell = HardwareCounterCell::new();
proxy_segment.delete_point(100, 2.into(), &hw_cell).unwrap();
let proxy_res = proxy_segment.read_range(None, Some(10.into()));
assert_eq!(original_points.len() - 1, proxy_res.len());
}
#[test]
fn test_sync_indexes() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let write_segment = LockedSegment::new(empty_segment(dir.path()));
original_segment
.get()
.write()
.create_field_index(
10,
&"color".parse().unwrap(),
Some(&PayloadSchemaType::Keyword.into()),
&HardwareCounterCell::new(),
)
.unwrap();
let proxy_segment = ProxySegment::new(original_segment.clone());
let hw_cell = HardwareCounterCell::new();
proxy_segment
.replicate_field_indexes(0, &hw_cell, &write_segment)
.unwrap();
assert!(
write_segment
.get()
.read()
.get_indexed_fields()
.contains_key(&"color".parse().unwrap()),
);
original_segment
.get()
.write()
.create_field_index(
11,
&"location".parse().unwrap(),
Some(&PayloadSchemaType::Geo.into()),
&hw_cell,
)
.unwrap();
original_segment
.get()
.write()
.delete_field_index(12, &"color".parse().unwrap())
.unwrap();
proxy_segment
.replicate_field_indexes(0, &hw_cell, &write_segment)
.unwrap();
assert!(
write_segment
.get()
.read()
.get_indexed_fields()
.contains_key(&"location".parse().unwrap()),
);
assert!(
!write_segment
.get()
.read()
.get_indexed_fields()
.contains_key(&"color".parse().unwrap()),
);
}
#[test]
fn test_take_snapshot() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let original_segment_2 = LockedSegment::new(build_segment_2(dir.path()));
let hw_cell = HardwareCounterCell::new();
let mut proxy_segment = ProxySegment::new(original_segment);
let proxy_segment2 = ProxySegment::new(original_segment_2);
proxy_segment.delete_point(102, 1.into(), &hw_cell).unwrap();
let snapshot_file = Builder::new().suffix(".snapshot.tar").tempfile().unwrap();
eprintln!("Snapshot into {:?}", snapshot_file.path());
let tar = tar_ext::BuilderExt::new_seekable_owned(File::create(snapshot_file.path()).unwrap());
let temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap();
let temp_dir2 = Builder::new().prefix("temp_dir").tempdir().unwrap();
proxy_segment
.take_snapshot(temp_dir.path(), &tar, SnapshotFormat::Regular, None)
.unwrap();
proxy_segment2
.take_snapshot(temp_dir2.path(), &tar, SnapshotFormat::Regular, None)
.unwrap();
tar.blocking_finish().unwrap();
// validate that 2 archives were created:
// wrapped_segment1, wrapped_segment2
let mut tar = tar::Archive::new(File::open(snapshot_file.path()).unwrap());
let archive_count = tar.entries_with_seek().unwrap().count();
assert_eq!(archive_count, 2);
let mut tar = tar::Archive::new(File::open(snapshot_file.path()).unwrap());
for entry in tar.entries_with_seek().unwrap() {
let archive_path = entry.unwrap().path().unwrap().into_owned();
let archive_extension = archive_path.extension().unwrap();
// correct file extension
assert_eq!(archive_extension, "tar");
}
}
#[test]
fn test_point_vector_count() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let original_segment = LockedSegment::new(build_segment_1(dir.path()));
let hw_cell = HardwareCounterCell::new();
let mut proxy_segment = ProxySegment::new(original_segment);
// We have 5 points by default, assert counts
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 5);
assert_eq!(segment_info.num_vectors, 5);
// Delete nonexistent point, counts should remain the same
proxy_segment
.delete_point(101, 99999.into(), &hw_cell)
.unwrap();
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 5);
assert_eq!(segment_info.num_vectors, 5);
// Delete point 1, counts should decrease by 1
proxy_segment.delete_point(102, 4.into(), &hw_cell).unwrap();
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 4);
assert_eq!(segment_info.num_vectors, 4);
}
#[test]
fn test_point_vector_count_multivec() {
use segment::segment_constructor::simple_segment_constructor::{
VECTOR1_NAME, VECTOR2_NAME, build_multivec_segment,
};
use segment::types::Distance;
// Create proxied multivec segment
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let dim = 1;
let mut original_segment = build_multivec_segment(dir.path(), dim, dim, Distance::Dot).unwrap();
let hw_cell = HardwareCounterCell::new();
original_segment
.upsert_point(
100,
4.into(),
NamedVectors::from_pairs([
(VECTOR1_NAME.into(), vec![0.4]),
(VECTOR2_NAME.into(), vec![0.5]),
]),
&hw_cell,
)
.unwrap();
original_segment
.upsert_point(
101,
6.into(),
NamedVectors::from_pairs([
(VECTOR1_NAME.into(), vec![0.6]),
(VECTOR2_NAME.into(), vec![0.7]),
]),
&hw_cell,
)
.unwrap();
let original_segment = LockedSegment::new(original_segment);
let mut proxy_segment = ProxySegment::new(original_segment);
// Assert counts from original segment
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 2);
assert_eq!(segment_info.num_vectors, 4);
// Delete nonexistent point, counts should remain the same
proxy_segment.delete_point(104, 1.into(), &hw_cell).unwrap();
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 2);
assert_eq!(segment_info.num_vectors, 4);
// Delete point 4, counts should decrease by 1
proxy_segment.delete_point(105, 4.into(), &hw_cell).unwrap();
let segment_info = proxy_segment.info();
assert_eq!(segment_info.num_points, 1);
assert_eq!(segment_info.num_vectors, 2);
}
#[test]
fn test_proxy_segment_flush() {
let tmp_dir = tempfile::Builder::new()
.prefix("segment_dir")
.tempdir()
.unwrap();
let locked_wrapped_segment = LockedSegment::new(build_segment_1(tmp_dir.path()));
let mut proxy_segment = ProxySegment::new(locked_wrapped_segment.clone());
let flushed_version_1 = proxy_segment.flush(false).unwrap();
proxy_segment
.delete_point(100, 2.into(), &HardwareCounterCell::new())
.unwrap();
let flushed_version_2 = proxy_segment.flush(false).unwrap();
assert_eq!(flushed_version_2, flushed_version_1);
let version_after_delete = proxy_segment.version();
// We can never fully persist proxy segment, as list of deleted points is always in-memory only.
// So we have to keep WAL for deleted points.
assert!(version_after_delete > flushed_version_2);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/proxy_segment/segment_entry.rs | lib/shard/src/proxy_segment/segment_entry.rs | use std::cmp;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::TelemetryDetail;
use segment::common::Flusher;
use segment::common::operation_error::{OperationError, OperationResult, SegmentFailedState};
use segment::data_types::build_index_result::BuildFieldIndexResult;
use segment::data_types::facets::{FacetParams, FacetValue};
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::order_by::OrderValue;
use segment::data_types::query_context::{FormulaContext, QueryContext, SegmentQueryContext};
use segment::data_types::vectors::{QueryVector, VectorInternal};
use segment::entry::entry_point::{SegmentEntry, SegmentFlushOrdering};
use segment::index::field_index::{CardinalityEstimation, FieldIndex};
use segment::json_path::JsonPath;
use segment::telemetry::SegmentTelemetry;
use segment::types::*;
use super::{ProxyDeletedPoint, ProxyIndexChange, ProxySegment};
use crate::locked_segment::LockedSegment;
impl SegmentEntry for ProxySegment {
fn version(&self) -> SeqNumberType {
cmp::max(self.wrapped_segment.get().read().version(), self.version)
}
fn persistent_version(&self) -> SeqNumberType {
self.wrapped_segment.get().read().persistent_version()
}
fn is_proxy(&self) -> bool {
true
}
fn point_version(&self, point_id: PointIdType) -> Option<SeqNumberType> {
// Use wrapped segment version, if absent we have no version at all
let wrapped_version = self.wrapped_segment.get().read().point_version(point_id)?;
// Ignore point from wrapped segment if already marked for deletion with newer version
// By `point_version` semantics we don't expect to get a version if the point
// is deleted. This also prevents `move_if_exists` from moving an old point
// into the write segment again.
if self
.deleted_points
.get(&point_id)
.is_some_and(|delete| wrapped_version <= delete.local_version)
{
return None;
}
Some(wrapped_version)
}
fn search_batch(
&self,
vector_name: &VectorName,
vectors: &[&QueryVector],
with_payload: &WithPayload,
with_vector: &WithVector,
filter: Option<&Filter>,
top: usize,
params: Option<&SearchParams>,
query_context: &SegmentQueryContext,
) -> OperationResult<Vec<Vec<ScoredPoint>>> {
// Some point might be deleted after temporary segment creation
// We need to prevent them from being found by search request
// That is why we need to pass additional filter for deleted points
let do_update_filter = !self.deleted_points.is_empty();
let wrapped_results = if do_update_filter {
// If we are wrapping a segment with deleted points,
// we can make this hack of replacing deleted_points of the wrapped_segment
// with our proxied deleted_points, do avoid additional filter creation
if let Some(deleted_points) = self.deleted_mask.as_ref() {
let query_context_with_deleted =
query_context.fork().with_deleted_points(deleted_points);
let res = self.wrapped_segment.get().read().search_batch(
vector_name,
vectors,
with_payload,
with_vector,
filter,
top,
params,
&query_context_with_deleted,
);
res?
} else {
let wrapped_filter = Self::add_deleted_points_condition_to_filter(
filter,
self.deleted_points.keys().copied(),
);
self.wrapped_segment.get().read().search_batch(
vector_name,
vectors,
with_payload,
with_vector,
Some(&wrapped_filter),
top,
params,
query_context,
)?
}
} else {
self.wrapped_segment.get().read().search_batch(
vector_name,
vectors,
with_payload,
with_vector,
filter,
top,
params,
query_context,
)?
};
Ok(wrapped_results)
}
fn rescore_with_formula(
&self,
formula_ctx: Arc<FormulaContext>,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Vec<ScoredPoint>> {
// Run rescore in wrapped segment
let wrapped_results = self
.wrapped_segment
.get()
.read()
.rescore_with_formula(formula_ctx, hw_counter)?;
let result = {
if self.deleted_points.is_empty() {
wrapped_results
} else {
wrapped_results
.into_iter()
.filter(|point| !self.deleted_points.contains_key(&point.id))
.collect()
}
};
Ok(result)
}
fn upsert_point(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_vectors: NamedVectors,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Upsert is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn delete_point(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
let mut was_deleted = false;
self.version = cmp::max(self.version, op_num);
let point_offset = match &self.wrapped_segment {
LockedSegment::Original(raw_segment) => {
let point_offset = raw_segment.read().get_internal_id(point_id);
if point_offset.is_some() {
let prev = self.deleted_points.insert(
point_id,
ProxyDeletedPoint {
local_version: op_num,
operation_version: op_num,
},
);
was_deleted = prev.is_none();
if let Some(prev) = prev {
debug_assert!(
prev.operation_version < op_num,
"Overriding deleted flag {prev:?} with older op_num:{op_num}",
)
}
}
point_offset
}
LockedSegment::Proxy(proxy) => {
if proxy.read().has_point(point_id) {
let prev = self.deleted_points.insert(
point_id,
ProxyDeletedPoint {
local_version: op_num,
operation_version: op_num,
},
);
was_deleted = prev.is_none();
if let Some(prev) = prev {
debug_assert!(
prev.operation_version < op_num,
"Overriding deleted flag {prev:?} with older op_num:{op_num}",
)
}
}
None
}
};
self.set_deleted_offset(point_offset);
Ok(was_deleted)
}
fn update_vectors(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_vectors: NamedVectors,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Update vectors is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn delete_vector(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_vector_name: &VectorName,
) -> OperationResult<bool> {
// Print current stack trace for easier debugging of unexpected calls
Err(OperationError::service_error(format!(
"Delete vector is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn set_full_payload(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_full_payload: &Payload,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Set full payload is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn set_payload(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_payload: &Payload,
_key: &Option<JsonPath>,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Set payload is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn delete_payload(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_key: PayloadKeyTypeRef,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Delete payload is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn clear_payload(
&mut self,
op_num: SeqNumberType,
point_id: PointIdType,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<bool> {
Err(OperationError::service_error(format!(
"Clear payload is disabled for proxy segments: operation {op_num} on point {point_id}",
)))
}
fn vector(
&self,
vector_name: &VectorName,
point_id: PointIdType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Option<VectorInternal>> {
if self.deleted_points.contains_key(&point_id) {
Ok(None)
} else {
self.wrapped_segment
.get()
.read()
.vector(vector_name, point_id, hw_counter)
}
}
fn all_vectors(
&self,
point_id: PointIdType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<NamedVectors<'_>> {
let mut result = NamedVectors::default();
let wrapped = self.wrapped_segment.get();
let wrapped_guard = wrapped.read();
let config = wrapped_guard.config();
let vector_names: Vec<_> = config
.vector_data
.keys()
.chain(config.sparse_vector_data.keys())
.cloned()
.collect();
// Must drop wrapped guard to prevent self-deadlock in `vector()` function below
drop(wrapped_guard);
for vector_name in vector_names {
if let Some(vector) = self.vector(&vector_name, point_id, hw_counter)? {
result.insert(vector_name, vector);
}
}
Ok(result)
}
fn payload(
&self,
point_id: PointIdType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
if self.deleted_points.contains_key(&point_id) {
Ok(Payload::default())
} else {
self.wrapped_segment
.get()
.read()
.payload(point_id, hw_counter)
}
}
/// Not implemented for proxy
fn iter_points(&self) -> Box<dyn Iterator<Item = PointIdType> + '_> {
// iter_points is not available for Proxy implementation
// Due to internal locks it is almost impossible to return iterator with proper owning, lifetimes, e.t.c.
unimplemented!("call to iter_points is not implemented for Proxy segment")
}
fn read_filtered<'a>(
&'a self,
offset: Option<PointIdType>,
limit: Option<usize>,
filter: Option<&'a Filter>,
is_stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
) -> Vec<PointIdType> {
if self.deleted_points.is_empty() {
self.wrapped_segment
.get()
.read()
.read_filtered(offset, limit, filter, is_stopped, hw_counter)
} else {
let wrapped_filter = Self::add_deleted_points_condition_to_filter(
filter,
self.deleted_points.keys().copied(),
);
self.wrapped_segment.get().read().read_filtered(
offset,
limit,
Some(&wrapped_filter),
is_stopped,
hw_counter,
)
}
}
fn read_ordered_filtered<'a>(
&'a self,
limit: Option<usize>,
filter: Option<&'a Filter>,
order_by: &'a segment::data_types::order_by::OrderBy,
is_stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Vec<(OrderValue, PointIdType)>> {
let read_points = if self.deleted_points.is_empty() {
self.wrapped_segment
.get()
.read()
.read_ordered_filtered(limit, filter, order_by, is_stopped, hw_counter)?
} else {
let wrapped_filter = Self::add_deleted_points_condition_to_filter(
filter,
self.deleted_points.keys().copied(),
);
self.wrapped_segment.get().read().read_ordered_filtered(
limit,
Some(&wrapped_filter),
order_by,
is_stopped,
hw_counter,
)?
};
Ok(read_points)
}
fn read_random_filtered<'a>(
&'a self,
limit: usize,
filter: Option<&'a Filter>,
is_stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
) -> Vec<PointIdType> {
if self.deleted_points.is_empty() {
self.wrapped_segment
.get()
.read()
.read_random_filtered(limit, filter, is_stopped, hw_counter)
} else {
let wrapped_filter = Self::add_deleted_points_condition_to_filter(
filter,
self.deleted_points.keys().copied(),
);
self.wrapped_segment.get().read().read_random_filtered(
limit,
Some(&wrapped_filter),
is_stopped,
hw_counter,
)
}
}
/// Read points in [from; to) range
fn read_range(&self, from: Option<PointIdType>, to: Option<PointIdType>) -> Vec<PointIdType> {
let read_points = self.wrapped_segment.get().read().read_range(from, to);
if self.deleted_points.is_empty() {
read_points
} else {
read_points
.into_iter()
.filter(|idx| !self.deleted_points.contains_key(idx))
.collect()
}
}
fn unique_values(
&self,
key: &JsonPath,
filter: Option<&Filter>,
is_stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
) -> OperationResult<BTreeSet<FacetValue>> {
let values = self
.wrapped_segment
.get()
.read()
.unique_values(key, filter, is_stopped, hw_counter)?;
Ok(values)
}
fn facet(
&self,
request: &FacetParams,
is_stopped: &AtomicBool,
hw_counter: &HardwareCounterCell,
) -> OperationResult<HashMap<FacetValue, usize>> {
let hits = if self.deleted_points.is_empty() {
self.wrapped_segment
.get()
.read()
.facet(request, is_stopped, hw_counter)?
} else {
let wrapped_filter = Self::add_deleted_points_condition_to_filter(
request.filter.as_ref(),
self.deleted_points.keys().copied(),
);
let new_request = FacetParams {
filter: Some(wrapped_filter),
..request.clone()
};
self.wrapped_segment
.get()
.read()
.facet(&new_request, is_stopped, hw_counter)?
};
Ok(hits)
}
fn has_point(&self, point_id: PointIdType) -> bool {
!self.deleted_points.contains_key(&point_id)
&& self.wrapped_segment.get().read().has_point(point_id)
}
fn is_empty(&self) -> bool {
self.wrapped_segment.get().read().is_empty()
}
fn available_point_count(&self) -> usize {
let deleted_points_count = self.deleted_points.len();
let wrapped_segment_count = self.wrapped_segment.get().read().available_point_count();
wrapped_segment_count.saturating_sub(deleted_points_count)
}
fn deleted_point_count(&self) -> usize {
self.wrapped_segment.get().read().deleted_point_count() + self.deleted_points.len()
}
fn available_vectors_size_in_bytes(&self, vector_name: &VectorName) -> OperationResult<usize> {
let wrapped_segment = self.wrapped_segment.get();
let wrapped_segment_guard = wrapped_segment.read();
let wrapped_size = wrapped_segment_guard.available_vectors_size_in_bytes(vector_name)?;
let wrapped_count = wrapped_segment_guard.available_point_count();
drop(wrapped_segment_guard);
let stored_points = wrapped_count;
// because we don't know the exact size of deleted vectors, we assume that they are the same avg size as the wrapped ones
if stored_points > 0 {
let deleted_points_count = self.deleted_points.len();
let available_points = stored_points.saturating_sub(deleted_points_count);
Ok(
((wrapped_size as u128) * available_points as u128 / stored_points as u128)
as usize,
)
} else {
Ok(0)
}
}
fn estimate_point_count<'a>(
&'a self,
filter: Option<&'a Filter>,
hw_counter: &HardwareCounterCell,
) -> CardinalityEstimation {
let deleted_point_count = self.deleted_points.len();
let (wrapped_segment_est, total_wrapped_size) = {
let wrapped_segment = self.wrapped_segment.get();
let wrapped_segment_guard = wrapped_segment.read();
(
wrapped_segment_guard.estimate_point_count(filter, hw_counter),
wrapped_segment_guard.available_point_count(),
)
};
let expected_deleted_count = if total_wrapped_size > 0 {
(wrapped_segment_est.exp as f64
* (deleted_point_count as f64 / total_wrapped_size as f64)) as usize
} else {
0
};
let CardinalityEstimation {
primary_clauses,
min,
exp,
max,
} = wrapped_segment_est;
CardinalityEstimation {
primary_clauses,
min: min.saturating_sub(deleted_point_count),
exp: exp.saturating_sub(expected_deleted_count),
max,
}
}
fn segment_type(&self) -> SegmentType {
SegmentType::Special
}
fn size_info(&self) -> SegmentInfo {
// To reduce code complexity for estimations, we use `.info()` directly here.
self.info()
}
fn info(&self) -> SegmentInfo {
let wrapped_info = self.wrapped_segment.get().read().info();
let vector_name_count =
self.config().vector_data.len() + self.config().sparse_vector_data.len();
let deleted_points_count = self.deleted_points.len();
// This is a best estimate
let num_vectors = wrapped_info
.num_vectors
.saturating_sub(deleted_points_count * vector_name_count);
let num_indexed_vectors = if wrapped_info.segment_type == SegmentType::Indexed {
wrapped_info
.num_vectors
.saturating_sub(deleted_points_count * vector_name_count)
} else {
0
};
let vector_data = wrapped_info.vector_data;
SegmentInfo {
segment_type: SegmentType::Special,
num_vectors,
num_indexed_vectors,
num_points: self.available_point_count(),
num_deleted_vectors: wrapped_info.num_deleted_vectors
+ deleted_points_count * vector_name_count,
vectors_size_bytes: wrapped_info.vectors_size_bytes, // + write_info.vectors_size_bytes,
payloads_size_bytes: wrapped_info.payloads_size_bytes,
ram_usage_bytes: wrapped_info.ram_usage_bytes,
disk_usage_bytes: wrapped_info.disk_usage_bytes,
is_appendable: false,
index_schema: wrapped_info.index_schema,
vector_data,
}
}
fn config(&self) -> &SegmentConfig {
&self.wrapped_config
}
fn is_appendable(&self) -> bool {
false
}
fn flush_ordering(&self) -> SegmentFlushOrdering {
self.wrapped_segment
.get()
.read()
.flush_ordering()
// Mark flush ordering as proxy
.proxy()
}
fn flusher(&self, force: bool) -> Option<Flusher> {
let wrapped_segment = self.wrapped_segment.get();
let wrapped_segment_guard = wrapped_segment.read();
wrapped_segment_guard.flusher(force)
}
fn drop_data(self) -> OperationResult<()> {
self.wrapped_segment.drop_data()
}
fn data_path(&self) -> PathBuf {
self.wrapped_segment.get().read().data_path()
}
fn delete_field_index(&mut self, op_num: u64, key: PayloadKeyTypeRef) -> OperationResult<bool> {
if self.version() > op_num {
return Ok(false);
}
self.version = cmp::max(self.version, op_num);
// Store index change to later propagate to optimized/wrapped segment
self.changed_indexes
.insert(key.clone(), ProxyIndexChange::Delete(op_num));
Ok(true)
}
fn delete_field_index_if_incompatible(
&mut self,
op_num: SeqNumberType,
key: PayloadKeyTypeRef,
field_schema: &PayloadFieldSchema,
) -> OperationResult<bool> {
if self.version() > op_num {
return Ok(false);
}
self.version = cmp::max(self.version, op_num);
self.changed_indexes.insert(
key.clone(),
ProxyIndexChange::DeleteIfIncompatible(op_num, field_schema.clone()),
);
Ok(true)
}
fn build_field_index(
&self,
op_num: SeqNumberType,
_key: PayloadKeyTypeRef,
field_type: &PayloadFieldSchema,
_hw_counter: &HardwareCounterCell,
) -> OperationResult<BuildFieldIndexResult> {
if self.version() > op_num {
return Ok(BuildFieldIndexResult::SkippedByVersion);
}
Ok(BuildFieldIndexResult::Built {
indexes: vec![], // No actual index is built in proxy segment, they will be created later
schema: field_type.clone(),
})
}
fn apply_field_index(
&mut self,
op_num: SeqNumberType,
key: PayloadKeyType,
field_schema: PayloadFieldSchema,
_field_index: Vec<FieldIndex>,
) -> OperationResult<bool> {
if self.version() > op_num {
return Ok(false);
}
self.version = cmp::max(self.version, op_num);
// Store index change to later propagate to optimized/wrapped segment
self.changed_indexes
.insert(key, ProxyIndexChange::Create(field_schema, op_num));
Ok(true)
}
fn get_indexed_fields(&self) -> HashMap<PayloadKeyType, PayloadFieldSchema> {
let mut indexed_fields = self.wrapped_segment.get().read().get_indexed_fields();
for (field_name, change) in self.changed_indexes.iter_unordered() {
match change {
ProxyIndexChange::Create(schema, _) => {
indexed_fields.insert(field_name.to_owned(), schema.to_owned());
}
ProxyIndexChange::Delete(_) => {
indexed_fields.remove(field_name);
}
ProxyIndexChange::DeleteIfIncompatible(_, schema) => {
if let Some(existing_schema) = indexed_fields.get(field_name)
&& existing_schema != schema
{
indexed_fields.remove(field_name);
}
}
}
}
indexed_fields
}
fn check_error(&self) -> Option<SegmentFailedState> {
self.wrapped_segment.get().read().check_error()
}
fn vector_names(&self) -> HashSet<VectorNameBuf> {
self.wrapped_segment.get().read().vector_names()
}
fn get_telemetry_data(&self, detail: TelemetryDetail) -> SegmentTelemetry {
self.wrapped_segment.get().read().get_telemetry_data(detail)
}
fn fill_query_context(&self, query_context: &mut QueryContext) {
// Information from temporary segment is not too important for query context
self.wrapped_segment
.get()
.read()
.fill_query_context(query_context)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/proxy_segment/mod.rs | lib/shard/src/proxy_segment/mod.rs | pub mod segment_entry;
pub mod snapshot_entry;
#[cfg(test)]
mod tests;
use ahash::AHashMap;
use bitvec::prelude::BitVec;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools as _;
use segment::common::operation_error::OperationResult;
use segment::types::*;
use crate::locked_segment::LockedSegment;
pub type DeletedPoints = AHashMap<PointIdType, ProxyDeletedPoint>;
/// This object is a wrapper around read-only segment.
///
/// It could be used to provide all read and write operations while wrapped segment is being optimized (i.e. not available for writing)
/// It writes all changed records into a temporary `write_segment` and keeps track on changed points
#[derive(Debug)]
pub struct ProxySegment {
pub wrapped_segment: LockedSegment,
/// Internal mask of deleted points, specific to the wrapped segment
/// Present if the wrapped segment is a plain segment
/// Used for faster deletion checks
deleted_mask: Option<BitVec>,
changed_indexes: ProxyIndexChanges,
/// Points which should no longer used from wrapped_segment
/// May contain points which are not in wrapped_segment,
/// because the set is shared among all proxy segments
deleted_points: DeletedPoints,
wrapped_config: SegmentConfig,
/// Version of the last change in this proxy, considering point deletes and payload index
/// changes. Defaults to the version of the wrapped segment.
version: SeqNumberType,
}
impl ProxySegment {
pub fn new(segment: LockedSegment) -> Self {
let deleted_mask = match &segment {
LockedSegment::Original(raw_segment) => {
let raw_segment_guard = raw_segment.read();
let already_deleted = raw_segment_guard.get_deleted_points_bitvec();
Some(already_deleted)
}
LockedSegment::Proxy(_) => {
log::debug!("Double proxy segment creation");
None
}
};
let (wrapped_config, version) = {
let read_segment = segment.get().read();
(read_segment.config().clone(), read_segment.version())
};
ProxySegment {
wrapped_segment: segment,
deleted_mask,
changed_indexes: ProxyIndexChanges::default(),
deleted_points: AHashMap::new(),
wrapped_config,
version,
}
}
/// Ensure that write segment have same indexes as wrapped segment
pub fn replicate_field_indexes(
&self,
op_num: SeqNumberType,
hw_counter: &HardwareCounterCell,
segment_to_update: &LockedSegment,
) -> OperationResult<()> {
let existing_indexes = segment_to_update.get().read().get_indexed_fields();
let expected_indexes = self.wrapped_segment.get().read().get_indexed_fields();
// Add missing indexes
for (expected_field, expected_schema) in &expected_indexes {
let existing_schema = existing_indexes.get(expected_field);
if existing_schema != Some(expected_schema) {
if existing_schema.is_some() {
segment_to_update
.get()
.write()
.delete_field_index(op_num, expected_field)?;
}
segment_to_update.get().write().create_field_index(
op_num,
expected_field,
Some(expected_schema),
hw_counter,
)?;
}
}
// Remove extra indexes
for existing_field in existing_indexes.keys() {
if !expected_indexes.contains_key(existing_field) {
segment_to_update
.get()
.write()
.delete_field_index(op_num, existing_field)?;
}
}
Ok(())
}
/// Updates the deleted mask with the given point offset
/// Ensures that the mask is resized if necessary and returns false
/// if either the mask or the point offset is missing (mask is not applicable)
fn set_deleted_offset(&mut self, point_offset: Option<PointOffsetType>) -> bool {
match (&mut self.deleted_mask, point_offset) {
(Some(deleted_mask), Some(point_offset)) => {
if deleted_mask.len() <= point_offset as usize {
deleted_mask.resize(point_offset as usize + 1, false);
}
deleted_mask.set(point_offset as usize, true);
true
}
_ => false,
}
}
fn add_deleted_points_condition_to_filter(
filter: Option<&Filter>,
deleted_points: impl IntoIterator<Item = PointIdType>,
) -> Filter {
#[allow(clippy::from_iter_instead_of_collect)]
let wrapper_condition = Condition::HasId(HasIdCondition::from_iter(deleted_points));
match filter {
None => Filter::new_must_not(wrapper_condition),
Some(f) => {
let mut new_filter = f.clone();
let must_not = new_filter.must_not;
let new_must_not = match must_not {
None => Some(vec![wrapper_condition]),
Some(mut conditions) => {
conditions.push(wrapper_condition);
Some(conditions)
}
};
new_filter.must_not = new_must_not;
new_filter
}
}
}
/// Propagate changes in this proxy to the wrapped segment
///
/// This propagates:
/// - delete (or moved) points
/// - deleted payload indexes
/// - created payload indexes
///
/// This is required if making both the wrapped segment and the writable segment available in a
/// shard holder at the same time. If the wrapped segment is thrown away, then this is not
/// required.
pub fn propagate_to_wrapped(&mut self) -> OperationResult<()> {
// Important: we must not keep a write lock on the wrapped segment for the duration of this
// function to prevent a deadlock. The search functions conflict with it trying to take a
// read lock on the wrapped segment as well while already holding the deleted points lock
// (or others). Careful locking management is very important here. Instead we just take an
// upgradable read lock, upgrading to a write lock on demand.
// See: <https://github.com/qdrant/qdrant/pull/4206>
let wrapped_segment = self.wrapped_segment.get();
let mut wrapped_segment = wrapped_segment.upgradable_read();
// Propagate index changes before point deletions
// Point deletions bump the segment version, can cause index changes to be ignored
// Lock ordering is important here and must match the flush function to prevent a deadlock
{
let op_num = wrapped_segment.version();
if !self.changed_indexes.is_empty() {
wrapped_segment.with_upgraded(|wrapped_segment| {
for (field_name, change) in self.changed_indexes.iter_ordered() {
debug_assert!(
change.version() >= op_num,
"proxied index change should have newer version than segment",
);
match change {
ProxyIndexChange::Create(schema, version) => {
wrapped_segment.create_field_index(
*version,
field_name,
Some(schema),
&HardwareCounterCell::disposable(), // Internal operation
)?;
}
ProxyIndexChange::Delete(version) => {
wrapped_segment.delete_field_index(*version, field_name)?;
}
ProxyIndexChange::DeleteIfIncompatible(version, schema) => {
wrapped_segment.delete_field_index_if_incompatible(
*version, field_name, schema,
)?;
}
}
}
OperationResult::Ok(())
})?;
self.changed_indexes.clear();
}
}
// Propagate deleted points
// Lock ordering is important here and must match the flush function to prevent a deadlock
{
if !self.deleted_points.is_empty() {
wrapped_segment.with_upgraded(|wrapped_segment| {
for (point_id, versions) in self.deleted_points.iter() {
// Note:
// Queued deletes may have an older version than what is currently in the
// wrapped segment. Such deletes are ignored because the point in the
// wrapped segment is considered to be newer. This is possible because
// different proxy segments can share state through a common write segment.
// See: <https://github.com/qdrant/qdrant/pull/7208>
wrapped_segment.delete_point(
versions.operation_version,
*point_id,
&HardwareCounterCell::disposable(), // Internal operation: no need to measure.
)?;
}
OperationResult::Ok(())
})?;
self.deleted_points.clear();
// Note: We do not clear the deleted mask here, as it provides
// no performance advantage and does not affect the correctness of search.
// Points are still marked as deleted in two places, which is fine
}
}
Ok(())
}
pub fn get_deleted_points(&self) -> &DeletedPoints {
&self.deleted_points
}
pub fn get_index_changes(&self) -> &ProxyIndexChanges {
&self.changed_indexes
}
}
/// Point persion information of points to delete from a wrapped proxy segment.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ProxyDeletedPoint {
/// Version the point had in the wrapped segment when the delete was scheduled.
/// We use it to determine if some other proxy segment should move the point again with
/// `move_if_exists` if it has newer point data.
pub local_version: SeqNumberType,
/// Version of the operation that caused the delete to be scheduled.
/// We use it for the delete operations when propagating them to the wrapped or optimized
/// segment.
pub operation_version: SeqNumberType,
}
#[derive(Debug, Default)]
pub struct ProxyIndexChanges {
changes: AHashMap<PayloadKeyType, ProxyIndexChange>,
}
impl ProxyIndexChanges {
pub fn insert(&mut self, key: PayloadKeyType, change: ProxyIndexChange) {
self.changes.insert(key, change);
}
pub fn remove(&mut self, key: &PayloadKeyType) {
self.changes.remove(key);
}
pub fn len(&self) -> usize {
self.changes.len()
}
pub fn is_empty(&self) -> bool {
self.changes.is_empty()
}
pub fn clear(&mut self) {
self.changes.clear();
}
/// Iterate over proxy index changes in order of version.
///
/// Index changes must be applied in order because changes with an old version will silently be
/// rejected.
pub fn iter_ordered(&self) -> impl Iterator<Item = (&PayloadKeyType, &ProxyIndexChange)> {
self.changes
.iter()
.sorted_by_key(|(_, change)| change.version())
}
/// Iterate over proxy index changes in arbitrary order.
pub fn iter_unordered(&self) -> impl Iterator<Item = (&PayloadKeyType, &ProxyIndexChange)> {
self.changes.iter()
}
pub fn merge(&mut self, other: &Self) {
for (key, change) in &other.changes {
self.changes.insert(key.clone(), change.clone());
}
}
}
#[derive(Debug, Clone)]
pub enum ProxyIndexChange {
Create(PayloadFieldSchema, SeqNumberType),
Delete(SeqNumberType),
DeleteIfIncompatible(SeqNumberType, PayloadFieldSchema),
}
impl ProxyIndexChange {
pub fn version(&self) -> SeqNumberType {
match self {
ProxyIndexChange::Create(_, version) => *version,
ProxyIndexChange::Delete(version) => *version,
ProxyIndexChange::DeleteIfIncompatible(version, _) => *version,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/proxy_segment/snapshot_entry.rs | lib/shard/src/proxy_segment/snapshot_entry.rs | use std::path::Path;
use common::tar_ext;
use segment::common::operation_error::OperationResult;
use segment::data_types::manifest::SnapshotManifest;
use segment::entry::snapshot_entry::SnapshotEntry;
use segment::types::*;
use super::ProxySegment;
impl SnapshotEntry for ProxySegment {
fn take_snapshot(
&self,
temp_path: &Path,
tar: &tar_ext::BuilderExt,
format: SnapshotFormat,
manifest: Option<&SnapshotManifest>,
) -> OperationResult<()> {
log::info!("Taking a snapshot of a proxy segment");
// Snapshot wrapped segment data into the temporary dir
self.wrapped_segment
.get()
.read()
.take_snapshot(temp_path, tar, format, manifest)?;
Ok(())
}
fn collect_snapshot_manifest(&self, manifest: &mut SnapshotManifest) -> OperationResult<()> {
self.wrapped_segment
.get()
.read()
.collect_snapshot_manifest(manifest)?;
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/scroll.rs | lib/shard/src/query/scroll.rs | use segment::data_types::order_by::OrderBy;
use segment::types::{Filter, WithPayloadInterface, WithVector};
use crate::operation_rate_cost;
/// Scroll request, used as a part of query request
#[derive(Clone, Debug, PartialEq)]
pub struct QueryScrollRequestInternal {
/// Page size. Default: 10
pub limit: usize,
/// Look only for points which satisfies this conditions. If not provided - all points.
pub filter: Option<Filter>,
/// Select which payload to return with the response. Default is true.
pub with_payload: WithPayloadInterface,
/// Options for specifying which vectors to include into response. Default is false.
pub with_vector: WithVector,
/// Order the records by a payload field.
pub scroll_order: ScrollOrder,
}
impl QueryScrollRequestInternal {
pub fn scroll_rate_cost(&self) -> usize {
let mut cost = operation_rate_cost::BASE_COST;
if let Some(filter) = &self.filter {
cost += operation_rate_cost::filter_rate_cost(filter);
}
cost
}
}
#[derive(Clone, Debug, Default, PartialEq)]
pub enum ScrollOrder {
#[default]
ById,
ByField(OrderBy),
Random,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/tests.rs | lib/shard/src/query/tests.rs | use ahash::AHashSet;
use ordered_float::OrderedFloat;
use segment::common::operation_error::OperationError;
use segment::common::reciprocal_rank_fusion::DEFAULT_RRF_K;
use segment::data_types::vectors::{MultiDenseVectorInternal, NamedQuery, VectorInternal};
use segment::json_path::JsonPath;
use segment::types::*;
use sparse::common::sparse_vector::SparseVector;
use super::planned_query::*;
use super::*;
#[test]
fn test_try_from_double_rescore() {
let dummy_vector = vec![1.0, 2.0, 3.0];
let filter_inner_inner = Filter::new_must_not(Condition::IsNull(
JsonPath::try_from("apples").unwrap().into(),
));
let filter_inner = Filter::new_must(Condition::Field(FieldCondition::new_match(
"has_oranges".try_into().unwrap(),
true.into(),
)));
let filter_outer = Filter::new_must(Condition::HasId(
AHashSet::from([1.into(), 2.into()]).into(),
));
let request = ShardQueryRequest {
prefetches: vec![ShardPrefetch {
prefetches: vec![ShardPrefetch {
prefetches: Default::default(),
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"byte",
)))),
limit: 1000,
params: None,
filter: Some(filter_inner_inner.clone()),
score_threshold: None,
}],
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"full",
)))),
limit: 100,
params: None,
filter: Some(filter_inner.clone()),
score_threshold: None,
}],
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::MultiDense(MultiDenseVectorInternal::new_unchecked(vec![
dummy_vector.clone(),
])),
"multi",
)))),
filter: Some(filter_outer.clone()),
score_threshold: None,
limit: 10,
offset: 0,
params: Some(SearchParams {
exact: true,
..Default::default()
}),
with_vector: WithVector::Bool(true),
with_payload: WithPayloadInterface::Bool(true),
};
let planned_query = PlannedQuery::try_from(vec![request]).unwrap();
assert_eq!(
planned_query.searches,
vec![CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"byte",
)),
filter: Some(
filter_outer
.merge_owned(filter_inner)
.merge_owned(filter_inner_inner)
),
params: None,
limit: 1000,
offset: 0,
with_payload: Some(WithPayloadInterface::Bool(false)),
with_vector: Some(WithVector::Bool(false)),
score_threshold: None,
}]
);
assert_eq!(
planned_query.root_plans,
vec![RootPlan {
with_vector: WithVector::Bool(true),
with_payload: WithPayloadInterface::Bool(true),
merge_plan: MergePlan {
sources: vec![Source::Prefetch(Box::from(MergePlan {
sources: vec![Source::SearchesIdx(0)],
rescore_stages: Some(RescoreStages::shard_level(RescoreParams {
rescore: ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"full",
))),
limit: 100,
score_threshold: None,
params: None,
}))
}))],
rescore_stages: Some(RescoreStages::shard_level(RescoreParams {
rescore: ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::MultiDense(MultiDenseVectorInternal::new_unchecked(vec![
dummy_vector
])),
"multi"
))),
limit: 10,
score_threshold: None,
params: Some(SearchParams {
exact: true,
..Default::default()
})
}))
}
}]
);
}
#[test]
fn test_try_from_no_prefetch() {
let dummy_vector = vec![1.0, 2.0, 3.0];
let request = ShardQueryRequest {
prefetches: vec![], // No prefetch
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"full",
)))),
filter: Some(Filter::default()),
score_threshold: Some(OrderedFloat(0.5)),
limit: 10,
offset: 12,
params: Some(SearchParams::default()),
with_vector: WithVector::Bool(true),
with_payload: WithPayloadInterface::Bool(true),
};
let planned_query = PlannedQuery::try_from(vec![request]).unwrap();
assert_eq!(
planned_query.searches,
vec![CoreSearchRequest {
query: QueryEnum::Nearest(
NamedQuery::new(VectorInternal::Dense(dummy_vector), "full",)
),
filter: Some(Filter::default()),
params: Some(SearchParams::default()),
limit: 22,
offset: 0,
with_vector: Some(WithVector::Bool(false)),
with_payload: Some(WithPayloadInterface::Bool(false)),
score_threshold: Some(0.5),
}]
);
assert_eq!(
planned_query.root_plans,
vec![RootPlan {
with_payload: WithPayloadInterface::Bool(true),
with_vector: WithVector::Bool(true),
merge_plan: MergePlan {
sources: vec![Source::SearchesIdx(0)],
rescore_stages: None,
},
}]
);
}
#[test]
fn test_try_from_hybrid_query() {
let dummy_vector = vec![1.0, 2.0, 3.0];
let dummy_sparse = SparseVector::new(vec![100, 123, 2000], vec![0.2, 0.3, 0.4]).unwrap();
let filter_inner1 = Filter::new_must(Condition::Field(FieldCondition::new_match(
"city".try_into().unwrap(),
"Berlin".to_string().into(),
)));
let filter_inner2 = Filter::new_must(Condition::Field(FieldCondition::new_match(
"city".try_into().unwrap(),
"Munich".to_string().into(),
)));
let filter_outer = Filter::new_must(Condition::Field(FieldCondition::new_match(
"country".try_into().unwrap(),
"Germany".to_string().into(),
)));
let request = ShardQueryRequest {
prefetches: vec![
ShardPrefetch {
prefetches: Vec::new(),
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector.clone()),
"dense",
)))),
limit: 100,
params: None,
filter: Some(filter_inner1.clone()),
score_threshold: None,
},
ShardPrefetch {
prefetches: Vec::new(),
query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Sparse(dummy_sparse.clone()),
"sparse",
)))),
limit: 100,
params: None,
filter: Some(filter_inner2.clone()),
score_threshold: None,
},
],
query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
filter: Some(filter_outer.clone()),
score_threshold: None,
limit: 50,
offset: 0,
params: None,
with_payload: WithPayloadInterface::Bool(false),
with_vector: WithVector::Bool(true),
};
let planned_query = PlannedQuery::try_from(vec![request]).unwrap();
assert_eq!(
planned_query.searches,
vec![
CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Dense(dummy_vector),
"dense",
)),
filter: Some(filter_outer.merge(&filter_inner1)),
params: None,
limit: 100,
offset: 0,
with_payload: Some(WithPayloadInterface::Bool(false)),
with_vector: Some(WithVector::Bool(false)),
score_threshold: None,
},
CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery::new(
VectorInternal::Sparse(dummy_sparse),
"sparse",
)),
filter: Some(filter_outer.merge(&filter_inner2)),
params: None,
limit: 100,
offset: 0,
with_payload: Some(WithPayloadInterface::Bool(false)),
with_vector: Some(WithVector::Bool(false)),
score_threshold: None,
}
]
);
assert_eq!(
planned_query.root_plans,
vec![RootPlan {
with_payload: WithPayloadInterface::Bool(false),
with_vector: WithVector::Bool(true),
merge_plan: MergePlan {
sources: vec![Source::SearchesIdx(0), Source::SearchesIdx(1)],
rescore_stages: Some(RescoreStages::collection_level(RescoreParams {
rescore: ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K)),
limit: 50,
score_threshold: None,
params: None,
}))
}
}]
);
}
#[test]
fn test_try_from_rrf_without_source() {
    // RRF fusion combines results of sibling prefetches; a fusion query
    // with no prefetches at all has nothing to fuse and must be rejected
    // at planning time.
    let fusion_only_request = ShardQueryRequest {
        prefetches: Vec::new(),
        query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
        filter: Some(Filter::default()),
        score_threshold: None,
        limit: 50,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(true),
        with_payload: WithPayloadInterface::Bool(false),
    };
    let planning_result = PlannedQuery::try_from(vec![fusion_only_request]);
    assert!(planning_result.is_err());
}
/// Checks that per-prefetch params (limit, filter, score_threshold, params)
/// end up on the core search, while the top-level params end up on the
/// collection-level rescore stage.
#[test]
fn test_base_params_mapping_in_try_from() {
    let dummy_vector = vec![1.0, 2.0, 3.0];
    let dummy_params = Some(SearchParams {
        indexed_only: true,
        ..Default::default()
    });
    let dummy_filter = Some(Filter::new_must(Condition::Field(
        FieldCondition::new_match(
            "my_key".try_into().unwrap(),
            Match::new_value(segment::types::ValueVariants::String("hello".to_string())),
        ),
    )));
    let top_level_params = Some(SearchParams {
        exact: true,
        ..Default::default()
    });
    let request = ShardQueryRequest {
        prefetches: vec![ShardPrefetch {
            prefetches: Vec::new(),
            query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
                VectorInternal::Dense(dummy_vector.clone()),
                "dense",
            )))),
            limit: 37,
            params: dummy_params,
            filter: dummy_filter.clone(),
            score_threshold: Some(OrderedFloat(0.1)),
        }],
        query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
        filter: Some(Filter::default()),
        score_threshold: Some(OrderedFloat(0.666)),
        limit: 50,
        offset: 49,
        // these params will be ignored because we have a prefetch
        params: top_level_params,
        with_payload: WithPayloadInterface::Bool(true),
        with_vector: WithVector::Bool(false),
    };
    let planned_query = PlannedQuery::try_from(vec![request]).unwrap();
    assert_eq!(
        planned_query.root_plans,
        vec![RootPlan {
            with_payload: WithPayloadInterface::Bool(true),
            with_vector: WithVector::Bool(false),
            merge_plan: MergePlan {
                sources: vec![Source::SearchesIdx(0)],
                // 99 = top-level limit (50) + offset (49)
                rescore_stages: Some(RescoreStages::collection_level(RescoreParams {
                    rescore: ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K)),
                    limit: 99,
                    score_threshold: Some(OrderedFloat(0.666)),
                    params: top_level_params,
                }))
            }
        }]
    );
    assert_eq!(
        planned_query.searches,
        vec![CoreSearchRequest {
            query: QueryEnum::Nearest(NamedQuery::new(
                VectorInternal::Dense(dummy_vector),
                "dense",
            ),),
            filter: dummy_filter,
            params: dummy_params,
            limit: 37,
            // offset is applied at the root only, not per prefetch
            offset: 0,
            with_payload: Some(WithPayloadInterface::Bool(false)),
            with_vector: Some(WithVector::Bool(false)),
            score_threshold: Some(0.1)
        }]
    )
}
/// Builds a chain of nested `ShardPrefetch`es with exactly `depth` levels,
/// each level holding a single dense nearest-neighbor query.
///
/// The innermost (deepest) prefetch has `limit: 100` and no children; every
/// wrapping level has `limit: 10`, matching the shapes used by the tests.
///
/// # Panics
/// Panics if `depth` is 0: a prefetch chain has at least one level. The
/// previous implementation underflowed `depth - 1` in that case (a confusing
/// debug panic, or wrap-around in release builds).
pub fn make_prefetches_at_depth(depth: usize) -> ShardPrefetch {
    assert!(depth >= 1, "prefetch chain needs at least one level");
    // One level of the chain, wrapping the given children.
    fn dense_prefetch(prefetches: Vec<ShardPrefetch>, limit: usize) -> ShardPrefetch {
        ShardPrefetch {
            prefetches,
            query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
                VectorInternal::Dense(vec![1.0, 2.0, 3.0]),
                "dense",
            )))),
            limit,
            params: None,
            filter: None,
            score_threshold: None,
        }
    }
    // Start at the innermost prefetch and wrap it `depth - 1` times.
    let mut prefetch = dense_prefetch(Vec::new(), 100);
    for _ in 1..depth {
        prefetch = dense_prefetch(vec![prefetch], 10);
    }
    prefetch
}
/// `prefetches_depth` must count nesting levels correctly, and planning must
/// reject requests deeper than the maximum supported depth (64).
#[test]
fn test_detect_max_depth() {
    // depth 0
    let mut request = ShardQueryRequest {
        prefetches: vec![],
        query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
            VectorInternal::Dense(vec![1.0, 2.0, 3.0]),
            "dense",
        )))),
        filter: None,
        score_threshold: None,
        limit: 10,
        offset: 0,
        params: None,
        with_vector: WithVector::Bool(true),
        with_payload: WithPayloadInterface::Bool(false),
    };
    assert_eq!(request.prefetches_depth(), 0);
    // depth 3, spelled out by hand
    request.prefetches = vec![ShardPrefetch {
        prefetches: vec![ShardPrefetch {
            prefetches: vec![ShardPrefetch {
                prefetches: vec![],
                query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
                    VectorInternal::Dense(vec![1.0, 2.0, 3.0]),
                    "dense",
                )))),
                limit: 10,
                params: None,
                filter: None,
                score_threshold: None,
            }],
            query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
                VectorInternal::Dense(vec![1.0, 2.0, 3.0]),
                "dense",
            )))),
            limit: 10,
            params: None,
            filter: None,
            score_threshold: None,
        }],
        query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
            VectorInternal::Dense(vec![1.0, 2.0, 3.0]),
            "dense",
        )))),
        limit: 10,
        params: None,
        filter: None,
        score_threshold: None,
    }];
    assert_eq!(request.prefetches_depth(), 3);
    // use with helper for less boilerplate
    request.prefetches = vec![make_prefetches_at_depth(3)];
    assert_eq!(request.prefetches_depth(), 3);
    let _planned_query = PlannedQuery::try_from(vec![request.clone()]).unwrap();
    // depth 64 is the deepest request that still plans successfully
    request.prefetches = vec![make_prefetches_at_depth(64)];
    assert_eq!(request.prefetches_depth(), 64);
    let _planned_query = PlannedQuery::try_from(vec![request.clone()]).unwrap();
    request.prefetches = vec![make_prefetches_at_depth(65)];
    assert_eq!(request.prefetches_depth(), 65);
    // assert error
    assert!(matches!(
        PlannedQuery::try_from(vec![request]),
        Err(OperationError::ValidationError { description }) if description == "prefetches depth 65 exceeds max depth 64",
    ));
}
/// Leaf prefetch carrying a dense nearest-neighbor query with the given limit.
fn dummy_core_prefetch(limit: usize) -> ShardPrefetch {
    ShardPrefetch {
        prefetches: Vec::new(),
        query: Some(nearest_query()),
        limit,
        params: None,
        filter: None,
        score_threshold: None,
    }
}
/// Leaf prefetch without any query, which the planner turns into a scroll.
fn dummy_scroll_prefetch(limit: usize) -> ShardPrefetch {
    ShardPrefetch {
        prefetches: Vec::new(),
        query: None,
        limit,
        params: None,
        filter: None,
        score_threshold: None,
    }
}
/// Nearest-neighbor scoring query against the default dense vector.
fn nearest_query() -> ScoringQuery {
    let named = NamedQuery::default_dense(vec![0.1, 0.2, 0.3, 0.4]);
    ScoringQuery::Vector(QueryEnum::Nearest(named))
}
/// End-to-end planning of a heterogeneous batch: a plain search, a plain
/// scroll, and a nested fusion request. All requests must share the planned
/// query's `searches`/`scrolls` pools, referenced by index.
#[test]
fn test_from_batch_of_requests() {
    let requests = vec![
        // A no-prefetch core_search query
        ShardQueryRequest {
            prefetches: vec![],
            query: Some(nearest_query()),
            filter: None,
            score_threshold: None,
            limit: 10,
            offset: 0,
            params: None,
            with_payload: WithPayloadInterface::Bool(false),
            with_vector: WithVector::Bool(false),
        },
        // A no-prefetch scroll query
        ShardQueryRequest {
            prefetches: vec![],
            query: None,
            filter: None,
            score_threshold: None,
            limit: 20,
            offset: 0,
            params: None,
            with_payload: WithPayloadInterface::Bool(false),
            with_vector: WithVector::Bool(false),
        },
        // A double fusion query
        ShardQueryRequest {
            prefetches: vec![
                ShardPrefetch {
                    prefetches: vec![dummy_core_prefetch(30), dummy_core_prefetch(40)],
                    query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
                    filter: None,
                    params: None,
                    score_threshold: None,
                    limit: 10,
                },
                dummy_scroll_prefetch(50),
            ],
            query: Some(ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K))),
            filter: None,
            score_threshold: None,
            limit: 10,
            offset: 0,
            params: None,
            with_payload: WithPayloadInterface::Bool(true),
            with_vector: WithVector::Bool(true),
        },
    ];
    let planned_query = PlannedQuery::try_from(requests).unwrap();
    // 1 search from the first request + 2 from the inner fusion prefetch
    assert_eq!(planned_query.searches.len(), 3);
    // 1 scroll from the second request + 1 from the scroll prefetch
    assert_eq!(planned_query.scrolls.len(), 2);
    assert_eq!(planned_query.root_plans.len(), 3);
    assert_eq!(
        planned_query.root_plans,
        vec![
            RootPlan {
                with_vector: WithVector::Bool(false),
                with_payload: WithPayloadInterface::Bool(false),
                merge_plan: MergePlan {
                    sources: vec![Source::SearchesIdx(0)],
                    rescore_stages: None,
                },
            },
            RootPlan {
                with_vector: WithVector::Bool(false),
                with_payload: WithPayloadInterface::Bool(false),
                merge_plan: MergePlan {
                    sources: vec![Source::ScrollsIdx(0)],
                    rescore_stages: None,
                },
            },
            RootPlan {
                with_vector: WithVector::Bool(true),
                with_payload: WithPayloadInterface::Bool(true),
                merge_plan: MergePlan {
                    sources: vec![
                        // inner fusion: rescored at shard level
                        Source::Prefetch(Box::from(MergePlan {
                            sources: vec![Source::SearchesIdx(1), Source::SearchesIdx(2),],
                            rescore_stages: Some(RescoreStages::shard_level(RescoreParams {
                                rescore: ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K)),
                                limit: 10,
                                score_threshold: None,
                                params: None,
                            })),
                        })),
                        Source::ScrollsIdx(1),
                    ],
                    // outer fusion: rescored at collection level
                    rescore_stages: Some(RescoreStages::collection_level(RescoreParams {
                        rescore: ScoringQuery::Fusion(FusionInternal::RrfK(DEFAULT_RRF_K)),
                        limit: 10,
                        score_threshold: None,
                        params: None,
                    })),
                },
            },
        ]
    );
    assert_eq!(planned_query.searches[0].limit, 10);
    assert_eq!(planned_query.searches[1].limit, 30);
    assert_eq!(planned_query.searches[2].limit, 40);
    assert_eq!(planned_query.scrolls[0].limit, 20);
    assert_eq!(planned_query.scrolls[1].limit, 50);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/query_enum.rs | lib/shard/src/query/query_enum.rs | use api::grpc;
use segment::data_types::vectors::*;
use segment::types::{VectorName, VectorNameBuf};
use segment::vector_storage::query::*;
use serde::Serialize;
use sparse::common::sparse_vector::SparseVector;
/// Every kind of vector query that can be performed on segment level.
#[derive(Clone, Debug, PartialEq, Hash, Serialize)]
pub enum QueryEnum {
    /// Plain nearest-neighbor search against a single vector.
    Nearest(NamedQuery<VectorInternal>),
    /// Recommendation via [`RecoQuery`], taking the best per-example score.
    RecommendBestScore(NamedQuery<RecoQuery<VectorInternal>>),
    /// Recommendation via [`RecoQuery`], summing per-example scores.
    RecommendSumScores(NamedQuery<RecoQuery<VectorInternal>>),
    /// Discovery search, see [`DiscoveryQuery`].
    Discover(NamedQuery<DiscoveryQuery<VectorInternal>>),
    /// Context search, see [`ContextQuery`].
    Context(NamedQuery<ContextQuery<VectorInternal>>),
    /// Naive relevance-feedback search, see [`NaiveFeedbackQuery`].
    FeedbackNaive(NamedQuery<NaiveFeedbackQuery<VectorInternal>>),
}
impl QueryEnum {
pub fn get_vector_name(&self) -> &VectorName {
match self {
QueryEnum::Nearest(vector) => vector.get_name(),
QueryEnum::RecommendBestScore(reco_query) => reco_query.get_name(),
QueryEnum::RecommendSumScores(reco_query) => reco_query.get_name(),
QueryEnum::Discover(discovery_query) => discovery_query.get_name(),
QueryEnum::Context(context_query) => context_query.get_name(),
QueryEnum::FeedbackNaive(feedback_query) => feedback_query.get_name(),
}
}
/// Only when the distance is the scoring, this will return true.
pub fn is_distance_scored(&self) -> bool {
match self {
QueryEnum::Nearest(_) => true,
QueryEnum::RecommendBestScore(_)
| QueryEnum::RecommendSumScores(_)
| QueryEnum::Discover(_)
| QueryEnum::Context(_)
| QueryEnum::FeedbackNaive(_) => false,
}
}
pub fn iterate_sparse(&self, mut f: impl FnMut(&VectorName, &SparseVector)) {
match self {
QueryEnum::Nearest(named) => match &named.query {
VectorInternal::Sparse(sparse_vector) => f(named.get_name(), sparse_vector),
VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => {}
},
QueryEnum::RecommendBestScore(reco_query)
| QueryEnum::RecommendSumScores(reco_query) => {
let name = reco_query.get_name();
for vector in reco_query.query.flat_iter() {
match vector {
VectorInternal::Sparse(sparse_vector) => f(name, sparse_vector),
VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => {}
}
}
}
QueryEnum::Discover(discovery_query) => {
let name = discovery_query.get_name();
for vector in discovery_query.query.flat_iter() {
match vector {
VectorInternal::Sparse(sparse_vector) => f(name, sparse_vector),
VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => {}
}
}
}
QueryEnum::Context(context_query) => {
let name = context_query.get_name();
for vector in context_query.query.flat_iter() {
match vector {
VectorInternal::Sparse(sparse_vector) => f(name, sparse_vector),
VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => {}
}
}
}
QueryEnum::FeedbackNaive(feedback_query) => {
let name = feedback_query.get_name();
for vector in feedback_query.query.flat_iter() {
match vector {
VectorInternal::Sparse(sparse_vector) => f(name, sparse_vector),
VectorInternal::Dense(_) | VectorInternal::MultiDense(_) => {}
}
}
}
}
}
/// Returns the estimated cost of using this query in terms of number of vectors.
/// The cost approximates how many similarity comparisons this query will make against one point.
pub fn search_cost(&self) -> usize {
match self {
QueryEnum::Nearest(named_query) => search_cost([&named_query.query]),
QueryEnum::RecommendBestScore(named_query) => {
search_cost(named_query.query.flat_iter())
}
QueryEnum::RecommendSumScores(named_query) => {
search_cost(named_query.query.flat_iter())
}
QueryEnum::Discover(named_query) => search_cost(named_query.query.flat_iter()),
QueryEnum::Context(named_query) => search_cost(named_query.query.flat_iter()),
QueryEnum::FeedbackNaive(named_query) => search_cost(named_query.query.flat_iter()),
}
}
}
fn search_cost<'a>(vectors: impl IntoIterator<Item = &'a VectorInternal>) -> usize {
vectors
.into_iter()
.map(VectorInternal::similarity_cost)
.sum()
}
/// Lets a `QueryEnum` be passed where `impl AsRef<QueryEnum>` is expected.
impl AsRef<QueryEnum> for QueryEnum {
    fn as_ref(&self) -> &QueryEnum {
        self
    }
}
/// A bare dense vector becomes a nearest-neighbor query with no explicit
/// vector name (`using: None`).
impl From<DenseVector> for QueryEnum {
    fn from(vector: DenseVector) -> Self {
        QueryEnum::Nearest(NamedQuery {
            query: VectorInternal::Dense(vector),
            using: None,
        })
    }
}
impl From<NamedQuery<DiscoveryQuery<VectorInternal>>> for QueryEnum {
    fn from(query: NamedQuery<DiscoveryQuery<VectorInternal>>) -> Self {
        QueryEnum::Discover(query)
    }
}
/// Strips the vector name and keeps only the query payload, for use where the
/// target storage is already known.
impl From<QueryEnum> for QueryVector {
    fn from(query: QueryEnum) -> Self {
        match query {
            QueryEnum::Nearest(named) => QueryVector::Nearest(named.query),
            QueryEnum::RecommendBestScore(named) => QueryVector::RecommendBestScore(named.query),
            QueryEnum::RecommendSumScores(named) => QueryVector::RecommendSumScores(named.query),
            QueryEnum::Discover(named) => QueryVector::Discovery(named.query),
            QueryEnum::Context(named) => QueryVector::Context(named.query),
            QueryEnum::FeedbackNaive(named) => QueryVector::FeedbackNaive(named.query),
        }
    }
}
/// Encodes a query for the dedicated gRPC search/recommend/discover endpoints.
///
/// NOTE(review): the vector name (`using`) is dropped here — presumably it is
/// carried by a separate field of the enclosing request; confirm at call sites.
impl From<QueryEnum> for grpc::QueryEnum {
    fn from(value: QueryEnum) -> Self {
        match value {
            QueryEnum::Nearest(vector) => grpc::QueryEnum {
                query: Some(grpc::query_enum::Query::NearestNeighbors(
                    grpc::Vector::from(vector.query),
                )),
            },
            QueryEnum::RecommendBestScore(named) => grpc::QueryEnum {
                query: Some(grpc::query_enum::Query::RecommendBestScore(
                    named.query.into(),
                )),
            },
            QueryEnum::RecommendSumScores(named) => grpc::QueryEnum {
                query: Some(grpc::query_enum::Query::RecommendSumScores(
                    named.query.into(),
                )),
            },
            QueryEnum::Discover(named) => grpc::QueryEnum {
                query: Some(grpc::query_enum::Query::Discover(grpc::DiscoveryQuery {
                    target: Some(named.query.target.into()),
                    context: named
                        .query
                        .pairs
                        .into_iter()
                        .map(|pair| grpc::ContextPair {
                            positive: Some(pair.positive.into()),
                            negative: Some(pair.negative.into()),
                        })
                        .collect(),
                })),
            },
            QueryEnum::Context(named) => grpc::QueryEnum {
                query: Some(grpc::query_enum::Query::Context(grpc::ContextQuery {
                    context: named
                        .query
                        .pairs
                        .into_iter()
                        .map(|pair| grpc::ContextPair {
                            positive: Some(pair.positive.into()),
                            negative: Some(pair.negative.into()),
                        })
                        .collect(),
                })),
            },
            QueryEnum::FeedbackNaive(_named) => {
                // This conversion only happens for search/recommend/discover dedicated endpoints. Feedback does not have one.
                unimplemented!("there is no specialized feedback endpoint")
            }
        }
    }
}
impl QueryEnum {
    /// Decodes a `QueryEnum` from the internal gRPC `RawQuery` representation.
    ///
    /// `using` is the vector name the query targets. For dense/multi-dense
    /// nearest queries it may be omitted and falls back to the default vector
    /// name; a sparse nearest vector must always be named.
    pub fn from_grpc_raw_query(
        raw_query: grpc::RawQuery,
        using: Option<VectorNameBuf>,
    ) -> Result<QueryEnum, tonic::Status> {
        use grpc::raw_query::Variant;
        let variant = raw_query
            .variant
            .ok_or_else(|| tonic::Status::invalid_argument("missing field: variant"))?;
        let query_enum = match variant {
            Variant::Nearest(nearest) => {
                let vector = VectorInternal::try_from(nearest)?;
                // Resolve the vector name: explicit name wins, dense vectors
                // may fall back to the default, sparse must be explicit.
                let name = match (using, &vector) {
                    (None, VectorInternal::Sparse(_)) => {
                        return Err(tonic::Status::invalid_argument(
                            "Sparse vector must have a name",
                        ));
                    }
                    (
                        Some(name),
                        VectorInternal::MultiDense(_)
                        | VectorInternal::Sparse(_)
                        | VectorInternal::Dense(_),
                    ) => name,
                    (None, VectorInternal::MultiDense(_) | VectorInternal::Dense(_)) => {
                        DEFAULT_VECTOR_NAME.to_owned()
                    }
                };
                let named_vector = NamedQuery::new(vector, name);
                QueryEnum::Nearest(named_vector)
            }
            Variant::RecommendBestScore(recommend) => QueryEnum::RecommendBestScore(NamedQuery {
                query: RecoQuery::try_from(recommend)?,
                using,
            }),
            Variant::RecommendSumScores(recommend) => QueryEnum::RecommendSumScores(NamedQuery {
                query: RecoQuery::try_from(recommend)?,
                using,
            }),
            Variant::Discover(discovery) => QueryEnum::Discover(NamedQuery {
                query: DiscoveryQuery::try_from(discovery)?,
                using,
            }),
            Variant::Context(context) => QueryEnum::Context(NamedQuery {
                query: ContextQuery::try_from(context)?,
                using,
            }),
            Variant::Feedback(grpc::raw_query::Feedback {
                target,
                feedback,
                strategy,
            }) => {
                let strategy = strategy
                    .and_then(|strategy| strategy.variant)
                    .ok_or_else(|| {
                        tonic::Status::invalid_argument("feedback strategy is required")
                    })?;
                let target = VectorInternal::try_from(
                    target.ok_or_else(|| tonic::Status::invalid_argument("No target provided"))?,
                )?;
                // Fail on the first malformed feedback item
                let feedback = feedback
                    .into_iter()
                    .map(<FeedbackItem<VectorInternal>>::try_from)
                    .collect::<Result<Vec<_>, _>>()?;
                match strategy {
                    grpc::feedback_strategy::Variant::Naive(strategy) => {
                        let feedback_query = NaiveFeedbackQuery {
                            target,
                            feedback,
                            coefficients: NaiveFeedbackCoefficients::from(strategy),
                        };
                        let named = NamedQuery {
                            query: feedback_query,
                            using,
                        };
                        QueryEnum::FeedbackNaive(named)
                    }
                }
            }
        };
        Ok(query_enum)
    }
}
/// Encodes back into the internal gRPC `RawQuery` form. Inverse of
/// [`QueryEnum::from_grpc_raw_query`]; the vector name is not part of
/// `RawQuery` and travels as the enclosing message's `using` field.
impl From<QueryEnum> for grpc::RawQuery {
    fn from(query: QueryEnum) -> Self {
        use grpc::raw_query::Variant;
        let variant = match query {
            QueryEnum::Nearest(named) => Variant::Nearest(grpc::RawVector::from(named.query)),
            QueryEnum::RecommendBestScore(named) => {
                Variant::RecommendBestScore(grpc::raw_query::Recommend::from(named.query))
            }
            QueryEnum::RecommendSumScores(named) => {
                Variant::RecommendSumScores(grpc::raw_query::Recommend::from(named.query))
            }
            QueryEnum::Discover(named) => {
                Variant::Discover(grpc::raw_query::Discovery::from(named.query))
            }
            QueryEnum::Context(named) => {
                Variant::Context(grpc::raw_query::Context::from(named.query))
            }
            QueryEnum::FeedbackNaive(named) => {
                Variant::Feedback(grpc::raw_query::Feedback::from(named.query))
            }
        };
        grpc::RawQuery {
            variant: Some(variant),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/mod.rs | lib/shard/src/query/mod.rs | pub mod formula;
pub mod mmr;
pub mod planned_query;
pub mod query_enum;
pub mod scroll;
pub mod query_context;
#[cfg(test)]
mod tests;
use api::{grpc, rest};
use common::types::ScoreType;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use segment::common::reciprocal_rank_fusion::DEFAULT_RRF_K;
use segment::data_types::order_by::OrderBy;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, NamedQuery, NamedVectorStruct, VectorInternal,
};
use segment::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula;
use segment::types::*;
use serde::Serialize;
use self::formula::*;
use self::query_enum::*;
use crate::search::CoreSearchRequest;
/// Internal response type for a universal query request.
///
/// Capable of returning multiple intermediate results if needed, like the case of RRF (Reciprocal Rank Fusion)
pub type ShardQueryResponse = Vec<Vec<ScoredPoint>>;
/// Internal representation of a universal query request.
///
/// Direct translation of the user-facing request, but with all point ids substituted with their corresponding vectors.
///
/// For the case of formula queries, it collects conditions and variables too.
#[derive(Clone, Debug, Hash, Serialize)]
pub struct ShardQueryRequest {
    /// Sub-requests whose results feed the top-level `query`
    pub prefetches: Vec<ShardPrefetch>,
    /// How to score/fuse results; `None` appears to mean a scroll-like
    /// request — confirm against the planner
    #[serde(skip_serializing_if = "Option::is_none")]
    pub query: Option<ScoringQuery>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<Filter>,
    /// Score cut-off for the final result
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<OrderedFloat<ScoreType>>,
    pub limit: usize,
    pub offset: usize,
    /// Search params for when there is no prefetch
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<SearchParams>,
    pub with_vector: WithVector,
    pub with_payload: WithPayloadInterface,
}
impl ShardQueryRequest {
    /// Maximum nesting depth across all prefetches; 0 when there are none.
    pub fn prefetches_depth(&self) -> usize {
        self.prefetches
            .iter()
            .map(|prefetch| prefetch.depth())
            .max()
            .unwrap_or_default()
    }
    /// Filters of the request followed by those of its prefetch tree,
    /// in pre-order.
    pub fn filter_refs(&self) -> Vec<Option<&Filter>> {
        let nested = self.prefetches.iter().flat_map(ShardPrefetch::filter_refs);
        std::iter::once(self.filter.as_ref()).chain(nested).collect()
    }
}
/// One node of a universal query's prefetch tree.
#[derive(Clone, Debug, Hash, Serialize)]
pub struct ShardPrefetch {
    /// Nested sub-prefetches whose results feed this node's `query`
    pub prefetches: Vec<ShardPrefetch>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub query: Option<ScoringQuery>,
    pub limit: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<SearchParams>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<Filter>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub score_threshold: Option<OrderedFloat<ScoreType>>,
}
impl ShardPrefetch {
    /// Number of nesting levels in this prefetch tree, counting `self` as one.
    pub fn depth(&self) -> usize {
        let deepest_child = self
            .prefetches
            .iter()
            .map(Self::depth)
            .max()
            .unwrap_or(0);
        deepest_child + 1
    }
    /// Filters of this prefetch and all nested ones, in pre-order.
    fn filter_refs(&self) -> Vec<Option<&Filter>> {
        let nested = self.prefetches.iter().flat_map(Self::filter_refs);
        std::iter::once(self.filter.as_ref()).chain(nested).collect()
    }
}
/// Same as `Query`, but with the resolved vector references.
#[derive(Clone, Debug, PartialEq, Hash, Serialize)]
pub enum ScoringQuery {
    /// Score points against some vector(s)
    Vector(QueryEnum),
    /// Reciprocal rank fusion (combines the results of the sibling prefetches)
    Fusion(FusionInternal),
    /// Order by a payload field
    OrderBy(OrderBy),
    /// Score boosting via an arbitrary formula
    Formula(ParsedFormula),
    /// Sample points
    Sample(SampleInternal),
    /// Maximal Marginal Relevance
    ///
    /// This one behaves a little differently than the other scorings, since it is two parts.
    /// It will create one nearest neighbor search in segment space and then try to resolve MMR algorithm higher up.
    ///
    /// E.g. If it is the root query of a request:
    /// 1. Performs search all the way down to segments.
    /// 2. MMR gets calculated once results reach collection level.
    Mmr(MmrInternal),
}
impl ScoringQuery {
    /// Whether the query needs the prefetches results from all shards to compute the final score
    ///
    /// If false, there is a single list of scored points which contain the final score.
    pub fn needs_intermediate_results(&self) -> bool {
        match self {
            // RRF needs the per-prefetch ranking information; DBSF needs the
            // per-prefetch score distribution — both require intermediates.
            Self::Fusion(FusionInternal::RrfK(_) | FusionInternal::Dbsf) => true,
            // MMR is a nearest neighbors search before computing diversity at collection level
            Self::Mmr(_) => false,
            Self::Vector(_) | Self::OrderBy(_) | Self::Formula(_) | Self::Sample(_) => false,
        }
    }
    /// Get the vector name if it is scored against a vector
    pub fn get_vector_name(&self) -> Option<&VectorName> {
        match self {
            Self::Vector(query) => Some(query.get_vector_name()),
            Self::Mmr(mmr) => Some(&mmr.using),
            Self::Fusion(_) | Self::OrderBy(_) | Self::Formula(_) | Self::Sample(_) => None,
        }
    }
}
/// How to fuse the results of multiple prefetches into a single list.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)]
pub enum FusionInternal {
    /// Reciprocal Rank Fusion, carrying its `k` ranking constant
    RrfK(usize),
    /// Distribution-based score fusion
    Dbsf,
}
/// Ways of sampling points without scoring against a vector.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)]
pub enum SampleInternal {
    Random,
}
/// Maximal Marginal Relevance configuration
#[derive(Clone, Debug, PartialEq, Hash, Serialize)]
pub struct MmrInternal {
    /// Query vector, used to get the relevance of each point.
    pub vector: VectorInternal,
    /// Vector name to use for similarity computation, defaults to empty string (default vector)
    pub using: VectorNameBuf,
    /// Lambda parameter controlling diversity vs relevance trade-off (0.0 = full diversity, 1.0 = full relevance)
    pub lambda: OrderedFloat<f32>,
    /// Maximum number of candidates to pre-select using nearest neighbors.
    pub candidates_limit: usize,
}
/// A core search request is a universal query with no prefetches.
impl From<CoreSearchRequest> for ShardQueryRequest {
    fn from(value: CoreSearchRequest) -> Self {
        let CoreSearchRequest {
            query,
            filter,
            score_threshold,
            limit,
            offset,
            params,
            with_vector,
            with_payload,
        } = value;
        Self {
            prefetches: vec![],
            query: Some(ScoringQuery::Vector(query)),
            filter,
            score_threshold: score_threshold.map(OrderedFloat),
            limit,
            offset,
            params,
            with_vector: with_vector.unwrap_or_default(),
            with_payload: with_payload.unwrap_or_default(),
        }
    }
}
/// A REST search request becomes a universal nearest-neighbor query with no
/// prefetches.
impl From<rest::schema::SearchRequestInternal> for ShardQueryRequest {
    fn from(value: rest::schema::SearchRequestInternal) -> Self {
        let rest::schema::SearchRequestInternal {
            vector,
            filter,
            score_threshold,
            limit,
            offset,
            params,
            with_vector,
            with_payload,
        } = value;
        Self {
            prefetches: vec![],
            query: Some(ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::from(
                NamedVectorStruct::from(vector),
            )))),
            filter,
            score_threshold: score_threshold.map(OrderedFloat),
            limit,
            offset: offset.unwrap_or_default(),
            params,
            with_vector: with_vector.unwrap_or_default(),
            with_payload: with_payload.unwrap_or_default(),
        }
    }
}
/// Decodes a shard query request from its gRPC wire form.
impl TryFrom<grpc::QueryShardPoints> for ShardQueryRequest {
    type Error = tonic::Status;
    fn try_from(value: grpc::QueryShardPoints) -> Result<Self, Self::Error> {
        let grpc::QueryShardPoints {
            prefetch,
            query,
            using,
            filter,
            limit,
            params,
            score_threshold,
            offset,
            with_payload,
            with_vectors,
        } = value;
        let request = Self {
            prefetches: prefetch
                .into_iter()
                .map(ShardPrefetch::try_from)
                .try_collect()?,
            query: query
                .map(|query| ScoringQuery::try_from_grpc_query(query, using))
                .transpose()?,
            filter: filter.map(Filter::try_from).transpose()?,
            score_threshold: score_threshold.map(OrderedFloat),
            limit: limit as usize,
            offset: offset as usize,
            params: params.map(SearchParams::from),
            // Defaults when absent: vectors off, payload on
            with_vector: with_vectors
                .map(WithVector::from)
                .unwrap_or(WithVector::Bool(false)),
            with_payload: with_payload
                .map(WithPayloadInterface::try_from)
                .transpose()?
                .unwrap_or(WithPayloadInterface::Bool(true)),
        };
        Ok(request)
    }
}
/// Encodes a shard query request into its gRPC wire form.
impl From<ShardQueryRequest> for grpc::QueryShardPoints {
    fn from(value: ShardQueryRequest) -> Self {
        let ShardQueryRequest {
            prefetches,
            query,
            filter,
            score_threshold,
            limit,
            offset,
            params,
            with_vector,
            with_payload,
        } = value;
        Self {
            prefetch: prefetches
                .into_iter()
                .map(grpc::query_shard_points::Prefetch::from)
                .collect(),
            // The vector name travels next to the query on the wire
            using: query
                .as_ref()
                .and_then(|query| query.get_vector_name().map(ToOwned::to_owned)),
            query: query.map(From::from),
            filter: filter.map(grpc::Filter::from),
            params: params.map(grpc::SearchParams::from),
            score_threshold: score_threshold.map(OrderedFloat::into_inner),
            limit: limit as u64,
            offset: offset as u64,
            with_payload: Some(grpc::WithPayloadSelector::from(with_payload)),
            with_vectors: Some(grpc::WithVectorsSelector::from(with_vector)),
        }
    }
}
/// Encodes a prefetch node (and, recursively, its children) for the wire.
impl From<ShardPrefetch> for grpc::query_shard_points::Prefetch {
    fn from(value: ShardPrefetch) -> Self {
        let ShardPrefetch {
            prefetches,
            query,
            limit,
            params,
            filter,
            score_threshold,
        } = value;
        Self {
            prefetch: prefetches.into_iter().map(Self::from).collect(),
            using: query
                .as_ref()
                .and_then(|query| query.get_vector_name().map(ToOwned::to_owned)),
            query: query.map(From::from),
            filter: filter.map(grpc::Filter::from),
            params: params.map(grpc::SearchParams::from),
            score_threshold: score_threshold.map(OrderedFloat::into_inner),
            limit: limit as u64,
        }
    }
}
/// Decodes a prefetch node (and, recursively, its children) from the wire.
impl TryFrom<grpc::query_shard_points::Prefetch> for ShardPrefetch {
    type Error = tonic::Status;
    fn try_from(value: grpc::query_shard_points::Prefetch) -> Result<Self, Self::Error> {
        let grpc::query_shard_points::Prefetch {
            prefetch,
            query,
            limit,
            params,
            filter,
            score_threshold,
            using,
        } = value;
        let shard_prefetch = Self {
            prefetches: prefetch
                .into_iter()
                .map(ShardPrefetch::try_from)
                .try_collect()?,
            query: query
                .map(|query| ScoringQuery::try_from_grpc_query(query, using))
                .transpose()?,
            limit: limit as usize,
            params: params.map(SearchParams::from),
            filter: filter.map(Filter::try_from).transpose()?,
            score_threshold: score_threshold.map(OrderedFloat),
        };
        Ok(shard_prefetch)
    }
}
/// Plain REST `rrf` (no explicit `k`) uses the default RRF constant.
impl From<rest::Fusion> for FusionInternal {
    fn from(value: rest::Fusion) -> Self {
        match value {
            rest::Fusion::Rrf => FusionInternal::RrfK(DEFAULT_RRF_K),
            rest::Fusion::Dbsf => FusionInternal::Dbsf,
        }
    }
}
impl From<rest::Rrf> for FusionInternal {
    fn from(value: rest::Rrf) -> Self {
        let rest::Rrf { k } = value;
        FusionInternal::RrfK(k.unwrap_or(DEFAULT_RRF_K))
    }
}
impl From<grpc::Fusion> for FusionInternal {
    fn from(fusion: grpc::Fusion) -> Self {
        match fusion {
            grpc::Fusion::Rrf => FusionInternal::RrfK(DEFAULT_RRF_K),
            grpc::Fusion::Dbsf => FusionInternal::Dbsf,
        }
    }
}
// NOTE(review): this conversion never fails; it is `TryFrom` presumably to
// line up with the other gRPC conversions' signatures — confirm callers
// before changing it to `From`.
impl TryFrom<grpc::Rrf> for FusionInternal {
    type Error = tonic::Status;
    fn try_from(rrf: grpc::Rrf) -> Result<Self, Self::Error> {
        let grpc::Rrf { k } = rrf;
        Ok(FusionInternal::RrfK(
            k.map(|k| k as usize).unwrap_or(DEFAULT_RRF_K),
        ))
    }
}
/// Decodes a raw protobuf enum number into a fusion kind.
impl TryFrom<i32> for FusionInternal {
    type Error = tonic::Status;
    fn try_from(fusion: i32) -> Result<Self, Self::Error> {
        let fusion = grpc::Fusion::try_from(fusion).map_err(|_| {
            tonic::Status::invalid_argument(format!("invalid fusion type value {fusion}",))
        })?;
        Ok(FusionInternal::from(fusion))
    }
}
impl From<FusionInternal> for grpc::Query {
    fn from(fusion: FusionInternal) -> Self {
        use grpc::query::Variant as QueryVariant;
        use grpc::{Fusion, Query, Rrf};
        // Avoid breaking rolling upgrade by keeping case of k==2 as Fusion::Rrf
        let variant = match fusion {
            FusionInternal::RrfK(DEFAULT_RRF_K) => QueryVariant::Fusion(i32::from(Fusion::Rrf)),
            FusionInternal::RrfK(k) => QueryVariant::Rrf(Rrf { k: Some(k as u32) }),
            FusionInternal::Dbsf => QueryVariant::Fusion(i32::from(Fusion::Dbsf)),
        };
        Query {
            variant: Some(variant),
        }
    }
}
impl From<FusionInternal> for grpc::query_shard_points::Query {
    fn from(fusion: FusionInternal) -> Self {
        use grpc::query_shard_points::Query;
        use grpc::query_shard_points::query::Score;
        use grpc::{Fusion, Rrf};
        // Avoid breaking rolling upgrade by keeping case of k==2 as Fusion::Rrf
        let score = match fusion {
            FusionInternal::RrfK(DEFAULT_RRF_K) => Score::Fusion(i32::from(Fusion::Rrf)),
            FusionInternal::RrfK(k) => Score::Rrf(Rrf { k: Some(k as u32) }),
            FusionInternal::Dbsf => Score::Fusion(i32::from(Fusion::Dbsf)),
        };
        Query { score: Some(score) }
    }
}
impl From<rest::Sample> for SampleInternal {
    fn from(value: rest::Sample) -> Self {
        match value {
            rest::Sample::Random => SampleInternal::Random,
        }
    }
}
impl From<grpc::Sample> for SampleInternal {
    fn from(value: grpc::Sample) -> Self {
        match value {
            grpc::Sample::Random => SampleInternal::Random,
        }
    }
}
/// Decodes a raw protobuf enum number into a sample kind.
impl TryFrom<i32> for SampleInternal {
    type Error = tonic::Status;
    fn try_from(sample: i32) -> Result<Self, Self::Error> {
        let sample = grpc::Sample::try_from(sample).map_err(|_| {
            tonic::Status::invalid_argument(format!("invalid sample type value {sample}",))
        })?;
        Ok(SampleInternal::from(sample))
    }
}
impl From<SampleInternal> for grpc::Sample {
    fn from(value: SampleInternal) -> Self {
        match value {
            SampleInternal::Random => grpc::Sample::Random,
        }
    }
}
impl ScoringQuery {
fn try_from_grpc_query(
query: grpc::query_shard_points::Query,
using: Option<VectorNameBuf>,
) -> Result<Self, tonic::Status> {
let score = query
.score
.ok_or_else(|| tonic::Status::invalid_argument("missing field: score"))?;
let scoring_query = match score {
grpc::query_shard_points::query::Score::Vector(query) => {
ScoringQuery::Vector(QueryEnum::from_grpc_raw_query(query, using)?)
}
grpc::query_shard_points::query::Score::Fusion(fusion) => {
ScoringQuery::Fusion(FusionInternal::try_from(fusion)?)
}
grpc::query_shard_points::query::Score::Rrf(rrf) => {
ScoringQuery::Fusion(FusionInternal::try_from(rrf)?)
}
grpc::query_shard_points::query::Score::OrderBy(order_by) => {
ScoringQuery::OrderBy(OrderBy::try_from(order_by)?)
}
grpc::query_shard_points::query::Score::Sample(sample) => {
ScoringQuery::Sample(SampleInternal::try_from(sample)?)
}
grpc::query_shard_points::query::Score::Formula(formula) => ScoringQuery::Formula(
ParsedFormula::try_from(FormulaInternal::try_from(formula)?).map_err(|e| {
tonic::Status::invalid_argument(format!("failed to parse formula: {e}"))
})?,
),
grpc::query_shard_points::query::Score::Mmr(grpc::MmrInternal {
vector,
lambda,
candidates_limit,
}) => {
let vector = vector
.ok_or_else(|| tonic::Status::invalid_argument("missing field: mmr.vector"))?;
let vector = VectorInternal::try_from(vector)?;
ScoringQuery::Mmr(MmrInternal {
vector,
using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_string()),
lambda: OrderedFloat(lambda),
candidates_limit: candidates_limit as usize,
})
}
};
Ok(scoring_query)
}
}
impl From<ScoringQuery> for grpc::query_shard_points::Query {
    /// Encodes the internal [`ScoringQuery`] back into its gRPC wire form.
    fn from(value: ScoringQuery) -> Self {
        use grpc::query_shard_points::query::Score;
        match value {
            ScoringQuery::Vector(query) => Self {
                score: Some(Score::Vector(grpc::RawQuery::from(query))),
            },
            // Fusion has its own Query conversion (it may pick Fusion or Rrf).
            ScoringQuery::Fusion(fusion) => Self::from(fusion),
            ScoringQuery::OrderBy(order_by) => Self {
                score: Some(Score::OrderBy(grpc::OrderBy::from(order_by))),
            },
            ScoringQuery::Formula(parsed_formula) => Self {
                score: Some(Score::Formula(grpc::Formula::from_parsed(parsed_formula))),
            },
            ScoringQuery::Sample(sample) => Self {
                score: Some(Score::Sample(grpc::Sample::from(sample) as i32)),
            },
            ScoringQuery::Mmr(MmrInternal {
                vector,
                // `grpc::MmrInternal` carries no vector name, so `using` is
                // dropped here; the receiver re-resolves it from the request.
                using: _,
                lambda,
                candidates_limit,
            }) => Self {
                score: Some(Score::Mmr(grpc::MmrInternal {
                    vector: Some(grpc::RawVector::from(vector)),
                    lambda: lambda.into_inner(),
                    candidates_limit: candidates_limit as u32,
                })),
            },
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/query_context.rs | lib/shard/src/query/query_context.rs | use std::sync::atomic::AtomicBool;
use std::time::Duration;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::iterator_ext::IteratorExt;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::query_context::QueryContext;
use segment::types::VectorName;
use crate::common::stopping_guard::StoppingGuard;
use crate::search::CoreSearchRequest;
use crate::segment_holder::LockedSegmentHolder;
/// Builds a [`QueryContext`] for a search batch, wiring in the stop flag,
/// hardware accounting, and IDF statistics for sparse vectors that need them.
pub fn init_query_context(
    batch_request: &[CoreSearchRequest],
    // How many KBs segment should have to be considered requiring indexing for search
    search_optimized_threshold_kb: usize,
    is_stopped_guard: &StoppingGuard,
    hw_measurement_acc: HwMeasurementAcc,
    // Predicate deciding, per vector name, whether IDF stats must be collected.
    check_idf_required: impl Fn(&VectorName) -> bool,
) -> QueryContext {
    let mut query_context = QueryContext::new(search_optimized_threshold_kb, hw_measurement_acc)
        .with_is_stopped(is_stopped_guard.get_is_stopped());
    // Register IDF requirements for every sparse sub-query in the batch.
    for search_request in batch_request {
        search_request
            .query
            .iterate_sparse(|vector_name, sparse_vector| {
                if check_idf_required(vector_name) {
                    query_context.init_idf(vector_name, &sparse_vector.indices);
                }
            })
    }
    query_context
}
/// Lets every segment populate the query context (e.g. collection-wide stats).
///
/// Returns `Ok(None)` when the shard has no segments, and a timeout error if
/// any lock cannot be acquired within the remaining time budget.
pub fn fill_query_context(
    mut query_context: QueryContext,
    segments: LockedSegmentHolder,
    timeout: Duration,
    is_stopped: &AtomicBool,
) -> OperationResult<Option<QueryContext>> {
    let start = std::time::Instant::now();
    // Acquire the holder read lock, giving up after `timeout`.
    let Some(segments) = segments.try_read_for(timeout) else {
        return Err(OperationError::timeout(timeout, "fill query context"));
    };
    if segments.is_empty() {
        return Ok(None);
    }
    let segments = segments.non_appendable_then_appendable_segments();
    // `stop_if` aborts iteration early once the cancellation flag is set.
    for locked_segment in segments.stop_if(is_stopped) {
        let segment = locked_segment.get();
        // Shrink the budget by elapsed time so the whole call honors `timeout`.
        let timeout = timeout.saturating_sub(start.elapsed());
        let Some(segment_guard) = segment.try_read_for(timeout) else {
            return Err(OperationError::timeout(timeout, "fill query context"));
        };
        segment_guard.fill_query_context(&mut query_context);
    }
    Ok(Some(query_context))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/planned_query.rs | lib/shard/src/query/planned_query.rs | use common::types::ScoreType;
use ordered_float::OrderedFloat;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::vectors::NamedQuery;
use segment::types::{Filter, SearchParams, WithPayloadInterface, WithVector};
use super::query_enum::QueryEnum;
use super::scroll::{QueryScrollRequestInternal, ScrollOrder};
use super::*;
use crate::search::CoreSearchRequest;
/// Maximum allowed nesting depth of prefetches, bounding planner recursion.
const MAX_PREFETCH_DEPTH: usize = 64;
/// The planned representation of multiple [ShardQueryRequest]s, which flattens all the
/// leaf queries into a batch of searches and scrolls.
#[derive(Debug, Default)]
pub struct PlannedQuery {
    /// References to the searches and scrolls, and how to merge them.
    /// This retains the recursive structure of the original queries.
    ///
    /// One per each query in the batch
    pub root_plans: Vec<RootPlan>,
    /// All the leaf core searches
    pub searches: Vec<CoreSearchRequest>,
    /// All the leaf scrolls
    pub scrolls: Vec<QueryScrollRequestInternal>,
}
/// Plan for one query of the batch: how to gather/merge sources, plus which
/// payload and vectors to return with the final results.
#[derive(Debug, PartialEq)]
pub struct RootPlan {
    /// Recursive merge structure mirroring the query's prefetch tree.
    pub merge_plan: MergePlan,
    /// Vectors to include in the final response.
    pub with_vector: WithVector,
    /// Payload selection for the final response.
    pub with_payload: WithPayloadInterface,
}
#[derive(Debug, PartialEq)]
pub struct MergePlan {
    /// Gather all these sources
    pub sources: Vec<Source>,
    /// How to merge the sources
    ///
    /// If this is [None], then it means one thing:
    /// * It is a top-level query without prefetches, so sources must be of length 1.
    pub rescore_stages: Option<RescoreStages>,
}
/// Where the points of one source come from.
#[derive(Debug, PartialEq)]
pub enum Source {
    /// A reference offset into the main search batch
    SearchesIdx(usize),
    /// A reference offset into the scrolls list
    ScrollsIdx(usize),
    /// A nested prefetch
    Prefetch(Box<MergePlan>),
}
#[derive(Debug, PartialEq)]
pub struct RescoreStages {
    /// Rescore at shard level, before merging results from all shards.
    /// This is applicable if scores are independent for points
    pub shard_level: Option<RescoreParams>,
    /// Rescore results globally, once all results are obtained from all shards.
    /// This is applicable if scores interdepend, like in Fusion
    pub collection_level: Option<RescoreParams>,
}
impl RescoreStages {
    /// Stages with only a shard-level rescore (scores are point-independent).
    pub fn shard_level(params: RescoreParams) -> Self {
        Self {
            shard_level: Some(params),
            collection_level: None,
        }
    }
    /// Stages with only a collection-level rescore (scores interdepend, e.g. fusion).
    pub fn collection_level(params: RescoreParams) -> Self {
        Self {
            shard_level: None,
            collection_level: Some(params),
        }
    }
}
/// Defines how to merge multiple [sources](Source)
#[derive(Debug, PartialEq)]
pub struct RescoreParams {
    /// Alter the scores before selecting the best limit
    pub rescore: ScoringQuery,
    /// Keep this many points from the top
    pub limit: usize,
    /// Keep only points with better score than this threshold
    pub score_threshold: Option<OrderedFloat<ScoreType>>,
    /// Parameters for the rescore search request
    pub params: Option<SearchParams>,
}
impl PlannedQuery {
    /// Creates an empty plan; requests are incorporated via [`Self::add`].
    pub fn new() -> Self {
        Self::default()
    }
    /// Plans one [`ShardQueryRequest`] and appends its leaves to the shared
    /// `searches`/`scrolls` batches, pushing the resulting [`RootPlan`].
    ///
    /// Fails validation if the prefetch tree is deeper than
    /// [`MAX_PREFETCH_DEPTH`] or if prefetches lack a merging query.
    pub fn add(&mut self, request: ShardQueryRequest) -> OperationResult<()> {
        let depth = request.prefetches_depth();
        if depth > MAX_PREFETCH_DEPTH {
            return Err(OperationError::validation_error(format!(
                "prefetches depth {depth} exceeds max depth {MAX_PREFETCH_DEPTH}"
            )));
        }
        let ShardQueryRequest {
            prefetches,
            query,
            filter,
            score_threshold,
            limit,
            offset,
            with_vector,
            with_payload,
            params,
        } = request;
        // Adjust limit so that we have enough results when we cut off the offset at a higher level
        let limit = limit + offset;
        // Adjust with_vector based on the root query variant
        let with_vector = match &query {
            None
            | Some(ScoringQuery::Vector(_))
            | Some(ScoringQuery::Fusion(_))
            | Some(ScoringQuery::OrderBy(_))
            | Some(ScoringQuery::Formula(_))
            | Some(ScoringQuery::Sample(_)) => with_vector,
            // MMR needs the `using` vector of each point to compute pairwise
            // similarity, so force-include it in the retrieved vectors.
            Some(ScoringQuery::Mmr(mmr)) => with_vector.merge(&WithVector::from(mmr.using.clone())),
        };
        let root_plan = if prefetches.is_empty() {
            self.root_plan_without_prefetches(
                query,
                filter,
                score_threshold.map(OrderedFloat::into_inner),
                with_vector,
                with_payload,
                params,
                limit,
            )?
        } else {
            self.root_plan_with_prefetches(
                prefetches,
                query,
                filter,
                score_threshold.map(OrderedFloat::into_inner),
                with_vector,
                with_payload,
                params,
                limit,
            )?
        };
        self.root_plans.push(root_plan);
        Ok(())
    }
    /// Plans a request with no prefetches: a single leaf source, with a
    /// collection-level rescore stage only for MMR.
    #[expect(clippy::too_many_arguments)]
    fn root_plan_without_prefetches(
        &mut self,
        query: Option<ScoringQuery>,
        filter: Option<Filter>,
        score_threshold: Option<f32>,
        with_vector: WithVector,
        with_payload: WithPayloadInterface,
        params: Option<SearchParams>,
        limit: usize,
    ) -> OperationResult<RootPlan> {
        let rescore_stages = match &query {
            None => None,
            Some(ScoringQuery::Vector(_)) => None,
            Some(ScoringQuery::Fusion(_)) => None, // Expect fusion to have prefetches
            Some(ScoringQuery::OrderBy(_)) => None,
            Some(ScoringQuery::Formula(_)) => None,
            Some(ScoringQuery::Sample(_)) => None,
            // MMR re-ranks candidates against each other, so it must run once
            // all shards' candidates are gathered.
            Some(ScoringQuery::Mmr(_)) => Some(RescoreStages::collection_level(RescoreParams {
                rescore: query.clone().unwrap(),
                limit,
                score_threshold: score_threshold.map(OrderedFloat),
                params,
            })),
        };
        // Everything must come from a single source.
        let sources = vec![leaf_source_from_scoring_query(
            &mut self.searches,
            &mut self.scrolls,
            query,
            limit,
            params,
            score_threshold,
            filter,
        )?];
        let merge_plan = MergePlan {
            sources,
            // Root-level query without prefetches means we won't do any extra rescoring
            rescore_stages,
        };
        Ok(RootPlan {
            merge_plan,
            with_vector,
            with_payload,
        })
    }
    /// Plans a request with prefetches: recurses into the prefetch tree for
    /// sources, then decides where the final rescore runs (shard vs collection).
    #[expect(clippy::too_many_arguments)]
    fn root_plan_with_prefetches(
        &mut self,
        prefetches: Vec<ShardPrefetch>,
        query: Option<ScoringQuery>,
        filter: Option<Filter>,
        score_threshold: Option<f32>,
        with_vector: WithVector,
        with_payload: WithPayloadInterface,
        params: Option<SearchParams>,
        limit: usize,
    ) -> OperationResult<RootPlan> {
        let rescoring_query = query.ok_or_else(|| {
            OperationError::validation_error("cannot have prefetches without a query".to_string())
        })?;
        let sources =
            recurse_prefetches(&mut self.searches, &mut self.scrolls, prefetches, &filter)?;
        let rescore_stages = match rescoring_query {
            ScoringQuery::Mmr(mmr) => {
                let MmrInternal {
                    vector,
                    using,
                    lambda: _,
                    candidates_limit,
                } = &mmr;
                // Although MMR gets computed at collection level, we select top candidates via a nearest rescoring first
                let shard_level = RescoreParams {
                    rescore: ScoringQuery::Vector(QueryEnum::Nearest(NamedQuery::new(
                        vector.clone(),
                        using,
                    ))),
                    limit: *candidates_limit,
                    score_threshold: score_threshold.map(OrderedFloat),
                    params,
                };
                let collection_level = RescoreParams {
                    rescore: ScoringQuery::Mmr(mmr),
                    limit,
                    score_threshold: score_threshold.map(OrderedFloat),
                    params,
                };
                Some(RescoreStages {
                    shard_level: Some(shard_level),
                    collection_level: Some(collection_level),
                })
            }
            // These score points independently, so rescoring within each shard
            // and merging afterwards is equivalent and cheaper.
            rescore @ (ScoringQuery::Vector(_)
            | ScoringQuery::OrderBy(_)
            | ScoringQuery::Formula(_)
            | ScoringQuery::Sample(_)) => Some(RescoreStages::shard_level(RescoreParams {
                rescore,
                limit,
                score_threshold: score_threshold.map(OrderedFloat),
                params,
            })),
            // We will propagate the intermediate results. Fusion will take place at collection level.
            fusion @ ScoringQuery::Fusion(_) => {
                Some(RescoreStages::collection_level(RescoreParams {
                    rescore: fusion,
                    limit,
                    score_threshold: score_threshold.map(OrderedFloat),
                    params,
                }))
            }
        };
        let merge_plan = MergePlan {
            sources,
            rescore_stages,
        };
        Ok(RootPlan {
            merge_plan,
            with_vector,
            with_payload,
        })
    }
    /// Read-only access to the accumulated leaf scroll requests.
    pub fn scrolls(&self) -> &Vec<QueryScrollRequestInternal> {
        &self.scrolls
    }
}
/// Recursively construct a merge_plan for prefetches.
///
/// Leaf prefetches are appended to `core_searches`/`scrolls` and referenced by
/// index; nested prefetches become [`Source::Prefetch`] subtrees. The parent
/// `propagate_filter` is merged into every prefetch's own filter.
fn recurse_prefetches(
    core_searches: &mut Vec<CoreSearchRequest>,
    scrolls: &mut Vec<QueryScrollRequestInternal>,
    prefetches: Vec<ShardPrefetch>,
    propagate_filter: &Option<Filter>, // Global filter to apply to all prefetches
) -> OperationResult<Vec<Source>> {
    let mut sources = Vec::with_capacity(prefetches.len());
    for prefetch in prefetches {
        let ShardPrefetch {
            prefetches,
            query,
            limit,
            params,
            filter,
            score_threshold,
        } = prefetch;
        // Filters are propagated into the leaves
        let filter = Filter::merge_opts(propagate_filter.clone(), filter);
        let source = if prefetches.is_empty() {
            // This is a leaf prefetch. Fetch this info from the segments
            leaf_source_from_scoring_query(
                core_searches,
                scrolls,
                query,
                limit,
                params,
                score_threshold.map(OrderedFloat::into_inner),
                filter,
            )?
        } else {
            // This has nested prefetches. Recurse into them
            let inner_sources = recurse_prefetches(core_searches, scrolls, prefetches, &filter)?;
            // A non-leaf prefetch must declare how to merge its children.
            let rescore = query.ok_or_else(|| {
                OperationError::validation_error(
                    "cannot have prefetches without a query".to_string(),
                )
            })?;
            // Even if this is a fusion request, it can only be executed at shard level here,
            // because we can't forward the inner results to the collection level without
            // materializing them first.
            let rescore_stages = RescoreStages::shard_level(RescoreParams {
                rescore,
                limit,
                score_threshold,
                params,
            });
            let merge_plan = MergePlan {
                sources: inner_sources,
                rescore_stages: Some(rescore_stages),
            };
            Source::Prefetch(Box::new(merge_plan))
        };
        sources.push(source);
    }
    Ok(sources)
}
/// Crafts a "leaf source" from a scoring query. This means that the scoring query
/// does not act over prefetched points and will be executed over the segments directly.
///
/// Only `Source::SearchesIdx` or `Source::ScrollsIdx` variants are returned.
fn leaf_source_from_scoring_query(
core_searches: &mut Vec<CoreSearchRequest>,
scrolls: &mut Vec<QueryScrollRequestInternal>,
query: Option<ScoringQuery>,
limit: usize,
params: Option<SearchParams>,
score_threshold: Option<f32>,
filter: Option<Filter>,
) -> OperationResult<Source> {
let source = match query {
Some(ScoringQuery::Vector(query_enum)) => {
let core_search = CoreSearchRequest {
query: query_enum,
filter,
params,
limit,
offset: 0,
with_vector: Some(WithVector::from(false)),
with_payload: Some(WithPayloadInterface::from(false)),
score_threshold,
};
let idx = core_searches.len();
core_searches.push(core_search);
Source::SearchesIdx(idx)
}
Some(ScoringQuery::Fusion(_)) => {
return Err(OperationError::validation_error(
"cannot apply Fusion without prefetches".to_string(),
));
}
Some(ScoringQuery::OrderBy(order_by)) => {
let scroll = QueryScrollRequestInternal {
scroll_order: ScrollOrder::ByField(order_by),
filter,
with_vector: WithVector::from(false),
with_payload: WithPayloadInterface::from(false),
limit,
};
let idx = scrolls.len();
scrolls.push(scroll);
Source::ScrollsIdx(idx)
}
Some(ScoringQuery::Formula(_)) => {
return Err(OperationError::validation_error(
"cannot apply Formula without prefetches".to_string(),
));
}
Some(ScoringQuery::Sample(SampleInternal::Random)) => {
let scroll = QueryScrollRequestInternal {
scroll_order: ScrollOrder::Random,
filter,
with_vector: WithVector::from(false),
with_payload: WithPayloadInterface::from(false),
limit,
};
let idx = scrolls.len();
scrolls.push(scroll);
Source::ScrollsIdx(idx)
}
Some(ScoringQuery::Mmr(MmrInternal {
vector,
using,
lambda: _,
candidates_limit,
})) => {
let query = QueryEnum::Nearest(NamedQuery::new(vector, using));
let core_search = CoreSearchRequest {
query,
filter,
score_threshold,
with_vector: Some(WithVector::from(false)),
with_payload: Some(WithPayloadInterface::from(false)),
offset: 0,
params,
limit: candidates_limit,
};
let idx = core_searches.len();
core_searches.push(core_search);
Source::SearchesIdx(idx)
}
None => {
let scroll = QueryScrollRequestInternal {
scroll_order: Default::default(),
filter,
with_vector: WithVector::from(false),
with_payload: WithPayloadInterface::from(false),
limit,
};
let idx = scrolls.len();
scrolls.push(scroll);
Source::ScrollsIdx(idx)
}
};
Ok(source)
}
impl TryFrom<Vec<ShardQueryRequest>> for PlannedQuery {
    type Error = OperationError;

    /// Plans a whole batch of requests into a single [`PlannedQuery`],
    /// aborting on the first request that fails validation.
    fn try_from(requests: Vec<ShardQueryRequest>) -> Result<Self, Self::Error> {
        requests
            .into_iter()
            .try_fold(Self::new(), |mut planned, request| {
                planned.add(request)?;
                Ok(planned)
            })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/formula.rs | lib/shard/src/query/formula.rs | use std::collections::{HashMap, HashSet};
use std::fmt;
use api::grpc::DecayParamsExpression;
use api::rest::GeoDistance;
use api::{grpc, rest};
use common::types::ScoreType;
use itertools::Itertools;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::index::query_optimization::rescore_formula::parsed_formula::*;
use segment::json_path::JsonPath;
use segment::types::{Condition, GeoPoint};
use serde::Serialize;
use serde_json::Value;
/// Unparsed formula plus per-variable default values, as exchanged between
/// the REST/gRPC APIs and the shard (parsed into `ParsedFormula` on use).
#[derive(Clone, Debug, PartialEq, Serialize)]
pub struct FormulaInternal {
    /// The expression tree of the formula.
    pub formula: ExpressionInternal,
    /// Fallback values, keyed by raw variable id strings.
    pub defaults: HashMap<String, Value>,
}
impl TryFrom<FormulaInternal> for ParsedFormula {
    type Error = OperationError;

    /// Parses the internal formula, collecting the payload variables and
    /// filter conditions it references, and parsing default-value keys into
    /// variable ids.
    fn try_from(value: FormulaInternal) -> Result<Self, Self::Error> {
        let FormulaInternal { formula, defaults } = value;
        // Filled in as side effects of walking the expression tree.
        let mut payload_vars = HashSet::new();
        let mut conditions = Vec::new();
        let parsed_expression = formula.parse_and_convert(&mut payload_vars, &mut conditions)?;
        // Default keys arrive as raw strings and must parse as variable ids.
        let defaults = defaults
            .into_iter()
            .map(|(key, value)| {
                let key = key
                    .as_str()
                    .parse()
                    .map_err(|msg| failed_to_parse("variable ID", &key, &msg))?;
                OperationResult::Ok((key, value))
            })
            .try_collect()?;
        Ok(ParsedFormula {
            formula: parsed_expression,
            payload_vars,
            conditions,
            defaults,
        })
    }
}
/// API-agnostic formula expression tree, convertible from both REST and gRPC
/// representations and parseable into `ParsedExpression`.
#[derive(Clone, Debug, PartialEq, Serialize)]
pub enum ExpressionInternal {
    /// Literal constant.
    Constant(f32),
    /// Raw variable id string (parsed later into a `VariableId`).
    Variable(String),
    /// Filter condition evaluated as a score term.
    Condition(Box<Condition>),
    /// Distance from `origin` to the geo point at payload path `to`.
    GeoDistance {
        origin: GeoPoint,
        to: JsonPath,
    },
    /// Date-time literal, kept as a string until parsing.
    Datetime(String),
    /// Date-time read from a payload field.
    DatetimeKey(JsonPath),
    /// Product of sub-expressions.
    Mult(Vec<ExpressionInternal>),
    /// Sum of sub-expressions.
    Sum(Vec<ExpressionInternal>),
    /// Negation.
    Neg(Box<ExpressionInternal>),
    /// Division, with an optional fallback when the divisor is zero.
    Div {
        left: Box<ExpressionInternal>,
        right: Box<ExpressionInternal>,
        by_zero_default: Option<ScoreType>,
    },
    Sqrt(Box<ExpressionInternal>),
    Pow {
        base: Box<ExpressionInternal>,
        exponent: Box<ExpressionInternal>,
    },
    Exp(Box<ExpressionInternal>),
    Log10(Box<ExpressionInternal>),
    Ln(Box<ExpressionInternal>),
    Abs(Box<ExpressionInternal>),
    /// Decay function (linear/exponential/gaussian) of `x` around `target`.
    Decay {
        kind: DecayKind,
        x: Box<ExpressionInternal>,
        target: Option<Box<ExpressionInternal>>,
        midpoint: Option<f32>,
        scale: Option<f32>,
    },
}
impl ExpressionInternal {
    /// Recursively parses this expression into a `ParsedExpression`.
    ///
    /// Side effects: payload paths the expression reads are added to
    /// `payload_vars`, and referenced conditions are appended to `conditions`
    /// (the parsed tree keeps only their indices).
    fn parse_and_convert(
        self,
        payload_vars: &mut HashSet<JsonPath>,
        conditions: &mut Vec<Condition>,
    ) -> OperationResult<ParsedExpression> {
        let expr = match self {
            ExpressionInternal::Constant(c) => {
                ParsedExpression::Constant(PreciseScoreOrdered::from(PreciseScore::from(c)))
            }
            ExpressionInternal::Variable(var) => {
                let var: VariableId = var
                    .parse()
                    .map_err(|msg| failed_to_parse("variable ID", &var, &msg))?;
                // Payload-backed variables must be fetched; record the path.
                if let VariableId::Payload(payload_var) = var.clone() {
                    payload_vars.insert(payload_var);
                }
                ParsedExpression::Variable(var)
            }
            ExpressionInternal::Condition(condition) => {
                // Conditions are stored out-of-band; the expression keeps an index.
                let condition_id = conditions.len();
                conditions.push(*condition);
                ParsedExpression::new_condition_id(condition_id)
            }
            ExpressionInternal::GeoDistance { origin, to } => {
                payload_vars.insert(to.clone());
                ParsedExpression::new_geo_distance(origin, to)
            }
            ExpressionInternal::Datetime(dt_str) => {
                ParsedExpression::Datetime(DatetimeExpression::Constant(
                    dt_str
                        .parse()
                        .map_err(|err| failed_to_parse("date-time", &dt_str, err))?,
                ))
            }
            ExpressionInternal::DatetimeKey(json_path) => {
                payload_vars.insert(json_path.clone());
                ParsedExpression::Datetime(DatetimeExpression::PayloadVariable(json_path))
            }
            ExpressionInternal::Mult(internal_expressions) => ParsedExpression::Mult(
                internal_expressions
                    .into_iter()
                    .map(|expr| expr.parse_and_convert(payload_vars, conditions))
                    .try_collect()?,
            ),
            ExpressionInternal::Sum(expression_internals) => ParsedExpression::Sum(
                expression_internals
                    .into_iter()
                    .map(|expr| expr.parse_and_convert(payload_vars, conditions))
                    .try_collect()?,
            ),
            ExpressionInternal::Neg(expression_internal) => ParsedExpression::new_neg(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            ),
            ExpressionInternal::Div {
                left,
                right,
                by_zero_default,
            } => ParsedExpression::new_div(
                left.parse_and_convert(payload_vars, conditions)?,
                right.parse_and_convert(payload_vars, conditions)?,
                by_zero_default.map(PreciseScore::from),
            ),
            ExpressionInternal::Sqrt(expression_internal) => ParsedExpression::Sqrt(Box::new(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            )),
            ExpressionInternal::Pow { base, exponent } => ParsedExpression::Pow {
                base: Box::new(base.parse_and_convert(payload_vars, conditions)?),
                exponent: Box::new(exponent.parse_and_convert(payload_vars, conditions)?),
            },
            ExpressionInternal::Exp(expression_internal) => ParsedExpression::Exp(Box::new(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            )),
            ExpressionInternal::Log10(expression_internal) => ParsedExpression::Log10(Box::new(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            )),
            ExpressionInternal::Ln(expression_internal) => ParsedExpression::Ln(Box::new(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            )),
            ExpressionInternal::Abs(expression_internal) => ParsedExpression::Abs(Box::new(
                expression_internal.parse_and_convert(payload_vars, conditions)?,
            )),
            ExpressionInternal::Decay {
                kind,
                x,
                target,
                midpoint,
                scale,
            } => {
                // midpoint/scale are folded into a single decay rate (lambda).
                let lambda = ParsedExpression::decay_params_to_lambda(midpoint, scale, kind)?;
                let x = x.parse_and_convert(payload_vars, conditions)?;
                let target = target
                    .map(|t| t.parse_and_convert(payload_vars, conditions))
                    .transpose()?
                    .map(Box::new);
                ParsedExpression::Decay {
                    kind,
                    x: Box::new(x),
                    target,
                    lambda: PreciseScoreOrdered::from(lambda),
                }
            }
        };
        Ok(expr)
    }
}
/// Builds a uniform validation error for a `value` that failed to parse as `what`.
fn failed_to_parse(what: &str, value: &str, message: impl fmt::Display) -> OperationError {
    let description = format!("failed to parse {what} {value}: {message}");
    OperationError::validation_error(description)
}
impl From<rest::FormulaQuery> for FormulaInternal {
    /// Converts the REST formula query into the shard-internal representation,
    /// keeping the defaults map as-is.
    fn from(value: rest::FormulaQuery) -> Self {
        let rest::FormulaQuery { formula, defaults } = value;
        let formula = ExpressionInternal::from(formula);
        Self { formula, defaults }
    }
}
impl TryFrom<grpc::Formula> for FormulaInternal {
    type Error = tonic::Status;

    /// Converts a gRPC formula, decoding each protobuf default value into JSON.
    fn try_from(formula: grpc::Formula) -> Result<Self, Self::Error> {
        let grpc::Formula {
            expression,
            defaults,
        } = formula;
        // `expression` is required on the wire.
        let expression = expression
            .ok_or_else(|| tonic::Status::invalid_argument("missing field: expression"))?;
        let expression = ExpressionInternal::try_from(expression)?;
        let defaults = defaults
            .into_iter()
            .map(|(key, value)| {
                let value = api::conversions::json::proto_to_json(value)?;
                Result::<_, tonic::Status>::Ok((key, value))
            })
            .try_collect()?;
        Ok(Self {
            formula: expression,
            defaults,
        })
    }
}
impl From<rest::Expression> for ExpressionInternal {
    /// Infallible, structure-preserving conversion from the REST expression
    /// tree: each REST wrapper struct is unwrapped onto the matching internal
    /// variant, recursing through boxed sub-expressions.
    fn from(value: rest::Expression) -> Self {
        match value {
            rest::Expression::Constant(c) => ExpressionInternal::Constant(c),
            rest::Expression::Variable(key) => ExpressionInternal::Variable(key),
            rest::Expression::Condition(condition) => ExpressionInternal::Condition(condition),
            rest::Expression::GeoDistance(GeoDistance {
                geo_distance: rest::GeoDistanceParams { origin, to },
            }) => ExpressionInternal::GeoDistance { origin, to },
            rest::Expression::Datetime(rest::DatetimeExpression { datetime }) => {
                ExpressionInternal::Datetime(datetime)
            }
            rest::Expression::DatetimeKey(rest::DatetimeKeyExpression { datetime_key }) => {
                ExpressionInternal::DatetimeKey(datetime_key)
            }
            rest::Expression::Mult(rest::MultExpression { mult: exprs }) => {
                ExpressionInternal::Mult(exprs.into_iter().map(ExpressionInternal::from).collect())
            }
            rest::Expression::Sum(rest::SumExpression { sum: exprs }) => {
                ExpressionInternal::Sum(exprs.into_iter().map(ExpressionInternal::from).collect())
            }
            rest::Expression::Neg(rest::NegExpression { neg: expr }) => {
                ExpressionInternal::Neg(Box::new(ExpressionInternal::from(*expr)))
            }
            rest::Expression::Div(rest::DivExpression {
                div:
                    rest::DivParams {
                        left,
                        right,
                        by_zero_default,
                    },
            }) => {
                let left = Box::new((*left).into());
                let right = Box::new((*right).into());
                ExpressionInternal::Div {
                    left,
                    right,
                    by_zero_default,
                }
            }
            rest::Expression::Sqrt(sqrt_expression) => {
                ExpressionInternal::Sqrt(Box::new(ExpressionInternal::from(*sqrt_expression.sqrt)))
            }
            rest::Expression::Pow(rest::PowExpression { pow }) => ExpressionInternal::Pow {
                base: Box::new(ExpressionInternal::from(*pow.base)),
                exponent: Box::new(ExpressionInternal::from(*pow.exponent)),
            },
            rest::Expression::Exp(rest::ExpExpression { exp: expr }) => {
                ExpressionInternal::Exp(Box::new(ExpressionInternal::from(*expr)))
            }
            rest::Expression::Log10(rest::Log10Expression { log10: expr }) => {
                ExpressionInternal::Log10(Box::new(ExpressionInternal::from(*expr)))
            }
            rest::Expression::Ln(rest::LnExpression { ln: expr }) => {
                ExpressionInternal::Ln(Box::new(ExpressionInternal::from(*expr)))
            }
            rest::Expression::Abs(rest::AbsExpression { abs: expr }) => {
                ExpressionInternal::Abs(Box::new(ExpressionInternal::from(*expr)))
            }
            // The three decay flavors share DecayParamsExpression and differ
            // only in the DecayKind they carry.
            rest::Expression::LinDecay(rest::LinDecayExpression {
                lin_decay:
                    rest::DecayParamsExpression {
                        x,
                        target,
                        midpoint,
                        scale,
                    },
            }) => ExpressionInternal::Decay {
                kind: DecayKind::Lin,
                x: Box::new(ExpressionInternal::from(*x)),
                target: target.map(|t| Box::new(ExpressionInternal::from(*t))),
                midpoint,
                scale,
            },
            rest::Expression::ExpDecay(rest::ExpDecayExpression {
                exp_decay:
                    rest::DecayParamsExpression {
                        x,
                        target,
                        midpoint,
                        scale,
                    },
            }) => ExpressionInternal::Decay {
                kind: DecayKind::Exp,
                x: Box::new(ExpressionInternal::from(*x)),
                target: target.map(|t| Box::new(ExpressionInternal::from(*t))),
                midpoint,
                scale,
            },
            rest::Expression::GaussDecay(rest::GaussDecayExpression {
                gauss_decay:
                    rest::DecayParamsExpression {
                        x,
                        target,
                        midpoint,
                        scale,
                    },
            }) => ExpressionInternal::Decay {
                kind: DecayKind::Gauss,
                x: Box::new(ExpressionInternal::from(*x)),
                target: target.map(|t| Box::new(ExpressionInternal::from(*t))),
                midpoint,
                scale,
            },
        }
    }
}
impl TryFrom<grpc::Expression> for ExpressionInternal {
    type Error = tonic::Status;

    /// Fallible conversion from the gRPC expression tree: required boxed
    /// sub-expressions are validated to be present, payload keys are parsed,
    /// and child expressions are converted recursively.
    fn try_from(expression: grpc::Expression) -> Result<Self, Self::Error> {
        use grpc::expression::Variant;
        // The oneof `variant` is required.
        let variant = expression
            .variant
            .ok_or_else(|| tonic::Status::invalid_argument("missing field: variant"))?;
        let expression = match variant {
            Variant::Constant(constant) => ExpressionInternal::Constant(constant),
            Variant::Variable(variable) => ExpressionInternal::Variable(variable),
            Variant::Condition(condition) => {
                let condition = grpc::conversions::grpc_condition_into_condition(condition)?
                    .ok_or_else(|| tonic::Status::invalid_argument("missing field: condition"))?;
                ExpressionInternal::Condition(Box::new(condition))
            }
            Variant::GeoDistance(grpc::GeoDistance { origin, to }) => {
                let origin = origin
                    .ok_or_else(|| tonic::Status::invalid_argument("missing field: origin"))?
                    .into();
                // `to` is a raw string that must be a valid JsonPath.
                let to = to
                    .parse()
                    .map_err(|_| tonic::Status::invalid_argument("invalid payload key"))?;
                ExpressionInternal::GeoDistance { origin, to }
            }
            Variant::Datetime(dt_str) => ExpressionInternal::Datetime(dt_str),
            Variant::DatetimeKey(dt_key) => {
                let json_path = dt_key
                    .parse()
                    .map_err(|_| tonic::Status::invalid_argument("invalid payload key"))?;
                ExpressionInternal::DatetimeKey(json_path)
            }
            Variant::Mult(grpc::MultExpression { mult }) => {
                let mult = mult
                    .into_iter()
                    .map(ExpressionInternal::try_from)
                    .try_collect()?;
                ExpressionInternal::Mult(mult)
            }
            Variant::Sum(grpc::SumExpression { sum }) => {
                let sum = sum
                    .into_iter()
                    .map(ExpressionInternal::try_from)
                    .try_collect()?;
                ExpressionInternal::Sum(sum)
            }
            Variant::Div(div) => {
                let grpc::DivExpression {
                    left,
                    right,
                    by_zero_default,
                } = *div;
                let left =
                    *left.ok_or_else(|| tonic::Status::invalid_argument("missing field: left"))?;
                let right = *right
                    .ok_or_else(|| tonic::Status::invalid_argument("missing field: right"))?;
                ExpressionInternal::Div {
                    left: Box::new(left.try_into()?),
                    right: Box::new(right.try_into()?),
                    by_zero_default,
                }
            }
            Variant::Neg(expression) => {
                ExpressionInternal::Neg(Box::new((*expression).try_into()?))
            }
            Variant::Abs(expression) => {
                ExpressionInternal::Abs(Box::new((*expression).try_into()?))
            }
            Variant::Sqrt(expression) => {
                ExpressionInternal::Sqrt(Box::new((*expression).try_into()?))
            }
            Variant::Pow(pow_expression) => {
                let grpc::PowExpression { base, exponent } = *pow_expression;
                let raw_base =
                    *base.ok_or_else(|| tonic::Status::invalid_argument("missing field: base"))?;
                let raw_exponent = *exponent
                    .ok_or_else(|| tonic::Status::invalid_argument("missing field: exponent"))?;
                ExpressionInternal::Pow {
                    base: Box::new(raw_base.try_into()?),
                    exponent: Box::new(raw_exponent.try_into()?),
                }
            }
            Variant::Exp(expression) => {
                ExpressionInternal::Exp(Box::new((*expression).try_into()?))
            }
            Variant::Log10(expression) => {
                ExpressionInternal::Log10(Box::new((*expression).try_into()?))
            }
            Variant::Ln(expression) => ExpressionInternal::Ln(Box::new((*expression).try_into()?)),
            // The decay variants share one conversion, parameterized by kind.
            Variant::LinDecay(decay_params) => {
                try_from_decay_params(*decay_params, DecayKind::Lin)?
            }
            Variant::ExpDecay(decay_params) => {
                try_from_decay_params(*decay_params, DecayKind::Exp)?
            }
            Variant::GaussDecay(decay_params) => {
                try_from_decay_params(*decay_params, DecayKind::Gauss)?
            }
        };
        Ok(expression)
    }
}
/// Shared conversion for the three gRPC decay variants; `kind` selects which
/// decay curve the resulting expression uses.
fn try_from_decay_params(
    params: DecayParamsExpression,
    kind: DecayKind,
) -> Result<ExpressionInternal, tonic::Status> {
    let grpc::DecayParamsExpression {
        x,
        target,
        midpoint,
        scale,
    } = params;
    // `x` is required on the wire; `target` is optional.
    let raw_x = *x.ok_or_else(|| tonic::Status::invalid_argument("missing field: x"))?;
    let x = Box::new(ExpressionInternal::try_from(raw_x)?);
    let target = match target {
        Some(t) => Some(Box::new(ExpressionInternal::try_from(*t)?)),
        None => None,
    };
    Ok(ExpressionInternal::Decay {
        kind,
        x,
        target,
        midpoint,
        scale,
    })
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/mmr/tests.rs | lib/shard/src/query/mmr/tests.rs | use std::collections::HashMap;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use ordered_float::OrderedFloat;
use rstest::rstest;
use segment::data_types::vectors::{
MultiDenseVectorInternal, VectorInternal, VectorStructInternal,
};
use segment::types::{
Distance, MultiVectorComparator, MultiVectorConfig, PointIdType, ScoredPoint, VectorNameBuf,
};
use sparse::common::sparse_vector::SparseVector;
use strum::IntoEnumIterator;
use super::mmr_from_points_with_vector;
use crate::query::MmrInternal;
/// Create a ScoredPoint with a dense vector attached.
///
/// `vector_name` of `None` stores the vector under the unnamed ("") key.
fn create_scored_point_with_vector(
    id: PointIdType,
    vector: Vec<f32>,
    vector_name: Option<&str>,
) -> ScoredPoint {
    let name = vector_name.unwrap_or("").to_string();
    let vectors = HashMap::from([(name, VectorInternal::Dense(vector))]);
    ScoredPoint {
        id,
        version: 0,
        score: 0.0,
        payload: None,
        vector: Some(VectorStructInternal::Named(vectors)),
        shard_key: None,
        order_value: None,
    }
}
/// Create a ScoredPoint without any vector attached.
///
/// Used to exercise MMR's handling of points missing the required vector.
fn create_scored_point_without_vector(id: PointIdType) -> ScoredPoint {
    ScoredPoint {
        id,
        version: 0,
        score: 0.0,
        payload: None,
        vector: None,
        shard_key: None,
        order_value: None,
    }
}
/// Create a ScoredPoint with a sparse vector attached.
///
/// Panics if `indices`/`values` do not form a valid sparse vector.
fn create_scored_point_with_sparse_vector(
    id: PointIdType,
    indices: Vec<u32>,
    values: Vec<f32>,
    vector_name: Option<&str>,
) -> ScoredPoint {
    let sparse = SparseVector::new(indices, values).expect("Valid sparse vector");
    let name = vector_name.unwrap_or("").to_string();
    let vectors = HashMap::from([(name, VectorInternal::Sparse(sparse))]);
    ScoredPoint {
        id,
        version: 0,
        score: 0.0,
        payload: None,
        vector: Some(VectorStructInternal::Named(vectors)),
        shard_key: None,
        order_value: None,
    }
}
/// Create a ScoredPoint with a multi-dense vector attached.
fn create_scored_point_with_multi_vector(
    id: PointIdType,
    vectors: Vec<Vec<f32>>,
    vector_name: Option<&str>,
) -> ScoredPoint {
    let multi = MultiDenseVectorInternal::new_unchecked(vectors);
    let name = vector_name.unwrap_or("").to_string();
    let vector_map = HashMap::from([(name, VectorInternal::MultiDense(multi))]);
    ScoredPoint {
        id,
        version: 0,
        score: 0.0,
        payload: None,
        vector: Some(VectorStructInternal::Named(vector_map)),
        shard_key: None,
        order_value: None,
    }
}
/// Test the basic MMR functionality with multiple lambda values.
///
/// Higher lambda favors relevance to the query vector; lower lambda favors
/// diversity between selected points.
#[rstest]
#[case::full_relevance(1.0, &[1, 2, 3])]
#[case::balanced(0.5, &[1, 3, 2])]
#[case::more_diversity(0.01, &[1, 5, 4])]
fn test_mmr_lambda(#[case] lambda: f32, #[case] expected_order: &[u64]) {
    let distance = Distance::Euclid;
    // Points spread around the query vector [1.0, 0.0] at varying distances.
    let points = vec![
        create_scored_point_with_vector(1.into(), vec![1.0, 0.05], None),
        create_scored_point_with_vector(2.into(), vec![0.95, 0.15], None),
        create_scored_point_with_vector(3.into(), vec![0.8, 0.0], None),
        create_scored_point_with_vector(4.into(), vec![0.85, 0.25], None),
        create_scored_point_with_vector(5.into(), vec![1.0, 0.5], None),
    ];
    let mmr = MmrInternal {
        vector: vec![1.0, 0.0].into(),
        using: VectorNameBuf::from(""),
        lambda: OrderedFloat(lambda),
        candidates_limit: 100,
    };
    let result = mmr_from_points_with_vector(
        points.clone(),
        mmr,
        distance,
        None,
        3,
        HwMeasurementAcc::disposable(),
    );
    let scored_points = result.unwrap();
    assert_eq!(scored_points.len(), 3);
    // With MMR, we should get 3 diverse points, and the first should be the highest original score
    // The exact order depends on the similarity calculations, but we should get all different points
    let selected_ids: Vec<_> = scored_points.iter().map(|p| p.id).collect();
    let expected_ids = expected_order
        .iter()
        .map(|&id| id.into())
        .collect::<Vec<_>>();
    assert_eq!(selected_ids, expected_ids);
    // The scores should be modified by MMR (different from original scores)
    assert_ne!(scored_points[0].score, 0.9); // MMR should modify the original scores
}
/// Test MMR behavior with insufficient points (< 2) for similarity computation.
#[test]
fn test_mmr_less_than_two_points() {
let distance = Distance::Cosine;
let mmr = MmrInternal {
vector: vec![1.0, 0.0].into(), // TODO: MMR vector dimension does not match point vector dimension!?
using: VectorNameBuf::from(""),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
// Test with empty points
let empty_points = vec![];
let result = mmr_from_points_with_vector(
empty_points,
mmr.clone(),
distance,
None,
5,
HwMeasurementAcc::disposable(),
);
assert!(result.is_ok());
assert_eq!(result.unwrap().len(), 0);
// Test with one point
let single_point = vec![create_scored_point_with_vector(
1.into(),
vec![1.0, 0.0, 0.0],
None,
)];
let result = mmr_from_points_with_vector(
single_point,
mmr,
distance,
None,
5,
HwMeasurementAcc::disposable(),
);
assert!(result.is_ok());
let scored_points = result.unwrap();
assert_eq!(scored_points.len(), 1);
assert_eq!(scored_points[0].id, 1.into());
}
#[test]
fn test_mmr_points_without_required_vector() {
let distance = Distance::Cosine;
let points = vec![
create_scored_point_with_vector(1.into(), vec![1.0, 0.0, 0.0], Some("custom")),
create_scored_point_without_vector(2.into()), // No vector
create_scored_point_with_vector(3.into(), vec![0.0, 1.0, 0.0], Some("other")), // Wrong vector name
create_scored_point_with_vector(4.into(), vec![0.0, 0.0, 1.0], Some("custom")),
];
let mmr = MmrInternal {
vector: vec![1.0, 0.0, 0.0].into(),
using: VectorNameBuf::from("custom"),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
let result = mmr_from_points_with_vector(
points,
mmr,
distance,
None,
5,
HwMeasurementAcc::disposable(),
);
assert!(result.is_ok());
let scored_points = result.unwrap();
// Only points 1 and 4 should remain (they have the "custom" vector)
assert_eq!(scored_points.len(), 2);
let selected_ids: Vec<_> = scored_points.iter().map(|p| p.id).collect();
assert!(selected_ids.contains(&(1.into())));
assert!(selected_ids.contains(&(4.into())));
}
#[test]
fn test_mmr_duplicate_points() {
let distance = Distance::Cosine;
// Include duplicate point IDs
let points = vec![
create_scored_point_with_vector(1.into(), vec![1.0, 0.0, 0.0], None),
create_scored_point_with_vector(1.into(), vec![0.5, 0.5, 0.0], None), // Duplicate ID
create_scored_point_with_vector(2.into(), vec![0.0, 1.0, 0.0], None),
];
let mmr = MmrInternal {
vector: vec![1.0, 0.0, 0.0].into(),
using: VectorNameBuf::from(""),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
let result = mmr_from_points_with_vector(
points,
mmr,
distance,
None,
5,
HwMeasurementAcc::disposable(),
);
assert!(result.is_ok());
let scored_points = result.unwrap();
// Duplicates should be removed, so we should have 2 unique points
assert_eq!(scored_points.len(), 2);
let unique_ids: std::collections::HashSet<_> = scored_points.iter().map(|p| p.id).collect();
assert_eq!(unique_ids.len(), 2);
}
#[test]
fn test_mmr_dense_vectors() {
// Test dense vectors with all distance metrics
let dense_points = vec![
create_scored_point_with_vector(1.into(), vec![1.0, 0.0, 0.0], None),
create_scored_point_with_vector(2.into(), vec![0.0, 1.0, 0.0], None),
create_scored_point_with_vector(3.into(), vec![0.0, 0.0, 1.0], None),
];
let mmr = MmrInternal {
vector: vec![1.0, 0.0, 0.0].into(),
using: VectorNameBuf::from(""),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
// Test with all distance metrics for dense vectors
for distance in Distance::iter() {
let result = mmr_from_points_with_vector(
dense_points.clone(),
mmr.clone(),
distance,
None,
3,
HwMeasurementAcc::disposable(),
);
assert!(
result.is_ok(),
"Dense vectors failed for distance metric: {distance:?}"
);
let scored_points = result.unwrap();
assert_eq!(scored_points.len(), 3);
}
}
#[test]
fn test_mmr_sparse_vectors() {
// Test sparse vectors with dot product (only supported distance for sparse)
let distance = Distance::Dot;
let sparse_vector_name = "sparse";
let sparse_points = vec![
create_scored_point_with_sparse_vector(
4.into(),
vec![0, 2, 5],
vec![1.0, 0.5, 0.3],
Some(sparse_vector_name),
),
create_scored_point_with_sparse_vector(
5.into(),
vec![1, 3, 4],
vec![0.8, 0.6, 0.4],
Some(sparse_vector_name),
),
create_scored_point_with_sparse_vector(
6.into(),
vec![0, 1, 6],
vec![0.7, 0.9, 0.2],
Some(sparse_vector_name),
),
];
let sparse_mmr = MmrInternal {
vector: SparseVector::new(vec![0, 2, 5], vec![1.0, 0.5, 0.3])
.unwrap()
.into(),
using: VectorNameBuf::from(sparse_vector_name),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
let sparse_result = mmr_from_points_with_vector(
sparse_points,
sparse_mmr,
distance,
None,
3,
HwMeasurementAcc::disposable(),
)
.unwrap();
assert_eq!(sparse_result.len(), 3);
}
#[test]
fn test_mmr_multi_vector() {
// Test multi-vectors with all supported distance metrics
let multi_vector_config = MultiVectorConfig {
comparator: MultiVectorComparator::MaxSim,
};
let multi_vector_name = "multi";
let multi_points = vec![
create_scored_point_with_multi_vector(
7.into(),
vec![vec![1.0, 0.0], vec![0.0, 1.0]],
Some(multi_vector_name),
),
create_scored_point_with_multi_vector(
8.into(),
vec![vec![0.0, 1.0], vec![1.0, 0.0]],
Some(multi_vector_name),
),
create_scored_point_with_multi_vector(
9.into(),
vec![vec![1.0, 1.0], vec![0.0, 0.0]],
Some(multi_vector_name),
),
];
let multi_mmr = MmrInternal {
vector: MultiDenseVectorInternal::new(vec![1.0, 0.0, 0.0, 1.0], 2).into(),
using: VectorNameBuf::from(multi_vector_name),
lambda: OrderedFloat(0.5),
candidates_limit: 100,
};
for distance in Distance::iter() {
let multi_result = mmr_from_points_with_vector(
multi_points.clone(),
multi_mmr.clone(),
distance,
Some(multi_vector_config),
3,
HwMeasurementAcc::disposable(),
);
assert!(
multi_result.is_ok(),
"Multi-vectors failed for distance metric: {distance:?}"
);
let multi_scored_points = multi_result.unwrap();
assert_eq!(multi_scored_points.len(), 3);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/mmr/mod.rs | lib/shard/src/query/mmr/mod.rs | mod lazy_matrix;
#[cfg(test)]
mod tests;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::ScoreType;
use indexmap::IndexSet;
use itertools::Itertools as _;
use ordered_float::OrderedFloat;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::vectors::{QueryVector, VectorInternal, VectorRef};
use segment::types::{Distance, MultiVectorConfig, ScoredPoint};
use segment::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
use segment::vector_storage::multi_dense::volatile_multi_dense_vector_storage::new_volatile_multi_dense_vector_storage;
use segment::vector_storage::sparse::volatile_sparse_vector_storage::new_volatile_sparse_vector_storage;
use segment::vector_storage::{VectorStorage as _, VectorStorageEnum, new_raw_scorer};
use self::lazy_matrix::LazyMatrix;
use super::MmrInternal;
/// Calculate the MMR (Maximal Marginal Relevance) score for a set of points with vectors.
///
/// Assumes the points have vectors attached. If not, the entire point will be discarded.
///
/// # Arguments
///
/// * `collection_params` - The parameters of the collection. Used to determine the right distance metric, or multivec config.
/// * `points_with_vector` - The points with vectors.
/// * `mmr` - The MMR parameters.
/// * `limit` - The maximum number of points to return.
/// * `search_runtime_handle` - The runtime handle for searching.
/// * `timeout` - The timeout for the operation.
/// * `hw_measurement_acc` - The hardware measurement accumulator.
///
/// # Returns
///
/// A vector of scored points.
pub fn mmr_from_points_with_vector(
points_with_vector: impl IntoIterator<Item = ScoredPoint>,
mmr: MmrInternal,
distance: Distance,
multivector_config: Option<MultiVectorConfig>,
limit: usize,
hw_measurement_acc: HwMeasurementAcc,
) -> OperationResult<Vec<ScoredPoint>> {
let (vectors, candidates): (Vec<_>, Vec<_>) = points_with_vector
.into_iter()
.unique_by(|p| p.id)
.filter_map(|p| {
let vector = p
.vector
.as_ref()
// silently ignore points without this named vector
.and_then(|v| v.get(&mmr.using))
.map(|v| v.to_owned())?;
Some((vector, p))
})
.unzip();
debug_assert_eq!(vectors.len(), candidates.len());
if candidates.is_empty() {
return Ok(candidates);
}
let volatile_storage = create_volatile_storage(
&vectors,
distance,
multivector_config,
hw_measurement_acc.get_counter_cell(),
)?;
if candidates.len() < 2 {
// can't compute MMR for less than 2 points, return with original score
return Ok(candidates);
}
// get similarities against query
let query_similarities = relevance_similarities(
&volatile_storage,
mmr.vector,
hw_measurement_acc.get_counter_cell(),
)?;
// get similarity matrix between candidates
let similarity_matrix = similarity_matrix(&volatile_storage, vectors, hw_measurement_acc)?;
// compute MMR
Ok(maximal_marginal_relevance(
candidates,
query_similarities,
similarity_matrix,
mmr.lambda.0,
limit,
))
}
/// Creates a volatile (in-memory and not persistent) vector storage and inserts the vectors in the provided order.
fn create_volatile_storage(
vectors: &[VectorInternal],
distance: Distance,
multivector_config: Option<MultiVectorConfig>,
hw_counter: HardwareCounterCell,
) -> OperationResult<VectorStorageEnum> {
// Create temporary vector storage
let mut volatile_storage = {
match &vectors[0] {
VectorInternal::Dense(vector) => {
new_volatile_dense_vector_storage(vector.len(), distance)
}
VectorInternal::MultiDense(typed_multi_dense_vector) => {
let multivector_config = multivector_config.ok_or_else(|| {
OperationError::service_error(
"multivectors are present, but no multivector config provided",
)
})?;
new_volatile_multi_dense_vector_storage(
typed_multi_dense_vector.dim,
distance,
multivector_config,
)
}
VectorInternal::Sparse(_) => new_volatile_sparse_vector_storage(),
}
};
// Populate storage with vectors
for (key, vector) in (0..).zip(vectors) {
volatile_storage.insert_vector(key, VectorRef::from(vector), &hw_counter)?;
}
Ok(volatile_storage)
}
/// Compute the "relevance" similarity between a query vector and all vectors in the storage.
fn relevance_similarities(
volatile_storage: &VectorStorageEnum,
query_vector: VectorInternal,
hw_counter: HardwareCounterCell,
) -> OperationResult<Vec<ScoreType>> {
let query = QueryVector::Nearest(query_vector);
let query_scorer = new_raw_scorer(query, volatile_storage, hw_counter)?;
// get similarity between candidates and query
let ids: Vec<_> = (0..volatile_storage.total_vector_count() as u32).collect();
let mut similarities = vec![0.0; ids.len()];
query_scorer.score_points(&ids, &mut similarities);
Ok(similarities)
}
/// Returns a symmetric matrix where entry (i,j) represents the similarity
/// between vector i and vector j. Diagonal entries are 0 (self-similarity is not calculated).
/// Only computes each pair once for efficiency since similarity is symmetric.
///
/// Errors if there are less than 2 vectors.
fn similarity_matrix(
volatile_storage: &VectorStorageEnum,
vectors: Vec<VectorInternal>,
hw_measurement_acc: HwMeasurementAcc,
) -> OperationResult<LazyMatrix<'_>> {
let num_vectors = vectors.len();
// if we have less than 2 points, we can't build a matrix
debug_assert!(
num_vectors >= 2,
"There should be at least two vectors to calculate similarity matrix"
);
if num_vectors < 2 {
return Err(OperationError::service_error(
"There should be at least two vectors to calculate similarity matrix",
));
}
LazyMatrix::new(vectors, volatile_storage, hw_measurement_acc)
}
/// Maximal Marginal Relevance (MMR) algorithm
///
/// Iteratively selects points by considering their similarity to
/// already selected points, combining diversity and relevance.
///
/// # Arguments
///
/// * `candidates` - the list of points to select from
/// * `query_similarities` - similarities to the query for each candidate. Offsets refer to the index of the candidate in the `candidates` vector.
/// * `similarity_matrix` - full pairwise similarity matrix between candidates
/// * `lambda` - the lambda parameter for the MMR algorithm (0.0 = max diversity, 1.0 = max relevance)
/// * `limit` - the maximum number of points to select
fn maximal_marginal_relevance(
candidates: Vec<ScoredPoint>,
query_similarities: Vec<ScoreType>,
mut similarity_matrix: LazyMatrix,
lambda: f32,
limit: usize,
) -> Vec<ScoredPoint> {
let num_candidates = candidates.len();
if num_candidates == 0 || limit == 0 {
return Vec::new();
}
let mut selected_indices = Vec::with_capacity(limit);
let mut remaining_indices: IndexSet<usize, ahash::RandomState> = (0..num_candidates).collect();
// Select first point with highest relevance score
if let Some(best_idx) = remaining_indices
.iter()
.max_by_key(|&candidate_idx| OrderedFloat(query_similarities[*candidate_idx]))
.copied()
{
selected_indices.push(best_idx);
remaining_indices.swap_remove(&best_idx);
}
// Iteratively select remaining points using MMR
while selected_indices.len() < limit && !remaining_indices.is_empty() {
let best_candidate = remaining_indices
.iter()
.map(|&candidate_idx| {
let relevance_score = query_similarities[candidate_idx];
debug_assert!(
selected_indices
.iter()
.all(|&selected_idx| selected_idx != candidate_idx)
);
// Find maximum similarity to any already selected point
let max_similarity_to_selected = selected_indices
.iter()
.map(|selected_idx| {
similarity_matrix.get_similarity(candidate_idx, *selected_idx)
})
.max_by_key(|&sim| OrderedFloat(sim))
.unwrap_or(0.0);
// Calculate MMR score: Ξ» * relevance - (1 - Ξ») * max_similarity_to_selected
let mmr_score =
lambda * relevance_score - (1.0 - lambda) * max_similarity_to_selected;
(candidate_idx, mmr_score)
})
.max_by_key(|(_candidate_idx, mmr_score)| OrderedFloat(*mmr_score));
if let Some((selected_idx, _mmr_score)) = best_candidate {
// Select the best candidate and remove from remaining
remaining_indices.swap_remove(&selected_idx);
selected_indices.push(selected_idx);
} else {
break;
}
}
// Convert selected indices to ScoredPoint results
selected_indices
.into_iter()
.map(|idx| {
// Use original score, already postprocessed.
//
// We prefer this over MMR score because:
// - We already selected the top candidates based on MMR score.
// - If this is performed at collection level, we will pass this score to the user, which is arguably more meaningful.
// - If this is performed at local shard, it might be combined with other shards' results.
// - MMR does not make sense to compare by score with a different set of MMR results
// - It makes more sense to compare by query score.
// - If this isn't the last rescore before sending to collection,
// we are only interested in the selection of points, not the score itself.
candidates[idx].clone()
})
.collect()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/query/mmr/lazy_matrix.rs | lib/shard/src/query/mmr/lazy_matrix.rs | use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::types::{PointOffsetType, ScoreType};
use segment::common::operation_error::OperationResult;
use segment::data_types::vectors::{QueryVector, VectorInternal};
#[cfg(debug_assertions)]
use segment::vector_storage::{Random, VectorStorage};
use segment::vector_storage::{RawScorer, VectorStorageEnum, new_raw_scorer};
/// Compute the similarity matrix lazily.
///
/// Uses a symmetric matrix as a cache layer to compute similarities only once,
/// but only those which are requested.
pub struct LazyMatrix<'storage> {
/// Vec of scorers for each vector. position is parallel to the vectors in the referenced storage.
scorers: Vec<Box<dyn RawScorer + 'storage>>,
// perf: can this Option<ScoreType> be smaller?
// this allocates m*m values. For 1000x1000 it takes 8MB.
/// similarity matrix with possibly unallocated values.
matrix: Vec<Vec<Option<ScoreType>>>,
}
impl<'storage> LazyMatrix<'storage> {
/// Create a new lazy matrix from a list of vectors and a storage.
///
/// Returns an error if there are less than two vectors.
pub fn new(
vectors: Vec<VectorInternal>,
storage: &'storage VectorStorageEnum,
hw_measurement_acc: HwMeasurementAcc,
) -> OperationResult<Self> {
#[cfg(debug_assertions)]
{
for (i, vector) in vectors.iter().enumerate() {
let stored_vector = storage.get_vector::<Random>(i as u32);
assert_eq!(stored_vector.to_owned(), *vector);
}
}
let matrix = vec![vec![None; vectors.len()]; vectors.len()];
// Prepare all scorers
let scorers = vectors
.into_iter()
.map(|vector| {
let query = QueryVector::Nearest(vector);
new_raw_scorer(query, storage, hw_measurement_acc.get_counter_cell())
})
.collect::<OperationResult<Vec<_>>>()?;
Ok(Self { scorers, matrix })
}
pub fn get_similarity(&mut self, i: usize, j: usize) -> ScoreType {
if let Some(similarity) = self.matrix[i][j] {
return similarity;
}
let similarity = self.compute_similarity(i, j);
self.matrix[i][j] = Some(similarity);
similarity
}
fn compute_similarity(&self, i: usize, j: usize) -> ScoreType {
self.scorers[i].score_point(j as PointOffsetType)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operations/payload_ops.rs | lib/shard/src/operations/payload_ops.rs | use std::fmt;
use api::rest::ShardKeySelector;
use schemars::JsonSchema;
use segment::json_path::JsonPath;
use segment::types::{Filter, Payload, PayloadKeyType, PointIdType};
use serde;
use serde::{Deserialize, Serialize};
use strum::{EnumDiscriminants, EnumIter};
use validator::Validate;
/// Define operations description for point payloads manipulation
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(rename_all = "snake_case")]
pub enum PayloadOps {
/// Set payload value, overrides if it is already exists
SetPayload(SetPayloadOp),
/// Deletes specified payload values if they are assigned
DeletePayload(DeletePayloadOp),
/// Drops all Payload values associated with given points.
ClearPayload { points: Vec<PointIdType> },
/// Clear all Payload values by given filter criteria.
ClearPayloadByFilter(Filter),
/// Overwrite full payload with given keys
OverwritePayload(SetPayloadOp),
}
impl PayloadOps {
pub fn point_ids(&self) -> Option<Vec<PointIdType>> {
match self {
Self::SetPayload(op) => op.points.clone(),
Self::DeletePayload(op) => op.points.clone(),
Self::ClearPayload { points } => Some(points.clone()),
Self::ClearPayloadByFilter(_) => None,
Self::OverwritePayload(op) => op.points.clone(),
}
}
pub fn retain_point_ids<F>(&mut self, filter: F)
where
F: Fn(&PointIdType) -> bool,
{
match self {
Self::SetPayload(op) => retain_opt(op.points.as_mut(), filter),
Self::DeletePayload(op) => retain_opt(op.points.as_mut(), filter),
Self::ClearPayload { points } => points.retain(filter),
Self::ClearPayloadByFilter(_) => (),
Self::OverwritePayload(op) => retain_opt(op.points.as_mut(), filter),
}
}
}
fn retain_opt<T, F>(vec: Option<&mut Vec<T>>, filter: F)
where
F: Fn(&T) -> bool,
{
if let Some(vec) = vec {
vec.retain(filter);
}
}
/// This data structure is used in API interface and applied across multiple shards
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(try_from = "SetPayloadShadow")]
pub struct SetPayload {
pub payload: Payload,
/// Assigns payload to each point in this list
pub points: Option<Vec<PointIdType>>,
/// Assigns payload to each point that satisfy this filter condition
#[validate(nested)]
pub filter: Option<Filter>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub shard_key: Option<ShardKeySelector>,
/// Assigns payload to each point that satisfy this path of property
pub key: Option<JsonPath>,
}
/// This data structure is used inside shard operations queue
/// and supposed to be written into WAL of individual shard.
///
/// Unlike `SetPayload` it does not contain `shard_key` field
/// as individual shard does not need to know about shard key
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct SetPayloadOp {
pub payload: Payload,
/// Assigns payload to each point in this list
pub points: Option<Vec<PointIdType>>,
/// Assigns payload to each point that satisfy this filter condition
pub filter: Option<Filter>,
/// Payload selector to indicate property of payload, e.g. `a.b.c`
pub key: Option<JsonPath>,
}
/// This data structure is used in API interface and applied across multiple shards
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone)]
#[serde(try_from = "DeletePayloadShadow")]
pub struct DeletePayload {
/// List of payload keys to remove from payload
pub keys: Vec<PayloadKeyType>,
/// Deletes values from each point in this list
pub points: Option<Vec<PointIdType>>,
/// Deletes values from points that satisfy this filter condition
#[validate(nested)]
pub filter: Option<Filter>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub shard_key: Option<ShardKeySelector>,
}
/// This data structure is used inside shard operations queue
/// and supposed to be written into WAL of individual shard.
///
/// Unlike `DeletePayload` it does not contain `shard_key` field
/// as individual shard does not need to know about shard key
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct DeletePayloadOp {
/// List of payload keys to remove from payload
pub keys: Vec<PayloadKeyType>,
/// Deletes values from each point in this list
pub points: Option<Vec<PointIdType>>,
/// Deletes values from points that satisfy this filter condition
pub filter: Option<Filter>,
}
#[derive(Deserialize)]
struct SetPayloadShadow {
pub payload: Payload,
pub points: Option<Vec<PointIdType>>,
pub filter: Option<Filter>,
pub shard_key: Option<ShardKeySelector>,
pub key: Option<JsonPath>,
}
impl TryFrom<SetPayloadShadow> for SetPayload {
type Error = PointsSelectorValidationError;
fn try_from(value: SetPayloadShadow) -> Result<Self, Self::Error> {
let SetPayloadShadow {
payload,
points,
filter,
shard_key,
key,
} = value;
if points.is_some() || filter.is_some() {
Ok(SetPayload {
payload,
points,
filter,
shard_key,
key,
})
} else {
Err(PointsSelectorValidationError)
}
}
}
#[derive(Deserialize)]
struct DeletePayloadShadow {
pub keys: Vec<PayloadKeyType>,
pub points: Option<Vec<PointIdType>>,
pub filter: Option<Filter>,
pub shard_key: Option<ShardKeySelector>,
}
impl TryFrom<DeletePayloadShadow> for DeletePayload {
type Error = PointsSelectorValidationError;
fn try_from(value: DeletePayloadShadow) -> Result<Self, Self::Error> {
let DeletePayloadShadow {
keys,
points,
filter,
shard_key,
} = value;
if points.is_some() || filter.is_some() {
Ok(DeletePayload {
keys,
points,
filter,
shard_key,
})
} else {
Err(PointsSelectorValidationError)
}
}
}
#[derive(Debug)]
pub struct PointsSelectorValidationError;
impl fmt::Display for PointsSelectorValidationError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Either list of point ids or filter must be provided")
}
}
#[cfg(test)]
mod tests {
use segment::types::{Payload, PayloadContainer};
use serde_json::Value;
use super::*;
#[derive(Debug, Deserialize, Serialize)]
pub struct TextSelector {
pub points: Vec<PointIdType>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct TextSelectorOpt {
pub points: Option<Vec<PointIdType>>,
pub filter: Option<Filter>,
}
#[test]
fn test_replace_with_opt_in_cbor() {
let obj1 = TextSelector {
points: vec![1.into(), 2.into(), 3.into()],
};
let raw_cbor = serde_cbor::to_vec(&obj1).unwrap();
let obj2 = serde_cbor::from_slice::<TextSelectorOpt>(&raw_cbor).unwrap();
eprintln!("obj2 = {obj2:#?}");
assert_eq!(obj1.points, obj2.points.unwrap());
}
#[test]
fn test_serialization() {
let query1 = r#"
{
"set_payload": {
"points": [1, 2, 3],
"payload": {
"key1": "hello" ,
"key2": [1,2,3,4],
"key3": {"json": {"key1":"value1"} }
}
}
}
"#;
let operation: PayloadOps = serde_json::from_str(query1).unwrap();
match operation {
PayloadOps::SetPayload(set_payload) => {
let payload: Payload = set_payload.payload;
assert_eq!(payload.len(), 3);
assert!(payload.contains_key("key1"));
let payload_type = payload
.get_value(&"key1".parse().unwrap())
.into_iter()
.next()
.cloned()
.expect("No key key1");
match payload_type {
Value::String(x) => assert_eq!(x, "hello"),
_ => panic!("Wrong payload type"),
}
let payload_type_json = payload
.get_value(&"key3".parse().unwrap())
.into_iter()
.next()
.cloned();
assert!(matches!(payload_type_json, Some(Value::Object(_))))
}
_ => panic!("Wrong operation"),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operations/vector_ops.rs | lib/shard/src/operations/vector_ops.rs | use segment::types::{Filter, PointIdType, VectorNameBuf};
use serde::{Deserialize, Serialize};
use strum::{EnumDiscriminants, EnumIter};
use super::point_ops::{PointIdsList, VectorStructPersisted};
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(rename_all = "snake_case")]
pub enum VectorOperations {
/// Update vectors
UpdateVectors(UpdateVectorsOp),
/// Delete vectors if exists
DeleteVectors(PointIdsList, Vec<VectorNameBuf>),
/// Delete vectors by given filter criteria
DeleteVectorsByFilter(Filter, Vec<VectorNameBuf>),
}
impl VectorOperations {
pub fn point_ids(&self) -> Option<Vec<PointIdType>> {
match self {
Self::UpdateVectors(op) => Some(op.points.iter().map(|point| point.id).collect()),
Self::DeleteVectors(points, _) => Some(points.points.clone()),
Self::DeleteVectorsByFilter(_, _) => None,
}
}
pub fn retain_point_ids<F>(&mut self, filter: F)
where
F: Fn(&PointIdType) -> bool,
{
match self {
Self::UpdateVectors(op) => op.points.retain(|point| filter(&point.id)),
Self::DeleteVectors(points, _) => points.points.retain(filter),
Self::DeleteVectorsByFilter(_, _) => (),
}
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct UpdateVectorsOp {
/// Points with named vectors
pub points: Vec<PointVectorsPersisted>,
/// Condition to check before updating vectors
#[serde(default, skip_serializing_if = "Option::is_none")]
pub update_filter: Option<Filter>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct PointVectorsPersisted {
/// Point id
pub id: PointIdType,
/// Vectors
pub vector: VectorStructPersisted,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operations/mod.rs | lib/shard/src/operations/mod.rs | pub mod payload_ops;
pub mod point_ops;
#[cfg(feature = "staging")]
pub mod staging;
pub mod vector_ops;
use segment::json_path::JsonPath;
use segment::types::{PayloadFieldSchema, PointIdType};
use serde::{Deserialize, Serialize};
use strum::{EnumDiscriminants, EnumIter};
use crate::PeerId;
use crate::operations::point_ops::PointOperations;
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(untagged, rename_all = "snake_case")]
pub enum CollectionUpdateOperations {
PointOperation(point_ops::PointOperations),
VectorOperation(vector_ops::VectorOperations),
PayloadOperation(payload_ops::PayloadOps),
FieldIndexOperation(FieldIndexOperations),
}
impl CollectionUpdateOperations {
pub fn is_upsert_points(&self) -> bool {
matches!(
self,
Self::PointOperation(point_ops::PointOperations::UpsertPoints(_))
)
}
pub fn is_delete_points(&self) -> bool {
matches!(
self,
Self::PointOperation(point_ops::PointOperations::DeletePoints { .. })
)
}
pub fn point_ids(&self) -> Option<Vec<PointIdType>> {
match self {
Self::PointOperation(op) => op.point_ids(),
Self::VectorOperation(op) => op.point_ids(),
Self::PayloadOperation(op) => op.point_ids(),
Self::FieldIndexOperation(_) => None,
}
}
/// List point IDs that can be created during the operation.
/// Do not list IDs that are deleted or modified.
pub fn upsert_point_ids(&self) -> Option<Vec<PointIdType>> {
match self {
Self::PointOperation(op) => match op {
PointOperations::UpsertPoints(op) => Some(op.point_ids()),
PointOperations::UpsertPointsConditional(op) => Some(op.points_op.point_ids()),
PointOperations::DeletePoints { .. } => None,
PointOperations::DeletePointsByFilter(_) => None,
PointOperations::SyncPoints(op) => {
Some(op.points.iter().map(|point| point.id).collect())
}
#[cfg(feature = "staging")]
PointOperations::TestDelay(_) => None,
},
Self::VectorOperation(_) => None,
Self::PayloadOperation(_) => None,
Self::FieldIndexOperation(_) => None,
}
}
pub fn retain_point_ids<F>(&mut self, filter: F)
where
F: Fn(&PointIdType) -> bool,
{
match self {
Self::PointOperation(op) => op.retain_point_ids(filter),
Self::VectorOperation(op) => op.retain_point_ids(filter),
Self::PayloadOperation(op) => op.retain_point_ids(filter),
Self::FieldIndexOperation(_) => (),
}
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(rename_all = "snake_case")]
pub enum FieldIndexOperations {
/// Create index for payload field
CreateIndex(CreateIndex),
/// Delete index for the field
DeleteIndex(JsonPath),
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
#[serde(rename_all = "snake_case")]
pub struct CreateIndex {
pub field_name: JsonPath,
pub field_schema: Option<PayloadFieldSchema>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct OperationWithClockTag {
#[serde(flatten)]
pub operation: CollectionUpdateOperations,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub clock_tag: Option<ClockTag>,
}
impl OperationWithClockTag {
pub fn new(
operation: impl Into<CollectionUpdateOperations>,
clock_tag: Option<ClockTag>,
) -> Self {
Self {
operation: operation.into(),
clock_tag,
}
}
}
impl From<CollectionUpdateOperations> for OperationWithClockTag {
fn from(operation: CollectionUpdateOperations) -> Self {
Self::new(operation, None)
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct ClockTag {
pub peer_id: PeerId,
pub clock_id: u32,
pub clock_tick: u64,
/// A unique token for each clock tag.
pub token: ClockToken,
pub force: bool,
}
pub type ClockToken = u64;
impl ClockTag {
pub fn new(peer_id: PeerId, clock_id: u32, clock_tick: u64) -> Self {
let random_token = rand::random();
Self::new_with_token(peer_id, clock_id, clock_tick, random_token)
}
pub fn new_with_token(
peer_id: PeerId,
clock_id: u32,
clock_tick: u64,
token: ClockToken,
) -> Self {
Self {
peer_id,
clock_id,
clock_tick,
token,
force: false,
}
}
pub fn force(mut self, force: bool) -> Self {
self.force = force;
self
}
}
impl From<api::grpc::qdrant::ClockTag> for ClockTag {
fn from(tag: api::grpc::qdrant::ClockTag) -> Self {
let api::grpc::qdrant::ClockTag {
peer_id,
clock_id,
clock_tick,
token,
force,
} = tag;
Self {
peer_id,
clock_id,
clock_tick,
token,
force,
}
}
}
impl From<ClockTag> for api::grpc::qdrant::ClockTag {
fn from(tag: ClockTag) -> Self {
let ClockTag {
peer_id,
clock_id,
clock_tick,
token,
force,
} = tag;
Self {
peer_id,
clock_id,
clock_tick,
token,
force,
}
}
}
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use segment::types::*;
use super::payload_ops::*;
use super::point_ops::*;
use super::vector_ops::*;
use super::*;
proptest::proptest! {
#[test]
fn operation_with_clock_tag_json(operation in any::<OperationWithClockTag>()) {
// Assert that `OperationWithClockTag` can be serialized
let input = serde_json::to_string(&operation).unwrap();
let output: OperationWithClockTag = serde_json::from_str(&input).unwrap();
assert_eq!(operation, output);
// Assert that `OperationWithClockTag` can be deserialized from `CollectionUpdateOperation`
let input = serde_json::to_string(&operation.operation).unwrap();
let output: OperationWithClockTag = serde_json::from_str(&input).unwrap();
assert_eq!(operation.operation, output.operation);
// Assert that `CollectionUpdateOperation` serializes into JSON object with a single key
// (e.g., `{ "upsert_points": <upsert points object> }`)
match serde_json::to_value(&operation.operation).unwrap() {
serde_json::Value::Object(map) if map.len() == 1 => (),
_ => panic!("TODO"),
};
}
#[test]
fn operation_with_clock_tag_cbor(operation in any::<OperationWithClockTag>()) {
// Assert that `OperationWithClockTag` can be serialized
let input = serde_cbor::to_vec(&operation).unwrap();
let output: OperationWithClockTag = serde_cbor::from_slice(&input).unwrap();
assert_eq!(operation, output);
// Assert that `OperationWithClockTag` can be deserialized from `CollectionUpdateOperation`
let input = serde_cbor::to_vec(&operation.operation).unwrap();
let output: OperationWithClockTag = serde_cbor::from_slice(&input).unwrap();
assert_eq!(operation.operation, output.operation);
}
}
impl Arbitrary for OperationWithClockTag {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
any::<(CollectionUpdateOperations, Option<ClockTag>)>()
.prop_map(|(operation, clock_tag)| Self::new(operation, clock_tag))
.boxed()
}
}
impl Arbitrary for ClockTag {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
any::<(PeerId, u32, u64)>()
.prop_map(|(peer_id, clock_id, clock_tick)| {
Self::new(peer_id, clock_id, clock_tick)
})
.boxed()
}
}
impl Arbitrary for CollectionUpdateOperations {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
prop_oneof![
any::<point_ops::PointOperations>().prop_map(Self::PointOperation),
any::<vector_ops::VectorOperations>().prop_map(Self::VectorOperation),
any::<payload_ops::PayloadOps>().prop_map(Self::PayloadOperation),
any::<FieldIndexOperations>().prop_map(Self::FieldIndexOperation),
]
.boxed()
}
}
impl Arbitrary for point_ops::PointOperations {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
let upsert = Self::UpsertPoints(PointInsertOperationsInternal::PointsList(Vec::new()));
let delete = Self::DeletePoints { ids: Vec::new() };
let delete_by_filter = Self::DeletePointsByFilter(Filter {
should: None,
min_should: None,
must: None,
must_not: None,
});
let sync = Self::SyncPoints(PointSyncOperation {
from_id: None,
to_id: None,
points: Vec::new(),
});
prop_oneof![
Just(upsert),
Just(delete),
Just(delete_by_filter),
Just(sync),
]
.boxed()
}
}
impl Arbitrary for vector_ops::VectorOperations {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
let update = Self::UpdateVectors(UpdateVectorsOp {
points: Vec::new(),
update_filter: None,
});
let delete = Self::DeleteVectors(
PointIdsList {
points: Vec::new(),
shard_key: None,
},
Vec::new(),
);
let delete_by_filter = Self::DeleteVectorsByFilter(
Filter {
should: None,
min_should: None,
must: None,
must_not: None,
},
Vec::new(),
);
prop_oneof![Just(update), Just(delete), Just(delete_by_filter),].boxed()
}
}
impl Arbitrary for payload_ops::PayloadOps {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
let set = Self::SetPayload(SetPayloadOp {
payload: Payload(Default::default()),
points: None,
filter: None,
key: None,
});
let overwrite = Self::OverwritePayload(SetPayloadOp {
payload: Payload(Default::default()),
points: None,
filter: None,
key: None,
});
let delete = Self::DeletePayload(DeletePayloadOp {
keys: Vec::new(),
points: None,
filter: None,
});
let clear = Self::ClearPayload { points: Vec::new() };
let clear_by_filter = Self::ClearPayloadByFilter(Filter {
should: None,
min_should: None,
must: None,
must_not: None,
});
prop_oneof![
Just(set),
Just(overwrite),
Just(delete),
Just(clear),
Just(clear_by_filter),
]
.boxed()
}
}
impl Arbitrary for FieldIndexOperations {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
let create = Self::CreateIndex(CreateIndex {
field_name: "field_name".parse().unwrap(),
field_schema: None,
});
let delete = Self::DeleteIndex("field_name".parse().unwrap());
prop_oneof![Just(create), Just(delete),].boxed()
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operations/staging.rs | lib/shard/src/operations/staging.rs | //! Staging-only operations for testing and debugging purposes.
//!
//! This module contains operations that are only available when the `staging` feature is enabled.
use std::time::Duration;
use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
/// Test operation that introduces an artificial delay.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct TestDelayOperation {
/// Duration of the delay in seconds.
pub duration: OrderedFloat<f64>,
}
impl TestDelayOperation {
/// Create a new TestDelayOperation with the given duration in seconds.
pub fn new(duration_secs: f64) -> Self {
Self {
duration: OrderedFloat(duration_secs),
}
}
/// Execute the delay operation (blocking).
pub fn execute(&self) {
let duration_secs = self.duration.into_inner();
if duration_secs < 0.0 {
log::warn!("TestDelay: negative duration {duration_secs}s, skipping");
return;
}
let delay = Duration::from_secs_f64(duration_secs);
log::debug!("TestDelay: sleeping for {delay:?}");
std::thread::sleep(delay);
log::debug!("TestDelay: finished sleeping");
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/operations/point_ops.rs | lib/shard/src/operations/point_ops.rs | use std::collections::{HashMap, HashSet};
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::{iter, mem};
use api::conversions::json::payload_to_proto;
use api::rest::{
DenseVector, MultiDenseVector, ShardKeySelector, VectorOutput, VectorStructOutput,
};
use common::validation::validate_multi_vector;
use itertools::Itertools as _;
use ordered_float::OrderedFloat;
use schemars::JsonSchema;
use segment::common::operation_error::OperationError;
use segment::common::utils::unordered_hash_unique;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{
BatchVectorStructInternal, DEFAULT_VECTOR_NAME, MultiDenseVectorInternal, VectorInternal,
VectorStructInternal,
};
use segment::types::{Filter, Payload, PointIdType, VectorNameBuf};
use serde::{Deserialize, Serialize};
use sparse::common::types::{DimId, DimWeight};
use strum::{EnumDiscriminants, EnumIter};
use tonic::Status;
use validator::{Validate, ValidationErrors};
use super::payload_ops::*;
use super::vector_ops::*;
use super::*;
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema, Validate, Hash)]
#[serde(rename_all = "snake_case")]
pub struct PointIdsList {
pub points: Vec<PointIdType>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub shard_key: Option<ShardKeySelector>,
}
impl From<Vec<PointIdType>> for PointIdsList {
fn from(points: Vec<PointIdType>) -> Self {
Self {
points,
shard_key: None,
}
}
}
// General idea of having an extra layer of data structures after REST and gRPC
// is to ensure that all vectors are inferenced and validated before they are persisted.
//
// This separation allows to have a single point, enforced by the type system,
// where all Documents and other inference-able objects are resolved into raw vectors.
//
// Separation between VectorStructPersisted and VectorStructInternal is only needed
// for legacy reasons, as the previous implementations wrote VectorStruct to WAL,
// so we need an ability to read it back. VectorStructPersisted reproduces the same
// structure as VectorStruct had in the previous versions.
//
//
// gRPC REST API βββββ WAL
// β β β I β β²
// β β β n β β
// β β β f β β
// βββββββββΌββββββββ βββββββΌβββββββ β e β βββββββββββ΄ββββββββββββ
// β grpc::Vectors βββββΊβVectorStructβββββββββΊβ r ββββββΊβVectorStructPersistedβββββββ
// βββββββββββββββββ ββββββββββββββ β e β βββββββββββββββββββββββ β
// Vectors β n β Only Vectors β
// + Documents β c β β
// + Images β e β β
// + Other inference βββββ β
// Implement JsonSchema β
// βββββββββββββββββββββββ β
// β βββββββ
// β Storage β
// β β
// REST API Response ββββββββββ¬βββββββββββββ
// β² β
// β β
// ββββββββ΄βββββββββββββββ βββββββββββΌββββββββββββ
// β VectorStructOutput βββββ¬ββββββ€VectorStructInternal β
// βββββββββββββββββββββββ β βββββββββββββββββββββββ
// Only Vectors β Only Vectors
// Implement JsonSchema β Optimized for search
// β
// β
// βββββββββββββββββββββββ β
// β grpc::VectorsOutput βββββ
// βββββββββββββ¬ββββββββββ
// β
// βΌ
// gPRC Response
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(rename_all = "snake_case")]
pub enum PointOperations {
/// Insert or update points
UpsertPoints(PointInsertOperationsInternal),
/// Insert points, or update existing points if condition matches
UpsertPointsConditional(ConditionalInsertOperationInternal),
/// Delete point if exists
DeletePoints { ids: Vec<PointIdType> },
/// Delete points by given filter criteria
DeletePointsByFilter(Filter),
/// Points Sync
SyncPoints(PointSyncOperation),
/// Introduce artificial delay for testing purposes
#[cfg(feature = "staging")]
TestDelay(super::staging::TestDelayOperation),
}
impl PointOperations {
pub fn point_ids(&self) -> Option<Vec<PointIdType>> {
match self {
Self::UpsertPoints(op) => Some(op.point_ids()),
Self::UpsertPointsConditional(op) => Some(op.points_op.point_ids()),
Self::DeletePoints { ids } => Some(ids.clone()),
Self::DeletePointsByFilter(_) => None,
Self::SyncPoints(op) => Some(op.points.iter().map(|point| point.id).collect()),
#[cfg(feature = "staging")]
Self::TestDelay(_) => None,
}
}
pub fn retain_point_ids<F>(&mut self, filter: F)
where
F: Fn(&PointIdType) -> bool,
{
match self {
Self::UpsertPoints(op) => op.retain_point_ids(filter),
Self::UpsertPointsConditional(op) => {
op.points_op.retain_point_ids(filter);
}
Self::DeletePoints { ids } => ids.retain(filter),
Self::DeletePointsByFilter(_) => (),
Self::SyncPoints(op) => op.points.retain(|point| filter(&point.id)),
#[cfg(feature = "staging")]
Self::TestDelay(_) => (),
}
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, EnumDiscriminants, Hash)]
#[strum_discriminants(derive(EnumIter))]
#[serde(rename_all = "snake_case")]
pub enum PointInsertOperationsInternal {
/// Inset points from a batch.
#[serde(rename = "batch")]
PointsBatch(BatchPersisted),
/// Insert points from a list
#[serde(rename = "points")]
PointsList(Vec<PointStructPersisted>),
}
impl PointInsertOperationsInternal {
pub fn point_ids(&self) -> Vec<PointIdType> {
match self {
Self::PointsBatch(batch) => batch.ids.clone(),
Self::PointsList(points) => points.iter().map(|point| point.id).collect(),
}
}
pub fn into_point_vec(self) -> Vec<PointStructPersisted> {
match self {
PointInsertOperationsInternal::PointsBatch(batch) => {
let batch_vectors = BatchVectorStructInternal::from(batch.vectors);
let all_vectors = batch_vectors.into_all_vectors(batch.ids.len());
let vectors_iter = batch.ids.into_iter().zip(all_vectors);
match batch.payloads {
None => vectors_iter
.map(|(id, vectors)| PointStructPersisted {
id,
vector: VectorStructInternal::from(vectors).into(),
payload: None,
})
.collect(),
Some(payloads) => vectors_iter
.zip(payloads)
.map(|((id, vectors), payload)| PointStructPersisted {
id,
vector: VectorStructInternal::from(vectors).into(),
payload,
})
.collect(),
}
}
PointInsertOperationsInternal::PointsList(points) => points,
}
}
pub fn retain_point_ids<F>(&mut self, filter: F)
where
F: Fn(&PointIdType) -> bool,
{
match self {
Self::PointsBatch(batch) => {
let mut retain_indices = HashSet::new();
retain_with_index(&mut batch.ids, |index, id| {
if filter(id) {
retain_indices.insert(index);
true
} else {
false
}
});
match &mut batch.vectors {
BatchVectorStructPersisted::Single(vectors) => {
retain_with_index(vectors, |index, _| retain_indices.contains(&index));
}
BatchVectorStructPersisted::MultiDense(vectors) => {
retain_with_index(vectors, |index, _| retain_indices.contains(&index));
}
BatchVectorStructPersisted::Named(vectors) => {
for (_, vectors) in vectors.iter_mut() {
retain_with_index(vectors, |index, _| retain_indices.contains(&index));
}
}
}
if let Some(payload) = &mut batch.payloads {
retain_with_index(payload, |index, _| retain_indices.contains(&index));
}
}
Self::PointsList(points) => points.retain(|point| filter(&point.id)),
}
}
pub fn into_update_only(
self,
update_filter: Option<Filter>,
) -> Vec<CollectionUpdateOperations> {
let mut operations = Vec::new();
match self {
Self::PointsBatch(batch) => {
let mut update_vectors = UpdateVectorsOp {
points: Vec::new(),
update_filter: update_filter.clone(),
};
match batch.vectors {
BatchVectorStructPersisted::Single(vectors) => {
let ids = batch.ids.iter().copied();
let vectors = vectors.into_iter().map(VectorStructPersisted::Single);
update_vectors.points = ids
.zip(vectors)
.map(|(id, vector)| PointVectorsPersisted { id, vector })
.collect();
}
BatchVectorStructPersisted::MultiDense(vectors) => {
let ids = batch.ids.iter().copied();
let vectors = vectors.into_iter().map(VectorStructPersisted::MultiDense);
update_vectors.points = ids
.zip(vectors)
.map(|(id, vector)| PointVectorsPersisted { id, vector })
.collect();
}
BatchVectorStructPersisted::Named(batch_vectors) => {
let ids = batch.ids.iter().copied();
let mut batch_vectors: HashMap<_, _> = batch_vectors
.into_iter()
.map(|(name, vectors)| (name, vectors.into_iter()))
.collect();
let vectors = iter::repeat(()).filter_map(move |_| {
let mut point_vectors =
HashMap::with_capacity(batch_vectors.capacity());
for (vector_name, vectors) in batch_vectors.iter_mut() {
point_vectors.insert(vector_name.clone(), vectors.next()?);
}
Some(VectorStructPersisted::Named(point_vectors))
});
update_vectors.points = ids
.zip(vectors)
.map(|(id, vector)| PointVectorsPersisted { id, vector })
.collect();
}
}
let update_vectors = vector_ops::VectorOperations::UpdateVectors(update_vectors);
let update_vectors = CollectionUpdateOperations::VectorOperation(update_vectors);
operations.push(update_vectors);
if let Some(payloads) = batch.payloads {
let ids = batch.ids.iter().copied();
for (id, payload) in ids.zip(payloads) {
if let Some(payload) = payload {
let set_payload = if let Some(update_filter) = update_filter.clone() {
SetPayloadOp {
points: None,
payload,
filter: Some(update_filter.with_point_ids(vec![id])),
key: None,
}
} else {
SetPayloadOp {
points: Some(vec![id]),
payload,
filter: None,
key: None,
}
};
let set_payload =
payload_ops::PayloadOps::OverwritePayload(set_payload);
let set_payload =
CollectionUpdateOperations::PayloadOperation(set_payload);
operations.push(set_payload);
}
}
}
}
Self::PointsList(points) => {
let mut update_vectors = UpdateVectorsOp {
points: Vec::new(),
update_filter: update_filter.clone(),
};
for point in points {
update_vectors.points.push(PointVectorsPersisted {
id: point.id,
vector: point.vector,
});
if let Some(payload) = point.payload {
let set_payload = if let Some(update_filter) = update_filter.clone() {
SetPayloadOp {
points: None,
payload,
filter: Some(update_filter.with_point_ids(vec![point.id])),
key: None,
}
} else {
SetPayloadOp {
points: Some(vec![point.id]),
payload,
filter: None,
key: None,
}
};
let set_payload = payload_ops::PayloadOps::OverwritePayload(set_payload);
let set_payload = CollectionUpdateOperations::PayloadOperation(set_payload);
operations.push(set_payload);
}
}
let update_vectors = vector_ops::VectorOperations::UpdateVectors(update_vectors);
let update_vectors = CollectionUpdateOperations::VectorOperation(update_vectors);
operations.insert(0, update_vectors);
}
}
operations
}
}
impl From<BatchPersisted> for PointInsertOperationsInternal {
fn from(batch: BatchPersisted) -> Self {
PointInsertOperationsInternal::PointsBatch(batch)
}
}
impl From<Vec<PointStructPersisted>> for PointInsertOperationsInternal {
fn from(points: Vec<PointStructPersisted>) -> Self {
PointInsertOperationsInternal::PointsList(points)
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct ConditionalInsertOperationInternal {
pub points_op: PointInsertOperationsInternal,
/// Condition to check, if the point already exists
pub condition: Filter,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
pub struct PointSyncOperation {
/// Minimal id of the sync range
pub from_id: Option<PointIdType>,
/// Maximal id og
pub to_id: Option<PointIdType>,
pub points: Vec<PointStructPersisted>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Hash)]
#[serde(rename_all = "snake_case")]
pub struct BatchPersisted {
pub ids: Vec<PointIdType>,
pub vectors: BatchVectorStructPersisted,
pub payloads: Option<Vec<Option<Payload>>>,
}
impl TryFrom<BatchPersisted> for Vec<api::grpc::qdrant::PointStruct> {
type Error = Status;
fn try_from(batch: BatchPersisted) -> Result<Self, Self::Error> {
let BatchPersisted {
ids,
vectors,
payloads,
} = batch;
let mut points = Vec::with_capacity(ids.len());
let batch_vectors = BatchVectorStructInternal::from(vectors);
let all_vectors = batch_vectors.into_all_vectors(ids.len());
for (i, p_id) in ids.into_iter().enumerate() {
let id = Some(p_id.into());
let vector = all_vectors.get(i).cloned();
let payload = payloads.as_ref().and_then(|payloads| {
payloads.get(i).map(|payload| match payload {
None => HashMap::new(),
Some(payload) => payload_to_proto(payload.clone()),
})
});
let vectors: Option<VectorStructInternal> = vector.map(|v| v.into());
let point = api::grpc::qdrant::PointStruct {
id,
vectors: vectors.map(api::grpc::qdrant::Vectors::from),
payload: payload.unwrap_or_default(),
};
points.push(point);
}
Ok(points)
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
#[serde(untagged, rename_all = "snake_case")]
pub enum BatchVectorStructPersisted {
Single(Vec<DenseVector>),
MultiDense(Vec<MultiDenseVector>),
Named(HashMap<VectorNameBuf, Vec<VectorPersisted>>),
}
impl Hash for BatchVectorStructPersisted {
fn hash<H: Hasher>(&self, state: &mut H) {
mem::discriminant(self).hash(state);
match self {
BatchVectorStructPersisted::Single(dense) => {
for vector in dense {
for v in vector {
OrderedFloat(*v).hash(state);
}
}
}
BatchVectorStructPersisted::MultiDense(multidense) => {
for vector in multidense {
for v in vector {
for element in v {
OrderedFloat(*element).hash(state);
}
}
}
}
BatchVectorStructPersisted::Named(named) => unordered_hash_unique(state, named.iter()),
}
}
}
impl From<BatchVectorStructPersisted> for BatchVectorStructInternal {
fn from(value: BatchVectorStructPersisted) -> Self {
match value {
BatchVectorStructPersisted::Single(vector) => BatchVectorStructInternal::Single(vector),
BatchVectorStructPersisted::MultiDense(vectors) => {
BatchVectorStructInternal::MultiDense(
vectors
.into_iter()
.map(MultiDenseVectorInternal::new_unchecked)
.collect(),
)
}
BatchVectorStructPersisted::Named(vectors) => BatchVectorStructInternal::Named(
vectors
.into_iter()
.map(|(k, v)| (k, v.into_iter().map(VectorInternal::from).collect()))
.collect(),
),
}
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Validate, Hash)]
#[serde(rename_all = "snake_case")]
pub struct PointStructPersisted {
/// Point id
pub id: PointIdType,
/// Vectors
pub vector: VectorStructPersisted,
/// Payload values (optional)
pub payload: Option<Payload>,
}
impl PointStructPersisted {
pub fn get_vectors(&self) -> NamedVectors<'_> {
let mut named_vectors = NamedVectors::default();
match &self.vector {
VectorStructPersisted::Single(vector) => named_vectors.insert(
DEFAULT_VECTOR_NAME.to_owned(),
VectorInternal::from(vector.clone()),
),
VectorStructPersisted::MultiDense(vector) => named_vectors.insert(
DEFAULT_VECTOR_NAME.to_owned(),
VectorInternal::from(MultiDenseVectorInternal::new_unchecked(vector.clone())),
),
VectorStructPersisted::Named(vectors) => {
for (name, vector) in vectors {
named_vectors.insert(name.clone(), VectorInternal::from(vector.clone()));
}
}
}
named_vectors
}
}
impl TryFrom<api::rest::schema::Record> for PointStructPersisted {
type Error = String;
fn try_from(record: api::rest::schema::Record) -> Result<Self, Self::Error> {
let api::rest::schema::Record {
id,
payload,
vector,
shard_key: _,
order_value: _,
} = record;
if vector.is_none() {
return Err("Vector is empty".to_string());
}
Ok(Self {
id,
payload,
vector: VectorStructPersisted::from(vector.unwrap()),
})
}
}
impl TryFrom<PointStructPersisted> for api::grpc::qdrant::PointStruct {
type Error = Status;
fn try_from(value: PointStructPersisted) -> Result<Self, Self::Error> {
let PointStructPersisted {
id,
vector,
payload,
} = value;
let vectors_internal = VectorStructInternal::try_from(vector)
.map_err(|e| Status::invalid_argument(format!("Failed to convert vectors: {e}")))?;
let vectors = api::grpc::qdrant::Vectors::from(vectors_internal);
let converted_payload = match payload {
None => HashMap::new(),
Some(payload) => payload_to_proto(payload),
};
Ok(Self {
id: Some(id.into()),
vectors: Some(vectors),
payload: converted_payload,
})
}
}
/// Data structure for point vectors, as it is persisted in WAL
#[derive(Clone, PartialEq, Deserialize, Serialize)]
#[serde(untagged, rename_all = "snake_case")]
pub enum VectorStructPersisted {
Single(DenseVector),
MultiDense(MultiDenseVector),
Named(HashMap<VectorNameBuf, VectorPersisted>),
}
impl std::hash::Hash for VectorStructPersisted {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
mem::discriminant(self).hash(state);
match self {
VectorStructPersisted::Single(vec) => {
for v in vec {
OrderedFloat(*v).hash(state);
}
}
VectorStructPersisted::MultiDense(multi_vec) => {
for vec in multi_vec {
for v in vec {
OrderedFloat(*v).hash(state);
}
}
}
VectorStructPersisted::Named(map) => {
unordered_hash_unique(state, map.iter());
}
}
}
}
impl Debug for VectorStructPersisted {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
VectorStructPersisted::Single(vector) => {
let first_elements = vector.iter().take(4).join(", ");
write!(f, "Single([{}, ... x {}])", first_elements, vector.len())
}
VectorStructPersisted::MultiDense(vector) => {
let first_vectors = vector
.iter()
.take(4)
.map(|v| {
let first_elements = v.iter().take(4).join(", ");
format!("[{}, ... x {}]", first_elements, v.len())
})
.join(", ");
write!(f, "MultiDense([{}, ... x {})", first_vectors, vector.len())
}
VectorStructPersisted::Named(vectors) => write!(f, "Named(( ")
.and_then(|_| {
for (name, vector) in vectors {
write!(f, "{name}: {vector:?}, ")?;
}
Ok(())
})
.and_then(|_| write!(f, "))")),
}
}
}
impl VectorStructPersisted {
/// Check if this vector struct is empty.
pub fn is_empty(&self) -> bool {
match self {
VectorStructPersisted::Single(vector) => vector.is_empty(),
VectorStructPersisted::MultiDense(vector) => vector.is_empty(),
VectorStructPersisted::Named(vectors) => vectors.values().all(|v| match v {
VectorPersisted::Dense(vector) => vector.is_empty(),
VectorPersisted::Sparse(vector) => vector.indices.is_empty(),
VectorPersisted::MultiDense(vector) => vector.is_empty(),
}),
}
}
}
impl Validate for VectorStructPersisted {
fn validate(&self) -> Result<(), ValidationErrors> {
match self {
VectorStructPersisted::Single(_) => Ok(()),
VectorStructPersisted::MultiDense(v) => validate_multi_vector(v),
VectorStructPersisted::Named(v) => common::validation::validate_iter(v.values()),
}
}
}
impl From<DenseVector> for VectorStructPersisted {
fn from(value: DenseVector) -> Self {
VectorStructPersisted::Single(value)
}
}
impl From<VectorStructInternal> for VectorStructPersisted {
fn from(value: VectorStructInternal) -> Self {
match value {
VectorStructInternal::Single(vector) => VectorStructPersisted::Single(vector),
VectorStructInternal::MultiDense(vector) => {
VectorStructPersisted::MultiDense(vector.into_multi_vectors())
}
VectorStructInternal::Named(vectors) => VectorStructPersisted::Named(
vectors
.into_iter()
.map(|(k, v)| (k, VectorPersisted::from(v)))
.collect(),
),
}
}
}
impl From<VectorStructOutput> for VectorStructPersisted {
fn from(value: VectorStructOutput) -> Self {
match value {
VectorStructOutput::Single(vector) => VectorStructPersisted::Single(vector),
VectorStructOutput::MultiDense(vector) => VectorStructPersisted::MultiDense(vector),
VectorStructOutput::Named(vectors) => VectorStructPersisted::Named(
vectors
.into_iter()
.map(|(k, v)| (k, VectorPersisted::from(v)))
.collect(),
),
}
}
}
impl TryFrom<VectorStructPersisted> for VectorStructInternal {
type Error = OperationError;
fn try_from(value: VectorStructPersisted) -> Result<Self, Self::Error> {
let vector_struct = match value {
VectorStructPersisted::Single(vector) => VectorStructInternal::Single(vector),
VectorStructPersisted::MultiDense(vector) => {
VectorStructInternal::MultiDense(MultiDenseVectorInternal::try_from(vector)?)
}
VectorStructPersisted::Named(vectors) => VectorStructInternal::Named(
vectors
.into_iter()
.map(|(k, v)| (k, VectorInternal::from(v)))
.collect(),
),
};
Ok(vector_struct)
}
}
impl From<VectorStructPersisted> for NamedVectors<'_> {
fn from(value: VectorStructPersisted) -> Self {
match value {
VectorStructPersisted::Single(vector) => {
NamedVectors::from_pairs([(DEFAULT_VECTOR_NAME.to_owned(), vector)])
}
VectorStructPersisted::MultiDense(vector) => {
let mut named_vector = NamedVectors::default();
let multivec = MultiDenseVectorInternal::new_unchecked(vector);
named_vector.insert(
DEFAULT_VECTOR_NAME.to_owned(),
segment::data_types::vectors::VectorInternal::from(multivec),
);
named_vector
}
VectorStructPersisted::Named(vectors) => {
let mut named_vector = NamedVectors::default();
for (name, vector) in vectors {
named_vector.insert(
name,
segment::data_types::vectors::VectorInternal::from(vector),
);
}
named_vector
}
}
}
}
/// Single vector data, as it is persisted in WAL
/// Unlike [`api::rest::Vector`], this struct only stores raw vectors, inferenced or resolved.
/// Unlike [`VectorInternal`], is not optimized for search
#[derive(Clone, PartialEq, Deserialize, Serialize)]
#[serde(untagged, rename_all = "snake_case")]
pub enum VectorPersisted {
Dense(DenseVector),
Sparse(sparse::common::sparse_vector::SparseVector),
MultiDense(MultiDenseVector),
}
impl Hash for VectorPersisted {
fn hash<H: Hasher>(&self, state: &mut H) {
mem::discriminant(self).hash(state);
match self {
VectorPersisted::Dense(vec) => {
for v in vec {
OrderedFloat(*v).hash(state);
}
}
VectorPersisted::Sparse(sparse) => {
sparse.hash(state);
}
VectorPersisted::MultiDense(multi_vec) => {
for vec in multi_vec {
for v in vec {
OrderedFloat(*v).hash(state);
}
}
}
}
}
}
impl VectorPersisted {
pub fn new_sparse(indices: Vec<DimId>, values: Vec<DimWeight>) -> Self {
Self::Sparse(sparse::common::sparse_vector::SparseVector { indices, values })
}
pub fn empty_sparse() -> Self {
Self::new_sparse(vec![], vec![])
}
}
impl Debug for VectorPersisted {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
VectorPersisted::Dense(vector) => {
let first_elements = vector.iter().take(4).join(", ");
write!(f, "Dense([{}, ... x {}])", first_elements, vector.len())
}
VectorPersisted::Sparse(vector) => {
let first_elements = vector
.indices
.iter()
.zip(vector.values.iter())
.take(4)
.map(|(k, v)| format!("{k}->{v}"))
.join(", ");
write!(
f,
"Sparse([{}, ... x {})",
first_elements,
vector.indices.len()
)
}
VectorPersisted::MultiDense(vector) => {
let first_vectors = vector
.iter()
.take(4)
.map(|v| {
let first_elements = v.iter().take(4).join(", ");
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/common/mod.rs | lib/shard/src/common/mod.rs | pub mod stopping_guard;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/common/stopping_guard.rs | lib/shard/src/common/stopping_guard.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
/// Structure that ensures that `is_stopped` flag is set to `true` when dropped.
pub struct StoppingGuard {
is_stopped: Arc<AtomicBool>,
}
impl StoppingGuard {
/// Creates a new `StopGuard` instance.
pub fn new() -> Self {
Self {
is_stopped: Arc::new(AtomicBool::new(false)),
}
}
pub fn get_is_stopped(&self) -> Arc<AtomicBool> {
self.is_stopped.clone()
}
}
impl Default for StoppingGuard {
fn default() -> Self {
Self::new()
}
}
impl Drop for StoppingGuard {
fn drop(&mut self) {
self.is_stopped
.store(true, std::sync::atomic::Ordering::Relaxed);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/segment_holder/flush.rs | lib/shard/src/segment_holder/flush.rs | use std::cmp::{max, min};
use std::sync::atomic::Ordering;
use std::thread::JoinHandle;
use parking_lot::{RwLock, RwLockReadGuard};
use segment::common::operation_error::{OperationError, OperationResult};
use segment::entry::SegmentEntry;
use segment::types::SeqNumberType;
use crate::locked_segment::LockedSegment;
use crate::segment_holder::{SegmentHolder, SegmentId};
impl SegmentHolder {
/// Flushes all segments and returns maximum version to persist
///
/// Before flushing, this read-locks all segments. It prevents writes to all not-yet-flushed
/// segments during flushing. All locked segments are flushed and released one by one.
///
/// If there are unsaved changes after flush - detects lowest unsaved change version.
/// If all changes are saved - returns max version.
pub fn flush_all(&self, sync: bool, force: bool) -> OperationResult<SeqNumberType> {
let lock_order: Vec<_> = self.segment_flush_ordering().collect();
// Grab and keep to segment RwLock's until the end of this function
let segments = self.segment_locks(lock_order.iter().cloned())?;
// We can never have zero segments
// Having zero segments could permanently corrupt the WAL by acknowledging u64::MAX
assert!(
!segments.is_empty(),
"must always have at least one segment",
);
// Read-lock all segments before flushing any, must prevent any writes to any segment
// That is to prevent any copy-on-write operation on two segments from occurring in between
// flushing the two segments. If that would happen, segments could end up in an
// inconsistent state on disk that is not recoverable after a crash.
//
// E.g.: we have a point on an immutable segment. If we use a set-payload operation, we do
// copy-on-write. The point from immutable segment A is deleted, the updated point is
// stored on appendable segment B.
// Because of flush ordering segment B (appendable) is flushed before segment A
// (not-appendable). If the copy-on-write operation happens in between, the point is
// deleted from A, but the new point in B is not persisted. We cannot recover this by
// replaying the WAL in case of a crash because the point in A does not exist anymore,
// making copy-on-write impossible.
// Locking all segments prevents copy-on-write operations from occurring in between
// flushes.
//
// WARNING: Ordering is very important here. Specifically:
// - We MUST lock non-appendable first, then appendable.
// - We MUST flush appendable first, then non-appendable
// Because of this, two rev(erse) calls are used below here.
//
// Locking must happen in this order because `apply_points_to_appendable` can take two
// write locks, also in this order. If we'd use different ordering we will eventually end
// up with a deadlock.
let mut segment_reads: Vec<_> = segments
.iter()
.rev()
.map(|segment| Self::aloha_lock_segment_read(segment))
.collect();
segment_reads.reverse();
// Re-sort segments for flush ordering also considering proxies, required to guarantee data consistency
segment_reads.sort_by_cached_key(|segment| segment.flush_ordering());
if !sync && self.is_background_flushing() {
// There is already a background flush ongoing, return current max persisted version
return Ok(self.get_max_persisted_version(segment_reads, lock_order));
}
// This lock also prevents multiple parallel sync flushes
// as it is exclusive
let mut background_flush_lock = self.lock_flushing()?;
if sync {
for read_segment in segment_reads.iter() {
read_segment.flush(force)?;
}
} else {
let flushers: Vec<_> = segment_reads
.iter()
.filter_map(|read_segment| read_segment.flusher(force))
.collect();
*background_flush_lock = Some(
std::thread::Builder::new()
.name("background_flush".to_string())
.spawn(move || {
for flusher in flushers {
flusher()?;
}
Ok(())
})
.unwrap(),
);
}
Ok(self.get_max_persisted_version(segment_reads, lock_order))
}
/// Defines naive flush ordering for segments.
///
/// Flush appendable segments first, then non-appendable.
/// This is done to ensure that all data, transferred from non-appendable segments to appendable segments
/// is persisted, before marking records in non-appendable segments as removed.
fn segment_flush_ordering(&self) -> impl Iterator<Item = SegmentId> {
let appendable_segments = self.appendable_segments_ids();
let non_appendable_segments = self.non_appendable_segments_ids();
appendable_segments
.into_iter()
.chain(non_appendable_segments)
}
// Joins flush thread if exists
// Returns lock to guarantee that there will be no other flush in a different thread
pub(super) fn lock_flushing(
&self,
) -> OperationResult<parking_lot::MutexGuard<'_, Option<JoinHandle<OperationResult<()>>>>> {
let mut lock = self.flush_thread.lock();
let mut join_handle: Option<JoinHandle<OperationResult<()>>> = None;
std::mem::swap(&mut join_handle, &mut lock);
if let Some(join_handle) = join_handle {
// Flush result was reported to segment, so we don't need this value anymore
let flush_result = join_handle
.join()
.map_err(|_err| OperationError::service_error("failed to join flush thread"))?;
flush_result.map_err(|err| {
OperationError::service_error(format!("last background flush failed: {err}"))
})?;
}
Ok(lock)
}
pub(super) fn is_background_flushing(&self) -> bool {
let lock = self.flush_thread.lock();
if let Some(join_handle) = lock.as_ref() {
!join_handle.is_finished()
} else {
false
}
}
/// Calculates the version of the segments that is safe to acknowledge in WAL
///
/// If there are unsaved changes after flush - detects lowest unsaved change version.
/// If all changes are saved - returns max version.
fn get_max_persisted_version(
&self,
segment_reads: Vec<RwLockReadGuard<'_, dyn SegmentEntry>>,
lock_order: Vec<SegmentId>,
) -> SeqNumberType {
// Start with the max_persisted_vesrion at the set overwrite value, which may just be 0
// Any of the segments we flush may increase this if they have a higher persisted version
// The overwrite is required to ensure we acknowledge no-op operations in WAL that didn't hit any segment
//
// Only affects returned version if all changes are saved
let mut max_persisted_version: SeqNumberType = self
.max_persisted_segment_version_overwrite
.load(Ordering::Relaxed);
let mut min_unsaved_version: SeqNumberType = SeqNumberType::MAX;
let mut has_unsaved = false;
for (read_segment, segment_id) in segment_reads.into_iter().zip(lock_order.into_iter()) {
let segment_version = read_segment.version();
let segment_persisted_version = read_segment.persistent_version();
log::trace!(
"Flushed segment {segment_id}:{:?} version: {segment_version} to persisted: {segment_persisted_version}",
&read_segment.data_path(),
);
if segment_version > segment_persisted_version {
has_unsaved = true;
min_unsaved_version = min(min_unsaved_version, segment_persisted_version);
}
max_persisted_version = max(max_persisted_version, segment_persisted_version);
drop(read_segment);
}
if has_unsaved {
log::trace!(
"Some segments have unsaved changes, lowest unsaved version: {min_unsaved_version}"
);
min_unsaved_version
} else {
log::trace!(
"All segments flushed successfully, max persisted version: {max_persisted_version}"
);
max_persisted_version
}
}
/// Grab the RwLock's for all the given segment IDs.
fn segment_locks(
&self,
segment_ids: impl IntoIterator<Item = SegmentId>,
) -> OperationResult<Vec<&RwLock<dyn SegmentEntry>>> {
segment_ids
.into_iter()
.map(|segment_id| {
self.get(segment_id)
.ok_or_else(|| {
OperationError::service_error(format!("No segment with ID {segment_id}"))
})
.map(LockedSegment::get)
})
.collect()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/segment_holder/tests.rs | lib/shard/src/segment_holder/tests.rs | use std::collections::HashMap;
use std::str::FromStr;
use rand::Rng;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, VectorInternal};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{Distance, PayloadContainer};
use serde_json::Value;
use tempfile::Builder;
use super::*;
use crate::fixtures::*;
#[test]
fn test_add_and_swap() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = build_segment_1(dir.path());
let segment2 = build_segment_2(dir.path());
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
assert_ne!(sid1, sid2);
let segment3 = build_simple_segment(dir.path(), 4, Distance::Dot).unwrap();
let (_sid3, replaced_segments) = holder.swap_new(segment3, &[sid1, sid2]);
replaced_segments
.into_iter()
.for_each(|s| s.drop_data().unwrap());
}
#[rstest::rstest]
#[case::do_update_nonappendable(true)]
#[case::dont_update_nonappendable(false)]
fn test_apply_to_appendable(#[case] update_nonappendable: bool) {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = build_segment_1(dir.path());
let mut segment2 = build_segment_2(dir.path());
segment2.appendable_flag = false;
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
let op_num = 100;
let mut processed_points: Vec<PointIdType> = vec![];
let mut processed_points2: Vec<PointIdType> = vec![];
holder
.apply_points_with_conditional_move(
op_num,
&[1.into(), 2.into(), 11.into(), 12.into()],
|point_id, segment| {
processed_points.push(point_id);
assert!(segment.has_point(point_id));
Ok(true)
},
|point_id, _, _| processed_points2.push(point_id),
|_| update_nonappendable,
&HardwareCounterCell::new(),
)
.unwrap();
assert_eq!(4, processed_points.len() + processed_points2.len());
let locked_segment_1 = holder.get(sid1).unwrap().get();
let read_segment_1 = locked_segment_1.read();
let locked_segment_2 = holder.get(sid2).unwrap().get();
let read_segment_2 = locked_segment_2.read();
for i in processed_points2.iter() {
assert!(read_segment_1.has_point(*i));
}
assert!(read_segment_1.has_point(1.into()));
assert!(read_segment_1.has_point(2.into()));
// Points moved or not moved on apply based on appendable flag
assert_eq!(read_segment_1.has_point(11.into()), !update_nonappendable);
assert_eq!(read_segment_1.has_point(12.into()), !update_nonappendable);
assert_eq!(read_segment_2.has_point(11.into()), update_nonappendable);
assert_eq!(read_segment_2.has_point(12.into()), update_nonappendable);
}
/// Test applying points and conditionally moving them if operation versions are off
///
/// More specifically, this tests the move is still applied correctly even if segments already
/// have a newer version. That very situation can happen when replaying the WAL after a crash
/// where only some of the segments have been flushed properly.
///
/// Before <https://github.com/qdrant/qdrant/pull/4060> the copy and delete operation to move a
/// point to another segment may only be partially executed if an operation ID was given that
/// is older than the current segment version. It resulted in missing points. This test asserts
/// this cannot happen anymore.
#[rstest::rstest]
#[case::segments_older(false, false)]
#[case::non_appendable_newer_appendable_older(true, false)]
#[case::non_appendable_older_appendable_newer(false, true)]
#[case::segments_newer(true, true)]
fn test_apply_and_move_old_versions(
#[case] segment_1_high_version: bool,
#[case] segment_2_high_version: bool,
) {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = build_segment_1(dir.path());
let mut segment2 = build_segment_2(dir.path());
let hw_counter = HardwareCounterCell::new();
// Insert operation 100 with point 123 and 456 into segment 1, and 789 into segment 2
segment1
.upsert_point(
100,
123.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 1.0, 2.0, 3.0]),
&hw_counter,
)
.unwrap();
segment1
.upsert_point(
100,
456.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 1.0, 2.0, 3.0]),
&hw_counter,
)
.unwrap();
segment2
.upsert_point(
100,
789.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 1.0, 2.0, 3.0]),
&hw_counter,
)
.unwrap();
// Bump segment version of segment 1 and/or 2 to a high value
// Here we insert a random point to achieve this, normally this could happen on restart if
// segments are not all flushed at the same time
if segment_1_high_version {
segment1
.upsert_point(
99999,
99999.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
&hw_counter,
)
.unwrap();
}
if segment_2_high_version {
segment2
.upsert_point(
99999,
99999.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 0.0, 0.0, 0.0]),
&hw_counter,
)
.unwrap();
}
// Segment 1 is non-appendable, segment 2 is appendable
segment1.appendable_flag = false;
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
// Update point 123, 456 and 789 in the non-appendable segment to move it to segment 2
let op_num = 101;
let mut processed_points: Vec<PointIdType> = vec![];
let mut processed_points2: Vec<PointIdType> = vec![];
holder
.apply_points_with_conditional_move(
op_num,
&[123.into(), 456.into(), 789.into()],
|point_id, segment| {
processed_points.push(point_id);
assert!(segment.has_point(point_id));
Ok(true)
},
|point_id, _, _| processed_points2.push(point_id),
|_| false,
&hw_counter,
)
.unwrap();
assert_eq!(3, processed_points.len() + processed_points2.len());
let locked_segment_1 = holder.get(sid1).unwrap().get();
let read_segment_1 = locked_segment_1.read();
let locked_segment_2 = holder.get(sid2).unwrap().get();
let read_segment_2 = locked_segment_2.read();
for i in processed_points2.iter() {
assert!(read_segment_2.has_point(*i));
}
// Point 123 and 456 should have moved from segment 1 into 2
assert!(!read_segment_1.has_point(123.into()));
assert!(!read_segment_1.has_point(456.into()));
assert!(!read_segment_1.has_point(789.into()));
assert!(read_segment_2.has_point(123.into()));
assert!(read_segment_2.has_point(456.into()));
assert!(read_segment_2.has_point(789.into()));
}
#[test]
fn test_cow_operation() {
const PAYLOAD_KEY: &str = "test-value";
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = build_segment_1(dir.path());
let mut segment2 = build_segment_1(dir.path());
let hw_counter = HardwareCounterCell::new();
segment2
.upsert_point(
100,
123.into(),
segment::data_types::vectors::only_default_vector(&[0.0, 1.0, 2.0, 3.0]),
&hw_counter,
)
.unwrap();
let mut payload = Payload::default();
payload.0.insert(PAYLOAD_KEY.to_string(), 42.into());
segment2
.set_full_payload(100, 123.into(), &payload, &hw_counter)
.unwrap();
segment2.appendable_flag = false;
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
{
let locked_segment_1 = holder.get(sid1).unwrap().get();
let read_segment_1 = locked_segment_1.read();
assert!(!read_segment_1.has_point(123.into()));
let locked_segment_2 = holder.get(sid2).unwrap().get();
let read_segment_2 = locked_segment_2.read();
assert!(read_segment_2.has_point(123.into()));
let vector = read_segment_2
.vector(DEFAULT_VECTOR_NAME, 123.into(), &hw_counter)
.unwrap()
.unwrap();
assert_ne!(vector, VectorInternal::Dense(vec![9.0; 4]));
assert_eq!(
read_segment_2
.payload(123.into(), &hw_counter)
.unwrap()
.get_value(&JsonPath::from_str(PAYLOAD_KEY).unwrap())[0],
&Value::from(42)
);
}
holder
.apply_points_with_conditional_move(
1010,
&[123.into()],
|_, _| unreachable!(),
|_point_id, vectors, payload| {
vectors.insert(
DEFAULT_VECTOR_NAME.to_owned(),
VectorInternal::Dense(vec![9.0; 4]),
);
payload.0.insert(PAYLOAD_KEY.to_string(), 2.into());
},
|_| false,
&hw_counter,
)
.unwrap();
let locked_segment_1 = holder.get(sid1).unwrap().get();
let read_segment_1 = locked_segment_1.read();
assert!(read_segment_1.has_point(123.into()));
let new_vector = read_segment_1
.vector(DEFAULT_VECTOR_NAME, 123.into(), &hw_counter)
.unwrap()
.unwrap();
assert_eq!(new_vector, VectorInternal::Dense(vec![9.0; 4]));
let new_payload_value = read_segment_1.payload(123.into(), &hw_counter).unwrap();
assert_eq!(
new_payload_value.get_value(&JsonPath::from_str(PAYLOAD_KEY).unwrap())[0],
&Value::from(2)
);
}
#[test]
fn test_points_deduplication() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = build_segment_1(dir.path());
let mut segment2 = build_segment_1(dir.path());
let hw_counter = HardwareCounterCell::new();
segment1
.set_payload(100, 1.into(), &payload_json! {}, &None, &hw_counter)
.unwrap();
segment1
.set_payload(100, 2.into(), &payload_json! {}, &None, &hw_counter)
.unwrap();
segment2
.set_payload(200, 4.into(), &payload_json! {}, &None, &hw_counter)
.unwrap();
segment2
.set_payload(200, 5.into(), &payload_json! {}, &None, &hw_counter)
.unwrap();
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
let res = deduplicate_points_sync(&holder).unwrap();
assert_eq!(5, res);
assert!(holder.get(sid1).unwrap().get().read().has_point(1.into()));
assert!(holder.get(sid1).unwrap().get().read().has_point(2.into()));
assert!(!holder.get(sid2).unwrap().get().read().has_point(1.into()));
assert!(!holder.get(sid2).unwrap().get().read().has_point(2.into()));
assert!(holder.get(sid2).unwrap().get().read().has_point(4.into()));
assert!(holder.get(sid2).unwrap().get().read().has_point(5.into()));
assert!(!holder.get(sid1).unwrap().get().read().has_point(4.into()));
assert!(!holder.get(sid1).unwrap().get().read().has_point(5.into()));
}
/// Unit test for a specific bug we caught before.
///
/// See: <https://github.com/qdrant/qdrant/pull/5585>
#[test]
fn test_points_deduplication_bug() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment1 = empty_segment(dir.path());
let mut segment2 = empty_segment(dir.path());
let hw_counter = HardwareCounterCell::new();
segment1
.upsert_point(
2,
10.into(),
segment::data_types::vectors::only_default_vector(&[0.0; 4]),
&hw_counter,
)
.unwrap();
segment2
.upsert_point(
3,
10.into(),
segment::data_types::vectors::only_default_vector(&[0.0; 4]),
&hw_counter,
)
.unwrap();
segment1
.upsert_point(
1,
11.into(),
segment::data_types::vectors::only_default_vector(&[0.0; 4]),
&hw_counter,
)
.unwrap();
segment2
.upsert_point(
2,
11.into(),
segment::data_types::vectors::only_default_vector(&[0.0; 4]),
&hw_counter,
)
.unwrap();
let mut holder = SegmentHolder::default();
let sid1 = holder.add_new(segment1);
let sid2 = holder.add_new(segment2);
let duplicate_count = holder
.find_duplicated_points()
.values()
.map(|ids| ids.len())
.sum::<usize>();
assert_eq!(2, duplicate_count);
let removed_count = deduplicate_points_sync(&holder).unwrap();
assert_eq!(2, removed_count);
assert!(!holder.get(sid1).unwrap().get().read().has_point(10.into()));
assert!(holder.get(sid2).unwrap().get().read().has_point(10.into()));
assert!(!holder.get(sid1).unwrap().get().read().has_point(11.into()));
assert!(holder.get(sid2).unwrap().get().read().has_point(11.into()));
assert_eq!(
holder
.get(sid1)
.unwrap()
.get()
.read()
.available_point_count(),
0,
);
assert_eq!(
holder
.get(sid2)
.unwrap()
.get()
.read()
.available_point_count(),
2,
);
}
#[test]
fn test_points_deduplication_randomized() {
const POINT_COUNT: usize = 1000;
let mut rand = rand::rng();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let vector = segment::data_types::vectors::only_default_vector(&[0.0; 4]);
let hw_counter = HardwareCounterCell::new();
let mut segments = [
empty_segment(dir.path()),
empty_segment(dir.path()),
empty_segment(dir.path()),
empty_segment(dir.path()),
empty_segment(dir.path()),
];
// Insert points into all segments with random versions
let mut highest_point_version = HashMap::new();
for id in 0..POINT_COUNT {
let mut max_version = 0;
let point_id = PointIdType::from(id as u64);
for segment in &mut segments {
let version = rand.random_range(1..10);
segment
.upsert_point(version, point_id, vector.clone(), &hw_counter)
.unwrap();
max_version = version.max(max_version);
}
highest_point_version.insert(id, max_version);
}
// Put segments into holder
let mut holder = SegmentHolder::default();
let segment_ids = segments
.into_iter()
.map(|segment| holder.add_new(segment))
.collect::<Vec<_>>();
let duplicate_count = holder
.find_duplicated_points()
.values()
.map(|ids| ids.len())
.sum::<usize>();
assert_eq!(POINT_COUNT * (segment_ids.len() - 1), duplicate_count);
let removed_count = deduplicate_points_sync(&holder).unwrap();
assert_eq!(POINT_COUNT * (segment_ids.len() - 1), removed_count);
// Assert points after deduplication
for id in 0..POINT_COUNT {
let point_id = PointIdType::from(id as u64);
let max_version = highest_point_version[&id];
let found_versions = segment_ids
.iter()
.filter_map(|segment_id| {
holder
.get(*segment_id)
.unwrap()
.get()
.read()
.point_version(point_id)
})
.collect::<Vec<_>>();
// We must have exactly one version, and it must be the highest we inserted
assert_eq!(
found_versions.len(),
1,
"point version must be maximum known version",
);
assert_eq!(
found_versions[0], max_version,
"point version must be maximum known version",
);
}
}
fn deduplicate_points_sync(holder: &SegmentHolder) -> OperationResult<usize> {
let mut removed_points = 0;
for task in holder.deduplicate_points_tasks() {
removed_points += task()?;
}
Ok(removed_points)
}
#[test]
fn test_double_proxies() {
let hw_counter = HardwareCounterCell::disposable();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment1 = build_segment_1(dir.path());
let mut holder = SegmentHolder::default();
let locked_segment1 = LockedSegment::from(segment1);
let _sid1 = holder.add_new_locked(locked_segment1.clone());
let holder = Arc::new(RwLock::new(holder));
let before_segment_ids = holder
.read()
.iter()
.map(|(id, _)| *id)
.collect::<HashSet<_>>();
let segments_dir = Builder::new().prefix("segments_dir").tempdir().unwrap();
let payload_schema_file = dir.path().join("payload.schema");
let schema: Arc<SaveOnDisk<PayloadIndexSchema>> =
Arc::new(SaveOnDisk::load_or_init_default(payload_schema_file).unwrap());
let (inner_proxies, inner_tmp_segment, inner_segments_lock) =
SegmentHolder::proxy_all_segments(
holder.upgradable_read(),
segments_dir.path(),
None,
schema.clone(),
)
.unwrap();
// check inner proxy contains points
let points = inner_proxies[0]
.1
.get()
.read()
.read_range(Some(1.into()), None);
assert_eq!(&points, &[1.into(), 2.into(), 3.into(), 4.into(), 5.into()]);
// Writing to inner proxy segment
inner_proxies[0]
.1
.get()
.write()
.delete_point(10, 1.into(), &hw_counter)
.unwrap();
let (outer_proxies, outer_tmp_segment, outer_segments_lock) =
SegmentHolder::proxy_all_segments(inner_segments_lock, segments_dir.path(), None, schema)
.unwrap();
let mut has_point = false;
for (_proxy_id, proxy) in &outer_proxies {
let proxy_read = proxy.get().read();
if proxy_read.has_point(2.into()) {
has_point = true;
let payload = proxy_read.payload(2.into(), &hw_counter).unwrap();
assert!(
payload.0.get("color").is_some(),
"Payload should be readable in double proxy"
);
drop(proxy_read);
proxy
.get()
.write()
.delete_point(11, 2.into(), &hw_counter)
.unwrap();
break;
}
}
assert!(has_point, "Point should be present in double proxy");
// Unproxy once
SegmentHolder::unproxy_all_segments(outer_segments_lock, outer_proxies, outer_tmp_segment)
.unwrap();
// Unproxy twice
SegmentHolder::unproxy_all_segments(holder.upgradable_read(), inner_proxies, inner_tmp_segment)
.unwrap();
let after_segment_ids = holder
.read()
.iter()
.map(|(id, _)| *id)
.collect::<HashSet<_>>();
// Check that we have one new segment
let diff: HashSet<_> = after_segment_ids.difference(&before_segment_ids).collect();
assert_eq!(
diff.len(),
0,
"There should be no new segment after unproxying"
);
let has_point_1 = locked_segment1.get().read().has_point(1.into()); // Deleted in inner proxy
let has_point_2 = locked_segment1.get().read().has_point(2.into()); // Deleted in outer proxy
let has_point_3 = locked_segment1.get().read().has_point(3.into()); // Not deleted
assert!(!has_point_1, "Point 1 should be deleted");
assert!(!has_point_2, "Point 2 should be deleted");
assert!(has_point_3, "Point 3 should be present");
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/segment_holder/mod.rs | lib/shard/src/segment_holder/mod.rs | mod flush;
mod snapshot;
#[cfg(test)]
mod tests;
use std::cmp::{max, min};
use std::collections::hash_map::Entry;
use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashSet};
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::thread::JoinHandle;
use std::time::Duration;
use ahash::{AHashMap, AHashSet};
use common::counter::hardware_counter::HardwareCounterCell;
use common::iterator_ext::IteratorExt;
use common::save_on_disk::SaveOnDisk;
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};
use rand::seq::IndexedRandom;
use segment::common::operation_error::{OperationError, OperationResult};
use segment::data_types::named_vectors::NamedVectors;
use segment::entry::entry_point::SegmentEntry;
use segment::segment_constructor::build_segment;
use segment::types::{ExtendedPointId, Payload, PointIdType, SegmentConfig, SeqNumberType};
use smallvec::{SmallVec, smallvec};
use crate::locked_segment::LockedSegment;
use crate::payload_index_schema::PayloadIndexSchema;
pub type SegmentId = usize;
/// Internal structure for deduplication of points. Used for BinaryHeap
#[derive(Eq, PartialEq)]
struct DedupPoint {
point_id: PointIdType,
segment_id: SegmentId,
}
impl Ord for DedupPoint {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.point_id.cmp(&other.point_id).reverse()
}
}
impl PartialOrd for DedupPoint {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
#[derive(Debug, Default)]
pub struct SegmentHolder {
/// Keep segments sorted by their ID for deterministic iteration order
appendable_segments: BTreeMap<SegmentId, LockedSegment>,
non_appendable_segments: BTreeMap<SegmentId, LockedSegment>,
/// Source for unique (virtual) IDs for newly added segments
id_source: AtomicUsize,
/// Seq number of the first un-recovered operation.
/// If there are no failed operation - None
pub failed_operation: BTreeSet<SeqNumberType>,
/// Holds the first uncorrected error happened with optimizer
pub optimizer_errors: Option<String>,
/// A special segment version that is usually used to keep track of manually bumped segment versions.
/// An example for this are operations that don't modify any points but could be expensive to recover from during WAL recovery.
/// To acknowledge them in WAL, we overwrite the max_persisted value in `Self::flush_all` with the segment version stored here.
max_persisted_segment_version_overwrite: AtomicU64,
/// Holder for a thread, which does flushing of all segments sequentially.
/// This is used to avoid multiple concurrent flushes.
pub flush_thread: Mutex<Option<JoinHandle<OperationResult<()>>>>,
}
impl Drop for SegmentHolder {
fn drop(&mut self) {
if let Err(flushing_err) = self.lock_flushing() {
log::error!("Failed to flush segments holder during drop: {flushing_err}");
}
}
}
pub type LockedSegmentHolder = Arc<RwLock<SegmentHolder>>;
impl SegmentHolder {
/// Iterate over all segments with their IDs
///
/// Appendable first, then non-appendable.
pub fn iter(&self) -> impl Iterator<Item = (&SegmentId, &LockedSegment)> {
self.appendable_segments
.iter()
.chain(self.non_appendable_segments.iter())
}
pub fn len(&self) -> usize {
self.appendable_segments.len() + self.non_appendable_segments.len()
}
pub fn is_empty(&self) -> bool {
self.appendable_segments.is_empty() && self.non_appendable_segments.is_empty()
}
fn generate_new_key(&self) -> SegmentId {
let key: SegmentId = self.id_source.fetch_add(1, Ordering::SeqCst);
if self.get(key).is_some() {
debug_assert!(false, "generated new key that already exists");
self.generate_new_key()
} else {
key
}
}
/// Add new segment to storage
///
/// The segment gets assigned a new unique ID.
pub fn add_new<T>(&mut self, segment: T) -> SegmentId
where
T: Into<LockedSegment>,
{
let segment_id = self.generate_new_key();
self.add_existing(segment_id, segment);
segment_id
}
/// Add new segment to storage which is already LockedSegment
///
/// The segment gets assigned a new unique ID.
pub fn add_new_locked(&mut self, segment: LockedSegment) -> SegmentId {
let segment_id = self.generate_new_key();
self.add_existing_locked(segment_id, segment);
segment_id
}
/// Add an existing segment to storage
///
/// The segment gets the provided ID, which must not be in the segment holder yet.
pub fn add_existing<T>(&mut self, segment_id: SegmentId, segment: T)
where
T: Into<LockedSegment>,
{
let locked_segment = segment.into();
self.add_existing_locked(segment_id, locked_segment);
}
/// Add an existing segment to storage which is already LockedSegment
///
/// The segment gets the provided ID, which must not be in the segment holder yet.
pub fn add_existing_locked(&mut self, segment_id: SegmentId, segment: LockedSegment) {
debug_assert!(
self.get(segment_id).is_none(),
"cannot add segment with ID {segment_id}, it already exists",
);
if segment.get().read().is_appendable() {
self.appendable_segments.insert(segment_id, segment);
} else {
self.non_appendable_segments.insert(segment_id, segment);
}
}
pub fn remove(&mut self, remove_ids: &[SegmentId]) -> Vec<LockedSegment> {
let mut removed_segments = vec![];
for remove_id in remove_ids {
let removed_segment = self.appendable_segments.remove(remove_id);
if let Some(segment) = removed_segment {
removed_segments.push(segment);
}
let removed_segment = self.non_appendable_segments.remove(remove_id);
if let Some(segment) = removed_segment {
removed_segments.push(segment);
}
}
removed_segments
}
/// Replace old segments with a new one
///
/// # Arguments
///
/// * `segment` - segment to insert
/// * `remove_ids` - ids of segments to replace
///
/// # Result
///
/// Pair of (id of newly inserted segment, Vector of replaced segments)
///
/// The inserted segment gets assigned a new unique ID.
pub fn swap_new<T>(
&mut self,
segment: T,
remove_ids: &[SegmentId],
) -> (SegmentId, Vec<LockedSegment>)
where
T: Into<LockedSegment>,
{
let new_id = self.add_new(segment);
(new_id, self.remove(remove_ids))
}
pub fn swap_new_locked(
&mut self,
segment: LockedSegment,
remove_ids: &[SegmentId],
) -> (SegmentId, Vec<LockedSegment>) {
let new_id = self.add_new_locked(segment);
(new_id, self.remove(remove_ids))
}
/// Replace an existing segment
///
/// # Arguments
///
/// * `segment_id` - segment ID to replace
/// * `segment` - segment to replace with
///
/// # Result
///
/// Returns the replaced segment. Errors if the segment ID did not exist.
pub fn replace<T>(
&mut self,
segment_id: SegmentId,
segment: T,
) -> OperationResult<LockedSegment>
where
T: Into<LockedSegment>,
{
// Remove existing segment, check precondition
let mut removed = self.remove(&[segment_id]);
if removed.is_empty() {
return Err(OperationError::service_error(
"cannot replace segment with ID {segment_id}, it does not exists",
));
}
debug_assert_eq!(removed.len(), 1);
self.add_existing(segment_id, segment);
Ok(removed.pop().unwrap())
}
pub fn get(&self, id: SegmentId) -> Option<&LockedSegment> {
self.appendable_segments
.get(&id)
.or_else(|| self.non_appendable_segments.get(&id))
}
pub fn has_appendable_segment(&self) -> bool {
!self.appendable_segments.is_empty()
}
/// Get all locked segments, non-appendable first, then appendable.
pub fn non_appendable_then_appendable_segments(&self) -> impl Iterator<Item = LockedSegment> {
self.non_appendable_segments
.values()
.chain(self.appendable_segments.values())
.cloned()
}
/// Get two separate lists for non-appendable and appendable locked segments
pub fn split_segments(&self) -> (Vec<LockedSegment>, Vec<LockedSegment>) {
(
self.non_appendable_segments.values().cloned().collect(),
self.appendable_segments.values().cloned().collect(),
)
}
/// Return appendable segment IDs sorted by IDs
pub fn appendable_segments_ids(&self) -> Vec<SegmentId> {
self.appendable_segments.keys().copied().collect()
}
/// Return non-appendable segment IDs sorted by IDs
pub fn non_appendable_segments_ids(&self) -> Vec<SegmentId> {
self.non_appendable_segments.keys().copied().collect()
}
/// Suggests a new maximum persisted segment version when calling `flush_all`. This can be used to make WAL acknowledge no-op operations,
/// so we don't replay them on startup. This is especially helpful if the no-op operation is computational expensive and could cause
/// WAL replay, and thus Qdrant startup, take a significant amount of time.
pub fn bump_max_segment_version_overwrite(&self, op_num: SeqNumberType) {
self.max_persisted_segment_version_overwrite
.fetch_max(op_num, Ordering::Relaxed);
}
pub fn segment_ids(&self) -> Vec<SegmentId> {
self.appendable_segments_ids()
.into_iter()
.chain(self.non_appendable_segments_ids())
.collect()
}
/// Get a random appendable segment
///
/// If you want the smallest segment, use `random_appendable_segment_with_capacity` instead.
pub fn random_appendable_segment(&self) -> Option<LockedSegment> {
    let ids = self.appendable_segments_ids();
    // `choose` returns `None` only when there are no appendable segments.
    let id = ids.choose(&mut rand::rng())?;
    self.appendable_segments.get(id).cloned()
}
/// Get the smallest appendable segment
///
/// The returned segment likely has the most capacity for new points, which will help balance
/// new incoming data over all segments we have.
///
/// This attempts a non-blocking read-lock on all segments to find the smallest one. Segments
/// that cannot be read-locked at this time are skipped. If no segment can be read-locked at
/// all, a random one is returned.
///
/// If capacity is not important use `random_appendable_segment` instead because it is cheaper.
pub fn smallest_appendable_segment(&self) -> Option<LockedSegment> {
    let segment_ids: Vec<_> = self.appendable_segments_ids();
    // Try a non-blocking read lock on all segments and return the smallest one
    let smallest_segment = segment_ids
        .iter()
        .filter_map(|segment_id| self.get(*segment_id))
        .filter_map(|locked_segment| {
            // `try_read()` yields `Option<...>`; the trailing `?` skips segments
            // that are currently locked. The inner `Result` is the size lookup.
            match locked_segment
                .get()
                .try_read()
                .map(|segment| segment.max_available_vectors_size_in_bytes())?
            {
                Ok(size) => Some((locked_segment, size)),
                Err(err) => {
                    // Size lookup failed; skip this segment rather than abort
                    log::error!("Failed to get segment size, ignoring: {err}");
                    None
                }
            }
        })
        .min_by_key(|(_, segment_size)| *segment_size);
    if let Some((smallest_segment, _)) = smallest_segment {
        return Some(LockedSegment::clone(smallest_segment));
    }
    // Fall back to picking a random segment
    segment_ids
        .choose(&mut rand::rng())
        .and_then(|idx| self.appendable_segments.get(idx).cloned())
}
/// Of the given point IDs, keep only those present in `segment`.
fn segment_points(ids: &[PointIdType], segment: &dyn SegmentEntry) -> Vec<PointIdType> {
    ids.iter()
        .filter(|id| segment.has_point(**id))
        .cloned()
        .collect()
}
/// Select what point IDs to update and delete in each segment
///
/// Each external point ID might have multiple point versions across all segments.
///
/// This finds all point versions and groups them per segment. The newest point versions are
/// selected to be updated, all older versions are marked to be deleted.
///
/// Points that are already soft deleted are not included.
///
/// Returns a pair of maps: `(segment ID -> points to update, segment ID -> points to delete)`.
fn find_points_to_update_and_delete(
    &self,
    ids: &[PointIdType],
) -> (
    AHashMap<SegmentId, Vec<PointIdType>>,
    AHashMap<SegmentId, Vec<PointIdType>>,
) {
    let mut to_delete: AHashMap<SegmentId, Vec<PointIdType>> = AHashMap::new();
    // Find in which segments latest point versions are located, mark older points for deletion
    // SmallVec with inline capacity 1: presumably a point usually lives in a
    // single segment, so the segment-ID list avoids a heap allocation.
    let mut latest_points: AHashMap<PointIdType, (SeqNumberType, SmallVec<[SegmentId; 1]>)> =
        AHashMap::with_capacity(ids.len());
    for (segment_id, segment) in self.iter() {
        let segment_arc = segment.get();
        let segment_lock = segment_arc.read();
        // Only consider IDs that actually exist in this segment
        let segment_points = Self::segment_points(ids, segment_lock.deref());
        for segment_point in segment_points {
            let Some(point_version) = segment_lock.point_version(segment_point) else {
                continue;
            };
            match latest_points.entry(segment_point) {
                // First time we see the point, add it
                Entry::Vacant(entry) => {
                    entry.insert((point_version, smallvec![*segment_id]));
                }
                // Point we have seen before is older, replace it and mark older for deletion
                Entry::Occupied(mut entry) if entry.get().0 < point_version => {
                    let (old_version, old_segment_ids) =
                        entry.insert((point_version, smallvec![*segment_id]));
                    // Mark other point for deletion if the version is older
                    // TODO(timvisee): remove this check once deleting old points uses correct version
                    if old_version < point_version {
                        for old_segment_id in old_segment_ids {
                            to_delete
                                .entry(old_segment_id)
                                .or_default()
                                .push(segment_point);
                        }
                    }
                }
                // Ignore points with the same version, only update one of them
                // TODO(timvisee): remove this branch once deleting old points uses correct version
                Entry::Occupied(mut entry) if entry.get().0 == point_version => {
                    entry.get_mut().1.push(*segment_id);
                }
                // Point we have seen before is newer, mark this point for deletion
                Entry::Occupied(_) => {
                    to_delete
                        .entry(*segment_id)
                        .or_default()
                        .push(segment_point);
                }
            }
        }
    }
    // Group points to update by segments
    let segment_count = self.len();
    let mut to_update = AHashMap::with_capacity(min(segment_count, latest_points.len()));
    // Heuristic pre-allocation: assume points spread roughly evenly over half
    // of the segments.
    let default_capacity = ids.len() / max(segment_count / 2, 1);
    for (point_id, (_point_version, segment_ids)) in latest_points {
        for segment_id in segment_ids {
            to_update
                .entry(segment_id)
                .or_insert_with(|| Vec::with_capacity(default_capacity))
                .push(point_id);
        }
    }
    // Assert each segment does not have overlapping updates and deletes
    debug_assert!(
        to_update
            .iter()
            .filter_map(|(segment_id, updates)| {
                to_delete.get(segment_id).map(|deletes| (updates, deletes))
            })
            .all(|(updates, deletes)| {
                let updates: HashSet<&ExtendedPointId> = HashSet::from_iter(updates);
                let deletes = HashSet::from_iter(deletes);
                updates.is_disjoint(&deletes)
            }),
        "segments should not have overlapping updates and deletes",
    );
    (to_update, to_delete)
}
/// Run `f` against a read lock of every segment in the holder.
///
/// Returns the number of segments for which `f` returned `Ok(true)`; the
/// first error from `f` aborts the iteration.
pub fn for_each_segment<F>(&self, mut f: F) -> OperationResult<usize>
where
    F: FnMut(&RwLockReadGuard<dyn SegmentEntry + 'static>) -> OperationResult<bool>,
{
    let mut applied_count = 0;
    for (_id, segment) in self.iter() {
        if f(&segment.get().read())? {
            applied_count += 1;
        }
    }
    Ok(applied_count)
}
/// Run `f` against an upgradable read lock of every segment in the holder.
///
/// Returns the number of segments for which `f` returned `Ok(true)`; the
/// first error from `f` aborts the iteration.
pub fn apply_segments<F>(&self, mut f: F) -> OperationResult<usize>
where
    F: FnMut(
        &mut RwLockUpgradableReadGuard<dyn SegmentEntry + 'static>,
    ) -> OperationResult<bool>,
{
    let mut applied_count = 0;
    for (_id, segment) in self.iter() {
        if f(&mut segment.get().upgradable_read())? {
            applied_count += 1;
        }
    }
    Ok(applied_count)
}
/// Repeatedly run `f` against a write lock of every segment until a full
/// round applies nothing.
///
/// Iterating over all segments on each round avoids pinning a single segment
/// with back-to-back sequential updates.
pub fn apply_segments_batched<F>(&self, mut f: F) -> OperationResult<()>
where
    F: FnMut(
        &mut RwLockWriteGuard<dyn SegmentEntry + 'static>,
        SegmentId,
    ) -> OperationResult<bool>,
{
    let mut keep_going = true;
    while keep_going {
        keep_going = false;
        for (segment_id, segment) in self.iter() {
            if f(&mut segment.get().write(), *segment_id)? {
                keep_going = true;
            }
        }
    }
    Ok(())
}
/// Apply an operation `point_operation` to a set of points `ids`.
///
/// A point may exist in multiple segments, having multiple versions. Depending on the kind of
/// operation, it either needs to be applied to just the latest point version, or to all of
/// them. This is controllable by the `all_point_versions` flag.
/// See: <https://github.com/qdrant/qdrant/pull/5956>
///
/// In case of operations that may do a copy-on-write, we must only apply the operation to the
/// latest point version. Otherwise our copy on write mechanism may repurpose old point data.
/// See: <https://github.com/qdrant/qdrant/pull/5528>
///
/// In case of delete operations, we must apply them to all versions of a point. Otherwise
/// future operations may revive deletions through older point versions.
///
/// The `segment_data` function is called no more than once for each segment and its result is
/// passed to `point_operation`.
///
/// Returns the number of points for which `point_operation` reported `Ok(true)`.
pub fn apply_points<T, D, O>(
    &self,
    ids: &[PointIdType],
    mut segment_data: D,
    mut point_operation: O,
) -> OperationResult<usize>
where
    D: FnMut(&dyn SegmentEntry) -> T,
    O: FnMut(
        PointIdType,
        SegmentId,
        &mut RwLockWriteGuard<dyn SegmentEntry>,
        &T,
    ) -> OperationResult<bool>,
{
    let (to_update, to_delete) = self.find_points_to_update_and_delete(ids);
    // Delete old points first, because we want to handle copy-on-write in multiple proxy segments properly
    for (segment_id, points) in to_delete {
        // Segment IDs come from iterating this holder, so the lookup must succeed
        let segment = self.get(segment_id).unwrap();
        let segment_arc = segment.get();
        let mut write_segment = segment_arc.write();
        for point_id in points {
            // Delete at the point's own (older) version rather than a new one
            if let Some(version) = write_segment.point_version(point_id) {
                write_segment.delete_point(
                    version,
                    point_id,
                    &HardwareCounterCell::disposable(), // Internal operation: no need to measure.
                )?;
            }
        }
    }
    // Apply point operations to selected segments
    let mut applied_points = 0;
    for (segment_id, points) in to_update {
        let segment = self.get(segment_id).unwrap();
        let segment_arc = segment.get();
        let mut write_segment = segment_arc.write();
        // Compute per-segment data once, while holding the write lock
        let segment_data = segment_data(write_segment.deref());
        for point_id in points {
            let is_applied =
                point_operation(point_id, segment_id, &mut write_segment, &segment_data)?;
            applied_points += usize::from(is_applied);
        }
    }
    Ok(applied_points)
}
/// Acquire a read lock on `segment`, retrying with exponentially growing
/// timeouts instead of blocking outright.
///
/// This should prevent deadlocks when multiple threads lock segments
/// sequentially. Once the per-attempt timeout reaches 10 seconds, a warning
/// is logged on every further failed attempt.
fn aloha_lock_segment_read(
    segment: &'_ RwLock<dyn SegmentEntry>,
) -> RwLockReadGuard<'_, dyn SegmentEntry> {
    let mut wait = Duration::from_nanos(100);
    loop {
        match segment.try_read_for(wait) {
            Some(guard) => break guard,
            None => {
                // Back off: double the timeout, saturating to avoid overflow
                wait = wait.saturating_mul(2);
                if wait.as_secs() >= 10 {
                    log::warn!(
                        "Trying to read-lock a segment is taking a long time. This could be a deadlock and may block new updates.",
                    );
                }
            }
        }
    }
}
/// Try to acquire write lock over random segment with increasing wait time.
/// Should prevent deadlock in case if multiple threads tries to lock segments sequentially.
///
/// Fast path: the first segment that can be write-locked without blocking is
/// used. If none can, a random candidate is picked and locked blockingly.
pub fn aloha_random_write<F>(
    &self,
    segment_ids: &[SegmentId],
    mut apply: F,
) -> OperationResult<bool>
where
    F: FnMut(SegmentId, &mut RwLockWriteGuard<dyn SegmentEntry>) -> OperationResult<bool>,
{
    if segment_ids.is_empty() {
        return Err(OperationError::service_error(
            "No appendable segments exists, expected at least one",
        ));
    }
    let mut entries: Vec<_> = Vec::with_capacity(segment_ids.len());
    // Try to access each segment first without any timeout (fast)
    for segment_id in segment_ids {
        let segment_opt = self.get(*segment_id).map(|x| x.get());
        match segment_opt {
            // Segment no longer present in the holder: skip it
            None => {}
            Some(segment_lock) => {
                match segment_lock.try_write() {
                    None => {}
                    // Uncontended segment found: apply immediately
                    Some(mut lock) => return apply(*segment_id, &mut lock),
                }
                // save segments for further lock attempts
                entries.push((*segment_id, segment_lock))
            }
        };
    }
    // Slow path: block on a random remaining candidate.
    // NOTE(review): `unwrap` panics if none of the given IDs resolved to a
    // segment above (entries empty) — presumably callers pass IDs taken from
    // this holder; confirm that segments cannot all disappear concurrently.
    let mut rng = rand::rng();
    let (segment_id, segment_lock) = entries.choose(&mut rng).unwrap();
    let mut segment_write = segment_lock.write();
    apply(*segment_id, &mut segment_write)
}
/// Apply an operation `point_operation` to a set of points `ids`, and, if necessary, move the
/// points into appendable segments.
///
/// Moving is not performed in the following cases:
/// - The segment containing the point is appendable.
/// - The `update_nonappendable` function returns true for the segment.
///
/// Otherwise, the operation is applied to the containing segment in place.
///
/// Rationale: non-appendable segments may contain immutable indexes that could be left in an
/// inconsistent state after applying the operation. When it's known that the operation will not
/// affect the indexes, `update_nonappendable` can return true to avoid moving the points as a
/// performance optimization.
///
/// It's always safe to pass a closure that always returns false (i.e. `|_| false`).
///
/// Returns set of point ids which were successfully (already) applied to segments.
///
/// # Warning
///
/// This function must not be used to apply point deletions, and [`apply_points`] must be used
/// instead. There are two reasons for this:
///
/// 1. moving a point first and deleting it after is unnecessary overhead.
/// 2. this leaves older point versions in place, which may accidentally be revived by some
///    other operation later.
pub fn apply_points_with_conditional_move<F, G, H>(
    &self,
    op_num: SeqNumberType,
    ids: &[PointIdType],
    mut point_operation: F,
    mut point_cow_operation: H,
    update_nonappendable: G,
    hw_counter: &HardwareCounterCell,
) -> OperationResult<AHashSet<PointIdType>>
where
    F: FnMut(PointIdType, &mut RwLockWriteGuard<dyn SegmentEntry>) -> OperationResult<bool>,
    for<'n, 'o, 'p> H: FnMut(PointIdType, &'n mut NamedVectors<'o>, &'p mut Payload),
    G: FnMut(&dyn SegmentEntry) -> bool,
{
    // Choose random appendable segment from this
    let appendable_segments = self.appendable_segments_ids();
    let mut applied_points: AHashSet<PointIdType> = Default::default();
    let _applied_points_count = self.apply_points(
        ids,
        update_nonappendable,
        |point_id, _idx, write_segment, &update_nonappendable| {
            // Point already has a version at or past this operation: treat it
            // as applied (idempotent replay) without touching the segment.
            if let Some(point_version) = write_segment.point_version(point_id)
                && point_version >= op_num
            {
                applied_points.insert(point_id);
                return Ok(false);
            }
            // In-place update is allowed for non-proxy segments that are
            // either appendable or explicitly whitelisted by the caller.
            let can_apply_operation = !write_segment.is_proxy()
                && (update_nonappendable || write_segment.is_appendable());
            let is_applied = if can_apply_operation {
                point_operation(point_id, write_segment)?
            } else {
                // Copy-on-write move: read the point's vectors and payload
                // from the source segment, let the caller mutate them, upsert
                // into an appendable segment, then delete from the source.
                self.aloha_random_write(
                    &appendable_segments,
                    |_appendable_idx, appendable_write_segment| {
                        let mut all_vectors =
                            write_segment.all_vectors(point_id, hw_counter)?;
                        let mut payload = write_segment.payload(point_id, hw_counter)?;
                        point_cow_operation(point_id, &mut all_vectors, &mut payload);
                        appendable_write_segment.upsert_point(
                            op_num,
                            point_id,
                            all_vectors,
                            hw_counter,
                        )?;
                        appendable_write_segment
                            .set_full_payload(op_num, point_id, &payload, hw_counter)?;
                        write_segment.delete_point(op_num, point_id, hw_counter)?;
                        Ok(true)
                    },
                )?
            };
            applied_points.insert(point_id);
            Ok(is_applied)
        },
    )?;
    Ok(applied_points)
}
/// Call `f` for every requested point found in any segment, until `is_stopped`
/// is raised. Returns the number of calls that returned `Ok(true)`.
pub fn read_points<F>(
    &self,
    ids: &[PointIdType],
    is_stopped: &AtomicBool,
    mut f: F,
) -> OperationResult<usize>
where
    F: FnMut(PointIdType, &RwLockReadGuard<dyn SegmentEntry>) -> OperationResult<bool>,
{
    // Read ordering is critical: we must visit non-appendable segments before
    // appendable ones. Points may be moved from non-appendable to appendable
    // segments concurrently, and we do not lock all segments at once.
    //
    // Consider the following sequence:
    //
    // 1. Read-lock non-appendable segment A
    // 2. Atomic move from A to B
    // 3. Read-lock appendable segment B
    //
    // With this ordering we are guaranteed to read all data consistently and
    // not lose any points.
    let mut read_points = 0;
    for segment in self.non_appendable_then_appendable_segments() {
        let segment_arc = segment.get();
        let read_segment = segment_arc.read();
        for point_id in ids.iter().cloned().stop_if(is_stopped) {
            if !read_segment.has_point(point_id) {
                continue;
            }
            if f(point_id, &read_segment)? {
                read_points += 1;
            }
        }
    }
    Ok(read_points)
}
/// Create a new appendable segment on disk and register it in this holder.
///
/// The segment configuration is sourced from the given collection parameters.
/// `save_version` is enabled so the segment is loaded again on restart.
pub fn create_appendable_segment(
    &mut self,
    segments_path: &Path,
    segment_config: SegmentConfig,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
) -> OperationResult<LockedSegment> {
    let new_segment = self.build_tmp_segment(
        segments_path,
        Some(segment_config),
        payload_index_schema,
        true,
    )?;
    self.add_new_locked(new_segment.clone());
    Ok(new_segment)
}
/// Build a temporary appendable segment, usually for proxying writes into.
///
/// The segment configuration is sourced from the given collection parameters. If none is
/// specified this will fall back and clone the configuration of any existing appendable
/// segment in the segment holder.
///
/// # Errors
///
/// Errors if:
/// - building the segment fails
/// - no segment configuration is provided, and no appendable segment is in the segment holder
///
/// # Warning
///
/// This builds a segment on disk, but does NOT add it to the current segment holder. That must
/// be done explicitly. `save_version` must be true for the segment to be loaded when Qdrant
/// restarts.
fn build_tmp_segment(
    &self,
    segments_path: &Path,
    segment_config: Option<SegmentConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
    save_version: bool,
) -> OperationResult<LockedSegment> {
    let config = match segment_config {
        // Base config on collection params
        Some(config) => config,
        // Fall back: base config on existing appendable segment
        None => self
            .random_appendable_segment()
            .ok_or_else(|| {
                OperationError::service_error(
                    "No existing segment to source temporary segment configuration from",
                )
            })?
            .get()
            .read()
            .config()
            .clone(),
    };
    let mut segment = build_segment(segments_path, &config, save_version)?;
    // Internal operation.
    let hw_counter = HardwareCounterCell::disposable();
    // Replicate the payload index schema onto the fresh segment. Operation
    // number 0 is used since the segment is brand new and has no operations.
    let payload_schema_lock = payload_index_schema.read();
    for (key, schema) in payload_schema_lock.schema.iter() {
        segment.create_field_index(0, key, Some(schema), &hw_counter)?;
    }
    Ok(LockedSegment::new(segment))
}
/// Method tries to remove the segment with the given ID under the following conditions:
///
/// - The segment exists in the holder, if not - it is ignored.
/// - The segment is a raw segment and not some special proxy segment.
/// - The segment is empty.
/// - We are not removing the last appendable segment.
///
/// Returns `true` if the segment was removed, `false` otherwise.
pub fn remove_segment_if_not_needed(&mut self, segment_id: SegmentId) -> OperationResult<bool> {
let tmp_segment = {
let mut segments = self.remove(&[segment_id]);
if segments.is_empty() {
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/shard/src/segment_holder/snapshot.rs | lib/shard/src/segment_holder/snapshot.rs | use std::path::Path;
use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::save_on_disk::SaveOnDisk;
use io::storage_version::StorageVersion;
use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard};
use segment::common::operation_error::OperationResult;
use segment::data_types::manifest::SnapshotManifest;
use segment::entry::SegmentEntry;
use segment::segment::SegmentVersion;
use segment::types::SegmentConfig;
use crate::locked_segment::LockedSegment;
use crate::payload_index_schema::PayloadIndexSchema;
use crate::proxy_segment::ProxySegment;
use crate::segment_holder::{SegmentHolder, SegmentId};
impl SegmentHolder {
/// Build a snapshot manifest by folding in the manifest contribution of
/// every segment in this holder.
pub fn snapshot_manifest(&self) -> OperationResult<SnapshotManifest> {
    let mut manifest = SnapshotManifest::default();
    for (_id, segment) in self.iter() {
        let segment_arc = segment.get();
        let read_segment = segment_arc.read();
        read_segment.collect_snapshot_manifest(&mut manifest)?;
    }
    Ok(manifest)
}
/// Proxy all shard segments for [`proxy_all_segments_and_apply`].
///
/// Builds a temporary appendable segment to absorb writes, wraps every
/// existing segment in a [`ProxySegment`], and swaps the proxies into the
/// holder under a short write lock. Returns the proxied `(id, segment)`
/// pairs, the temporary segment's ID, and the (downgraded) holder lock.
#[allow(clippy::type_complexity)]
pub fn proxy_all_segments<'a>(
    segments_lock: RwLockUpgradableReadGuard<'a, SegmentHolder>,
    segments_path: &Path,
    segment_config: Option<SegmentConfig>,
    payload_index_schema: Arc<SaveOnDisk<PayloadIndexSchema>>,
) -> OperationResult<(
    Vec<(SegmentId, LockedSegment)>,
    SegmentId,
    RwLockUpgradableReadGuard<'a, SegmentHolder>,
)> {
    // This counter will be used to measure operations on temp segment,
    // which is part of internal process and can be ignored
    let hw_counter = HardwareCounterCell::disposable();
    // Create temporary appendable segment to direct all proxy writes into
    let tmp_segment = segments_lock.build_tmp_segment(
        segments_path,
        segment_config,
        payload_index_schema,
        false,
    )?;
    // List all segments we want to snapshot
    let mut segment_ids = segments_lock.segment_ids();
    // Re-sort segments for flush ordering, required to guarantee data consistency
    // TODO: sort in a better place to not lock each segment
    segment_ids.sort_by_cached_key(|segment_id| {
        segments_lock
            .get(*segment_id)
            .unwrap()
            .get()
            .read()
            .flush_ordering()
    });
    // Create proxy for all segments
    let mut new_proxies = Vec::with_capacity(segment_ids.len());
    for segment_id in segment_ids {
        let segment = segments_lock.get(segment_id).unwrap();
        let proxy = ProxySegment::new(segment.clone());
        // Write segment is fresh, so it has no operations
        // Operation with number 0 will be applied
        proxy.replicate_field_indexes(0, &hw_counter, &tmp_segment)?;
        new_proxies.push((segment_id, proxy));
    }
    // Save segment version once all payload indices have been converted
    // If this ends up not being saved due to a crash, the segment will not be used
    match &tmp_segment {
        LockedSegment::Original(segment) => {
            let segment_path = &segment.read().current_path;
            SegmentVersion::save(segment_path)?;
        }
        // `build_tmp_segment` always returns an `Original` segment
        LockedSegment::Proxy(_) => unreachable!(),
    }
    // Replace all segments with proxies
    // We cannot fail past this point to prevent only having some segments proxified
    let mut proxies = Vec::with_capacity(new_proxies.len());
    let mut write_segments = RwLockUpgradableReadGuard::upgrade(segments_lock);
    for (segment_id, proxy) in new_proxies {
        // Replicate field indexes the second time, because optimized segments could have
        // been changed. The probability is small, though, so we can afford this operation
        // under the full collection write lock
        let op_num = proxy.version();
        if let Err(err) = proxy.replicate_field_indexes(op_num, &hw_counter, &tmp_segment) {
            log::error!("Failed to replicate proxy segment field indexes, ignoring: {err}");
        }
        // We must keep existing segment IDs because ongoing optimizations might depend on the mapping being the same
        write_segments.replace(segment_id, proxy)?;
        let locked_proxy_segment = write_segments
            .get(segment_id)
            .cloned()
            .expect("failed to get segment from segment holder we just swapped in");
        proxies.push((segment_id, locked_proxy_segment));
    }
    // Make sure at least one appendable segment exists
    let temp_segment_id = write_segments.add_new_locked(tmp_segment);
    // Give back an upgradable read lock so readers can proceed
    let segments_lock = RwLockWriteGuard::downgrade_to_upgradable(write_segments);
    Ok((proxies, temp_segment_id, segments_lock))
}
/// Try to unproxy a single shard segment for [`proxy_all_segments_and_apply`].
///
/// On success, returns the holder lock (downgraded back to upgradable read).
///
/// # Warning
///
/// If unproxying fails an error is returned with the lock and the proxy is left behind in the
/// shard holder.
pub fn try_unproxy_segment(
    segments_lock: RwLockUpgradableReadGuard<SegmentHolder>,
    segment_id: SegmentId,
    proxy_segment: LockedSegment,
) -> Result<RwLockUpgradableReadGuard<SegmentHolder>, RwLockUpgradableReadGuard<SegmentHolder>>
{
    // We must propagate all changes in the proxy into their wrapped segments, as we'll put the
    // wrapped segment back into the segment holder. This can be an expensive step if we
    // collected a lot of changes in the proxy, so we do this in two batches to prevent
    // unnecessary locking. First we propagate all changes with a read lock on the shard
    // holder, to prevent blocking other readers. Second we propagate any new changes again
    // with a write lock on the segment holder, blocking other operations. This second batch
    // should be very fast, as we already propagated all changes in the first, which is why we
    // can hold a write lock. Once done, we can swap out the proxy for the wrapped shard.
    let proxy_segment = match proxy_segment {
        LockedSegment::Proxy(proxy_segment) => proxy_segment,
        // Not a proxy: nothing to unproxy; hand the lock back as an error
        LockedSegment::Original(_) => {
            log::warn!(
                "Unproxying segment {segment_id} that is not proxified, that is unexpected, skipping",
            );
            return Err(segments_lock);
        }
    };
    // Batch 1: propagate changes to wrapped segment with segment holder read lock
    {
        if let Err(err) = proxy_segment.write().propagate_to_wrapped() {
            log::error!(
                "Propagating proxy segment {segment_id} changes to wrapped segment failed, ignoring: {err}",
            );
        }
    }
    let mut write_segments = RwLockUpgradableReadGuard::upgrade(segments_lock);
    // Batch 2: propagate changes to wrapped segment with segment holder write lock
    // Propagate proxied changes to wrapped segment, take it out and swap with proxy
    // Important: put the wrapped segment back with its original segment ID
    let wrapped_segment = {
        let mut proxy_segment = proxy_segment.write();
        if let Err(err) = proxy_segment.propagate_to_wrapped() {
            log::error!(
                "Propagating proxy segment {segment_id} changes to wrapped segment failed, ignoring: {err}",
            );
        }
        proxy_segment.wrapped_segment.clone()
    };
    write_segments.replace(segment_id, wrapped_segment).unwrap();
    // Downgrade write lock to read and give it back
    Ok(RwLockWriteGuard::downgrade_to_upgradable(write_segments))
}
/// Unproxy all shard segments for [`proxy_all_segments_and_apply`].
///
/// Swaps every proxy back for its wrapped segment and removes the temporary
/// appendable segment if it is no longer needed.
pub fn unproxy_all_segments(
    segments_lock: RwLockUpgradableReadGuard<SegmentHolder>,
    proxies: Vec<(SegmentId, LockedSegment)>,
    tmp_segment_id: SegmentId,
) -> OperationResult<()> {
    // We must propagate all changes in the proxy into their wrapped segments, as we'll put the
    // wrapped segment back into the segment holder. This can be an expensive step if we
    // collected a lot of changes in the proxy, so we do this in two batches to prevent
    // unnecessary locking. First we propagate all changes with a read lock on the shard
    // holder, to prevent blocking other readers. Second we propagate any new changes again
    // with a write lock on the segment holder, blocking other operations. This second batch
    // should be very fast, as we already propagated all changes in the first, which is why we
    // can hold a write lock. Once done, we can swap out the proxy for the wrapped shard.
    // Batch 1: propagate changes to wrapped segment with segment holder read lock
    proxies
        .iter()
        .filter_map(|(segment_id, proxy_segment)| match proxy_segment {
            LockedSegment::Proxy(proxy_segment) => Some((segment_id, proxy_segment)),
            LockedSegment::Original(_) => None,
        }).for_each(|(proxy_id, proxy_segment)| {
            if let Err(err) = proxy_segment.write().propagate_to_wrapped() {
                log::error!("Propagating proxy segment {proxy_id} changes to wrapped segment failed, ignoring: {err}");
            }
        });
    // Batch 2: propagate changes to wrapped segment with segment holder write lock
    // Swap out each proxy with wrapped segment once changes are propagated
    let mut write_segments = RwLockUpgradableReadGuard::upgrade(segments_lock);
    for (segment_id, proxy_segment) in proxies {
        match proxy_segment {
            // Propagate proxied changes to wrapped segment, take it out and swap with proxy
            // Important: put the wrapped segment back with its original segment ID
            LockedSegment::Proxy(proxy_segment) => {
                let wrapped_segment = {
                    let mut proxy_segment = proxy_segment.write();
                    if let Err(err) = proxy_segment.propagate_to_wrapped() {
                        log::error!(
                            "Propagating proxy segment {segment_id} changes to wrapped segment failed, ignoring: {err}",
                        );
                    }
                    proxy_segment.wrapped_segment.clone()
                };
                write_segments.replace(segment_id, wrapped_segment)?;
            }
            // If already unproxied, do nothing
            LockedSegment::Original(_) => {}
        }
    }
    debug_assert!(
        write_segments.get(tmp_segment_id).is_some(),
        "temp segment must exist",
    );
    // Remove temporary appendable segment, if we don't need it anymore
    write_segments.remove_segment_if_not_needed(tmp_segment_id)?;
    Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/codegen/src/pyclass_repr.rs | lib/edge/python/codegen/src/pyclass_repr.rs | pub fn pyclass_repr(input: proc_macro2::TokenStream) -> syn::Result<proc_macro2::TokenStream> {
    // Parse the annotated `impl` block; parse errors bubble up to the caller.
    let impl_block: syn::ItemImpl = syn::parse2(input)?;
    let type_name = &impl_block.self_ty;
    // Collect the names of all `#[getter]` methods — these become the fields
    // shown in the generated repr.
    let mut fields = Vec::new();
    for item in &impl_block.items {
        let syn::ImplItem::Fn(func) = item else {
            continue;
        };
        if !func.attrs.iter().any(|attr| attr.path().is_ident("getter")) {
            continue;
        }
        fields.push(&func.sig.ident);
    }
    // Re-emit the original impl block unchanged, plus a `Repr` impl that
    // formats the type as `ClassName(field=value, ...)` via `f.class`.
    let output = quote::quote! {
        #impl_block
        impl crate::repr::Repr for #type_name {
            fn fmt(&self, f: &mut crate::repr::Formatter<'_>) -> std::fmt::Result {
                use crate::repr::WriteExt as _;
                f.class::<Self>(&[
                    #( (stringify!(#fields), &self.#fields()) ),*
                ])
            }
        }
    };
    Ok(output)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/codegen/src/lib.rs | lib/edge/python/codegen/src/lib.rs | mod pyclass_repr;
/// Attribute macro entry point: expands the annotated `#[pymethods]` impl
/// block with an additional `Repr` implementation.
#[proc_macro_attribute]
pub fn pyclass_repr(
    _attributes: proc_macro::TokenStream,
    input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
    // On failure, emit the syn error as a compile error so the user gets a
    // proper diagnostic at the macro invocation site.
    pyclass_repr::pyclass_repr(input.into())
        .unwrap_or_else(|error| error.to_compile_error())
        .into()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/lib.rs | lib/edge/python/src/lib.rs | pub mod config;
pub mod query;
pub mod repr;
pub mod search;
pub mod types;
pub mod update;
use std::path::PathBuf;
use bytemuck::TransparentWrapperAlloc as _;
use pyo3::exceptions::PyException;
use pyo3::prelude::*;
use segment::common::operation_error::OperationError;
use segment::types::*;
use self::config::*;
use self::query::*;
use self::search::*;
use self::types::*;
use self::update::*;
/// The `qdrant_edge` Python extension module: re-exports all pyo3 wrapper
/// types defined in the sibling submodules.
#[pymodule]
mod qdrant_edge {
    // Shard entry point
    #[pymodule_export]
    use super::PyShard;
    // Quantization configuration
    #[pymodule_export]
    use super::config::quantization::{
        PyBinaryQuantizationConfig, PyBinaryQuantizationEncoding,
        PyBinaryQuantizationQueryEncoding, PyCompressionRatio, PyProductQuantizationConfig,
        PyScalarQuantizationConfig, PyScalarType,
    };
    // Sparse vector configuration
    #[pymodule_export]
    use super::config::sparse_vector_data::{
        PyModifier, PySparseIndexConfig, PySparseIndexType, PySparseVectorDataConfig,
        PySparseVectorStorageType,
    };
    // Dense vector configuration
    #[pymodule_export]
    use super::config::vector_data::{
        PyDistance, PyHnswIndexConfig, PyMultiVectorComparator, PyMultiVectorConfig,
        PyPlainIndexConfig, PyVectorDataConfig, PyVectorStorageDatatype, PyVectorStorageType,
    };
    #[pymodule_export]
    use super::config::{PyPayloadStorageType, PySegmentConfig};
    // Query building
    #[pymodule_export]
    use super::query::{
        PyDirection, PyFusion, PyMmr, PyOrderBy, PyPrefetch, PyQueryRequest, PySample,
    };
    // Search parameters
    #[pymodule_export]
    use super::search::{
        PyAcornSearchParams, PyPayloadSelectorInterface, PyQuantizationSearchParams,
        PySearchParams, PySearchRequest,
    };
    // Filter conditions
    #[pymodule_export]
    use super::types::filter::{
        PyFieldCondition, PyFilter, PyGeoBoundingBox, PyGeoPoint, PyGeoPolygon, PyGeoRadius,
        PyHasIdCondition, PyHasVectorCondition, PyIsEmptyCondition, PyIsNullCondition, PyMatchAny,
        PyMatchExcept, PyMatchPhrase, PyMatchText, PyMatchTextAny, PyMatchValue, PyMinShould,
        PyNestedCondition, PyRangeDateTime, PyRangeFloat, PyValuesCount,
    };
    // Score formulas
    #[pymodule_export]
    use super::types::formula::{PyDecayKind, PyExpressionInterface, PyFormula};
    // Structured query variants
    #[pymodule_export]
    use super::types::query::{
        PyContextPair, PyContextQuery, PyDiscoverQuery, PyFeedbackItem, PyFeedbackNaiveQuery,
        PyNaiveFeedbackCoefficients, PyQueryInterface, PyRecommendQuery,
    };
    // Core data types
    #[pymodule_export]
    use super::types::{PyPoint, PyPointVectors, PyRecord, PyScoredPoint, PySparseVector};
    // Update operations
    #[pymodule_export]
    use super::update::PyUpdateOperation;
}
/// Python-facing wrapper around an [`edge::Shard`], exposed as `Shard`.
#[pyclass(name = "Shard")]
#[derive(Debug)]
pub struct PyShard(edge::Shard);
#[pymethods]
impl PyShard {
    /// Load (or create) a shard at `path`, optionally with an explicit
    /// segment configuration. Exposed as the Python constructor.
    #[new]
    pub fn load(path: PathBuf, config: Option<PySegmentConfig>) -> Result<Self> {
        let shard = edge::Shard::load(&path, config.map(SegmentConfig::from))?;
        Ok(Self(shard))
    }
    /// Apply a single update operation to the shard.
    pub fn update(&self, operation: PyUpdateOperation) -> Result<()> {
        self.0.update(operation.into())?;
        Ok(())
    }
    /// Run a query request and return the scored points.
    pub fn query(&self, query: PyQueryRequest) -> Result<Vec<PyScoredPoint>> {
        let points = self.0.query(query.into())?;
        // Zero-copy wrap: PyScoredPoint is a transparent wrapper
        let points = PyScoredPoint::wrap_vec(points);
        Ok(points)
    }
    /// Run a search request and return the scored points.
    pub fn search(&self, search: PySearchRequest) -> Result<Vec<PyScoredPoint>> {
        let points = self.0.search(search.into())?;
        let points = PyScoredPoint::wrap_vec(points);
        Ok(points)
    }
    /// Fetch records by point ID, with optional payload/vector selectors.
    pub fn retrieve(
        &self,
        point_ids: Vec<PyPointId>,
        with_payload: Option<PyWithPayload>,
        with_vector: Option<PyWithVector>,
    ) -> Result<Vec<PyRecord>> {
        // Zero-copy unwrap of the transparent PyPointId wrappers
        let point_ids = PyPointId::peel_vec(point_ids);
        let points = self.0.retrieve(
            &point_ids,
            with_payload.map(WithPayloadInterface::from),
            with_vector.map(WithVector::from),
        )?;
        let points = PyRecord::wrap_vec(points);
        Ok(points)
    }
}
// Crate-local result alias defaulting the error type to `PyError`.
pub type Result<T, E = PyError> = std::result::Result<T, E>;
/// Newtype bridging `OperationError` into pyo3's `PyErr`.
#[derive(Debug)]
pub struct PyError(OperationError);
impl From<OperationError> for PyError {
    fn from(err: OperationError) -> Self {
        Self(err)
    }
}
impl From<PyError> for PyErr {
    // Surface the operation error to Python as a plain `Exception` carrying
    // the error's display text.
    fn from(err: PyError) -> Self {
        PyException::new_err(err.0.to_string())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/update.rs | lib/edge/python/src/update.rs | use bytemuck::TransparentWrapperAlloc as _;
use derive_more::Into;
use pyo3::prelude::*;
use segment::json_path::JsonPath;
use segment::types::{Filter, Payload, VectorNameBuf};
use shard::operations::point_ops::{PointIdsList, PointInsertOperationsInternal};
use shard::operations::{CollectionUpdateOperations, payload_ops, point_ops, vector_ops};
use crate::*;
/// Python-facing builder for shard update operations.
///
/// Each static method constructs exactly one `CollectionUpdateOperations`
/// variant; the resulting value is consumed by `Shard.update`.
#[pyclass(name = "UpdateOperation")]
#[derive(Clone, Debug, Into)]
pub struct PyUpdateOperation(CollectionUpdateOperations);
#[pymethods]
impl PyUpdateOperation {
    /// Insert-or-overwrite the given points. With a `condition`, builds the
    /// conditional upsert variant carrying that filter instead.
    #[staticmethod]
    #[pyo3(signature = (points, condition=None))]
    pub fn upsert_points(points: Vec<PyPoint>, condition: Option<PyFilter>) -> Self {
        let points = PointInsertOperationsInternal::PointsList(PyPoint::peel_vec(points));
        let operation = match condition {
            Some(condition) => point_ops::PointOperations::UpsertPointsConditional(
                point_ops::ConditionalInsertOperationInternal {
                    points_op: points,
                    condition: Filter::from(condition),
                },
            ),
            None => point_ops::PointOperations::UpsertPoints(points),
        };
        Self(CollectionUpdateOperations::PointOperation(operation))
    }
    /// Delete points by id.
    #[staticmethod]
    pub fn delete_points(point_ids: Vec<PyPointId>) -> Self {
        let operation = point_ops::PointOperations::DeletePoints {
            ids: PyPointId::peel_vec(point_ids),
        };
        Self(CollectionUpdateOperations::PointOperation(operation))
    }
    /// Delete every point matching `filter`.
    #[staticmethod]
    pub fn delete_points_by_filter(filter: PyFilter) -> Self {
        let operation = point_ops::PointOperations::DeletePointsByFilter(Filter::from(filter));
        Self(CollectionUpdateOperations::PointOperation(operation))
    }
    /// Update named vectors on existing points; an optional `condition` filter
    /// is attached as `update_filter`.
    #[staticmethod]
    #[pyo3(signature = (point_vectors, condition=None))]
    pub fn update_vectors(point_vectors: Vec<PyPointVectors>, condition: Option<PyFilter>) -> Self {
        let operation = vector_ops::VectorOperations::UpdateVectors(vector_ops::UpdateVectorsOp {
            points: PyPointVectors::peel_vec(point_vectors),
            update_filter: condition.map(Filter::from),
        });
        Self(CollectionUpdateOperations::VectorOperation(operation))
    }
    /// Remove the named vectors from the given points.
    #[staticmethod]
    pub fn delete_vectors(point_ids: Vec<PyPointId>, vector_names: Vec<VectorNameBuf>) -> Self {
        let operation = vector_ops::VectorOperations::DeleteVectors(
            PointIdsList::from(PyPointId::peel_vec(point_ids)),
            vector_names,
        );
        Self(CollectionUpdateOperations::VectorOperation(operation))
    }
    /// Remove the named vectors from every point matching `filter`.
    #[staticmethod]
    pub fn delete_vectors_by_filter(filter: PyFilter, vector_names: Vec<VectorNameBuf>) -> Self {
        let operation =
            vector_ops::VectorOperations::DeleteVectorsByFilter(Filter::from(filter), vector_names);
        Self(CollectionUpdateOperations::VectorOperation(operation))
    }
    /// Set `payload` on the given points, optionally scoped under a JSON `key`.
    #[staticmethod]
    #[pyo3(signature = (point_ids, payload, key=None))]
    pub fn set_payload(
        point_ids: Vec<PyPointId>,
        payload: PyPayload,
        key: Option<PyJsonPath>,
    ) -> Self {
        let operation = payload_ops::PayloadOps::SetPayload(payload_ops::SetPayloadOp {
            payload: Payload::from(payload),
            points: Some(PyPointId::peel_vec(point_ids)),
            filter: None,
            key: key.map(JsonPath::from),
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Set `payload` on all points matching `filter`, optionally under `key`.
    #[staticmethod]
    #[pyo3(signature = (filter, payload, key=None))]
    pub fn set_payload_by_filter(
        filter: PyFilter,
        payload: PyPayload,
        key: Option<PyJsonPath>,
    ) -> Self {
        let operation = payload_ops::PayloadOps::SetPayload(payload_ops::SetPayloadOp {
            payload: Payload::from(payload),
            points: None,
            filter: Some(Filter::from(filter)),
            key: key.map(JsonPath::from),
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Delete the given payload keys from the given points.
    #[staticmethod]
    pub fn delete_payload(point_ids: Vec<PyPointId>, keys: Vec<PyJsonPath>) -> Self {
        let operation = payload_ops::PayloadOps::DeletePayload(payload_ops::DeletePayloadOp {
            keys: PyJsonPath::peel_vec(keys),
            points: Some(PyPointId::peel_vec(point_ids)),
            filter: None,
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Delete the given payload keys from all points matching `filter`.
    #[staticmethod]
    pub fn delete_payload_by_filter(filter: PyFilter, keys: Vec<PyJsonPath>) -> Self {
        let operation = payload_ops::PayloadOps::DeletePayload(payload_ops::DeletePayloadOp {
            keys: PyJsonPath::peel_vec(keys),
            points: None,
            filter: Some(Filter::from(filter)),
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Remove the entire payload of the given points.
    #[staticmethod]
    pub fn clear_payload(point_ids: Vec<PyPointId>) -> Self {
        let operation = payload_ops::PayloadOps::ClearPayload {
            points: PyPointId::peel_vec(point_ids),
        };
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Remove the entire payload of all points matching `filter`.
    #[staticmethod]
    pub fn clear_payload_by_filter(filter: PyFilter) -> Self {
        let operation = payload_ops::PayloadOps::ClearPayloadByFilter(Filter::from(filter));
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Build an `OverwritePayload` op for the given points, optionally under
    /// `key` (overwrite rather than merge — semantics defined by the core op).
    #[staticmethod]
    #[pyo3(signature = (point_ids, payload, key=None))]
    pub fn overwrite_payload(
        point_ids: Vec<PyPointId>,
        payload: PyPayload,
        key: Option<PyJsonPath>,
    ) -> Self {
        let operation = payload_ops::PayloadOps::OverwritePayload(payload_ops::SetPayloadOp {
            payload: Payload::from(payload),
            points: Some(PyPointId::peel_vec(point_ids)),
            filter: None,
            key: key.map(JsonPath::from),
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
    /// Build an `OverwritePayload` op for all points matching `filter`.
    #[staticmethod]
    #[pyo3(signature = (filter, payload, key=None))]
    pub fn overwrite_payload_by_filter(
        filter: PyFilter,
        payload: PyPayload,
        key: Option<PyJsonPath>,
    ) -> Self {
        let operation = payload_ops::PayloadOps::OverwritePayload(payload_ops::SetPayloadOp {
            payload: Payload::from(payload),
            points: None,
            filter: Some(Filter::from(filter)),
            key: key.map(JsonPath::from),
        });
        Self(CollectionUpdateOperations::PayloadOperation(operation))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/search.rs | lib/edge/python/src/search.rs | use bytemuck::{TransparentWrapper, TransparentWrapperAlloc as _};
use derive_more::Into;
use ordered_float::OrderedFloat;
use pyo3::IntoPyObjectExt as _;
use pyo3::prelude::*;
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequest;
use crate::repr::*;
use crate::*;
/// Python wrapper around a `CoreSearchRequest` (plain vector search).
#[pyclass(name = "SearchRequest")]
#[derive(Clone, Debug, Into)]
pub struct PySearchRequest(CoreSearchRequest);
#[pyclass_repr]
#[pymethods]
impl PySearchRequest {
    /// Python constructor; optional arguments are stored as `None` when absent.
    #[new]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        query: PyQuery,
        filter: Option<PyFilter>,
        params: Option<PySearchParams>,
        limit: usize,
        offset: usize,
        with_vector: Option<PyWithVector>,
        with_payload: Option<PyWithPayload>,
        score_threshold: Option<f32>,
    ) -> Self {
        Self(CoreSearchRequest {
            query: QueryEnum::from(query),
            filter: filter.map(Filter::from),
            params: params.map(SearchParams::from),
            limit,
            offset,
            with_vector: with_vector.map(WithVector::from),
            with_payload: with_payload.map(WithPayloadInterface::from),
            score_threshold,
        })
    }
    // Field getters mirror the wrapped struct one-to-one; `wrap_ref` returns a
    // borrowed transparent wrapper without cloning.
    #[getter]
    pub fn query(&self) -> &PyQuery {
        PyQuery::wrap_ref(&self.0.query)
    }
    #[getter]
    pub fn filter(&self) -> Option<&PyFilter> {
        self.0.filter.as_ref().map(PyFilter::wrap_ref)
    }
    #[getter]
    pub fn params(&self) -> Option<PySearchParams> {
        self.0.params.map(PySearchParams)
    }
    #[getter]
    pub fn limit(&self) -> usize {
        self.0.limit
    }
    #[getter]
    pub fn offset(&self) -> usize {
        self.0.offset
    }
    #[getter]
    pub fn with_vector(&self) -> Option<&PyWithVector> {
        self.0.with_vector.as_ref().map(PyWithVector::wrap_ref)
    }
    #[getter]
    pub fn with_payload(&self) -> Option<&PyWithPayload> {
        self.0.with_payload.as_ref().map(PyWithPayload::wrap_ref)
    }
    #[getter]
    pub fn score_threshold(&self) -> Option<f32> {
        self.0.score_threshold
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PySearchRequest {
    // Compile-time exhaustiveness check: this destructuring stops compiling
    // when a field is added to the core struct, reminding us to add a getter.
    fn _getters(self) {
        // Every field should have a getter method
        let CoreSearchRequest {
            query: _,
            filter: _,
            params: _,
            limit: _,
            offset: _,
            with_vector: _,
            with_payload: _,
            score_threshold: _,
        } = self.0;
    }
}
/// Python wrapper around `SearchParams` (search tuning knobs: HNSW `ef`,
/// exactness, quantization behavior, ACORN).
#[pyclass(name = "SearchParams")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PySearchParams(pub SearchParams);
#[pyclass_repr]
#[pymethods]
impl PySearchParams {
    /// Python constructor; optional fields are stored as `None` when absent.
    #[new]
    pub fn new(
        hnsw_ef: Option<usize>,
        exact: bool,
        quantization: Option<PyQuantizationSearchParams>,
        indexed_only: bool,
        acorn: Option<PyAcornSearchParams>,
    ) -> Self {
        Self(SearchParams {
            hnsw_ef,
            exact,
            quantization: quantization.map(QuantizationSearchParams::from),
            indexed_only,
            acorn: acorn.map(AcornSearchParams::from),
        })
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn hnsw_ef(&self) -> Option<usize> {
        self.0.hnsw_ef
    }
    #[getter]
    pub fn exact(&self) -> bool {
        self.0.exact
    }
    #[getter]
    pub fn quantization(&self) -> Option<PyQuantizationSearchParams> {
        self.0.quantization.map(PyQuantizationSearchParams)
    }
    #[getter]
    pub fn indexed_only(&self) -> bool {
        self.0.indexed_only
    }
    #[getter]
    pub fn acorn(&self) -> Option<PyAcornSearchParams> {
        self.0.acorn.map(PyAcornSearchParams)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PySearchParams {
    // Compile-time exhaustiveness check: fails to compile if a core field is
    // added without a matching getter above.
    fn _getters(self) {
        // Every field should have a getter method
        let SearchParams {
            hnsw_ef: _,
            exact: _,
            quantization: _,
            indexed_only: _,
            acorn: _,
        } = self.0;
    }
}
/// Python wrapper around `QuantizationSearchParams` (per-request quantization
/// overrides: ignore quantized data, rescore with originals, oversampling).
#[pyclass(name = "QuantizationSearchParams")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyQuantizationSearchParams(QuantizationSearchParams);
#[pyclass_repr]
#[pymethods]
impl PyQuantizationSearchParams {
    /// Python constructor; fields are stored verbatim.
    #[new]
    pub fn new(ignore: bool, rescore: Option<bool>, oversampling: Option<f64>) -> Self {
        Self(QuantizationSearchParams {
            ignore,
            rescore,
            oversampling,
        })
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn ignore(&self) -> bool {
        self.0.ignore
    }
    #[getter]
    pub fn rescore(&self) -> Option<bool> {
        self.0.rescore
    }
    #[getter]
    pub fn oversampling(&self) -> Option<f64> {
        self.0.oversampling
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyQuantizationSearchParams {
    // Compile-time exhaustiveness check for the getters above.
    fn _getters(self) {
        // Every field should have a getter method
        let QuantizationSearchParams {
            ignore: _,
            rescore: _,
            oversampling: _,
        } = self.0;
    }
}
/// Python wrapper around `AcornSearchParams`.
#[pyclass(name = "AcornSearchParams")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyAcornSearchParams(AcornSearchParams);
#[pyclass_repr]
#[pymethods]
impl PyAcornSearchParams {
    /// Python constructor; `max_selectivity` is wrapped in `OrderedFloat` so
    /// the core struct can derive ordering/hashing.
    #[new]
    pub fn new(enable: bool, max_selectivity: Option<f64>) -> Self {
        Self(AcornSearchParams {
            enable,
            max_selectivity: max_selectivity.map(OrderedFloat),
        })
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn enable(&self) -> bool {
        self.0.enable
    }
    #[getter]
    pub fn max_selectivity(&self) -> Option<f64> {
        // Unwrap the OrderedFloat back into a plain f64 for Python.
        self.0
            .max_selectivity
            .map(|selectivity| selectivity.into_inner())
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyAcornSearchParams {
    // Compile-time exhaustiveness check for the getters above.
    fn _getters(self) {
        // Every field should have a getter method
        let AcornSearchParams {
            enable: _,
            max_selectivity: _,
        } = self.0;
    }
}
/// Transparent wrapper over `WithVector` with Python conversions.
///
/// On the Python side this is a plain `bool` or a list of vector names, not a
/// dedicated class — hence manual `FromPyObject`/`IntoPyObject` impls instead
/// of `#[pyclass]`.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyWithVector(pub WithVector);
impl FromPyObject<'_, '_> for PyWithVector {
    type Error = PyErr;
    fn extract(with_vector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Untagged helper: tries `bool` first, then a list of names.
        #[derive(FromPyObject)]
        enum Helper {
            Bool(bool),
            Selector(Vec<String>),
        }
        // Compile-time check: stops compiling when a new `WithVector` variant
        // is added without a corresponding `Helper` arm.
        fn _variants(with_vector: WithVector) {
            match with_vector {
                WithVector::Bool(_) => {}
                WithVector::Selector(_) => {}
            }
        }
        let with_vector = match with_vector.extract()? {
            Helper::Bool(bool) => WithVector::Bool(bool),
            Helper::Selector(vectors) => WithVector::Selector(vectors),
        };
        Ok(Self(with_vector))
    }
}
impl<'py> IntoPyObject<'py> for PyWithVector {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible?
    // Owned conversion delegates to the by-reference impl below.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
impl<'py> IntoPyObject<'py> for &PyWithVector {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible?
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            WithVector::Bool(bool) => bool.into_bound_py_any(py),
            WithVector::Selector(vectors) => vectors.into_bound_py_any(py),
        }
    }
}
impl Repr for PyWithVector {
    // Repr mirrors the Python shape: `True`/`False` or a list of names.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match &self.0 {
            WithVector::Bool(bool) => bool.fmt(f),
            WithVector::Selector(vectors) => vectors.fmt(f),
        }
    }
}
/// Transparent wrapper over `WithPayloadInterface` with Python conversions.
///
/// Accepted Python shapes: `bool`, list of JSON paths, or a
/// `PayloadSelector` (include/exclude) instance.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyWithPayload(WithPayloadInterface);
impl FromPyObject<'_, '_> for PyWithPayload {
    type Error = PyErr;
    fn extract(with_payload: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Untagged helper: extraction tries the variants in declaration order.
        #[derive(FromPyObject)]
        enum Helper {
            Bool(bool),
            Fields(Vec<PyJsonPath>),
            Selector(PyPayloadSelector),
        }
        // Compile-time check mirroring every core variant with a Helper arm.
        fn _variants(with_payload: WithPayloadInterface) {
            match with_payload {
                WithPayloadInterface::Bool(_) => {}
                WithPayloadInterface::Fields(_) => {}
                WithPayloadInterface::Selector(_) => {}
            }
        }
        let with_payload = match with_payload.extract()? {
            Helper::Bool(bool) => WithPayloadInterface::Bool(bool),
            Helper::Fields(fields) => WithPayloadInterface::Fields(PyJsonPath::peel_vec(fields)),
            Helper::Selector(selector) => {
                WithPayloadInterface::Selector(PayloadSelector::from(selector))
            }
        };
        Ok(Self(with_payload))
    }
}
impl<'py> IntoPyObject<'py> for PyWithPayload {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible?
    // Owned conversion delegates to the by-reference impl below.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
impl<'py> IntoPyObject<'py> for &PyWithPayload {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible?
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            WithPayloadInterface::Bool(bool) => bool.into_bound_py_any(py),
            WithPayloadInterface::Fields(fields) => {
                PyJsonPath::wrap_slice(fields).into_bound_py_any(py)
            }
            // Selector needs an owned value for conversion, hence the clone.
            WithPayloadInterface::Selector(selector) => PyPayloadSelector::wrap_ref(selector)
                .clone()
                .into_bound_py_any(py),
        }
    }
}
impl Repr for PyWithPayload {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match &self.0 {
            WithPayloadInterface::Bool(bool) => bool.fmt(f),
            WithPayloadInterface::Fields(fields) => PyJsonPath::wrap_slice(fields).fmt(f),
            WithPayloadInterface::Selector(selector) => {
                PyPayloadSelector::wrap_ref(selector).fmt(f)
            }
        }
    }
}
/// Transparent wrapper over the core `PayloadSelector`.
///
/// Converts to/from the Python-visible `PyPayloadSelectorInterface` enum
/// (`PayloadSelector.Include` / `.Exclude` in Python).
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPayloadSelector(PayloadSelector);
impl FromPyObject<'_, '_> for PyPayloadSelector {
    type Error = PyErr;
    fn extract(selector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        let selector = match selector.extract()? {
            PyPayloadSelectorInterface::Include { keys } => {
                PayloadSelector::Include(PayloadSelectorInclude {
                    include: PyJsonPath::peel_vec(keys),
                })
            }
            PyPayloadSelectorInterface::Exclude { keys } => {
                PayloadSelector::Exclude(PayloadSelectorExclude {
                    exclude: PyJsonPath::peel_vec(keys),
                })
            }
        };
        Ok(Self(selector))
    }
}
impl<'py> IntoPyObject<'py> for PyPayloadSelector {
    type Target = PyPayloadSelectorInterface;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible?
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        let selector = match self.0 {
            PayloadSelector::Include(PayloadSelectorInclude { include }) => {
                PyPayloadSelectorInterface::Include {
                    keys: PyJsonPath::wrap_vec(include),
                }
            }
            PayloadSelector::Exclude(PayloadSelectorExclude { exclude }) => {
                PyPayloadSelectorInterface::Exclude {
                    keys: PyJsonPath::wrap_vec(exclude),
                }
            }
        };
        Bound::new(py, selector)
    }
}
impl Repr for PyPayloadSelector {
    // Renders through the Python-visible enum name so the repr matches what a
    // Python user would type.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let (repr, keys) = match &self.0 {
            PayloadSelector::Include(PayloadSelectorInclude { include }) => {
                ("Include", PyJsonPath::wrap_slice(include))
            }
            PayloadSelector::Exclude(PayloadSelectorExclude { exclude }) => {
                ("Exclude", PyJsonPath::wrap_slice(exclude))
            }
        };
        f.complex_enum::<PyPayloadSelectorInterface>(repr, &[("keys", &keys)])
    }
}
/// Python-visible payload selector: include or exclude a set of JSON paths.
#[pyclass(name = "PayloadSelector")]
#[derive(Clone, Debug)]
pub enum PyPayloadSelectorInterface {
    Include { keys: Vec<PyJsonPath> },
    Exclude { keys: Vec<PyJsonPath> },
}
impl Repr for PyPayloadSelectorInterface {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let (repr, keys) = match self {
            PyPayloadSelectorInterface::Include { keys } => ("Include", keys),
            PyPayloadSelectorInterface::Exclude { keys } => ("Exclude", keys),
        };
        f.complex_enum::<Self>(repr, &[("keys", keys)])
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/query.rs | lib/edge/python/src/query.rs | use std::fmt;
use bytemuck::{TransparentWrapper, TransparentWrapperAlloc as _};
use derive_more::Into;
use ordered_float::OrderedFloat;
use pyo3::IntoPyObjectExt;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use segment::data_types::order_by::{Direction, OrderBy, StartFrom};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, VectorInternal};
use segment::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula;
use segment::json_path::JsonPath;
use shard::query::query_enum::QueryEnum;
use shard::query::*;
use super::*;
use crate::repr::*;
/// Python wrapper around a `ShardQueryRequest` (universal query API with
/// optional nested prefetches).
#[pyclass(name = "QueryRequest")]
#[derive(Clone, Debug, Into)]
pub struct PyQueryRequest(ShardQueryRequest);
#[pyclass_repr]
#[pymethods]
impl PyQueryRequest {
    /// Python constructor; `score_threshold` is wrapped in `OrderedFloat` for
    /// the core struct.
    #[new]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        prefetches: Vec<PyPrefetch>,
        query: Option<PyScoringQuery>,
        filter: Option<PyFilter>,
        score_threshold: Option<f32>,
        limit: usize,
        offset: usize,
        params: Option<PySearchParams>,
        with_vector: PyWithVector,
        with_payload: PyWithPayload,
    ) -> Self {
        Self(ShardQueryRequest {
            prefetches: PyPrefetch::peel_vec(prefetches),
            query: query.map(ScoringQuery::from),
            filter: filter.map(Filter::from),
            score_threshold: score_threshold.map(OrderedFloat),
            limit,
            offset,
            params: params.map(SearchParams::from),
            with_vector: WithVector::from(with_vector),
            with_payload: WithPayloadInterface::from(with_payload),
        })
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn prefetches(&self) -> &[PyPrefetch] {
        PyPrefetch::wrap_slice(&self.0.prefetches)
    }
    #[getter]
    pub fn query(&self) -> Option<&PyScoringQuery> {
        self.0.query.as_ref().map(PyScoringQuery::wrap_ref)
    }
    #[getter]
    pub fn filter(&self) -> Option<&PyFilter> {
        self.0.filter.as_ref().map(PyFilter::wrap_ref)
    }
    #[getter]
    pub fn score_threshold(&self) -> Option<f32> {
        // Unwrap the OrderedFloat back to a plain f32 for Python.
        self.0
            .score_threshold
            .map(|threshold| threshold.into_inner())
    }
    #[getter]
    pub fn limit(&self) -> usize {
        self.0.limit
    }
    #[getter]
    pub fn offset(&self) -> usize {
        self.0.offset
    }
    #[getter]
    pub fn params(&self) -> Option<PySearchParams> {
        self.0.params.map(PySearchParams)
    }
    #[getter]
    pub fn with_vector(&self) -> &PyWithVector {
        PyWithVector::wrap_ref(&self.0.with_vector)
    }
    #[getter]
    pub fn with_payload(&self) -> &PyWithPayload {
        PyWithPayload::wrap_ref(&self.0.with_payload)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyQueryRequest {
    // Compile-time exhaustiveness check: fails to compile if a core field is
    // added without a matching getter above.
    fn _getters(self) {
        // Every field should have a getter method
        let ShardQueryRequest {
            prefetches: _,
            query: _,
            filter: _,
            score_threshold: _,
            limit: _,
            offset: _,
            params: _,
            with_vector: _,
            with_payload: _,
        } = self.0;
    }
}
/// Python wrapper around a `ShardPrefetch` — a nested sub-query whose results
/// feed the parent query in a `QueryRequest`.
#[pyclass(name = "Prefetch")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPrefetch(ShardPrefetch);
#[pyclass_repr]
#[pymethods]
impl PyPrefetch {
    /// Python constructor; prefetches may themselves nest further prefetches.
    #[new]
    pub fn new(
        prefetches: Vec<PyPrefetch>,
        query: Option<PyScoringQuery>,
        limit: usize,
        params: Option<PySearchParams>,
        filter: Option<PyFilter>,
        score_threshold: Option<f32>,
    ) -> Self {
        Self(ShardPrefetch {
            prefetches: PyPrefetch::peel_vec(prefetches),
            query: query.map(ScoringQuery::from),
            limit,
            params: params.map(SearchParams::from),
            filter: filter.map(Filter::from),
            score_threshold: score_threshold.map(OrderedFloat),
        })
    }
    // Field getters mirror the wrapped struct one-to-one (query/filter return
    // owned clones, unlike the borrowed getters on PyQueryRequest).
    #[getter]
    pub fn prefetches(&self) -> &[PyPrefetch] {
        PyPrefetch::wrap_slice(&self.0.prefetches)
    }
    #[getter]
    pub fn query(&self) -> Option<PyScoringQuery> {
        self.0.query.clone().map(PyScoringQuery)
    }
    #[getter]
    pub fn limit(&self) -> usize {
        self.0.limit
    }
    #[getter]
    pub fn params(&self) -> Option<PySearchParams> {
        self.0.params.map(PySearchParams)
    }
    #[getter]
    pub fn filter(&self) -> Option<PyFilter> {
        self.0.filter.clone().map(PyFilter)
    }
    #[getter]
    pub fn score_threshold(&self) -> Option<f32> {
        self.0
            .score_threshold
            .map(|threshold| threshold.into_inner())
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyPrefetch {
    // Compile-time exhaustiveness check for the getters above.
    fn _getters(self) {
        // Every field should have a getter method
        let ShardPrefetch {
            prefetches: _,
            query: _,
            limit: _,
            params: _,
            filter: _,
            score_threshold: _,
        } = self.0;
    }
}
impl<'py> IntoPyObject<'py> for &PyPrefetch {
    type Target = PyPrefetch;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    // By-reference conversion clones into the owned pyclass conversion.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(self.clone(), py)
    }
}
/// Transparent wrapper over `ScoringQuery`.
///
/// On the Python side this is a union of the concrete query classes
/// (vector query, fusion, order-by, formula, sample, MMR) rather than one
/// dedicated class, so conversions are implemented manually.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyScoringQuery(ScoringQuery);
impl FromPyObject<'_, '_> for PyScoringQuery {
    type Error = PyErr;
    fn extract(query: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Untagged helper: extraction tries variants in declaration order.
        #[derive(FromPyObject)]
        enum Helper {
            Vector(PyQuery),
            Fusion(PyFusion),
            OrderBy(PyOrderBy),
            Formula(PyFormula),
            Sample(PySample),
            Mmr(PyMmr),
        }
        // Compile-time check: stops compiling when a new ScoringQuery variant
        // is added without a corresponding Helper arm.
        fn _variants(query: ScoringQuery) {
            match query {
                ScoringQuery::Vector(_) => {}
                ScoringQuery::Fusion(_) => {}
                ScoringQuery::OrderBy(_) => {}
                ScoringQuery::Formula(_) => {}
                ScoringQuery::Sample(_) => {}
                ScoringQuery::Mmr(_) => {}
            }
        }
        let query = match query.extract()? {
            Helper::Vector(query) => ScoringQuery::Vector(QueryEnum::from(query)),
            Helper::Fusion(fusion) => ScoringQuery::Fusion(FusionInternal::from(fusion)),
            Helper::OrderBy(order_by) => ScoringQuery::OrderBy(OrderBy::from(order_by)),
            Helper::Formula(formula) => ScoringQuery::Formula(ParsedFormula::from(formula)),
            Helper::Sample(sample) => ScoringQuery::Sample(SampleInternal::from(sample)),
            Helper::Mmr(mmr) => ScoringQuery::Mmr(MmrInternal::from(mmr)),
        };
        Ok(Self(query))
    }
}
impl<'py> IntoPyObject<'py> for PyScoringQuery {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    // Each core variant converts to its dedicated Python class.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match self.0 {
            ScoringQuery::Vector(vector) => PyQuery(vector).into_bound_py_any(py),
            ScoringQuery::Fusion(fusion) => PyFusion::from(fusion).into_bound_py_any(py),
            ScoringQuery::OrderBy(order_by) => PyOrderBy(order_by).into_bound_py_any(py),
            ScoringQuery::Formula(formula) => PyFormula(formula).into_bound_py_any(py),
            ScoringQuery::Sample(sample) => PySample::from(sample).into_bound_py_any(py),
            ScoringQuery::Mmr(mmr) => PyMmr(mmr).into_bound_py_any(py),
        }
    }
}
impl<'py> IntoPyObject<'py> for &PyScoringQuery {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(self.clone(), py)
    }
}
impl Repr for PyScoringQuery {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match &self.0 {
            ScoringQuery::Vector(vector) => PyQuery::wrap_ref(vector).fmt(f),
            ScoringQuery::Fusion(fusion) => PyFusion::from(*fusion).fmt(f),
            ScoringQuery::OrderBy(order_by) => PyOrderBy::wrap_ref(order_by).fmt(f),
            ScoringQuery::Formula(_formula) => f.unimplemented(), // TODO!
            ScoringQuery::Sample(sample) => PySample::from(*sample).fmt(f),
            ScoringQuery::Mmr(mmr) => PyMmr::wrap_ref(mmr).fmt(f),
        }
    }
}
/// Python-visible fusion strategy for combining prefetch results
/// (`Fusion.Rrfk` / `Fusion.Dbsf` in Python).
#[pyclass(name = "Fusion")]
#[derive(Copy, Clone, Debug)]
pub enum PyFusion {
    Rrfk { rrfk: usize },
    Dbsf {},
}
#[pymethods]
impl PyFusion {
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl Repr for PyFusion {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let (repr, fields): (_, &[(_, &dyn Repr)]) = match self {
            PyFusion::Rrfk { rrfk } => ("Rrfk", &[("rrfk", rrfk)]),
            PyFusion::Dbsf {} => ("Dbsf", &[]),
        };
        f.complex_enum::<Self>(repr, fields)
    }
}
// Bidirectional 1:1 mapping with the core FusionInternal enum.
impl From<FusionInternal> for PyFusion {
    fn from(fusion: FusionInternal) -> Self {
        match fusion {
            FusionInternal::RrfK(rrfk) => PyFusion::Rrfk { rrfk },
            FusionInternal::Dbsf => PyFusion::Dbsf {},
        }
    }
}
impl From<PyFusion> for FusionInternal {
    fn from(fusion: PyFusion) -> Self {
        match fusion {
            PyFusion::Rrfk { rrfk } => FusionInternal::RrfK(rrfk),
            PyFusion::Dbsf {} => FusionInternal::Dbsf,
        }
    }
}
/// Python wrapper around `OrderBy`: sort results by a payload key with an
/// optional direction and starting value.
#[pyclass(name = "OrderBy")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyOrderBy(OrderBy);
#[pyclass_repr]
#[pymethods]
impl PyOrderBy {
    /// Python constructor.
    ///
    /// NOTE(review): never returns `Err` — the `PyResult` wrapper looks
    /// vestigial; confirm before simplifying the signature.
    #[new]
    pub fn new(
        key: PyJsonPath,
        direction: Option<PyDirection>,
        start_from: Option<PyStartFrom>,
    ) -> PyResult<Self> {
        let order_by = OrderBy {
            key: JsonPath::from(key),
            direction: direction.map(Direction::from),
            start_from: start_from.map(StartFrom::from),
        };
        Ok(Self(order_by))
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn key(&self) -> &PyJsonPath {
        PyJsonPath::wrap_ref(&self.0.key)
    }
    #[getter]
    pub fn direction(&self) -> Option<PyDirection> {
        self.0.direction.map(PyDirection::from)
    }
    #[getter]
    pub fn start_from(&self) -> Option<PyStartFrom> {
        self.0.start_from.map(PyStartFrom)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyOrderBy {
    // Compile-time exhaustiveness check for the getters above.
    fn _getters(self) {
        // Every field should have a getter method
        let OrderBy {
            key: _,
            direction: _,
            start_from: _,
        } = self.0;
    }
}
/// Python-visible sort direction (`Direction.Asc` / `Direction.Desc`).
#[pyclass(name = "Direction")]
#[derive(Copy, Clone, Debug)]
pub enum PyDirection {
    Asc,
    Desc,
}
#[pymethods]
impl PyDirection {
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl Repr for PyDirection {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let repr = match self {
            PyDirection::Asc => "Asc",
            PyDirection::Desc => "Desc",
        };
        f.simple_enum::<Self>(repr)
    }
}
// Bidirectional 1:1 mapping with the core Direction enum.
impl From<Direction> for PyDirection {
    fn from(direction: Direction) -> Self {
        match direction {
            Direction::Asc => PyDirection::Asc,
            Direction::Desc => PyDirection::Desc,
        }
    }
}
impl From<PyDirection> for Direction {
    fn from(direction: PyDirection) -> Self {
        match direction {
            PyDirection::Asc => Direction::Asc,
            PyDirection::Desc => Direction::Desc,
        }
    }
}
/// Wrapper over `StartFrom` — the value an `OrderBy` scan starts at.
///
/// On the Python side this is a plain `int`, `float`, or RFC-style date-time
/// string, hence manual conversions instead of `#[pyclass]`.
#[derive(Copy, Clone, Debug, Into)]
pub struct PyStartFrom(StartFrom);
impl FromPyObject<'_, '_> for PyStartFrom {
    type Error = PyErr;
    fn extract(start_from: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Untagged helper: int, then float, then date-time string.
        #[derive(FromPyObject)]
        enum Helper {
            Integer(IntPayloadType),
            Float(FloatPayloadType),
            DateTime(String),
        }
        // Compile-time check mirroring every core variant with a Helper arm.
        fn _variants(start_from: StartFrom) {
            match start_from {
                StartFrom::Integer(_) => {}
                StartFrom::Float(_) => {}
                StartFrom::Datetime(_) => {}
            }
        }
        let start_from = match start_from.extract()? {
            Helper::Integer(int) => StartFrom::Integer(int),
            Helper::Float(float) => StartFrom::Float(float),
            Helper::DateTime(date_time) => {
                // Strings must parse as the core date-time type; a bad string
                // becomes a Python ValueError.
                let date_time = date_time.parse().map_err(|err| {
                    PyValueError::new_err(format!("failed to parse date-time: {err}"))
                })?;
                StartFrom::Datetime(date_time)
            }
        };
        Ok(Self(start_from))
    }
}
impl<'py> IntoPyObject<'py> for PyStartFrom {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    // Owned conversion delegates to the by-reference impl below.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
impl<'py> IntoPyObject<'py> for &PyStartFrom {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            StartFrom::Integer(int) => int.into_bound_py_any(py),
            StartFrom::Float(float) => float.into_bound_py_any(py),
            // Date-times round-trip through their string form.
            StartFrom::Datetime(date_time) => date_time.to_string().into_bound_py_any(py),
        }
    }
}
impl Repr for PyStartFrom {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self.0 {
            StartFrom::Integer(int) => int.fmt(f),
            StartFrom::Float(float) => float.fmt(f),
            StartFrom::Datetime(date_time) => date_time.to_string().fmt(f),
        }
    }
}
/// Python-visible sampling strategy (`Sample.Random`).
#[pyclass(name = "Sample")]
#[derive(Copy, Clone, Debug)]
pub enum PySample {
    Random,
}
#[pymethods]
impl PySample {
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl Repr for PySample {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let repr = match self {
            PySample::Random => "Random",
        };
        f.simple_enum::<Self>(repr)
    }
}
// Bidirectional 1:1 mapping with the core SampleInternal enum.
impl From<SampleInternal> for PySample {
    fn from(sample: SampleInternal) -> Self {
        match sample {
            SampleInternal::Random => PySample::Random,
        }
    }
}
impl From<PySample> for SampleInternal {
    fn from(sample: PySample) -> Self {
        match sample {
            PySample::Random => SampleInternal::Random,
        }
    }
}
/// Python wrapper around `MmrInternal` (maximal-marginal-relevance reranking).
#[pyclass(name = "Mmr")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMmr(MmrInternal);
#[pyclass_repr]
#[pymethods]
impl PyMmr {
    /// Python constructor; a missing `using` falls back to the default
    /// (unnamed) vector, and `lambda` is wrapped in `OrderedFloat`.
    #[new]
    pub fn new(
        vector: PyNamedVectorInternal,
        using: Option<String>,
        lambda: f32,
        candidates_limit: usize,
    ) -> Self {
        let mmr = MmrInternal {
            vector: VectorInternal::from(vector),
            using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_string()),
            lambda: OrderedFloat(lambda),
            candidates_limit,
        };
        Self(mmr)
    }
    // Field getters mirror the wrapped struct one-to-one.
    #[getter]
    pub fn vector(&self) -> &PyNamedVectorInternal {
        PyNamedVectorInternal::wrap_ref(&self.0.vector)
    }
    #[getter]
    pub fn using(&self) -> &str {
        &self.0.using
    }
    #[getter]
    pub fn lambda(&self) -> f32 {
        self.0.lambda.into_inner()
    }
    #[getter]
    pub fn candidates_limit(&self) -> usize {
        self.0.candidates_limit
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMmr {
    // Compile-time exhaustiveness check for the getters above.
    fn _getters(self) {
        // Every field should have a getter method
        let MmrInternal {
            vector: _,
            using: _,
            lambda: _,
            candidates_limit: _,
        } = self.0;
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/repr.rs | lib/edge/python/src/repr.rs | use std::collections::{HashMap, HashSet};
use std::fmt;
pub use edge_py_codegen::pyclass_repr;
use pyo3::PyTypeInfo;
/// Python-`repr()`-style rendering for binding types.
pub trait Repr {
    /// Write this value's Python-style representation into `f`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result;
    /// Convenience: build the representation as an owned `String`.
    ///
    /// Writing into a `String` cannot fail, so the `expect` never fires.
    fn repr(&self) -> String {
        let mut out = String::new();
        Repr::fmt(self, &mut out).expect("infallible");
        out
    }
}
/// Output sink for [`Repr`]: any `fmt::Write`, type-erased so `Repr` stays
/// object-safe and callers can pass a plain `String`.
pub type Formatter<'a> = dyn fmt::Write + 'a;
// References render exactly like the value they point to.
impl<T: Repr + ?Sized> Repr for &T {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Repr::fmt(*self, f)
    }
}
// Booleans use Python's capitalized spelling, not Rust's `true`/`false`.
impl Repr for bool {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let literal = if *self { "True" } else { "False" };
        f.write_str(literal)
    }
}
// Primitive numbers render via their Rust `Display` output.
//
// NOTE(review): for floats this is close to, but not identical to, Python's
// repr — Rust prints `1` for `1.0` and `NaN` for NaN, where Python prints
// `1.0` and `nan`. Confirm whether exact Python float syntax is required.
impl Repr for u32 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
impl Repr for u64 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
impl Repr for i64 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
impl Repr for usize {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
impl Repr for f32 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
impl Repr for f64 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
// Strings use Rust's Debug escaping, which approximates Python's
// double-quoted string repr.
impl Repr for str {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self:?}")
    }
}
impl Repr for String {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.as_str().fmt(f)
    }
}
// Sequences render as Python lists, maps as dicts, sets as sets — all via the
// WriteExt helpers below.
impl<T: Repr> Repr for [T] {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.list(self)
    }
}
impl<T: Repr> Repr for Vec<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.list(self)
    }
}
// Keys are rendered as Python strings (hence the AsRef<str> bound).
impl<K: AsRef<str>, V: Repr, S> Repr for HashMap<K, V, S> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.map(self)
    }
}
impl<T: Repr, S> Repr for HashSet<T, S> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.set(self)
    }
}
// `None` mirrors Python's literal; `Some` renders its payload transparently.
impl<T: Repr> Repr for Option<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        if let Some(inner) = self {
            inner.fmt(f)
        } else {
            f.write_str("None")
        }
    }
}
// UUIDs render as a double-quoted string, matching the str impl's quoting.
impl Repr for uuid::Uuid {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "\"{self}\"")
    }
}
// JSON values render as the equivalent Python literals (null -> None, etc.).
impl Repr for serde_json::Value {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            serde_json::Value::Null => write!(f, "None"),
            serde_json::Value::Bool(bool) => bool.fmt(f),
            serde_json::Value::Number(num) => num.fmt(f),
            serde_json::Value::String(str) => str.fmt(f),
            serde_json::Value::Array(array) => array.fmt(f),
            serde_json::Value::Object(object) => object.fmt(f),
        }
    }
}
impl Repr for serde_json::Number {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
// JSON objects render as Python dicts via the map helper.
impl Repr for serde_json::Map<String, serde_json::Value> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.map(self)
    }
}
/// Formatting helpers layered on any `fmt::Write` sink, producing
/// Python-syntax renderings of classes, enums, and containers.
pub trait WriteExt: fmt::Write {
    /// `Name(field=value, ...)` — a pyclass constructor call. The Python-side
    /// class name comes from `T::NAME` (the `#[pyclass(name = ...)]` value).
    fn class<T: PyTypeInfo>(&mut self, fields: &[(&str, &dyn Repr)]) -> fmt::Result {
        write!(self, "{}(", T::NAME)?;
        let mut separator = "";
        for (field, value) in fields {
            write!(self, "{separator}{field}={}", ReprFmt(value))?;
            separator = ", ";
        }
        write!(self, ")")?;
        Ok(())
    }
    /// `Name.Variant(field=value, ...)` — a pyo3 complex-enum variant call.
    fn complex_enum<T: PyTypeInfo>(
        &mut self,
        variant: &str,
        fields: &[(&str, &dyn Repr)],
    ) -> fmt::Result {
        write!(self, "{}.{}(", T::NAME, variant)?;
        let mut separator = "";
        for (field, value) in fields {
            write!(self, "{separator}{field}={}", ReprFmt(value))?;
            separator = ", ";
        }
        write!(self, ")")?;
        Ok(())
    }
    /// `Name.Variant` — a fieldless enum variant.
    fn simple_enum<T: PyTypeInfo>(&mut self, variant: &str) -> fmt::Result {
        write!(self, "{}.{}", T::NAME, variant)
    }
    /// `[a, b, ...]` — a Python list literal.
    fn list<T: Repr>(&mut self, list: impl IntoIterator<Item = T>) -> fmt::Result {
        write!(self, "[")?;
        let mut separator = "";
        for value in list {
            write!(self, "{separator}{}", ReprFmt(value))?;
            separator = ", ";
        }
        write!(self, "]")?;
        Ok(())
    }
    /// `{"k": v, ...}` — a Python dict literal with string keys.
    fn map<K, V>(&mut self, map: impl IntoIterator<Item = (K, V)>) -> fmt::Result
    where
        K: AsRef<str>,
        V: Repr,
    {
        write!(self, "{{")?;
        let mut separator = "";
        for (key, value) in map {
            write!(
                self,
                "{separator}{}: {}",
                ReprFmt(key.as_ref()),
                ReprFmt(value)
            )?;
            separator = ", ";
        }
        write!(self, "}}")?;
        Ok(())
    }
    /// `{a, b, ...}` — a Python set literal; the empty set is spelled `set()`
    /// because `{}` would mean an empty dict in Python.
    fn set<T: Repr>(&mut self, set: impl IntoIterator<Item = T>) -> fmt::Result {
        let mut set = set.into_iter().peekable();
        if set.peek().is_none() {
            self.write_str("set()")?;
            return Ok(());
        }
        write!(self, "{{")?;
        let mut separator = "";
        for value in set {
            write!(self, "{separator}{}", ReprFmt(value))?;
            separator = ", ";
        }
        write!(self, "}}")?;
        Ok(())
    }
    /// Placeholder for types whose repr is not implemented yet.
    fn unimplemented(&mut self) -> fmt::Result {
        self.write_str("UNIMPLEMENTED")
    }
}
// Blanket impl: any concrete `fmt::Write` (including `fmt::Formatter`)
// gets the `WriteExt` helpers.
impl<W: fmt::Write> WriteExt for W {}
// Also make the helpers callable through a `dyn fmt::Write` trait object.
impl<'a> WriteExt for dyn fmt::Write + 'a {}
/// Adapter bridging [`Repr`] into [`fmt::Display`] so `Repr` values can be
/// used as `write!`/`format!` arguments.
#[derive(Copy, Clone)]
struct ReprFmt<T>(pub T);
impl<T: Repr> fmt::Display for ReprFmt<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Repr::fmt(&self.0, f)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/vector.rs | lib/edge/python/src/types/vector.rs | use std::collections::HashMap;
use std::{fmt, mem};
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::IntoPyObjectExt;
use pyo3::prelude::*;
use segment::types::VectorNameBuf;
use shard::operations::point_ops::{VectorPersisted, VectorStructPersisted};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::{DimId, DimWeight};
use crate::repr::*;
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyVector(VectorStructPersisted);
impl FromPyObject<'_, '_> for PyVector {
type Error = PyErr;
fn extract(vector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
#[derive(FromPyObject)]
enum Helper {
Single(Vec<f32>),
MultiDense(Vec<Vec<f32>>),
Named(HashMap<VectorNameBuf, PyNamedVector>),
}
fn _variants(vector: VectorStructPersisted) {
match vector {
VectorStructPersisted::Single(_) => {}
VectorStructPersisted::MultiDense(_) => {}
VectorStructPersisted::Named(_) => {}
}
}
let vector = match vector.extract()? {
Helper::Single(single) => VectorStructPersisted::Single(single),
Helper::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
Helper::Named(named) => VectorStructPersisted::Named(PyNamedVector::peel_map(named)),
};
Ok(Self(vector))
}
}
impl<'py> IntoPyObject<'py> for PyVector {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyVector {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match &self.0 {
VectorStructPersisted::Single(single) => single.into_bound_py_any(py),
VectorStructPersisted::MultiDense(multi) => multi.into_bound_py_any(py),
VectorStructPersisted::Named(named) => {
PyNamedVector::wrap_map_ref(named).into_bound_py_any(py)
}
}
}
}
impl Repr for PyVector {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
VectorStructPersisted::Single(single) => single.fmt(f),
VectorStructPersisted::MultiDense(multi) => multi.fmt(f),
VectorStructPersisted::Named(named) => PyNamedVector::wrap_map_ref(named).fmt(f),
}
}
}
/// Newtype over [`VectorPersisted`] handling Python conversions of a single
/// named vector (dense, sparse, or multi-dense).
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyNamedVector(VectorPersisted);
impl PyNamedVector {
    /// Strips the wrapper from every value of the map without reallocating.
    pub fn peel_map(map: HashMap<String, Self>) -> HashMap<String, VectorPersisted>
    where
        Self: TransparentWrapper<VectorPersisted>,
    {
        // SAFETY: `Self` is `#[repr(transparent)]` over `VectorPersisted`
        // (enforced by the `TransparentWrapper` bound in the signature), so
        // the two map types are layout-compatible.
        unsafe { mem::transmute(map) }
    }
    /// Reinterprets a map of inner vectors as a map of wrappers, borrowed.
    pub fn wrap_map_ref(map: &HashMap<String, VectorPersisted>) -> &HashMap<String, Self>
    where
        Self: TransparentWrapper<VectorPersisted>,
    {
        // SAFETY: same transparent-layout argument as `peel_map`; only the
        // reference type changes.
        unsafe { mem::transmute(map) }
    }
}
impl FromPyObject<'_, '_> for PyNamedVector {
type Error = PyErr;
fn extract(vector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
#[derive(FromPyObject)]
enum Helper {
Dense(Vec<f32>),
Sparse(PySparseVector),
MultiDense(Vec<Vec<f32>>),
}
fn _variants(vector: VectorPersisted) {
match vector {
VectorPersisted::Dense(_) => {}
VectorPersisted::Sparse(_) => {}
VectorPersisted::MultiDense(_) => {}
}
}
let vector = match vector.extract()? {
Helper::Dense(dense) => VectorPersisted::Dense(dense),
Helper::Sparse(sparse) => VectorPersisted::Sparse(SparseVector::from(sparse)),
Helper::MultiDense(multi) => VectorPersisted::MultiDense(multi),
};
Ok(Self(vector))
}
}
impl<'py> IntoPyObject<'py> for PyNamedVector {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyNamedVector {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match &self.0 {
VectorPersisted::Dense(dense) => dense.into_bound_py_any(py),
VectorPersisted::Sparse(sparse) => PySparseVector(sparse.clone()).into_bound_py_any(py),
VectorPersisted::MultiDense(multi) => multi.into_bound_py_any(py),
}
}
}
impl Repr for PyNamedVector {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
VectorPersisted::Dense(dense) => dense.fmt(f),
VectorPersisted::Sparse(sparse) => PySparseVector::wrap_ref(sparse).fmt(f),
VectorPersisted::MultiDense(multi) => multi.fmt(f),
}
}
}
#[pyclass(name = "SparseVector")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PySparseVector(pub SparseVector);
#[pyclass_repr]
#[pymethods]
impl PySparseVector {
#[new]
pub fn new(indices: Vec<DimId>, values: Vec<DimWeight>) -> Self {
Self(SparseVector { indices, values })
}
#[getter]
pub fn indices(&self) -> &[DimId] {
self.0.indices.as_slice()
}
#[getter]
pub fn values(&self) -> &[DimWeight] {
self.0.values.as_slice()
}
fn __repr__(&self) -> String {
self.repr()
}
}
impl PySparseVector {
fn _getters(self) {
// Every field should have a getter method
let SparseVector {
indices: _,
values: _,
} = self.0;
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/value.rs | lib/edge/python/src/types/value.rs | use std::collections::HashMap;
use std::{fmt, mem};
use bytemuck::{TransparentWrapper, TransparentWrapperAlloc as _};
use derive_more::Into;
use pyo3::IntoPyObjectExt as _;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyString};
use crate::repr::*;
/// Newtype over [`serde_json::Value`] providing Python <-> JSON payload
/// value conversions.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyValue(serde_json::Value);
impl PyValue {
    /// Strips the wrapper from every value of the map without reallocating.
    pub fn peel_map(map: HashMap<String, Self>) -> HashMap<String, serde_json::Value>
    where
        Self: TransparentWrapper<serde_json::Value>,
    {
        // SAFETY: `Self` is `#[repr(transparent)]` over `serde_json::Value`
        // (enforced by the `TransparentWrapper` bound in the signature), so
        // the two map types are layout-compatible.
        unsafe { mem::transmute(map) }
    }
}
impl FromPyObject<'_, '_> for PyValue {
    type Error = PyErr;
    /// Converts an arbitrary Python object into a JSON payload value.
    fn extract(value: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // The derived extraction tries variants in declaration order:
        // `Bool` must come before the integer variants because Python's
        // `bool` is a subclass of `int`, and `Uint` before `Int` mirrors the
        // `as_u64`-first order used by the `IntoPyObject` impl below.
        #[derive(FromPyObject)]
        enum Helper {
            Bool(bool),
            Uint(u64),
            Int(i64),
            Float(f64),
            String(String),
            Array(Vec<PyValue>),
            Object(#[pyo3(from_py_with = value_map_from_py)] ValueMap),
        }
        // `None` has no `Helper` variant; handle it up front.
        if value.is_none() {
            return Ok(Self(serde_json::Value::Null));
        }
        let value = match value.extract()? {
            Helper::Bool(bool) => serde_json::Value::Bool(bool),
            Helper::Uint(uint) => serde_json::Value::Number(serde_json::Number::from(uint)),
            Helper::Int(int) => serde_json::Value::Number(serde_json::Number::from(int)),
            Helper::Float(float) => {
                // `from_f64` returns `None` for non-finite floats (NaN, ±inf),
                // which JSON cannot represent.
                let num = serde_json::Number::from_f64(float).ok_or_else(|| {
                    PyValueError::new_err(format!(
                        "failed to convert {float} into payload number type"
                    ))
                })?;
                serde_json::Value::Number(num)
            }
            Helper::String(str) => serde_json::Value::String(str),
            Helper::Array(arr) => serde_json::Value::Array(PyValue::peel_vec(arr)),
            Helper::Object(map) => serde_json::Value::Object(map),
        };
        Ok(Self(value))
    }
}
impl<'py> IntoPyObject<'py> for PyValue {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    // By-value conversion delegates to the by-reference impl below.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
impl<'py> IntoPyObject<'py> for &PyValue {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    /// Converts a JSON payload value into the corresponding Python object.
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            serde_json::Value::Null => Ok(py.None().into_bound(py)),
            serde_json::Value::Bool(bool) => bool.into_bound_py_any(py),
            serde_json::Value::Number(num) => {
                // Probe the three internal representations in order; exactly
                // one of them holds for any `serde_json::Number`.
                if let Some(uint) = num.as_u64() {
                    uint.into_bound_py_any(py)
                } else if let Some(int) = num.as_i64() {
                    int.into_bound_py_any(py)
                } else if let Some(float) = num.as_f64() {
                    float.into_bound_py_any(py)
                } else {
                    unreachable!("`serde_json::Number` is always `u64`, `i64` or `f64`")
                }
            }
            serde_json::Value::String(str) => str.into_bound_py_any(py),
            serde_json::Value::Array(arr) => PyValue::wrap_slice(arr).into_bound_py_any(py),
            serde_json::Value::Object(map) => value_map_into_py(map, py),
        }
    }
}
impl Repr for PyValue {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
/// JSON object type used for payload maps.
pub type ValueMap = serde_json::Map<String, serde_json::Value>;
/// Converts a Python `dict` into a JSON object, extracting each value
/// recursively via [`PyValue`]. Fails if the argument is not a `dict`.
pub fn value_map_from_py(dict: &Bound<'_, PyAny>) -> PyResult<ValueMap> {
    let dict = dict.cast::<PyDict>()?;
    // Pre-size the map to avoid re-hashing while inserting.
    let mut map = serde_json::Map::with_capacity(dict.len());
    for (key, value) in dict {
        let key = key.extract()?;
        let value: PyValue = value.extract()?;
        map.insert(key, value.into());
    }
    Ok(map)
}
/// Converts a JSON object into a Python `dict`, converting each value
/// recursively via [`PyValue`].
pub fn value_map_into_py<'py>(map: &ValueMap, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
    let dict = PyDict::new(py);
    for (key, value) in map {
        let key = PyString::new(py, key);
        let value = PyValue::wrap_ref(value).into_pyobject(py)?;
        dict.set_item(key, value)?;
    }
    Ok(dict.into_any())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/record.rs | lib/edge/python/src/types/record.rs | use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::prelude::*;
use shard::retrieve::record_internal::RecordInternal;
use crate::repr::*;
use crate::*;
/// Python wrapper for [`RecordInternal`], exposed to Python as `Record`.
#[pyclass(name = "Record")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyRecord(pub RecordInternal);
#[pyclass_repr]
#[pymethods]
impl PyRecord {
    #[getter]
    pub fn id(&self) -> PyPointId {
        PyPointId(self.0.id)
    }
    #[getter]
    pub fn vector(&self) -> Option<&PyVectorInternal> {
        self.0.vector.as_ref().map(PyVectorInternal::wrap_ref)
    }
    #[getter]
    pub fn payload(&self) -> Option<&PyPayload> {
        self.0.payload.as_ref().map(PyPayload::wrap_ref)
    }
    #[getter]
    pub fn order_value(&self) -> Option<PyOrderValue> {
        self.0.order_value.map(PyOrderValue::from)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyRecord {
    // Compile-time reminder: this exhaustive destructuring fails to build
    // if `RecordInternal` gains a field, prompting a matching getter above.
    fn _getters(self) {
        // Every field should have a getter method
        let RecordInternal {
            id: _,
            payload: _,
            vector: _,
            shard_key: _, // not relevant for Qdrant Edge
            order_value: _,
        } = self.0;
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/mod.rs | lib/edge/python/src/types/mod.rs | pub mod filter;
pub mod formula;
pub mod json_path;
pub mod order_value;
pub mod payload;
pub mod point;
pub mod point_id;
pub mod point_vectors;
pub mod query;
pub mod record;
pub mod scored_point;
pub mod value;
pub mod vector;
pub mod vector_internal;
pub use self::filter::*;
pub use self::formula::*;
pub use self::json_path::*;
pub use self::order_value::*;
pub use self::payload::*;
pub use self::point::*;
pub use self::point_id::*;
pub use self::point_vectors::*;
pub use self::query::*;
pub use self::record::*;
pub use self::scored_point::*;
pub use self::value::*;
pub use self::vector::*;
pub use self::vector_internal::*;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/order_value.rs | lib/edge/python/src/types/order_value.rs | use std::fmt;
use pyo3::prelude::*;
use segment::data_types::order_by::OrderValue;
use crate::repr::*;
/// Order-by value returned to Python as a plain `int` or `float`.
#[derive(IntoPyObject)]
pub enum PyOrderValue {
    Int(i64),
    Float(f64),
}
impl Repr for PyOrderValue {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::Int(int) => int.fmt(f),
Self::Float(float) => float.fmt(f),
}
}
}
impl From<OrderValue> for PyOrderValue {
fn from(value: OrderValue) -> Self {
match value {
OrderValue::Int(int) => Self::Int(int),
OrderValue::Float(float) => Self::Float(float),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/scored_point.rs | lib/edge/python/src/types/scored_point.rs | use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::prelude::*;
use segment::types::ScoredPoint;
use super::PyOrderValue;
use crate::repr::*;
use crate::{PyPayload, PyPointId, PyVectorInternal};
#[pyclass(name = "ScoredPoint")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyScoredPoint(pub ScoredPoint);
#[pyclass_repr]
#[pymethods]
impl PyScoredPoint {
#[getter]
pub fn id(&self) -> PyPointId {
PyPointId(self.0.id)
}
#[getter]
pub fn version(&self) -> u64 {
self.0.version
}
#[getter]
pub fn score(&self) -> f32 {
self.0.score
}
#[getter]
pub fn vector(&self) -> Option<&PyVectorInternal> {
self.0.vector.as_ref().map(PyVectorInternal::wrap_ref)
}
#[getter]
pub fn payload(&self) -> Option<&PyPayload> {
self.0.payload.as_ref().map(PyPayload::wrap_ref)
}
#[getter]
pub fn order_value(&self) -> Option<PyOrderValue> {
self.0.order_value.map(PyOrderValue::from)
}
pub fn __repr__(&self) -> String {
self.repr()
}
}
impl PyScoredPoint {
fn _getters(self) {
// Every field should have a getter method
let ScoredPoint {
id: _,
version: _,
score: _,
vector: _,
payload: _,
shard_key: _, // not relevant for Qdrant Edge
order_value: _,
} = self.0;
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/payload.rs | lib/edge/python/src/types/payload.rs | use std::fmt;
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::prelude::*;
use segment::types::*;
use super::value::*;
use crate::repr::*;
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPayload(pub Payload);
impl FromPyObject<'_, '_> for PyPayload {
type Error = PyErr;
fn extract(payload: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
let payload = value_map_from_py(&payload)?;
Ok(Self(Payload(payload)))
}
}
impl<'py> IntoPyObject<'py> for PyPayload {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyPayload {
type Target = PyAny; // PyDict
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
value_map_into_py(&self.0.0, py)
}
}
impl Repr for PyPayload {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
self.0.0.fmt(f)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/point_id.rs | lib/edge/python/src/types/point_id.rs | use std::{fmt, mem};
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::IntoPyObjectExt as _;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use segment::types::PointIdType;
use uuid::Uuid;
use crate::repr::*;
/// Point id newtype bridging [`PointIdType`] (numeric id or UUID) to Python.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPointId(pub PointIdType);
impl PyPointId {
    /// Strips the wrapper from every element of the set without reallocating.
    pub fn peel_set(set: ahash::HashSet<Self>) -> ahash::HashSet<PointIdType>
    where
        Self: TransparentWrapper<PointIdType>,
    {
        // SAFETY: `Self` is `#[repr(transparent)]` over `PointIdType`
        // (enforced by the `TransparentWrapper` bound in the signature), so
        // the two set types are layout-compatible.
        unsafe { mem::transmute(set) }
    }
    /// Reinterprets a set of inner ids as a set of wrappers, borrowed.
    pub fn wrap_set_ref(set: &ahash::HashSet<PointIdType>) -> &ahash::HashSet<Self>
    where
        Self: TransparentWrapper<PointIdType>,
    {
        // SAFETY: same transparent-layout argument as `peel_set`; only the
        // reference type changes.
        unsafe { mem::transmute(set) }
    }
}
impl FromPyObject<'_, '_> for PyPointId {
    type Error = PyErr;
    /// Accepts an unsigned integer, a `uuid.UUID`, or a UUID string.
    fn extract(point_id: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        #[derive(FromPyObject)]
        enum Helper {
            NumId(u64),
            Uuid(Uuid),
            // Extra input convenience: plain strings are parsed as UUIDs.
            UuidStr(String),
        }
        // Compile-time exhaustiveness guard: breaks the build if
        // `PointIdType` gains a variant that `Helper` does not cover.
        fn _variants(point_id: PointIdType) {
            match point_id {
                PointIdType::NumId(_) => {}
                PointIdType::Uuid(_) => {}
            }
        }
        let point_id = match point_id.extract()? {
            Helper::NumId(id) => PointIdType::NumId(id),
            Helper::Uuid(uuid) => PointIdType::Uuid(uuid),
            Helper::UuidStr(uuid_str) => {
                let uuid = Uuid::parse_str(&uuid_str).map_err(|err| {
                    PyValueError::new_err(format!("failed to parse {uuid_str} as UUID: {err}"))
                })?;
                PointIdType::Uuid(uuid)
            }
        };
        Ok(Self(point_id))
    }
}
impl<'py> IntoPyObject<'py> for PyPointId {
type Target = PyAny;
type Output = Bound<'py, PyAny>;
type Error = PyErr; // Infallible
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyPointId {
type Target = PyAny;
type Output = Bound<'py, PyAny>;
type Error = PyErr; // Infallible
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match &self.0 {
PointIdType::NumId(id) => id.into_bound_py_any(py),
PointIdType::Uuid(uuid) => uuid.into_bound_py_any(py),
}
}
}
impl Repr for PyPointId {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
PointIdType::NumId(id) => id.fmt(f),
PointIdType::Uuid(uuid) => uuid.fmt(f),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/vector_internal.rs | lib/edge/python/src/types/vector_internal.rs | use std::collections::HashMap;
use std::{fmt, mem};
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::IntoPyObjectExt;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::PyList;
use segment::data_types::vectors::*;
use segment::types::VectorNameBuf;
use sparse::common::sparse_vector::SparseVector;
use super::vector::PySparseVector;
use crate::repr::*;
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyVectorInternal(VectorStructInternal);
impl FromPyObject<'_, '_> for PyVectorInternal {
type Error = PyErr;
fn extract(vector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
#[derive(FromPyObject)]
enum Helper {
Single(Vec<f32>),
MultiDense(#[pyo3(from_py_with = multi_dense_from_py)] MultiDenseVector),
Named(HashMap<VectorNameBuf, PyNamedVectorInternal>),
}
fn _variants(vector: VectorStructInternal) {
match vector {
VectorStructInternal::Single(_) => {}
VectorStructInternal::MultiDense(_) => {}
VectorStructInternal::Named(_) => {}
}
}
let vector = match vector.extract()? {
Helper::Single(single) => VectorStructInternal::Single(single),
Helper::MultiDense(multi) => {
VectorStructInternal::MultiDense(MultiDenseVectorInternal::from(multi))
}
Helper::Named(named) => {
VectorStructInternal::Named(PyNamedVectorInternal::peel_map(named))
}
};
Ok(Self(vector))
}
}
impl<'py> IntoPyObject<'py> for PyVectorInternal {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyVectorInternal {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match &self.0 {
VectorStructInternal::Single(single) => single.into_bound_py_any(py),
VectorStructInternal::MultiDense(multi) => multi_dense_into_py(multi, py),
VectorStructInternal::Named(named) => {
PyNamedVectorInternal::wrap_map_ref(named).into_bound_py_any(py)
}
}
}
}
impl Repr for PyVectorInternal {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
VectorStructInternal::Single(single) => single.fmt(f),
VectorStructInternal::MultiDense(multi) => f.list(multi.multi_vectors()),
VectorStructInternal::Named(named) => PyNamedVectorInternal::wrap_map_ref(named).fmt(f),
}
}
}
/// Newtype over a single named [`VectorInternal`] for Python conversions.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyNamedVectorInternal(pub VectorInternal);
impl PyNamedVectorInternal {
    /// Strips the wrapper from every value of the map without reallocating.
    pub fn peel_map(map: HashMap<String, Self>) -> HashMap<String, VectorInternal>
    where
        Self: TransparentWrapper<VectorInternal>,
    {
        // SAFETY: `Self` is `#[repr(transparent)]` over `VectorInternal`
        // (enforced by the `TransparentWrapper` bound in the signature), so
        // the two map types are layout-compatible.
        unsafe { mem::transmute(map) }
    }
    /// Reinterprets a map of inner vectors as a map of wrappers, borrowed.
    pub fn wrap_map_ref(map: &HashMap<String, VectorInternal>) -> &HashMap<String, Self>
    where
        Self: TransparentWrapper<VectorInternal>,
    {
        // SAFETY: same transparent-layout argument as `peel_map`; only the
        // reference type changes.
        unsafe { mem::transmute(map) }
    }
}
impl FromPyObject<'_, '_> for PyNamedVectorInternal {
type Error = PyErr;
fn extract(vector: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
#[derive(FromPyObject)]
enum Helper {
Dense(Vec<f32>),
Sparse(PySparseVector),
MultiDense(#[pyo3(from_py_with = multi_dense_from_py)] MultiDenseVector),
}
let vector = match vector.extract()? {
Helper::Dense(dense) => VectorInternal::Dense(dense),
Helper::Sparse(sparse) => VectorInternal::Sparse(SparseVector::from(sparse)),
Helper::MultiDense(multi) => {
VectorInternal::MultiDense(MultiDenseVectorInternal::from(multi))
}
};
Ok(Self(vector))
}
}
impl<'py> IntoPyObject<'py> for PyNamedVectorInternal {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyNamedVectorInternal {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match &self.0 {
VectorInternal::Dense(dense) => dense.into_bound_py_any(py),
VectorInternal::Sparse(sparse) => PySparseVector(sparse.clone()).into_bound_py_any(py),
VectorInternal::MultiDense(multi) => multi_dense_into_py(multi, py),
}
}
}
impl Repr for PyNamedVectorInternal {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
VectorInternal::Dense(dense) => dense.fmt(f),
VectorInternal::Sparse(sparse) => PySparseVector::wrap_ref(sparse).fmt(f),
VectorInternal::MultiDense(multi) => f.list(multi.multi_vectors()),
}
}
}
/// Multi-dense vector with plain `f32` components.
type MultiDenseVector = TypedMultiDenseVector<f32>;
/// Extracts a Python matrix (list of rows) into a [`MultiDenseVector`],
/// raising `ValueError` when `try_from_matrix` rejects the input
/// (presumably ragged or empty matrices — TODO confirm).
fn multi_dense_from_py(matrix: &Bound<'_, PyAny>) -> PyResult<MultiDenseVector> {
    MultiDenseVector::try_from_matrix(matrix.extract()?)
        .map_err(|err| PyValueError::new_err(err.to_string()))
}
/// Renders a multi-dense vector as a Python list of per-vector rows.
fn multi_dense_into_py<'py>(
    multi: &MultiDenseVector,
    py: Python<'py>,
) -> PyResult<Bound<'py, PyAny>> {
    let matrix = PyList::empty(py);
    for vector in multi.multi_vectors() {
        matrix.append(vector.into_pyobject(py)?)?;
    }
    Ok(matrix.into_any())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/point_vectors.rs | lib/edge/python/src/types/point_vectors.rs | use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::{pyclass, pymethods};
use segment::types::PointIdType;
use shard::operations::point_ops::VectorStructPersisted;
use shard::operations::vector_ops::PointVectorsPersisted;
use crate::repr::*;
use crate::types::{PyPointId, PyVector};
#[pyclass(name = "PointVectors")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPointVectors(pub PointVectorsPersisted);
#[pyclass_repr]
#[pymethods]
impl PyPointVectors {
#[new]
pub fn new(id: PyPointId, vector: PyVector) -> Self {
Self(PointVectorsPersisted {
id: PointIdType::from(id),
vector: VectorStructPersisted::from(vector),
})
}
#[getter]
pub fn id(&self) -> PyPointId {
PyPointId(self.0.id)
}
#[getter]
pub fn vector(&self) -> &PyVector {
PyVector::wrap_ref(&self.0.vector)
}
pub fn __repr__(&self) -> String {
self.repr()
}
}
impl PyPointVectors {
fn _getters(self) {
// Every field should have a getter method
let PointVectorsPersisted { id: _, vector: _ } = self.0;
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/json_path.rs | lib/edge/python/src/types/json_path.rs | use std::convert::Infallible;
use std::fmt;
use std::str::FromStr as _;
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::PyString;
use segment::json_path::JsonPath;
use crate::repr::*;
/// JSON path newtype; crosses the Python boundary as a plain string.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyJsonPath(pub JsonPath);
impl FromPyObject<'_, '_> for PyJsonPath {
    type Error = PyErr;
    /// Parses a Python string into a [`JsonPath`]; raises `ValueError` on
    /// invalid syntax.
    fn extract(json_path: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        let json_path: String = json_path.extract()?;
        // NOTE(review): the parse error detail is discarded here; consider
        // including it in the message if `JsonPath`'s error type is
        // displayable (cf. the UUID handling in `PyPointId::extract`).
        let json_path = JsonPath::from_str(&json_path)
            .map_err(|_| PyValueError::new_err(format!("invalid JSON path {json_path}")))?;
        Ok(PyJsonPath(json_path))
    }
}
impl<'py> IntoPyObject<'py> for PyJsonPath {
type Target = PyString;
type Output = Bound<'py, Self::Target>;
type Error = Infallible;
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
IntoPyObject::into_pyobject(&self, py)
}
}
impl<'py> IntoPyObject<'py> for &PyJsonPath {
type Target = PyString;
type Output = Bound<'py, Self::Target>;
type Error = Infallible;
fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
Ok(PyString::new(py, &self.0.to_string()))
}
}
impl Repr for PyJsonPath {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
self.0.to_string().fmt(f)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/point.rs | lib/edge/python/src/types/point.rs | use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::prelude::*;
use segment::types::{Payload, PointIdType};
use shard::operations::point_ops::{PointStructPersisted, VectorStructPersisted};
use crate::repr::*;
use crate::{PyPayload, PyPointId, PyVector};
#[pyclass(name = "Point")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyPoint(PointStructPersisted);
#[pyclass_repr]
#[pymethods]
impl PyPoint {
#[new]
pub fn new(id: PyPointId, vector: PyVector, payload: Option<PyPayload>) -> Self {
let point = PointStructPersisted {
id: PointIdType::from(id),
vector: VectorStructPersisted::from(vector),
payload: payload.map(Payload::from),
};
Self(point)
}
#[getter]
pub fn id(&self) -> PyPointId {
PyPointId(self.0.id)
}
#[getter]
pub fn vector(&self) -> &PyVector {
PyVector::wrap_ref(&self.0.vector)
}
#[getter]
pub fn payload(&self) -> Option<&PyPayload> {
self.0.payload.as_ref().map(PyPayload::wrap_ref)
}
pub fn __repr__(&self) -> String {
self.repr()
}
}
impl PyPoint {
fn _getters(self) {
// Every field should have a getter method
let PointStructPersisted {
id: _,
vector: _,
payload: _,
} = self.0;
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/filter/match.rs | lib/edge/python/src/types/filter/match.rs | use std::fmt;
use std::hash::Hash;
use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::IntoPyObjectExt as _;
use pyo3::prelude::*;
use pyo3::types::PyList;
use segment::types::*;
use crate::repr::*;
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatch(pub Match);
impl FromPyObject<'_, '_> for PyMatch {
type Error = PyErr;
fn extract(filter: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
#[derive(FromPyObject)]
enum Helper {
Value(PyMatchValue),
Text(PyMatchText),
TextAny(PyMatchTextAny),
Phrase(PyMatchPhrase),
Any(PyMatchAny),
Except(PyMatchExcept),
}
fn _variants(filter: Match) {
match filter {
Match::Value(_) => {}
Match::Text(_) => {}
Match::TextAny(_) => {}
Match::Phrase(_) => {}
Match::Any(_) => {}
Match::Except(_) => {}
}
}
let filter = match filter.extract()? {
Helper::Value(value) => Match::Value(MatchValue::from(value)),
Helper::Text(text) => Match::Text(MatchText::from(text)),
Helper::TextAny(text_any) => Match::TextAny(MatchTextAny::from(text_any)),
Helper::Phrase(phrase) => Match::Phrase(MatchPhrase::from(phrase)),
Helper::Any(any) => Match::Any(MatchAny::from(any)),
Helper::Except(except) => Match::Except(MatchExcept::from(except)),
};
Ok(Self(filter))
}
}
impl<'py> IntoPyObject<'py> for PyMatch {
type Target = PyAny;
type Output = Bound<'py, Self::Target>;
type Error = PyErr;
fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
match self.0 {
Match::Value(value) => PyMatchValue(value).into_bound_py_any(py),
Match::Text(text) => PyMatchText(text).into_bound_py_any(py),
Match::TextAny(text_any) => PyMatchTextAny(text_any).into_bound_py_any(py),
Match::Phrase(phrase) => PyMatchPhrase(phrase).into_bound_py_any(py),
Match::Any(any) => PyMatchAny(any).into_bound_py_any(py),
Match::Except(except) => PyMatchExcept(except).into_bound_py_any(py),
}
}
}
impl Repr for PyMatch {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match &self.0 {
Match::Value(value) => PyMatchValue::wrap_ref(value).fmt(f),
Match::Text(text) => PyMatchText::wrap_ref(text).fmt(f),
Match::TextAny(text_any) => PyMatchTextAny::wrap_ref(text_any).fmt(f),
Match::Phrase(phrase) => PyMatchPhrase::wrap_ref(phrase).fmt(f),
Match::Any(any) => PyMatchAny::wrap_ref(any).fmt(f),
Match::Except(except) => PyMatchExcept::wrap_ref(except).fmt(f),
}
}
}
#[pyclass(name = "MatchValue")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchValue(pub MatchValue);
#[pyclass_repr]
#[pymethods]
impl PyMatchValue {
#[new]
pub fn new(value: PyValueVariants) -> Self {
Self(MatchValue {
value: ValueVariants::from(value),
})
}
#[getter]
pub fn value(&self) -> &PyValueVariants {
PyValueVariants::wrap_ref(&self.0.value)
}
}
impl PyMatchValue {
fn _getters(self) {
// Every field should have a getter method
let MatchValue { value: _ } = self.0;
}
}
/// Transparent wrapper around [`ValueVariants`] (string / integer / bool),
/// converted to and from native Python values rather than a wrapper class.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyValueVariants(ValueVariants);
impl FromPyObject<'_, '_> for PyValueVariants {
    type Error = PyErr;
    fn extract(value: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Helper mirrors ValueVariants; the derived FromPyObject attempts
        // the variants in declaration order.
        #[derive(FromPyObject)]
        enum Helper {
            String(String),
            Integer(IntPayloadType),
            Bool(bool),
        }
        // Compile-time reminder: adding a ValueVariants variant breaks this
        // match, forcing Helper above to be kept in sync.
        fn _variants(value: ValueVariants) {
            match value {
                ValueVariants::String(_) => {}
                ValueVariants::Integer(_) => {}
                ValueVariants::Bool(_) => {}
            }
        }
        let value = match value.extract()? {
            Helper::String(str) => ValueVariants::String(str),
            Helper::Integer(int) => ValueVariants::Integer(int),
            Helper::Bool(bool) => ValueVariants::Bool(bool),
        };
        Ok(Self(value))
    }
}
/// By-value conversion delegates to the by-reference impl below.
impl<'py> IntoPyObject<'py> for PyValueVariants {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
/// Converts to the corresponding native Python value (str / int / bool).
impl<'py> IntoPyObject<'py> for &PyValueVariants {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            ValueVariants::String(str) => str.into_bound_py_any(py),
            ValueVariants::Integer(int) => int.into_bound_py_any(py),
            ValueVariants::Bool(bool) => bool.into_bound_py_any(py),
        }
    }
}
/// Repr delegates straight to the inner value's formatting.
impl Repr for PyValueVariants {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match &self.0 {
            ValueVariants::String(str) => str.fmt(f),
            ValueVariants::Integer(int) => int.fmt(f),
            ValueVariants::Bool(bool) => bool.fmt(f),
        }
    }
}
/// Python wrapper around [`MatchText`], exposed as `MatchText`.
#[pyclass(name = "MatchText")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchText(pub MatchText);
#[pyclass_repr]
#[pymethods]
impl PyMatchText {
    /// Builds the condition from the query text.
    #[new]
    pub fn new(text: String) -> Self {
        Self(MatchText { text })
    }
    /// The query text (read-only property; borrowed, no clone).
    #[getter]
    pub fn text(&self) -> &str {
        &self.0.text
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMatchText {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let MatchText { text: _ } = self.0;
    }
}
/// Python wrapper around [`MatchTextAny`], exposed as `MatchTextAny`.
#[pyclass(name = "MatchTextAny")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchTextAny(pub MatchTextAny);
#[pyclass_repr]
#[pymethods]
impl PyMatchTextAny {
    /// Builds the condition from the query text.
    #[new]
    pub fn new(text_any: String) -> Self {
        Self(MatchTextAny { text_any })
    }
    /// The query text (read-only property; borrowed, no clone).
    #[getter]
    pub fn text_any(&self) -> &str {
        &self.0.text_any
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMatchTextAny {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let MatchTextAny { text_any: _ } = self.0;
    }
}
/// Python wrapper around [`MatchPhrase`], exposed as `MatchPhrase`.
#[pyclass(name = "MatchPhrase")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchPhrase(pub MatchPhrase);
#[pyclass_repr]
#[pymethods]
impl PyMatchPhrase {
    /// Builds the condition from the phrase text.
    #[new]
    pub fn new(phrase: String) -> Self {
        Self(MatchPhrase { phrase })
    }
    /// The phrase text (read-only property; borrowed, no clone).
    #[getter]
    pub fn phrase(&self) -> &str {
        &self.0.phrase
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMatchPhrase {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let MatchPhrase { phrase: _ } = self.0;
    }
}
/// Python wrapper around [`MatchAny`], exposed as `MatchAny`.
#[pyclass(name = "MatchAny")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchAny(pub MatchAny);
#[pyclass_repr]
#[pymethods]
impl PyMatchAny {
    /// Builds the condition from a list of strings or a list of integers.
    #[new]
    pub fn new(any: PyAnyVariants) -> Self {
        Self(MatchAny {
            any: AnyVariants::from(any),
        })
    }
    // NOTE(review): the Python property is named `value` although the inner
    // field is `any` (siblings expose the field under its own name) —
    // presumably intentional API naming; confirm before renaming.
    #[getter]
    pub fn value(&self) -> &PyAnyVariants {
        PyAnyVariants::wrap_ref(&self.0.any)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMatchAny {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let MatchAny { any: _value } = self.0;
    }
}
/// Python wrapper around [`MatchExcept`], exposed as `MatchExcept`.
#[pyclass(name = "MatchExcept")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyMatchExcept(pub MatchExcept);
#[pyclass_repr]
#[pymethods]
impl PyMatchExcept {
    /// Builds the condition from a list of strings or a list of integers.
    #[new]
    pub fn new(except: PyAnyVariants) -> Self {
        Self(MatchExcept {
            except: AnyVariants::from(except),
        })
    }
    // NOTE(review): the Python property is named `value` although the inner
    // field is `except` — mirrors PyMatchAny; confirm the naming is intended.
    #[getter]
    pub fn value(&self) -> &PyAnyVariants {
        PyAnyVariants::wrap_ref(&self.0.except)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyMatchExcept {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let MatchExcept { except: _value } = self.0;
    }
}
/// Transparent wrapper around [`AnyVariants`]: either a set of strings or a
/// set of integers, converted to and from a Python list.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyAnyVariants(AnyVariants);
impl FromPyObject<'_, '_> for PyAnyVariants {
    type Error = PyErr;
    fn extract(value: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Helper mirrors AnyVariants; both variants decode a Python list
        // through index_set_from_py, tried in declaration order.
        #[derive(FromPyObject)]
        enum Helper {
            Strings(#[pyo3(from_py_with = index_set_from_py)] IndexSet<String>),
            Integers(#[pyo3(from_py_with = index_set_from_py)] IndexSet<i64>),
        }
        // Compile-time reminder: adding an AnyVariants variant breaks this
        // match, forcing Helper above to be kept in sync.
        fn _variants(value: AnyVariants) {
            match value {
                AnyVariants::Strings(_) => {}
                AnyVariants::Integers(_) => {}
            }
        }
        let value = match value.extract()? {
            Helper::Strings(str) => AnyVariants::Strings(str),
            Helper::Integers(int) => AnyVariants::Integers(int),
        };
        Ok(Self(value))
    }
}
/// By-value conversion delegates to the by-reference impl below.
impl<'py> IntoPyObject<'py> for PyAnyVariants {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
/// Converts back into a Python list of the element type.
impl<'py> IntoPyObject<'py> for &PyAnyVariants {
    type Target = PyAny;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        match &self.0 {
            AnyVariants::Strings(str) => index_set_into_py::<String>(str, py),
            AnyVariants::Integers(int) => index_set_into_py::<i64>(int, py),
        }
    }
}
/// Repr renders either variant as a list of its elements.
impl Repr for PyAnyVariants {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match &self.0 {
            AnyVariants::Strings(str) => f.list(str),
            AnyVariants::Integers(int) => f.list(int),
        }
    }
}
// Local alias fixing the hasher to FNV, matching how `segment` declares
// AnyVariants' sets.
type IndexSet<T, S = fnv::FnvBuildHasher> = indexmap::IndexSet<T, S>;
/// Decodes a Python list into an [`IndexSet`], preserving first-seen order.
///
/// Errors if `list` is not a Python `list`, or if any element fails to
/// extract as `T`. Duplicate elements collapse into the existing entry.
fn index_set_from_py<T>(list: &Bound<'_, PyAny>) -> PyResult<IndexSet<T>>
where
    T: for<'py> FromPyObjectOwned<'py, Error = PyErr> + Eq + Hash,
{
    let list = list.cast::<PyList>()?;
    // Pre-size from the list length to avoid rehashing while inserting.
    let mut set = IndexSet::with_capacity_and_hasher(list.len(), Default::default());
    for value in list.iter() {
        let value = value.extract()?;
        set.insert(value);
    }
    Ok(set)
}
/// Encodes an [`IndexSet`] back into a Python list, keeping the set's
/// insertion order.
fn index_set_into_py<'py, T>(set: &IndexSet<T>, py: Python<'py>) -> PyResult<Bound<'py, PyAny>>
where
    for<'a> &'a T: IntoPyObject<'py>,
{
    let list = PyList::empty(py);
    for value in set {
        list.append(value)?;
    }
    Ok(list.into_any())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/filter/value_count.rs | lib/edge/python/src/types/filter/value_count.rs | use derive_more::Into;
use pyo3::prelude::*;
use segment::types::ValuesCount;
use crate::repr::*;
/// Python wrapper around [`ValuesCount`], exposed as `ValuesCount`: optional
/// lt / gt / lte / gte bounds on the number of values in a payload field.
#[pyclass(name = "ValuesCount")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyValuesCount(pub ValuesCount);
#[pyclass_repr]
#[pymethods]
impl PyValuesCount {
    /// All bounds default to None (unbounded) and may be combined freely.
    #[new]
    #[pyo3(signature = (lt=None, gt=None, lte=None, gte=None))]
    pub fn new(
        lt: Option<usize>,
        gt: Option<usize>,
        lte: Option<usize>,
        gte: Option<usize>,
    ) -> Self {
        Self(ValuesCount { lt, gt, lte, gte })
    }
    /// Exclusive upper bound, if set.
    #[getter]
    pub fn lt(&self) -> Option<usize> {
        self.0.lt
    }
    /// Exclusive lower bound, if set.
    #[getter]
    pub fn gt(&self) -> Option<usize> {
        self.0.gt
    }
    /// Inclusive upper bound, if set.
    #[getter]
    pub fn lte(&self) -> Option<usize> {
        self.0.lte
    }
    /// Inclusive lower bound, if set.
    #[getter]
    pub fn gte(&self) -> Option<usize> {
        self.0.gte
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyValuesCount {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let ValuesCount {
            lt: _,
            gt: _,
            lte: _,
            gte: _,
        } = self.0;
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/filter/field_condition.rs | lib/edge/python/src/types/filter/field_condition.rs | use bytemuck::TransparentWrapper;
use derive_more::Into;
use pyo3::prelude::*;
use segment::json_path::JsonPath;
use segment::types::*;
use crate::repr::*;
use crate::types::*;
/// Python wrapper around [`FieldCondition`], exposed as `FieldCondition`:
/// a condition on one payload key, with at most one of several optional
/// predicate kinds (match, range, geo shapes, values count, emptiness).
#[pyclass(name = "FieldCondition")]
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyFieldCondition(pub FieldCondition);
#[pyclass_repr]
#[pymethods]
impl PyFieldCondition {
    /// Builds a condition on `key`; every predicate argument is optional
    /// and defaults to None.
    #[new]
    #[pyo3(signature = (
        key,
        r#match=None,
        range=None,
        geo_bounding_box=None,
        geo_radius=None,
        geo_polygon=None,
        values_count=None,
        is_empty=None,
        is_null=None,
    ))]
    #[expect(clippy::too_many_arguments)]
    pub fn new(
        key: PyJsonPath,
        r#match: Option<PyMatch>,
        range: Option<PyRange>,
        geo_bounding_box: Option<PyGeoBoundingBox>,
        geo_radius: Option<PyGeoRadius>,
        geo_polygon: Option<PyGeoPolygon>,
        values_count: Option<PyValuesCount>,
        is_empty: Option<bool>,
        is_null: Option<bool>,
    ) -> Self {
        // Unwrap each Py* newtype into its segment counterpart.
        Self(FieldCondition {
            key: JsonPath::from(key),
            r#match: r#match.map(Match::from),
            range: range.map(RangeInterface::from),
            geo_bounding_box: geo_bounding_box.map(GeoBoundingBox::from),
            geo_radius: geo_radius.map(GeoRadius::from),
            geo_polygon: geo_polygon.map(GeoPolygon::from),
            values_count: values_count.map(ValuesCount::from),
            is_empty,
            is_null,
        })
    }
    /// The payload key this condition applies to (borrowed, no clone).
    #[getter]
    pub fn key(&self) -> &PyJsonPath {
        PyJsonPath::wrap_ref(&self.0.key)
    }
    // Non-Copy predicate fields (match, geo_polygon) are cloned out;
    // Copy fields below are returned by value.
    #[getter]
    pub fn r#match(&self) -> Option<PyMatch> {
        self.0.r#match.clone().map(PyMatch)
    }
    #[getter]
    pub fn range(&self) -> Option<PyRange> {
        self.0.range.map(PyRange::from)
    }
    #[getter]
    pub fn geo_bounding_box(&self) -> Option<PyGeoBoundingBox> {
        self.0.geo_bounding_box.map(PyGeoBoundingBox)
    }
    #[getter]
    pub fn geo_radius(&self) -> Option<PyGeoRadius> {
        self.0.geo_radius.map(PyGeoRadius)
    }
    #[getter]
    pub fn geo_polygon(&self) -> Option<PyGeoPolygon> {
        self.0.geo_polygon.clone().map(PyGeoPolygon)
    }
    #[getter]
    pub fn values_count(&self) -> Option<PyValuesCount> {
        self.0.values_count.map(PyValuesCount)
    }
    #[getter]
    pub fn is_empty(&self) -> Option<bool> {
        self.0.is_empty
    }
    #[getter]
    pub fn is_null(&self) -> Option<bool> {
        self.0.is_null
    }
}
impl PyFieldCondition {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let FieldCondition {
            key: _,
            r#match: _,
            range: _,
            geo_bounding_box: _,
            geo_radius: _,
            geo_polygon: _,
            values_count: _,
            is_empty: _,
            is_null: _,
        } = self.0;
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/filter/range.rs | lib/edge/python/src/types/filter/range.rs | use std::fmt;
use derive_more::Into;
use ordered_float::OrderedFloat;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use segment::types::*;
use crate::repr::*;
/// Union of the two range kinds exposed to Python: a numeric `RangeFloat`
/// or a `RangeDateTime`. FromPyObject/IntoPyObject are derived, so either
/// concrete class is accepted/produced directly.
#[derive(Copy, Clone, Debug, FromPyObject, IntoPyObject)]
pub enum PyRange {
    Float(PyRangeFloat),
    DateTime(PyRangeDateTime),
}
/// Repr delegates to the concrete range kind.
impl Repr for PyRange {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            PyRange::Float(float) => float.fmt(f),
            PyRange::DateTime(date_time) => date_time.fmt(f),
        }
    }
}
/// Lossless conversion from the segment-side enum, variant by variant.
impl From<RangeInterface> for PyRange {
    fn from(range: RangeInterface) -> Self {
        match range {
            RangeInterface::Float(float) => PyRange::Float(PyRangeFloat(float)),
            RangeInterface::DateTime(date_time) => PyRange::DateTime(PyRangeDateTime(date_time)),
        }
    }
}
/// Inverse conversion back into the segment-side enum.
impl From<PyRange> for RangeInterface {
    fn from(range: PyRange) -> Self {
        match range {
            PyRange::Float(float) => RangeInterface::Float(float.0),
            PyRange::DateTime(date_time) => RangeInterface::DateTime(date_time.0),
        }
    }
}
/// Python wrapper around a float [`Range`], exposed as `RangeFloat`.
/// The inner bounds are stored as `OrderedFloat`; the Python API only ever
/// sees plain `float`s (wrapped in the constructor, unwrapped in getters).
#[pyclass(name = "RangeFloat")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyRangeFloat(pub Range<OrderedFloat<FloatPayloadType>>);
#[pyclass_repr]
#[pymethods]
impl PyRangeFloat {
    /// All bounds default to None (unbounded).
    #[new]
    #[pyo3(signature = (gte=None, gt=None, lte=None, lt=None))]
    pub fn new(
        gte: Option<FloatPayloadType>,
        gt: Option<FloatPayloadType>,
        lte: Option<FloatPayloadType>,
        lt: Option<FloatPayloadType>,
    ) -> Self {
        Self(Range {
            gte: gte.map(OrderedFloat),
            gt: gt.map(OrderedFloat),
            lte: lte.map(OrderedFloat),
            lt: lt.map(OrderedFloat),
        })
    }
    /// Inclusive lower bound, if set.
    #[getter]
    pub fn gte(&self) -> Option<FloatPayloadType> {
        self.0.gte.map(|of| of.into_inner())
    }
    /// Exclusive lower bound, if set.
    #[getter]
    pub fn gt(&self) -> Option<FloatPayloadType> {
        self.0.gt.map(|of| of.into_inner())
    }
    /// Inclusive upper bound, if set.
    #[getter]
    pub fn lte(&self) -> Option<FloatPayloadType> {
        self.0.lte.map(|of| of.into_inner())
    }
    /// Exclusive upper bound, if set.
    #[getter]
    pub fn lt(&self) -> Option<FloatPayloadType> {
        self.0.lt.map(|of| of.into_inner())
    }
}
impl PyRangeFloat {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let Range {
            gte: _,
            gt: _,
            lte: _,
            lt: _,
        } = self.0;
    }
}
/// Python wrapper around a date-time [`Range`], exposed as `RangeDateTime`.
/// Bounds cross the FFI boundary as strings: parsed on construction,
/// rendered back via `Display` in the getters.
#[pyclass(name = "RangeDateTime")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyRangeDateTime(pub Range<DateTimePayloadType>);
#[pyclass_repr]
#[pymethods]
impl PyRangeDateTime {
    /// All bounds default to None (unbounded).
    ///
    /// Raises `ValueError` if any provided string fails to parse as a
    /// date-time (see `parse_datetime`).
    #[new]
    #[pyo3(signature = (gte=None, gt=None, lte=None, lt=None))]
    pub fn new(
        gte: Option<String>,
        gt: Option<String>,
        lte: Option<String>,
        lt: Option<String>,
    ) -> Result<Self, PyErr> {
        Ok(Self(Range {
            gte: parse_datetime_opt(gte.as_deref())?,
            gt: parse_datetime_opt(gt.as_deref())?,
            lte: parse_datetime_opt(lte.as_deref())?,
            lt: parse_datetime_opt(lt.as_deref())?,
        }))
    }
    /// Inclusive lower bound as a string, if set.
    #[getter]
    pub fn gte(&self) -> Option<String> {
        self.0.gte.map(|dt| dt.to_string())
    }
    /// Exclusive lower bound as a string, if set.
    #[getter]
    pub fn gt(&self) -> Option<String> {
        self.0.gt.map(|dt| dt.to_string())
    }
    /// Inclusive upper bound as a string, if set.
    #[getter]
    pub fn lte(&self) -> Option<String> {
        self.0.lte.map(|dt| dt.to_string())
    }
    /// Exclusive upper bound as a string, if set.
    #[getter]
    pub fn lt(&self) -> Option<String> {
        self.0.lt.map(|dt| dt.to_string())
    }
}
impl PyRangeDateTime {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let Range {
            gte: _,
            gt: _,
            lte: _,
            lt: _,
        } = self.0;
    }
}
fn parse_datetime_opt(date_time: Option<&str>) -> PyResult<Option<DateTimeWrapper>> {
date_time.map(parse_datetime).transpose()
}
/// Parses a date-time string into a [`DateTimeWrapper`], converting any
/// parse failure into a Python `ValueError` carrying the parser's message.
fn parse_datetime(date_time: &str) -> PyResult<DateTimeWrapper> {
    match date_time.parse() {
        Ok(parsed) => Ok(parsed),
        Err(err) => Err(PyValueError::new_err(format!(
            "failed to parse date-time: {err}"
        ))),
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/edge/python/src/types/filter/geo.rs | lib/edge/python/src/types/filter/geo.rs | use std::fmt;
use bytemuck::{TransparentWrapper, TransparentWrapperAlloc as _};
use derive_more::Into;
use ordered_float::OrderedFloat;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use segment::types::*;
use crate::repr::*;
/// Python wrapper around [`GeoPoint`], exposed as `GeoPoint`.
#[pyclass(name = "GeoPoint")]
#[derive(Copy, Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyGeoPoint(pub GeoPoint);
#[pyclass_repr]
#[pymethods]
impl PyGeoPoint {
    /// Validated constructor: delegates to `GeoPoint::new`, surfacing its
    /// validation failure as a Python `ValueError`.
    #[new]
    pub fn new(lon: f64, lat: f64) -> Result<Self, PyErr> {
        let point =
            GeoPoint::new(lon, lat).map_err(|err| PyValueError::new_err(err.to_string()))?;
        Ok(Self(point))
    }
    /// Longitude as a plain float (unwraps the inner OrderedFloat).
    #[getter]
    pub fn lon(&self) -> f64 {
        self.0.lon.into_inner()
    }
    /// Latitude as a plain float (unwraps the inner OrderedFloat).
    #[getter]
    pub fn lat(&self) -> f64 {
        self.0.lat.into_inner()
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyGeoPoint {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let GeoPoint { lon: _, lat: _ } = self.0;
    }
}
/// By-reference conversion copies the (Copy) point and reuses the by-value
/// impl; the target is the pyclass itself, not a plain PyAny.
impl<'py> IntoPyObject<'py> for &PyGeoPoint {
    type Target = PyGeoPoint;
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr;
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(*self, py)
    }
}
/// Python wrapper around [`GeoBoundingBox`], exposed as `GeoBoundingBox`:
/// a box defined by its top-left and bottom-right corner points.
#[pyclass(name = "GeoBoundingBox")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyGeoBoundingBox(pub GeoBoundingBox);
#[pyclass_repr]
#[pymethods]
impl PyGeoBoundingBox {
    #[new]
    pub fn new(top_left: PyGeoPoint, bottom_right: PyGeoPoint) -> Self {
        Self(GeoBoundingBox {
            top_left: GeoPoint::from(top_left),
            bottom_right: GeoPoint::from(bottom_right),
        })
    }
    /// Top-left corner (returned by value; GeoPoint is Copy).
    #[getter]
    pub fn top_left(&self) -> PyGeoPoint {
        PyGeoPoint(self.0.top_left)
    }
    /// Bottom-right corner (returned by value; GeoPoint is Copy).
    #[getter]
    pub fn bottom_right(&self) -> PyGeoPoint {
        PyGeoPoint(self.0.bottom_right)
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyGeoBoundingBox {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let GeoBoundingBox {
            top_left: _,
            bottom_right: _,
        } = self.0;
    }
}
/// Python wrapper around [`GeoRadius`], exposed as `GeoRadius`: a circle
/// defined by a center point and a radius.
#[pyclass(name = "GeoRadius")]
#[derive(Copy, Clone, Debug, Into)]
pub struct PyGeoRadius(pub GeoRadius);
#[pyclass_repr]
#[pymethods]
impl PyGeoRadius {
    #[new]
    pub fn new(center: PyGeoPoint, radius: f64) -> Self {
        Self(GeoRadius {
            center: GeoPoint::from(center),
            // Stored as OrderedFloat internally; plain f64 at the API.
            radius: OrderedFloat(radius),
        })
    }
    /// Center point (returned by value; GeoPoint is Copy).
    #[getter]
    pub fn center(&self) -> PyGeoPoint {
        PyGeoPoint(self.0.center)
    }
    /// Radius as a plain float (unwraps the inner OrderedFloat).
    #[getter]
    pub fn radius(&self) -> f64 {
        self.0.radius.into_inner()
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyGeoRadius {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let GeoRadius {
            center: _,
            radius: _,
        } = self.0;
    }
}
/// Python wrapper around [`GeoPolygon`], exposed as `GeoPolygon`: an
/// exterior ring plus optional interior rings (holes), each a line string.
#[pyclass(name = "GeoPolygon")]
#[derive(Clone, Debug, Into)]
pub struct PyGeoPolygon(pub GeoPolygon);
#[pyclass_repr]
#[pymethods]
impl PyGeoPolygon {
    /// Validated constructor: builds the unvalidated shadow struct, then
    /// goes through `GeoPolygon::try_from`, surfacing validation failure
    /// as a Python `ValueError`.
    #[new]
    #[pyo3(signature = (exterior, interiors=None))]
    pub fn new(
        exterior: PyGeoLineString,
        interiors: Option<Vec<PyGeoLineString>>,
    ) -> Result<Self, PyErr> {
        let shadow = GeoPolygonShadow {
            exterior: GeoLineString::from(exterior),
            // peel_vec: zero-copy unwrap of the #[repr(transparent)] vec.
            interiors: interiors.map(PyGeoLineString::peel_vec),
        };
        let polygon =
            GeoPolygon::try_from(shadow).map_err(|err| PyValueError::new_err(err.to_string()))?;
        Ok(Self(polygon))
    }
    /// Exterior ring (borrowed, no clone).
    #[getter]
    pub fn exterior(&self) -> &PyGeoLineString {
        PyGeoLineString::wrap_ref(&self.0.exterior)
    }
    /// Interior rings, if any (borrowed slice via zero-copy wrap_slice).
    #[getter]
    pub fn interiors(&self) -> Option<&[PyGeoLineString]> {
        self.0
            .interiors
            .as_ref()
            .map(|interiors| PyGeoLineString::wrap_slice(interiors))
    }
    pub fn __repr__(&self) -> String {
        self.repr()
    }
}
impl PyGeoPolygon {
    // Compile-time checklist for keeping getters in sync with fields.
    fn _getters(self) {
        // Every field should have a getter method
        let GeoPolygon {
            exterior: _,
            interiors: _,
        } = self.0;
    }
}
/// Transparent wrapper around [`GeoLineString`], represented on the Python
/// side as a plain list of `GeoPoint` rather than its own class.
#[derive(Clone, Debug, Into, TransparentWrapper)]
#[repr(transparent)]
pub struct PyGeoLineString(GeoLineString);
impl FromPyObject<'_, '_> for PyGeoLineString {
    type Error = PyErr;
    fn extract(points: Borrowed<'_, '_, PyAny>) -> PyResult<Self> {
        // Extracts a Vec<PyGeoPoint>, then peels the transparent wrappers
        // without copying the vector.
        let points = points.extract()?;
        Ok(Self(GeoLineString {
            points: PyGeoPoint::peel_vec(points),
        }))
    }
}
/// By-value conversion delegates to the by-reference impl below.
impl<'py> IntoPyObject<'py> for PyGeoLineString {
    type Target = PyAny; // PyList
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        IntoPyObject::into_pyobject(&self, py)
    }
}
/// Converts back into a Python list of GeoPoint (zero-copy slice wrap).
impl<'py> IntoPyObject<'py> for &PyGeoLineString {
    type Target = PyAny; // PyList
    type Output = Bound<'py, Self::Target>;
    type Error = PyErr; // Infallible
    fn into_pyobject(self, py: Python<'py>) -> PyResult<Self::Output> {
        PyGeoPoint::wrap_slice(&self.0.points).into_pyobject(py)
    }
}
/// Repr renders the line string as a list of its points.
impl Repr for PyGeoLineString {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.list(PyGeoPoint::wrap_slice(&self.0.points))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.