repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/in_memory_payload_storage_impl.rs | lib/segment/src/payload_storage/in_memory_payload_storage_impl.rs | use std::path::PathBuf;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use serde_json::Value;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
use crate::json_path::JsonPath;
use crate::payload_storage::PayloadStorage;
use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use crate::types::Payload;
impl PayloadStorage for InMemoryPayloadStorage {
    fn overwrite(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        _hw_counter: &HardwareCounterCell, // No measurement needed for in memory payload
    ) -> OperationResult<()> {
        // Drop any previous payload for this point and store a fresh copy.
        self.payload.insert(point_id, payload.clone());
        Ok(())
    }

    fn set(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        _hw_counter: &HardwareCounterCell, // No measurement needed for in memory payload
    ) -> OperationResult<()> {
        // Merge into the existing payload, or store a copy if the point is new.
        self.payload
            .entry(point_id)
            .and_modify(|existing| existing.merge(payload))
            .or_insert_with(|| payload.clone());
        Ok(())
    }

    fn set_by_key(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        key: &JsonPath,
        _hw_counter: &HardwareCounterCell, // No measurements for in memory storage
    ) -> OperationResult<()> {
        // A missing point behaves exactly like one with an empty payload.
        self.payload
            .entry(point_id)
            .or_default()
            .merge_by_key(payload, key);
        Ok(())
    }

    fn get(
        &self,
        point_id: PointOffsetType,
        _hw_counter: &HardwareCounterCell, // No measurements for in memory storage
    ) -> OperationResult<Payload> {
        // Unknown points yield an empty payload rather than an error.
        Ok(self.payload.get(&point_id).cloned().unwrap_or_default())
    }

    fn get_sequential(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Payload> {
        // In memory => No optimizations available.
        self.get(point_id, hw_counter)
    }

    fn delete(
        &mut self,
        point_id: PointOffsetType,
        key: &JsonPath,
        _hw_counter: &HardwareCounterCell, // No measurements for in memory storage
    ) -> OperationResult<Vec<Value>> {
        // Remove the value(s) under `key`; a missing point removes nothing.
        let removed = self
            .payload
            .get_mut(&point_id)
            .map(|payload| payload.remove(key))
            .unwrap_or_default();
        Ok(removed)
    }

    fn clear(
        &mut self,
        point_id: PointOffsetType,
        _hw_counter: &HardwareCounterCell, // No measurements for in memory storage
    ) -> OperationResult<Option<Payload>> {
        // Drop the whole payload of the point, returning the previous value.
        Ok(self.payload.remove(&point_id))
    }

    #[cfg(test)]
    fn clear_all(&mut self, _: &HardwareCounterCell) -> OperationResult<()> {
        // Reset to a brand-new, empty map.
        self.payload = Default::default();
        Ok(())
    }

    fn flusher(&self) -> Flusher {
        // Nothing is persisted, so flushing is a no-op.
        Box::new(|| Ok(()))
    }

    fn iter<F>(&self, mut callback: F, _hw_counter: &HardwareCounterCell) -> OperationResult<()>
    where
        F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>,
    {
        // Visit every stored payload until the callback asks to stop
        // (returns Ok(false)) or reports an error.
        for (&id, payload) in self.payload.iter() {
            if !callback(id, payload)? {
                break;
            }
        }
        Ok(())
    }

    fn files(&self) -> Vec<PathBuf> {
        // Purely in-memory: no backing files.
        Vec::new()
    }

    fn get_storage_size_bytes(&self) -> OperationResult<usize> {
        // Rough estimate: point-id size plus the JSON-encoded length of each
        // key/value pair of every payload.
        let mut estimated_size = 0;
        for payload in self.payload.values() {
            // account for point_id
            estimated_size += std::mem::size_of::<PointOffsetType>();
            for (key, value) in payload.0.iter() {
                // account for key and value
                estimated_size += key.len() + serde_json::to_string(value).unwrap().len();
            }
        }
        Ok(estimated_size)
    }

    fn is_on_disk(&self) -> bool {
        false
    }
}
#[cfg(test)]
mod tests {
    use std::cell::RefCell;

    use serde_json::json;

    use super::*;
    use crate::common::utils::IndexesMap;
    use crate::fixtures::payload_context_fixture::FixtureIdTracker;
    use crate::payload_storage::query_checker::check_payload;
    use crate::types::{Condition, FieldCondition, Filter, OwnedPayloadRef};

    /// Demonstrates how `check_payload` can lazily load a payload at most
    /// once even when several conditions of the filter need to inspect it.
    #[test]
    fn test_condition_checking() {
        let id_tracker = FixtureIdTracker::new(1);
        // Payload construction is wrapped in a closure so the test can show
        // deferred (and single) evaluation via the eprintln marker below.
        let get_payload = || {
            let payload: Payload = serde_json::from_value(json!({
                "name": "John Doe",
                "age": 43,
                "boolean": "true",
                "floating": 30.5,
                "string_array": ["hello", "world"],
                "boolean_array": ["true", "false"],
                "float_array": [1.0, 2.0],
            }))
            .unwrap();
            eprintln!("assigning payload"); // should occur only once
            payload
        };
        // Two `must` clauses, both matching the payload above.
        let query = Filter {
            should: None,
            min_should: None,
            must: Some(vec![
                Condition::Field(FieldCondition::new_match(JsonPath::new("age"), 43.into())),
                Condition::Field(FieldCondition::new_match(
                    JsonPath::new("name"),
                    "John Doe".to_string().into(),
                )),
            ]),
            must_not: None,
        };
        // Example:
        // How to check for payload in case if Payload is stored on disk
        // and it is preferred to only load the Payload once and if it is strictly required.
        let payload: RefCell<Option<OwnedPayloadRef>> = RefCell::new(None);
        check_payload(
            Box::new(|| {
                eprintln!("request payload");
                // Load lazily on first request, then reuse the cached value.
                if payload.borrow().is_none() {
                    payload.replace(Some(get_payload().into()));
                }
                payload.borrow().as_ref().cloned().unwrap()
            }),
            Some(&id_tracker),
            &std::collections::HashMap::new(),
            &query,
            0,
            &IndexesMap::new(),
            &HardwareCounterCell::new(),
        );
    }

    /// `clear_all` must fully reset the storage and allow reuse afterwards.
    #[test]
    fn test_wipe() {
        let mut storage = InMemoryPayloadStorage::default();
        let payload: Payload = serde_json::from_str(r#"{"name": "John Doe"}"#).unwrap();
        let hw_counter = HardwareCounterCell::new();
        storage.set(100, &payload, &hw_counter).unwrap();
        storage.clear_all(&hw_counter).unwrap();
        storage.set(100, &payload, &hw_counter).unwrap();
        storage.clear_all(&hw_counter).unwrap();
        storage.set(100, &payload, &hw_counter).unwrap();
        assert!(!storage.get(100, &hw_counter).unwrap().is_empty());
        storage.clear_all(&hw_counter).unwrap();
    }

    /// A payload deserialized from JSON must round-trip through the storage
    /// unchanged, including nested objects and arrays.
    #[test]
    fn test_assign_payload_from_serde_json() {
        let data = r#"
{
"name": "John Doe",
"age": 43,
"boolean": "true",
"floating": 30.5,
"string_array": ["hello", "world"],
"boolean_array": ["true", "false"],
"float_array": [1.0, 2.0],
"integer_array": [1, 2],
"geo_data": {"type": "geo", "value": {"lon": 1.0, "lat": 1.0}},
"metadata": {
"height": 50,
"width": 60,
"temperature": 60.5,
"nested": {
"feature": 30.5
},
"integer_array": [1, 2]
}
}"#;
        let hw_counter = HardwareCounterCell::new();
        let payload: Payload = serde_json::from_str(data).unwrap();
        let mut storage = InMemoryPayloadStorage::default();
        storage.set(100, &payload, &hw_counter).unwrap();
        let pload = storage.get(100, &hw_counter).unwrap();
        assert_eq!(pload, payload);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/in_memory_payload_storage.rs | lib/segment/src/payload_storage/in_memory_payload_storage.rs | use ahash::AHashMap;
use common::types::PointOffsetType;
use crate::types::Payload;
/// Same as `SimplePayloadStorage` but without persistence
/// Warn: for tests only
#[derive(Debug, Default)]
pub struct InMemoryPayloadStorage {
    // Point offset -> payload; the entire storage state lives in this map.
    pub(crate) payload: AHashMap<PointOffsetType, Payload>,
}

impl InMemoryPayloadStorage {
    /// Borrow the payload of `point_id` without copying, if the point exists.
    pub fn payload_ptr(&self, point_id: PointOffsetType) -> Option<&Payload> {
        self.payload.get(&point_id)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/mod.rs | lib/segment/src/payload_storage/mod.rs | pub mod condition_checker;
#[cfg(feature = "testing")]
pub mod in_memory_payload_storage;
#[cfg(feature = "testing")]
pub mod in_memory_payload_storage_impl;
pub mod mmap_payload_storage;
#[cfg(feature = "rocksdb")]
pub mod on_disk_payload_storage;
mod payload_storage_base;
pub mod payload_storage_enum;
pub mod query_checker;
#[cfg(feature = "rocksdb")]
pub mod simple_payload_storage;
#[cfg(feature = "rocksdb")]
pub mod simple_payload_storage_impl;
#[cfg(test)]
mod tests;
pub use payload_storage_base::*;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/simple_payload_storage.rs | lib/segment/src/payload_storage/simple_payload_storage.rs | use std::sync::Arc;
use ahash::AHashMap;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use parking_lot::RwLock;
use rocksdb::DB;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper;
use crate::common::rocksdb_wrapper::{DB_PAYLOAD_CF, DatabaseColumnWrapper};
use crate::types::Payload;
/// In-memory implementation of `PayloadStorage`.
/// Persists all changes to disk using `store`, but only uses this storage during the initial load
#[derive(Debug)]
pub struct SimplePayloadStorage {
    // Full in-memory copy of all payloads; reads are served from here.
    pub(crate) payload: AHashMap<PointOffsetType, Payload>,
    // RocksDB column wrapper used to persist every change.
    pub(crate) db_wrapper: DatabaseColumnScheduledDeleteWrapper,
}

impl SimplePayloadStorage {
    /// Open the storage backed by the given RocksDB handle and load all
    /// persisted payloads into memory.
    ///
    /// Returns a service error if any stored key or value fails to
    /// deserialize from CBOR.
    pub fn open(database: Arc<RwLock<DB>>) -> OperationResult<Self> {
        let mut payload_map: AHashMap<PointOffsetType, Payload> = Default::default();
        let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new(
            database,
            DB_PAYLOAD_CF,
        ));
        // Warm up the in-memory map from the persisted column family.
        for (key, val) in db_wrapper.lock_db().iter()? {
            let point_id: PointOffsetType = serde_cbor::from_slice(&key)
                .map_err(|_| OperationError::service_error("cannot deserialize point id"))?;
            let payload: Payload = serde_cbor::from_slice(&val)
                .map_err(|_| OperationError::service_error("cannot deserialize payload"))?;
            payload_map.insert(point_id, payload);
        }
        Ok(SimplePayloadStorage {
            payload: payload_map,
            db_wrapper,
        })
    }

    /// Persist the current in-memory state of `point_id` to RocksDB:
    /// removes the key when the point has no payload, writes it otherwise.
    /// Written byte counts are accounted via `hw_counter`.
    pub(crate) fn update_storage(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let point_id_serialized = serde_cbor::to_vec(&point_id).unwrap();
        hw_counter
            .payload_io_write_counter()
            .incr_delta(point_id_serialized.len());
        match self.payload.get(&point_id) {
            None => self.db_wrapper.remove(point_id_serialized),
            Some(payload) => {
                let payload_serialized = serde_cbor::to_vec(payload).unwrap();
                hw_counter
                    .payload_io_write_counter()
                    .incr_delta(payload_serialized.len());
                self.db_wrapper.put(point_id_serialized, payload_serialized)
            }
        }
    }

    /// Borrow the payload of `point_id` without copying, if the point exists.
    pub fn payload_ptr(&self, point_id: PointOffsetType) -> Option<&Payload> {
        self.payload.get(&point_id)
    }

    /// Destroy this payload storage, remove persisted data from RocksDB
    pub fn destroy(&self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use rand::distr::SampleString;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use rand_distr::Alphanumeric;
    use tempfile::Builder;

    use super::*;
    use crate::common::rocksdb_wrapper::open_db;
    use crate::payload_json;
    use crate::payload_storage::PayloadStorage;
    use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
    use crate::segment_constructor::migrate_rocksdb_payload_storage_to_mmap;

    // Fixed seed: re-seeding below replays the exact same random sequence,
    // so expected payloads can be re-derived after migration.
    const RAND_SEED: u64 = 42;

    /// Create RocksDB based payload storage.
    ///
    /// Migrate it to the mmap based payload storage and assert correctness.
    #[test]
    fn test_migrate_simple_to_mmap() {
        const POINT_COUNT: PointOffsetType = 128;
        const DELETE_PROBABILITY: f64 = 0.1;
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let hw_counter = HardwareCounterCell::disposable();
        let db_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let db = open_db(db_dir.path(), &[DB_PAYLOAD_CF]).unwrap();
        // Create simple payload storage, insert test data and delete some again
        let mut storage =
            PayloadStorageEnum::SimplePayloadStorage(SimplePayloadStorage::open(db).unwrap());
        for internal_id in 0..POINT_COUNT {
            let payload = payload_json! {
                "a": rng.random_range(0..100),
                "b": rng.random_bool(0.3),
                "c": Alphanumeric.sample_string(&mut rng, 8),
            };
            storage.set(internal_id, &payload, &hw_counter).unwrap();
            if rng.random_bool(DELETE_PROBABILITY) {
                storage.clear(internal_id, &hw_counter).unwrap();
            }
        }
        // Migrate from RocksDB to mmap storage
        let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let new_storage = migrate_rocksdb_payload_storage_to_mmap(&storage, storage_dir.path())
            .expect("failed to migrate from RocksDB to mmap");
        // Destroy persisted RocksDB payload data
        match storage {
            PayloadStorageEnum::SimplePayloadStorage(storage) => storage.destroy().unwrap(),
            _ => unreachable!("unexpected payload storage type"),
        }
        // We can drop RocksDB storage now
        db_dir.close().expect("failed to drop RocksDB storage");
        // Assert payload data
        // The identical seed reproduces each generated payload and each
        // delete decision, point by point.
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        for internal_id in 0..POINT_COUNT {
            let payload = payload_json! {
                "a": rng.random_range(0..100),
                "b": rng.random_bool(0.3),
                "c": Alphanumeric.sample_string(&mut rng, 8),
            };
            let is_deleted = rng.random_bool(DELETE_PROBABILITY);
            if !is_deleted {
                assert_eq!(new_storage.get(internal_id, &hw_counter).unwrap(), payload);
            } else {
                // Deleted points must come back as an empty payload.
                assert_eq!(
                    new_storage
                        .get(internal_id, &hw_counter)
                        .unwrap()
                        .is_empty(),
                    is_deleted,
                );
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/on_disk_payload_storage.rs | lib/segment/src/payload_storage/on_disk_payload_storage.rs | use std::path::PathBuf;
use std::sync::Arc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use parking_lot::RwLock;
use rocksdb::DB;
use serde_json::Value;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::rocksdb_buffered_delete_wrapper::DatabaseColumnScheduledDeleteWrapper;
use crate::common::rocksdb_wrapper::{DB_PAYLOAD_CF, DatabaseColumnWrapper};
use crate::json_path::JsonPath;
use crate::payload_storage::PayloadStorage;
use crate::types::Payload;
/// On-disk implementation of `PayloadStorage`.
/// Persists all changes to disk using `store`, does not keep payload in memory
#[derive(Debug)]
pub struct OnDiskPayloadStorage {
    // Single RocksDB column family holding point id -> payload (CBOR-encoded);
    // nothing is cached in memory.
    db_wrapper: DatabaseColumnScheduledDeleteWrapper,
}

impl OnDiskPayloadStorage {
    /// Open the payload column family on the given RocksDB handle.
    pub fn open(database: Arc<RwLock<DB>>) -> OperationResult<Self> {
        let db_wrapper = DatabaseColumnScheduledDeleteWrapper::new(DatabaseColumnWrapper::new(
            database,
            DB_PAYLOAD_CF,
        ));
        Ok(OnDiskPayloadStorage { db_wrapper })
    }

    /// Delete the persisted payload of `point_id`, accounting the serialized
    /// key length towards the payload write counter.
    pub fn remove_from_storage(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let serialized = serde_cbor::to_vec(&point_id).unwrap();
        hw_counter
            .payload_io_write_counter()
            .incr_delta(serialized.len());
        self.db_wrapper.remove(serialized)
    }

    /// Insert or replace the payload of `point_id`, accounting written bytes
    /// (key + value) towards the payload write counter.
    pub fn update_storage(
        &self,
        point_id: PointOffsetType,
        payload: &Payload,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let point_id_serialized = serde_cbor::to_vec(&point_id).unwrap();
        let payload_serialized = serde_cbor::to_vec(payload).unwrap();
        hw_counter
            .payload_io_write_counter()
            .incr_delta(point_id_serialized.len() + payload_serialized.len());
        self.db_wrapper.put(point_id_serialized, payload_serialized)
    }

    /// Read the payload of `point_id`, or `None` if the key is absent.
    /// Read bytes are accounted towards the payload read counter.
    pub fn read_payload(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Option<Payload>> {
        let key = serde_cbor::to_vec(&point_id).unwrap();
        // Deserialize directly from the value slice handed out by the
        // wrapper's `get_pinned` callback.
        self.db_wrapper
            .get_pinned(&key, |raw| {
                hw_counter.payload_io_read_counter().incr_delta(raw.len());
                serde_cbor::from_slice(raw)
            })?
            .transpose()
            .map_err(OperationError::from)
    }

    /// Destroy this payload storage, remove persisted data from RocksDB
    pub fn destroy(&self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()?;
        Ok(())
    }
}
impl PayloadStorage for OnDiskPayloadStorage {
    fn overwrite(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Full replacement: just persist the new payload under the key.
        self.update_storage(point_id, payload, hw_counter)
    }

    fn set(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Read-modify-write: merge into the stored payload if one exists,
        // otherwise store the incoming payload as-is.
        let stored_payload = self.read_payload(point_id, hw_counter)?;
        match stored_payload {
            Some(mut point_payload) => {
                point_payload.merge(payload);
                self.update_storage(point_id, &point_payload, hw_counter)?
            }
            None => self.update_storage(point_id, payload, hw_counter)?,
        }
        Ok(())
    }

    fn set_by_key(
        &mut self,
        point_id: PointOffsetType,
        payload: &Payload,
        key: &JsonPath,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Same read-modify-write pattern as `set`, but merging under `key`;
        // an absent point starts from an empty payload.
        let stored_payload = self.read_payload(point_id, hw_counter)?;
        match stored_payload {
            Some(mut point_payload) => {
                point_payload.merge_by_key(payload, key);
                self.update_storage(point_id, &point_payload, hw_counter)
            }
            None => {
                let mut dest_payload = Payload::default();
                dest_payload.merge_by_key(payload, key);
                self.update_storage(point_id, &dest_payload, hw_counter)
            }
        }
    }

    fn get(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Payload> {
        // Unknown points yield an empty payload rather than an error.
        let payload = self.read_payload(point_id, hw_counter)?;
        match payload {
            Some(payload) => Ok(payload),
            None => Ok(Default::default()),
        }
    }

    fn get_sequential(
        &self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Payload> {
        // No sequential access optimizations for rocksdb.
        self.get(point_id, hw_counter)
    }

    fn delete(
        &mut self,
        point_id: PointOffsetType,
        key: &JsonPath,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Vec<Value>> {
        let stored_payload = self.read_payload(point_id, hw_counter)?;
        match stored_payload {
            Some(mut payload) => {
                let res = payload.remove(key);
                // Only write back when something was actually removed.
                if !res.is_empty() {
                    self.update_storage(point_id, &payload, hw_counter)?;
                }
                Ok(res)
            }
            None => Ok(vec![]),
        }
    }

    fn clear(
        &mut self,
        point_id: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<Option<Payload>> {
        // Read first so the previous payload can be returned to the caller.
        let payload = self.read_payload(point_id, hw_counter)?;
        self.remove_from_storage(point_id, hw_counter)?;
        Ok(payload)
    }

    #[cfg(test)]
    fn clear_all(&mut self, _: &HardwareCounterCell) -> OperationResult<()> {
        self.db_wrapper.recreate_column_family()
    }

    fn flusher(&self) -> Flusher {
        self.db_wrapper.flusher()
    }

    fn iter<F>(&self, mut callback: F, hw_counter: &HardwareCounterCell) -> OperationResult<()>
    where
        F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>,
    {
        // TODO(io_measurements): Replace with write-back counter.
        let counter = hw_counter.payload_io_read_counter();
        // Walk all persisted entries until the callback asks to stop
        // (returns Ok(false)) or reports an error.
        for (key, val) in self.db_wrapper.lock_db().iter()? {
            counter.incr_delta(key.len() + val.len());
            let do_continue = callback(
                serde_cbor::from_slice(&key)?,
                &serde_cbor::from_slice(&val)?,
            )?;
            if !do_continue {
                return Ok(());
            }
        }
        Ok(())
    }

    fn files(&self) -> Vec<PathBuf> {
        // RocksDB file management is handled elsewhere; nothing reported here.
        vec![]
    }

    fn get_storage_size_bytes(&self) -> OperationResult<usize> {
        self.db_wrapper.get_storage_size_bytes()
    }

    fn is_on_disk(&self) -> bool {
        true
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/query_checker.rs | lib/segment/src/payload_storage/query_checker.rs | #![cfg_attr(not(feature = "testing"), allow(unused_imports))]
use std::cell::RefCell;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use crate::common::utils::{IndexesMap, check_is_empty, check_is_null};
use crate::id_tracker::IdTrackerSS;
use crate::index::field_index::FieldIndex;
use crate::payload_storage::condition_checker::ValueChecker;
use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
use crate::payload_storage::{ConditionChecker, PayloadStorage};
use crate::types::{
Condition, FieldCondition, Filter, IsEmptyCondition, IsNullCondition, MinShould,
OwnedPayloadRef, Payload, PayloadContainer, PayloadKeyType, VectorNameBuf,
};
use crate::vector_storage::{VectorStorage, VectorStorageEnum};
fn check_condition<F>(checker: &F, condition: &Condition) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // Nested filters recurse through `check_filter`; every other condition
    // kind is delegated to the supplied `checker`.
    if let Condition::Filter(filter) = condition {
        check_filter(checker, filter)
    } else {
        checker(condition)
    }
}
pub fn check_filter<F>(checker: &F, filter: &Filter) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // All four clause groups must pass. Evaluation short-circuits on the
    // first failing group, in the declaration order used here.
    if !check_should(checker, &filter.should) {
        return false;
    }
    if !check_min_should(checker, &filter.min_should) {
        return false;
    }
    if !check_must(checker, &filter.must) {
        return false;
    }
    check_must_not(checker, &filter.must_not)
}
fn check_should<F>(checker: &F, should: &Option<Vec<Condition>>) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // An absent `should` clause imposes no restriction; otherwise at least
    // one of the listed conditions has to match.
    let Some(conditions) = should else {
        return true;
    };
    conditions.iter().any(|cond| check_condition(checker, cond))
}
fn check_min_should<F>(checker: &F, min_should: &Option<MinShould>) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // Satisfied once `min_count` of the listed conditions match; evaluation
    // stops as soon as the threshold is reached.
    let Some(MinShould {
        conditions,
        min_count,
    }) = min_should
    else {
        return true;
    };
    let required = *min_count;
    if required == 0 {
        // A zero threshold is trivially satisfied without evaluating anything.
        return true;
    }
    let mut matched = 0;
    for cond in conditions {
        if check_condition(checker, cond) {
            matched += 1;
            if matched == required {
                return true;
            }
        }
    }
    false
}
fn check_must<F>(checker: &F, must: &Option<Vec<Condition>>) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // Every listed condition has to match; `None` means no restriction.
    let Some(conditions) = must else {
        return true;
    };
    conditions.iter().all(|cond| check_condition(checker, cond))
}
fn check_must_not<F>(checker: &F, must: &Option<Vec<Condition>>) -> bool
where
    F: Fn(&Condition) -> bool,
{
    // None of the listed conditions may match; `None` means no restriction.
    let Some(conditions) = must else {
        return true;
    };
    !conditions.iter().any(|cond| check_condition(checker, cond))
}
pub fn select_nested_indexes<'a, R>(
    nested_path: &PayloadKeyType,
    field_indexes: &'a HashMap<PayloadKeyType, R>,
) -> HashMap<PayloadKeyType, &'a Vec<FieldIndex>>
where
    R: AsRef<Vec<FieldIndex>>,
{
    // Keep only the indexes that live under `nested_path`, re-keyed
    // relative to that path.
    let mut nested_indexes = HashMap::new();
    for (key, indexes) in field_indexes {
        if let Some(stripped_key) = key.strip_prefix(nested_path) {
            nested_indexes.insert(stripped_key, indexes.as_ref());
        }
    }
    nested_indexes
}
/// Evaluate `query` against one point.
///
/// `get_payload` is a closure so callers can defer (and cache) the payload
/// load — it is only invoked by condition kinds that actually inspect the
/// payload. `id_tracker` is optional; without it, `HasId` and
/// `CustomIdChecker` conditions evaluate to false.
pub fn check_payload<'a, R>(
    get_payload: Box<dyn Fn() -> OwnedPayloadRef<'a> + 'a>,
    id_tracker: Option<&IdTrackerSS>,
    vector_storages: &HashMap<VectorNameBuf, Arc<AtomicRefCell<VectorStorageEnum>>>,
    query: &Filter,
    point_id: PointOffsetType,
    field_indexes: &HashMap<PayloadKeyType, R>,
    hw_counter: &HardwareCounterCell,
) -> bool
where
    R: AsRef<Vec<FieldIndex>>,
{
    let checker = |condition: &Condition| match condition {
        Condition::Field(field_condition) => check_field_condition(
            field_condition,
            get_payload().deref(),
            field_indexes,
            hw_counter,
        ),
        Condition::IsEmpty(is_empty) => check_is_empty_condition(is_empty, get_payload().deref()),
        Condition::IsNull(is_null) => check_is_null_condition(is_null, get_payload().deref()),
        // Resolve the point's external id and test membership in the id set.
        Condition::HasId(has_id) => id_tracker
            .and_then(|id_tracker| id_tracker.external_id(point_id))
            .is_some_and(|id| has_id.has_id.contains(&id)),
        Condition::HasVector(has_vector) => {
            // Unknown vector names evaluate to false.
            if let Some(vector_storage) = vector_storages.get(&has_vector.has_vector) {
                !vector_storage.borrow().is_deleted_vector(point_id)
            } else {
                false
            }
        }
        Condition::Nested(nested) => {
            // Recurse into each object found under the nested path, with the
            // field indexes re-keyed relative to that path.
            let nested_path = nested.array_key();
            let nested_indexes = select_nested_indexes(&nested_path, field_indexes);
            get_payload()
                .get_value(&nested_path)
                .iter()
                .filter_map(|value| value.as_object())
                .any(|object| {
                    check_payload(
                        Box::new(|| OwnedPayloadRef::from(object)),
                        None, // HasId check in nested fields is not supported
                        &HashMap::new(), // HasVector check in nested fields is not supported
                        &nested.nested.filter,
                        point_id,
                        &nested_indexes,
                        hw_counter,
                    )
                })
        }
        Condition::CustomIdChecker(cond) => id_tracker
            .and_then(|id_tracker| id_tracker.external_id(point_id))
            .is_some_and(|point_id| cond.0.check(point_id)),
        // Bare `Filter` conditions are expanded by `check_condition` before
        // they ever reach this checker.
        Condition::Filter(_) => unreachable!(),
    };
    check_filter(&checker, query)
}
pub fn check_is_empty_condition(
    is_empty: &IsEmptyCondition,
    payload: &impl PayloadContainer,
) -> bool {
    // The exact "empty" semantics are delegated to `check_is_empty`.
    let values = payload.get_value(&is_empty.is_empty.key);
    check_is_empty(values.iter().copied())
}
pub fn check_is_null_condition(is_null: &IsNullCondition, payload: &impl PayloadContainer) -> bool {
    // The exact null-detection semantics are delegated to `check_is_null`.
    let values = payload.get_value(&is_null.is_null.key);
    check_is_null(values.iter().copied())
}
/// Check a single field condition against the payload values stored under
/// its key. When field indexes exist for the key, an index may fully decide
/// the condition for a value; only if no index yields an answer is the
/// condition evaluated against the raw payload value.
pub fn check_field_condition<R>(
    field_condition: &FieldCondition,
    payload: &impl PayloadContainer,
    field_indexes: &HashMap<PayloadKeyType, R>,
    hw_counter: &HardwareCounterCell,
) -> bool
where
    R: AsRef<Vec<FieldIndex>>,
{
    let field_values = payload.get_value(&field_condition.key);
    let field_indexes = field_indexes.get(&field_condition.key);
    if field_values.is_empty() {
        // No values under the key: the condition decides what "missing" means.
        return field_condition.check_empty();
    }
    // This covers a case, when a field index affects the result of the condition.
    if let Some(field_indexes) = field_indexes {
        for p in field_values {
            let mut index_checked = false;
            for index in field_indexes.as_ref() {
                if let Some(index_check_res) =
                    index.special_check_condition(field_condition, p, hw_counter)
                {
                    if index_check_res {
                        // If at least one object matches the condition, we can return true
                        return true;
                    }
                    index_checked = true;
                    // If index check of the condition returned something, we don't need to check
                    // other indexes
                    break;
                }
            }
            if !index_checked {
                // If none of the indexes returned anything, we need to check the condition
                // against the payload
                if field_condition.check(p) {
                    return true;
                }
            }
        }
        false
    } else {
        // Fallback to regular condition check if there are no indexes for the field
        field_values.into_iter().any(|p| field_condition.check(p))
    }
}
/// Only used for testing
#[cfg(feature = "testing")]
pub struct SimpleConditionChecker {
    // Storage the payloads are read from (any variant of the enum).
    payload_storage: Arc<AtomicRefCell<PayloadStorageEnum>>,
    // Maps internal point offsets to external ids for `HasId`-style checks.
    id_tracker: Arc<AtomicRefCell<IdTrackerSS>>,
    // Named vector storages, used to answer `HasVector` conditions.
    vector_storages: HashMap<VectorNameBuf, Arc<AtomicRefCell<VectorStorageEnum>>>,
    // Shared fallback used when a point has no payload at all.
    empty_payload: Payload,
}
#[cfg(feature = "testing")]
impl SimpleConditionChecker {
    /// Build a checker over the given payload storage, id tracker and
    /// named vector storages.
    pub fn new(
        payload_storage: Arc<AtomicRefCell<PayloadStorageEnum>>,
        id_tracker: Arc<AtomicRefCell<IdTrackerSS>>,
        vector_storages: HashMap<VectorNameBuf, Arc<AtomicRefCell<VectorStorageEnum>>>,
    ) -> Self {
        Self {
            payload_storage,
            id_tracker,
            vector_storages,
            empty_payload: Payload::default(),
        }
    }
}
#[cfg(feature = "testing")]
impl ConditionChecker for SimpleConditionChecker {
    /// Evaluate `query` for the point, loading its payload lazily and at
    /// most once per call.
    fn check(&self, point_id: PointOffsetType, query: &Filter) -> bool {
        let hw_counter = HardwareCounterCell::new(); // No measurements needed as this is only for test!
        let payload_storage_guard = self.payload_storage.borrow();
        // Cache slot: filled on the first condition that needs the payload,
        // then reused by every subsequent condition of the same query.
        let payload_ref_cell: RefCell<Option<OwnedPayloadRef>> = RefCell::new(None);
        let id_tracker = self.id_tracker.borrow();
        let vector_storages = &self.vector_storages;
        check_payload(
            Box::new(|| {
                if payload_ref_cell.borrow().is_none() {
                    let payload_ptr = match payload_storage_guard.deref() {
                        PayloadStorageEnum::InMemoryPayloadStorage(s) => {
                            s.payload_ptr(point_id).map(|x| x.into())
                        }
                        #[cfg(feature = "rocksdb")]
                        PayloadStorageEnum::SimplePayloadStorage(s) => {
                            s.payload_ptr(point_id).map(|x| x.into())
                        }
                        #[cfg(feature = "rocksdb")]
                        PayloadStorageEnum::OnDiskPayloadStorage(s) => {
                            // Warn: Possible panic here
                            // Currently, it is possible that `read_payload` fails with Err,
                            // but it seems like a very rare possibility which might only happen
                            // if something is wrong with disk or storage is corrupted.
                            //
                            // In both cases it means that service can't be of use any longer.
                            // It is as good as dead. Therefore it is tolerable to just panic here.
                            // Downside is - API user won't be notified of the failure.
                            // It will just timeout.
                            //
                            // The alternative:
                            // Rewrite condition checking code to support error reporting.
                            // Which may lead to slowdown and assumes a lot of changes.
                            s.read_payload(point_id, &hw_counter)
                                .unwrap_or_else(|err| panic!("Payload storage is corrupted: {err}"))
                                .map(|x| x.into())
                        }
                        PayloadStorageEnum::MmapPayloadStorage(s) => {
                            let payload = s.get(point_id, &hw_counter).unwrap_or_else(|err| {
                                panic!("Payload storage is corrupted: {err}")
                            });
                            Some(OwnedPayloadRef::from(payload))
                        }
                    };
                    // Points without a payload fall back to the shared empty payload.
                    payload_ref_cell
                        .replace(payload_ptr.or_else(|| Some((&self.empty_payload).into())));
                }
                payload_ref_cell.borrow().as_ref().cloned().unwrap()
            }),
            Some(id_tracker.deref()),
            vector_storages,
            query,
            point_id,
            &IndexesMap::new(),
            &HardwareCounterCell::new(),
        )
    }
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use ahash::AHashSet;
use ordered_float::OrderedFloat;
use super::*;
use crate::id_tracker::IdTracker;
use crate::id_tracker::in_memory_id_tracker::InMemoryIdTracker;
use crate::json_path::JsonPath;
use crate::payload_json;
use crate::payload_storage::PayloadStorage;
use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use crate::types::{
DateTimeWrapper, FieldCondition, GeoBoundingBox, GeoPoint, PayloadField, Range, ValuesCount,
};
#[test]
fn test_condition_checker() {
let payload = payload_json! {
"location": {
"lon": 13.404954,
"lat": 52.520008,
},
"price": 499.90,
"amount": 10,
"rating": vec![3, 7, 9, 9],
"color": "red",
"has_delivery": true,
"shipped_at": "2020-02-15T00:00:00Z",
"parts": [],
"packaging": null,
"not_null": [null],
};
let hw_counter = HardwareCounterCell::new();
let mut payload_storage: PayloadStorageEnum =
PayloadStorageEnum::InMemoryPayloadStorage(InMemoryPayloadStorage::default());
let mut id_tracker = InMemoryIdTracker::new();
id_tracker.set_link(0.into(), 0).unwrap();
id_tracker.set_link(1.into(), 1).unwrap();
id_tracker.set_link(2.into(), 2).unwrap();
id_tracker.set_link(10.into(), 10).unwrap();
payload_storage.overwrite(0, &payload, &hw_counter).unwrap();
let payload_checker = SimpleConditionChecker::new(
Arc::new(AtomicRefCell::new(payload_storage)),
Arc::new(AtomicRefCell::new(id_tracker)),
HashMap::new(),
);
let is_empty_condition = Filter::new_must(Condition::IsEmpty(IsEmptyCondition {
is_empty: PayloadField {
key: JsonPath::new("price"),
},
}));
assert!(!payload_checker.check(0, &is_empty_condition));
let is_empty_condition = Filter::new_must(Condition::IsEmpty(IsEmptyCondition {
is_empty: PayloadField {
key: JsonPath::new("something_new"),
},
}));
assert!(payload_checker.check(0, &is_empty_condition));
let is_empty_condition = Filter::new_must(Condition::IsEmpty(IsEmptyCondition {
is_empty: PayloadField {
key: JsonPath::new("parts"),
},
}));
assert!(payload_checker.check(0, &is_empty_condition));
let is_empty_condition = Filter::new_must(Condition::IsEmpty(IsEmptyCondition {
is_empty: PayloadField {
key: JsonPath::new("not_null"),
},
}));
assert!(!payload_checker.check(0, &is_empty_condition));
let is_null_condition = Filter::new_must(Condition::IsNull(IsNullCondition {
is_null: PayloadField {
key: JsonPath::new("amount"),
},
}));
assert!(!payload_checker.check(0, &is_null_condition));
let is_null_condition = Filter::new_must(Condition::IsNull(IsNullCondition {
is_null: PayloadField {
key: JsonPath::new("parts"),
},
}));
assert!(!payload_checker.check(0, &is_null_condition));
let is_null_condition = Filter::new_must(Condition::IsNull(IsNullCondition {
is_null: PayloadField {
key: JsonPath::new("something_else"),
},
}));
assert!(!payload_checker.check(0, &is_null_condition));
let is_null_condition = Filter::new_must(Condition::IsNull(IsNullCondition {
is_null: PayloadField {
key: JsonPath::new("packaging"),
},
}));
assert!(payload_checker.check(0, &is_null_condition));
let is_null_condition = Filter::new_must(Condition::IsNull(IsNullCondition {
is_null: PayloadField {
key: JsonPath::new("not_null"),
},
}));
assert!(!payload_checker.check(0, &is_null_condition));
let match_red = Condition::Field(FieldCondition::new_match(
JsonPath::new("color"),
"red".to_owned().into(),
));
let match_blue = Condition::Field(FieldCondition::new_match(
JsonPath::new("color"),
"blue".to_owned().into(),
));
let shipped_in_february = Condition::Field(FieldCondition::new_datetime_range(
JsonPath::new("shipped_at"),
Range {
lt: Some(DateTimeWrapper::from_str("2020-03-01T00:00:00Z").unwrap()),
gt: None,
gte: Some(DateTimeWrapper::from_str("2020-02-01T00:00:00Z").unwrap()),
lte: None,
},
));
let shipped_in_march = Condition::Field(FieldCondition::new_datetime_range(
JsonPath::new("shipped_at"),
Range {
lt: Some(DateTimeWrapper::from_str("2020-04-01T00:00:00Z").unwrap()),
gt: None,
gte: Some(DateTimeWrapper::from_str("2020-03-01T00:00:00Z").unwrap()),
lte: None,
},
));
let with_delivery = Condition::Field(FieldCondition::new_match(
JsonPath::new("has_delivery"),
true.into(),
));
let many_value_count_condition =
Filter::new_must(Condition::Field(FieldCondition::new_values_count(
JsonPath::new("rating"),
ValuesCount {
lt: None,
gt: None,
gte: Some(10),
lte: None,
},
)));
assert!(!payload_checker.check(0, &many_value_count_condition));
let few_value_count_condition =
Filter::new_must(Condition::Field(FieldCondition::new_values_count(
JsonPath::new("rating"),
ValuesCount {
lt: Some(5),
gt: None,
gte: None,
lte: None,
},
)));
assert!(payload_checker.check(0, &few_value_count_condition));
let in_berlin = Condition::Field(FieldCondition::new_geo_bounding_box(
JsonPath::new("location"),
GeoBoundingBox {
top_left: GeoPoint::new_unchecked(13.08835, 52.67551),
bottom_right: GeoPoint::new_unchecked(13.76116, 52.33826),
},
));
let in_moscow = Condition::Field(FieldCondition::new_geo_bounding_box(
JsonPath::new("location"),
GeoBoundingBox {
top_left: GeoPoint::new_unchecked(37.0366, 56.1859),
bottom_right: GeoPoint::new_unchecked(38.2532, 55.317),
},
));
let with_bad_rating = Condition::Field(FieldCondition::new_range(
JsonPath::new("rating"),
Range {
lt: None,
gt: None,
gte: None,
lte: Some(OrderedFloat(5.)),
},
));
let query = Filter::new_must(match_red.clone());
assert!(payload_checker.check(0, &query));
let query = Filter::new_must(match_blue.clone());
assert!(!payload_checker.check(0, &query));
let query = Filter::new_must_not(match_blue.clone());
assert!(payload_checker.check(0, &query));
let query = Filter::new_must_not(match_red.clone());
assert!(!payload_checker.check(0, &query));
let query = Filter {
should: Some(vec![match_red.clone(), match_blue.clone()]),
min_should: None,
must: Some(vec![with_delivery.clone(), in_berlin.clone()]),
must_not: None,
};
assert!(payload_checker.check(0, &query));
let query = Filter {
should: Some(vec![match_red.clone(), match_blue.clone()]),
min_should: None,
must: Some(vec![with_delivery, in_moscow.clone()]),
must_not: None,
};
assert!(!payload_checker.check(0, &query));
let query = Filter {
should: Some(vec![
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_red.clone(), in_moscow.clone()]),
must_not: None,
}),
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_blue.clone(), in_berlin.clone()]),
must_not: None,
}),
]),
min_should: None,
must: None,
must_not: None,
};
assert!(!payload_checker.check(0, &query));
let query = Filter {
should: Some(vec![
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_blue.clone(), in_moscow.clone()]),
must_not: None,
}),
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_red.clone(), in_berlin.clone()]),
must_not: None,
}),
]),
min_should: None,
must: None,
must_not: None,
};
assert!(payload_checker.check(0, &query));
let query = Filter::new_must_not(with_bad_rating);
assert!(!payload_checker.check(0, &query));
// min_should
let query = Filter::new_min_should(MinShould {
conditions: vec![match_blue.clone(), in_moscow.clone()],
min_count: 1,
});
assert!(!payload_checker.check(0, &query));
let query = Filter::new_min_should(MinShould {
conditions: vec![match_red.clone(), in_berlin.clone(), in_moscow.clone()],
min_count: 2,
});
assert!(payload_checker.check(0, &query));
let query = Filter::new_min_should(MinShould {
conditions: vec![
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_blue, in_moscow]),
must_not: None,
}),
Condition::Filter(Filter {
should: None,
min_should: None,
must: Some(vec![match_red, in_berlin]),
must_not: None,
}),
],
min_count: 1,
});
assert!(payload_checker.check(0, &query));
// DateTime payload index
let query = Filter::new_must(shipped_in_february);
assert!(payload_checker.check(0, &query));
let query = Filter::new_must(shipped_in_march);
assert!(!payload_checker.check(0, &query));
// id Filter
let ids: AHashSet<_> = vec![1, 2, 3].into_iter().map(|x| x.into()).collect();
let query = Filter::new_must_not(Condition::HasId(ids.into()));
assert!(!payload_checker.check(2, &query));
let ids: AHashSet<_> = vec![1, 2, 3].into_iter().map(|x| x.into()).collect();
let query = Filter::new_must_not(Condition::HasId(ids.into()));
assert!(payload_checker.check(10, &query));
let ids: AHashSet<_> = vec![1, 2, 3].into_iter().map(|x| x.into()).collect();
let query = Filter::new_must(Condition::HasId(ids.into()));
assert!(payload_checker.check(2, &query));
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/mmap_payload_storage.rs | lib/segment/src/payload_storage/mmap_payload_storage.rs | use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use gridstore::config::StorageOptions;
use gridstore::{Blob, Gridstore};
use serde_json::Value;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::json_path::JsonPath;
use crate::payload_storage::PayloadStorage;
use crate::types::{Payload, PayloadKeyTypeRef};
const STORAGE_PATH: &str = "payload_storage";
impl Blob for Payload {
fn to_bytes(&self) -> Vec<u8> {
serde_json::to_vec(self).unwrap()
}
fn from_bytes(data: &[u8]) -> Self {
serde_json::from_slice(data).unwrap()
}
}
#[derive(Debug)]
pub struct MmapPayloadStorage {
storage: Gridstore<Payload>,
populate: bool,
}
impl MmapPayloadStorage {
pub fn open_or_create(path: PathBuf, populate: bool) -> OperationResult<Self> {
let path = storage_dir(path);
if path.exists() {
Self::open(path, populate)
} else {
// create folder if it does not exist
fs::create_dir_all(&path).map_err(|_| {
OperationError::service_error("Failed to create mmap payload storage directory")
})?;
Ok(Self::new(path, populate)?)
}
}
fn open(path: PathBuf, populate: bool) -> OperationResult<Self> {
let storage = Gridstore::open(path).map_err(|err| {
OperationError::service_error(format!("Failed to open mmap payload storage: {err}"))
})?;
if populate {
storage.populate()?;
}
Ok(Self { storage, populate })
}
fn new(path: PathBuf, populate: bool) -> OperationResult<Self> {
let storage = Gridstore::new(path, StorageOptions::default())?;
if populate {
storage.populate()?;
}
Ok(Self { storage, populate })
}
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) -> OperationResult<()> {
self.storage.populate()?;
Ok(())
}
/// Drop disk cache.
pub fn clear_cache(&self) -> OperationResult<()> {
self.storage.clear_cache()?;
Ok(())
}
}
impl PayloadStorage for MmapPayloadStorage {
fn overwrite(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
self.storage
.put_value(point_id, payload, hw_counter.ref_payload_io_write_counter())?;
Ok(())
}
fn set(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
match self.storage.get_value::<false>(point_id, hw_counter) {
Some(mut point_payload) => {
point_payload.merge(payload);
self.storage.put_value(
point_id,
&point_payload,
hw_counter.ref_payload_io_write_counter(),
)?;
}
None => {
self.storage.put_value(
point_id,
payload,
hw_counter.ref_payload_io_write_counter(),
)?;
}
}
Ok(())
}
fn set_by_key(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
key: &JsonPath,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
match self.storage.get_value::<false>(point_id, hw_counter) {
Some(mut point_payload) => {
point_payload.merge_by_key(payload, key);
self.storage.put_value(
point_id,
&point_payload,
hw_counter.ref_payload_io_write_counter(),
)?;
}
None => {
let mut dest_payload = Payload::default();
dest_payload.merge_by_key(payload, key);
self.storage.put_value(
point_id,
&dest_payload,
hw_counter.ref_payload_io_write_counter(),
)?;
}
}
Ok(())
}
fn get(
&self,
point_id: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
match self.storage.get_value::<false>(point_id, hw_counter) {
Some(payload) => Ok(payload),
None => Ok(Default::default()),
}
}
fn get_sequential(
&self,
point_id: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
match self.storage.get_value::<true>(point_id, hw_counter) {
Some(payload) => Ok(payload),
None => Ok(Default::default()),
}
}
fn delete(
&mut self,
point_id: PointOffsetType,
key: PayloadKeyTypeRef,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Vec<Value>> {
match self.storage.get_value::<false>(point_id, hw_counter) {
Some(mut payload) => {
let res = payload.remove(key);
if !res.is_empty() {
self.storage.put_value(
point_id,
&payload,
hw_counter.ref_payload_io_write_counter(),
)?;
}
Ok(res)
}
None => Ok(vec![]),
}
}
fn clear(
&mut self,
point_id: PointOffsetType,
_: &HardwareCounterCell,
) -> OperationResult<Option<Payload>> {
let res = self.storage.delete_value(point_id);
Ok(res)
}
#[cfg(test)]
fn clear_all(&mut self, _: &HardwareCounterCell) -> OperationResult<()> {
self.storage.clear().map_err(|err| {
OperationError::service_error(format!("Failed to clear mmap payload storage: {err}"))
})
}
fn flusher(&self) -> Flusher {
let storage_flusher = self.storage.flusher();
Box::new(move || {
storage_flusher().map_err(|err| {
OperationError::service_error(format!(
"Failed to flush mmap payload gridstore: {err}"
))
})
})
}
fn iter<F>(&self, mut callback: F, hw_counter: &HardwareCounterCell) -> OperationResult<()>
where
F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>,
{
self.storage.iter(
|point_id, payload| {
callback(point_id, &payload).map_err(|e|
// TODO return proper error
std::io::Error::other(
e.to_string(),
))
},
hw_counter.ref_payload_io_read_counter(),
)?;
Ok(())
}
fn files(&self) -> Vec<PathBuf> {
self.storage.files()
}
fn immutable_files(&self) -> Vec<PathBuf> {
self.storage.immutable_files()
}
fn get_storage_size_bytes(&self) -> OperationResult<usize> {
Ok(self.storage.get_storage_size_bytes())
}
fn is_on_disk(&self) -> bool {
!self.populate
}
}
/// Get storage directory for this payload storage
pub fn storage_dir<P: AsRef<Path>>(segment_path: P) -> PathBuf {
segment_path.as_ref().join(STORAGE_PATH)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/payload_storage/payload_storage_enum.rs | lib/segment/src/payload_storage/payload_storage_enum.rs | use std::path::PathBuf;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use serde_json::Value;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
use crate::json_path::JsonPath;
use crate::payload_storage::PayloadStorage;
#[cfg(feature = "testing")]
use crate::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use crate::payload_storage::mmap_payload_storage::MmapPayloadStorage;
#[cfg(feature = "rocksdb")]
use crate::payload_storage::on_disk_payload_storage::OnDiskPayloadStorage;
#[cfg(feature = "rocksdb")]
use crate::payload_storage::simple_payload_storage::SimplePayloadStorage;
use crate::types::Payload;
#[derive(Debug)]
pub enum PayloadStorageEnum {
#[cfg(feature = "testing")]
InMemoryPayloadStorage(InMemoryPayloadStorage),
#[cfg(feature = "rocksdb")]
SimplePayloadStorage(SimplePayloadStorage),
#[cfg(feature = "rocksdb")]
OnDiskPayloadStorage(OnDiskPayloadStorage),
MmapPayloadStorage(MmapPayloadStorage),
}
#[cfg(feature = "testing")]
impl From<InMemoryPayloadStorage> for PayloadStorageEnum {
fn from(a: InMemoryPayloadStorage) -> Self {
PayloadStorageEnum::InMemoryPayloadStorage(a)
}
}
#[cfg(feature = "rocksdb")]
impl From<SimplePayloadStorage> for PayloadStorageEnum {
fn from(a: SimplePayloadStorage) -> Self {
PayloadStorageEnum::SimplePayloadStorage(a)
}
}
#[cfg(feature = "rocksdb")]
impl From<OnDiskPayloadStorage> for PayloadStorageEnum {
fn from(a: OnDiskPayloadStorage) -> Self {
PayloadStorageEnum::OnDiskPayloadStorage(a)
}
}
impl From<MmapPayloadStorage> for PayloadStorageEnum {
fn from(a: MmapPayloadStorage) -> Self {
PayloadStorageEnum::MmapPayloadStorage(a)
}
}
impl PayloadStorage for PayloadStorageEnum {
fn overwrite(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => {
s.overwrite(point_id, payload, hw_counter)
}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => {
s.overwrite(point_id, payload, hw_counter)
}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => {
s.overwrite(point_id, payload, hw_counter)
}
PayloadStorageEnum::MmapPayloadStorage(s) => s.overwrite(point_id, payload, hw_counter),
}
}
fn set(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.set(point_id, payload, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.set(point_id, payload, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.set(point_id, payload, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.set(point_id, payload, hw_counter),
}
}
fn set_by_key(
&mut self,
point_id: PointOffsetType,
payload: &Payload,
key: &JsonPath,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => {
s.set_by_key(point_id, payload, key, hw_counter)
}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => {
s.set_by_key(point_id, payload, key, hw_counter)
}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => {
s.set_by_key(point_id, payload, key, hw_counter)
}
PayloadStorageEnum::MmapPayloadStorage(s) => {
s.set_by_key(point_id, payload, key, hw_counter)
}
}
}
fn get(
&self,
point_id: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.get(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.get(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.get(point_id, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.get(point_id, hw_counter),
}
}
fn get_sequential(
&self,
point_id: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Payload> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.get_sequential(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.get_sequential(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.get_sequential(point_id, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.get_sequential(point_id, hw_counter),
}
}
fn delete(
&mut self,
point_id: PointOffsetType,
key: &JsonPath,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Vec<Value>> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.delete(point_id, key, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.delete(point_id, key, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.delete(point_id, key, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.delete(point_id, key, hw_counter),
}
}
fn clear(
&mut self,
point_id: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> OperationResult<Option<Payload>> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.clear(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.clear(point_id, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.clear(point_id, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.clear(point_id, hw_counter),
}
}
#[cfg(test)]
fn clear_all(&mut self, hw_counter: &HardwareCounterCell) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.clear_all(hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.clear_all(hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.clear_all(hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.clear_all(hw_counter),
}
}
fn flusher(&self) -> Flusher {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.flusher(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.flusher(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.flusher(),
PayloadStorageEnum::MmapPayloadStorage(s) => s.flusher(),
}
}
fn iter<F>(&self, callback: F, hw_counter: &HardwareCounterCell) -> OperationResult<()>
where
F: FnMut(PointOffsetType, &Payload) -> OperationResult<bool>,
{
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.iter(callback, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.iter(callback, hw_counter),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.iter(callback, hw_counter),
PayloadStorageEnum::MmapPayloadStorage(s) => s.iter(callback, hw_counter),
}
}
fn files(&self) -> Vec<PathBuf> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.files(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.files(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.files(),
PayloadStorageEnum::MmapPayloadStorage(s) => s.files(),
}
}
fn immutable_files(&self) -> Vec<PathBuf> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.immutable_files(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.immutable_files(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.immutable_files(),
PayloadStorageEnum::MmapPayloadStorage(s) => s.immutable_files(),
}
}
fn get_storage_size_bytes(&self) -> OperationResult<usize> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.get_storage_size_bytes(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.get_storage_size_bytes(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.get_storage_size_bytes(),
PayloadStorageEnum::MmapPayloadStorage(s) => s.get_storage_size_bytes(),
}
}
fn is_on_disk(&self) -> bool {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(s) => s.is_on_disk(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(s) => s.is_on_disk(),
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(s) => s.is_on_disk(),
PayloadStorageEnum::MmapPayloadStorage(s) => s.is_on_disk(),
}
}
}
impl PayloadStorageEnum {
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(_) => {}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(_) => {}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(_) => {}
PayloadStorageEnum::MmapPayloadStorage(s) => s.populate()?,
}
Ok(())
}
/// Drop disk cache.
pub fn clear_cache(&self) -> OperationResult<()> {
match self {
#[cfg(feature = "testing")]
PayloadStorageEnum::InMemoryPayloadStorage(_) => {}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::SimplePayloadStorage(_) => {}
#[cfg(feature = "rocksdb")]
PayloadStorageEnum::OnDiskPayloadStorage(_) => {}
PayloadStorageEnum::MmapPayloadStorage(s) => s.clear_cache()?,
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use rstest::rstest;
use tempfile::Builder;
use super::*;
#[cfg(feature = "rocksdb")]
use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use crate::types::Payload;
#[test]
#[cfg(feature = "rocksdb")]
fn test_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let hw_counter = HardwareCounterCell::new();
let mut storage: PayloadStorageEnum = SimplePayloadStorage::open(db).unwrap().into();
let payload: Payload = serde_json::from_str(r#"{"name": "John Doe"}"#).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
storage.clear_all(&hw_counter).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
storage.clear_all(&hw_counter).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
assert!(!storage.get(100, &hw_counter).unwrap().is_empty());
storage.clear_all(&hw_counter).unwrap();
assert_eq!(storage.get(100, &hw_counter).unwrap(), Default::default());
}
#[rstest]
fn test_mmap_storage(#[values(false, true)] populate: bool) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let mut storage: PayloadStorageEnum =
MmapPayloadStorage::open_or_create(dir.path().to_path_buf(), populate)
.unwrap()
.into();
let payload: Payload = serde_json::from_str(r#"{"name": "John Doe"}"#).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
storage.clear_all(&hw_counter).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
storage.clear_all(&hw_counter).unwrap();
storage.set(100, &payload, &hw_counter).unwrap();
assert!(!storage.get(100, &hw_counter).unwrap().is_empty());
storage.clear_all(&hw_counter).unwrap();
assert_eq!(storage.get(100, &hw_counter).unwrap(), Default::default());
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_on_disk_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let hw_counter = HardwareCounterCell::new();
{
let mut storage: PayloadStorageEnum =
SimplePayloadStorage::open(db.clone()).unwrap().into();
let payload: Payload = serde_json::from_str(
r#"{
"name": "John Doe",
"age": 52,
"location": {
"city": "Melbourne",
"geo": {
"lon": 144.9631,
"lat": 37.8136
}
}
}"#,
)
.unwrap();
storage.overwrite(100, &payload, &hw_counter).unwrap();
let partial_payload: Payload = serde_json::from_str(r#"{ "age": 53 }"#).unwrap();
storage.set(100, &partial_payload, &hw_counter).unwrap();
storage
.delete(100, &JsonPath::new("location.geo"), &hw_counter)
.unwrap();
let res = storage.get(100, &hw_counter).unwrap();
assert!(res.0.contains_key("age"));
assert!(res.0.contains_key("location"));
assert!(res.0.contains_key("name"));
}
{
let mut storage: PayloadStorageEnum = OnDiskPayloadStorage::open(db).unwrap().into();
let res = storage.get(100, &hw_counter).unwrap();
assert!(res.0.contains_key("age"));
assert!(res.0.contains_key("location"));
assert!(res.0.contains_key("name"));
eprintln!("res = {res:#?}");
let partial_payload: Payload =
serde_json::from_str(r#"{ "hobby": "vector search" }"#).unwrap();
storage.set(100, &partial_payload, &hw_counter).unwrap();
storage
.delete(100, &JsonPath::new("location.city"), &hw_counter)
.unwrap();
storage
.delete(100, &JsonPath::new("location"), &hw_counter)
.unwrap();
let res = storage.get(100, &hw_counter).unwrap();
assert!(res.0.contains_key("age"));
assert!(res.0.contains_key("hobby"));
assert!(res.0.contains_key("name"));
eprintln!("res = {res:#?}");
}
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_get_storage_size() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let mut storage = SimplePayloadStorage::open(db.clone()).unwrap();
let hw_counter = HardwareCounterCell::new();
assert_eq!(storage.get_storage_size_bytes().unwrap(), 0);
let point_id = 0;
let payload: Payload = serde_json::from_str(
r#"{
"name": "John Doe",
"age": 52,
"location": {
"city": "Melbourne",
"geo": {
"lon": 144.9631,
"lat": 37.8136
}
}
}"#,
)
.unwrap();
let raw_payload_size = serde_cbor::to_vec(&point_id).unwrap().len() as u64
+ serde_json::to_vec(&payload).unwrap().len() as u64;
assert_eq!(raw_payload_size, 98);
// insert payload
storage.overwrite(point_id, &payload, &hw_counter).unwrap();
assert_eq!(storage.get_storage_size_bytes().unwrap(), 0);
// needs a flush to impact the storage size
storage.flusher()().unwrap();
// large value contains initial cost of infra (SSTable, etc.), not stable across different OS
let storage_size = storage.get_storage_size_bytes().unwrap();
assert!(
storage_size > 1000 && storage_size < 1300,
"storage_size = {storage_size}"
);
// check how it scales
for _ in 1..=100 {
storage.overwrite(point_id, &payload, &hw_counter).unwrap();
}
storage.flusher()().unwrap();
// loose assertion because value not stable across different OS
let storage_size = storage.get_storage_size_bytes().unwrap();
assert!(
storage_size > 2000 && storage_size < 2600,
"storage_size = {storage_size}"
);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/chunked_vector_storage.rs | lib/segment/src/vector_storage/chunked_vector_storage.rs | use std::mem::MaybeUninit;
use std::path::PathBuf;
use common::counter::hardware_counter::HardwareCounterCell;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
use crate::vector_storage::AccessPattern;
/// In case of simple vector storage, vector offset is the same as PointOffsetType.
/// But in case of multivectors, it requires an additional lookup.
pub type VectorOffsetType = usize;
#[allow(clippy::len_without_is_empty)]
pub trait ChunkedVectorStorage<T> {
fn len(&self) -> usize;
fn dim(&self) -> usize;
fn get<P: AccessPattern>(&self, key: VectorOffsetType) -> Option<&[T]>
where
Self: Sized;
fn files(&self) -> Vec<PathBuf>;
fn immutable_files(&self) -> Vec<PathBuf>;
fn flusher(&self) -> Flusher;
fn push(
&mut self,
vector: &[T],
hw_counter: &HardwareCounterCell,
) -> OperationResult<VectorOffsetType>;
fn insert(
&mut self,
key: VectorOffsetType,
vector: &[T],
hw_counter: &HardwareCounterCell,
) -> OperationResult<()>;
fn insert_many(
&mut self,
start_key: VectorOffsetType,
vectors: &[T],
count: usize,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()>;
/// Returns `count` flattened vectors starting from key. if chunk boundary is crossed, returns None
fn get_many<P: AccessPattern>(&self, key: VectorOffsetType, count: usize) -> Option<&[T]>;
/// Returns batch of vectors by keys.
/// Underlying storage might apply some optimizations to prefetch vectors.
fn get_batch<'a>(
&'a self,
keys: &[VectorOffsetType],
vectors: &'a mut [MaybeUninit<&'a [T]>],
) -> &'a [&'a [T]];
fn get_remaining_chunk_keys(&self, start_key: VectorOffsetType) -> usize;
fn max_vector_size_bytes(&self) -> usize;
/// True, if this storage is on-disk by default.
fn is_on_disk(&self) -> bool;
/// Populate all pages in the mmap.
/// Block until all pages are populated.
fn populate(&self) -> OperationResult<()>;
/// Drop disk cache.
fn clear_cache(&self) -> OperationResult<()>;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/async_io_mock.rs | lib/segment/src/vector_storage/async_io_mock.rs | #![allow(dead_code)] // The mock is unused on Linux, and so produces dead code warnings
use fs_err::File;
use crate::common::operation_error::OperationResult;
use crate::data_types::primitive::PrimitiveVectorElement;
// This is a mock implementation of the async_io module for those platforms that don't support io_uring.
#[derive(Debug)]
pub struct UringReader<T: PrimitiveVectorElement> {
_phantom: std::marker::PhantomData<T>,
}
#[allow(clippy::unnecessary_wraps)] // Mock `new` have to follow the same signature as real `UringReader`
impl<T: PrimitiveVectorElement> UringReader<T> {
pub fn new(_file: File, _raw_size: usize, _header_size: usize) -> OperationResult<Self> {
Ok(Self {
_phantom: std::marker::PhantomData,
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/async_raw_scorer.rs | lib/segment/src/vector_storage/async_raw_scorer.rs | use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, ScoreType};
use super::query::{
ContextQuery, DiscoveryQuery, RecoBestScoreQuery, RecoQuery, RecoSumScoresQuery, TransformInto,
};
use super::query_scorer::custom_query_scorer::CustomQueryScorer;
use super::query_scorer::{QueryScorerBytes, QueryScorerBytesImpl};
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::vectors::{DenseVector, QueryVector, VectorElementType, VectorInternal};
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric, EuclidMetric, ManhattanMetric};
use crate::types::Distance;
use crate::vector_storage::dense::memmap_dense_vector_storage::MemmapDenseVectorStorage;
use crate::vector_storage::dense::mmap_dense_vectors::MmapDenseVectors;
use crate::vector_storage::query::NaiveFeedbackQuery;
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::query_scorer::metric_query_scorer::MetricQueryScorer;
use crate::vector_storage::{RawScorer, VectorStorage as _};
pub fn new<'a>(
query: QueryVector,
storage: &'a MemmapDenseVectorStorage<VectorElementType>,
hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
AsyncRawScorerBuilder::new(query, storage, hardware_counter).build()
}
pub struct AsyncRawScorerImpl<'a, TQueryScorer: QueryScorer<TVector = [VectorElementType]>> {
query_scorer: TQueryScorer,
storage: &'a MmapDenseVectors<VectorElementType>,
}
impl<'a, TQueryScorer> AsyncRawScorerImpl<'a, TQueryScorer>
where
TQueryScorer: QueryScorer<TVector = [VectorElementType]>,
{
fn new(query_scorer: TQueryScorer, storage: &'a MmapDenseVectors<VectorElementType>) -> Self {
Self {
query_scorer,
storage,
}
}
}
impl<TQueryScorer> RawScorer for AsyncRawScorerImpl<'_, TQueryScorer>
where
TQueryScorer: QueryScorer<TVector = [VectorElementType]>,
{
fn score_points(&self, points: &[PointOffsetType], scores: &mut [ScoreType]) {
assert_eq!(points.len(), scores.len());
let points_stream = points.iter().copied();
self.storage
.read_vectors_async(points_stream, |idx, _point_id, other_vector| {
scores[idx] = self.query_scorer.score(other_vector);
})
.unwrap();
// ToDo: io_uring is experimental, it can fail if it is not supported.
// Instead of silently falling back to the sync implementation, we prefer to panic
// and notify the user that they better use the default IO implementation.
}
fn score_point(&self, point: PointOffsetType) -> ScoreType {
self.query_scorer.score_stored(point)
}
fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
self.query_scorer.score_internal(point_a, point_b)
}
fn scorer_bytes(&self) -> Option<&dyn QueryScorerBytes> {
QueryScorerBytesImpl::new(&self.query_scorer).map(|s| s as _)
}
}
struct AsyncRawScorerBuilder<'a> {
query: QueryVector,
storage: &'a MemmapDenseVectorStorage<VectorElementType>,
distance: Distance,
hardware_counter: HardwareCounterCell,
}
impl<'a> AsyncRawScorerBuilder<'a> {
pub fn new(
query: QueryVector,
storage: &'a MemmapDenseVectorStorage<VectorElementType>,
hardware_counter: HardwareCounterCell,
) -> Self {
Self {
query,
storage,
distance: storage.distance(),
hardware_counter,
}
}
pub fn build(self) -> OperationResult<Box<dyn RawScorer + 'a>> {
match self.distance {
Distance::Cosine => self._build_with_metric::<CosineMetric>(),
Distance::Euclid => self._build_with_metric::<EuclidMetric>(),
Distance::Dot => self._build_with_metric::<DotProductMetric>(),
Distance::Manhattan => self._build_with_metric::<ManhattanMetric>(),
}
}
fn _build_with_metric<TMetric: Metric<VectorElementType> + 'a>(
self,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
let Self {
query,
storage,
distance: _,
hardware_counter,
} = self;
match query {
QueryVector::Nearest(vector) => {
match vector {
VectorInternal::Dense(dense_vector) => {
let query_scorer = MetricQueryScorer::<_, TMetric, _>::new(
dense_vector,
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
VectorInternal::Sparse(_sparse_vector) => Err(OperationError::service_error(
"sparse vectors are not supported for async scorer",
)), // TODO(sparse) add support?
VectorInternal::MultiDense(_multi_dense_vector) => {
Err(OperationError::service_error(
"multi-dense vectors are not supported for async scorer",
))
} // TODO(colbert) add support?
}
}
QueryVector::RecommendBestScore(reco_query) => {
let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
RecoBestScoreQuery::from(reco_query),
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
QueryVector::RecommendSumScores(reco_query) => {
let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
RecoSumScoresQuery::from(reco_query),
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
QueryVector::Discovery(discovery_query) => {
let discovery_query: DiscoveryQuery<DenseVector> =
discovery_query.transform_into()?;
let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
discovery_query,
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
QueryVector::Context(context_query) => {
let context_query: ContextQuery<DenseVector> = context_query.transform_into()?;
let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
context_query,
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
QueryVector::FeedbackNaive(feedback_query) => {
let feedback_query: NaiveFeedbackQuery<DenseVector> =
feedback_query.transform_into()?;
let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
feedback_query.into_query(),
storage,
hardware_counter,
);
Ok(async_raw_scorer_from_query_scorer(query_scorer, storage))
}
}
}
}
fn async_raw_scorer_from_query_scorer<'a, TQueryScorer>(
query_scorer: TQueryScorer,
storage: &'a MemmapDenseVectorStorage<VectorElementType>,
) -> Box<dyn RawScorer + 'a>
where
TQueryScorer: QueryScorer<TVector = [VectorElementType]> + 'a,
{
Box::new(AsyncRawScorerImpl::new(
query_scorer,
storage.get_mmap_vectors(),
))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/in_ram_persisted_vectors.rs | lib/segment/src/vector_storage/in_ram_persisted_vectors.rs | use std::mem::MaybeUninit;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use memory::madvise::{Advice, AdviceSetting};
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
use crate::vector_storage::AccessPattern;
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
#[derive(Debug)]
pub struct InRamPersistedVectors<T: Sized + 'static> {
mmap_storage: ChunkedMmapVectors<T>,
}
impl<T: Sized + Copy + Clone + Default + 'static> InRamPersistedVectors<T> {
pub fn open(directory: &Path, dim: usize) -> OperationResult<Self> {
let mmap_storage = ChunkedMmapVectors::open(
directory,
dim,
AdviceSetting::from(Advice::Normal),
Some(true),
)?;
Ok(Self { mmap_storage })
}
}
impl<T: Sized + Copy + Clone + Default + 'static> ChunkedVectorStorage<T>
for InRamPersistedVectors<T>
{
#[inline]
fn len(&self) -> usize {
self.mmap_storage.len()
}
#[inline]
fn dim(&self) -> usize {
self.mmap_storage.dim()
}
#[inline]
fn get<P: AccessPattern>(&self, key: VectorOffsetType) -> Option<&[T]> {
self.mmap_storage.get::<P>(key)
}
#[inline]
fn files(&self) -> Vec<PathBuf> {
self.mmap_storage.files()
}
#[inline]
fn immutable_files(&self) -> Vec<PathBuf> {
self.mmap_storage.immutable_files()
}
#[inline]
fn flusher(&self) -> Flusher {
self.mmap_storage.flusher()
}
#[inline]
fn push(
&mut self,
vector: &[T],
hw_counter: &HardwareCounterCell,
) -> OperationResult<VectorOffsetType> {
self.mmap_storage.push(vector, hw_counter)
}
#[inline]
fn insert(
&mut self,
key: VectorOffsetType,
vector: &[T],
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
self.mmap_storage.insert(key, vector, hw_counter)
}
#[inline]
fn insert_many(
&mut self,
start_key: VectorOffsetType,
vectors: &[T],
count: usize,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()> {
self.mmap_storage
.insert_many(start_key, vectors, count, hw_counter)
}
#[inline]
fn get_many<P: AccessPattern>(&self, key: VectorOffsetType, count: usize) -> Option<&[T]> {
self.mmap_storage.get_many::<P>(key, count)
}
#[inline]
fn get_batch<'a>(
&'a self,
keys: &[VectorOffsetType],
vectors: &'a mut [MaybeUninit<&'a [T]>],
) -> &'a [&'a [T]] {
self.mmap_storage.get_batch(keys, vectors)
}
#[inline]
fn get_remaining_chunk_keys(&self, start_key: VectorOffsetType) -> usize {
self.mmap_storage.get_remaining_chunk_keys(start_key)
}
fn max_vector_size_bytes(&self) -> usize {
self.mmap_storage.max_vector_size_bytes()
}
fn is_on_disk(&self) -> bool {
false
}
fn populate(&self) -> OperationResult<()> {
self.mmap_storage.populate()
}
fn clear_cache(&self) -> OperationResult<()> {
self.mmap_storage.clear_cache()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/bitvec.rs | lib/segment/src/vector_storage/bitvec.rs | use bitvec::vec::BitVec;
use common::types::PointOffsetType;
/// Set deleted state in given bitvec.
///
/// Grows bitvec automatically if it is not big enough.
///
/// Returns previous deleted state of the given point.
#[inline]
pub fn bitvec_set_deleted(bitvec: &mut BitVec, point_id: PointOffsetType, deleted: bool) -> bool {
// Set deleted flag if bitvec is large enough, no need to check bounds
if (point_id as usize) < bitvec.len() {
return unsafe { bitvec.replace_unchecked(point_id as usize, deleted) };
}
// Bitvec is too small; grow and set the deletion flag, no need to check bounds
if deleted {
bitvec.resize(point_id as usize + 1, false);
unsafe { bitvec.set_unchecked(point_id as usize, true) };
}
false
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/vector_storage_base.rs | lib/segment/src/vector_storage/vector_storage_base.rs | use std::alloc::Layout;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::maybe_uninit::maybe_uninit_fill_from;
use common::types::PointOffsetType;
use sparse::common::sparse_vector::SparseVector;
use zerocopy::IntoBytes;
use super::dense::memmap_dense_vector_storage::MemmapDenseVectorStorage;
#[cfg(feature = "rocksdb")]
use super::dense::simple_dense_vector_storage::SimpleDenseVectorStorage;
use super::dense::volatile_dense_vector_storage::VolatileDenseVectorStorage;
use super::multi_dense::appendable_mmap_multi_dense_vector_storage::{
AppendableMmapMultiDenseVectorStorage, MultivectorMmapOffset,
};
#[cfg(feature = "rocksdb")]
use super::multi_dense::simple_multi_dense_vector_storage::SimpleMultiDenseVectorStorage;
use super::multi_dense::volatile_multi_dense_vector_storage::VolatileMultiDenseVectorStorage;
use super::sparse::mmap_sparse_vector_storage::MmapSparseVectorStorage;
use super::sparse::volatile_sparse_vector_storage::VolatileSparseVectorStorage;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
MultiDenseVectorInternal, TypedMultiDenseVectorRef, VectorElementType, VectorElementTypeByte,
VectorElementTypeHalf, VectorInternal, VectorRef,
};
use crate::types::{Distance, MultiVectorConfig, VectorStorageDatatype};
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::dense::appendable_dense_vector_storage::AppendableMmapDenseVectorStorage;
use crate::vector_storage::in_ram_persisted_vectors::InRamPersistedVectors;
#[cfg(feature = "rocksdb")]
use crate::vector_storage::sparse::simple_sparse_vector_storage::SimpleSparseVectorStorage;
pub trait AccessPattern: Copy {
const IS_SEQUENTIAL: bool;
}
#[derive(Copy, Clone)]
pub struct Random;
#[derive(Copy, Clone)]
pub struct Sequential;
impl AccessPattern for Random {
const IS_SEQUENTIAL: bool = false;
}
impl AccessPattern for Sequential {
const IS_SEQUENTIAL: bool = true;
}
/// Trait for vector storage
/// El - type of vector element, expected numerical type
/// Storage operates with internal IDs (`PointOffsetType`), which always starts with zero and have no skips
pub trait VectorStorage {
fn distance(&self) -> Distance;
fn datatype(&self) -> VectorStorageDatatype;
fn is_on_disk(&self) -> bool;
/// Number of vectors
///
/// - includes soft deleted vectors, as they are still stored
fn total_vector_count(&self) -> usize;
/// Get the number of available vectors, considering deleted points and vectors
///
/// This uses [`VectorStorage::total_vector_count`] and [`VectorStorage::deleted_vector_count`] internally.
///
/// # Warning
///
/// This number may not always be accurate. See warning in [`VectorStorage::deleted_vector_count`] documentation.
fn available_vector_count(&self) -> usize {
self.total_vector_count()
.saturating_sub(self.deleted_vector_count())
}
/// Get the vector by the given key
/// Get the vector by the given key with potential optimizations for sequential reads.
fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_>;
/// Get the vector by the given key if it exists
fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>>;
fn insert_vector(
&mut self,
key: PointOffsetType,
vector: VectorRef,
hw_counter: &HardwareCounterCell,
) -> OperationResult<()>;
/// Add the given vectors to the storage.
///
/// # Returns
/// The range of point offsets that were added to the storage.
///
/// If stopped, the operation returns a cancellation error.
fn update_from<'a>(
&mut self,
other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
stopped: &AtomicBool,
) -> OperationResult<Range<PointOffsetType>>;
fn flusher(&self) -> Flusher;
fn files(&self) -> Vec<PathBuf>;
fn immutable_files(&self) -> Vec<PathBuf> {
Vec::new()
}
/// Flag the vector by the given key as deleted
///
/// Returns true if the vector was not deleted before and is now deleted
fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool>;
/// Check whether the vector at the given key is flagged as deleted
fn is_deleted_vector(&self, key: PointOffsetType) -> bool;
/// Get the number of deleted vectors, considering deleted points and vectors
///
/// Vectors may be deleted at two levels, as point or as vector. Deleted points should
/// propagate to deleting the vectors. That means that the deleted vector count includes the
/// number of deleted points as well.
///
/// This includes any vectors that were deleted at creation.
///
/// # Warning
///
/// In some very exceptional cases it is possible for this count not to include some deleted
/// points. That may happen when flushing a segment to disk fails. This should be recovered
/// when loading/recovering the segment, but that isn't guaranteed. You should therefore use
/// the deleted count with care.
fn deleted_vector_count(&self) -> usize;
/// Get [`BitSlice`] representation for deleted vectors with deletion flags
///
/// The size of this slice is not guaranteed. It may be smaller/larger than the number of
/// vectors in this segment.
fn deleted_vector_bitslice(&self) -> &BitSlice;
}
pub trait DenseVectorStorage<T: PrimitiveVectorElement>: VectorStorage {
fn vector_dim(&self) -> usize;
fn get_dense<P: AccessPattern>(&self, key: PointOffsetType) -> &[T];
/// Get the raw bytes of the vector by the given key if it exists
fn get_dense_bytes_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<&[u8]> {
((key as usize) < self.total_vector_count()).then(|| self.get_dense::<P>(key).as_bytes())
}
/// Get layout for a single vector
fn get_dense_vector_layout(&self) -> OperationResult<Layout> {
Layout::array::<T>(self.vector_dim())
.map_err(|_| OperationError::service_error("Layout is too big"))
}
/// Get the dense vectors by the given keys
///
/// Implementation can assume that the keys are consecutive
fn get_dense_batch<'a>(
&'a self,
keys: &[PointOffsetType],
vectors: &'a mut [MaybeUninit<&'a [T]>],
) -> &'a [&'a [T]] {
let iter = keys.iter().map(|key| self.get_dense::<Random>(*key));
maybe_uninit_fill_from(vectors, iter).0
}
fn size_of_available_vectors_in_bytes(&self) -> usize {
self.available_vector_count() * self.vector_dim() * std::mem::size_of::<T>()
}
}
pub trait SparseVectorStorage: VectorStorage {
fn get_sparse<P: AccessPattern>(&self, key: PointOffsetType) -> OperationResult<SparseVector>;
fn get_sparse_opt<P: AccessPattern>(
&self,
key: PointOffsetType,
) -> OperationResult<Option<SparseVector>>;
}
pub trait MultiVectorStorage<T: PrimitiveVectorElement>: VectorStorage {
fn vector_dim(&self) -> usize;
fn get_multi<P: AccessPattern>(&self, key: PointOffsetType) -> TypedMultiDenseVectorRef<'_, T>;
fn get_multi_opt<P: AccessPattern>(
&self,
key: PointOffsetType,
) -> Option<TypedMultiDenseVectorRef<'_, T>>;
fn get_batch_multi<'a>(
&'a self,
keys: &[PointOffsetType],
vectors: &'a mut [MaybeUninit<TypedMultiDenseVectorRef<'a, T>>],
) -> &'a [TypedMultiDenseVectorRef<'a, T>] {
debug_assert_eq!(keys.len(), vectors.len());
debug_assert!(keys.len() <= VECTOR_READ_BATCH_SIZE);
let iter = keys.iter().map(|key| self.get_multi::<Random>(*key));
maybe_uninit_fill_from(vectors, iter).0
}
fn iterate_inner_vectors(&self) -> impl Iterator<Item = &[T]> + Clone + Send;
fn multi_vector_config(&self) -> &MultiVectorConfig;
fn size_of_available_vectors_in_bytes(&self) -> usize;
}
#[derive(Debug)]
pub enum VectorStorageEnum {
#[cfg(feature = "rocksdb")]
DenseSimple(SimpleDenseVectorStorage<VectorElementType>),
#[cfg(feature = "rocksdb")]
DenseSimpleByte(SimpleDenseVectorStorage<VectorElementTypeByte>),
#[cfg(feature = "rocksdb")]
DenseSimpleHalf(SimpleDenseVectorStorage<VectorElementTypeHalf>),
DenseVolatile(VolatileDenseVectorStorage<VectorElementType>),
#[cfg(test)]
DenseVolatileByte(VolatileDenseVectorStorage<VectorElementTypeByte>),
#[cfg(test)]
DenseVolatileHalf(VolatileDenseVectorStorage<VectorElementTypeHalf>),
DenseMemmap(Box<MemmapDenseVectorStorage<VectorElementType>>),
DenseMemmapByte(Box<MemmapDenseVectorStorage<VectorElementTypeByte>>),
DenseMemmapHalf(Box<MemmapDenseVectorStorage<VectorElementTypeHalf>>),
DenseAppendableMemmap(
Box<
AppendableMmapDenseVectorStorage<
VectorElementType,
ChunkedMmapVectors<VectorElementType>,
>,
>,
),
DenseAppendableMemmapByte(
Box<
AppendableMmapDenseVectorStorage<
VectorElementTypeByte,
ChunkedMmapVectors<VectorElementTypeByte>,
>,
>,
),
DenseAppendableMemmapHalf(
Box<
AppendableMmapDenseVectorStorage<
VectorElementTypeHalf,
ChunkedMmapVectors<VectorElementTypeHalf>,
>,
>,
),
DenseAppendableInRam(
Box<
AppendableMmapDenseVectorStorage<
VectorElementType,
InRamPersistedVectors<VectorElementType>,
>,
>,
),
DenseAppendableInRamByte(
Box<
AppendableMmapDenseVectorStorage<
VectorElementTypeByte,
InRamPersistedVectors<VectorElementTypeByte>,
>,
>,
),
DenseAppendableInRamHalf(
Box<
AppendableMmapDenseVectorStorage<
VectorElementTypeHalf,
InRamPersistedVectors<VectorElementTypeHalf>,
>,
>,
),
#[cfg(feature = "rocksdb")]
SparseSimple(SimpleSparseVectorStorage),
SparseVolatile(VolatileSparseVectorStorage),
SparseMmap(MmapSparseVectorStorage),
#[cfg(feature = "rocksdb")]
MultiDenseSimple(SimpleMultiDenseVectorStorage<VectorElementType>),
#[cfg(feature = "rocksdb")]
MultiDenseSimpleByte(SimpleMultiDenseVectorStorage<VectorElementTypeByte>),
#[cfg(feature = "rocksdb")]
MultiDenseSimpleHalf(SimpleMultiDenseVectorStorage<VectorElementTypeHalf>),
MultiDenseVolatile(VolatileMultiDenseVectorStorage<VectorElementType>),
#[cfg(test)]
MultiDenseVolatileByte(VolatileMultiDenseVectorStorage<VectorElementTypeByte>),
#[cfg(test)]
MultiDenseVolatileHalf(VolatileMultiDenseVectorStorage<VectorElementTypeHalf>),
MultiDenseAppendableMemmap(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementType,
ChunkedMmapVectors<VectorElementType>,
ChunkedMmapVectors<MultivectorMmapOffset>,
>,
>,
),
MultiDenseAppendableMemmapByte(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementTypeByte,
ChunkedMmapVectors<VectorElementTypeByte>,
ChunkedMmapVectors<MultivectorMmapOffset>,
>,
>,
),
MultiDenseAppendableMemmapHalf(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementTypeHalf,
ChunkedMmapVectors<VectorElementTypeHalf>,
ChunkedMmapVectors<MultivectorMmapOffset>,
>,
>,
),
MultiDenseAppendableInRam(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementType,
InRamPersistedVectors<VectorElementType>,
InRamPersistedVectors<MultivectorMmapOffset>,
>,
>,
),
MultiDenseAppendableInRamByte(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementTypeByte,
InRamPersistedVectors<VectorElementTypeByte>,
InRamPersistedVectors<MultivectorMmapOffset>,
>,
>,
),
MultiDenseAppendableInRamHalf(
Box<
AppendableMmapMultiDenseVectorStorage<
VectorElementTypeHalf,
InRamPersistedVectors<VectorElementTypeHalf>,
InRamPersistedVectors<MultivectorMmapOffset>,
>,
>,
),
}
impl VectorStorageEnum {
pub fn try_multi_vector_config(&self) -> Option<&MultiVectorConfig> {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(_) => None,
VectorStorageEnum::DenseVolatile(_) => None,
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(_) => None,
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(_) => None,
VectorStorageEnum::DenseMemmap(_) => None,
VectorStorageEnum::DenseMemmapByte(_) => None,
VectorStorageEnum::DenseMemmapHalf(_) => None,
VectorStorageEnum::DenseAppendableMemmap(_) => None,
VectorStorageEnum::DenseAppendableMemmapByte(_) => None,
VectorStorageEnum::DenseAppendableMemmapHalf(_) => None,
VectorStorageEnum::DenseAppendableInRam(_) => None,
VectorStorageEnum::DenseAppendableInRamByte(_) => None,
VectorStorageEnum::DenseAppendableInRamHalf(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => None,
VectorStorageEnum::SparseVolatile(_) => None,
VectorStorageEnum::SparseMmap(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(s) => Some(s.multi_vector_config()),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(s) => Some(s.multi_vector_config()),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseVolatile(s) => Some(s.multi_vector_config()),
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileByte(s) => Some(s.multi_vector_config()),
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileHalf(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableMemmap(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableMemmapByte(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableMemmapHalf(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableInRam(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableInRamByte(s) => Some(s.multi_vector_config()),
VectorStorageEnum::MultiDenseAppendableInRamHalf(s) => Some(s.multi_vector_config()),
}
}
pub(crate) fn default_vector(&self) -> VectorInternal {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(v) => VectorInternal::from(vec![1.0; v.vector_dim()]),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseVolatile(v) => VectorInternal::from(vec![1.0; v.vector_dim()]),
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseMemmap(v) => VectorInternal::from(vec![1.0; v.vector_dim()]),
VectorStorageEnum::DenseMemmapByte(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseMemmapHalf(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableMemmap(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableMemmapByte(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableMemmapHalf(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableInRam(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableInRamByte(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
VectorStorageEnum::DenseAppendableInRamHalf(v) => {
VectorInternal::from(vec![1.0; v.vector_dim()])
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => VectorInternal::from(SparseVector::default()),
VectorStorageEnum::SparseVolatile(_) => VectorInternal::from(SparseVector::default()),
VectorStorageEnum::SparseMmap(_) => VectorInternal::from(SparseVector::default()),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseVolatile(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileByte(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileHalf(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableMemmap(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableMemmapByte(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableMemmapHalf(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableInRam(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableInRamByte(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
VectorStorageEnum::MultiDenseAppendableInRamHalf(v) => {
VectorInternal::from(MultiDenseVectorInternal::placeholder(v.vector_dim()))
}
}
}
pub fn size_of_available_vectors_in_bytes(&self) -> usize {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseVolatile(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseMemmap(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseMemmapByte(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseMemmapHalf(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseAppendableMemmap(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseAppendableMemmapByte(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::DenseAppendableMemmapHalf(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::DenseAppendableInRam(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::DenseAppendableInRamByte(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::DenseAppendableInRamHalf(v) => {
v.size_of_available_vectors_in_bytes()
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::SparseVolatile(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::SparseMmap(_v) => {
unreachable!(
"Mmap sparse storage does not know its total size, get from index instead"
)
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::MultiDenseVolatile(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileByte(v) => v.size_of_available_vectors_in_bytes(),
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileHalf(v) => v.size_of_available_vectors_in_bytes(),
VectorStorageEnum::MultiDenseAppendableMemmap(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::MultiDenseAppendableMemmapByte(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::MultiDenseAppendableMemmapHalf(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::MultiDenseAppendableInRam(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::MultiDenseAppendableInRamByte(v) => {
v.size_of_available_vectors_in_bytes()
}
VectorStorageEnum::MultiDenseAppendableInRamHalf(v) => {
v.size_of_available_vectors_in_bytes()
}
}
}
pub fn populate(&self) -> OperationResult<()> {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::DenseVolatile(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::DenseMemmap(vs) => vs.populate(),
VectorStorageEnum::DenseMemmapByte(vs) => vs.populate(),
VectorStorageEnum::DenseMemmapHalf(vs) => vs.populate(),
VectorStorageEnum::DenseAppendableMemmap(vs) => vs.populate()?,
VectorStorageEnum::DenseAppendableMemmapByte(vs) => vs.populate()?,
VectorStorageEnum::DenseAppendableMemmapHalf(vs) => vs.populate()?,
VectorStorageEnum::DenseAppendableInRam(vs) => vs.populate()?,
VectorStorageEnum::DenseAppendableInRamByte(vs) => vs.populate()?,
VectorStorageEnum::DenseAppendableInRamHalf(vs) => vs.populate()?,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::SparseVolatile(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::SparseMmap(vs) => vs.populate()?,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::MultiDenseVolatile(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileByte(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::MultiDenseAppendableMemmap(vs) => vs.populate()?,
VectorStorageEnum::MultiDenseAppendableMemmapByte(vs) => vs.populate()?,
VectorStorageEnum::MultiDenseAppendableMemmapHalf(vs) => vs.populate()?,
VectorStorageEnum::MultiDenseAppendableInRam(vs) => vs.populate()?,
VectorStorageEnum::MultiDenseAppendableInRamByte(vs) => vs.populate()?,
VectorStorageEnum::MultiDenseAppendableInRamHalf(vs) => vs.populate()?,
}
Ok(())
}
pub fn clear_cache(&self) -> OperationResult<()> {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::DenseVolatile(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::DenseMemmap(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseMemmapByte(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseMemmapHalf(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableMemmap(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableMemmapByte(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableMemmapHalf(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableInRam(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableInRamByte(vs) => vs.clear_cache()?,
VectorStorageEnum::DenseAppendableInRamHalf(vs) => vs.clear_cache()?,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::SparseVolatile(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::SparseMmap(vs) => vs.clear_cache()?,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(_) => {} // Can't populate as it is not mmap
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::MultiDenseVolatile(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileByte(_) => {} // Can't populate as it is not mmap
#[cfg(test)]
VectorStorageEnum::MultiDenseVolatileHalf(_) => {} // Can't populate as it is not mmap
VectorStorageEnum::MultiDenseAppendableMemmap(vs) => vs.clear_cache()?,
VectorStorageEnum::MultiDenseAppendableMemmapByte(vs) => vs.clear_cache()?,
VectorStorageEnum::MultiDenseAppendableMemmapHalf(vs) => vs.clear_cache()?,
VectorStorageEnum::MultiDenseAppendableInRam(vs) => vs.clear_cache()?,
VectorStorageEnum::MultiDenseAppendableInRamByte(vs) => vs.clear_cache()?,
VectorStorageEnum::MultiDenseAppendableInRamHalf(vs) => vs.clear_cache()?,
}
Ok(())
}
/// Get the raw bytes of the vector by the given key if it exists
pub fn get_vector_bytes_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<&[u8]> {
match self {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(v) => v.get_dense_bytes_opt::<P>(key),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleByte(v) => v.get_dense_bytes_opt::<P>(key),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimpleHalf(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseVolatile(v) => v.get_dense_bytes_opt::<P>(key),
#[cfg(test)]
VectorStorageEnum::DenseVolatileByte(v) => v.get_dense_bytes_opt::<P>(key),
#[cfg(test)]
VectorStorageEnum::DenseVolatileHalf(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseMemmap(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseMemmapByte(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseMemmapHalf(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableMemmap(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableMemmapByte(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableMemmapHalf(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableInRam(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableInRamByte(v) => v.get_dense_bytes_opt::<P>(key),
VectorStorageEnum::DenseAppendableInRamHalf(v) => v.get_dense_bytes_opt::<P>(key),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => None,
VectorStorageEnum::SparseVolatile(_) => None,
VectorStorageEnum::SparseMmap(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(_) => None,
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleHalf(_) => None,
VectorStorageEnum::MultiDenseVolatile(_) => None,
#[cfg(test)]
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/mod.rs | lib/segment/src/vector_storage/mod.rs | #[cfg(target_os = "linux")]
pub mod async_raw_scorer;
mod chunked_mmap_vectors;
pub mod chunked_vectors;
pub mod quantized;
pub mod raw_scorer;
mod vector_storage_base;
#[cfg(test)]
mod tests;
#[cfg(target_os = "linux")]
mod async_io;
mod async_io_mock;
mod bitvec;
pub mod chunked_vector_storage;
pub mod common;
pub mod dense;
mod in_ram_persisted_vectors;
pub mod multi_dense;
pub mod query;
pub mod query_scorer;
pub mod sparse;
pub use raw_scorer::*;
pub use vector_storage_base::*;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/chunked_mmap_vectors.rs | lib/segment/src/vector_storage/chunked_mmap_vectors.rs | use std::cmp::max;
use std::io::BufReader;
use std::mem::MaybeUninit;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::maybe_uninit::maybe_uninit_fill_from;
use fs_err as fs;
use fs_err::File;
use io::file_operations::atomic_save_json;
use memmap2::MmapMut;
use memory::chunked_utils::{UniversalMmapChunk, chunk_name, create_chunk, read_mmaps};
use memory::fadvise::clear_disk_cache;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_ops::{create_and_ensure_length, open_write_mmap};
use memory::mmap_type::MmapType;
use num_traits::AsPrimitive;
use serde::{Deserialize, Serialize};
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::vector_storage::AccessPattern;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
use crate::vector_storage::common::{CHUNK_SIZE, PAGE_SIZE_BYTES, VECTOR_READ_BATCH_SIZE};
use crate::vector_storage::query_scorer::is_read_with_prefetch_efficient_vectors;
// File names used inside a chunked-mmap storage directory.
const CONFIG_FILE_NAME: &str = "config.json";
const STATUS_FILE_NAME: &str = "status.dat";
/// Mmap-backed storage status: a single persisted length counter.
/// `repr(C)` so the in-file layout matches the in-memory layout.
#[repr(C)]
pub struct Status {
    // Number of vectors stored (highest inserted key + 1).
    pub len: usize,
}
/// Persisted (JSON) configuration of a chunked mmap vector storage.
#[derive(Debug, Serialize, Deserialize)]
struct ChunkedMmapConfig {
    // Chunk size in bytes, rounded down to a whole number of vectors.
    chunk_size_bytes: usize,
    // How many vectors fit in one chunk.
    chunk_size_vectors: usize,
    // Dimensionality (number of elements) of each stored vector.
    dim: usize,
    // Whether chunks should be populated (paged into RAM) on open.
    // Optional for backward compatibility with configs written before this field existed.
    #[serde(default)]
    populate: Option<bool>,
}
/// Vector storage that keeps fixed-size vectors in a sequence of mmap-ed chunk files,
/// plus a small mmap-ed status file holding the total length.
#[derive(Debug)]
pub struct ChunkedMmapVectors<T: Sized + 'static> {
    config: ChunkedMmapConfig,
    // Mmap-ed view over the status file (persisted length).
    status: MmapType<Status>,
    chunks: Vec<UniversalMmapChunk<T>>,
    directory: PathBuf,
}
impl<T: Sized + Copy + 'static> ChunkedMmapVectors<T> {
    /// Path of the JSON config file inside the storage directory.
    fn config_file(directory: &Path) -> PathBuf {
        directory.join(CONFIG_FILE_NAME)
    }
    /// Path of the mmap-ed status file inside the storage directory.
    pub fn status_file(directory: &Path) -> PathBuf {
        directory.join(STATUS_FILE_NAME)
    }
    /// Create the status file (sized to hold a single `usize`) if missing,
    /// then open it as a writable mmap.
    pub fn ensure_status_file(directory: &Path) -> OperationResult<MmapMut> {
        let status_file = Self::status_file(directory);
        if !status_file.exists() {
            {
                // The status file only stores `Status { len: usize }`.
                let length = std::mem::size_of::<usize>() as u64;
                create_and_ensure_length(&status_file, length as usize)?;
            }
        }
        Ok(open_write_mmap(
            &status_file,
            AdviceSetting::from(Advice::Normal),
            false, // Status file is write-only
        )?)
    }
    /// Load and validate the stored config, or create a fresh one if it is
    /// missing or unreadable.
    fn ensure_config(
        directory: &Path,
        dim: usize,
        populate: Option<bool>,
    ) -> OperationResult<ChunkedMmapConfig> {
        let config_file = Self::config_file(directory);
        match Self::load_config(&config_file) {
            Ok(Some(config)) => {
                if config.dim == dim {
                    Ok(config)
                } else {
                    // NOTE(review): this prints the dim stored in the file as "expected"
                    // and the requested dim as "found" — verify the wording is not swapped.
                    Err(OperationError::service_error(format!(
                        "Wrong configuration in {}: expected {}, found {dim}",
                        config_file.display(),
                        config.dim,
                    )))
                }
            }
            Ok(None) => Self::create_config(&config_file, dim, populate),
            Err(e) => {
                // An unreadable config is recovered from by re-creating it.
                log::error!("Failed to deserialize config file {:?}: {e}", &config_file);
                Self::create_config(&config_file, dim, populate)
            }
        }
    }
    /// Read the config file if it exists; `Ok(None)` when absent.
    fn load_config(config_file: &Path) -> OperationResult<Option<ChunkedMmapConfig>> {
        if config_file.exists() {
            let file = BufReader::new(File::open(config_file)?);
            let config: ChunkedMmapConfig = serde_json::from_reader(file)?;
            Ok(Some(config))
        } else {
            Ok(None)
        }
    }
    /// Compute a fresh config for the given dimensionality and persist it atomically.
    fn create_config(
        config_file: &Path,
        dim: usize,
        populate: Option<bool>,
    ) -> OperationResult<ChunkedMmapConfig> {
        let chunk_size_bytes = CHUNK_SIZE;
        let vector_size_bytes = dim * std::mem::size_of::<T>();
        // Round the chunk size down to a whole number of vectors so that
        // no vector ever straddles a chunk boundary.
        let chunk_size_vectors = chunk_size_bytes / vector_size_bytes;
        let corrected_chunk_size_bytes = chunk_size_vectors * vector_size_bytes;
        let config = ChunkedMmapConfig {
            chunk_size_bytes: corrected_chunk_size_bytes,
            chunk_size_vectors,
            dim,
            populate,
        };
        atomic_save_json(config_file, &config)?;
        Ok(config)
    }
    /// Open (or initialize) a chunked mmap storage in `directory`.
    pub fn open(
        directory: &Path,
        dim: usize,
        advice: AdviceSetting,
        populate: Option<bool>,
    ) -> OperationResult<Self> {
        fs::create_dir_all(directory)?;
        let status_mmap = Self::ensure_status_file(directory)?;
        // SAFETY-relevant: `ensure_status_file` creates the file with exactly
        // `size_of::<usize>()` bytes, matching the `repr(C)` layout of `Status`.
        let status = unsafe { MmapType::from(status_mmap) };
        let config = Self::ensure_config(directory, dim, populate)?;
        let chunks = read_mmaps(directory, populate.unwrap_or_default(), advice)?;
        let vectors = Self {
            status,
            config,
            chunks,
            directory: directory.to_owned(),
        };
        Ok(vectors)
    }
    /// Index of the chunk that holds the vector with the given key.
    #[inline]
    fn get_chunk_index(&self, key: usize) -> usize {
        key / self.config.chunk_size_vectors
    }
    /// Returns the byte offset of the vector in the chunk
    #[inline]
    fn get_chunk_offset(&self, key: usize) -> usize {
        let chunk_vector_idx = key % self.config.chunk_size_vectors;
        chunk_vector_idx * self.config.dim
    }
    /// Upper bound on a single insert: one whole chunk.
    pub fn max_vector_size_bytes(&self) -> usize {
        self.config.chunk_size_bytes
    }
    /// Number of vectors stored (from the persisted status).
    pub fn len(&self) -> usize {
        self.status.len
    }
    /// Vector dimensionality.
    pub fn dim(&self) -> usize {
        self.config.dim
    }
    /// Create and append one more chunk file.
    fn add_chunk(&mut self) -> OperationResult<()> {
        let chunk = create_chunk(
            &self.directory,
            self.chunks.len(),
            self.config.chunk_size_bytes,
        )?;
        self.chunks.push(chunk);
        Ok(())
    }
    /// Insert a single vector at `key`.
    pub fn insert(
        &mut self,
        key: VectorOffsetType,
        vector: &[T],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.insert_many(key, vector, 1, hw_counter)
    }
    /// Insert `count` flattened vectors starting at `start_key`.
    /// All vectors must fit in a single chunk; grows the chunk list as needed
    /// and advances the persisted length if the write extends the storage.
    #[inline]
    pub fn insert_many(
        &mut self,
        start_key: VectorOffsetType,
        vectors: &[T],
        count: usize,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        assert_eq!(
            vectors.len(),
            count * self.config.dim,
            "Vector size mismatch"
        );
        let start_key = start_key.as_();
        let chunk_idx = self.get_chunk_index(start_key);
        let chunk_offset = self.get_chunk_offset(start_key);
        // check if the vectors fit in the chunk
        if chunk_offset + vectors.len() > self.config.dim * self.config.chunk_size_vectors {
            return Err(OperationError::service_error(format!(
                "Vectors do not fit in the chunk. Chunk idx {chunk_idx}, chunk offset {chunk_offset}, vectors count {count}",
            )));
        }
        // Ensure capacity
        while chunk_idx >= self.chunks.len() {
            self.add_chunk()?;
        }
        let chunk = &mut self.chunks[chunk_idx];
        chunk.as_mut_slice()[chunk_offset..chunk_offset + vectors.len()].copy_from_slice(vectors);
        hw_counter
            .vector_io_write_counter()
            .incr_delta(size_of_val(vectors));
        let new_len = max(self.status.len, start_key + count);
        // Only write when the length actually grows, to avoid redundantly
        // dirtying the mmap-ed status page on overwrites.
        if new_len > self.status.len {
            self.status.len = new_len;
        }
        Ok(())
    }
    // returns how many vectors can be inserted starting from key
    pub fn get_remaining_chunk_keys(&self, start_key: VectorOffsetType) -> usize {
        let start_key = start_key.as_();
        let chunk_vector_idx = self.get_chunk_offset(start_key) / self.config.dim;
        self.config.chunk_size_vectors - chunk_vector_idx
    }
    /// Append a vector at the end of the storage, returning its new key.
    pub fn push(
        &mut self,
        vector: &[T],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<VectorOffsetType> {
        let new_id = self.status.len;
        self.insert(new_id, vector, hw_counter)?;
        Ok(new_id)
    }
    /// Read a single vector; `None` if the key is out of range.
    fn get(&self, key: VectorOffsetType, force_sequential: bool) -> Option<&[T]> {
        self.get_many(key, 1, force_sequential)
    }
    // returns count flattened vectors starting from key. if chunk boundary is crossed, returns None
    #[inline]
    fn get_many(
        &self,
        start_key: VectorOffsetType,
        count: usize,
        force_sequential: bool,
    ) -> Option<&[T]> {
        let start_key: usize = start_key.as_();
        let chunk_idx = self.get_chunk_index(start_key);
        if chunk_idx >= self.chunks.len() {
            return None;
        }
        let block_size_elements = count * self.config.dim;
        let chunk_offset = self.get_chunk_offset(start_key);
        let chunk_end = chunk_offset + block_size_elements;
        let chunk = &self.chunks[chunk_idx];
        if chunk_end > chunk.len() {
            None
        } else if force_sequential || block_size_elements * size_of::<T>() > PAGE_SIZE_BYTES * 4 {
            // Large (or explicitly sequential) reads go through the sequential-access view.
            Some(&chunk.as_seq_slice()[chunk_offset..chunk_end])
        } else {
            Some(&chunk.as_slice()[chunk_offset..chunk_end])
        }
    }
    /// Resolve a batch of keys into vector slices, filling `vectors` in order.
    ///
    /// # Panics
    ///
    /// Panics if any key is out of range.
    pub fn get_batch<'a>(
        &'a self,
        keys: &[VectorOffsetType],
        vectors: &'a mut [MaybeUninit<&'a [T]>],
    ) -> &'a [&'a [T]] {
        debug_assert!(keys.len() == vectors.len());
        debug_assert!(keys.len() <= VECTOR_READ_BATCH_SIZE);
        let do_sequential_read = is_read_with_prefetch_efficient_vectors(keys);
        maybe_uninit_fill_from(
            vectors,
            keys.iter().map(|key| {
                self.get(*key, do_sequential_read)
                    .unwrap_or_else(|| panic!("Vector {key} not found"))
            }),
        )
        .0
    }
    /// Capture a flusher closure that persists all chunks and then the status.
    // NOTE(review): chunks are flushed before the status (length) — presumably so a
    // persisted length never refers to unflushed data; confirm this invariant.
    pub fn flusher(&self) -> Flusher {
        Box::new({
            let status_flusher = self.status.flusher();
            let chunks_flushers: Vec<_> = self.chunks.iter().map(|chunk| chunk.flusher()).collect();
            move || {
                for flusher in chunks_flushers {
                    flusher()?;
                }
                status_flusher()?;
                Ok(())
            }
        })
    }
    /// All files that belong to this storage (config, status, chunks).
    pub fn files(&self) -> Vec<PathBuf> {
        let mut files = Vec::new();
        files.push(Self::config_file(&self.directory));
        files.push(Self::status_file(&self.directory));
        for chunk_idx in 0..self.chunks.len() {
            files.push(chunk_name(&self.directory, chunk_idx));
        }
        files
    }
    /// Files that never change after creation.
    pub fn immutable_files(&self) -> Vec<PathBuf> {
        vec![Self::config_file(&self.directory)] // TODO: Is config immutable?
    }
}
// Trait adapter: delegates every `ChunkedVectorStorage` method to the inherent
// implementation above; only `is_on_disk`, `populate` and `clear_cache` carry logic.
impl<T: Sized + Copy + 'static> ChunkedVectorStorage<T> for ChunkedMmapVectors<T> {
    #[inline]
    fn len(&self) -> usize {
        ChunkedMmapVectors::len(self)
    }
    #[inline]
    fn dim(&self) -> usize {
        ChunkedMmapVectors::dim(self)
    }
    #[inline]
    fn get<P: AccessPattern>(&self, key: VectorOffsetType) -> Option<&[T]> {
        ChunkedMmapVectors::get(self, key, P::IS_SEQUENTIAL)
    }
    #[inline]
    fn files(&self) -> Vec<PathBuf> {
        ChunkedMmapVectors::files(self)
    }
    #[inline]
    fn immutable_files(&self) -> Vec<PathBuf> {
        ChunkedMmapVectors::immutable_files(self)
    }
    #[inline]
    fn flusher(&self) -> Flusher {
        ChunkedMmapVectors::flusher(self)
    }
    #[inline]
    fn push(
        &mut self,
        vector: &[T],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<VectorOffsetType> {
        ChunkedMmapVectors::push(self, vector, hw_counter)
    }
    #[inline]
    fn insert(
        &mut self,
        key: VectorOffsetType,
        vector: &[T],
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        ChunkedMmapVectors::insert(self, key, vector, hw_counter)
    }
    #[inline]
    fn insert_many(
        &mut self,
        start_key: VectorOffsetType,
        vectors: &[T],
        count: usize,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        ChunkedMmapVectors::insert_many(self, start_key, vectors, count, hw_counter)
    }
    #[inline]
    fn get_many<P: AccessPattern>(&self, key: VectorOffsetType, count: usize) -> Option<&[T]> {
        ChunkedMmapVectors::get_many(self, key, count, P::IS_SEQUENTIAL)
    }
    #[inline]
    fn get_batch<'a>(
        &'a self,
        keys: &[VectorOffsetType],
        vectors: &'a mut [MaybeUninit<&'a [T]>],
    ) -> &'a [&'a [T]] {
        ChunkedMmapVectors::get_batch(self, keys, vectors)
    }
    #[inline]
    fn get_remaining_chunk_keys(&self, start_key: VectorOffsetType) -> usize {
        ChunkedMmapVectors::get_remaining_chunk_keys(self, start_key)
    }
    #[inline]
    fn max_vector_size_bytes(&self) -> usize {
        ChunkedMmapVectors::max_vector_size_bytes(self)
    }
    /// On disk unless `populate` was explicitly enabled in the config.
    fn is_on_disk(&self) -> bool {
        !self.config.populate.unwrap_or(false)
    }
    /// Page every chunk into RAM.
    fn populate(&self) -> OperationResult<()> {
        for chunk in &self.chunks {
            chunk.populate()?;
        }
        Ok(())
    }
    /// Drop the OS page cache for every chunk file.
    fn clear_cache(&self) -> OperationResult<()> {
        for chunk_idx in 0..self.chunks.len() {
            let file_path = chunk_name(&self.directory, chunk_idx);
            clear_disk_cache(&file_path)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use std::iter::zip;
    use rand::SeedableRng;
    use rand::prelude::StdRng;
    use tempfile::Builder;
    use super::*;
    use crate::data_types::vectors::VectorElementType;
    use crate::fixtures::index_fixtures::random_vector;
    // End-to-end test: push, batch read, overwrite, and flush a multi-chunk storage.
    #[test]
    fn test_chunked_mmap() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let dim = 500;
        let num_vectors = 1000;
        // Fixed seed keeps the test deterministic.
        let mut rng = StdRng::seed_from_u64(42);
        let hw_counter = HardwareCounterCell::new();
        let mut vectors: Vec<_> = (0..num_vectors)
            .map(|_| random_vector(&mut rng, dim))
            .collect();
        {
            let mut chunked_mmap: ChunkedMmapVectors<VectorElementType> =
                ChunkedMmapVectors::open(dir.path(), dim, AdviceSetting::Global, Some(true))
                    .unwrap();
            for vec in &vectors {
                chunked_mmap.push(vec, &hw_counter).unwrap();
            }
            // Batch read from an arbitrary offset and compare against the source vectors.
            let mut vectors_buffer = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
            let random_offset = 666;
            let batch_size = 10;
            assert!(random_offset + batch_size < num_vectors);
            assert!(batch_size <= VECTOR_READ_BATCH_SIZE);
            let batch_ids = (random_offset..random_offset + batch_size).collect::<Vec<_>>();
            let vectors_buffer =
                chunked_mmap.get_batch(&batch_ids, &mut vectors_buffer[..batch_size]);
            for (i, (vec, loaded_vec)) in zip(
                &vectors[random_offset..random_offset + batch_size],
                &vectors_buffer[..batch_size],
            )
            .enumerate()
            {
                assert_eq!(
                    vec, loaded_vec,
                    "Vectors at index {i} in chunked_mmap are not equal to vectors",
                );
            }
            // Overwrite a few positions (including first and last) via `insert`.
            vectors[0] = random_vector(&mut rng, dim);
            vectors[150] = random_vector(&mut rng, dim);
            vectors[44] = random_vector(&mut rng, dim);
            vectors[999] = random_vector(&mut rng, dim);
            chunked_mmap.insert(0, &vectors[0], &hw_counter).unwrap();
            chunked_mmap
                .insert(150, &vectors[150], &hw_counter)
                .unwrap();
            chunked_mmap.insert(44, &vectors[44], &hw_counter).unwrap();
            chunked_mmap
                .insert(999, &vectors[999], &hw_counter)
                .unwrap();
            assert!(
                chunked_mmap.chunks.len() > 1,
                "must have multiple chunks to test",
            );
            chunked_mmap.flusher()().unwrap();
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/common.rs | lib/segment/src/vector_storage/common.rs | use std::sync::atomic::{AtomicBool, Ordering};
#[cfg(feature = "rocksdb")]
use serde::{Deserialize, Serialize};
// Process-wide toggle for the async raw scorer; relaxed ordering is sufficient
// for an independent boolean flag.
static ASYNC_SCORER: AtomicBool = AtomicBool::new(false);
/// Enable or disable the async raw scorer process-wide.
pub fn set_async_scorer(async_scorer: bool) {
    ASYNC_SCORER.store(async_scorer, Ordering::Relaxed);
}
/// Whether the async raw scorer is currently enabled.
pub fn get_async_scorer() -> bool {
    ASYNC_SCORER.load(Ordering::Relaxed)
}
/// Storage type for RocksDB based storage
#[derive(Debug, Deserialize, Serialize, Clone)]
#[cfg(feature = "rocksdb")]
pub struct StoredRecord<T> {
    // Soft-deletion flag persisted alongside the vector payload.
    pub deleted: bool,
    pub vector: T,
}
/// Minimal number of bytes we read from disk in one go
/// WARN: this might be system dependent, so we assume 4Kb, which might be wrong
/// ToDo: read this from system
pub const PAGE_SIZE_BYTES: usize = 4096;
/// Number of vectors we read from storage in one batch
/// in case we need to score an iterator of vector ids
pub const VECTOR_READ_BATCH_SIZE: usize = 64;
/// Vector storage chunk size in bytes; kept small in debug builds so tests
/// exercise multi-chunk paths.
#[cfg(debug_assertions)]
pub const CHUNK_SIZE: usize = 512 * 1024;
/// Vector storage chunk size in bytes
#[cfg(not(debug_assertions))]
pub const CHUNK_SIZE: usize = 32 * 1024 * 1024;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/raw_scorer.rs | lib/segment/src/vector_storage/raw_scorer.rs | use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::{PointOffsetType, ScoreType};
use sparse::common::sparse_vector::SparseVector;
use super::query::{
ContextQuery, DiscoveryQuery, RecoBestScoreQuery, RecoQuery, RecoSumScoresQuery, TransformInto,
};
use super::query_scorer::custom_query_scorer::CustomQueryScorer;
use super::query_scorer::multi_custom_query_scorer::MultiCustomQueryScorer;
use super::query_scorer::sparse_custom_query_scorer::SparseCustomQueryScorer;
use super::query_scorer::{QueryScorerBytes, QueryScorerBytesImpl};
use super::{DenseVectorStorage, MultiVectorStorage, SparseVectorStorage, VectorStorageEnum};
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
DenseVector, MultiDenseVectorInternal, QueryVector, VectorInternal,
};
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric, EuclidMetric, ManhattanMetric};
use crate::types::Distance;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query::NaiveFeedbackQuery;
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::query_scorer::metric_query_scorer::MetricQueryScorer;
use crate::vector_storage::query_scorer::multi_metric_query_scorer::MultiMetricQueryScorer;
use crate::vector_storage::query_scorer::sparse_metric_query_scorer::SparseMetricQueryScorer;
use crate::vector_storage::sparse::volatile_sparse_vector_storage::VolatileSparseVectorStorage;
/// Interface for scoring stored vectors against a fixed query.
pub trait RawScorer {
    /// Score all `points`, writing results into `scores`; both slices must
    /// have equal length.
    fn score_points(&self, points: &[PointOffsetType], scores: &mut [ScoreType]);
    /// Score stored vector with vector under the given index
    fn score_point(&self, point: PointOffsetType) -> ScoreType;
    /// Return distance between stored points selected by IDs
    ///
    /// # Panics
    ///
    /// Panics if any id is out of range
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType;
    /// Return [`QueryScorerBytes`] if the underlying scorer supports it
    fn scorer_bytes(&self) -> Option<&dyn QueryScorerBytes>;
}
/// Generic [`RawScorer`] implementation backed by a concrete [`QueryScorer`].
pub struct RawScorerImpl<TQueryScorer: QueryScorer> {
    pub query_scorer: TQueryScorer,
}
/// Build a [`RawScorer`] for `query` over any vector storage variant.
///
/// For non-appendable dense f32 memmap storage with an async reader, the
/// async raw scorer is preferred on Linux; on initialization failure it
/// falls back to the synchronous implementation.
pub fn new_raw_scorer<'a>(
    query: QueryVector,
    vector_storage: &'a VectorStorageEnum,
    hc: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    match vector_storage {
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimple(vs) => raw_scorer_impl(query, vs, hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimpleByte(vs) => raw_scorer_impl(query, vs, hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimpleHalf(vs) => raw_scorer_impl(query, vs, hc),
        VectorStorageEnum::DenseVolatile(vs) => raw_scorer_impl(query, vs, hc),
        #[cfg(test)]
        VectorStorageEnum::DenseVolatileByte(vs) => raw_scorer_impl(query, vs, hc),
        #[cfg(test)]
        VectorStorageEnum::DenseVolatileHalf(vs) => raw_scorer_impl(query, vs, hc),
        VectorStorageEnum::DenseMemmap(vs) => {
            if vs.has_async_reader() {
                #[cfg(target_os = "linux")]
                {
                    let scorer_result = super::async_raw_scorer::new(query.clone(), vs, hc.fork());
                    match scorer_result {
                        Ok(raw_scorer) => return Ok(raw_scorer),
                        // Fall through to the sync scorer below on failure.
                        Err(err) => log::error!("failed to initialize async raw scorer: {err}"),
                    };
                }
                #[cfg(not(target_os = "linux"))]
                log::warn!("async raw scorer is only supported on Linux");
            }
            raw_scorer_impl(query, vs.as_ref(), hc)
        }
        // TODO(byte_storage): Implement async raw scorer for DenseMemmapByte and DenseMemmapHalf
        VectorStorageEnum::DenseMemmapByte(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseMemmapHalf(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableMemmap(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableMemmapByte(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableMemmapHalf(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableInRam(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableInRamByte(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        VectorStorageEnum::DenseAppendableInRamHalf(vs) => raw_scorer_impl(query, vs.as_ref(), hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::SparseSimple(vs) => raw_sparse_scorer_impl(query, vs, hc),
        VectorStorageEnum::SparseVolatile(vs) => raw_sparse_scorer_volatile(query, vs, hc),
        VectorStorageEnum::SparseMmap(vs) => raw_sparse_scorer_impl(query, vs, hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimple(vs) => raw_multi_scorer_impl(query, vs, hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimpleByte(vs) => raw_multi_scorer_impl(query, vs, hc),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimpleHalf(vs) => raw_multi_scorer_impl(query, vs, hc),
        VectorStorageEnum::MultiDenseVolatile(vs) => raw_multi_scorer_impl(query, vs, hc),
        #[cfg(test)]
        VectorStorageEnum::MultiDenseVolatileByte(vs) => raw_multi_scorer_impl(query, vs, hc),
        #[cfg(test)]
        VectorStorageEnum::MultiDenseVolatileHalf(vs) => raw_multi_scorer_impl(query, vs, hc),
        VectorStorageEnum::MultiDenseAppendableMemmap(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
        VectorStorageEnum::MultiDenseAppendableMemmapByte(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
        VectorStorageEnum::MultiDenseAppendableMemmapHalf(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
        VectorStorageEnum::MultiDenseAppendableInRam(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
        VectorStorageEnum::MultiDenseAppendableInRamByte(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
        VectorStorageEnum::MultiDenseAppendableInRamHalf(vs) => {
            raw_multi_scorer_impl(query, vs.as_ref(), hc)
        }
    }
}
pub static DEFAULT_STOPPED: AtomicBool = AtomicBool::new(false);
/// Build a raw scorer over volatile sparse storage.
///
/// Nearest-neighbor queries with a sparse vector get the direct metric scorer;
/// any other query kind is delegated to the generic sparse scorer builder, and
/// a nearest query with a non-sparse vector is an error.
pub fn raw_sparse_scorer_volatile<'a>(
    query: QueryVector,
    vector_storage: &'a VolatileSparseVectorStorage,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    match query {
        QueryVector::Nearest(VectorInternal::Sparse(sparse_vector)) => {
            let scorer =
                SparseMetricQueryScorer::new(sparse_vector, vector_storage, hardware_counter);
            raw_scorer_from_query_scorer(scorer)
        }
        QueryVector::Nearest(_) => Err(OperationError::service_error(
            "Sparse vector expected to be used against a sparse vector storage",
        )),
        other => raw_sparse_scorer_impl(other, vector_storage, hardware_counter),
    }
}
/// Build a raw scorer over sparse storage for custom (non-nearest) query kinds.
///
/// Nearest queries are rejected: they must be handled by a dedicated scorer
/// (see [`raw_sparse_scorer_volatile`]).
pub fn raw_sparse_scorer_impl<'a, TVectorStorage: SparseVectorStorage>(
    query: QueryVector,
    vector_storage: &'a TVectorStorage,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    match query {
        QueryVector::Nearest(_vector) => Err(OperationError::service_error(
            "Raw scorer must not be used for nearest queries",
        )),
        QueryVector::RecommendBestScore(reco_query) => {
            let reco_query: RecoQuery<SparseVector> = reco_query.transform_into()?;
            let query_scorer = SparseCustomQueryScorer::<_, _>::new(
                RecoBestScoreQuery::from(reco_query),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::RecommendSumScores(reco_query) => {
            let reco_query: RecoQuery<SparseVector> = reco_query.transform_into()?;
            let query_scorer = SparseCustomQueryScorer::<_, _>::new(
                RecoSumScoresQuery::from(reco_query),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Discovery(discovery_query) => {
            let discovery_query: DiscoveryQuery<SparseVector> = discovery_query.transform_into()?;
            let query_scorer = SparseCustomQueryScorer::<_, _>::new(
                discovery_query,
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Context(context_query) => {
            let context_query: ContextQuery<SparseVector> = context_query.transform_into()?;
            let query_scorer = SparseCustomQueryScorer::<_, _>::new(
                context_query,
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::FeedbackNaive(feedback_query) => {
            let feedback_query: NaiveFeedbackQuery<SparseVector> =
                feedback_query.transform_into()?;
            let query_scorer = SparseCustomQueryScorer::<_, _>::new(
                feedback_query.into_query(),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
    }
}
/// Test convenience wrapper: build a raw scorer with a fresh hardware counter.
#[cfg(feature = "testing")]
pub fn new_raw_scorer_for_test<'a>(
    vector: QueryVector,
    vector_storage: &'a VectorStorageEnum,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    let hw_counter = HardwareCounterCell::new();
    new_raw_scorer(vector, vector_storage, hw_counter)
}
/// Build a raw scorer over dense storage, dispatching on the storage's
/// configured distance metric.
pub fn raw_scorer_impl<
    'a,
    TElement: PrimitiveVectorElement,
    TVectorStorage: DenseVectorStorage<TElement>,
>(
    query: QueryVector,
    vector_storage: &'a TVectorStorage,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>>
where
    CosineMetric: Metric<TElement>,
    EuclidMetric: Metric<TElement>,
    DotProductMetric: Metric<TElement>,
    ManhattanMetric: Metric<TElement>,
{
    match vector_storage.distance() {
        Distance::Cosine => new_scorer_with_metric::<TElement, CosineMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Euclid => new_scorer_with_metric::<TElement, EuclidMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Dot => new_scorer_with_metric::<TElement, DotProductMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Manhattan => new_scorer_with_metric::<TElement, ManhattanMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
    }
}
/// Build a dense raw scorer for a fixed metric `TMetric`, dispatching on the
/// query kind (nearest vs. the custom recommendation/discovery/context/feedback queries).
fn new_scorer_with_metric<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement> + 'a,
    TVectorStorage: DenseVectorStorage<TElement>,
>(
    query: QueryVector,
    vector_storage: &'a TVectorStorage,
    hardware_counter_cell: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    match query {
        QueryVector::Nearest(vector) => {
            let query_scorer = MetricQueryScorer::<_, TMetric, _>::new(
                vector.try_into()?,
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::RecommendBestScore(reco_query) => {
            let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
            let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
                RecoBestScoreQuery::from(reco_query),
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::RecommendSumScores(reco_query) => {
            let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
            let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
                RecoSumScoresQuery::from(reco_query),
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Discovery(discovery_query) => {
            let discovery_query: DiscoveryQuery<DenseVector> = discovery_query.transform_into()?;
            let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
                discovery_query,
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Context(context_query) => {
            let context_query: ContextQuery<DenseVector> = context_query.transform_into()?;
            let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
                context_query,
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::FeedbackNaive(feedback_query) => {
            let feedback_query: NaiveFeedbackQuery<DenseVector> =
                feedback_query.transform_into()?;
            let query_scorer = CustomQueryScorer::<_, TMetric, _, _>::new(
                feedback_query.into_query(),
                vector_storage,
                hardware_counter_cell,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
    }
}
/// Box a concrete [`QueryScorer`] into a trait-object [`RawScorer`].
pub fn raw_scorer_from_query_scorer<'a>(
    query_scorer: impl QueryScorer + 'a,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    Ok(Box::new(RawScorerImpl { query_scorer }))
}
/// Build a raw scorer over multi-dense storage, dispatching on the storage's
/// configured distance metric.
pub fn raw_multi_scorer_impl<
    'a,
    TElement: PrimitiveVectorElement,
    TVectorStorage: MultiVectorStorage<TElement>,
>(
    query: QueryVector,
    vector_storage: &'a TVectorStorage,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>>
where
    CosineMetric: Metric<TElement>,
    EuclidMetric: Metric<TElement>,
    DotProductMetric: Metric<TElement>,
    ManhattanMetric: Metric<TElement>,
{
    match vector_storage.distance() {
        Distance::Cosine => new_multi_scorer_with_metric::<_, CosineMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Euclid => new_multi_scorer_with_metric::<_, EuclidMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Dot => new_multi_scorer_with_metric::<_, DotProductMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
        Distance::Manhattan => new_multi_scorer_with_metric::<_, ManhattanMetric, _>(
            query,
            vector_storage,
            hardware_counter,
        ),
    }
}
/// Build a multi-dense raw scorer for a fixed metric `TMetric`, dispatching on
/// the query kind. Mirrors [`new_scorer_with_metric`] for multi-vector storage.
fn new_multi_scorer_with_metric<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement> + 'a,
    TVectorStorage: MultiVectorStorage<TElement>,
>(
    query: QueryVector,
    vector_storage: &'a TVectorStorage,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    match query {
        QueryVector::Nearest(vector) => {
            let query_scorer = MultiMetricQueryScorer::<_, TMetric, _>::new(
                &vector.try_into()?,
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::RecommendBestScore(reco_query) => {
            let reco_query: RecoQuery<MultiDenseVectorInternal> = reco_query.transform_into()?;
            let query_scorer = MultiCustomQueryScorer::<_, TMetric, _, _>::new(
                RecoBestScoreQuery::from(reco_query),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::RecommendSumScores(reco_query) => {
            let reco_query: RecoQuery<MultiDenseVectorInternal> = reco_query.transform_into()?;
            let query_scorer = MultiCustomQueryScorer::<_, TMetric, _, _>::new(
                RecoSumScoresQuery::from(reco_query),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Discovery(discovery_query) => {
            let discovery_query: DiscoveryQuery<MultiDenseVectorInternal> =
                discovery_query.transform_into()?;
            let query_scorer = MultiCustomQueryScorer::<_, TMetric, _, _>::new(
                discovery_query,
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::Context(context_query) => {
            let context_query: ContextQuery<MultiDenseVectorInternal> =
                context_query.transform_into()?;
            let query_scorer = MultiCustomQueryScorer::<_, TMetric, _, _>::new(
                context_query,
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
        QueryVector::FeedbackNaive(feedback_query) => {
            let feedback_query: NaiveFeedbackQuery<MultiDenseVectorInternal> =
                feedback_query.transform_into()?;
            let query_scorer = MultiCustomQueryScorer::<_, TMetric, _, _>::new(
                feedback_query.into_query(),
                vector_storage,
                hardware_counter,
            );
            raw_scorer_from_query_scorer(query_scorer)
        }
    }
}
impl<TQueryScorer: QueryScorer> RawScorer for RawScorerImpl<TQueryScorer> {
    /// Scores every point into the matching slot of `scores`, feeding the
    /// query scorer in fixed-size batches so it can use batched vector reads.
    fn score_points(&self, points: &[PointOffsetType], scores: &mut [ScoreType]) {
        assert_eq!(points.len(), scores.len());
        // Equal lengths (asserted above) guarantee the zipped chunk pairs line up.
        for (point_batch, score_batch) in points
            .chunks(VECTOR_READ_BATCH_SIZE)
            .zip(scores.chunks_mut(VECTOR_READ_BATCH_SIZE))
        {
            self.query_scorer
                .score_stored_batch(point_batch, score_batch);
        }
    }
    /// Score a single stored vector against the query.
    fn score_point(&self, point: PointOffsetType) -> ScoreType {
        self.query_scorer.score_stored(point)
    }
    /// Score two stored vectors against each other.
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        self.query_scorer.score_internal(point_a, point_b)
    }
    /// Byte-level scorer access, when the underlying query scorer supports it.
    fn scorer_bytes(&self) -> Option<&dyn QueryScorerBytes> {
        QueryScorerBytesImpl::new(&self.query_scorer).map(|s| s as _)
    }
}
#[inline]
pub fn check_deleted_condition(
    point: PointOffsetType,
    vec_deleted: &BitSlice,
    point_deleted: &BitSlice,
) -> bool {
    let index = point as usize;
    // Deleted points propagate their flag to vectors, so the vector flag is the
    // primary check. A flag that was never grown to this index counts as alive.
    let vector_is_deleted = vec_deleted.get_bit(index).unwrap_or(false);
    // Also consult the point flag for integrity, in case delete propagation to
    // the vector failed. A missing ID-tracker mapping counts as deleted.
    let point_is_deleted = point_deleted.get_bit(index).unwrap_or(true);
    !vector_is_deleted && !point_is_deleted
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/chunked_vectors.rs | lib/segment/src/vector_storage/chunked_vectors.rs | use std::cmp::max;
use std::collections::TryReserveError;
use std::mem;
use crate::common::vector_utils::{TrySetCapacity, TrySetCapacityExact};
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::common::CHUNK_SIZE;
/// In-memory storage of fixed-dimension vectors, split into fixed-size chunks
/// so a single huge allocation is never required.
#[derive(Debug)]
pub struct ChunkedVectors<T> {
    /// Vector's dimension.
    ///
    /// Each vector will consume `size_of::<T>() * dim` bytes.
    dim: usize,
    /// Number of stored vectors in all chunks.
    len: usize,
    /// Maximum number of vectors in each chunk.
    chunk_capacity: usize,
    /// Per-chunk flat storage; each chunk holds up to `chunk_capacity * dim` elements.
    chunks: Vec<Vec<T>>,
}
impl<T: Copy + Clone + Default> ChunkedVectors<T> {
    /// Create an empty storage for vectors of dimension `dim`.
    ///
    /// # Panics
    /// Panics if `dim` is 0, or if a single vector would be larger than
    /// `CHUNK_SIZE` bytes (so not even one vector fits in a chunk).
    pub fn new(dim: usize) -> Self {
        assert_ne!(dim, 0, "The vector's dimension cannot be 0");
        let vector_size = dim * mem::size_of::<T>();
        let chunk_capacity = CHUNK_SIZE / vector_size;
        assert_ne!(chunk_capacity, 0, "The vector's size is too big");
        Self {
            dim,
            len: 0,
            chunk_capacity,
            chunks: Vec::new(),
        }
    }
    /// Number of vectors stored across all chunks.
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns `true` if no vectors are stored.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Get the vector stored under `key`.
    ///
    /// # Panics
    /// Panics if `key` is out of bounds; use [`Self::get_opt`] for a checked lookup.
    pub fn get(&self, key: VectorOffsetType) -> &[T] {
        self.get_opt(key).expect("vector not found")
    }
    /// Get the vector stored under `key`, or `None` if `key` is out of bounds.
    pub fn get_opt(&self, key: VectorOffsetType) -> Option<&[T]> {
        if self.chunks.is_empty() {
            return None;
        }
        self.chunks
            .get(key / self.chunk_capacity)
            .and_then(|chunk_data| {
                // Flattened element offset of the vector inside its chunk.
                let idx = (key % self.chunk_capacity) * self.dim;
                let range = idx..idx + self.dim;
                chunk_data.get(range)
            })
    }
    /// Get `count` consecutive vectors starting at `key` as one flat slice.
    ///
    /// Returns `None` if the range is not fully available. All `count` vectors
    /// must live in the same chunk; callers ensure this by checking
    /// [`Self::get_chunk_left_keys`] before inserting.
    pub fn get_many(&self, key: VectorOffsetType, count: usize) -> Option<&[T]> {
        if self.chunks.is_empty() {
            return None;
        }
        self.chunks
            .get(key / self.chunk_capacity)
            .and_then(|chunk_data| {
                // Flattened element offset of the first requested vector.
                let idx = (key % self.chunk_capacity) * self.dim;
                let range = idx..idx + count * self.dim;
                chunk_data.get(range)
            })
    }
    /// Append a vector at the next free key and return that key.
    pub fn push(&mut self, vector: &[T]) -> Result<VectorOffsetType, TryReserveError> {
        let new_id = self.len;
        self.insert(new_id, vector)?;
        Ok(new_id)
    }
    // returns how many flattened vectors can be inserted starting from key
    // (i.e. how many slots remain before the chunk boundary is crossed)
    pub fn get_chunk_left_keys(&self, start_key: VectorOffsetType) -> usize {
        self.chunk_capacity - (start_key % self.chunk_capacity)
    }
    /// Insert a single vector under `key`, growing storage as required.
    ///
    /// # Panics
    /// Panics if `vector` does not have exactly `dim` elements.
    pub fn insert(&mut self, key: VectorOffsetType, vector: &[T]) -> Result<(), TryReserveError> {
        assert_eq!(vector.len(), self.dim, "Vector size mismatch");
        self.insert_many(key, vector, 1)
    }
    /// Insert `vectors_count` vectors, given as one flat slice, starting at `key`.
    ///
    /// Allocation failures are reported as `TryReserveError` instead of aborting,
    /// and `self.len` is only updated after the copy succeeds (OOM safety).
    ///
    /// # Panics
    /// Panics if the slice length is not `vectors_count * dim`, or if the vectors
    /// would cross the chunk boundary containing `key`.
    pub fn insert_many(
        &mut self,
        key: VectorOffsetType,
        vectors: &[T],
        vectors_count: usize,
    ) -> Result<(), TryReserveError> {
        assert_eq!(
            vectors.len(),
            vectors_count * self.dim,
            "Vector size mismatch"
        );
        assert!(
            self.get_chunk_left_keys(key) >= vectors_count,
            "Index out of bounds"
        );
        let desired_capacity = self.chunk_capacity * self.dim;
        let new_len = max(self.len, key + vectors_count);
        let chunks_len = new_len.div_ceil(self.chunk_capacity);
        if chunks_len > self.chunks.len() {
            // All chunks except the last one should be fully allocated.
            // If we are going to add new chunks, resize last one which may be partially allocated.
            if let Some(last_chunk) = self.chunks.last_mut() {
                last_chunk.try_set_capacity_exact(desired_capacity)?;
                last_chunk.resize_with(desired_capacity, T::default);
            }
            self.chunks.try_set_capacity(chunks_len)?;
            let new_chunks = chunks_len - self.chunks.len();
            let skipped_chunks = new_chunks - 1;
            // All skipped chunks should be fully allocated.
            for _ in 0..skipped_chunks {
                let mut chunk = Vec::new();
                chunk.try_set_capacity_exact(desired_capacity)?;
                chunk.resize_with(desired_capacity, T::default);
                self.chunks.push(chunk);
            }
            // Add new chunk with lower capacity.
            self.chunks.push(Default::default());
            assert_eq!(self.chunks.len(), chunks_len);
        }
        let chunk_idx = key / self.chunk_capacity;
        let chunk_data = &mut self.chunks[chunk_idx];
        let idx = (key % self.chunk_capacity) * self.dim;
        // Grow the current chunk if needed to fit the new vector.
        //
        // All chunks are dynamically resized to fit their vectors in it.
        // Chunks have a size of zero by default. It's grown with zeroes to fit new vectors.
        //
        // The capacity for the first chunk is allocated normally to keep the memory footprint as
        // small as possible, see
        // <https://doc.rust-lang.org/std/vec/struct.Vec.html#capacity-and-reallocation>).
        // All other chunks allocate their capacity in full on first use to prevent expensive
        // reallocations when their data grows.
        if chunk_data.len() < idx + vectors.len() {
            // If the chunk is not the first one, allocate it fully on first use
            if chunk_idx != 0 {
                chunk_data.try_set_capacity_exact(desired_capacity)?;
            }
            chunk_data.resize_with(idx + vectors.len(), T::default);
        }
        let data = &mut chunk_data[idx..idx + vectors.len()];
        data.copy_from_slice(vectors);
        // Update `self.len` only after the vector is successfully inserted.
        // In case of OOM, `self.len` will not be updated.
        self.len = new_len;
        Ok(())
    }
}
impl<T: Clone> TrySetCapacityExact for ChunkedVectors<T> {
    /// Pre-allocate space for exactly `capacity` vectors.
    ///
    /// All chunks except the final, possibly partial, one reserve their full
    /// size; the last chunk only reserves what the remaining vectors need.
    fn try_set_capacity_exact(&mut self, capacity: usize) -> Result<(), TryReserveError> {
        let num_chunks = capacity.div_ceil(self.chunk_capacity);
        let last_chunk_idx = capacity / self.chunk_capacity;
        self.chunks.try_set_capacity_exact(num_chunks)?;
        self.chunks.resize_with(num_chunks, Vec::new);
        for (chunk_idx, chunk) in self.chunks.iter_mut().enumerate() {
            let vectors_in_chunk = if chunk_idx == last_chunk_idx {
                capacity % self.chunk_capacity
            } else {
                self.chunk_capacity
            };
            chunk.try_set_capacity_exact(vectors_in_chunk * self.dim)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Inserting at a far-away key must transparently allocate every
    /// intermediate chunk, and those skipped chunks must be fully grown
    /// (readable, zero-filled).
    #[test]
    fn test_chunked_vectors_with_skipped_chunks() {
        let mut vectors = ChunkedVectors::new(3);
        assert_eq!(vectors.get_opt(0), None);
        vectors.insert(0, &[1, 2, 3]).unwrap();
        // Key far beyond the first chunk forces many intermediate chunks.
        vectors.insert(10_000_000, &[4, 5, 6]).unwrap();
        assert!(vectors.chunks.len() > 3);
        assert_eq!(vectors.get(0), &[1, 2, 3]);
        assert_eq!(vectors.get(10_000_000), &[4, 5, 6]);
        assert_eq!(vectors.get_opt(10_000_001), None);
        // check if first chunk is fully allocated
        assert_eq!(vectors.get(100), &[0, 0, 0]);
        // check if middle chunk is fully allocated
        assert_eq!(vectors.get(5_000_000), &[0, 0, 0]);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/async_io.rs | lib/segment/src/vector_storage/async_io.rs | use std::fmt;
use std::os::fd::AsRawFd;
use common::types::PointOffsetType;
use fs_err::File;
use io_uring::{IoUring, opcode, types};
use memory::mmap_ops::transmute_from_u8_to_slice;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::primitive::PrimitiveVectorElement;
const DISK_PARALLELISM: usize = 16; // TODO: benchmark it better, or make it configurable
/// Bookkeeping for one in-flight read: which point it belongs to and where it
/// sits in the caller's iteration order.
#[derive(Debug)]
struct BufferMeta {
    /// Sequential index of the processing point (position in the input iterator)
    pub index: usize,
    /// Id of the point that is currently being processed
    pub point_id: PointOffsetType,
}
/// One reusable read buffer plus the metadata of its current in-flight read.
#[derive(Debug)]
struct Buffer {
    /// Stores the buffer for the point vectors
    pub buffer: Vec<u8>,
    /// Metadata of the read currently using this buffer; `None` when the buffer is free.
    pub meta: Option<BufferMeta>,
}
/// Fixed-size pool of read buffers shared across io_uring submissions.
#[derive(Debug)]
struct BufferStore {
    /// Stores the buffer for the point vectors
    pub buffers: Vec<Buffer>,
}
impl BufferStore {
    /// Allocate `num_buffers` zero-initialized buffers of `buffer_raw_size` bytes each.
    pub fn new(num_buffers: usize, buffer_raw_size: usize) -> Self {
        let mut buffers = Vec::with_capacity(num_buffers);
        for _ in 0..num_buffers {
            buffers.push(Buffer {
                buffer: vec![0; buffer_raw_size],
                meta: None,
            });
        }
        Self { buffers }
    }
}
/// io_uring-based batched reader of fixed-size vector records from a file.
pub struct UringReader<T: PrimitiveVectorElement> {
    file: File,
    /// Pool of reusable read buffers, one per in-flight operation.
    buffers: BufferStore,
    /// Reusable ring; taken out during `read_stream` so it is dropped on error/panic.
    io_uring: Option<IoUring>,
    /// Size in bytes of one vector record.
    raw_size: usize,
    /// Byte offset of the first record in the file.
    header_size: usize,
    _phantom: std::marker::PhantomData<T>,
}
impl<T: PrimitiveVectorElement> fmt::Debug for UringReader<T> {
    /// Manual `Debug`: the `io_uring` handle is intentionally omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Label the output with the actual type name; the previous "VectorData"
        // label was a copy-paste leftover that made debug output misleading.
        f.debug_struct("UringReader")
            .field("file", &self.file)
            .field("buffers", &self.buffers)
            .field("raw_size", &self.raw_size)
            .field("header_size", &self.header_size)
            .field("_phantom", &self._phantom)
            .finish_non_exhaustive()
    }
}
impl<T: PrimitiveVectorElement> UringReader<T> {
    /// Create a reader over `file` where each record is `raw_size` bytes and
    /// the first record starts at byte offset `header_size`.
    pub fn new(file: File, raw_size: usize, header_size: usize) -> OperationResult<Self> {
        let buffers = BufferStore::new(DISK_PARALLELISM, raw_size);
        let io_uring = IoUring::new(DISK_PARALLELISM as _)?;
        Ok(Self {
            file,
            buffers,
            io_uring: Some(io_uring),
            raw_size,
            header_size,
            _phantom: std::marker::PhantomData,
        })
    }
    /// Takes in iterator of point offsets, reads it, and yields a callback with the read data.
    ///
    /// The callback receives `(sequential_index, point_id, vector_elements)`.
    /// Note: completions may be delivered out of input order; use the index to
    /// correlate results with inputs.
    pub fn read_stream(
        &mut self,
        points: impl IntoIterator<Item = PointOffsetType>,
        mut callback: impl FnMut(usize, PointOffsetType, &[T]),
    ) -> OperationResult<()> {
        // Take `UringReader::io_uring`, so that if we return an error or panic during `read_stream`,
        // `IoUring` would be transparently dropped.
        let mut io_uring = match self.io_uring.take() {
            // Use existing `IoUring` if there's one...
            Some(io_uring) => io_uring,
            // ...or create a new one if not
            None => IoUring::new(DISK_PARALLELISM as _)?,
        };
        let buffers_count = self.buffers.buffers.len();
        let mut unused_buffer_ids = (0..buffers_count).collect::<Vec<_>>();
        for item in points.into_iter().enumerate() {
            let (idx, point): (usize, PointOffsetType) = item;
            // All buffers are in flight: drain completions to free at least one.
            if unused_buffer_ids.is_empty() {
                submit_and_read(
                    &mut io_uring,
                    &mut self.buffers,
                    &mut unused_buffer_ids,
                    &mut callback,
                    self.raw_size,
                )?;
            }
            // Assume there is at least one buffer available at this point
            let buffer_id = unused_buffer_ids.pop().unwrap();
            self.buffers.buffers[buffer_id].meta = Some(BufferMeta {
                index: idx,
                point_id: point,
            });
            let buffer = &mut self.buffers.buffers[buffer_id].buffer;
            // Byte offset of this point's record in the file.
            let offset = self.header_size + self.raw_size * point as usize;
            // Buffer id doubles as completion correlation token.
            let user_data = buffer_id;
            let read_e = opcode::Read::new(
                types::Fd(self.file.as_raw_fd()),
                buffer.as_mut_ptr(),
                buffer.len() as _,
            )
            .offset(offset as _)
            .build()
            .user_data(user_data as _);
            unsafe {
                // SAFETY: the target buffer is owned by `self.buffers` and stays
                // alive until its completion is consumed in `submit_and_read`.
                // At most `buffers_count` reads are ever in flight, matching the
                // ring size, and a full submission queue is reported as an error.
                io_uring.submission().push(&read_e).map_err(|err| {
                    OperationError::service_error(format!("Failed using io-uring: {err}"))
                })?;
            }
        }
        // Drain all reads still in flight and deliver their callbacks.
        let mut operations_to_wait_for = self.buffers.buffers.len() - unused_buffer_ids.len();
        while operations_to_wait_for > 0 {
            submit_and_read(
                &mut io_uring,
                &mut self.buffers,
                &mut unused_buffer_ids,
                &mut callback,
                self.raw_size,
            )?;
            operations_to_wait_for = self.buffers.buffers.len() - unused_buffer_ids.len();
        }
        // Keep the ring for reuse by the next `read_stream` call.
        self.io_uring = Some(io_uring);
        Ok(())
    }
}
/// Submit queued read operations, wait for completions, invoke `callback` for
/// each finished read, and return the finished buffers to `unused_buffer_ids`.
fn submit_and_read<T: PrimitiveVectorElement>(
    io_uring: &mut IoUring,
    buffers: &mut BufferStore,
    unused_buffer_ids: &mut Vec<usize>,
    mut callback: impl FnMut(usize, PointOffsetType, &[T]),
    raw_size: usize,
) -> OperationResult<()> {
    let buffers_count = buffers.buffers.len();
    let used_buffers_count = buffers_count - unused_buffer_ids.len();
    // Wait for at least one buffer to become available
    // NOTE(review): `submit_and_wait(n)` blocks until `n` completions are
    // available, and `used_buffers_count` is the number of in-flight reads —
    // so this appears to wait for *all* of them, not just one. Confirm against
    // the io-uring crate docs whether this is intended.
    io_uring.submit_and_wait(used_buffers_count)?;
    let cqe = io_uring.completion();
    for entry in cqe {
        let result = entry.result();
        // Negative result is an errno-style failure; a short read is also fatal
        // because every record must be exactly `raw_size` bytes.
        if result < 0 {
            return Err(OperationError::service_error(format!(
                "io_uring operation failed with {result} error"
            )));
        } else if (result as usize) != raw_size {
            return Err(OperationError::service_error(format!(
                "io_uring operation returned {result} bytes instead of {raw_size}"
            )));
        }
        // `user_data` was set to the buffer id on submission.
        let buffer_id = entry.user_data() as usize;
        let meta = buffers.buffers[buffer_id].meta.take().unwrap();
        let buffer = &buffers.buffers[buffer_id].buffer;
        let vector = transmute_from_u8_to_slice(buffer);
        callback(meta.index, meta.point_id, vector);
        // The read is finished; the buffer may be reused for the next submission.
        unused_buffer_ids.push(buffer_id);
    }
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/multi_dense/simple_multi_dense_vector_storage.rs | lib/segment/src/vector_storage/multi_dense/simple_multi_dense_vector_storage.rs | use std::fmt;
use std::ops::Range;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use parking_lot::RwLock;
use rocksdb::DB;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::common::rocksdb_wrapper::DatabaseColumnWrapper;
use crate::data_types::named_vectors::{CowMultiVector, CowVector};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
TypedMultiDenseVector, TypedMultiDenseVectorRef, VectorElementType, VectorRef,
};
use crate::types::{Distance, MultiVectorConfig, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::chunked_vectors::ChunkedVectors;
use crate::vector_storage::common::{CHUNK_SIZE, StoredRecord};
use crate::vector_storage::{AccessPattern, MultiVectorStorage, VectorStorage, VectorStorageEnum};
type StoredMultiDenseVector<T> = StoredRecord<TypedMultiDenseVector<T>>;
/// Per-point location of its inner vectors inside the flat `ChunkedVectors`.
///
/// All fields are counting vectors and not dimensions.
#[derive(Debug, Clone, Default)]
struct MultiVectorMetadata {
    /// Point offset this metadata belongs to.
    id: VectorOffsetType,
    /// Key of the first inner vector in the flat vector storage.
    start: VectorOffsetType,
    /// Number of inner vectors currently stored for the point.
    inner_vectors_count: usize,
    /// Number of inner-vector slots reserved at `start`; re-used on overwrite
    /// when the new multi-vector fits.
    inner_vector_capacity: usize,
}
/// In-memory vector storage with on-update persistence using `store`
pub struct SimpleMultiDenseVectorStorage<T: PrimitiveVectorElement> {
    /// Dimensionality of each inner vector.
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    /// Keep vectors in memory
    vectors: ChunkedVectors<T>,
    /// Per-point metadata, indexed by point offset.
    vectors_metadata: Vec<MultiVectorMetadata>,
    /// RocksDB column used to persist records on every update.
    db_wrapper: DatabaseColumnWrapper,
    /// BitVec for deleted flags. Grows dynamically up to last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
}
impl<T: fmt::Debug + PrimitiveVectorElement> fmt::Debug for SimpleMultiDenseVectorStorage<T> {
    // Manual Debug: the `deleted` bitvec is omitted (only its count is shown),
    // hence `finish_non_exhaustive`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SimpleMultiDenseVectorStorage")
            .field("dim", &self.dim)
            .field("distance", &self.distance)
            .field("multi_vector_config", &self.multi_vector_config)
            .field("vectors", &self.vectors)
            .field("vectors_metadata", &self.vectors_metadata)
            .field("db_wrapper", &self.db_wrapper)
            .field("deleted_count", &self.deleted_count)
            .finish_non_exhaustive()
    }
}
pub fn open_simple_multi_dense_vector_storage(
storage_element_type: VectorStorageDatatype,
database: Arc<RwLock<DB>>,
database_column_name: &str,
dim: usize,
distance: Distance,
multi_vector_config: MultiVectorConfig,
stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
match storage_element_type {
VectorStorageDatatype::Float32 => open_simple_multi_dense_vector_storage_full(
database,
database_column_name,
dim,
distance,
multi_vector_config,
stopped,
),
VectorStorageDatatype::Uint8 => open_simple_multi_dense_vector_storage_byte(
database,
database_column_name,
dim,
distance,
multi_vector_config,
stopped,
),
VectorStorageDatatype::Float16 => open_simple_multi_dense_vector_storage_half(
database,
database_column_name,
dim,
distance,
multi_vector_config,
stopped,
),
}
}
/// Open a float32 RocksDB-backed multi-dense vector storage.
pub fn open_simple_multi_dense_vector_storage_full(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    // Open the generic storage and wrap it in the f32 enum variant.
    open_simple_multi_dense_vector_storage_impl(
        database,
        database_column_name,
        dim,
        distance,
        multi_vector_config,
        stopped,
    )
    .map(VectorStorageEnum::MultiDenseSimple)
}
/// Open a uint8 RocksDB-backed multi-dense vector storage.
pub fn open_simple_multi_dense_vector_storage_byte(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    // Open the generic storage and wrap it in the byte enum variant.
    open_simple_multi_dense_vector_storage_impl(
        database,
        database_column_name,
        dim,
        distance,
        multi_vector_config,
        stopped,
    )
    .map(VectorStorageEnum::MultiDenseSimpleByte)
}
/// Open a float16 RocksDB-backed multi-dense vector storage.
pub fn open_simple_multi_dense_vector_storage_half(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    // Open the generic storage and wrap it in the half-precision enum variant.
    open_simple_multi_dense_vector_storage_impl(
        database,
        database_column_name,
        dim,
        distance,
        multi_vector_config,
        stopped,
    )
    .map(VectorStorageEnum::MultiDenseSimpleHalf)
}
/// Load all persisted multi-dense vectors from the given RocksDB column into
/// memory, rebuilding the per-point metadata and the deleted bitset.
///
/// Checks `stopped` after every record so a long load can be cancelled.
fn open_simple_multi_dense_vector_storage_impl<T: PrimitiveVectorElement>(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    stopped: &AtomicBool,
) -> OperationResult<SimpleMultiDenseVectorStorage<T>> {
    let mut vectors = ChunkedVectors::new(dim);
    let mut vectors_metadata = Vec::<MultiVectorMetadata>::new();
    let (mut deleted, mut deleted_count) = (BitVec::new(), 0);
    let db_wrapper = DatabaseColumnWrapper::new(database, database_column_name);
    // NOTE(review): the result of this first `iter()?` is discarded and the
    // loop below re-creates the iterator — looks redundant; confirm whether it
    // exists to surface DB/column-family errors before the loop.
    db_wrapper.lock_db().iter()?;
    for (key, value) in db_wrapper.lock_db().iter()? {
        let point_id: PointOffsetType = bincode::deserialize(&key)
            .map_err(|_| OperationError::service_error("cannot deserialize point id from db"))?;
        let stored_record: StoredMultiDenseVector<T> = bincode::deserialize(&value)
            .map_err(|_| OperationError::service_error("cannot deserialize record from db"))?;
        // Propagate deleted flag
        if stored_record.deleted {
            bitvec_set_deleted(&mut deleted, point_id, true);
            deleted_count += 1;
        }
        let point_id_usize = point_id as usize;
        if point_id_usize >= vectors_metadata.len() {
            vectors_metadata.resize(point_id_usize + 1, Default::default());
        }
        let metadata = &mut vectors_metadata[point_id_usize];
        metadata.inner_vectors_count = stored_record.vector.vectors_count();
        metadata.inner_vector_capacity = metadata.inner_vectors_count;
        metadata.id = point_id as VectorOffsetType;
        metadata.start = vectors.len();
        // Inner vectors of one point must not straddle a chunk boundary: if
        // they would not fit, skip ahead to the start of the next chunk.
        let left_keys = vectors.get_chunk_left_keys(metadata.start);
        if stored_record.vector.vectors_count() > left_keys {
            metadata.start += left_keys;
        }
        vectors.insert_many(
            metadata.start,
            &stored_record.vector.flattened_vectors,
            stored_record.vector.vectors_count(),
        )?;
        check_process_stopped(stopped)?;
    }
    Ok(SimpleMultiDenseVectorStorage {
        dim,
        distance,
        multi_vector_config,
        vectors,
        vectors_metadata,
        db_wrapper,
        deleted,
        deleted_count,
    })
}
impl<T: PrimitiveVectorElement> SimpleMultiDenseVectorStorage<T> {
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Un-deleting a key past the end of storage is a no-op.
        // NOTE(review): this compares against `self.vectors.len()` (total inner
        // vector count) while `key` is a point offset indexing
        // `vectors_metadata` elsewhere — confirm the intended bound.
        if !deleted && key as usize >= self.vectors.len() {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Only adjust the counter on an actual state transition.
        if was_deleted != deleted {
            if !was_deleted {
                self.deleted_count += 1;
            } else {
                self.deleted_count = self.deleted_count.saturating_sub(1);
            }
        }
        was_deleted
    }
    /// Persist the record for `key` to RocksDB, accounting the written bytes
    /// in `hw_counter`. `vector: None` stores a placeholder (used for deletes).
    fn update_stored(
        &self,
        key: PointOffsetType,
        deleted: bool,
        vector: Option<TypedMultiDenseVectorRef<'_, T>>,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let mut record = StoredMultiDenseVector {
            deleted,
            vector: TypedMultiDenseVector::placeholder(self.dim),
        };
        if let Some(vector) = vector {
            record.vector.dim = vector.dim;
            record.vector.flattened_vectors.clear();
            record
                .vector
                .flattened_vectors
                .extend_from_slice(vector.flattened_vectors);
        }
        let key_enc = bincode::serialize(&key).unwrap();
        let record_enc = bincode::serialize(&record).unwrap();
        hw_counter
            .vector_io_write_counter()
            .incr_delta(key_enc.len() + record_enc.len());
        // Store updated record
        self.db_wrapper.put(key_enc, record_enc)?;
        Ok(())
    }
    /// Insert (or overwrite) the multi-vector for `key` in memory and persist it.
    ///
    /// # Panics
    /// Panics if the vector's dimension does not match the storage dimension.
    fn insert_vector_impl(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        is_deleted: bool,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let multi_vector: TypedMultiDenseVectorRef<VectorElementType> = vector.try_into()?;
        let multi_vector = T::from_float_multivector(CowMultiVector::Borrowed(multi_vector));
        let multi_vector = multi_vector.as_vec_ref();
        assert_eq!(multi_vector.dim, self.dim);
        // One multi-vector must fit into a single chunk of the flat storage.
        let multivector_size_in_bytes = std::mem::size_of_val(multi_vector.flattened_vectors);
        if multivector_size_in_bytes >= CHUNK_SIZE {
            return Err(OperationError::service_error(format!(
                "Cannot insert multi vector of size {multivector_size_in_bytes} to the vector storage. It's too large, maximum size is {CHUNK_SIZE}.",
            )));
        }
        let key_usize = key as usize;
        if key_usize >= self.vectors_metadata.len() {
            self.vectors_metadata
                .resize(key_usize + 1, Default::default());
        }
        let metadata = &mut self.vectors_metadata[key_usize];
        metadata.id = key as VectorOffsetType;
        metadata.inner_vectors_count = multi_vector.vectors_count();
        if multi_vector.vectors_count() > metadata.inner_vector_capacity {
            // New data does not fit into the previously reserved slots:
            // allocate a fresh region at the end of the flat storage, skipping
            // to the next chunk if the vectors would straddle a boundary.
            metadata.inner_vector_capacity = metadata.inner_vectors_count;
            metadata.start = self.vectors.len();
            let left_keys = self.vectors.get_chunk_left_keys(metadata.start);
            if multi_vector.vectors_count() > left_keys {
                metadata.start += left_keys;
            }
            self.vectors.insert_many(
                metadata.start,
                multi_vector.flattened_vectors,
                multi_vector.vectors_count(),
            )?;
        } else {
            // New data fits into the existing reserved region: overwrite in place.
            self.vectors.insert_many(
                metadata.start,
                multi_vector.flattened_vectors,
                multi_vector.vectors_count(),
            )?;
        }
        self.set_deleted(key, is_deleted);
        self.update_stored(key, is_deleted, Some(multi_vector), hw_counter)?;
        Ok(())
    }
    /// Destroy this vector storage, remove persisted data from RocksDB
    pub fn destroy(&self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()?;
        Ok(())
    }
}
impl<T: PrimitiveVectorElement> MultiVectorStorage<T> for SimpleMultiDenseVectorStorage<T> {
    /// Dimensionality of each inner vector.
    fn vector_dim(&self) -> usize {
        self.dim
    }
    /// Panics if key is out of bounds
    fn get_multi<P: AccessPattern>(&self, key: PointOffsetType) -> TypedMultiDenseVectorRef<'_, T> {
        self.get_multi_opt::<P>(key).expect("vector not found")
    }
    /// None if key is out of bounds
    fn get_multi_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> Option<TypedMultiDenseVectorRef<'_, T>> {
        // No sequential optimizations available for in memory storage.
        self.vectors_metadata.get(key as usize).map(|metadata| {
            // All inner vectors of a point are contiguous in one chunk, so
            // they can be returned as a single flat slice.
            let flattened_vectors = self
                .vectors
                .get_many(metadata.start, metadata.inner_vectors_count)
                .unwrap_or_else(|| panic!("Vectors does not contain data for {metadata:?}"));
            TypedMultiDenseVectorRef {
                flattened_vectors,
                dim: self.dim,
            }
        })
    }
    /// Iterate over every inner vector of every point, in point order.
    fn iterate_inner_vectors(&self) -> impl Iterator<Item = &[T]> + Clone + Send {
        (0..self.total_vector_count()).flat_map(|key| {
            let metadata = &self.vectors_metadata[key];
            (0..metadata.inner_vectors_count).map(|i| self.vectors.get(metadata.start + i))
        })
    }
    fn multi_vector_config(&self) -> &MultiVectorConfig {
        &self.multi_vector_config
    }
    /// Estimated bytes used by non-deleted vectors: the total storage size
    /// scaled by the available/total point ratio (u128 math avoids overflow).
    fn size_of_available_vectors_in_bytes(&self) -> usize {
        if self.total_vector_count() > 0 {
            let total_size = self.vectors.len() * self.vector_dim() * std::mem::size_of::<T>();
            (total_size as u128 * self.available_vector_count() as u128
                / self.total_vector_count() as u128) as usize
        } else {
            0
        }
    }
}
impl<T: PrimitiveVectorElement> VectorStorage for SimpleMultiDenseVectorStorage<T> {
    fn distance(&self) -> Distance {
        self.distance
    }
    // NOTE(review): always reports Float32, even though this storage is generic
    // over byte/half element types as well — confirm this is intended.
    fn datatype(&self) -> VectorStorageDatatype {
        VectorStorageDatatype::Float32
    }
    /// Vectors are kept in memory; RocksDB is only the persistence backend.
    fn is_on_disk(&self) -> bool {
        false
    }
    fn total_vector_count(&self) -> usize {
        self.vectors_metadata.len()
    }
    /// Panics if `key` is out of bounds.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }
    /// `None` if `key` is out of bounds; converts stored elements back to f32.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        self.get_multi_opt::<P>(key).map(|multi_dense_vector| {
            CowVector::MultiDense(T::into_float_multivector(CowMultiVector::Borrowed(
                multi_dense_vector,
            )))
        })
    }
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.insert_vector_impl(key, vector, false, hw_counter)
    }
    /// Append all vectors from `other_vectors`, preserving their deleted flags.
    /// Returns the range of newly assigned point offsets.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.vectors_metadata.len() as PointOffsetType;
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector: VectorRef = other_vector.as_vec_ref();
            let new_id = self.vectors_metadata.len() as PointOffsetType;
            self.insert_vector_impl(
                new_id,
                other_vector,
                other_deleted,
                &HardwareCounterCell::disposable(), // This function is only used by internal operations
            )?;
        }
        let end_index = self.vectors_metadata.len() as PointOffsetType;
        Ok(start_index..end_index)
    }
    fn flusher(&self) -> Flusher {
        self.db_wrapper.flusher()
    }
    /// No standalone files: all persisted data lives inside RocksDB.
    fn files(&self) -> Vec<std::path::PathBuf> {
        vec![]
    }
    /// Mark `key` deleted; returns `true` if it was newly deleted and persists
    /// the deletion marker in that case.
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        // `set_deleted` returns the *previous* state, so negate it.
        let is_deleted = !self.set_deleted(key, true);
        if is_deleted {
            // We don't measure deletions.
            self.update_stored(key, true, None, &HardwareCounterCell::disposable())?;
        }
        Ok(is_deleted)
    }
    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }
    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }
    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
#[cfg(test)]
mod tests {
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use tempfile::Builder;
    use super::*;
    use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
    use crate::data_types::vectors::MultiDenseVectorInternal;
    use crate::segment_constructor::migrate_rocksdb_multi_dense_vector_storage_to_mmap;
    use crate::vector_storage::Sequential;
    const RAND_SEED: u64 = 42;
    /// Create RocksDB based multi dense vector storage.
    ///
    /// Migrate it to the mmap based multi dense vector storage and assert vector data is correct.
    ///
    /// Verification replays the same seeded RNG sequence instead of keeping the
    /// generated data around, so the draw order must match the insert loop exactly.
    #[test]
    fn test_migrate_simple_to_mmap() {
        const POINT_COUNT: PointOffsetType = 128;
        const DIM: usize = 128;
        const DELETE_PROBABILITY: f64 = 0.1;
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let multi_vector_config = MultiVectorConfig::default();
        let db_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let db = open_db(db_dir.path(), &[DB_VECTOR_CF]).unwrap();
        // Create simple multi dense vector storage, insert test points and delete some of them again
        let mut storage = open_simple_multi_dense_vector_storage_full(
            db,
            DB_VECTOR_CF,
            DIM,
            Distance::Dot,
            multi_vector_config,
            &AtomicBool::new(false),
        )
        .unwrap();
        for internal_id in 0..POINT_COUNT {
            // Each point gets 1..=4 random inner vectors.
            let size = rng.random_range(1..=4);
            let vectors = std::iter::repeat_with(|| {
                std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                    .take(DIM)
                    .collect()
            })
            .take(size)
            .collect::<Vec<Vec<_>>>();
            let multivec = MultiDenseVectorInternal::try_from(vectors).unwrap();
            storage
                .insert_vector(
                    internal_id,
                    VectorRef::from(&multivec),
                    &HardwareCounterCell::disposable(),
                )
                .unwrap();
            if rng.random_bool(DELETE_PROBABILITY) {
                storage.delete_vector(internal_id).unwrap();
            }
        }
        let deleted_vector_count = storage.deleted_vector_count();
        let total_vector_count = storage.total_vector_count();
        // Migrate from RocksDB to mmap storage
        let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let new_storage = migrate_rocksdb_multi_dense_vector_storage_to_mmap(
            &storage,
            DIM,
            multi_vector_config,
            storage_dir.path(),
        )
        .expect("failed to migrate from RocksDB to mmap");
        // Destroy persisted RocksDB dense vector data
        match storage {
            VectorStorageEnum::MultiDenseSimple(storage) => storage.destroy().unwrap(),
            VectorStorageEnum::MultiDenseSimpleByte(storage) => storage.destroy().unwrap(),
            VectorStorageEnum::MultiDenseSimpleHalf(storage) => storage.destroy().unwrap(),
            _ => unreachable!("unexpected vector storage type"),
        }
        // We can drop RocksDB storage now
        db_dir.close().expect("failed to drop RocksDB storage");
        // Assert vector counts and data
        // Re-seed the RNG so the verification loop regenerates the exact data
        // (and deletion decisions) produced during insertion.
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        assert_eq!(new_storage.deleted_vector_count(), deleted_vector_count);
        assert_eq!(new_storage.total_vector_count(), total_vector_count);
        for internal_id in 0..POINT_COUNT {
            let size = rng.random_range(1..=4);
            let vectors = std::iter::repeat_with(|| {
                std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                    .take(DIM)
                    .collect()
            })
            .take(size)
            .collect::<Vec<Vec<_>>>();
            let multivec = MultiDenseVectorInternal::try_from(vectors).unwrap();
            assert_eq!(
                new_storage.get_vector::<Sequential>(internal_id),
                CowVector::from(&multivec),
            );
            assert_eq!(
                new_storage.is_deleted_vector(internal_id),
                rng.random_bool(DELETE_PROBABILITY)
            );
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/multi_dense/mod.rs | lib/segment/src/vector_storage/multi_dense/mod.rs | pub mod appendable_mmap_multi_dense_vector_storage;
#[cfg(feature = "rocksdb")]
pub mod simple_multi_dense_vector_storage;
pub mod volatile_multi_dense_vector_storage;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/multi_dense/volatile_multi_dense_vector_storage.rs | lib/segment/src/vector_storage/multi_dense/volatile_multi_dense_vector_storage.rs | use std::fmt;
use std::ops::Range;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::named_vectors::{CowMultiVector, CowVector};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{TypedMultiDenseVectorRef, VectorElementType, VectorRef};
use crate::types::{Distance, MultiVectorConfig, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::chunked_vectors::ChunkedVectors;
use crate::vector_storage::common::CHUNK_SIZE;
use crate::vector_storage::{AccessPattern, MultiVectorStorage, VectorStorage, VectorStorageEnum};
/// Per-point location of its inner vectors inside the flat `ChunkedVectors`;
/// presumably mirrors the layout used by the RocksDB-backed variant — the
/// consuming impl is defined below this struct.
///
/// All fields are counting vectors and not dimensions.
#[derive(Debug, Clone, Default)]
struct MultiVectorMetadata {
    /// Point offset this metadata belongs to.
    id: VectorOffsetType,
    /// Key of the first inner vector in the flat vector storage.
    start: VectorOffsetType,
    /// Number of inner vectors currently stored for the point.
    inner_vectors_count: usize,
    /// Number of inner-vector slots reserved at `start`.
    inner_vector_capacity: usize,
}
/// Volatile in-memory multi-dense vector storage with no persistence:
/// its `flusher` is a no-op and `files()` is empty, so all data is lost on
/// restart. Intended for cases where durability is handled elsewhere or not
/// needed at all.
pub struct VolatileMultiDenseVectorStorage<T: PrimitiveVectorElement> {
    // Dimension of a single inner vector.
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    /// Keep vectors in memory; all inner vectors of all points, flattened.
    vectors: ChunkedVectors<T>,
    // Maps point offset -> location of its inner vectors in `vectors`.
    vectors_metadata: Vec<MultiVectorMetadata>,
    /// BitVec for deleted flags. Grows dynamically up to last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
}
// Manual Debug impl: the `deleted` bitvec is intentionally omitted
// (hence `finish_non_exhaustive`) to keep the output compact.
impl<T: fmt::Debug + PrimitiveVectorElement> fmt::Debug for VolatileMultiDenseVectorStorage<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("VolatileMultiDenseVectorStorage")
            .field("dim", &self.dim)
            .field("distance", &self.distance)
            .field("multi_vector_config", &self.multi_vector_config)
            .field("vectors", &self.vectors)
            .field("vectors_metadata", &self.vectors_metadata)
            .field("deleted_count", &self.deleted_count)
            .finish_non_exhaustive()
    }
}
/// Construct a volatile (non-persistent) multi-dense vector storage with
/// float32 elements, wrapped in the storage enum.
pub fn new_volatile_multi_dense_vector_storage(
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> VectorStorageEnum {
    let storage = VolatileMultiDenseVectorStorage::new(dim, distance, multi_vector_config);
    VectorStorageEnum::MultiDenseVolatile(storage)
}
/// Test-only constructor: volatile multi-dense storage with byte (u8) elements.
#[cfg(test)]
pub fn new_volatile_multi_dense_vector_storage_byte(
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> VectorStorageEnum {
    let storage = VolatileMultiDenseVectorStorage::new(dim, distance, multi_vector_config);
    VectorStorageEnum::MultiDenseVolatileByte(storage)
}
/// Test-only constructor: volatile multi-dense storage with half-precision elements.
#[cfg(test)]
pub fn new_volatile_multi_dense_vector_storage_half(
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> VectorStorageEnum {
    let storage = VolatileMultiDenseVectorStorage::new(dim, distance, multi_vector_config);
    VectorStorageEnum::MultiDenseVolatileHalf(storage)
}
impl<T: PrimitiveVectorElement> VolatileMultiDenseVectorStorage<T> {
    /// Create an empty storage for multi-vectors whose inner vectors have
    /// dimension `dim`.
    pub fn new(dim: usize, distance: Distance, multi_vector_config: MultiVectorConfig) -> Self {
        Self {
            dim,
            distance,
            multi_vector_config,
            vectors: ChunkedVectors::new(dim),
            vectors_metadata: vec![],
            deleted: BitVec::new(),
            deleted_count: 0,
        }
    }

    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Ignore un-delete requests for keys beyond the stored data.
        // NOTE(review): `self.vectors.len()` counts *inner* vectors, not
        // points — confirm this bound is intentional (the appendable mmap
        // storage uses the same pattern).
        if !deleted && key as usize >= self.vectors.len() {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Keep the cached counter in sync with the flag transition.
        if was_deleted != deleted {
            if !was_deleted {
                self.deleted_count += 1;
            } else {
                self.deleted_count = self.deleted_count.saturating_sub(1);
            }
        }
        was_deleted
    }

    /// Insert (or overwrite) the multi-vector for `key`: flatten its inner
    /// vectors into the chunked storage, update the per-point metadata, and
    /// set the deleted flag to `is_deleted`.
    ///
    /// # Errors
    /// Returns a service error if the flattened multi-vector does not fit
    /// into a single chunk (`CHUNK_SIZE`).
    fn insert_vector_impl(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        is_deleted: bool,
        _hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let multi_vector: TypedMultiDenseVectorRef<VectorElementType> = vector.try_into()?;
        let multi_vector = T::from_float_multivector(CowMultiVector::Borrowed(multi_vector));
        let multi_vector = multi_vector.as_vec_ref();
        assert_eq!(multi_vector.dim, self.dim);

        let multivector_size_in_bytes = std::mem::size_of_val(multi_vector.flattened_vectors);
        if multivector_size_in_bytes >= CHUNK_SIZE {
            return Err(OperationError::service_error(format!(
                "Cannot insert multi vector of size {multivector_size_in_bytes} to the vector storage. It's too large, maximum size is {CHUNK_SIZE}.",
            )));
        }

        // Grow the metadata table so `key` is addressable.
        let key_usize = key as usize;
        if key_usize >= self.vectors_metadata.len() {
            self.vectors_metadata
                .resize(key_usize + 1, Default::default());
        }

        let metadata = &mut self.vectors_metadata[key_usize];
        metadata.id = key as VectorOffsetType;
        metadata.inner_vectors_count = multi_vector.vectors_count();

        if multi_vector.vectors_count() > metadata.inner_vector_capacity {
            // Existing region is too small: reserve a fresh region at the end
            // of the chunked storage.
            metadata.inner_vector_capacity = metadata.inner_vectors_count;
            metadata.start = self.vectors.len();
            // If the multi-vector would straddle a chunk boundary, skip the
            // remainder of the current chunk so it stays contiguous.
            let left_keys = self.vectors.get_chunk_left_keys(metadata.start);
            if multi_vector.vectors_count() > left_keys {
                metadata.start += left_keys;
            }
        }
        // Write into place: either the freshly reserved region, or the
        // existing region which is guaranteed to have enough capacity.
        // (The original duplicated this identical call in both branches of
        // the capacity check; deduplicated, behavior unchanged.)
        self.vectors.insert_many(
            metadata.start,
            multi_vector.flattened_vectors,
            multi_vector.vectors_count(),
        )?;
        self.set_deleted(key, is_deleted);
        Ok(())
    }
}
impl<T: PrimitiveVectorElement> MultiVectorStorage<T> for VolatileMultiDenseVectorStorage<T> {
    fn vector_dim(&self) -> usize {
        self.dim
    }

    /// Panics if key is out of bounds
    fn get_multi<P: AccessPattern>(&self, key: PointOffsetType) -> TypedMultiDenseVectorRef<'_, T> {
        self.get_multi_opt::<P>(key).expect("vector not found")
    }

    /// None if key is out of bounds
    fn get_multi_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> Option<TypedMultiDenseVectorRef<'_, T>> {
        // No sequential optimizations available for in memory storage.
        self.vectors_metadata.get(key as usize).map(|metadata| {
            // Panics (rather than returning None) if the metadata points at a
            // region the chunked storage does not hold — that would be an
            // internal invariant violation, not a missing key.
            let flattened_vectors = self
                .vectors
                .get_many(metadata.start, metadata.inner_vectors_count)
                .unwrap_or_else(|| panic!("Vectors does not contain data for {metadata:?}"));
            TypedMultiDenseVectorRef {
                flattened_vectors,
                dim: self.dim,
            }
        })
    }

    // Yields every inner vector of every point, in point order.
    fn iterate_inner_vectors(&self) -> impl Iterator<Item = &[T]> + Clone + Send {
        (0..self.total_vector_count()).flat_map(|key| {
            let metadata = &self.vectors_metadata[key];
            (0..metadata.inner_vectors_count).map(|i| self.vectors.get(metadata.start + i))
        })
    }

    fn multi_vector_config(&self) -> &MultiVectorConfig {
        &self.multi_vector_config
    }

    // Estimates the share of bytes belonging to non-deleted points by
    // prorating total size with available/total point counts.
    fn size_of_available_vectors_in_bytes(&self) -> usize {
        if self.total_vector_count() > 0 {
            let total_size = self.vectors.len() * self.vector_dim() * std::mem::size_of::<T>();
            (total_size as u128 * self.available_vector_count() as u128
                / self.total_vector_count() as u128) as usize
        } else {
            0
        }
    }
}
impl<T: PrimitiveVectorElement> VectorStorage for VolatileMultiDenseVectorStorage<T> {
    fn distance(&self) -> Distance {
        self.distance
    }

    fn datatype(&self) -> VectorStorageDatatype {
        // NOTE(review): always reports Float32 even though this storage is
        // also instantiated with byte/half element types (see the `Byte` and
        // `Half` constructors above); the appendable mmap storage reports
        // `T::datatype()` instead — confirm this is intentional.
        VectorStorageDatatype::Float32
    }

    fn is_on_disk(&self) -> bool {
        false
    }

    fn total_vector_count(&self) -> usize {
        self.vectors_metadata.len()
    }

    /// Panics if key is out of bounds.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }

    // Converts the stored element type back to float for the generic API.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        self.get_multi_opt::<P>(key).map(|multi_dense_vector| {
            CowVector::MultiDense(T::into_float_multivector(CowMultiVector::Borrowed(
                multi_dense_vector,
            )))
        })
    }

    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        self.insert_vector_impl(key, vector, false, hw_counter)
    }

    // Appends all vectors from `other_vectors` after the current last point,
    // preserving their deleted flags. Returns the range of new ids.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.vectors_metadata.len() as PointOffsetType;
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector: VectorRef = other_vector.as_vec_ref();
            let new_id = self.vectors_metadata.len() as PointOffsetType;
            self.insert_vector_impl(
                new_id,
                other_vector,
                other_deleted,
                &HardwareCounterCell::disposable(), // This function is only used by internal operations
            )?;
        }
        let end_index = self.vectors_metadata.len() as PointOffsetType;
        Ok(start_index..end_index)
    }

    // Volatile storage: nothing to flush, nothing on disk.
    fn flusher(&self) -> Flusher {
        Box::new(|| Ok(()))
    }

    fn files(&self) -> Vec<std::path::PathBuf> {
        vec![]
    }

    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        // Returns true if the vector was *newly* deleted by this call.
        // NOTE(review): the appendable mmap storage in this file returns the
        // *previous* deleted state instead — confirm which convention the
        // trait expects.
        let is_deleted = !self.set_deleted(key, true);
        Ok(is_deleted)
    }

    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }

    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/multi_dense/appendable_mmap_multi_dense_vector_storage.rs | lib/segment/src/vector_storage/multi_dense/appendable_mmap_multi_dense_vector_storage.rs | use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use memory::madvise::AdviceSetting;
use crate::common::Flusher;
use crate::common::flags::bitvec_flags::BitvecFlags;
use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::named_vectors::{CowMultiVector, CowVector};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{TypedMultiDenseVectorRef, VectorElementType, VectorRef};
use crate::types::{Distance, MultiVectorConfig, VectorStorageDatatype};
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
use crate::vector_storage::in_ram_persisted_vectors::InRamPersistedVectors;
use crate::vector_storage::{
AccessPattern, MultiVectorStorage, Random, Sequential, VectorStorage, VectorStorageEnum,
};
// Sub-directories (relative to the storage path) for the three backing files.
const VECTORS_DIR_PATH: &str = "vectors";
const OFFSETS_DIR_PATH: &str = "offsets";
const DELETED_DIR_PATH: &str = "deleted";
/// Location of one point's inner vectors inside the flattened mmap storage.
/// `#[repr(C)]` because instances are stored directly in a memory-mapped file.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#[repr(C)]
pub struct MultivectorMmapOffset {
    // Index of the first inner vector in the `vectors` storage.
    offset: u32,
    // Number of inner vectors currently stored.
    count: u32,
    // Number of inner-vector slots reserved at `offset`; re-inserts that fit
    // reuse the region in place.
    capacity: u32,
}
/// Appendable multi-dense vector storage backed by chunked (mmap or in-RAM
/// persisted) vector storages: one for the flattened inner vectors and one
/// for the per-point offsets into it.
#[derive(Debug)]
pub struct AppendableMmapMultiDenseVectorStorage<
    T: PrimitiveVectorElement,
    S: ChunkedVectorStorage<T>,
    O: ChunkedVectorStorage<MultivectorMmapOffset>,
> {
    // Flattened inner vectors of all points.
    vectors: S,
    // Per-point `MultivectorMmapOffset`, indexed by point offset.
    offsets: O,
    /// Flags marking deleted vectors
    ///
    /// Structure grows dynamically, but may be smaller than actual number of vectors. Must not
    /// depend on its length.
    deleted: BitvecFlags,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
    // Cached count of set bits in `deleted`.
    deleted_count: usize,
    // Marks the element type `T`, which appears only through `S`.
    _phantom: std::marker::PhantomData<T>,
}
impl<
    T: PrimitiveVectorElement,
    S: ChunkedVectorStorage<T>,
    O: ChunkedVectorStorage<MultivectorMmapOffset>,
> AppendableMmapMultiDenseVectorStorage<T, S, O>
{
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Ignore un-delete requests for keys beyond the stored data.
        // NOTE(review): `self.vectors.len()` counts inner vectors, not
        // points — confirm this bound is intentional (same pattern as the
        // volatile storage).
        if !deleted && self.vectors.len() <= key as usize {
            return false;
        }
        // set value
        let previous = self.deleted.set(key, deleted);
        // update counter — only on an actual flag transition
        if !previous && deleted {
            self.deleted_count += 1;
        } else if previous && !deleted {
            self.deleted_count -= 1;
        }
        previous
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> OperationResult<()> {
        // deleted bitvec is already loaded
        self.vectors.populate()?;
        self.offsets.populate()?;
        Ok(())
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.vectors.clear_cache()?;
        self.offsets.clear_cache()?;
        Ok(())
    }
}
impl<
    T: PrimitiveVectorElement,
    S: ChunkedVectorStorage<T> + Sync,
    O: ChunkedVectorStorage<MultivectorMmapOffset> + Sync,
> MultiVectorStorage<T> for AppendableMmapMultiDenseVectorStorage<T, S, O>
{
    fn vector_dim(&self) -> usize {
        self.vectors.dim()
    }

    /// Panics if key is not found
    fn get_multi<P: AccessPattern>(&self, key: PointOffsetType) -> TypedMultiDenseVectorRef<'_, T> {
        self.get_multi_opt::<P>(key).expect("vector not found")
    }

    /// Returns None if key is not found
    fn get_multi_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> Option<TypedMultiDenseVectorRef<'_, T>> {
        // Resolve the per-point offset record, then slice the flattened
        // vectors storage with it.
        self.offsets
            .get::<P>(key as VectorOffsetType)
            .and_then(|mmap_offset| {
                let mmap_offset = mmap_offset.first().expect("mmap_offset must not be empty");
                self.vectors.get_many::<P>(
                    mmap_offset.offset as VectorOffsetType,
                    mmap_offset.count as usize,
                )
            })
            .map(|flattened_vectors| TypedMultiDenseVectorRef {
                flattened_vectors,
                dim: self.vectors.dim(),
            })
    }

    // Yields every inner vector of every point, in point order; panics if
    // offsets reference data the vectors storage does not hold.
    fn iterate_inner_vectors(&self) -> impl Iterator<Item = &[T]> + Clone + Send {
        (0..self.total_vector_count()).flat_map(|key| {
            let mmap_offset = self
                .offsets
                .get::<Sequential>(key as VectorOffsetType)
                .unwrap()
                .first()
                .unwrap();
            (0..mmap_offset.count).map(|i| {
                self.vectors
                    .get::<Sequential>((mmap_offset.offset + i) as VectorOffsetType)
                    .unwrap()
            })
        })
    }

    fn multi_vector_config(&self) -> &MultiVectorConfig {
        &self.multi_vector_config
    }

    // Estimates the share of bytes belonging to non-deleted points by
    // prorating total size with available/total point counts.
    fn size_of_available_vectors_in_bytes(&self) -> usize {
        if self.total_vector_count() > 0 {
            let total_size = self.vectors.len() * self.vector_dim() * std::mem::size_of::<T>();
            (total_size as u128 * self.available_vector_count() as u128
                / self.total_vector_count() as u128) as usize
        } else {
            0
        }
    }
}
impl<
    T: PrimitiveVectorElement,
    S: ChunkedVectorStorage<T> + Sync,
    O: ChunkedVectorStorage<MultivectorMmapOffset> + Sync,
> VectorStorage for AppendableMmapMultiDenseVectorStorage<T, S, O>
{
    fn distance(&self) -> Distance {
        self.distance
    }

    fn datatype(&self) -> VectorStorageDatatype {
        T::datatype()
    }

    fn is_on_disk(&self) -> bool {
        self.vectors.is_on_disk()
    }

    fn total_vector_count(&self) -> usize {
        self.offsets.len()
    }

    /// Panics if key is not found.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }

    // Converts the stored element type back to float for the generic API.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        self.get_multi_opt::<P>(key).map(|multi_dense_vector| {
            CowVector::MultiDense(T::into_float_multivector(CowMultiVector::Borrowed(
                multi_dense_vector,
            )))
        })
    }

    /// Insert (or overwrite) the multi-vector for `key`.
    ///
    /// Reuses the point's existing region when its reserved capacity is large
    /// enough; otherwise appends a fresh region at the end of the vectors
    /// storage (old region becomes garbage). Clears the deleted flag.
    ///
    /// # Errors
    /// Returns a service error if the flattened multi-vector exceeds the
    /// maximum vector size of the backing storage.
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let multi_vector: TypedMultiDenseVectorRef<VectorElementType> = vector.try_into()?;
        let multi_vector = T::from_float_multivector(CowMultiVector::Borrowed(multi_vector));
        let multi_vector = multi_vector.as_vec_ref();
        assert_eq!(multi_vector.dim, self.vectors.dim());

        let multivector_size_in_bytes = std::mem::size_of_val(multi_vector.flattened_vectors);
        let max_vector_size_bytes = self.vectors.max_vector_size_bytes();
        if multivector_size_in_bytes >= max_vector_size_bytes {
            // Fixed: the original line-continuation swallowed the space,
            // producing "storage.It's too large".
            return Err(OperationError::service_error(format!(
                "Cannot insert multi vector of size {multivector_size_in_bytes} to the mmap vector storage. \
                It's too large, maximum size is {max_vector_size_bytes}."
            )));
        }

        // Current offset record for this key, or a zeroed default for new keys.
        let mut offset = self
            .offsets
            .get::<Random>(key as VectorOffsetType)
            .map(|x| x.first().copied().unwrap_or_default())
            .unwrap_or_default();

        if multi_vector.vectors_count() > offset.capacity as usize {
            // append vector to the end
            let mut new_key = self.vectors.len();
            // Skip the remainder of the current chunk if the multi-vector
            // would straddle a chunk boundary.
            let chunk_left_keys = self.vectors.get_remaining_chunk_keys(new_key);
            if multi_vector.vectors_count() > chunk_left_keys {
                new_key += chunk_left_keys;
            }
            offset = MultivectorMmapOffset {
                offset: new_key as PointOffsetType,
                count: multi_vector.vectors_count() as PointOffsetType,
                capacity: multi_vector.vectors_count() as PointOffsetType,
            };
        } else {
            // use existing place to insert vector
            offset.count = multi_vector.vectors_count() as PointOffsetType;
        }

        self.vectors.insert_many(
            offset.offset as VectorOffsetType,
            multi_vector.flattened_vectors,
            multi_vector.vectors_count(),
            hw_counter,
        )?;
        self.offsets
            .insert(key as VectorOffsetType, &[offset], hw_counter)?;
        self.set_deleted(key, false);
        Ok(())
    }

    // Appends all vectors from `other_vectors` after the current last point,
    // preserving their deleted flags. Returns the range of new ids.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.offsets.len() as PointOffsetType;
        let disposed_hw_counter = HardwareCounterCell::disposable(); // Internal operation
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector: VectorRef = other_vector.as_vec_ref();
            let new_id = self.offsets.len() as PointOffsetType;
            self.insert_vector(new_id, other_vector, &disposed_hw_counter)?;
            self.set_deleted(new_id, other_deleted);
        }
        let end_index = self.offsets.len() as PointOffsetType;
        Ok(start_index..end_index)
    }

    // Flushes all three backing structures; the flusher captures their
    // individual flushers so it can run without borrowing `self`.
    fn flusher(&self) -> Flusher {
        Box::new({
            let vectors_flusher = self.vectors.flusher();
            let offsets_flusher = self.offsets.flusher();
            let deleted_flusher = self.deleted.flusher();
            move || {
                vectors_flusher()?;
                offsets_flusher()?;
                deleted_flusher()?;
                Ok(())
            }
        })
    }

    fn files(&self) -> Vec<PathBuf> {
        let mut files = self.vectors.files();
        files.extend(self.offsets.files());
        files.extend(self.deleted.files());
        files
    }

    // Deleted flags are mutable, so only vectors/offsets report immutables.
    fn immutable_files(&self) -> Vec<PathBuf> {
        let mut files = self.vectors.immutable_files();
        files.extend(self.offsets.immutable_files());
        files
    }

    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        // Returns the *previous* deleted state.
        // NOTE(review): the volatile storage in this file returns the negation
        // (true when newly deleted) — confirm which convention the trait
        // expects.
        Ok(self.set_deleted(key, true))
    }

    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get(key)
    }

    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.get_bitslice()
    }
}
pub fn open_appendable_memmap_multi_vector_storage(
storage_element_type: VectorStorageDatatype,
path: &Path,
dim: usize,
distance: Distance,
multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
match storage_element_type {
VectorStorageDatatype::Float32 => open_appendable_memmap_multi_vector_storage_full(
path,
dim,
distance,
multi_vector_config,
),
VectorStorageDatatype::Uint8 => open_appendable_memmap_multi_vector_storage_byte(
path,
dim,
distance,
multi_vector_config,
),
VectorStorageDatatype::Float16 => open_appendable_memmap_multi_vector_storage_half(
path,
dim,
distance,
multi_vector_config,
),
}
}
/// Open an appendable mmap-backed multi-vector storage with float32 elements.
pub fn open_appendable_memmap_multi_vector_storage_full(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    open_appendable_memmap_multi_vector_storage_impl::<VectorElementType>(
        path,
        dim,
        distance,
        multi_vector_config,
    )
    .map(|storage| VectorStorageEnum::MultiDenseAppendableMemmap(Box::new(storage)))
}
/// Open an appendable mmap-backed multi-vector storage with byte elements.
pub fn open_appendable_memmap_multi_vector_storage_byte(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant.
    open_appendable_memmap_multi_vector_storage_impl(path, dim, distance, multi_vector_config)
        .map(|storage| VectorStorageEnum::MultiDenseAppendableMemmapByte(Box::new(storage)))
}
/// Open an appendable mmap-backed multi-vector storage with half-precision elements.
pub fn open_appendable_memmap_multi_vector_storage_half(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant.
    open_appendable_memmap_multi_vector_storage_impl(path, dim, distance, multi_vector_config)
        .map(|storage| VectorStorageEnum::MultiDenseAppendableMemmapHalf(Box::new(storage)))
}
/// Open (or create) the three backing structures of an appendable mmap
/// multi-vector storage under `path`: flattened vectors, per-point offsets,
/// and the deleted-flags bitvec.
pub fn open_appendable_memmap_multi_vector_storage_impl<T: PrimitiveVectorElement>(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<
    AppendableMmapMultiDenseVectorStorage<
        T,
        ChunkedMmapVectors<T>,
        ChunkedMmapVectors<MultivectorMmapOffset>,
    >,
> {
    fs::create_dir_all(path)?;

    let vectors_path = path.join(VECTORS_DIR_PATH);
    let offsets_path = path.join(OFFSETS_DIR_PATH);
    let deleted_path = path.join(DELETED_DIR_PATH);
    // mmap variant: leave pages on disk until touched (contrast with the
    // in-RAM variant below, which populates eagerly).
    let populate = false;

    let vectors =
        ChunkedMmapVectors::open(&vectors_path, dim, AdviceSetting::Global, Some(populate))?;
    // Offsets are stored as one `MultivectorMmapOffset` per point, hence dim 1.
    let offsets =
        ChunkedMmapVectors::open(&offsets_path, 1, AdviceSetting::Global, Some(populate))?;
    let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
    // Seed the cached deleted counter from the persisted flags.
    let deleted_count = deleted.count_trues();

    Ok(AppendableMmapMultiDenseVectorStorage {
        vectors,
        offsets,
        deleted,
        distance,
        multi_vector_config,
        deleted_count,
        _phantom: Default::default(),
    })
}
pub fn open_appendable_in_ram_multi_vector_storage(
storage_element_type: VectorStorageDatatype,
path: &Path,
dim: usize,
distance: Distance,
multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
match storage_element_type {
VectorStorageDatatype::Float32 => open_appendable_in_ram_multi_vector_storage_full(
path,
dim,
distance,
multi_vector_config,
),
VectorStorageDatatype::Float16 => open_appendable_in_ram_multi_vector_storage_half(
path,
dim,
distance,
multi_vector_config,
),
VectorStorageDatatype::Uint8 => open_appendable_in_ram_multi_vector_storage_byte(
path,
dim,
distance,
multi_vector_config,
),
}
}
/// Open an appendable in-RAM-persisted multi-vector storage with float32 elements.
pub fn open_appendable_in_ram_multi_vector_storage_full(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    open_appendable_in_ram_multi_vector_storage_impl::<VectorElementType>(
        path,
        dim,
        distance,
        multi_vector_config,
    )
    .map(|storage| VectorStorageEnum::MultiDenseAppendableInRam(Box::new(storage)))
}
/// Open an appendable in-RAM-persisted multi-vector storage with byte elements.
pub fn open_appendable_in_ram_multi_vector_storage_byte(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant.
    open_appendable_in_ram_multi_vector_storage_impl(path, dim, distance, multi_vector_config)
        .map(|storage| VectorStorageEnum::MultiDenseAppendableInRamByte(Box::new(storage)))
}
/// Open an appendable in-RAM-persisted multi-vector storage with half-precision elements.
pub fn open_appendable_in_ram_multi_vector_storage_half(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant.
    open_appendable_in_ram_multi_vector_storage_impl(path, dim, distance, multi_vector_config)
        .map(|storage| VectorStorageEnum::MultiDenseAppendableInRamHalf(Box::new(storage)))
}
/// Open (or create) the three backing structures of an appendable in-RAM
/// persisted multi-vector storage under `path`. Data lives in RAM but is
/// persisted to the same on-disk layout as the mmap variant.
pub fn open_appendable_in_ram_multi_vector_storage_impl<T: PrimitiveVectorElement>(
    path: &Path,
    dim: usize,
    distance: Distance,
    multi_vector_config: MultiVectorConfig,
) -> OperationResult<
    AppendableMmapMultiDenseVectorStorage<
        T,
        InRamPersistedVectors<T>,
        InRamPersistedVectors<MultivectorMmapOffset>,
    >,
> {
    fs::create_dir_all(path)?;

    let vectors_path = path.join(VECTORS_DIR_PATH);
    let offsets_path = path.join(OFFSETS_DIR_PATH);
    let deleted_path = path.join(DELETED_DIR_PATH);
    // In-RAM variant: load the deleted flags eagerly.
    let populate = true;

    let vectors = InRamPersistedVectors::open(&vectors_path, dim)?;
    // Offsets are stored as one `MultivectorMmapOffset` per point, hence dim 1.
    let offsets = InRamPersistedVectors::open(&offsets_path, 1)?;
    let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
    // Seed the cached deleted counter from the persisted flags.
    let deleted_count = deleted.count_trues();

    Ok(AppendableMmapMultiDenseVectorStorage {
        vectors,
        offsets,
        deleted,
        distance,
        multi_vector_config,
        deleted_count,
        _phantom: Default::default(),
    })
}
/// Find all files belonging to a multi-dense vector storage rooted at
/// `vector_storage_path` (vectors, offsets, and deleted-flags sub-dirs).
#[cfg(any(test, feature = "rocksdb"))]
pub(crate) fn find_storage_files(vector_storage_path: &Path) -> OperationResult<Vec<PathBuf>> {
    let mut found = Vec::new();
    // Same order as the original: vectors, offsets, deleted.
    for subdir in [VECTORS_DIR_PATH, OFFSETS_DIR_PATH, DELETED_DIR_PATH] {
        found.extend(common::disk::list_files(&vector_storage_path.join(subdir))?);
    }
    Ok(found)
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use tempfile::Builder;

    use super::*;
    use crate::data_types::vectors::MultiDenseVectorInternal;

    const RAND_SEED: u64 = 42;

    /// Test that `find_storage_files` finds all files that are reported by the storage.
    #[test]
    fn test_find_storage_files() {
        // Numbers chosen so we get 3 data chunks, not just 1
        const POINT_COUNT: PointOffsetType = 1000;
        const DIM: usize = 128;

        // Fixed typo: was `mutli_vector_config`.
        let multi_vector_config = MultiVectorConfig::default();

        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let mut storage = open_appendable_memmap_multi_vector_storage_full(
            dir.path(),
            DIM,
            Distance::Dot,
            multi_vector_config,
        )
        .unwrap();

        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let hw_counter = HardwareCounterCell::disposable();

        // Insert points, delete 10% of it, and flush
        for internal_id in 0..POINT_COUNT {
            // Each point gets 1..=4 random inner vectors.
            let size = rng.random_range(1..=4);
            let vectors = std::iter::repeat_with(|| {
                std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                    .take(DIM)
                    .collect()
            })
            .take(size)
            .collect::<Vec<Vec<_>>>();
            let multivec = MultiDenseVectorInternal::try_from(vectors).unwrap();
            storage
                .insert_vector(internal_id, VectorRef::from(&multivec), &hw_counter)
                .unwrap();
        }
        for internal_id in 0..POINT_COUNT {
            if !rng.random_bool(0.1) {
                continue;
            }
            storage.delete_vector(internal_id).unwrap();
        }
        storage.flusher()().unwrap();

        let storage_files = storage.files().into_iter().collect::<HashSet<_>>();
        let found_files = find_storage_files(dir.path())
            .unwrap()
            .into_iter()
            .collect::<HashSet<_>>();

        assert_eq!(
            storage_files, found_files,
            "find_storage_files must find same files that storage reports",
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_multivector_storage.rs | lib/segment/src/vector_storage/quantized/quantized_multivector_storage.rs | use std::ops::DerefMut;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use fs_err as fs;
use memmap2::MmapMut;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_type::{MmapFlusher, MmapSlice};
use quantization::EncodedVectors;
use serde::{Deserialize, Serialize};
use crate::common::operation_error::OperationResult;
use crate::data_types::vectors::{TypedMultiDenseVectorRef, VectorElementType};
use crate::types::{MultiVectorComparator, MultiVectorConfig};
use crate::vector_storage::Random;
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
/// Location of one point's inner vectors inside the flattened quantized
/// storage: `start..start + count`.
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct MultivectorOffset {
    // Index of the first inner vector.
    pub start: PointOffsetType,
    // Number of inner vectors for this point.
    pub count: PointOffsetType,
}
/// Read-only access to per-point multivector offsets.
pub trait MultivectorOffsets {
    // Returns the offset record for point `idx`.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset;
}
/// Backing storage of per-point multivector offsets (RAM, mmap, or chunked
/// mmap), with optional upsert support for appendable variants.
#[allow(clippy::len_without_is_empty)]
pub trait MultivectorOffsetsStorage: Sized {
    /// Return the offset record for point `idx`.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset;

    /// Number of stored offset records.
    fn len(&self) -> usize;

    /// Insert or overwrite the offset record for `id`.
    /// Not all implementations support this (the plain mmap one errors).
    fn upsert_offset(
        &mut self,
        id: PointOffsetType,
        offset: MultivectorOffset,
        hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()>;

    /// Persist pending changes to disk (may be a no-op).
    fn flusher(&self) -> MmapFlusher;

    /// All files backing this storage.
    fn files(&self) -> Vec<PathBuf>;

    /// Backing files that never change after creation.
    fn immutable_files(&self) -> Vec<PathBuf>;
}
/// Offsets held fully in RAM, created from (and loadable from) a flat
/// offsets file at `path`.
pub struct MultivectorOffsetsStorageRam {
    // In-memory copy of all offset records.
    offsets: Vec<MultivectorOffset>,
    // File the offsets were created from / loaded from.
    path: PathBuf,
}
impl MultivectorOffsetsStorageRam {
    /// Materialize `offsets`, write them to a new offsets file at `path`,
    /// and keep the in-RAM copy.
    pub fn create(
        path: &Path,
        offsets: impl Iterator<Item = MultivectorOffset>,
    ) -> OperationResult<Self> {
        let offsets: Vec<_> = offsets.collect();
        create_offsets_file_from_iter(path, offsets.len(), offsets.iter().cloned())?;
        Ok(MultivectorOffsetsStorageRam {
            path: path.to_path_buf(),
            offsets,
        })
    }

    /// Load all offset records from the file at `path` into RAM.
    /// NOTE(review): the file is opened read+write and mapped mutably only to
    /// satisfy `MmapMut`/`MmapSlice`; the mapping is dropped after copying —
    /// confirm read-only mapping is not an option here.
    pub fn load(path: &Path) -> OperationResult<Self> {
        let offsets_file = fs::OpenOptions::new().read(true).write(true).open(path)?;
        // SAFETY (review note): relies on the file not being resized or
        // modified externally while mapped — same assumption as the other
        // mmap loaders in this module.
        let offsets_mmap = unsafe { MmapMut::map_mut(&offsets_file) }?;
        let mut offsets_mmap_type =
            unsafe { MmapSlice::<MultivectorOffset>::try_from(offsets_mmap)? };
        Ok(MultivectorOffsetsStorageRam {
            offsets: offsets_mmap_type.deref_mut().iter().copied().collect(),
            path: path.to_path_buf(),
        })
    }
}
impl MultivectorOffsetsStorage for MultivectorOffsetsStorageRam {
    /// Panics if `idx` is out of bounds.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset {
        self.offsets[idx as usize]
    }

    fn len(&self) -> usize {
        self.offsets.len()
    }

    // NOTE(review): updates only the in-memory copy; the file written in
    // `create` is not rewritten here and `flusher` is a no-op — confirm the
    // persistence expectations for this variant.
    fn upsert_offset(
        &mut self,
        id: PointOffsetType,
        offset: MultivectorOffset,
        _hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        // Skip hardware counter increment because it's a RAM storage.
        if id as usize >= self.len() {
            self.offsets
                .resize(id as usize + 1, MultivectorOffset::default());
        }
        self.offsets[id as usize] = offset;
        Ok(())
    }

    // Nothing buffered: flushing is a no-op.
    fn flusher(&self) -> MmapFlusher {
        Box::new(|| Ok(()))
    }

    fn files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }
}
/// Offsets served directly from a memory-mapped file (non-appendable).
#[derive(Debug)]
pub struct MultivectorOffsetsStorageMmap {
    // Memory-mapped slice of all offset records.
    offsets: MmapSlice<MultivectorOffset>,
    // Backing file path.
    path: PathBuf,
}
impl MultivectorOffsetsStorageMmap {
    /// Write `count` records from `offsets` to a new file at `path`, then
    /// memory-map it.
    pub fn create(
        path: &Path,
        offsets: impl Iterator<Item = MultivectorOffset>,
        count: usize,
    ) -> OperationResult<Self> {
        create_offsets_file_from_iter(path, count, offsets)?;
        MultivectorOffsetsStorageMmap::load(path)
    }

    /// Memory-map the existing offsets file at `path`.
    pub fn load(path: &Path) -> OperationResult<Self> {
        let offsets_file = fs::OpenOptions::new().read(true).write(true).open(path)?;
        // SAFETY (review note): relies on the file not being resized or
        // modified externally while mapped.
        let offsets_mmap = unsafe { MmapMut::map_mut(&offsets_file) }?;
        let offsets = unsafe { MmapSlice::<MultivectorOffset>::try_from(offsets_mmap)? };
        Ok(Self {
            offsets,
            path: path.to_path_buf(),
        })
    }

    /// Prefault all pages of the mapping into memory.
    pub fn populate(&self) -> std::io::Result<()> {
        self.offsets.populate()
    }
}
impl MultivectorOffsetsStorage for MultivectorOffsetsStorageMmap {
    /// Panics if `idx` is out of bounds.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset {
        self.offsets[idx as usize]
    }

    fn len(&self) -> usize {
        self.offsets.len()
    }

    // This variant is non-appendable: upserts always fail.
    fn upsert_offset(
        &mut self,
        _id: PointOffsetType,
        _offset: MultivectorOffset,
        _hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "Cannot upsert offset in mmap storage",
        ))
    }

    fn flusher(&self) -> MmapFlusher {
        // Mmap storage does not need a flusher, as it is non-appendable and already backed by a file.
        Box::new(|| Ok(()))
    }

    fn files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }
}
/// Appendable offsets backed by chunked mmap vectors (one record per point).
pub struct MultivectorOffsetsStorageChunkedMmap {
    data: ChunkedMmapVectors<MultivectorOffset>,
}
impl MultivectorOffsetsStorageChunkedMmap {
    /// Create a chunked offsets storage at `path`, fill it from `offsets`,
    /// and flush it to disk.
    pub fn create(
        path: &Path,
        offsets: impl Iterator<Item = MultivectorOffset>,
        in_ram: bool,
    ) -> OperationResult<Self> {
        let hw_counter = HardwareCounterCell::disposable();
        let mut offsets_storage = Self::load(path, in_ram)?;
        // Records are keyed by their position in the iterator.
        for (id, offset) in offsets.enumerate() {
            offsets_storage.upsert_offset(id as PointOffsetType, offset, &hw_counter)?;
        }
        offsets_storage.flusher()()?;
        Ok(offsets_storage)
    }

    /// Open (or create) the chunked offsets storage at `path`.
    /// `in_ram` switches madvise to Normal and pre-populates pages.
    pub fn load(path: &Path, in_ram: bool) -> OperationResult<Self> {
        let advice = if in_ram {
            AdviceSetting::from(Advice::Normal)
        } else {
            AdviceSetting::Global
        };

        let data = ChunkedMmapVectors::<MultivectorOffset>::open(
            path,
            1, // one offset record per point
            advice,
            Some(in_ram), // populate
        )?;

        Ok(Self { data })
    }

    /// Prefault all pages into memory.
    pub fn populate(&self) -> OperationResult<()> {
        self.data.populate()
    }
}
impl MultivectorOffsetsStorage for MultivectorOffsetsStorageChunkedMmap {
    // NOTE(review): unlike the RAM/mmap variants (which index and may panic),
    // this returns a zeroed default record for a missing `idx` — confirm
    // callers never rely on out-of-bounds detection here.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset {
        ChunkedVectorStorage::get::<Random>(&self.data, idx as VectorOffsetType)
            .and_then(|offsets| offsets.first())
            .cloned()
            .unwrap_or_default()
    }

    fn len(&self) -> usize {
        ChunkedVectorStorage::len(&self.data)
    }

    // Adapts the chunked storage's flusher (OperationResult) to the
    // io::Result-based MmapFlusher signature.
    fn flusher(&self) -> MmapFlusher {
        let flusher = ChunkedMmapVectors::flusher(&self.data);
        Box::new(move || {
            flusher().map_err(|e| {
                std::io::Error::other(format!("Failed to flush multivector offsets storage: {e}"))
            })?;
            Ok(())
        })
    }

    fn upsert_offset(
        &mut self,
        id: PointOffsetType,
        offset: MultivectorOffset,
        hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        ChunkedVectorStorage::insert(
            &mut self.data,
            id as VectorOffsetType,
            &[offset],
            hw_counter,
        )
        .map_err(std::io::Error::other)
    }

    fn files(&self) -> Vec<PathBuf> {
        ChunkedVectorStorage::files(&self.data)
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        ChunkedVectorStorage::immutable_files(&self.data)
    }
}
/// Quantized storage for multivectors: a flat quantized storage holding all
/// inner vectors, plus an offsets table mapping each point id to its
/// (start, count) range of inner vectors.
pub struct QuantizedMultivectorStorage<QuantizedStorage, TMultivectorOffsetsStorage>
where
    QuantizedStorage: EncodedVectors,
    TMultivectorOffsetsStorage: MultivectorOffsetsStorage,
{
    quantized_storage: QuantizedStorage,
    offsets: TMultivectorOffsetsStorage,
    // Dimensionality of one inner vector.
    dim: usize,
    multi_vector_config: MultiVectorConfig,
}
impl<QuantizedStorage, TMultivectorOffsetsStorage>
    QuantizedMultivectorStorage<QuantizedStorage, TMultivectorOffsetsStorage>
where
    QuantizedStorage: EncodedVectors,
    TMultivectorOffsetsStorage: MultivectorOffsetsStorage,
{
    /// Borrow the underlying quantized (inner-vector) storage.
    pub fn storage(&self) -> &QuantizedStorage {
        &self.quantized_storage
    }
    /// Borrow the per-point offsets table.
    pub fn offsets_storage(&self) -> &TMultivectorOffsetsStorage {
        &self.offsets
    }
    pub fn new(
        dim: usize,
        quantized_storage: QuantizedStorage,
        offsets: TMultivectorOffsetsStorage,
        multi_vector_config: MultiVectorConfig,
    ) -> Self {
        Self {
            quantized_storage,
            offsets,
            dim,
            multi_vector_config,
        }
    }
    /// Custom `score_max_similarity` implementation for quantized vectors
    ///
    /// MaxSim: for each inner query vector, take the best similarity against
    /// the point's inner vectors, then sum those maxima.
    /// NOTE(review): if `offset.count` is 0, `NEG_INFINITY` is added to the
    /// sum — presumably empty multivectors are never stored; confirm upstream.
    fn score_point_max_similarity(
        &self,
        query: &Vec<QuantizedStorage::EncodedQuery>,
        vector_index: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> ScoreType {
        let offset = self.offsets.get_offset(vector_index);
        let mut sum = 0.0;
        for inner_query in query {
            let mut max_sim = ScoreType::NEG_INFINITY;
            // manual `max_by` for performance
            for i in 0..offset.count {
                let sim =
                    self.quantized_storage
                        .score_point(inner_query, offset.start + i, hw_counter);
                if sim > max_sim {
                    max_sim = sim;
                }
            }
            // sum of max similarity
            sum += max_sim;
        }
        sum
    }
    /// Custom `score_max_similarity` implementation for quantized vectors
    /// (point-vs-point variant, both sides addressed via their offset ranges).
    fn score_internal_max_similarity(
        &self,
        vector_a_index: PointOffsetType,
        vector_b_index: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> ScoreType {
        let offset_a = self.offsets.get_offset(vector_a_index);
        let offset_b = self.offsets.get_offset(vector_b_index);
        let mut sum = 0.0;
        for a in 0..offset_a.count {
            let mut max_sim = ScoreType::NEG_INFINITY;
            // manual `max_by` for performance
            for b in 0..offset_b.count {
                let sim = self.quantized_storage.score_internal(
                    offset_a.start + a,
                    offset_b.start + b,
                    hw_counter,
                );
                if sim > max_sim {
                    max_sim = sim;
                }
            }
            // sum of max similarity
            sum += max_sim;
        }
        sum
    }
    pub fn inner_storage(&self) -> &QuantizedStorage {
        &self.quantized_storage
    }
    /// Range (start, count) of inner vectors belonging to point `id`.
    pub fn inner_vector_offset(&self, id: PointOffsetType) -> MultivectorOffset {
        self.offsets.get_offset(id)
    }
    /// Number of points (multivectors), not inner vectors.
    pub fn vectors_count(&self) -> usize {
        self.offsets.len()
    }
}
impl<QuantizedStorage, TMultivectorOffsetsStorage> EncodedVectors
    for QuantizedMultivectorStorage<QuantizedStorage, TMultivectorOffsetsStorage>
where
    QuantizedStorage: EncodedVectors,
    TMultivectorOffsetsStorage: MultivectorOffsetsStorage,
{
    // TODO(colbert): refactor `EncodedVectors` to support multi vector storage after quantization migration
    type EncodedQuery = Vec<QuantizedStorage::EncodedQuery>;
    fn is_on_disk(&self) -> bool {
        self.quantized_storage.is_on_disk()
    }
    /// Split the flattened query into inner vectors of `self.dim` elements and
    /// encode each one with the inner quantized storage.
    fn encode_query(&self, query: &[VectorElementType]) -> Vec<QuantizedStorage::EncodedQuery> {
        let multi_vector = TypedMultiDenseVectorRef {
            dim: self.dim,
            flattened_vectors: query,
        };
        multi_vector
            .multi_vectors()
            .map(|inner_vector| self.quantized_storage.encode_query(inner_vector))
            .collect()
    }
    fn score_point(
        &self,
        query: &Vec<QuantizedStorage::EncodedQuery>,
        i: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> ScoreType {
        // MaxSim is currently the only multivector comparator.
        match self.multi_vector_config.comparator {
            MultiVectorComparator::MaxSim => self.score_point_max_similarity(query, i, hw_counter),
        }
    }
    fn score_internal(
        &self,
        i: PointOffsetType,
        j: PointOffsetType,
        hw_counter: &HardwareCounterCell,
    ) -> ScoreType {
        match self.multi_vector_config.comparator {
            MultiVectorComparator::MaxSim => self.score_internal_max_similarity(i, j, hw_counter),
        }
    }
    /// Size of one quantized *inner* vector, not of a whole multivector.
    fn quantized_vector_size(&self) -> usize {
        self.quantized_storage.quantized_vector_size()
    }
    /// Re-encode the stored multivector `id` as a query; `None` if any inner
    /// vector cannot be re-encoded by the underlying storage.
    fn encode_internal_vector(
        &self,
        id: PointOffsetType,
    ) -> Option<Vec<QuantizedStorage::EncodedQuery>> {
        let offset = self.offsets.get_offset(id);
        let mut query = Vec::with_capacity(offset.count as usize);
        for i in 0..offset.count {
            let internal_id = offset.start + i;
            query.push(self.quantized_storage.encode_internal_vector(internal_id)?)
        }
        Some(query)
    }
    /// Insert or overwrite the multivector for `id`.
    ///
    /// The old inner-vector range is reused when the new multivector fits into
    /// it; otherwise a fresh range is appended at the end of the quantized
    /// storage and the previous range becomes unreferenced (by design — no
    /// compaction happens here).
    fn upsert_vector(
        &mut self,
        id: PointOffsetType,
        vector: &[f32],
        hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        let multi_vector = TypedMultiDenseVectorRef {
            dim: self.dim,
            flattened_vectors: vector,
        };
        let inner_vectors_count = self.quantized_storage.vectors_count() as PointOffsetType;
        let offset = if (id as usize) < self.offsets.len() {
            let old_offset = self.offsets.get_offset(id);
            if multi_vector.vectors_count() <= old_offset.count as usize {
                // If the new vector has less or equal number of inner vectors, we can reuse the old offset
                MultivectorOffset {
                    start: old_offset.start,
                    count: multi_vector.vectors_count() as PointOffsetType,
                }
            } else {
                // Otherwise, we need allocate a new offset
                MultivectorOffset {
                    start: inner_vectors_count,
                    count: multi_vector.vectors_count() as PointOffsetType,
                }
            }
        } else {
            MultivectorOffset {
                start: inner_vectors_count,
                count: multi_vector.vectors_count() as PointOffsetType,
            }
        };
        for (i, inner_vector) in multi_vector.multi_vectors().enumerate() {
            self.quantized_storage.upsert_vector(
                offset.start + i as PointOffsetType,
                inner_vector,
                hw_counter,
            )?;
        }
        // Record the offset only after all inner vectors are written.
        self.offsets.upsert_offset(id, offset, hw_counter)?;
        Ok(())
    }
    /// Number of points (multivectors), not inner vectors.
    fn vectors_count(&self) -> usize {
        self.offsets.len()
    }
    /// Combined flusher: inner vectors first, then the offsets table.
    fn flusher(&self) -> MmapFlusher {
        let quantized_storage_flusher = self.quantized_storage.flusher();
        let offsets_flusher = self.offsets.flusher();
        Box::new(move || {
            quantized_storage_flusher()?;
            offsets_flusher()?;
            Ok(())
        })
    }
    fn files(&self) -> Vec<PathBuf> {
        let mut files = self.quantized_storage.files();
        files.extend(self.offsets.files());
        files
    }
    fn immutable_files(&self) -> Vec<PathBuf> {
        let mut files = self.quantized_storage.immutable_files();
        files.extend(self.offsets.immutable_files());
        files
    }
    // Byte-level scoring is statically disabled for multivector storage.
    type SupportsBytes = False;
    fn score_bytes(
        &self,
        enabled: Self::SupportsBytes,
        _: &Self::EncodedQuery,
        _: &[u8],
        _: &HardwareCounterCell,
    ) -> f32 {
        // `False` is uninhabited, so this branch can never execute.
        match enabled {}
    }
}
impl<QuantizedStorage, TMultivectorOffsetsStorage> MultivectorOffsets
    for QuantizedMultivectorStorage<QuantizedStorage, TMultivectorOffsetsStorage>
where
    QuantizedStorage: EncodedVectors,
    TMultivectorOffsetsStorage: MultivectorOffsetsStorage,
{
    /// Delegate offset lookup to the inner offsets storage.
    fn get_offset(&self, idx: PointOffsetType) -> MultivectorOffset {
        self.offsets.get_offset(idx)
    }
}
/// Write `count` offset records from `iter` into a new mmap-backed file at
/// `path`, then flush. If `iter` yields fewer than `count` items, the tail
/// records stay zero-filled (the file length is pre-set with `set_len`);
/// extra items beyond `count` are silently ignored by the `zip`.
fn create_offsets_file_from_iter(
    path: &Path,
    count: usize,
    iter: impl Iterator<Item = MultivectorOffset>,
) -> OperationResult<()> {
    path.parent()
        .ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "Path must have a parent directory",
            )
        })
        .and_then(fs::create_dir_all)?;
    let offsets_file_size = count * std::mem::size_of::<MultivectorOffset>();
    let offsets_file = fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        // Don't truncate because we explicitly set the length later
        .truncate(false)
        .open(path)?;
    offsets_file.set_len(offsets_file_size as u64)?;
    // SAFETY: the file was just opened read-write and sized by this function;
    // the mapping does not outlive this scope.
    let offsets_mmap = unsafe { MmapMut::map_mut(&offsets_file) }?;
    // SAFETY: the buffer length is an exact multiple of
    // size_of::<MultivectorOffset>() by construction above.
    let mut offsets_mmap_type = unsafe { MmapSlice::<MultivectorOffset>::try_from(offsets_mmap)? };
    let offsets_mut: &mut [MultivectorOffset] = offsets_mmap_type.deref_mut();
    for (dst, src) in offsets_mut.iter_mut().zip(iter) {
        *dst = src;
    }
    // Flush before returning so the file is durable on success.
    offsets_mmap_type.flusher()()?;
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_query_scorer.rs | lib/segment/src/vector_storage/quantized/quantized_query_scorer.rs | use std::borrow::Cow;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, ScoreType};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{DenseVector, VectorElementType};
use crate::spaces::metric::Metric;
use crate::types::QuantizationConfig;
use crate::vector_storage::query_scorer::QueryScorer;
/// Scorer for quantized dense vectors: holds an already-encoded query and
/// scores stored points directly in the quantized domain.
pub struct QuantizedQueryScorer<'a, TEncodedVectors>
where
    TEncodedVectors: quantization::EncodedVectors,
{
    query: TEncodedVectors::EncodedQuery,
    quantized_data: &'a TEncodedVectors,
    hardware_counter: HardwareCounterCell,
}
/// Error type returned when [`QuantizedQueryScorer::new_internal`] fails.
/// Contains the original [`HardwareCounterCell`] passed to [`QuantizedQueryScorer::new_internal`].
pub struct InternalScorerUnsupported(pub HardwareCounterCell);
impl<'a, TEncodedVectors> QuantizedQueryScorer<'a, TEncodedVectors>
where
    TEncodedVectors: quantization::EncodedVectors,
{
    /// Build a scorer from a raw dense query: metric preprocessing, element
    /// conversion, quantization preprocessing, then query encoding.
    pub fn new<TElement, TMetric>(
        raw_query: DenseVector,
        quantized_data: &'a TEncodedVectors,
        quantization_config: &QuantizationConfig,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self
    where
        TElement: PrimitiveVectorElement,
        TMetric: Metric<TElement>,
    {
        let raw_preprocessed_query = TMetric::preprocess(raw_query);
        let original_query = TElement::slice_from_float_cow(Cow::Owned(raw_preprocessed_query));
        let original_query_prequantized = TElement::quantization_preprocess(
            quantization_config,
            TMetric::distance(),
            original_query.as_ref(),
        );
        let query = quantized_data.encode_query(&original_query_prequantized);
        // Count vector IO only when the quantized data actually lives on disk.
        hardware_counter.set_vector_io_read_multiplier(usize::from(quantized_data.is_on_disk()));
        Self {
            query,
            quantized_data,
            hardware_counter,
        }
    }
    /// Build a raw scorer for the specified `point_id`.
    /// If not supported, return [`InternalScorerUnsupported`] with the original `hardware_counter`.
    pub fn new_internal(
        point_id: PointOffsetType,
        quantized_data: &'a TEncodedVectors,
        mut hardware_counter: HardwareCounterCell,
    ) -> Result<Self, InternalScorerUnsupported> {
        // Re-encode the stored vector as a query; not all storages support this.
        let Some(query) = quantized_data.encode_internal_vector(point_id) else {
            return Err(InternalScorerUnsupported(hardware_counter));
        };
        hardware_counter.set_vector_io_read_multiplier(usize::from(quantized_data.is_on_disk()));
        Ok(Self {
            query,
            quantized_data,
            hardware_counter,
        })
    }
}
impl<TEncodedVectors> QueryScorer for QuantizedQueryScorer<'_, TEncodedVectors>
where
    TEncodedVectors: quantization::EncodedVectors,
{
    type TVector = [VectorElementType];
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        // Account for reading one quantized vector before scoring it.
        self.hardware_counter
            .vector_io_read()
            .incr_delta(self.quantized_data.quantized_vector_size());
        self.quantized_data
            .score_point(&self.query, idx, &self.hardware_counter)
    }
    /// Unsupported: quantized scoring works on encoded queries, not raw vectors.
    fn score(&self, _v2: &[VectorElementType]) -> ScoreType {
        unimplemented!("This method is not expected to be called for quantized scorer");
    }
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        self.quantized_data
            .score_internal(point_a, point_b, &self.hardware_counter)
    }
    // Byte-level scoring support is inherited from the underlying storage.
    type SupportsBytes = TEncodedVectors::SupportsBytes;
    fn score_bytes(&self, enabled: Self::SupportsBytes, bytes: &[u8]) -> ScoreType {
        self.quantized_data
            .score_bytes(enabled, &self.query, bytes, &self.hardware_counter)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_multi_custom_query_scorer.rs | lib/segment/src/vector_storage/quantized/quantized_multi_custom_query_scorer.rs | use std::marker::PhantomData;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use crate::data_types::named_vectors::CowMultiVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{MultiDenseVectorInternal, TypedMultiDenseVector};
use crate::spaces::metric::Metric;
use crate::types::QuantizationConfig;
use crate::vector_storage::quantized::quantized_multivector_storage::{
MultivectorOffset, MultivectorOffsets,
};
use crate::vector_storage::query::{Query, TransformInto};
use crate::vector_storage::query_scorer::QueryScorer;
/// Scorer for custom queries (recommend / discovery / context / feedback)
/// over quantized multivector storage. Each query example is encoded as a
/// multivector; scoring combines per-example MaxSim scores via `TQuery`.
pub struct QuantizedMultiCustomQueryScorer<'a, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors + MultivectorOffsets,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    query: TQuery,
    quantized_multivector_storage: &'a TEncodedVectors,
    // Zero-sized markers: TMetric/TElement only parameterize the encoding.
    metric: PhantomData<TMetric>,
    element: PhantomData<TElement>,
    hardware_counter: HardwareCounterCell,
}
impl<'a, TElement, TMetric, TEncodedVectors, TQuery>
    QuantizedMultiCustomQueryScorer<'a, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors + MultivectorOffsets,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    /// Build the scorer by transforming the input query in two stages:
    /// 1. raw multivectors -> metric-preprocessed, element-converted vectors;
    /// 2. converted vectors -> quantization-preprocessed, encoded queries.
    pub fn new_multi<TOriginalQuery, TInputQuery>(
        raw_query: TInputQuery,
        quantized_multivector_storage: &'a TEncodedVectors,
        quantization_config: &QuantizationConfig,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self
    where
        TOriginalQuery: Query<TypedMultiDenseVector<TElement>>
            + TransformInto<TQuery, TypedMultiDenseVector<TElement>, TEncodedVectors::EncodedQuery>
            + Clone,
        TInputQuery: Query<MultiDenseVectorInternal>
            + TransformInto<TOriginalQuery, MultiDenseVectorInternal, TypedMultiDenseVector<TElement>>,
    {
        let original_query: TOriginalQuery = raw_query
            .transform(|vector| {
                // Preprocess each inner vector independently, then re-flatten.
                let mut preprocessed = Vec::new();
                for slice in vector.multi_vectors() {
                    preprocessed.extend_from_slice(&TMetric::preprocess(slice.to_vec()));
                }
                let preprocessed = MultiDenseVectorInternal::new(preprocessed, vector.dim);
                let converted =
                    TElement::from_float_multivector(CowMultiVector::Owned(preprocessed))
                        .to_owned();
                Ok(converted)
            })
            // unwrap is safe: the closure above always returns Ok.
            .unwrap();
        let query: TQuery = original_query
            .transform(|original_vector| {
                let original_vector_prequantized = TElement::quantization_preprocess(
                    quantization_config,
                    TMetric::distance(),
                    &original_vector.flattened_vectors,
                );
                Ok(quantized_multivector_storage.encode_query(&original_vector_prequantized))
            })
            // unwrap is safe: the closure above always returns Ok.
            .unwrap();
        // CPU cost is measured per element byte; IO only counts for on-disk data.
        hardware_counter.set_cpu_multiplier(size_of::<TElement>());
        hardware_counter
            .set_vector_io_read_multiplier(usize::from(quantized_multivector_storage.is_on_disk()));
        Self {
            query,
            quantized_multivector_storage,
            metric: PhantomData,
            element: PhantomData,
            hardware_counter,
        }
    }
}
impl<TElement, TMetric, TEncodedVectors, TQuery> QueryScorer
    for QuantizedMultiCustomQueryScorer<'_, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors + MultivectorOffsets,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    type TVector = [TElement];
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let multi_vector_offset = self.quantized_multivector_storage.get_offset(idx);
        let sub_vectors_count = multi_vector_offset.count as usize;
        // compute vector IO read once for all examples
        self.hardware_counter.vector_io_read().incr_delta(
            size_of::<MultivectorOffset>()
                + self.quantized_multivector_storage.quantized_vector_size() * sub_vectors_count,
        );
        self.query.score_by(|this| {
            // quantized multivector storage handles hardware counter to batch vector IO
            self.quantized_multivector_storage
                .score_point(this, idx, &self.hardware_counter)
        })
    }
    /// Unsupported: quantized scoring works on encoded queries, not raw vectors.
    fn score(&self, _v2: &[TElement]) -> ScoreType {
        unimplemented!("This method is not expected to be called for quantized scorer");
    }
    fn score_internal(&self, _point_a: PointOffsetType, _point_b: PointOffsetType) -> ScoreType {
        unimplemented!("Custom scorer compares against multiple vectors, not just one")
    }
    // Byte-level scoring is statically disabled for custom multivector queries.
    type SupportsBytes = False;
    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this branch can never execute.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/mod.rs | lib/segment/src/vector_storage/quantized/mod.rs | mod quantized_chunked_mmap_storage;
// Private scorer/storage backends; only the multivector storage, the query
// scorer, and the `quantized_vectors` facade are exposed outside this module.
mod quantized_custom_query_scorer;
mod quantized_mmap_storage;
mod quantized_multi_custom_query_scorer;
mod quantized_multi_query_scorer;
pub mod quantized_multivector_storage;
pub mod quantized_query_scorer;
mod quantized_ram_storage;
mod quantized_scorer_builder;
pub mod quantized_vectors;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_mmap_storage.rs | lib/segment/src/vector_storage/quantized/quantized_mmap_storage.rs | use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use fs_err::OpenOptions;
use memmap2::{Mmap, MmapMut};
use memory::madvise;
use memory::madvise::Madviseable;
use memory::mmap_type::MmapFlusher;
/// Read-only, file-backed storage of quantized vectors: one contiguous mmap
/// of fixed-size encoded vectors.
#[derive(Debug)]
pub struct QuantizedMmapStorage {
    mmap: Mmap,
    // Size in bytes of one encoded vector; non-zero by construction.
    quantized_vector_size: NonZeroUsize,
    path: PathBuf,
}
impl QuantizedMmapStorage {
    /// Prefault all mapped pages into RAM.
    pub fn populate(&self) {
        self.mmap.populate();
    }
}
/// Write-side counterpart of [`QuantizedMmapStorage`]: appends encoded
/// vectors sequentially into a pre-sized writable mmap, then converts it to
/// the read-only storage via `build()`.
pub struct QuantizedMmapStorageBuilder {
    mmap: MmapMut,
    // Next write position in bytes within the mmap.
    cursor_pos: usize,
    quantized_vector_size: NonZeroUsize,
    path: PathBuf,
}
impl QuantizedMmapStorage {
    /// Open an existing encoded-vectors file read-only.
    ///
    /// # Errors
    /// Fails if `quantized_vector_size` is zero, the file cannot be opened or
    /// mapped, or the file size is not a multiple of `quantized_vector_size`.
    pub fn from_file(
        path: &Path,
        quantized_vector_size: usize,
    ) -> std::io::Result<QuantizedMmapStorage> {
        let file = OpenOptions::new().read(true).open(path)?;
        // SAFETY: read-only mapping of a regular file opened above; callers
        // must not truncate the file while the mapping is alive.
        let mmap = unsafe { Mmap::map(&file)? };
        madvise::madvise(&mmap, madvise::get_global())?;
        let quantized_vector_size = NonZeroUsize::new(quantized_vector_size).ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "`quantized_vector_size` must be non-zero",
            )
        })?;
        // Reject corrupt/mismatched files early instead of serving garbage slices.
        if !mmap.len().is_multiple_of(quantized_vector_size.get()) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!(
                    "Encoded file size ({}) is not a multiple of quantized_vector_size ({})",
                    mmap.len(),
                    quantized_vector_size
                ),
            ));
        }
        Ok(Self {
            mmap,
            quantized_vector_size,
            path: path.to_path_buf(),
        })
    }
}
impl quantization::EncodedStorage for QuantizedMmapStorage {
    /// Return the encoded bytes of vector `index`, or an empty slice when
    /// `index` is out of range of the mapped file.
    fn get_vector_data(&self, index: PointOffsetType) -> &[u8] {
        // Do the range arithmetic in `usize`: the previous `(index + 1)` was
        // computed in the 32-bit `PointOffsetType`, which overflows for
        // `index == u32::MAX` (panic in debug builds; in release it wraps to
        // `end == 0`, silently returning an empty slice for a valid layout).
        let start = self.quantized_vector_size.get() * index as usize;
        let end = start + self.quantized_vector_size.get();
        self.mmap.get(start..end).unwrap_or(&[])
    }
    /// Mmap storage is read-only; upserts are always rejected.
    fn upsert_vector(
        &mut self,
        _id: PointOffsetType,
        _vector: &[u8],
        _hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        Err(std::io::Error::new(
            std::io::ErrorKind::Unsupported,
            "Cannot upsert vector in mmap storage",
        ))
    }
    fn is_on_disk(&self) -> bool {
        true
    }
    fn vectors_count(&self) -> usize {
        self.mmap.len() / self.quantized_vector_size.get()
    }
    fn flusher(&self) -> MmapFlusher {
        // Mmap storage does not need a flusher, as it is non-appendable and already backed by a file.
        Box::new(|| Ok(()))
    }
    fn files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }
    fn immutable_files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }
}
impl quantization::EncodedStorageBuilder for QuantizedMmapStorageBuilder {
    type Storage = QuantizedMmapStorage;
    /// Flush all written bytes and remap the file read-only, producing the
    /// final immutable storage.
    fn build(self) -> std::io::Result<QuantizedMmapStorage> {
        self.mmap.flush()?;
        let mmap = self.mmap.make_read_only()?;
        Ok(QuantizedMmapStorage {
            mmap,
            quantized_vector_size: self.quantized_vector_size,
            path: self.path,
        })
    }
    /// Append one encoded vector at the current cursor position.
    /// Size and capacity are checked only via `debug_assert!`; in release
    /// builds an oversized push would panic on the slice copy instead.
    fn push_vector_data(&mut self, other: &[u8]) -> std::io::Result<()> {
        debug_assert_eq!(
            self.quantized_vector_size.get(),
            other.len(),
            "Pushed vector size does not match expected quantized vector size"
        );
        debug_assert!(
            self.cursor_pos + other.len() <= self.mmap.len(),
            "Overflow allocated quantization storage mmap file (cursor_pos {} + len {} > total {})",
            self.cursor_pos,
            other.len(),
            self.mmap.len()
        );
        self.mmap[self.cursor_pos..self.cursor_pos + other.len()].copy_from_slice(other);
        self.cursor_pos += other.len();
        Ok(())
    }
}
impl QuantizedMmapStorageBuilder {
    /// Create a writable mmap-backed builder at `path`, pre-sized for
    /// `vectors_count` vectors of `quantized_vector_size` bytes each.
    ///
    /// # Errors
    /// Fails if `quantized_vector_size` is zero, `path` has no parent
    /// directory, or any filesystem / mmap operation fails.
    pub fn new(
        path: &Path,
        vectors_count: usize,
        quantized_vector_size: usize,
    ) -> std::io::Result<Self> {
        // Validate once up front. Previously the zero check was duplicated:
        // an early `== 0` guard plus a second `NonZeroUsize::new(...).ok_or_else`
        // at the end, with two different error messages for the same condition.
        let quantized_vector_size = NonZeroUsize::new(quantized_vector_size).ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "`quantized_vector_size` must be non-zero",
            )
        })?;
        let encoded_storage_size = quantized_vector_size.get() * vectors_count;
        path.parent()
            .ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    "Path must have a parent directory",
                )
            })
            .and_then(fs::create_dir_all)?;
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            // Don't truncate because we explicitly set the length later
            .truncate(false)
            .open(path)?;
        file.set_len(encoded_storage_size as u64)?;
        // SAFETY: the file was just opened read-write and sized above; the
        // mapping is private to this builder until `build()` remaps read-only.
        let mmap = unsafe { MmapMut::map_mut(&file) }?;
        madvise::madvise(&mmap, madvise::get_global())?;
        Ok(Self {
            mmap,
            cursor_pos: 0,
            quantized_vector_size,
            path: path.to_path_buf(),
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_scorer_builder.rs | lib/segment/src/vector_storage/quantized/quantized_scorer_builder.rs | use common::counter::hardware_counter::HardwareCounterCell;
use quantization::EncodedVectors;
use super::quantized_custom_query_scorer::QuantizedCustomQueryScorer;
use super::quantized_query_scorer::QuantizedQueryScorer;
use super::quantized_vectors::QuantizedVectorStorage;
use crate::common::operation_error::OperationResult;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
DenseVector, MultiDenseVectorInternal, QueryVector, VectorElementType, VectorElementTypeByte,
VectorElementTypeHalf,
};
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric, EuclidMetric, ManhattanMetric};
use crate::types::{Distance, QuantizationConfig, VectorStorageDatatype};
use crate::vector_storage::quantized::quantized_multi_custom_query_scorer::QuantizedMultiCustomQueryScorer;
use crate::vector_storage::quantized::quantized_multi_query_scorer::QuantizedMultiQueryScorer;
use crate::vector_storage::quantized::quantized_multivector_storage::MultivectorOffsets;
use crate::vector_storage::query::{
ContextQuery, DiscoveryQuery, NaiveFeedbackQuery, RecoBestScoreQuery, RecoQuery,
RecoSumScoresQuery, TransformInto,
};
use crate::vector_storage::{RawScorer, raw_scorer_from_query_scorer};
/// Dispatch helper: holds everything needed to build a `RawScorer` for one
/// query over any quantized storage variant, element datatype, and distance.
pub(super) struct QuantizedScorerBuilder<'a> {
    quantized_storage: &'a QuantizedVectorStorage,
    quantization_config: &'a QuantizationConfig,
    query: QueryVector,
    distance: &'a Distance,
    datatype: VectorStorageDatatype,
    hardware_counter: HardwareCounterCell,
}
impl<'a> QuantizedScorerBuilder<'a> {
    /// Collect all inputs for scorer construction; IO accounting is enabled
    /// only when the quantized storage actually lives on disk.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        quantized_storage: &'a QuantizedVectorStorage,
        quantization_config: &'a QuantizationConfig,
        query: QueryVector,
        distance: &'a Distance,
        datatype: VectorStorageDatatype,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        hardware_counter.set_vector_io_read_multiplier(usize::from(quantized_storage.is_on_disk()));
        Self {
            quantized_storage,
            quantization_config,
            query,
            distance,
            datatype,
            hardware_counter,
        }
    }
    /// First dispatch level: resolve the (datatype, distance) pair into
    /// concrete `TElement`/`TMetric` type parameters.
    pub fn build(self) -> OperationResult<Box<dyn RawScorer + 'a>> {
        match self.datatype {
            VectorStorageDatatype::Float32 => match self.distance {
                Distance::Cosine => self.build_with_metric::<VectorElementType, CosineMetric>(),
                Distance::Euclid => self.build_with_metric::<VectorElementType, EuclidMetric>(),
                Distance::Dot => self.build_with_metric::<VectorElementType, DotProductMetric>(),
                Distance::Manhattan => {
                    self.build_with_metric::<VectorElementType, ManhattanMetric>()
                }
            },
            VectorStorageDatatype::Uint8 => match self.distance {
                Distance::Cosine => self.build_with_metric::<VectorElementTypeByte, CosineMetric>(),
                Distance::Euclid => self.build_with_metric::<VectorElementTypeByte, EuclidMetric>(),
                Distance::Dot => {
                    self.build_with_metric::<VectorElementTypeByte, DotProductMetric>()
                }
                Distance::Manhattan => {
                    self.build_with_metric::<VectorElementTypeByte, ManhattanMetric>()
                }
            },
            VectorStorageDatatype::Float16 => match self.distance {
                Distance::Cosine => self.build_with_metric::<VectorElementTypeHalf, CosineMetric>(),
                Distance::Euclid => self.build_with_metric::<VectorElementTypeHalf, EuclidMetric>(),
                Distance::Dot => {
                    self.build_with_metric::<VectorElementTypeHalf, DotProductMetric>()
                }
                Distance::Manhattan => {
                    self.build_with_metric::<VectorElementTypeHalf, ManhattanMetric>()
                }
            },
        }
    }
    /// Second dispatch level: resolve the storage variant (scalar / PQ /
    /// binary × RAM / mmap / chunked-mmap, single- or multi-vector) and build
    /// the matching scorer kind.
    pub fn build_with_metric<TElement, TMetric>(self) -> OperationResult<Box<dyn RawScorer + 'a>>
    where
        TElement: PrimitiveVectorElement,
        TMetric: Metric<TElement> + 'a,
    {
        match self.quantized_storage {
            QuantizedVectorStorage::ScalarRam(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::ScalarMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::ScalarChunkedMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQRam(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQChunkedMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryRam(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryChunkedMmap(storage) => {
                self.new_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::ScalarRamMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::ScalarMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::ScalarChunkedMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQRamMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::PQChunkedMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryRamMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
            QuantizedVectorStorage::BinaryChunkedMmapMulti(storage) => {
                self.new_multi_quantized_scorer::<TElement, TMetric>(storage)
            }
        }
    }
    /// Final dispatch for single-vector storage: choose the scorer by query
    /// kind (nearest vs. the custom recommend/discovery/context/feedback forms).
    fn new_quantized_scorer<TElement, TMetric>(
        self,
        quantized_storage: &'a impl EncodedVectors,
    ) -> OperationResult<Box<dyn RawScorer + 'a>>
    where
        TElement: PrimitiveVectorElement,
        TMetric: Metric<TElement> + 'a,
    {
        let Self {
            quantized_storage: _same_as_quantized_storage_in_args,
            quantization_config,
            query,
            distance: _,
            datatype: _,
            hardware_counter,
        } = self;
        match query {
            QueryVector::Nearest(vector) => {
                let query_scorer = QuantizedQueryScorer::<_>::new::<TElement, TMetric>(
                    DenseVector::try_from(vector)?,
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::RecommendBestScore(reco_query) => {
                let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
                let query_scorer = QuantizedCustomQueryScorer::<TElement, TMetric, _, _>::new(
                    RecoBestScoreQuery::from(reco_query),
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::RecommendSumScores(reco_query) => {
                let reco_query: RecoQuery<DenseVector> = reco_query.transform_into()?;
                let query_scorer = QuantizedCustomQueryScorer::<TElement, TMetric, _, _>::new(
                    RecoSumScoresQuery::from(reco_query),
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::Discovery(discovery_query) => {
                let discovery_query: DiscoveryQuery<DenseVector> =
                    discovery_query.transform_into()?;
                let query_scorer = QuantizedCustomQueryScorer::<TElement, TMetric, _, _>::new(
                    discovery_query,
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::Context(context_query) => {
                let context_query: ContextQuery<DenseVector> = context_query.transform_into()?;
                let query_scorer = QuantizedCustomQueryScorer::<TElement, TMetric, _, _>::new(
                    context_query,
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::FeedbackNaive(feedback_query) => {
                let feedback_query: NaiveFeedbackQuery<DenseVector> =
                    feedback_query.transform_into()?;
                let query_scorer = QuantizedCustomQueryScorer::<TElement, TMetric, _, _>::new(
                    feedback_query.into_query(),
                    quantized_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
        }
    }
    /// Final dispatch for multivector storage; mirrors `new_quantized_scorer`
    /// but with multivector queries and MaxSim-capable scorers.
    fn new_multi_quantized_scorer<TElement, TMetric>(
        self,
        quantized_multivector_storage: &'a (impl EncodedVectors + MultivectorOffsets),
    ) -> OperationResult<Box<dyn RawScorer + 'a>>
    where
        TElement: PrimitiveVectorElement,
        TMetric: Metric<TElement> + 'a,
    {
        let Self {
            quantized_storage: _same_as_quantized_storage_in_args,
            quantization_config,
            query,
            distance: _,
            datatype: _,
            hardware_counter,
        } = self;
        match query {
            QueryVector::Nearest(vector) => {
                let query_scorer = QuantizedMultiQueryScorer::<TElement, TMetric, _>::new_multi(
                    &MultiDenseVectorInternal::try_from(vector)?,
                    quantized_multivector_storage,
                    quantization_config,
                    hardware_counter,
                );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::RecommendBestScore(reco_query) => {
                let reco_query: RecoQuery<MultiDenseVectorInternal> =
                    reco_query.transform_into()?;
                let query_scorer =
                    QuantizedMultiCustomQueryScorer::<TElement, TMetric, _, _>::new_multi(
                        RecoBestScoreQuery::from(reco_query),
                        quantized_multivector_storage,
                        quantization_config,
                        hardware_counter,
                    );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::RecommendSumScores(reco_query) => {
                let reco_query: RecoQuery<MultiDenseVectorInternal> =
                    reco_query.transform_into()?;
                let query_scorer =
                    QuantizedMultiCustomQueryScorer::<TElement, TMetric, _, _>::new_multi(
                        RecoSumScoresQuery::from(reco_query),
                        quantized_multivector_storage,
                        quantization_config,
                        hardware_counter,
                    );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::Discovery(discovery_query) => {
                let discovery_query: DiscoveryQuery<MultiDenseVectorInternal> =
                    discovery_query.transform_into()?;
                let query_scorer =
                    QuantizedMultiCustomQueryScorer::<TElement, TMetric, _, _>::new_multi(
                        discovery_query,
                        quantized_multivector_storage,
                        quantization_config,
                        hardware_counter,
                    );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::Context(context_query) => {
                let context_query: ContextQuery<MultiDenseVectorInternal> =
                    context_query.transform_into()?;
                let query_scorer =
                    QuantizedMultiCustomQueryScorer::<TElement, TMetric, _, _>::new_multi(
                        context_query,
                        quantized_multivector_storage,
                        quantization_config,
                        hardware_counter,
                    );
                raw_scorer_from_query_scorer(query_scorer)
            }
            QueryVector::FeedbackNaive(feedback_query) => {
                let feedback_query: NaiveFeedbackQuery<MultiDenseVectorInternal> =
                    feedback_query.transform_into()?;
                let query_scorer =
                    QuantizedMultiCustomQueryScorer::<TElement, TMetric, _, _>::new_multi(
                        feedback_query.into_query(),
                        quantized_multivector_storage,
                        quantization_config,
                        hardware_counter,
                    );
                raw_scorer_from_query_scorer(query_scorer)
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_chunked_mmap_storage.rs | lib/segment/src/vector_storage/quantized/quantized_chunked_mmap_storage.rs | use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use memory::madvise::{Advice, AdviceSetting};
use memory::mmap_type::MmapFlusher;
use crate::common::operation_error::OperationResult;
use crate::vector_storage::Random;
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
/// Quantized vector storage backed by an appendable, chunked memory-mapped file.
pub struct QuantizedChunkedMmapStorage {
    // One fixed-size quantized vector (raw u8 bytes) per point offset.
    data: ChunkedMmapVectors<u8>,
}
impl QuantizedChunkedMmapStorage {
    /// Open (or create) chunked mmap storage for fixed-size quantized vectors at `path`.
    ///
    /// When `in_ram` is set, the mapping is advised as `Normal` and pages are
    /// populated eagerly; otherwise the globally configured advice is used.
    pub fn new(path: &Path, quantized_vector_size: usize, in_ram: bool) -> OperationResult<Self> {
        let advice = match in_ram {
            true => AdviceSetting::from(Advice::Normal),
            false => AdviceSetting::Global,
        };
        let populate = Some(in_ram);
        let vectors =
            ChunkedMmapVectors::<u8>::open(path, quantized_vector_size, advice, populate)?;
        Ok(Self { data: vectors })
    }

    /// Eagerly fault the mmap pages into memory.
    pub fn populate(&self) -> OperationResult<()> {
        self.data.populate()
    }
}
impl quantization::EncodedStorage for QuantizedChunkedMmapStorage {
    /// Borrow the raw quantized bytes for `index`; empty slice when absent.
    fn get_vector_data(&self, index: PointOffsetType) -> &[u8] {
        let offset = index as VectorOffsetType;
        ChunkedVectorStorage::get::<Random>(&self.data, offset).unwrap_or_default()
    }

    /// Insert or overwrite the quantized vector stored at `id`.
    fn upsert_vector(
        &mut self,
        id: PointOffsetType,
        vector: &[u8],
        hw_counter: &common::counter::hardware_counter::HardwareCounterCell,
    ) -> std::io::Result<()> {
        let offset = id as VectorOffsetType;
        ChunkedVectorStorage::insert(&mut self.data, offset, vector, hw_counter)
            .map_err(std::io::Error::other)
    }

    /// Mmap-backed data always lives on disk.
    fn is_on_disk(&self) -> bool {
        true
    }

    fn vectors_count(&self) -> usize {
        self.data.len()
    }

    /// Flusher persisting pending mmap writes, mapping failures into `io::Error`.
    fn flusher(&self) -> MmapFlusher {
        let inner = self.data.flusher();
        Box::new(move || {
            inner().map_err(|e| {
                std::io::Error::other(format!("Failed to flush quantization storage: {e}"))
            })
        })
    }

    fn files(&self) -> Vec<PathBuf> {
        ChunkedMmapVectors::files(&self.data)
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        ChunkedMmapVectors::immutable_files(&self.data)
    }
}
/// Builder that accumulates quantized vectors into a chunked mmap file.
#[allow(dead_code)]
pub struct QuantizedChunkedMmapStorageBuilder {
    // Destination chunked mmap storage being filled.
    data: ChunkedMmapVectors<u8>,
    // Disposable counter handed to pushes; builder-side writes are not measured.
    hw_counter: HardwareCounterCell,
}
impl QuantizedChunkedMmapStorageBuilder {
    /// Create a builder writing quantized vectors of `quantized_vector_size` bytes to `path`.
    #[allow(dead_code)]
    pub fn new(path: &Path, quantized_vector_size: usize, in_ram: bool) -> OperationResult<Self> {
        let advice = match in_ram {
            true => AdviceSetting::from(Advice::Normal),
            false => AdviceSetting::Global,
        };
        let data = ChunkedMmapVectors::<u8>::open(
            path,
            quantized_vector_size,
            advice,
            Some(in_ram), // populate pages eagerly when kept in RAM
        )?;
        let hw_counter = HardwareCounterCell::disposable();
        Ok(Self { data, hw_counter })
    }
}
impl quantization::EncodedStorageBuilder for QuantizedChunkedMmapStorageBuilder {
    type Storage = QuantizedChunkedMmapStorage;

    /// Flush all pending writes and convert the builder into the final storage.
    fn build(self) -> std::io::Result<QuantizedChunkedMmapStorage> {
        let Self {
            data,
            hw_counter: _,
        } = self;
        let flush = data.flusher();
        flush().map_err(|e| {
            std::io::Error::other(format!("Failed to flush quantization storage: {e}"))
        })?;
        Ok(QuantizedChunkedMmapStorage { data })
    }

    /// Append one quantized vector to the end of the storage.
    fn push_vector_data(&mut self, other: &[u8]) -> std::io::Result<()> {
        match self.data.push(other, &self.hw_counter) {
            Ok(_) => Ok(()),
            Err(e) => Err(std::io::Error::other(format!(
                "Failed to push vector data: {e}"
            ))),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_vectors.rs | lib/segment/src/vector_storage/quantized/quantized_vectors.rs | use std::alloc::Layout;
use std::fmt;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use io::file_operations::{atomic_save_json, read_json};
use memory::fadvise::clear_disk_cache;
use quantization::encoded_vectors_binary::EncodedVectorsBin;
use quantization::encoded_vectors_u8::ScalarQuantizationMethod;
use quantization::{EncodedVectors, EncodedVectorsPQ, EncodedVectorsU8};
use serde::{Deserialize, Serialize};
use super::quantized_multivector_storage::{
MultivectorOffset, MultivectorOffsetsStorageMmap, QuantizedMultivectorStorage,
};
use super::quantized_scorer_builder::QuantizedScorerBuilder;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{QueryVector, VectorElementType, VectorRef};
use crate::types::{
BinaryQuantization, BinaryQuantizationConfig, BinaryQuantizationEncoding,
BinaryQuantizationQueryEncoding, CompressionRatio, Distance, MultiVectorConfig,
ProductQuantization, ProductQuantizationConfig, QuantizationConfig, ScalarQuantization,
ScalarQuantizationConfig, ScalarType, VectorStorageDatatype,
};
use crate::vector_storage::quantized::quantized_chunked_mmap_storage::{
QuantizedChunkedMmapStorage, QuantizedChunkedMmapStorageBuilder,
};
use crate::vector_storage::quantized::quantized_mmap_storage::{
QuantizedMmapStorage, QuantizedMmapStorageBuilder,
};
use crate::vector_storage::quantized::quantized_multivector_storage::{
MultivectorOffsetsStorageChunkedMmap, MultivectorOffsetsStorageRam,
};
use crate::vector_storage::quantized::quantized_query_scorer::{
InternalScorerUnsupported, QuantizedQueryScorer,
};
use crate::vector_storage::quantized::quantized_ram_storage::{
QuantizedRamStorage, QuantizedRamStorageBuilder,
};
use crate::vector_storage::{
DenseVectorStorage, MultiVectorStorage, Random, RawScorer, RawScorerImpl, Sequential,
VectorStorage, VectorStorageEnum,
};
// File names used for quantized data inside a segment directory.
// `*_APPENDABLE_*` names belong to the mutable storage layout; the dotted names
// belong to the immutable (build-once) layout (see `get_data_path` / `get_offsets_path`).
pub const QUANTIZED_CONFIG_PATH: &str = "quantized.config.json";
pub const QUANTIZED_DATA_PATH: &str = "quantized.data";
pub const QUANTIZED_APPENDABLE_DATA_PATH: &str = "quantized_data";
pub const QUANTIZED_META_PATH: &str = "quantized.meta.json";
pub const QUANTIZED_OFFSETS_PATH: &str = "quantized.offsets.data";
pub const QUANTIZED_APPENDABLE_OFFSETS_PATH: &str = "quantized_offsets_data";
/// Persisted configuration describing how the vectors of this segment were quantized.
#[derive(Deserialize, Serialize, Clone)]
pub struct QuantizedVectorsConfig {
    /// Quantization scheme (scalar / product / binary) and its parameters.
    pub quantization_config: QuantizationConfig,
    /// Parameters (distance, dimension, count, ...) the encoder was constructed with.
    pub vector_parameters: quantization::VectorParameters,
    /// Storage mutability; defaults to `Immutable` and is omitted from JSON when
    /// immutable, so configs lacking the field still deserialize.
    #[serde(default)]
    #[serde(skip_serializing_if = "QuantizedVectorsStorageType::is_immutable")]
    pub storage_type: QuantizedVectorsStorageType,
}
impl fmt::Debug for QuantizedVectorsConfig {
    /// Debug output shows only the quantization config; remaining fields are elided
    /// (rendered as `..` by `finish_non_exhaustive`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = f.debug_struct("QuantizedVectorsConfig");
        out.field("quantization_config", &self.quantization_config);
        out.finish_non_exhaustive()
    }
}
/// Whether the quantized storage can be appended to after creation.
#[derive(Deserialize, Serialize, Clone, Copy, Debug, Eq, PartialEq, Default)]
pub enum QuantizedVectorsStorageType {
    /// Built once; uses the immutable file layout.
    #[default]
    Immutable,
    /// Appendable; uses the mutable file layout.
    Mutable,
}
impl QuantizedVectorsStorageType {
    /// True for the (default) immutable storage layout.
    /// Also used as the serde `skip_serializing_if` predicate for the config field.
    pub fn is_immutable(&self) -> bool {
        match self {
            QuantizedVectorsStorageType::Immutable => true,
            QuantizedVectorsStorageType::Mutable => false,
        }
    }
}
// Shorthand aliases for quantized multivector storages: each pairs an encoded
// vector container (scalar u8 / PQ / binary) with an offsets-storage flavor
// (RAM, immutable mmap, or appendable chunked mmap).
type ScalarRamMulti = QuantizedMultivectorStorage<
    EncodedVectorsU8<QuantizedRamStorage>,
    MultivectorOffsetsStorageRam,
>;
type ScalarMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsU8<QuantizedMmapStorage>,
    MultivectorOffsetsStorageMmap,
>;
type ScalarChunkedMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsU8<QuantizedChunkedMmapStorage>,
    MultivectorOffsetsStorageChunkedMmap,
>;
type PQRamMulti = QuantizedMultivectorStorage<
    EncodedVectorsPQ<QuantizedRamStorage>,
    MultivectorOffsetsStorageRam,
>;
type PQMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsPQ<QuantizedMmapStorage>,
    MultivectorOffsetsStorageMmap,
>;
type PQChunkedMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsPQ<QuantizedChunkedMmapStorage>,
    MultivectorOffsetsStorageChunkedMmap,
>;
type BinaryRamMulti = QuantizedMultivectorStorage<
    EncodedVectorsBin<u8, QuantizedRamStorage>,
    MultivectorOffsetsStorageRam,
>;
type BinaryMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsBin<u8, QuantizedMmapStorage>,
    MultivectorOffsetsStorageMmap,
>;
type BinaryChunkedMmapMulti = QuantizedMultivectorStorage<
    EncodedVectorsBin<u8, QuantizedChunkedMmapStorage>,
    MultivectorOffsetsStorageChunkedMmap,
>;
/// All supported quantized storage backends: {scalar, PQ, binary} encodings
/// crossed with {RAM, immutable mmap, appendable chunked mmap} placements,
/// each in a single-vector and a multivector (`*Multi`) flavor.
pub enum QuantizedVectorStorage {
    ScalarRam(EncodedVectorsU8<QuantizedRamStorage>),
    ScalarMmap(EncodedVectorsU8<QuantizedMmapStorage>),
    ScalarChunkedMmap(EncodedVectorsU8<QuantizedChunkedMmapStorage>),
    PQRam(EncodedVectorsPQ<QuantizedRamStorage>),
    PQMmap(EncodedVectorsPQ<QuantizedMmapStorage>),
    PQChunkedMmap(EncodedVectorsPQ<QuantizedChunkedMmapStorage>),
    BinaryRam(EncodedVectorsBin<u128, QuantizedRamStorage>),
    BinaryMmap(EncodedVectorsBin<u128, QuantizedMmapStorage>),
    BinaryChunkedMmap(EncodedVectorsBin<u128, QuantizedChunkedMmapStorage>),
    ScalarRamMulti(ScalarRamMulti),
    ScalarMmapMulti(ScalarMmapMulti),
    ScalarChunkedMmapMulti(ScalarChunkedMmapMulti),
    PQRamMulti(PQRamMulti),
    PQMmapMulti(PQMmapMulti),
    PQChunkedMmapMulti(PQChunkedMmapMulti),
    BinaryRamMulti(BinaryRamMulti),
    BinaryMmapMulti(BinaryMmapMulti),
    BinaryChunkedMmapMulti(BinaryChunkedMmapMulti),
}
impl QuantizedVectorStorage {
pub fn is_on_disk(&self) -> bool {
match self {
QuantizedVectorStorage::ScalarRam(q) => q.is_on_disk(),
QuantizedVectorStorage::ScalarMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::ScalarChunkedMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::PQRam(q) => q.is_on_disk(),
QuantizedVectorStorage::PQMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::PQChunkedMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryRam(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryChunkedMmap(q) => q.is_on_disk(),
QuantizedVectorStorage::ScalarRamMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::ScalarMmapMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::ScalarChunkedMmapMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::PQRamMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::PQMmapMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::PQChunkedMmapMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryRamMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryMmapMulti(q) => q.is_on_disk(),
QuantizedVectorStorage::BinaryChunkedMmapMulti(q) => q.is_on_disk(),
}
}
}
impl fmt::Debug for QuantizedVectorStorage {
    /// Opaque debug representation: prints only the type name, keeping the
    /// contained encoded storages out of the output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = f.debug_tuple("QuantizedVectorStorage");
        tuple.finish()
    }
}
/// Quantized counterpart of a vector storage: the encoded vectors plus the
/// configuration needed to build scorers against them.
#[derive(Debug)]
pub struct QuantizedVectors {
    // Concrete encoded backend (see `QuantizedVectorStorage`).
    storage_impl: QuantizedVectorStorage,
    // Persisted quantization parameters (saved as `quantized.config.json`).
    config: QuantizedVectorsConfig,
    // Directory holding the quantized files.
    path: PathBuf,
    // Distance of the original vectors; forwarded to the scorer builder.
    distance: Distance,
    // Element datatype of the original storage; forwarded to the scorer builder.
    datatype: VectorStorageDatatype,
}
impl QuantizedVectors {
/// Quantization configuration this storage was built with.
pub fn config(&self) -> &QuantizedVectorsConfig {
    &self.config
}
pub fn default_rescoring(&self) -> bool {
match self.storage_impl {
QuantizedVectorStorage::ScalarRam(_) => false,
QuantizedVectorStorage::ScalarMmap(_) => false,
QuantizedVectorStorage::ScalarChunkedMmap(_) => false,
QuantizedVectorStorage::PQRam(_) => false,
QuantizedVectorStorage::PQMmap(_) => false,
QuantizedVectorStorage::PQChunkedMmap(_) => false,
QuantizedVectorStorage::BinaryRam(_) => true,
QuantizedVectorStorage::BinaryMmap(_) => true,
QuantizedVectorStorage::BinaryChunkedMmap(_) => true,
QuantizedVectorStorage::ScalarRamMulti(_) => false,
QuantizedVectorStorage::ScalarMmapMulti(_) => false,
QuantizedVectorStorage::ScalarChunkedMmapMulti(_) => false,
QuantizedVectorStorage::PQRamMulti(_) => false,
QuantizedVectorStorage::PQMmapMulti(_) => false,
QuantizedVectorStorage::PQChunkedMmapMulti(_) => false,
QuantizedVectorStorage::BinaryRamMulti(_) => true,
QuantizedVectorStorage::BinaryMmapMulti(_) => true,
QuantizedVectorStorage::BinaryChunkedMmapMulti(_) => true,
}
}
pub fn is_multivector(&self) -> bool {
match self.storage_impl {
QuantizedVectorStorage::ScalarRam(_) => false,
QuantizedVectorStorage::ScalarMmap(_) => false,
QuantizedVectorStorage::ScalarChunkedMmap(_) => false,
QuantizedVectorStorage::PQRam(_) => false,
QuantizedVectorStorage::PQMmap(_) => false,
QuantizedVectorStorage::PQChunkedMmap(_) => false,
QuantizedVectorStorage::BinaryRam(_) => false,
QuantizedVectorStorage::BinaryMmap(_) => false,
QuantizedVectorStorage::BinaryChunkedMmap(_) => false,
QuantizedVectorStorage::ScalarRamMulti(_) => true,
QuantizedVectorStorage::ScalarMmapMulti(_) => true,
QuantizedVectorStorage::ScalarChunkedMmapMulti(_) => true,
QuantizedVectorStorage::PQRamMulti(_) => true,
QuantizedVectorStorage::PQMmapMulti(_) => true,
QuantizedVectorStorage::PQChunkedMmapMulti(_) => true,
QuantizedVectorStorage::BinaryRamMulti(_) => true,
QuantizedVectorStorage::BinaryMmapMulti(_) => true,
QuantizedVectorStorage::BinaryChunkedMmapMulti(_) => true,
}
}
/// Get layout for a single quantized vector.
///
/// I.e. the size of a single vector in bytes, and the required alignment.
pub fn get_quantized_vector_layout(&self) -> OperationResult<Layout> {
match &self.storage_impl {
QuantizedVectorStorage::ScalarRam(storage) => Ok(storage.layout()),
QuantizedVectorStorage::ScalarMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::ScalarChunkedMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::PQRam(storage) => Ok(storage.layout()),
QuantizedVectorStorage::PQMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::PQChunkedMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::BinaryRam(storage) => Ok(storage.layout()),
QuantizedVectorStorage::BinaryMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::BinaryChunkedMmap(storage) => Ok(storage.layout()),
QuantizedVectorStorage::ScalarRamMulti(_)
| QuantizedVectorStorage::ScalarMmapMulti(_)
| QuantizedVectorStorage::ScalarChunkedMmapMulti(_)
| QuantizedVectorStorage::PQRamMulti(_)
| QuantizedVectorStorage::PQMmapMulti(_)
| QuantizedVectorStorage::PQChunkedMmapMulti(_)
| QuantizedVectorStorage::BinaryRamMulti(_)
| QuantizedVectorStorage::BinaryMmapMulti(_)
| QuantizedVectorStorage::BinaryChunkedMmapMulti(_) => {
Err(OperationError::service_error(
"Cannot get quantized vector layout from multivector storage",
))
}
}
}
pub fn get_quantized_vector(&self, id: PointOffsetType) -> &[u8] {
match &self.storage_impl {
QuantizedVectorStorage::ScalarRam(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::ScalarMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::ScalarChunkedMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::PQRam(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::PQMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::PQChunkedMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::BinaryRam(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::BinaryMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::BinaryChunkedMmap(storage) => storage.get_quantized_vector(id),
QuantizedVectorStorage::ScalarRamMulti(_)
| QuantizedVectorStorage::ScalarMmapMulti(_)
| QuantizedVectorStorage::ScalarChunkedMmapMulti(_)
| QuantizedVectorStorage::PQRamMulti(_)
| QuantizedVectorStorage::PQMmapMulti(_)
| QuantizedVectorStorage::PQChunkedMmapMulti(_)
| QuantizedVectorStorage::BinaryRamMulti(_)
| QuantizedVectorStorage::BinaryMmapMulti(_)
| QuantizedVectorStorage::BinaryChunkedMmapMulti(_) => {
panic!("Cannot get quantized vector from multivector storage");
}
}
}
/// Build a raw scorer evaluating `query` against the quantized data.
pub fn raw_scorer<'a>(
    &'a self,
    query: QueryVector,
    hardware_counter: HardwareCounterCell,
) -> OperationResult<Box<dyn RawScorer + 'a>> {
    let builder = QuantizedScorerBuilder::new(
        &self.storage_impl,
        &self.config.quantization_config,
        query,
        &self.distance,
        self.datatype,
        hardware_counter,
    );
    builder.build()
}
/// Build a raw scorer for the specified `point_id`.
/// If not supported, return [`InternalScorerUnsupported`] with the original `hardware_counter`.
pub fn raw_internal_scorer<'a>(
&'a self,
point_id: PointOffsetType,
hardware_counter: HardwareCounterCell,
) -> Result<Box<dyn RawScorer + 'a>, InternalScorerUnsupported> {
fn build<'a, TEncodedVectors: quantization::EncodedVectors>(
point_id: PointOffsetType,
quantized_data: &'a TEncodedVectors,
hardware_counter: HardwareCounterCell,
) -> Result<Box<dyn RawScorer + 'a>, InternalScorerUnsupported> {
let query_scorer =
QuantizedQueryScorer::new_internal(point_id, quantized_data, hardware_counter)?;
Ok(Box::new(RawScorerImpl { query_scorer }))
}
match &self.storage_impl {
QuantizedVectorStorage::ScalarRam(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::ScalarMmap(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::ScalarChunkedMmap(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::PQRam(storage) => build(point_id, storage, hardware_counter),
QuantizedVectorStorage::PQMmap(storage) => build(point_id, storage, hardware_counter),
QuantizedVectorStorage::PQChunkedMmap(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryRam(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryMmap(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryChunkedMmap(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::ScalarRamMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::ScalarMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::ScalarChunkedMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::PQRamMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::PQMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::PQChunkedMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryRamMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
QuantizedVectorStorage::BinaryChunkedMmapMulti(storage) => {
build(point_id, storage, hardware_counter)
}
}
}
/// Path of the persisted quantization config inside `path`.
fn get_config_path(path: &Path) -> PathBuf {
    path.join(QUANTIZED_CONFIG_PATH)
}

/// Path of the encoded vector data for the given storage mutability.
fn get_data_path(path: &Path, storage_type: QuantizedVectorsStorageType) -> PathBuf {
    let name = match storage_type {
        QuantizedVectorsStorageType::Immutable => QUANTIZED_DATA_PATH,
        QuantizedVectorsStorageType::Mutable => QUANTIZED_APPENDABLE_DATA_PATH,
    };
    path.join(name)
}

/// Path of the quantization metadata file inside `path`.
fn get_meta_path(path: &Path) -> PathBuf {
    path.join(QUANTIZED_META_PATH)
}

/// Path of the multivector offsets data for the given storage mutability.
fn get_offsets_path(path: &Path, storage_type: QuantizedVectorsStorageType) -> PathBuf {
    let name = match storage_type {
        QuantizedVectorsStorageType::Immutable => QUANTIZED_OFFSETS_PATH,
        QuantizedVectorsStorageType::Mutable => QUANTIZED_APPENDABLE_OFFSETS_PATH,
    };
    path.join(name)
}
pub fn files(&self) -> Vec<PathBuf> {
let mut files = match &self.storage_impl {
QuantizedVectorStorage::ScalarRam(q) => q.files(),
QuantizedVectorStorage::ScalarMmap(q) => q.files(),
QuantizedVectorStorage::ScalarChunkedMmap(q) => q.files(),
QuantizedVectorStorage::PQRam(q) => q.files(),
QuantizedVectorStorage::PQMmap(q) => q.files(),
QuantizedVectorStorage::PQChunkedMmap(q) => q.files(),
QuantizedVectorStorage::BinaryRam(q) => q.files(),
QuantizedVectorStorage::BinaryMmap(q) => q.files(),
QuantizedVectorStorage::BinaryChunkedMmap(q) => q.files(),
QuantizedVectorStorage::ScalarRamMulti(q) => q.files(),
QuantizedVectorStorage::ScalarMmapMulti(q) => q.files(),
QuantizedVectorStorage::ScalarChunkedMmapMulti(q) => q.files(),
QuantizedVectorStorage::PQRamMulti(q) => q.files(),
QuantizedVectorStorage::PQMmapMulti(q) => q.files(),
QuantizedVectorStorage::PQChunkedMmapMulti(q) => q.files(),
QuantizedVectorStorage::BinaryRamMulti(q) => q.files(),
QuantizedVectorStorage::BinaryMmapMulti(q) => q.files(),
QuantizedVectorStorage::BinaryChunkedMmapMulti(q) => q.files(),
};
files.push(self.path.join(QUANTIZED_CONFIG_PATH));
files
}
pub fn immutable_files(&self) -> Vec<PathBuf> {
let mut files = match &self.storage_impl {
QuantizedVectorStorage::ScalarRam(q) => q.immutable_files(),
QuantizedVectorStorage::ScalarMmap(q) => q.immutable_files(),
QuantizedVectorStorage::ScalarChunkedMmap(q) => q.immutable_files(),
QuantizedVectorStorage::PQRam(q) => q.immutable_files(),
QuantizedVectorStorage::PQMmap(q) => q.immutable_files(),
QuantizedVectorStorage::PQChunkedMmap(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryRam(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryMmap(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryChunkedMmap(q) => q.immutable_files(),
QuantizedVectorStorage::ScalarRamMulti(q) => q.immutable_files(),
QuantizedVectorStorage::ScalarMmapMulti(q) => q.immutable_files(),
QuantizedVectorStorage::ScalarChunkedMmapMulti(q) => q.immutable_files(),
QuantizedVectorStorage::PQRamMulti(q) => q.immutable_files(),
QuantizedVectorStorage::PQMmapMulti(q) => q.immutable_files(),
QuantizedVectorStorage::PQChunkedMmapMulti(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryRamMulti(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryMmapMulti(q) => q.immutable_files(),
QuantizedVectorStorage::BinaryChunkedMmapMulti(q) => q.immutable_files(),
};
files.push(self.path.join(QUANTIZED_CONFIG_PATH));
files
}
/// Quantize all vectors of `vector_storage` into a new quantized storage under `path`.
///
/// Dispatches on the concrete storage variant: dense storages go through
/// `create_impl`, multi-dense storages through `create_multi_impl`, and sparse
/// storages are rejected with [`OperationError::WrongSparse`].
/// `max_threads` is passed through to the encoders; `stopped` allows cancelling
/// the build.
pub fn create(
    vector_storage: &VectorStorageEnum,
    quantization_config: &QuantizationConfig,
    storage_type: QuantizedVectorsStorageType,
    path: &Path,
    max_threads: usize,
    stopped: &AtomicBool,
) -> OperationResult<Self> {
    match vector_storage {
        // --- Dense storages: one vector per point ---
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimple(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimpleByte(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::DenseSimpleHalf(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseVolatile(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(test)]
        VectorStorageEnum::DenseVolatileByte(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(test)]
        VectorStorageEnum::DenseVolatileHalf(v) => Self::create_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseMemmap(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseMemmapByte(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseMemmapHalf(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableMemmap(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableMemmapByte(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableMemmapHalf(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableInRam(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableInRamByte(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::DenseAppendableInRamHalf(v) => Self::create_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        // --- Sparse storages: quantization is not supported ---
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::SparseSimple(_) => Err(OperationError::WrongSparse),
        VectorStorageEnum::SparseVolatile(_) => Err(OperationError::WrongSparse),
        VectorStorageEnum::SparseMmap(_) => Err(OperationError::WrongSparse),
        // --- Multi-dense storages: multiple inner vectors per point ---
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimple(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimpleByte(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(feature = "rocksdb")]
        VectorStorageEnum::MultiDenseSimpleHalf(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseVolatile(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(test)]
        VectorStorageEnum::MultiDenseVolatileByte(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        #[cfg(test)]
        VectorStorageEnum::MultiDenseVolatileHalf(v) => Self::create_multi_impl(
            v,
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableMemmap(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableMemmapByte(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableMemmapHalf(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableInRam(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableInRamByte(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
        VectorStorageEnum::MultiDenseAppendableInRamHalf(v) => Self::create_multi_impl(
            v.as_ref(),
            quantization_config,
            storage_type,
            path,
            max_threads,
            stopped,
        ),
    }
}
/// Quantize a dense (single-vector) storage.
///
/// Reads all vectors sequentially through a lazy iterator, applies
/// element-type/distance preprocessing, encodes with the scheme selected by
/// `quantization_config` (scalar / PQ / binary), and persists the resulting
/// config as `quantized.config.json` under `path`.
fn create_impl<
    TElement: PrimitiveVectorElement,
    TVectorStorage: DenseVectorStorage<TElement> + Send + Sync,
>(
    vector_storage: &TVectorStorage,
    quantization_config: &QuantizationConfig,
    storage_type: QuantizedVectorsStorageType,
    path: &Path,
    max_threads: usize,
    stopped: &AtomicBool,
) -> OperationResult<Self> {
    let dim = vector_storage.vector_dim();
    let count = vector_storage.total_vector_count();
    let distance = vector_storage.distance();
    let datatype = vector_storage.datatype();
    // Lazy iterator over preprocessed vectors; nothing is read until the encoder pulls.
    let vectors = (0..count as PointOffsetType).map(|i| {
        PrimitiveVectorElement::quantization_preprocess(
            quantization_config,
            distance,
            vector_storage.get_dense::<Sequential>(i),
        )
    });
    let on_disk_vector_storage = vector_storage.is_on_disk();
    let vector_parameters =
        Self::construct_vector_parameters(distance, dim, count, storage_type);
    let quantized_storage = match quantization_config {
        QuantizationConfig::Scalar(ScalarQuantization {
            scalar: scalar_config,
        }) => Self::create_scalar(
            vectors,
            &vector_parameters,
            count,
            scalar_config,
            storage_type,
            path,
            on_disk_vector_storage,
            stopped,
        )?,
        QuantizationConfig::Product(ProductQuantization { product: pq_config }) => {
            Self::create_pq(
                vectors,
                &vector_parameters,
                count,
                pq_config,
                storage_type,
                path,
                on_disk_vector_storage,
                max_threads, // of the three schemes, only PQ takes a thread budget here
                stopped,
            )?
        }
        QuantizationConfig::Binary(BinaryQuantization {
            binary: binary_config,
        }) => Self::create_binary(
            vectors,
            &vector_parameters,
            count,
            binary_config,
            storage_type,
            path,
            on_disk_vector_storage,
            stopped,
        )?,
    };
    let quantized_vectors_config = QuantizedVectorsConfig {
        quantization_config: quantization_config.clone(),
        vector_parameters,
        storage_type,
    };
    let quantized_vectors = QuantizedVectors {
        storage_impl: quantized_storage,
        config: quantized_vectors_config,
        path: path.to_path_buf(),
        distance,
        datatype,
    };
    // Persist the config so the storage can be re-opened later.
    atomic_save_json(&path.join(QUANTIZED_CONFIG_PATH), &quantized_vectors.config)?;
    Ok(quantized_vectors)
}
fn create_multi_impl<
TElement: PrimitiveVectorElement + 'static,
TVectorStorage: MultiVectorStorage<TElement> + Send + Sync,
>(
vector_storage: &TVectorStorage,
quantization_config: &QuantizationConfig,
storage_type: QuantizedVectorsStorageType,
path: &Path,
max_threads: usize,
stopped: &AtomicBool,
) -> OperationResult<Self> {
let dim = vector_storage.vector_dim();
let distance = vector_storage.distance();
let datatype = vector_storage.datatype();
let multi_vector_config = *vector_storage.multi_vector_config();
let vectors = vector_storage.iterate_inner_vectors().map(|v| {
PrimitiveVectorElement::quantization_preprocess(quantization_config, distance, v)
});
let inner_vectors_count = vectors.clone().count();
let vectors_count = vector_storage.total_vector_count();
let on_disk_vector_storage = vector_storage.is_on_disk();
let vector_parameters =
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_ram_storage.rs | lib/segment/src/vector_storage/quantized/quantized_ram_storage.rs | use std::io::{BufReader, BufWriter, Read, Write};
use std::path::{Path, PathBuf};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use fs_err::File;
use memory::fadvise::OneshotFile;
use memory::mmap_type::MmapFlusher;
use crate::common::operation_error::OperationResult;
use crate::common::vector_utils::TrySetCapacityExact;
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::chunked_vectors::ChunkedVectors;
/// Quantized vectors held fully in RAM, loaded from / saved to a flat file at `path`.
#[derive(Debug)]
pub struct QuantizedRamStorage {
    // Fixed-size encoded vectors kept in memory.
    vectors: ChunkedVectors<u8>,
    // Backing file location; reported by `files()` / `immutable_files()`.
    path: PathBuf,
}
impl QuantizedRamStorage {
    /// Load quantized vectors from a flat file of back-to-back fixed-size records.
    ///
    /// Reading stops at the first incomplete record (normally EOF); any read
    /// error likewise terminates the loop rather than being propagated.
    pub fn from_file(path: &Path, quantized_vector_size: usize) -> std::io::Result<Self> {
        let file = OneshotFile::open(path)?;
        let mut reader = BufReader::new(file);
        let mut vectors = ChunkedVectors::<u8>::new(quantized_vector_size);
        let mut record = vec![0u8; quantized_vector_size];
        loop {
            if reader.read_exact(&mut record).is_err() {
                break;
            }
            vectors.push(&record).map_err(|err| {
                std::io::Error::new(
                    std::io::ErrorKind::OutOfMemory,
                    format!("Failed to load quantized vectors from file: {err}"),
                )
            })?;
        }
        // One-shot read: hint the OS to evict the file from the page cache.
        reader.into_inner().drop_cache()?;
        Ok(QuantizedRamStorage {
            vectors,
            path: path.to_path_buf(),
        })
    }
}
impl quantization::EncodedStorage for QuantizedRamStorage {
    /// Borrow the encoded bytes stored for `index`.
    fn get_vector_data(&self, index: PointOffsetType) -> &[u8] {
        self.vectors.get(index as VectorOffsetType)
    }

    /// Insert or overwrite the encoded vector at `id`.
    fn upsert_vector(
        &mut self,
        id: PointOffsetType,
        vector: &[u8],
        _hw_counter: &HardwareCounterCell,
    ) -> std::io::Result<()> {
        // RAM storage: hardware counter increment is intentionally skipped.
        self.vectors
            .insert(id as usize, vector)
            .map(|_| ())
            .map_err(|err| std::io::Error::other(err.to_string()))
    }

    /// Data is held in memory; the file is only read at load time.
    fn is_on_disk(&self) -> bool {
        false
    }

    fn vectors_count(&self) -> usize {
        self.vectors.len()
    }

    /// No-op: in-RAM storage has nothing to flush.
    fn flusher(&self) -> MmapFlusher {
        Box::new(|| Ok(()))
    }

    fn files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        vec![self.path.clone()]
    }
}
/// Builder collecting quantized vectors in RAM before persisting them to a file.
pub struct QuantizedRamStorageBuilder {
    // Vectors accumulated so far.
    pub vectors: ChunkedVectors<u8>,
    // Destination file for `build()`.
    pub path: PathBuf,
}
impl QuantizedRamStorageBuilder {
    /// Prepare an in-RAM builder for `count` vectors of `dim` bytes each,
    /// to be persisted at `path` on `build()`.
    pub fn new(path: &Path, count: usize, dim: usize) -> OperationResult<Self> {
        let mut vectors = ChunkedVectors::new(dim);
        vectors.try_set_capacity_exact(count)?;
        let path = path.to_path_buf();
        Ok(Self { vectors, path })
    }
}
impl quantization::EncodedStorageBuilder for QuantizedRamStorageBuilder {
    type Storage = QuantizedRamStorage;

    /// Persist all collected vectors to `self.path` and return the RAM storage.
    fn build(self) -> std::io::Result<QuantizedRamStorage> {
        if let Some(dir) = self.path.parent() {
            fs::create_dir_all(dir)?;
        }
        let mut writer = BufWriter::new(File::create(&self.path)?);
        for index in 0..self.vectors.len() {
            writer.write_all(self.vectors.get(index))?;
        }
        // Explicitly flush write buffer so we can catch IO errors
        writer.flush()?;
        writer.into_inner()?.sync_all()?;
        Ok(QuantizedRamStorage {
            vectors: self.vectors,
            path: self.path,
        })
    }

    /// Append one quantized vector to the in-RAM buffer.
    fn push_vector_data(&mut self, other: &[u8]) -> std::io::Result<()> {
        match self.vectors.push(other) {
            Ok(_) => Ok(()),
            Err(e) => Err(std::io::Error::other(format!(
                "Failed to push vector data: {e}"
            ))),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_custom_query_scorer.rs | lib/segment/src/vector_storage/quantized/quantized_custom_query_scorer.rs | use std::borrow::Cow;
use std::marker::PhantomData;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, ScoreType};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{DenseVector, TypedDenseVector};
use crate::spaces::metric::Metric;
use crate::types::QuantizationConfig;
use crate::vector_storage::query::{Query, TransformInto};
use crate::vector_storage::query_scorer::QueryScorer;
/// Scores points of a quantized storage against a custom multi-example query
/// (e.g. recommendation/discovery queries) whose examples are pre-encoded.
pub struct QuantizedCustomQueryScorer<'a, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    /// Query with every example vector already encoded for `quantized_storage`.
    query: TQuery,
    /// Quantized storage the query is scored against.
    quantized_storage: &'a TEncodedVectors,
    // Zero-sized markers tying the scorer to its metric/element types.
    metric: PhantomData<TMetric>,
    element: PhantomData<TElement>,
    /// Collects CPU and vector-IO measurements for this scorer.
    hardware_counter: HardwareCounterCell,
}
impl<'a, TElement, TMetric, TEncodedVectors, TQuery>
    QuantizedCustomQueryScorer<'a, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    /// Builds a scorer by preprocessing and encoding every example vector of
    /// `raw_query` for the given quantized storage.
    ///
    /// The query passes through two transformation stages:
    /// 1. metric preprocessing + conversion to the storage element type
    ///    (`TInputQuery` -> `TOriginalQuery`);
    /// 2. quantization preprocessing + encoding for the quantized storage
    ///    (`TOriginalQuery` -> `TQuery`).
    pub fn new<TOriginalQuery, TInputQuery>(
        raw_query: TInputQuery,
        quantized_storage: &'a TEncodedVectors,
        quantization_config: &QuantizationConfig,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self
    where
        TOriginalQuery: Query<TypedDenseVector<TElement>>
            + TransformInto<TQuery, TypedDenseVector<TElement>, TEncodedVectors::EncodedQuery>
            + Clone,
        TInputQuery: Query<DenseVector>
            + TransformInto<TOriginalQuery, DenseVector, TypedDenseVector<TElement>>,
    {
        let original_query: TOriginalQuery = raw_query
            .transform(|raw_vector| {
                // Apply the metric's preprocessing and convert the elements
                // to the storage's primitive element type.
                let preprocessed_vector = TMetric::preprocess(raw_vector);
                let original_vector = TypedDenseVector::from(TElement::slice_from_float_cow(
                    Cow::Owned(preprocessed_vector),
                ));
                Ok(original_vector)
            })
            // The closure above is infallible; `transform` is only expected to
            // propagate closure errors here.
            .unwrap();
        let query: TQuery = original_query
            .transform(|original_vector| {
                // Prepare the vector for the configured quantization, then
                // encode it into the storage's query representation.
                let original_vector_prequantized = TElement::quantization_preprocess(
                    quantization_config,
                    TMetric::distance(),
                    &original_vector,
                );
                Ok(quantized_storage.encode_query(&original_vector_prequantized))
            })
            // Same as above: the closure never returns an error.
            .unwrap();
        // Scale CPU accounting by the byte size of one element.
        hardware_counter.set_cpu_multiplier(size_of::<TElement>());
        // Count vector IO reads only when the storage actually resides on disk.
        hardware_counter.set_vector_io_read_multiplier(usize::from(quantized_storage.is_on_disk()));
        Self {
            query,
            quantized_storage,
            metric: PhantomData,
            element: PhantomData,
            hardware_counter,
        }
    }
}
impl<TElement, TMetric, TEncodedVectors, TQuery> QueryScorer
    for QuantizedCustomQueryScorer<'_, TElement, TMetric, TEncodedVectors, TQuery>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors,
    TQuery: Query<TEncodedVectors::EncodedQuery>,
{
    type TVector = [TElement];

    /// Scores the stored point `idx` against every example of the query and
    /// folds the per-example similarities via the query's own `score_by`.
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        // account for read outside of `score_by` because the closure is called once per example
        self.hardware_counter
            .vector_io_read()
            .incr_delta(self.quantized_storage.quantized_vector_size());
        self.query.score_by(|this| {
            self.quantized_storage
                .score_point(this, idx, &self.hardware_counter)
        })
    }

    /// Not supported: quantized scorers only score against stored points.
    fn score(&self, _v2: &[TElement]) -> ScoreType {
        unimplemented!("This method is not expected to be called for quantized scorer");
    }

    /// Not supported: a custom query holds multiple examples, so a single
    /// point-to-point score is not meaningful here.
    fn score_internal(&self, _point_a: PointOffsetType, _point_b: PointOffsetType) -> ScoreType {
        unimplemented!("Custom scorer compares against multiple vectors, not just one")
    }

    // Byte-level scoring availability is delegated to the encoded storage.
    type SupportsBytes = TEncodedVectors::SupportsBytes;

    /// Scores a raw encoded-vector byte slice against every example of the query.
    fn score_bytes(&self, enabled: Self::SupportsBytes, bytes: &[u8]) -> ScoreType {
        self.query.score_by(|this| {
            self.quantized_storage
                .score_bytes(enabled, this, bytes, &self.hardware_counter)
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/quantized/quantized_multi_query_scorer.rs | lib/segment/src/vector_storage/quantized/quantized_multi_query_scorer.rs | use std::borrow::Cow;
use std::marker::PhantomData;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::MultiDenseVectorInternal;
use crate::spaces::metric::Metric;
use crate::types::QuantizationConfig;
use crate::vector_storage::quantized::quantized_multivector_storage::{
MultivectorOffset, MultivectorOffsets,
};
use crate::vector_storage::query_scorer::QueryScorer;
/// Scores points of a quantized multivector storage against a multi-dense-vector query.
pub struct QuantizedMultiQueryScorer<'a, TElement, TMetric, TEncodedVectors>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors,
{
    /// The multivector query, flattened and encoded for the storage.
    query: TEncodedVectors::EncodedQuery,
    /// Quantized multivector storage the query is scored against.
    quantized_multivector_storage: &'a TEncodedVectors,
    // Zero-sized markers tying the scorer to its metric/element types.
    metric: PhantomData<TMetric>,
    element: PhantomData<TElement>,
    /// Collects vector-IO measurements for this scorer.
    hardware_counter: HardwareCounterCell,
}
impl<'a, TElement, TMetric, TEncodedVectors>
    QuantizedMultiQueryScorer<'a, TElement, TMetric, TEncodedVectors>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors,
{
    /// Builds a scorer from a raw multivector query.
    ///
    /// Each sub-vector is metric-preprocessed, converted to the storage element
    /// type, prepared for the configured quantization, and the results are
    /// concatenated before being encoded as one query for the storage.
    pub fn new_multi(
        raw_query: &MultiDenseVectorInternal,
        quantized_multivector_storage: &'a TEncodedVectors,
        quantization_config: &QuantizationConfig,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        let mut flattened = Vec::new();
        for sub_vector in raw_query.multi_vectors() {
            let preprocessed = TMetric::preprocess(sub_vector.to_vec());
            let converted = TElement::slice_from_float_cow(Cow::Owned(preprocessed));
            let prequantized = TElement::quantization_preprocess(
                quantization_config,
                TMetric::distance(),
                converted.as_ref(),
            );
            flattened.extend_from_slice(&prequantized);
        }
        let query = quantized_multivector_storage.encode_query(&flattened);
        // Count vector IO reads only when the storage actually resides on disk.
        hardware_counter
            .set_vector_io_read_multiplier(usize::from(quantized_multivector_storage.is_on_disk()));
        Self {
            query,
            quantized_multivector_storage,
            metric: PhantomData,
            element: PhantomData,
            hardware_counter,
        }
    }
}
impl<TElement, TMetric, TEncodedVectors> QueryScorer
    for QuantizedMultiQueryScorer<'_, TElement, TMetric, TEncodedVectors>
where
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TEncodedVectors: quantization::EncodedVectors + MultivectorOffsets,
{
    type TVector = [TElement];

    /// Scores the stored multivector `idx` against the encoded query.
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        // Account for reading the offset entry plus every sub-vector of this point.
        let multi_vector_offset = self.quantized_multivector_storage.get_offset(idx);
        let sub_vectors_count = multi_vector_offset.count as usize;
        self.hardware_counter.vector_io_read().incr_delta(
            size_of::<MultivectorOffset>()
                + self.quantized_multivector_storage.quantized_vector_size() * sub_vectors_count,
        );
        // quantized multivector storage handles hardware counter to batch vector IO
        self.quantized_multivector_storage
            .score_point(&self.query, idx, &self.hardware_counter)
    }

    /// Not supported: quantized scorers only score against stored points.
    fn score(&self, _v2: &[TElement]) -> ScoreType {
        unimplemented!("This method is not expected to be called for quantized scorer");
    }

    /// Scores two stored points against each other, delegating to the storage.
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        self.quantized_multivector_storage
            .score_internal(point_a, point_b, &self.hardware_counter)
    }

    // Byte-level scoring is not available for multivector storages.
    type SupportsBytes = False;

    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this method can never actually be called.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query/discovery_query.rs | lib/segment/src/vector_storage/query/discovery_query.rs | use std::hash::Hash;
use std::iter;
use common::math::scaled_fast_sigmoid;
use common::types::ScoreType;
use itertools::Itertools;
use serde::Serialize;
use super::context_query::ContextPair;
use super::{Query, TransformInto};
use crate::common::operation_error::OperationResult;
use crate::data_types::vectors::{QueryVector, VectorInternal};
/// Signed per-pair rank; casting `Ordering` yields -1, 0 or +1.
type RankType = i32;
impl<T> ContextPair<T> {
    /// Calculates on which side of the space the point is, with respect to this pair
    fn rank_by(&self, similarity: impl Fn(&T) -> ScoreType) -> RankType {
        let positive_similarity = similarity(&self.positive);
        let negative_similarity = similarity(&self.negative);
        // if closer to positive, return 1, else -1
        // (`Ordering as i32` gives Less = -1, Equal = 0, Greater = 1)
        positive_similarity.total_cmp(&negative_similarity) as RankType
    }
}
/// Query that searches for a target vector while ranking candidates by how
/// well they sit inside the zone described by the context pairs.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct DiscoveryQuery<T> {
    /// The vector to search for.
    pub target: T,
    /// Positive/negative pairs constraining the search zone.
    pub pairs: Vec<ContextPair<T>>,
}
impl<T> DiscoveryQuery<T> {
    /// Builds a discovery query from a target and its context pairs.
    pub fn new(target: T, pairs: Vec<ContextPair<T>>) -> Self {
        Self { target, pairs }
    }

    /// Iterates over every vector of the query: the target first, then the
    /// positive and negative vector of each pair.
    pub fn flat_iter(&self) -> impl Iterator<Item = &T> {
        let from_pairs = self.pairs.iter().flat_map(|pair| pair.iter());
        iter::once(&self.target).chain(from_pairs)
    }

    /// Sums the per-pair ranks into the overall context rank.
    fn rank_by(&self, similarity: impl Fn(&T) -> ScoreType) -> RankType {
        let mut total_rank = 0;
        for pair in &self.pairs {
            total_rank += pair.rank_by(&similarity);
        }
        total_rank
    }
}
impl<T, U> TransformInto<DiscoveryQuery<U>, T, U> for DiscoveryQuery<T> {
    /// Applies `f` to the target first, then to every pair; the first error aborts.
    fn transform<F>(self, mut f: F) -> OperationResult<DiscoveryQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let target = f(self.target)?;
        let mut pairs = Vec::with_capacity(self.pairs.len());
        for pair in self.pairs {
            pairs.push(pair.transform(&mut f)?);
        }
        Ok(DiscoveryQuery::new(target, pairs))
    }
}
impl<T> Query<T> for DiscoveryQuery<T> {
    /// Integer context rank plus a sigmoid of the target similarity:
    /// the rank dominates, while the target similarity orders candidates
    /// that share the same rank.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        let rank = self.rank_by(&similarity);
        let target_part = scaled_fast_sigmoid(similarity(&self.target));
        rank as ScoreType + target_part
    }
}
impl From<DiscoveryQuery<VectorInternal>> for QueryVector {
    /// Wraps the query into the `Discovery` variant of [`QueryVector`].
    fn from(query: DiscoveryQuery<VectorInternal>) -> Self {
        QueryVector::Discovery(query)
    }
}
#[cfg(test)]
mod test {
    use std::cmp::Ordering;

    use common::types::ScoreType;
    use itertools::Itertools;
    use proptest::prelude::*;
    use rstest::rstest;

    use super::*;

    // Identity "similarity": treats the value itself as the score.
    fn dummy_similarity(x: &isize) -> ScoreType {
        *x as ScoreType
    }

    /// Considers each "vector" as the actual score from the similarity function by
    /// using a dummy identity function.
    #[rstest]
    #[case::no_pairs(vec![], 0)]
    #[case::closer_to_positive(vec![(10, 4)], 1)]
    #[case::closer_to_negative(vec![(4, 10)], -1)]
    #[case::equal_scores(vec![(11, 11)], 0)]
    #[case::neutral_zone(vec![(10, 4), (4, 10)], 0)]
    #[case::best_zone(vec![(10, 4), (4, 2)], 2)]
    #[case::worst_zone(vec![(4, 10), (2, 4)], -2)]
    #[case::many_pairs(vec![(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (0, 4)], 4)]
    fn context_ranking(#[case] pairs: Vec<(isize, isize)>, #[case] expected: RankType) {
        let pairs = pairs.into_iter().map(ContextPair::from).collect();
        // The target does not influence the rank, only the pairs do.
        let target = 42;
        let query = DiscoveryQuery::new(target, pairs);
        let rank = query.rank_by(dummy_similarity);
        assert_eq!(
            rank, expected,
            "Ranking is incorrect, expected {expected}, but got {rank}"
        );
    }

    /// Compares the score of a query against a fixed score
    #[rstest]
    #[case::no_pairs(1, vec![], Ordering::Less)]
    #[case::just_above(1, vec![(1,0),(1,0)], Ordering::Greater)]
    #[case::just_below(-1, vec![(1,0),(1,0)], Ordering::Less)]
    #[case::bad_target_good_context(-1000, vec![(1,0),(1,0),(1, 0)], Ordering::Greater)]
    #[case::good_target_bad_context(1000, vec![(1,0),(0,1)], Ordering::Less)]
    fn score_better(
        #[case] target: isize,
        #[case] pairs: Vec<(isize, isize)>,
        #[case] expected: Ordering,
    ) {
        let fixed_score: f32 = 2.5;
        let pairs = pairs.into_iter().map(ContextPair::from).collect();
        let query = DiscoveryQuery::new(target, pairs);
        let score = query.score_by(dummy_similarity);
        assert_eq!(
            score.total_cmp(&fixed_score),
            expected,
            "Comparison is incorrect, expected {expected:?} for {score} against {fixed_score}"
        );
    }

    proptest! {
        // The fractional part of the score comes only from the target,
        // the integer part only from the context rank.
        #[test]
        fn same_target_only_changes_rank(
            target in -1000f32..1000f32,
            pairs1 in prop::collection::vec((0f32..1000f32, 0.0f32..1000f32), 0..10),
            pairs2 in prop::collection::vec((0f32..1000f32, 0.0f32..1000f32), 0..10),
        ) {
            let dummy_similarity = |x: &ScoreType| *x as ScoreType;
            let pairs1 = pairs1.into_iter().map(ContextPair::from).collect();
            let query1 = DiscoveryQuery::new(target, pairs1);
            let score1 = query1.score_by(dummy_similarity);
            let pairs2 = pairs2.into_iter().map(ContextPair::from).collect();
            let query2 = DiscoveryQuery::new(target, pairs2);
            let score2 = query2.score_by(dummy_similarity);
            let target_part1 = score1 - score1.floor();
            let target_part2 = score2 - score2.floor();
            assert!((target_part1 - target_part2).abs() <= 1.0e-6, "Target part of score is not similar, score1: {score1}, score2: {score2}");
        }

        #[test]
        fn same_context_only_changes_target(
            target1 in -1000f32..1000f32,
            target2 in -1000f32..1000f32,
            pairs in prop::collection::vec((0f32..1000f32, 0.0f32..1000f32), 0..10),
        )
        {
            let dummy_similarity = |x: &ScoreType| *x as ScoreType;
            let pairs = pairs.into_iter().map(ContextPair::from).collect_vec();
            let query1 = DiscoveryQuery::new(target1, pairs.clone());
            let score1 = query1.score_by(dummy_similarity);
            let query2 = DiscoveryQuery::new(target2, pairs);
            let score2 = query2.score_by(dummy_similarity);
            let context_part1 = score1.floor();
            let context_part2 = score2.floor();
            assert_eq!(context_part1, context_part2,"Context part of score isn't equal, score1: {score1}, score2: {score2}");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query/feedback_query.rs | lib/segment/src/vector_storage/query/feedback_query.rs | use std::hash::Hash;
use common::types::ScoreType;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use serde::Serialize;
use super::{Query, TransformInto};
use crate::common::operation_error::OperationResult;
const DEFAULT_MAX_PAIRS: usize = 3;
/// A vector together with the feedback score it received.
#[derive(Clone, Debug, Serialize, Hash, PartialEq)]
pub struct FeedbackItem<T> {
    /// The vector being given feedback on.
    pub vector: T,
    /// Feedback score for the vector; treated as bigger-is-better when pairing.
    pub score: OrderedFloat<ScoreType>,
}
impl<T> FeedbackItem<T> {
    /// Maps the stored vector through `f`, keeping the feedback score unchanged.
    pub fn transform<F, U>(self, mut f: F) -> OperationResult<FeedbackItem<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let FeedbackItem { vector, score } = self;
        let vector = f(vector)?;
        Ok(FeedbackItem { vector, score })
    }
}
/// Akin to external representation of the query. Unoptimized for scoring.
///
/// Call `into_query` to get the type implementing `Query` trait.
#[derive(Clone, Debug, Serialize, Hash, PartialEq)]
pub struct NaiveFeedbackQuery<T> {
    /// The original query vector.
    pub target: T,
    /// Scored results used as relevance feedback.
    pub feedback: Vec<FeedbackItem<T>>,
    /// How to handle the feedback
    pub coefficients: NaiveFeedbackCoefficients,
}
impl<T: Clone> NaiveFeedbackQuery<T> {
    /// Converts into the scoring-optimized [`FeedbackQuery`], precomputing feedback pairs.
    pub fn into_query(self) -> FeedbackQuery<T> {
        FeedbackQuery::new(self.target, self.feedback, self.coefficients)
    }
}
impl<T> NaiveFeedbackQuery<T> {
    /// Iterates over all vectors in the query: feedback vectors first, target last.
    pub fn flat_iter(&self) -> impl Iterator<Item = &T> {
        let feedback_vectors = self.feedback.iter().map(|item| &item.vector);
        feedback_vectors.chain(std::iter::once(&self.target))
    }
}
impl<T, U> TransformInto<NaiveFeedbackQuery<U>, T, U> for NaiveFeedbackQuery<T> {
    /// Applies `f` to the target first, then to every feedback item;
    /// the first error aborts the transformation.
    fn transform<F>(self, mut f: F) -> OperationResult<NaiveFeedbackQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let Self {
            target,
            feedback,
            coefficients,
        } = self;
        let target = f(target)?;
        let mut transformed_feedback = Vec::with_capacity(feedback.len());
        for item in feedback {
            transformed_feedback.push(item.transform(&mut f)?);
        }
        Ok(NaiveFeedbackQuery {
            target,
            feedback: transformed_feedback,
            coefficients,
        })
    }
}
/// A positive/negative pair extracted from feedback, with its pair-level
/// factor (`confidence^b * c`) already computed.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct PrecomputedFeedbackPair<T> {
    /// A vector with higher feedback score.
    pub positive: T,
    /// A vector with lower feedback score.
    pub negative: T,
    /// Partial computation related to this pair.
    pub partial_computation: OrderedFloat<f32>,
}
impl<T> PrecomputedFeedbackPair<T> {
    /// Maps both vectors through `f` (positive first), keeping the precomputed factor.
    pub fn transform<F, U>(self, mut f: F) -> OperationResult<PrecomputedFeedbackPair<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let positive = f(self.positive)?;
        let negative = f(self.negative)?;
        Ok(PrecomputedFeedbackPair {
            positive,
            negative,
            partial_computation: self.partial_computation,
        })
    }
}
/// Trained coefficients for the formula. Specific to a triplet of dataset-smallmodel-bigmodel.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)]
pub struct NaiveFeedbackCoefficients {
    /// Trained coefficient `a`: weight of the raw target similarity.
    pub a: OrderedFloat<f32>,
    /// Trained coefficient `b`: exponent applied to a pair's confidence.
    pub b: OrderedFloat<f32>,
    /// Trained coefficient `c`: scale applied to each pair's contribution.
    pub c: OrderedFloat<f32>,
}
impl NaiveFeedbackCoefficients {
    /// Extracts pairs of points, ranked by score difference in descending order.
    ///
    /// Sorts feedback by score (descending) and pairs the best item with the
    /// worst, the second-best with the second-worst, and so on, producing at
    /// most `num_pairs` pairs. Assumes scoring order is BiggerIsBetter.
    fn extract_feedback_pairs<TVector: Clone>(
        &self,
        mut feedback: Vec<FeedbackItem<TVector>>,
        num_pairs: usize,
    ) -> Vec<PrecomputedFeedbackPair<TVector>> {
        feedback.sort_by_key(|item| OrderedFloat(-item.score));
        // At least two items are needed to form a single pair.
        if feedback.len() < 2 {
            return Vec::new();
        }
        let pair_limit = num_pairs.min(feedback.len() / 2);
        let mut pairs = Vec::with_capacity(pair_limit);
        // Walk inwards from both ends of the sorted list.
        let mut lo = 0;
        let mut hi = feedback.len() - 1;
        while lo < hi && pairs.len() < pair_limit {
            let best = &feedback[lo];
            let worst = &feedback[hi];
            // Confidence is the feedback-score gap within the pair.
            let confidence = best.score - worst.score;
            let partial_computation = confidence.powf(self.b.0) * self.c.0;
            pairs.push(PrecomputedFeedbackPair {
                positive: best.vector.clone(),
                negative: worst.vector.clone(),
                partial_computation: partial_computation.into(),
            });
            lo += 1;
            hi -= 1;
        }
        pairs
    }
}
/// Query for relevance feedback scoring
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct FeedbackQuery<TVector> {
    /// The original query vector.
    target: TVector,
    /// Precomputed positive/negative pairs extracted from the feedback.
    feedback_pairs: Vec<PrecomputedFeedbackPair<TVector>>,
    /// How to handle the feedback
    coefficients: NaiveFeedbackCoefficients,
}
impl<TVector: Clone> FeedbackQuery<TVector> {
    /// Builds the query, turning raw feedback items into at most
    /// `DEFAULT_MAX_PAIRS` precomputed positive/negative pairs.
    pub fn new(
        target: TVector,
        feedback: Vec<FeedbackItem<TVector>>,
        coefficients: NaiveFeedbackCoefficients,
    ) -> Self {
        let pairs = coefficients.extract_feedback_pairs(feedback, DEFAULT_MAX_PAIRS);
        Self {
            target,
            feedback_pairs: pairs,
            coefficients,
        }
    }
}
impl<T, U> TransformInto<FeedbackQuery<U>, T, U> for FeedbackQuery<T> {
    /// Applies `f` to the target first, then to every precomputed pair;
    /// the first error aborts the transformation.
    fn transform<F>(self, mut f: F) -> OperationResult<FeedbackQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let Self {
            target,
            feedback_pairs,
            coefficients,
        } = self;
        let target = f(target)?;
        let mut transformed_pairs = Vec::with_capacity(feedback_pairs.len());
        for pair in feedback_pairs {
            transformed_pairs.push(pair.transform(&mut f)?);
        }
        Ok(FeedbackQuery {
            target,
            feedback_pairs: transformed_pairs,
            coefficients,
        })
    }
}
impl<T> Query<T> for FeedbackQuery<T> {
    /// This follows the following formula:
    ///
    /// $ a * score + \sum{confidence_pair ^b * c * delta_pair} $
    ///
    /// where
    /// - `confidence_pair` means the difference in feedback score of the pair,
    /// - `delta_pair` is the difference in similarity score between the target
    ///   and positive/negative vectors e.g. `similarity(positive) - similarity(negative)`
    ///
    /// The `confidence_pair^b * c` factor of each pair is precomputed and
    /// stored as `partial_computation`.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        let base = self.coefficients.a.0 * similarity(&self.target);
        self.feedback_pairs.iter().fold(base, |acc, pair| {
            let delta = similarity(&pair.positive) - similarity(&pair.negative);
            acc + pair.partial_computation.0 * delta
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query/mod.rs | lib/segment/src/vector_storage/query/mod.rs | use common::types::ScoreType;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::vectors::DenseVector;
mod context_query;
mod discovery_query;
mod feedback_query;
mod reco_query;
pub use context_query::{ContextPair, ContextQuery};
pub use discovery_query::DiscoveryQuery;
pub use feedback_query::{FeedbackItem, NaiveFeedbackCoefficients, NaiveFeedbackQuery};
pub use reco_query::{RecoBestScoreQuery, RecoQuery, RecoSumScoresQuery};
pub trait TransformInto<Output, T = DenseVector, U = DenseVector> {
    /// Change the underlying type of the query, or just process it in some way.
    ///
    /// `f` is applied to every vector held by the query; implementations
    /// propagate the first error `f` returns.
    fn transform<F>(self, f: F) -> OperationResult<Output>
    where
        F: FnMut(T) -> OperationResult<U>;

    /// Convenience wrapper over [`Self::transform`] that uses the `TryInto`
    /// conversion between the two vector types.
    fn transform_into(self) -> OperationResult<Output>
    where
        Self: Sized,
        T: TryInto<U, Error = OperationError>,
    {
        self.transform(|v| v.try_into())
    }
}
pub trait Query<T> {
    /// Compares the vectors of the query against a single vector via a similarity function,
    /// then folds the similarities into a single score.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query/context_query.rs | lib/segment/src/vector_storage/query/context_query.rs | use std::hash::Hash;
use std::iter::{self, Chain, Once};
use common::math::fast_sigmoid;
use common::types::ScoreType;
use itertools::Itertools;
use serde::Serialize;
use super::{Query, TransformInto};
use crate::common::operation_error::OperationResult;
use crate::data_types::vectors::{QueryVector, VectorInternal};
/// A pair of example vectors splitting the space into a "positive" and a
/// "negative" side.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct ContextPair<T> {
    /// Example a matching point should be closer to.
    pub positive: T,
    /// Example a matching point should be farther from.
    pub negative: T,
}
impl<T> ContextPair<T> {
    /// Iterates over the two vectors: positive first, then negative.
    pub fn iter(&self) -> impl Iterator<Item = &T> {
        iter::once(&self.positive).chain(iter::once(&self.negative))
    }

    /// Maps both vectors through `f` (positive first); the first error aborts.
    pub fn transform<F, U>(self, mut f: F) -> OperationResult<ContextPair<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        Ok(ContextPair {
            positive: f(self.positive)?,
            negative: f(self.negative)?,
        })
    }

    /// In the first stage of discovery search, the objective is to get the best entry point
    /// for the search. This is done by using a smooth loss function instead of hard ranking
    /// to approach the best zone, once the best zone is reached, score will be same for all
    /// points inside that zone.
    /// e.g.:
    /// ```text
    ///                   │
    ///                   │
    ///                   │    +0
    ///                   │             +0
    ///                   │
    ///         n         │         p
    ///                   │
    ///   ─►          ─►  │
    ///  -0.4         -0.1│   +0
    ///                   │
    /// ```
    /// Simple 2D model:
    /// <https://www.desmos.com/calculator/lbxycyh2hs>
    pub fn loss_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        // Tiny margin so exactly-equal similarities still incur a (negligible) loss.
        const MARGIN: ScoreType = ScoreType::EPSILON;
        let positive = similarity(&self.positive);
        let negative = similarity(&self.negative);
        let difference = positive - negative - MARGIN;
        // Clamp to <= 0 so points already on the positive side all score 0.
        fast_sigmoid(ScoreType::min(difference, 0.0))
    }
}
impl<T> IntoIterator for ContextPair<T> {
    type Item = T;
    type IntoIter = Chain<Once<T>, Once<T>>;

    /// Consumes the pair, yielding the positive vector first, then the negative.
    fn into_iter(self) -> Self::IntoIter {
        iter::once(self.positive).chain(iter::once(self.negative))
    }
}
// Test-only convenience: build a pair from a `(positive, negative)` tuple.
#[cfg(test)]
impl<T> From<(T, T)> for ContextPair<T> {
    fn from(pair: (T, T)) -> Self {
        Self {
            positive: pair.0,
            negative: pair.1,
        }
    }
}
/// Query scored purely by context pairs, with no target vector.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct ContextQuery<T> {
    /// Positive/negative pairs defining the desired zone.
    pub pairs: Vec<ContextPair<T>>,
}
impl<T> ContextQuery<T> {
pub fn new(pairs: Vec<ContextPair<T>>) -> Self {
Self { pairs }
}
pub fn flat_iter(&self) -> impl Iterator<Item = &T> {
self.pairs.iter().flat_map(|pair| pair.iter())
}
}
impl<T, U> TransformInto<ContextQuery<U>, T, U> for ContextQuery<T> {
    /// Applies `f` to every pair in order; the first error aborts.
    fn transform<F>(self, mut f: F) -> OperationResult<ContextQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let mut transformed = Vec::with_capacity(self.pairs.len());
        for pair in self.pairs {
            transformed.push(pair.transform(&mut f)?);
        }
        Ok(ContextQuery::new(transformed))
    }
}
impl<T> Query<T> for ContextQuery<T> {
    /// Total loss: the sum of each pair's (non-positive) loss contribution.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        let mut total = 0.0;
        for pair in &self.pairs {
            total += pair.loss_by(&similarity);
        }
        total
    }
}
impl<T> From<Vec<ContextPair<T>>> for ContextQuery<T> {
    /// Builds a context query directly from a list of pairs.
    fn from(pairs: Vec<ContextPair<T>>) -> Self {
        ContextQuery::new(pairs)
    }
}
impl From<ContextQuery<VectorInternal>> for QueryVector {
    /// Wraps the query into the `Context` variant of [`QueryVector`].
    fn from(query: ContextQuery<VectorInternal>) -> Self {
        QueryVector::Context(query)
    }
}
#[cfg(test)]
mod test {
    use common::types::ScoreType;
    use proptest::prelude::*;

    use super::*;

    // Identity "similarity": treats the value itself as the score.
    fn dummy_similarity(x: &f32) -> ScoreType {
        *x as ScoreType
    }

    /// Possible similarities
    fn sim() -> impl Strategy<Value = f32> {
        (-100.0..=100.0).prop_map(|x| x as f32)
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(1000))]

        /// Checks that the loss is between 0 and -1
        #[test]
        fn loss_is_not_more_than_1_per_pair((p, n) in (sim(), sim())) {
            let query = ContextQuery::new(vec![ContextPair::from((p, n))]);
            let score = query.score_by(dummy_similarity);
            assert!(score <= 0.0, "similarity: {score}");
            assert!(score > -1.0, "similarity: {score}");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query/reco_query.rs | lib/segment/src/vector_storage/query/reco_query.rs | use std::hash::Hash;
use common::math::scaled_fast_sigmoid;
use common::types::ScoreType;
use itertools::Itertools;
use serde::Serialize;
use super::{Query, TransformInto};
use crate::common::operation_error::OperationResult;
use crate::data_types::vectors::{QueryVector, VectorInternal};
/// Recommendation query: a set of positive and negative example vectors.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct RecoQuery<T> {
    /// Examples the results should be similar to.
    pub positives: Vec<T>,
    /// Examples the results should be dissimilar to.
    pub negatives: Vec<T>,
}
impl<T> RecoQuery<T> {
    /// Builds a recommendation query from positive and negative examples.
    pub fn new(positives: Vec<T>, negatives: Vec<T>) -> Self {
        Self {
            positives,
            negatives,
        }
    }

    /// Iterates over all example vectors: positives first, then negatives.
    pub fn flat_iter(&self) -> impl Iterator<Item = &T> {
        let positive_vectors = self.positives.iter();
        let negative_vectors = self.negatives.iter();
        positive_vectors.chain(negative_vectors)
    }
}
impl<T, U> TransformInto<RecoQuery<U>, T, U> for RecoQuery<T> {
    /// Applies `f` to every positive, then every negative; the first error aborts.
    fn transform<F>(self, mut f: F) -> OperationResult<RecoQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        let mut positives = Vec::with_capacity(self.positives.len());
        for vector in self.positives {
            positives.push(f(vector)?);
        }
        let mut negatives = Vec::with_capacity(self.negatives.len());
        for vector in self.negatives {
            negatives.push(f(vector)?);
        }
        Ok(RecoQuery::new(positives, negatives))
    }
}
/// Newtype selecting the "best score" strategy for a [`RecoQuery`].
#[derive(Debug, Clone, PartialEq)]
pub struct RecoBestScoreQuery<T>(RecoQuery<T>);
impl<T> From<RecoQuery<T>> for RecoBestScoreQuery<T> {
    fn from(query: RecoQuery<T>) -> Self {
        Self(query)
    }
}
impl<T, U> TransformInto<RecoBestScoreQuery<U>, T, U> for RecoBestScoreQuery<T> {
    /// Delegates to the inner [`RecoQuery`] transformation.
    fn transform<F>(self, f: F) -> OperationResult<RecoBestScoreQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        Ok(RecoBestScoreQuery(self.0.transform(f)?))
    }
}
impl From<RecoBestScoreQuery<VectorInternal>> for QueryVector {
    /// Wraps the inner query into the `RecommendBestScore` variant.
    fn from(query: RecoBestScoreQuery<VectorInternal>) -> Self {
        QueryVector::RecommendBestScore(query.0)
    }
}
impl<T> Query<T> for RecoBestScoreQuery<T> {
    /// Scores by the single best-matching example: if the closest positive
    /// beats the closest negative, return `sigmoid(best positive)`; otherwise
    /// return `-sigmoid(best negative)`, which keeps every negative-chosen
    /// point below every positive-chosen one.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        // Best (maximum) similarity among positives; -inf when there are none.
        let max_positive = self
            .0
            .positives
            .iter()
            .map(&similarity)
            .max_by(|a, b| a.total_cmp(b))
            .unwrap_or(ScoreType::NEG_INFINITY);
        // Best (maximum) similarity among negatives; -inf when there are none.
        let max_negative = self
            .0
            .negatives
            .iter()
            .map(&similarity)
            .max_by(|a, b| a.total_cmp(b))
            .unwrap_or(ScoreType::NEG_INFINITY);
        if max_positive > max_negative {
            scaled_fast_sigmoid(max_positive)
        } else {
            -scaled_fast_sigmoid(max_negative)
        }
    }
}
/// Newtype selecting the "sum of scores" strategy for a [`RecoQuery`].
#[derive(Debug, Clone, PartialEq)]
pub struct RecoSumScoresQuery<T>(RecoQuery<T>);
impl<T> From<RecoQuery<T>> for RecoSumScoresQuery<T> {
    fn from(query: RecoQuery<T>) -> Self {
        Self(query)
    }
}
impl<T, U> TransformInto<RecoSumScoresQuery<U>, T, U> for RecoSumScoresQuery<T> {
    /// Delegates to the inner [`RecoQuery`] transformation.
    fn transform<F>(self, f: F) -> OperationResult<RecoSumScoresQuery<U>>
    where
        F: FnMut(T) -> OperationResult<U>,
    {
        Ok(RecoSumScoresQuery(self.0.transform(f)?))
    }
}
impl From<RecoSumScoresQuery<VectorInternal>> for QueryVector {
    /// Wraps the inner query into the `RecommendSumScores` variant.
    fn from(query: RecoSumScoresQuery<VectorInternal>) -> Self {
        QueryVector::RecommendSumScores(query.0)
    }
}
impl<T> Query<T> for RecoSumScoresQuery<T> {
    /// Score is the sum of similarities to the positives minus the sum of
    /// similarities to the negatives.
    fn score_by(&self, similarity: impl Fn(&T) -> ScoreType) -> ScoreType {
        let mut positive_total: ScoreType = 0.0;
        for vector in &self.0.positives {
            positive_total += similarity(vector);
        }
        let mut negative_total: ScoreType = 0.0;
        for vector in &self.0.negatives {
            negative_total += similarity(vector);
        }
        positive_total - negative_total
    }
}
#[cfg(test)]
mod test {
    use std::cmp::Ordering;

    use common::math::scaled_fast_sigmoid;
    use common::types::ScoreType;
    use proptest::prelude::*;
    use rstest::rstest;

    use crate::vector_storage::query::{Query, RecoBestScoreQuery, RecoQuery};

    // Which side (positive or negative) is expected to win the comparison.
    enum Chosen {
        Positive,
        Negative,
    }

    #[rstest]
    #[case::higher_positive(vec![42], vec![4], Chosen::Positive, 42.0)]
    #[case::higher_negative(vec![4], vec![42], Chosen::Negative, 42.0)]
    #[case::negative_zero(vec![-1], vec![0], Chosen::Negative, 0.0)]
    #[case::positive_zero(vec![0], vec![-1], Chosen::Positive, 0.0)]
    #[case::both_under_zero(vec![-42], vec![-84], Chosen::Positive, -42.0)]
    #[case::both_under_zero_but_negative_is_higher(vec![-84], vec![-42], Chosen::Negative, -42.0)]
    #[case::multiple_with_negative_best(vec![1, 2, 3], vec![4, 5, 6], Chosen::Negative, 6.0)]
    #[case::multiple_with_positive_best(vec![10, 2, 3], vec![4, 5, 6], Chosen::Positive, 10.0)]
    fn score_query(
        #[case] positives: Vec<isize>,
        #[case] negatives: Vec<isize>,
        #[case] chosen: Chosen,
        #[case] expected: ScoreType,
    ) {
        use super::{RecoBestScoreQuery, RecoQuery};
        let query = RecoBestScoreQuery::from(RecoQuery::new(positives, negatives));
        // Identity "similarity": treats the value itself as the score.
        let dummy_similarity = |x: &isize| *x as ScoreType;
        let positive_transformation = scaled_fast_sigmoid;
        let negative_transformation = |x| -scaled_fast_sigmoid(x);
        let score = query.score_by(dummy_similarity);
        match chosen {
            Chosen::Positive => {
                assert_eq!(score, positive_transformation(expected));
            }
            Chosen::Negative => {
                assert_eq!(score, negative_transformation(expected));
            }
        }
    }

    // True when `a` and `b` have the same sign and are within `ulps`
    // units-in-the-last-place of each other.
    fn ulps_eq(a: f32, b: f32, ulps: u32) -> bool {
        if a.signum() != b.signum() {
            return false;
        }
        let a = a.to_bits();
        let b = b.to_bits();
        a.abs_diff(b) <= ulps
    }

    /// Relaxes the comparison of floats to allow for some difference in units of least precision
    fn float_cmp(a: f32, b: f32) -> Ordering {
        if ulps_eq(a, b, 80) {
            Ordering::Equal
        } else {
            a.total_cmp(&b)
        }
    }

    proptest! {
        /// Checks that the negative-chosen scores invert the order of the candidates
        #[test]
        fn correct_negative_order(a in -100f32..=100f32, b in -100f32..=100f32) {
            let dummy_similarity = |x: &f32| *x as ScoreType;
            let ordering_before = float_cmp(dummy_similarity(&a), dummy_similarity(&b));
            let query_a = RecoBestScoreQuery::from(RecoQuery::new(vec![], vec![a]));
            let query_b = RecoBestScoreQuery::from(RecoQuery::new(vec![], vec![b]));
            let score_a = query_a.score_by(dummy_similarity);
            let score_b = query_b.score_by(dummy_similarity);
            let ordering_after = float_cmp(score_a, score_b);
            if ordering_before == std::cmp::Ordering::Equal {
                assert_eq!(ordering_before, ordering_after);
            } else {
                assert_ne!(ordering_before, ordering_after)
            }
        }

        /// Checks that the positive-chosen scores preserve the order of the candidates
        #[test]
        fn correct_positive_order(a in -100f32..=100f32, b in -100f32..=100f32) {
            let dummy_similarity = |x: &f32| *x as ScoreType;
            let ordering_before = float_cmp(dummy_similarity(&a), dummy_similarity(&b));
            // Too similar scores can get compressed to the same value by the sigmoid function.
            // This would make the test useless, so we skip those cases.
            prop_assume!(ordering_before != Ordering::Equal);
            let query_a = RecoBestScoreQuery::from(RecoQuery::new(vec![a], vec![]));
            let query_b = RecoBestScoreQuery::from(RecoQuery::new(vec![b], vec![]));
            let score_a = query_a.score_by(dummy_similarity);
            let score_b = query_b.score_by(dummy_similarity);
            let ordering_after = score_a.total_cmp(&score_b);
            assert_eq!(ordering_before, ordering_after);
        }

        /// Guarantees that the point that was chosen from positive is always preferred on
        /// the candidate list over a point that was chosen from negatives
        #[test]
        fn correct_positive_and_negative_order(p in -100f32..=100f32, n in -100f32..=100f32) {
            let dummy_similarity = |x: &f32| *x as ScoreType;
            let query_p = RecoBestScoreQuery::from(RecoQuery::new(vec![p], vec![]));
            let query_n = RecoBestScoreQuery::from(RecoQuery::new(vec![], vec![n]));
            let ordering = query_p.score_by(dummy_similarity).total_cmp(&query_n.score_by(dummy_similarity));
            assert_ne!(ordering, std::cmp::Ordering::Less);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/custom_query_scorer_equivalency.rs | lib/segment/src/vector_storage/tests/custom_query_scorer_equivalency.rs | use std::collections::HashSet;
use std::path::Path;
use std::sync::atomic::AtomicBool;
use std::{error, result};
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::seq::IteratorRandom;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use super::utils::sampler;
use crate::data_types::vectors::{QueryVector, VectorElementType};
use crate::fixtures::payload_context_fixture::FixtureIdTracker;
use crate::fixtures::query_fixtures::QueryVariant;
use crate::id_tracker::id_tracker_base::IdTracker;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::types::{
BinaryQuantizationConfig, Distance, ProductQuantizationConfig, QuantizationConfig,
ScalarQuantizationConfig,
};
#[cfg(target_os = "linux")]
use crate::vector_storage::dense::memmap_dense_vector_storage::open_memmap_vector_storage_with_async_io;
use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
use crate::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use crate::vector_storage::vector_storage_base::VectorStorage;
use crate::vector_storage::{Random, VectorStorageEnum};
const DIMS: usize = 128;
const NUM_POINTS: usize = 600;
const DISTANCE: Distance = Distance::Dot;
const SAMPLE_SIZE: usize = 100;
const SEED: u64 = 42;
type Result<T, E = Error> = result::Result<T, E>;
type Error = Box<dyn error::Error>;
type Sampler<'a> = Box<dyn Iterator<Item = VectorElementType> + 'a>;
type SamplerGenerator = Box<dyn for<'a> Fn(&'a mut StdRng) -> Sampler<'a>>;
type WithQuantization = (QuantizationConfig, SamplerGenerator);
fn random_query<R: Rng + ?Sized>(
query_variant: &QueryVariant,
rng: &mut R,
gen_sampler: &dyn Fn(&mut R) -> Sampler,
) -> QueryVector {
crate::fixtures::query_fixtures::random_query(query_variant, rng, |rng| {
gen_sampler(rng).take(DIMS).collect_vec().into()
})
}
fn ram_storage(_dir: &Path) -> VectorStorageEnum {
new_volatile_dense_vector_storage(DIMS, DISTANCE)
}
#[cfg(target_os = "linux")]
fn async_memmap_storage(dir: &std::path::Path) -> VectorStorageEnum {
open_memmap_vector_storage_with_async_io(dir, DIMS, DISTANCE, true).unwrap()
}
fn scalar_u8() -> WithQuantization {
let config = ScalarQuantizationConfig {
r#type: crate::types::ScalarType::Int8,
quantile: Some(0.5),
always_ram: Some(true),
}
.into();
let sampler: SamplerGenerator = Box::new(|rng: &mut StdRng| {
Box::new(rng.sample_iter(rand_distr::Normal::new(0.0f32, 8.0).unwrap()))
});
(config, sampler)
}
fn product_x4() -> WithQuantization {
let config = ProductQuantizationConfig {
compression: crate::types::CompressionRatio::X4,
always_ram: Some(true),
}
.into();
let sampler: SamplerGenerator =
Box::new(|rng: &mut StdRng| Box::new(rng.sample_iter(rand::distr::StandardUniform)));
(config, sampler)
}
fn binary() -> WithQuantization {
let config = BinaryQuantizationConfig {
always_ram: Some(true),
encoding: None,
query_encoding: None,
}
.into();
let sampler: SamplerGenerator = Box::new(|rng: &mut StdRng| {
Box::new(
rng.sample_iter(rand::distr::Uniform::new_inclusive(-1.0, 1.0).unwrap())
.map(|x| f32::from(x as u8)),
)
});
(config, sampler)
}
fn scoring_equivalency(
query_variant: QueryVariant,
other_storage: impl FnOnce(&std::path::Path) -> VectorStorageEnum,
with_quantization: Option<WithQuantization>,
) -> Result<()> {
let (quant_config, quant_sampler) = with_quantization
.map(|v| (Some(v.0), Some(v.1)))
.unwrap_or_default();
let mut raw_storage = new_volatile_dense_vector_storage(DIMS, DISTANCE);
let mut rng = StdRng::seed_from_u64(SEED);
let gen_sampler = quant_sampler.unwrap_or_else(|| Box::new(|rng| Box::new(sampler(rng))));
super::utils::insert_distributed_vectors(
DIMS,
&mut raw_storage,
NUM_POINTS,
&mut gen_sampler(&mut rng.clone()),
)?;
let mut id_tracker = FixtureIdTracker::new(NUM_POINTS);
super::utils::delete_random_vectors(
&mut rng,
&mut raw_storage,
&mut id_tracker,
NUM_POINTS / 10,
)?;
let other_dir = tempfile::Builder::new().prefix("other-storage").tempdir()?;
let mut other_storage = other_storage(other_dir.path());
let mut iter = (0..NUM_POINTS).map(|i| {
let i = i as PointOffsetType;
let vec = raw_storage.get_vector::<Random>(i);
let deleted = raw_storage.is_deleted_vector(i);
(vec, deleted)
});
other_storage.update_from(&mut iter, &Default::default())?;
let quant_dir = tempfile::Builder::new().prefix("quant-storage").tempdir()?;
let quantized_vectors = if let Some(config) = &quant_config {
Some(QuantizedVectors::create(
&other_storage,
config,
QuantizedVectorsStorageType::Immutable,
quant_dir.path(),
4,
&AtomicBool::new(false),
)?)
} else {
None
};
let attempts = 50;
for i in 0..attempts {
let query = random_query(&query_variant, &mut rng, &gen_sampler);
let mut scorer = FilteredScorer::new_for_test(
query.clone(),
&raw_storage,
id_tracker.deleted_point_bitslice(),
);
let mut other_scorer = FilteredScorer::new(
query.clone(),
&other_storage,
quantized_vectors.as_ref(),
None,
id_tracker.deleted_point_bitslice(),
HardwareCounterCell::new(),
)?;
let points =
(0..other_storage.total_vector_count() as _).choose_multiple(&mut rng, SAMPLE_SIZE);
let scores = scorer.score_points(&mut points.clone(), 0).collect_vec();
let other_scores = other_scorer
.score_points(&mut points.clone(), 0)
.collect_vec();
// Compare scores
if quantized_vectors.is_none() {
// both calculations are done on raw vectors, so score should be exactly the same
assert_eq!(
scores, other_scores,
"Scorer results are not equal, attempt: {i}, query: {query:?}"
);
} else {
// Quantization is used for the other storage, so score should be similar
// but not necessarily the exact same. Recommend query has a step function,
// so small differences in similarities can lead to very different scores
let top = SAMPLE_SIZE / 10;
let raw_top: HashSet<_> = scores
.iter()
.sorted()
.rev()
.take(top)
.map(|p| p.idx)
.collect();
let other_top: HashSet<_> = other_scores
.iter()
.sorted()
.rev()
.take(top)
.map(|p| p.idx)
.collect();
let intersection = raw_top.intersection(&other_top).count();
assert!(
(intersection as f32 / top as f32) >= 0.7, // at least 70% of top 10% results should be shared
"Top results from scorers are not similar, attempt {i}:
top raw: {raw_top:?},
top other: {other_top:?}
only {intersection} of {top} top results are shared",
);
}
}
Ok(())
}
#[rstest]
fn compare_scoring_equivalency(
#[values(
QueryVariant::RecoBestScore,
QueryVariant::RecoSumScores,
QueryVariant::Discovery,
QueryVariant::Context
)]
query_variant: QueryVariant,
#[values(ram_storage)] other_storage: impl FnOnce(&std::path::Path) -> VectorStorageEnum,
#[values(None, Some(product_x4()), Some(scalar_u8()), Some(binary()))]
quantization_config: Option<WithQuantization>,
) -> Result<()> {
scoring_equivalency(query_variant, other_storage, quantization_config)
}
#[cfg(target_os = "linux")]
#[rstest]
fn async_compare_scoring_equivalency(
#[values(
QueryVariant::RecoBestScore,
QueryVariant::RecoSumScores,
QueryVariant::Discovery,
QueryVariant::Context
)]
query_variant: QueryVariant,
#[values(async_memmap_storage)] other_storage: impl FnOnce(&std::path::Path) -> VectorStorageEnum,
) -> Result<()> {
scoring_equivalency(query_variant, other_storage, None)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/async_raw_scorer.rs | lib/segment/src/vector_storage/tests/async_raw_scorer.rs | use bitvec::slice::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use rand::SeedableRng as _;
use rand::seq::IteratorRandom as _;
use super::utils::{Result, delete_random_vectors, insert_distributed_vectors, sampler};
use crate::data_types::vectors::QueryVector;
use crate::fixtures::payload_context_fixture::FixtureIdTracker;
use crate::id_tracker::IdTracker;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::types::Distance;
use crate::vector_storage::dense::memmap_dense_vector_storage::open_memmap_vector_storage_with_async_io;
use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
use crate::vector_storage::vector_storage_base::VectorStorage;
use crate::vector_storage::{Random, VectorStorageEnum};
#[test]
fn async_raw_scorer_cosine() -> Result<()> {
test_async_raw_scorer_defaults(Distance::Cosine)
}
#[test]
fn async_raw_scorer_euclid() -> Result<()> {
test_async_raw_scorer_defaults(Distance::Euclid)
}
#[test]
fn async_raw_scorer_manhattan() -> Result<()> {
test_async_raw_scorer_defaults(Distance::Manhattan)
}
#[test]
fn async_raw_scorer_dot() -> Result<()> {
test_async_raw_scorer_defaults(Distance::Dot)
}
fn test_async_raw_scorer_defaults(distance: Distance) -> Result<()> {
test_async_raw_scorer(6942, 128, distance, 1024, 128, 256)
}
fn test_async_raw_scorer(
seed: u64,
dim: usize,
distance: Distance,
points: usize,
delete: usize,
score: usize,
) -> Result<()> {
let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
let dir = tempfile::Builder::new()
.prefix("immutable-storage")
.tempdir()?;
let mut storage = open_memmap_vector_storage_with_async_io(dir.path(), dim, distance, true)?;
let mut id_tracker = FixtureIdTracker::new(points);
{
let mut volatile_storage = new_volatile_dense_vector_storage(dim, distance);
insert_random_vectors(&mut rng, dim, &mut volatile_storage, points)?;
delete_random_vectors(&mut rng, &mut volatile_storage, &mut id_tracker, delete)?;
let mut iter = (0..points).map(|i| {
let i = i as PointOffsetType;
let vec = volatile_storage.get_vector::<Random>(i);
let deleted = volatile_storage.is_deleted_vector(i);
(vec, deleted)
});
storage.update_from(&mut iter, &Default::default())?;
}
for _ in 0..score {
test_random_score(&mut rng, dim, &storage, id_tracker.deleted_point_bitslice())?;
}
Ok(())
}
fn insert_random_vectors(
rng: &mut impl rand::Rng,
dim: usize,
storage: &mut VectorStorageEnum,
vectors: usize,
) -> Result<()> {
insert_distributed_vectors(dim, storage, vectors, &mut sampler(rng))
}
fn test_random_score(
mut rng: impl rand::Rng,
dim: usize,
storage: &VectorStorageEnum,
deleted_points: &BitSlice,
) -> Result<()> {
let query: QueryVector = sampler(&mut rng).take(dim).collect_vec().into();
let mut scorer = FilteredScorer::new_for_test(query.clone(), storage, deleted_points);
let mut async_scorer = FilteredScorer::new(
query,
storage,
None,
None,
deleted_points,
HardwareCounterCell::new(),
)?;
let points = rng.random_range(1..storage.total_vector_count());
let points = (0..storage.total_vector_count() as _).choose_multiple(&mut rng, points);
let res = scorer.score_points(&mut points.clone(), 0).collect_vec();
let async_res = async_scorer
.score_points(&mut points.clone(), 0)
.collect_vec();
assert_eq!(res, async_res);
Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/utils.rs | lib/segment/src/vector_storage/tests/utils.rs | use std::{error, result};
use common::counter::hardware_counter::HardwareCounterCell;
use rand::seq::IteratorRandom;
use crate::data_types::vectors::VectorElementType;
use crate::id_tracker::IdTracker;
use crate::vector_storage::{VectorStorage, VectorStorageEnum};
pub type Result<T, E = Error> = result::Result<T, E>;
pub type Error = Box<dyn error::Error>;
pub fn sampler(rng: impl rand::Rng) -> impl Iterator<Item = f32> {
rng.sample_iter(rand::distr::StandardUniform)
}
pub fn insert_distributed_vectors(
dim: usize,
storage: &mut VectorStorageEnum,
vectors: usize,
sampler: &mut impl Iterator<Item = VectorElementType>,
) -> Result<()> {
let start = storage.total_vector_count() as u32;
let end = start + vectors as u32;
let mut vector = vec![0.; dim];
let hw_counter = HardwareCounterCell::new();
for offset in start..end {
for (item, value) in vector.iter_mut().zip(&mut *sampler) {
*item = value;
}
storage.insert_vector(offset, vector.as_slice().into(), &hw_counter)?;
}
Ok(())
}
pub fn delete_random_vectors(
rng: &mut impl rand::Rng,
storage: &mut VectorStorageEnum,
id_tracker: &mut impl IdTracker,
vectors: usize,
) -> Result<()> {
let offsets = (0..storage.total_vector_count() as _).choose_multiple(rng, vectors);
for offset in offsets {
storage.delete_vector(offset)?;
id_tracker.drop(crate::types::ExtendedPointId::NumId(offset.into()))?;
}
Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/mod.rs | lib/segment/src/vector_storage/tests/mod.rs | #[cfg(target_os = "linux")]
mod async_raw_scorer;
mod custom_query_scorer_equivalency;
mod test_appendable_dense_vector_storage;
mod test_appendable_multi_dense_vector_storage;
mod test_appendable_sparse_vector_storage;
mod utils;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/test_appendable_sparse_vector_storage.rs | lib/segment/src/vector_storage/tests/test_appendable_sparse_vector_storage.rs | use std::path::Path;
use std::sync::Arc;
#[cfg(feature = "rocksdb")]
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use sparse::common::sparse_vector::SparseVector;
use tempfile::Builder;
#[cfg(feature = "rocksdb")]
use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use crate::data_types::vectors::QueryVector;
use crate::fixtures::payload_context_fixture::FixtureIdTracker;
use crate::id_tracker::IdTrackerSS;
use crate::index::hnsw_index::point_scorer::BatchFilteredSearcher;
use crate::vector_storage::query::RecoQuery;
use crate::vector_storage::sparse::mmap_sparse_vector_storage::MmapSparseVectorStorage;
#[cfg(feature = "rocksdb")]
use crate::vector_storage::sparse::simple_sparse_vector_storage::open_simple_sparse_vector_storage;
use crate::vector_storage::sparse::volatile_sparse_vector_storage::new_volatile_sparse_vector_storage;
use crate::vector_storage::{DEFAULT_STOPPED, Random, VectorStorage, VectorStorageEnum};
fn do_test_delete_points(storage: &mut VectorStorageEnum) {
let points: Vec<SparseVector> = vec![
vec![(0, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (2, 1.0)],
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (1, 1.0), (3, 1.0)],
vec![(0, 1.0)],
]
.into_iter()
.map(|v| v.try_into().unwrap())
.collect();
let delete_mask = [false, false, true, true, false];
let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
let borrowed_id_tracker = id_tracker.borrow_mut();
let hw_counter = HardwareCounterCell::new();
// Insert all points
for (i, vec) in points.iter().enumerate() {
storage
.insert_vector(i as PointOffsetType, vec.into(), &hw_counter)
.unwrap();
}
// Check that all points are inserted
for (i, vec) in points.iter().enumerate() {
let stored_vec = storage.get_vector::<Random>(i as PointOffsetType);
let sparse: &SparseVector = stored_vec.as_vec_ref().try_into().unwrap();
assert_eq!(sparse, vec);
}
// Delete select number of points
delete_mask
.into_iter()
.enumerate()
.filter(|(_, d)| *d)
.for_each(|(i, _)| {
storage.delete_vector(i as PointOffsetType).unwrap();
});
assert_eq!(
storage.deleted_vector_count(),
2,
"2 vectors must be deleted"
);
// Check that deleted points are deleted through raw scorer
// Because raw scorer for nearest Query is incorrect
// (nearest search is processed using inverted index),
// use Recommend query to simulate nearest search
let vector: SparseVector = vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)]
.try_into()
.unwrap();
let query_vector = QueryVector::RecommendBestScore(RecoQuery {
positives: vec![vector.into()],
negatives: vec![],
});
// Because nearest search for raw scorer is incorrect,
let searcher = BatchFilteredSearcher::new_for_test(
&[query_vector],
storage,
borrowed_id_tracker.deleted_point_bitslice(),
5,
);
let closest = searcher
.peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
.unwrap()
.into_iter()
.exactly_one()
.unwrap();
assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
assert_eq!(closest[0].idx, 0);
assert_eq!(closest[1].idx, 1);
assert_eq!(closest[2].idx, 4);
// Delete 1, re-delete 2
storage.delete_vector(1 as PointOffsetType).unwrap();
storage.delete_vector(2 as PointOffsetType).unwrap();
assert_eq!(
storage.deleted_vector_count(),
3,
"3 vectors must be deleted"
);
// Delete all
storage.delete_vector(0 as PointOffsetType).unwrap();
storage.delete_vector(4 as PointOffsetType).unwrap();
assert_eq!(
storage.deleted_vector_count(),
5,
"all vectors must be deleted"
);
}
fn do_test_update_from_delete_points(storage: &mut VectorStorageEnum) {
let points: Vec<Option<SparseVector>> = vec![
Some(vec![(0, 1.0), (2, 1.0), (3, 1.0)]),
Some(vec![(0, 1.0), (2, 1.0)]),
None,
None,
Some(vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)]),
Some(vec![(0, 1.0), (1, 1.0), (3, 1.0)]),
None,
]
.into_iter()
.map(|opt| opt.map(|v| v.try_into().unwrap()))
.collect();
let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
let hw_counter = HardwareCounterCell::new();
let borrowed_id_tracker = id_tracker.borrow_mut();
{
let mut storage2 = new_volatile_sparse_vector_storage();
points.iter().enumerate().for_each(|(i, opt_vec)| {
if let Some(vec) = opt_vec {
storage2
.insert_vector(i as PointOffsetType, vec.into(), &hw_counter)
.unwrap();
} else {
storage2.delete_vector(i as PointOffsetType).unwrap();
}
});
let mut iter = (0..points.len()).map(|i| {
let i = i as PointOffsetType;
let vec = storage2.get_vector::<Random>(i);
let deleted = storage2.is_deleted_vector(i);
(vec, deleted)
});
storage.update_from(&mut iter, &Default::default()).unwrap();
}
assert_eq!(
storage.deleted_vector_count(),
3,
"3 vectors must be deleted from other storage"
);
// Check that deleted points are deleted through raw scorer
// Because raw scorer for nearest Query is incorrect
// (nearest search is processed using inverted index),
// use Recommend query to simulate nearest search
let vector: SparseVector = vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)]
.try_into()
.unwrap();
let query_vector = QueryVector::RecommendBestScore(RecoQuery {
positives: vec![vector.into()],
negatives: vec![],
});
let searcher = BatchFilteredSearcher::new_for_test(
&[query_vector],
storage,
borrowed_id_tracker.deleted_point_bitslice(),
5,
);
let results = searcher
.peek_top_iter(&mut [0, 1, 2, 3, 4, 5].iter().cloned(), &DEFAULT_STOPPED)
.unwrap();
let closest = results.into_iter().exactly_one().unwrap();
assert_eq!(
closest.len(),
4,
"must have 4 vectors, 3 are deleted. closest = {closest:?}"
);
assert_eq!(closest[0].idx, 4);
assert_eq!(closest[1].idx, 0);
assert_eq!(closest[2].idx, 5);
assert_eq!(closest[3].idx, 1);
// Delete all
storage.delete_vector(0 as PointOffsetType).unwrap();
storage.delete_vector(1 as PointOffsetType).unwrap();
storage.delete_vector(4 as PointOffsetType).unwrap();
storage.delete_vector(5 as PointOffsetType).unwrap();
assert_eq!(
storage.deleted_vector_count(),
7,
"all vectors must be deleted"
);
}
fn do_test_persistence(open: impl Fn(&Path) -> VectorStorageEnum) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut storage = open(dir.path());
let points = vec![
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)],
]
.into_iter()
.map(|v| v.try_into().unwrap())
.collect::<Vec<SparseVector>>();
let hw_counter = HardwareCounterCell::new();
points.iter().enumerate().for_each(|(i, vec)| {
storage
.insert_vector(i as PointOffsetType, vec.into(), &hw_counter)
.unwrap();
});
// Delete selective vectors
storage.delete_vector(1).unwrap();
storage.delete_vector(3).unwrap();
storage.flusher()().unwrap();
let deleted_vector_count = storage.deleted_vector_count();
let available_vector_count = storage.available_vector_count();
drop(storage);
// Re-open storage and verify state
let storage = open(dir.path());
// Check deleted vectors are still marked as deleted
assert!(storage.is_deleted_vector(1));
assert!(storage.get_vector_opt::<Random>(1).is_none());
assert!(storage.is_deleted_vector(3));
assert!(storage.get_vector_opt::<Random>(3).is_none());
// Check non-deleted vectors still have correct data
let verify_idx = [0, 2, 4];
for idx in verify_idx {
let stored = storage.get_vector::<Random>(idx);
let sparse: &SparseVector = stored.as_vec_ref().try_into().unwrap();
assert_eq!(sparse, &points[idx as usize]);
}
assert_eq!(storage.deleted_vector_count(), 2);
assert_eq!(storage.deleted_vector_count(), deleted_vector_count);
assert_eq!(storage.available_vector_count(), available_vector_count);
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_delete_points_in_simple_sparse_vector_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
{
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let mut storage =
open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap();
do_test_delete_points(&mut storage);
storage.flusher()().unwrap();
}
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let _storage =
open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap();
}
#[test]
fn test_delete_points_in_mmap_sparse_vector_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut storage =
VectorStorageEnum::SparseMmap(MmapSparseVectorStorage::open_or_create(dir.path()).unwrap());
do_test_delete_points(&mut storage);
storage.flusher()().unwrap();
drop(storage);
let _storage = MmapSparseVectorStorage::open_or_create(dir.path()).unwrap();
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_update_from_delete_points_simple_sparse_vector_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
{
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let mut storage =
open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap();
do_test_update_from_delete_points(&mut storage);
storage.flusher()().unwrap();
}
let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
let _storage =
open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap();
}
#[test]
fn test_update_from_delete_points_mmap_sparse_vector_storage() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut storage =
VectorStorageEnum::SparseMmap(MmapSparseVectorStorage::open_or_create(dir.path()).unwrap());
do_test_update_from_delete_points(&mut storage);
storage.flusher()().unwrap();
drop(storage);
let _storage =
VectorStorageEnum::SparseMmap(MmapSparseVectorStorage::open_or_create(dir.path()).unwrap());
}
#[test]
fn test_persistence_in_mmap_sparse_vector_storage() {
do_test_persistence(|path| {
VectorStorageEnum::SparseMmap(MmapSparseVectorStorage::open_or_create(path).unwrap())
});
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_persistence_in_simple_sparse_vector_storage() {
do_test_persistence(|path| {
let db = open_db(path, &[DB_VECTOR_CF]).unwrap();
open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap()
});
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/test_appendable_multi_dense_vector_storage.rs | lib/segment/src/vector_storage/tests/test_appendable_multi_dense_vector_storage.rs | use std::path::Path;
use std::sync::Arc;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use common::validation::MAX_MULTIVECTOR_FLATTENED_LEN;
use rstest::rstest;
use tempfile::Builder;
use crate::data_types::vectors::{
MultiDenseVectorInternal, QueryVector, TypedMultiDenseVectorRef, VectorElementType, VectorRef,
};
use crate::fixtures::payload_context_fixture::FixtureIdTracker;
use crate::id_tracker::IdTrackerSS;
use crate::index::hnsw_index::point_scorer::BatchFilteredSearcher;
use crate::types::{Distance, MultiVectorConfig};
use crate::vector_storage::common::CHUNK_SIZE;
use crate::vector_storage::multi_dense::appendable_mmap_multi_dense_vector_storage::open_appendable_memmap_multi_vector_storage_full;
use crate::vector_storage::multi_dense::volatile_multi_dense_vector_storage::new_volatile_multi_dense_vector_storage;
use crate::vector_storage::{
DEFAULT_STOPPED, MultiVectorStorage, Random, VectorStorage, VectorStorageEnum,
};
#[derive(Clone, Copy)]
enum MultiDenseStorageType {
#[cfg(feature = "rocksdb")]
RocksDbFloat,
AppendableMmapFloat,
}
fn multi_points_fixtures(vec_count: usize, vec_dim: usize) -> Vec<MultiDenseVectorInternal> {
let mut multis: Vec<MultiDenseVectorInternal> = Vec::new();
for i in 0..vec_count {
let value = i as f32;
// hardcoded 5 inner vectors
let vectors = vec![
vec![value; vec_dim],
vec![value; vec_dim],
vec![value; vec_dim],
vec![value; vec_dim],
vec![value; vec_dim],
];
let multi = MultiDenseVectorInternal::try_from(vectors).unwrap();
multis.push(multi);
}
multis
}
fn do_test_delete_points(vector_dim: usize, vec_count: usize, storage: &mut VectorStorageEnum) {
let points = multi_points_fixtures(vec_count, vector_dim);
let delete_mask = [false, false, true, true, false];
let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
let borrowed_id_tracker = id_tracker.borrow_mut();
let hw_counter = HardwareCounterCell::new();
// Insert all points
for (i, vec) in points.iter().enumerate() {
storage
.insert_vector(i as PointOffsetType, vec.into(), &hw_counter)
.unwrap();
}
// Check that all points are inserted
for (i, vec) in points.iter().enumerate() {
let stored_vec = storage.get_vector::<Random>(i as PointOffsetType);
let multi_dense: TypedMultiDenseVectorRef<_> = stored_vec.as_vec_ref().try_into().unwrap();
assert_eq!(multi_dense.to_owned(), vec.clone());
}
// Check that all points are inserted #2
{
let orig_iter = points.iter().flat_map(|multivec| multivec.multi_vectors());
match storage as &VectorStorageEnum {
#[cfg(feature = "rocksdb")]
VectorStorageEnum::DenseSimple(_)
| VectorStorageEnum::DenseSimpleByte(_)
| VectorStorageEnum::DenseSimpleHalf(_) => unreachable!(),
#[cfg(test)]
VectorStorageEnum::DenseVolatile(_)
| VectorStorageEnum::DenseVolatileByte(_)
| VectorStorageEnum::DenseVolatileHalf(_) => unreachable!(),
VectorStorageEnum::DenseMemmap(_)
| VectorStorageEnum::DenseMemmapByte(_)
| VectorStorageEnum::DenseMemmapHalf(_) => unreachable!(),
VectorStorageEnum::DenseAppendableMemmap(_)
| VectorStorageEnum::DenseAppendableMemmapByte(_)
| VectorStorageEnum::DenseAppendableMemmapHalf(_) => unreachable!(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::SparseSimple(_) => unreachable!(),
VectorStorageEnum::SparseMmap(_) => unreachable!(),
#[cfg(test)]
VectorStorageEnum::SparseVolatile(_) => unreachable!(),
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimple(v) => {
for (orig, vec) in orig_iter.zip(v.iterate_inner_vectors()) {
assert_eq!(orig, vec);
}
}
#[cfg(feature = "rocksdb")]
VectorStorageEnum::MultiDenseSimpleByte(_)
| VectorStorageEnum::MultiDenseSimpleHalf(_) => unreachable!(),
VectorStorageEnum::MultiDenseVolatile(v) => {
for (orig, vec) in orig_iter.zip(v.iterate_inner_vectors()) {
assert_eq!(orig, vec);
}
}
VectorStorageEnum::MultiDenseVolatileByte(_)
| VectorStorageEnum::MultiDenseVolatileHalf(_) => unreachable!(),
VectorStorageEnum::MultiDenseAppendableMemmap(v) => {
for (orig, vec) in orig_iter.zip(v.iterate_inner_vectors()) {
assert_eq!(orig, vec);
}
}
VectorStorageEnum::MultiDenseAppendableMemmapByte(_)
| VectorStorageEnum::MultiDenseAppendableMemmapHalf(_) => unreachable!(),
VectorStorageEnum::DenseAppendableInRam(_)
| VectorStorageEnum::DenseAppendableInRamByte(_)
| VectorStorageEnum::DenseAppendableInRamHalf(_) => unreachable!(),
VectorStorageEnum::MultiDenseAppendableInRam(_)
| VectorStorageEnum::MultiDenseAppendableInRamByte(_)
| VectorStorageEnum::MultiDenseAppendableInRamHalf(_) => unreachable!(),
};
}
// Delete select number of points
delete_mask
.into_iter()
.enumerate()
.filter(|(_, d)| *d)
.for_each(|(i, _)| {
storage.delete_vector(i as PointOffsetType).unwrap();
});
assert_eq!(
storage.deleted_vector_count(),
2,
"2 vectors must be deleted"
);
let vector: Vec<Vec<f32>> = vec![vec![2.0; vector_dim]];
let query = QueryVector::Nearest(vector.try_into().unwrap());
let searcher = BatchFilteredSearcher::new_for_test(
std::slice::from_ref(&query),
storage,
borrowed_id_tracker.deleted_point_bitslice(),
5,
);
let closest = searcher
.peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
.unwrap()
.pop()
.unwrap();
assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
assert_eq!(closest[0].idx, 4);
assert_eq!(closest[1].idx, 1);
assert_eq!(closest[2].idx, 0);
// Delete 1, redelete 2
storage.delete_vector(1 as PointOffsetType).unwrap();
storage.delete_vector(2 as PointOffsetType).unwrap();
assert_eq!(
storage.deleted_vector_count(),
3,
"3 vectors must be deleted"
);
let vector: Vec<Vec<f32>> = vec![vec![1.0; vector_dim]];
let query = QueryVector::Nearest(vector.try_into().unwrap());
let searcher = BatchFilteredSearcher::new_for_test(
std::slice::from_ref(&query),
storage,
borrowed_id_tracker.deleted_point_bitslice(),
5,
);
let closest = searcher
.peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
.unwrap()
.pop()
.unwrap();
assert_eq!(closest.len(), 2, "must have 2 vectors, 3 are deleted");
assert_eq!(closest[0].idx, 4);
assert_eq!(closest[1].idx, 0);
// Delete all
storage.delete_vector(0 as PointOffsetType).unwrap();
storage.delete_vector(4 as PointOffsetType).unwrap();
assert_eq!(
storage.deleted_vector_count(),
5,
"all vectors must be deleted"
);
let vector: Vec<Vec<f32>> = vec![vec![1.0; vector_dim]];
let query = QueryVector::Nearest(vector.try_into().unwrap());
let searcher = BatchFilteredSearcher::new_for_test(
std::slice::from_ref(&query),
storage,
borrowed_id_tracker.deleted_point_bitslice(),
5,
);
let closest = searcher
.peek_top_all(&DEFAULT_STOPPED)
.unwrap()
.pop()
.unwrap();
assert!(closest.is_empty(), "must have no results, all deleted");
}
/// Build a second (volatile) multi-dense storage with some vectors deleted,
/// copy it into `storage` via `update_from`, and verify that deleted flags
/// are propagated, search skips deleted points, and further deletions work.
fn do_test_update_from_delete_points(
    vector_dim: usize,
    vec_count: usize,
    storage: &mut VectorStorageEnum,
) {
    let points = multi_points_fixtures(vec_count, vector_dim);
    // Points 2 and 3 are deleted in the source storage before the copy.
    let delete_mask = [false, false, true, true, false];
    let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
        Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
    let borrowed_id_tracker = id_tracker.borrow_mut();
    let hw_counter = HardwareCounterCell::new();
    {
        // Source storage: insert all fixture points, then delete the masked ones.
        let mut storage2 = new_volatile_multi_dense_vector_storage(
            vector_dim,
            Distance::Dot,
            MultiVectorConfig::default(),
        );
        {
            points.iter().enumerate().for_each(|(i, vec)| {
                storage2
                    .insert_vector(i as PointOffsetType, vec.into(), &hw_counter)
                    .unwrap();
                if delete_mask[i] {
                    storage2.delete_vector(i as PointOffsetType).unwrap();
                }
            });
        }
        // Stream (vector, deleted) pairs into the storage under test.
        let mut iter = (0..points.len()).map(|i| {
            let i = i as PointOffsetType;
            let vec = storage2.get_vector::<Random>(i);
            let deleted = storage2.is_deleted_vector(i);
            (vec, deleted)
        });
        storage.update_from(&mut iter, &Default::default()).unwrap();
    }
    // Deleted flags must be propagated by `update_from`.
    assert_eq!(
        storage.deleted_vector_count(),
        2,
        "2 vectors must be deleted from other storage"
    );
    let vector: Vec<Vec<f32>> = vec![vec![1.0; vector_dim]];
    let query = QueryVector::Nearest(vector.try_into().unwrap());
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        5,
    );
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .pop()
        .unwrap();
    // Only the three live points (0, 1, 4) may appear in the results.
    assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
    assert_eq!(closest[0].idx, 4);
    assert_eq!(closest[1].idx, 1);
    assert_eq!(closest[2].idx, 0);
    // Delete all
    storage.delete_vector(0 as PointOffsetType).unwrap();
    storage.delete_vector(1 as PointOffsetType).unwrap();
    storage.delete_vector(4 as PointOffsetType).unwrap();
    assert_eq!(
        storage.deleted_vector_count(),
        5,
        "all vectors must be deleted"
    );
}
/// Open a multi-dense vector storage of the requested backend type at `path`.
///
/// All backends use dot-product distance and the default multi-vector config.
/// Panics on any open failure (test helper).
fn create_vector_storage(
    storage_type: MultiDenseStorageType,
    vec_dim: usize,
    path: &Path,
) -> VectorStorageEnum {
    match storage_type {
        #[cfg(feature = "rocksdb")]
        MultiDenseStorageType::RocksDbFloat => {
            use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
            use crate::vector_storage::multi_dense::simple_multi_dense_vector_storage::open_simple_multi_dense_vector_storage_full;
            let db = open_db(path, &[DB_VECTOR_CF]).unwrap();
            open_simple_multi_dense_vector_storage_full(
                db,
                DB_VECTOR_CF,
                vec_dim,
                Distance::Dot,
                MultiVectorConfig::default(),
                &Default::default(),
            )
            .unwrap()
        }
        MultiDenseStorageType::AppendableMmapFloat => {
            open_appendable_memmap_multi_vector_storage_full(
                path,
                vec_dim,
                Distance::Dot,
                MultiVectorConfig::default(),
            )
            .unwrap()
        }
    }
}
#[rstest]
#[cfg_attr(feature = "rocksdb", case(MultiDenseStorageType::RocksDbFloat))]
#[case(MultiDenseStorageType::AppendableMmapFloat)]
fn test_delete_points_in_multi_dense_vector_storage(#[case] storage_type: MultiDenseStorageType) {
    let vec_dim = 1024;
    let vec_count = 5;
    let tmp_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Run the deletion scenario, flush, and remember how many vectors were stored.
    let expected_count = {
        let mut storage = create_vector_storage(storage_type, vec_dim, tmp_dir.path());
        do_test_delete_points(vec_dim, vec_count, &mut storage);
        let stored = storage.total_vector_count();
        storage.flusher()().unwrap();
        stored
    };
    // Re-open the same directory: the vector count must survive a reload.
    let reopened = create_vector_storage(storage_type, vec_dim, tmp_dir.path());
    assert_eq!(
        reopened.total_vector_count(),
        expected_count,
        "total vector count must be the same"
    );
    // Every slot must still be retrievable from the reopened storage.
    assert!((0..expected_count).all(|id| {
        reopened
            .get_vector_opt::<Random>(id as PointOffsetType)
            .is_some()
    }));
}
#[rstest]
#[cfg_attr(feature = "rocksdb", case(MultiDenseStorageType::RocksDbFloat))]
#[case(MultiDenseStorageType::AppendableMmapFloat)]
fn test_update_from_delete_points_multi_dense_vector_storage(
    #[case] storage_type: MultiDenseStorageType,
) {
    let vec_dim = 1024;
    let vec_count = 5;
    let tmp_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Run the update-from scenario, flush, and remember the stored vector count.
    let expected_count = {
        let mut storage = create_vector_storage(storage_type, vec_dim, tmp_dir.path());
        do_test_update_from_delete_points(vec_dim, vec_count, &mut storage);
        let stored = storage.total_vector_count();
        storage.flusher()().unwrap();
        stored
    };
    // Re-open the same directory: the vector count must survive a reload.
    let reopened = create_vector_storage(storage_type, vec_dim, tmp_dir.path());
    assert_eq!(
        reopened.total_vector_count(),
        expected_count,
        "total vector count must be the same"
    );
    // Every slot must still be retrievable from the reopened storage.
    assert!((0..expected_count).all(|id| {
        reopened
            .get_vector_opt::<Random>(id as PointOffsetType)
            .is_some()
    }));
}
#[rstest]
#[cfg_attr(feature = "rocksdb", case(MultiDenseStorageType::RocksDbFloat))]
#[case(MultiDenseStorageType::AppendableMmapFloat)]
fn test_large_multi_dense_vector_storage(#[case] storage_type: MultiDenseStorageType) {
    // Sanity: the flattened-size limit itself must fit into a single chunk.
    assert!(MAX_MULTIVECTOR_FLATTENED_LEN * std::mem::size_of::<VectorElementType>() < CHUNK_SIZE);
    let vec_dim = 100_000;
    let vec_count = 100;
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut storage = create_vector_storage(storage_type, vec_dim, dir.path());
    // 100 x 100_000 elements exceeds the flattened multivector limit.
    let multivec =
        MultiDenseVectorInternal::try_from(vec![vec![0.0; vec_dim]; vec_count]).unwrap();
    let hw_counter = HardwareCounterCell::new();
    // Inserting an oversized multivector must be rejected with a "too large" error.
    let Err(e) = storage.insert_vector(0, VectorRef::from(&multivec), &hw_counter) else {
        panic!("Inserting vector should fail");
    };
    assert!(e.to_string().contains("too large"));
}
#[test]
fn test_delete_points_in_volatile_multi_dense_vector_storage() {
    let (vec_dim, vec_count) = (1024, 5);
    let mut storage = new_volatile_multi_dense_vector_storage(
        vec_dim,
        Distance::Dot,
        MultiVectorConfig::default(),
    );
    do_test_delete_points(vec_dim, vec_count, &mut storage);
    // Every slot must still be retrievable after the deletion scenario.
    assert!((0..storage.total_vector_count()).all(|id| {
        storage
            .get_vector_opt::<Random>(id as PointOffsetType)
            .is_some()
    }));
}
#[test]
fn test_update_from_delete_points_volatile_multi_dense_vector_storage() {
    let (vec_dim, vec_count) = (1024, 5);
    let mut storage = new_volatile_multi_dense_vector_storage(
        vec_dim,
        Distance::Dot,
        MultiVectorConfig::default(),
    );
    do_test_update_from_delete_points(vec_dim, vec_count, &mut storage);
    // Every slot must still be retrievable after the update-from scenario.
    assert!((0..storage.total_vector_count()).all(|id| {
        storage
            .get_vector_opt::<Random>(id as PointOffsetType)
            .is_some()
    }));
}
#[test]
fn test_large_volatile_multi_dense_vector_storage() {
    // Sanity: the flattened-size limit itself must fit into a single chunk.
    assert!(MAX_MULTIVECTOR_FLATTENED_LEN * std::mem::size_of::<VectorElementType>() < CHUNK_SIZE);
    let vec_dim = 100_000;
    let vec_count = 100;
    let mut storage = new_volatile_multi_dense_vector_storage(
        vec_dim,
        Distance::Dot,
        MultiVectorConfig::default(),
    );
    // 100 x 100_000 elements exceeds the flattened multivector limit.
    let multivec =
        MultiDenseVectorInternal::try_from(vec![vec![0.0; vec_dim]; vec_count]).unwrap();
    let hw_counter = HardwareCounterCell::new();
    // Inserting an oversized multivector must be rejected with a "too large" error.
    let Err(e) = storage.insert_vector(0, VectorRef::from(&multivec), &hw_counter) else {
        panic!("Inserting vector should fail");
    };
    assert!(e.to_string().contains("too large"));
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/tests/test_appendable_dense_vector_storage.rs | lib/segment/src/vector_storage/tests/test_appendable_dense_vector_storage.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use tempfile::Builder;
#[cfg(feature = "rocksdb")]
use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use crate::data_types::vectors::QueryVector;
use crate::fixtures::payload_context_fixture::FixtureIdTracker;
use crate::id_tracker::IdTrackerSS;
use crate::index::hnsw_index::point_scorer::{BatchFilteredSearcher, FilteredScorer};
use crate::types::{Distance, PointIdType, QuantizationConfig, ScalarQuantizationConfig};
use crate::vector_storage::dense::appendable_dense_vector_storage::open_appendable_memmap_vector_storage;
#[cfg(feature = "rocksdb")]
use crate::vector_storage::dense::simple_dense_vector_storage::open_simple_dense_full_vector_storage;
use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
use crate::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use crate::vector_storage::{
DEFAULT_STOPPED, Random, VectorStorage, VectorStorageEnum, new_raw_scorer,
};
/// Exercise vector deletion on `storage`: insert five fixed 4-dim points,
/// delete subsets step by step, and verify deleted counts and search results
/// after each step.
fn do_test_delete_points(storage: &mut VectorStorageEnum) {
    let points = [
        vec![1.0, 0.0, 1.0, 1.0],
        vec![1.0, 0.0, 1.0, 0.0],
        vec![1.0, 1.0, 1.0, 1.0],
        vec![1.0, 1.0, 0.0, 1.0],
        vec![1.0, 0.0, 0.0, 0.0],
    ];
    // Points 2 and 3 are deleted in the first round.
    let delete_mask = [false, false, true, true, false];
    let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
        Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
    let borrowed_id_tracker = id_tracker.borrow_mut();
    let hw_counter = HardwareCounterCell::new();
    for (i, vec) in points.iter().enumerate() {
        storage
            .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
            .unwrap();
    }
    // Delete select number of points
    delete_mask
        .into_iter()
        .enumerate()
        .filter(|(_, d)| *d)
        .for_each(|(i, _)| {
            storage.delete_vector(i as PointOffsetType).unwrap();
        });
    assert_eq!(
        storage.deleted_vector_count(),
        2,
        "2 vectors must be deleted",
    );
    let vector = vec![0.0, 1.0, 1.1, 1.0];
    let query = vector.as_slice().into();
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        5,
    );
    // Only the three live points (0, 1, 4) may appear in the results.
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
    assert_eq!(closest[0].idx, 0);
    assert_eq!(closest[1].idx, 1);
    assert_eq!(closest[2].idx, 4);
    // Delete 1, redelete 2
    storage.delete_vector(1 as PointOffsetType).unwrap();
    storage.delete_vector(2 as PointOffsetType).unwrap();
    // Re-deleting point 2 must not double-count: total goes 2 -> 3, not 4.
    assert_eq!(
        storage.deleted_vector_count(),
        3,
        "3 vectors must be deleted"
    );
    let vector = vec![1.0, 0.0, 0.0, 0.0];
    let query = vector.as_slice().into();
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        5,
    );
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    assert_eq!(closest.len(), 2, "must have 2 vectors, 3 are deleted");
    assert_eq!(closest[0].idx, 4);
    assert_eq!(closest[1].idx, 0);
    // Delete all
    storage.delete_vector(0 as PointOffsetType).unwrap();
    storage.delete_vector(4 as PointOffsetType).unwrap();
    assert_eq!(
        storage.deleted_vector_count(),
        5,
        "all vectors must be deleted",
    );
    let vector = vec![1.0, 0.0, 0.0, 0.0];
    let query = vector.as_slice().into();
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        5,
    );
    // With everything deleted, an unfiltered top-all search must come back empty.
    let closest = searcher
        .peek_top_all(&DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    assert!(closest.is_empty(), "must have no results, all deleted");
}
/// Build a volatile source storage with some vectors deleted, copy it into
/// `storage` via `update_from`, and verify that deleted flags are propagated
/// and search skips deleted points.
fn do_test_update_from_delete_points(storage: &mut VectorStorageEnum) {
    let points = [
        vec![1.0, 0.0, 1.0, 1.0],
        vec![1.0, 0.0, 1.0, 0.0],
        vec![1.0, 1.0, 1.0, 1.0],
        vec![1.0, 1.0, 0.0, 1.0],
        vec![1.0, 0.0, 0.0, 0.0],
    ];
    // Points 2 and 3 are deleted in the source storage before the copy.
    let delete_mask = [false, false, true, true, false];
    let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
        Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
    let borrowed_id_tracker = id_tracker.borrow_mut();
    let hw_counter = HardwareCounterCell::new();
    {
        // Source storage: insert all points, then delete the masked ones.
        let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
        {
            points.iter().enumerate().for_each(|(i, vec)| {
                storage2
                    .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
                    .unwrap();
                if delete_mask[i] {
                    storage2.delete_vector(i as PointOffsetType).unwrap();
                }
            });
        }
        // Stream (vector, deleted) pairs into the storage under test.
        let mut iter = (0..points.len()).map(|i| {
            let i = i as PointOffsetType;
            let vec = storage2.get_vector::<Random>(i);
            let deleted = storage2.is_deleted_vector(i);
            (vec, deleted)
        });
        storage.update_from(&mut iter, &Default::default()).unwrap();
    }
    // Deleted flags must be propagated by `update_from`.
    assert_eq!(
        storage.deleted_vector_count(),
        2,
        "2 vectors must be deleted from other storage",
    );
    let vector = vec![0.0, 1.0, 1.1, 1.0];
    let query = vector.as_slice().into();
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        5,
    );
    // Only the three live points (0, 1, 4) may appear in the results.
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
    assert_eq!(closest[0].idx, 0);
    assert_eq!(closest[1].idx, 1);
    assert_eq!(closest[2].idx, 4);
    // Delete all
    storage.delete_vector(0 as PointOffsetType).unwrap();
    storage.delete_vector(1 as PointOffsetType).unwrap();
    storage.delete_vector(4 as PointOffsetType).unwrap();
    assert_eq!(
        storage.deleted_vector_count(),
        5,
        "all vectors must be deleted",
    );
}
/// Score five fixed points against `storage`, drop the best match via the id
/// tracker, and verify that scorer results are deterministic and exclude the
/// dropped point afterwards.
fn do_test_score_points(storage: &mut VectorStorageEnum) {
    let points = [
        vec![1.0, 0.0, 1.0, 1.0],
        vec![1.0, 0.0, 1.0, 0.0],
        vec![1.0, 1.0, 1.0, 1.0],
        vec![1.0, 1.0, 0.0, 1.0],
        vec![1.0, 0.0, 0.0, 0.0],
    ];
    let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
        Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
    let mut borrowed_id_tracker = id_tracker.borrow_mut();
    let hw_counter = HardwareCounterCell::new();
    for (i, vec) in points.iter().enumerate() {
        storage
            .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
            .unwrap();
    }
    let query: QueryVector = [0.0, 1.0, 1.1, 1.0].into();
    let searcher = BatchFilteredSearcher::new_for_test(
        std::slice::from_ref(&query),
        storage,
        borrowed_id_tracker.deleted_point_bitslice(),
        2,
    );
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    // Point 2 is expected to be the best match for this query.
    let top_idx = match closest.first() {
        Some(scored_point) => {
            assert_eq!(scored_point.idx, 2);
            scored_point.idx
        }
        None => panic!("No close vector found!"),
    };
    // Drop the winner from the id tracker; it must disappear from later results.
    borrowed_id_tracker
        .drop(PointIdType::NumId(u64::from(top_idx)))
        .unwrap();
    let mut raw_scorer = FilteredScorer::new(
        query.clone(),
        storage,
        None,
        None,
        borrowed_id_tracker.deleted_point_bitslice(),
        HardwareCounterCell::new(),
    )
    .unwrap();
    let searcher = BatchFilteredSearcher::new(
        &[&query],
        storage,
        None,
        None,
        2,
        borrowed_id_tracker.deleted_point_bitslice(),
        HardwareCounterCell::new(),
    )
    .unwrap();
    let closest = searcher
        .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
        .unwrap()
        .into_iter()
        .exactly_one()
        .unwrap();
    let query_points = vec![0, 1, 2, 3, 4];
    // Scoring the same points twice must yield identical results.
    let raw_res1 = raw_scorer
        .score_points(&mut query_points.clone(), 0)
        .collect::<Vec<_>>();
    let raw_res2 = raw_scorer
        .score_points(&mut query_points.clone(), 0)
        .collect::<Vec<_>>();
    assert_eq!(raw_res1, raw_res2);
    match closest.first() {
        Some(scored_point) => {
            assert_ne!(scored_point.idx, 2);
            // assumes `score_points` yields results in input id order — TODO confirm
            assert_eq!(&raw_res1[scored_point.idx as usize], scored_point);
        }
        None => panic!("No close vector found!"),
    };
    // Internal id iteration must be repeatable and exclude the dropped point.
    let all_ids1: Vec<_> = borrowed_id_tracker.iter_internal().collect();
    let all_ids2: Vec<_> = borrowed_id_tracker.iter_internal().collect();
    assert_eq!(all_ids1, all_ids2);
    assert!(!all_ids1.contains(&top_idx))
}
/// Quantize `storage`'s vectors with scalar quantization and check that
/// quantized scores stay within 0.15 of the original raw scores, both for a
/// freshly built quantization and after a save/load round-trip.
fn test_score_quantized_points(storage: &mut VectorStorageEnum) {
    let points = [
        vec![1.0, 0.0, 1.0, 1.0],
        vec![1.0, 0.0, 1.0, 0.0],
        vec![1.0, 1.0, 1.0, 1.0],
        vec![1.0, 1.0, 0.0, 1.0],
        vec![1.0, 0.0, 0.0, 0.0],
    ];
    let hw_counter = HardwareCounterCell::new();
    for (i, vec) in points.iter().enumerate() {
        storage
            .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
            .unwrap();
    }
    // Default scalar quantization, no quantile clipping.
    let config: QuantizationConfig = ScalarQuantizationConfig {
        r#type: Default::default(),
        quantile: None,
        always_ram: None,
    }
    .into();
    let dir = Builder::new()
        .prefix("quantization_path")
        .tempdir()
        .unwrap();
    let stopped = AtomicBool::new(false);
    let quantized_vectors = QuantizedVectors::create(
        storage,
        &config,
        QuantizedVectorsStorageType::Immutable,
        dir.path(),
        1,
        &stopped,
    )
    .unwrap();
    let query: QueryVector = vec![0.5, 0.5, 0.5, 0.5].into();
    let scorer_quant = quantized_vectors
        .raw_scorer(query.clone(), HardwareCounterCell::new())
        .unwrap();
    let scorer_orig = new_raw_scorer(query.clone(), storage, HardwareCounterCell::new()).unwrap();
    // Allowed quantization error per score: 0.15.
    for i in 0..5 {
        let quant = scorer_quant.score_point(i);
        let orig = scorer_orig.score_point(i);
        assert!((orig - quant).abs() < 0.15);
        let quant = scorer_quant.score_internal(0, i);
        let orig = scorer_orig.score_internal(0, i);
        assert!((orig - quant).abs() < 0.15);
    }
    let files = storage.files();
    let quantization_files = quantized_vectors.files();
    // test save-load
    let quantized_vectors = QuantizedVectors::load(&config, storage, dir.path(), &stopped)
        .unwrap()
        .unwrap();
    // Loading must not change the reported file sets.
    assert_eq!(files, storage.files());
    assert_eq!(quantization_files, quantized_vectors.files());
    let scorer_quant = quantized_vectors
        .raw_scorer(query.clone(), HardwareCounterCell::new())
        .unwrap();
    let scorer_orig = new_raw_scorer(query, storage, HardwareCounterCell::new()).unwrap();
    // Same tolerance checks against the reloaded quantization.
    for i in 0..5 {
        let quant = scorer_quant.score_point(i);
        let orig = scorer_orig.score_point(i);
        assert!((orig - quant).abs() < 0.15);
        let quant = scorer_quant.score_internal(0, i);
        let orig = scorer_orig.score_internal(0, i);
        assert!((orig - quant).abs() < 0.15);
    }
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_delete_points_in_simple_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Open a 4-dim dot-product dense storage backed by RocksDB in `dir`.
    let open_storage = || {
        let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
        open_simple_dense_full_vector_storage(
            db,
            DB_VECTOR_CF,
            4,
            Distance::Dot,
            &AtomicBool::new(false),
        )
        .unwrap()
    };
    {
        let mut storage = open_storage();
        do_test_delete_points(&mut storage);
        storage.flusher()().unwrap();
    }
    // Re-open after flush to verify the persisted data loads cleanly.
    let _storage = open_storage();
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_update_from_delete_points_simple_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Open a 4-dim dot-product dense storage backed by RocksDB in `dir`.
    let open_storage = || {
        let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
        open_simple_dense_full_vector_storage(
            db,
            DB_VECTOR_CF,
            4,
            Distance::Dot,
            &AtomicBool::new(false),
        )
        .unwrap()
    };
    {
        let mut storage = open_storage();
        do_test_update_from_delete_points(&mut storage);
        storage.flusher()().unwrap();
    }
    // Re-open after flush to verify the persisted data loads cleanly.
    let _storage = open_storage();
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_score_points_in_simple_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Open a 4-dim dot-product dense storage backed by RocksDB in `dir`.
    let open_storage = || {
        let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
        open_simple_dense_full_vector_storage(
            db,
            DB_VECTOR_CF,
            4,
            Distance::Dot,
            &AtomicBool::new(false),
        )
        .unwrap()
    };
    {
        let mut storage = open_storage();
        do_test_score_points(&mut storage);
        storage.flusher()().unwrap();
    }
    // Re-open after flush to verify the persisted data loads cleanly.
    let _storage = open_storage();
}
#[test]
#[cfg(feature = "rocksdb")]
fn test_score_quantized_points_simple_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    // Open a 4-dim dot-product dense storage backed by RocksDB in `dir`.
    let open_storage = || {
        let db = open_db(dir.path(), &[DB_VECTOR_CF]).unwrap();
        open_simple_dense_full_vector_storage(
            db,
            DB_VECTOR_CF,
            4,
            Distance::Dot,
            &AtomicBool::new(false),
        )
        .unwrap()
    };
    {
        let mut storage = open_storage();
        test_score_quantized_points(&mut storage);
        storage.flusher()().unwrap();
    }
    // Re-open after flush to verify the persisted data loads cleanly.
    let _storage = open_storage();
}
// ----------------------------------------------
#[test]
fn test_delete_points_in_volatile_vector_storages() {
    // Volatile (in-memory) storage needs no directory or flush step.
    do_test_delete_points(&mut new_volatile_dense_vector_storage(4, Distance::Dot));
}
#[test]
fn test_update_from_delete_points_volatile_vector_storages() {
    // Volatile (in-memory) storage needs no directory or flush step.
    do_test_update_from_delete_points(&mut new_volatile_dense_vector_storage(4, Distance::Dot));
}
#[test]
fn test_score_points_in_volatile_vector_storages() {
    // Volatile (in-memory) storage needs no directory or flush step.
    do_test_score_points(&mut new_volatile_dense_vector_storage(4, Distance::Dot));
}
#[test]
fn test_score_quantized_points_volatile_vector_storages() {
    // Volatile (in-memory) storage needs no directory or flush step.
    test_score_quantized_points(&mut new_volatile_dense_vector_storage(4, Distance::Dot));
}
// ----------------------------------------------
#[test]
fn test_delete_points_in_appendable_memmap_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut store = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
    do_test_delete_points(&mut store);
    store.flusher()().unwrap();
    drop(store);
    // Re-open after flush to verify the persisted directory loads cleanly.
    let _reopened = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
}
#[test]
fn test_update_from_delete_points_appendable_memmap_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut store = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
    do_test_update_from_delete_points(&mut store);
    store.flusher()().unwrap();
    drop(store);
    // Re-open after flush to verify the persisted directory loads cleanly.
    let _reopened = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
}
#[test]
fn test_score_points_in_appendable_memmap_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut store = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
    do_test_score_points(&mut store);
    store.flusher()().unwrap();
    drop(store);
    // Re-open after flush to verify the persisted directory loads cleanly.
    let _reopened = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
}
#[test]
fn test_score_quantized_points_appendable_memmap_vector_storages() {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut store = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
    test_score_quantized_points(&mut store);
    store.flusher()().unwrap();
    drop(store);
    // Re-open after flush to verify the persisted directory loads cleanly.
    let _reopened = open_appendable_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/sparse/simple_sparse_vector_storage.rs | lib/segment/src/vector_storage/sparse/simple_sparse_vector_storage.rs | use std::ops::Range;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use parking_lot::RwLock;
use rocksdb::DB;
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::{DimId, DimWeight};
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::common::rocksdb_buffered_update_wrapper::DatabaseColumnScheduledUpdateWrapper;
use crate::common::rocksdb_wrapper::DatabaseColumnWrapper;
use crate::data_types::named_vectors::CowVector;
use crate::data_types::vectors::VectorRef;
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::common::StoredRecord;
use crate::vector_storage::{
AccessPattern, Random, SparseVectorStorage, VectorStorage, VectorStorageEnum,
};
/// On-disk record layout: a sparse vector payload together with its deleted flag.
type StoredSparseVector = StoredRecord<SparseVector>;
/// In-memory vector storage with on-update persistence using `store`
#[derive(Debug)]
pub struct SimpleSparseVectorStorage {
    /// Database wrapper which only persists on flush
    db_wrapper: DatabaseColumnScheduledUpdateWrapper,
    /// BitVec for deleted flags. Grows dynamically upto last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
    // Highest inserted point id + 1; counts deleted points as well.
    total_vector_count: usize,
    /// Total number of non-zero elements in all vectors. Used to estimate average vector size.
    total_sparse_size: usize,
}
/// Open a RocksDB-backed sparse vector storage from `database_column_name`.
///
/// Scans all persisted records once to rebuild the deleted bitmap, the
/// deleted/total vector counts and the total sparse-size estimate.
///
/// Fix: the original issued an extra `db_wrapper.lock_db().iter()?` whose
/// iterator was immediately discarded right before the identical call in the
/// `for` loop — a redundant second DB iteration/lock with no effect.
///
/// # Errors
/// Returns an error if a stored key or record cannot be deserialized, if the
/// DB iterator cannot be created, or if `stopped` is set during the scan.
pub fn open_simple_sparse_vector_storage(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    let (mut deleted, mut deleted_count) = (BitVec::new(), 0);
    let db_wrapper = DatabaseColumnWrapper::new(database, database_column_name);
    let db_wrapper = DatabaseColumnScheduledUpdateWrapper::new(db_wrapper);
    let mut total_vector_count = 0;
    let mut total_sparse_size = 0;
    for (key, value) in db_wrapper.lock_db().iter()? {
        let point_id: PointOffsetType = bincode::deserialize(&key)
            .map_err(|_| OperationError::service_error("cannot deserialize point id from db"))?;
        let stored_record: StoredSparseVector = bincode::deserialize(&value)
            .map_err(|_| OperationError::service_error("cannot deserialize record from db"))?;
        // Propagate deleted flag; only live vectors count towards the size estimate.
        if stored_record.deleted {
            bitvec_set_deleted(&mut deleted, point_id, true);
            deleted_count += 1;
        } else {
            total_sparse_size += stored_record.vector.values.len();
        }
        total_vector_count = total_vector_count.max(point_id as usize + 1);
        check_process_stopped(stopped)?;
    }
    Ok(VectorStorageEnum::SparseSimple(SimpleSparseVectorStorage {
        db_wrapper,
        deleted,
        deleted_count,
        total_vector_count,
        total_sparse_size,
    }))
}
impl SimpleSparseVectorStorage {
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Un-deleting a key beyond the known range is a no-op: the bitvec must
        // not grow for points that were never inserted.
        if !deleted && key as usize >= self.total_vector_count {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Keep the cached counter in sync with the actual flag transition.
        if was_deleted != deleted {
            if !was_deleted {
                self.deleted_count += 1;
            } else {
                self.deleted_count = self.deleted_count.saturating_sub(1);
            }
        }
        was_deleted
    }

    /// Persist `key`'s record (vector + deleted flag) to the DB wrapper and
    /// adjust the running `total_sparse_size` estimate.
    ///
    /// When `vector` is `None`, an empty vector is stored and the size
    /// estimate is left untouched.
    fn update_stored(
        &mut self,
        key: PointOffsetType,
        deleted: bool,
        vector: Option<&SparseVector>,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Write vector state to buffer record
        let record = StoredSparseVector {
            deleted,
            vector: vector.cloned().unwrap_or_default(),
        };
        // Deleting removes the vector's elements from the size estimate;
        // (re-)storing a live vector adds them.
        if let Some(vector) = vector {
            if deleted {
                self.total_sparse_size = self.total_sparse_size.saturating_sub(vector.values.len());
            } else {
                self.total_sparse_size += vector.values.len();
            }
        }
        let key_enc = bincode::serialize(&key).unwrap();
        let record_enc = bincode::serialize(&record).unwrap();
        // Account the serialized bytes as vector IO writes.
        hw_counter
            .vector_io_write_counter()
            .incr_delta(key_enc.len() + record_enc.len());
        // Store updated record
        self.db_wrapper.put(key_enc, record_enc)?;
        Ok(())
    }

    /// Estimate the byte size of all non-deleted vectors.
    ///
    /// NOTE(review): `total_sparse_size` already excludes deleted vectors'
    /// elements (they are subtracted in `update_stored` / skipped on open), so
    /// additionally scaling by the available fraction may under-estimate —
    /// confirm intent.
    pub fn size_of_available_vectors_in_bytes(&self) -> usize {
        if self.total_vector_count == 0 {
            return 0;
        }
        let available_fraction =
            (self.total_vector_count - self.deleted_count) as f32 / self.total_vector_count as f32;
        let available_size = (self.total_sparse_size as f32 * available_fraction) as usize;
        // Each sparse element stores one weight plus one dimension id.
        available_size * (std::mem::size_of::<DimWeight>() + std::mem::size_of::<DimId>())
    }

    /// Destroy this vector storage, remove persisted data from RocksDB
    pub fn destroy(&self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()?;
        Ok(())
    }
}
impl SparseVectorStorage for SimpleSparseVectorStorage {
    /// Read the stored vector for `key` from the DB wrapper.
    ///
    /// NOTE(review): unlike `get_sparse_opt`, this returns the stored vector
    /// even if its record is flagged deleted, and errors when the key is
    /// absent — confirm callers rely on this asymmetry.
    fn get_sparse<P: AccessPattern>(&self, key: PointOffsetType) -> OperationResult<SparseVector> {
        // Already in memory, so no sequential optimizations available.
        let bin_key = bincode::serialize(&key)
            .map_err(|_| OperationError::service_error("Cannot serialize sparse vector key"))?;
        let data = self.db_wrapper.get(bin_key)?;
        let record: StoredSparseVector = bincode::deserialize(&data).map_err(|_| {
            OperationError::service_error("Cannot deserialize sparse vector from db")
        })?;
        Ok(record.vector)
    }

    /// Read the vector for `key`, returning `Ok(None)` when the key is absent
    /// or its record is flagged deleted.
    fn get_sparse_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> OperationResult<Option<SparseVector>> {
        // Already in memory, so no sequential optimizations available.
        let bin_key = bincode::serialize(&key)
            .map_err(|_| OperationError::service_error("Cannot serialize sparse vector key"))?;
        if let Some(data) = self.db_wrapper.get_opt(bin_key)? {
            let StoredSparseVector { deleted, vector } =
                bincode::deserialize(&data).map_err(|_| {
                    OperationError::service_error("Cannot deserialize sparse vector from db")
                })?;
            // Deleted records are treated as missing.
            if deleted {
                return Ok(None);
            }
            Ok(Some(vector))
        } else {
            Ok(None)
        }
    }
}
impl VectorStorage for SimpleSparseVectorStorage {
    fn distance(&self) -> Distance {
        // Sparse vectors always use the fixed sparse distance.
        super::SPARSE_VECTOR_DISTANCE
    }

    fn datatype(&self) -> VectorStorageDatatype {
        VectorStorageDatatype::Float32
    }

    fn is_on_disk(&self) -> bool {
        // Vectors are fetched from the DB wrapper on every access.
        true
    }

    fn total_vector_count(&self) -> usize {
        self.total_vector_count
    }

    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        // In memory, so no sequential read optimization.
        // Missing/deleted vectors fall back to an empty sparse vector.
        let vector = self.get_vector_opt::<P>(key);
        vector.unwrap_or_else(CowVector::default_sparse)
    }

    /// Get vector by key, if it exists.
    ///
    /// ignore any error
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        // Any DB error is treated the same as a missing vector.
        match self.get_sparse_opt::<P>(key) {
            Ok(Some(vector)) => Some(CowVector::from(vector)),
            _ => None,
        }
    }

    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector: &SparseVector = vector.try_into()?;
        debug_assert!(vector.is_sorted());
        // Inserting may extend the addressable range.
        self.total_vector_count = std::cmp::max(self.total_vector_count, key as usize + 1);
        // Clear a possible stale deleted flag before persisting the new vector.
        self.set_deleted(key, false);
        self.update_stored(key, false, Some(vector), hw_counter)?;
        Ok(())
    }

    /// Append all vectors (with their deleted flags) from `other_vectors`,
    /// returning the range of newly assigned internal ids.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.total_vector_count as PointOffsetType;
        let disposed_hw = HardwareCounterCell::disposable(); // This function is only used for internal operations.
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = other_vector.as_vec_ref().try_into()?;
            let new_id = self.total_vector_count as PointOffsetType;
            self.total_vector_count += 1;
            self.set_deleted(new_id, other_deleted);
            self.update_stored(new_id, other_deleted, Some(other_vector), &disposed_hw)?;
        }
        Ok(start_index..self.total_vector_count as PointOffsetType)
    }

    fn flusher(&self) -> Flusher {
        // Delegates persistence to the scheduled-update DB wrapper.
        self.db_wrapper.flusher()
    }

    fn files(&self) -> Vec<std::path::PathBuf> {
        // No storage-owned files; data lives inside the shared RocksDB instance.
        vec![]
    }

    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        // `set_deleted` returns the previous state; negate it to learn whether
        // this call actually transitioned the point to deleted.
        let is_deleted = !self.set_deleted(key, true);
        if is_deleted {
            // Persist the deletion, keeping the old vector so the size
            // estimate can be adjusted in `update_stored`.
            let old_vector = self.get_sparse_opt::<Random>(key).ok().flatten();
            self.update_stored(
                key,
                true,
                old_vector.as_ref(),
                &HardwareCounterCell::disposable(), // We don't measure deletions
            )?;
        }
        Ok(is_deleted)
    }

    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        // Out-of-range keys are reported as not deleted.
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }

    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
#[cfg(test)]
mod tests {
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use sparse::common::sparse_vector_fixture::random_sparse_vector;
    use tempfile::Builder;

    use super::*;
    use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
    use crate::segment_constructor::migrate_rocksdb_sparse_vector_storage_to_mmap;
    use crate::vector_storage::Sequential;

    // Fixed seed so the RNG draw sequence can be replayed for verification.
    const RAND_SEED: u64 = 42;

    /// Create RocksDB based sparse vector storage.
    ///
    /// Migrate it to the mmap based sparse vector storage and assert vector data is correct.
    #[test]
    fn test_migrate_simple_to_mmap() {
        const POINT_COUNT: PointOffsetType = 128;
        const DIM: usize = 1024;
        const DELETE_PROBABILITY: f64 = 0.1;

        let mut rng = StdRng::seed_from_u64(RAND_SEED);

        let db_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let db = open_db(db_dir.path(), &[DB_VECTOR_CF]).unwrap();

        // Create simple sparse vector storage, insert test points and delete some of them again
        let mut storage =
            open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &AtomicBool::new(false)).unwrap();
        for internal_id in 0..POINT_COUNT {
            let vector = random_sparse_vector(&mut rng, DIM);
            storage
                .insert_vector(
                    internal_id,
                    VectorRef::from(&vector),
                    &HardwareCounterCell::disposable(),
                )
                .unwrap();
            if rng.random_bool(DELETE_PROBABILITY) {
                storage.delete_vector(internal_id).unwrap();
            }
        }
        let deleted_vector_count = storage.deleted_vector_count();
        let total_vector_count = storage.total_vector_count();

        // Migrate from RocksDB to mmap storage
        let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let new_storage =
            migrate_rocksdb_sparse_vector_storage_to_mmap(&storage, storage_dir.path())
                .expect("failed to migrate from RocksDB to mmap");

        // Destroy persisted RocksDB sparse vector data
        match storage {
            VectorStorageEnum::SparseSimple(storage) => storage.destroy().unwrap(),
            _ => unreachable!("unexpected vector storage type"),
        }

        // We can drop RocksDB storage now
        db_dir.close().expect("failed to drop RocksDB storage");

        // Assert vector counts and data
        // Re-seed the RNG: replaying the exact same draw sequence reproduces
        // the vectors and delete decisions generated above without copies.
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        assert_eq!(new_storage.deleted_vector_count(), deleted_vector_count);
        assert_eq!(new_storage.total_vector_count(), total_vector_count);
        for internal_id in 0..POINT_COUNT {
            let vector = random_sparse_vector(&mut rng, DIM);
            let deleted = new_storage.is_deleted_vector(internal_id);
            // This draw must mirror the `random_bool` call in the insert loop.
            assert_eq!(deleted, rng.random_bool(DELETE_PROBABILITY));
            if !deleted {
                assert_eq!(
                    new_storage.get_vector::<Sequential>(internal_id),
                    CowVector::from(vector),
                );
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/sparse/mmap_sparse_vector_storage.rs | lib/segment/src/vector_storage/sparse/mmap_sparse_vector_storage.rs | use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use bitvec::slice::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::iterator_ext::IteratorExt;
use common::types::PointOffsetType;
use fs_err as fs;
use gridstore::Gridstore;
use gridstore::config::{Compression, StorageOptions};
use sparse::common::sparse_vector::SparseVector;
use crate::common::flags::bitvec_flags::BitvecFlags;
use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::vectors::VectorRef;
use crate::types::VectorStorageDatatype;
use crate::vector_storage::sparse::stored_sparse_vectors::StoredSparseVector;
use crate::vector_storage::{AccessPattern, SparseVectorStorage, VectorStorage};
/// Subdirectory holding the mmap-backed deleted flags.
const DELETED_DIRNAME: &str = "deleted";
/// Subdirectory holding the gridstore with the vector data.
const STORAGE_DIRNAME: &str = "store";
/// Memory-mapped mutable sparse vector storage.
#[derive(Debug)]
pub struct MmapSparseVectorStorage {
    /// Gridstore holding the stored (compressed) sparse vectors.
    storage: Gridstore<StoredSparseVector>,
    /// Flags marking deleted vectors
    ///
    /// Structure grows dynamically, but may be smaller than actual number of vectors. Must not
    /// depend on its length.
    deleted: BitvecFlags,
    /// Current number of deleted vectors.
    deleted_count: usize,
    /// Maximum point offset in the storage + 1. This also means the total amount of point offsets
    next_point_offset: usize,
}
impl MmapSparseVectorStorage {
    /// Open the storage at `path` if it was initialized before, otherwise create a fresh one.
    ///
    /// Presence of the deleted-flags directory is used as the initialization marker.
    pub fn open_or_create(path: &Path) -> OperationResult<Self> {
        let deleted_dir = path.join(DELETED_DIRNAME);
        if deleted_dir.is_dir() {
            // Storage already exists, open it
            return Self::open(path);
        }
        Self::create(path)
    }

    /// Open an existing storage: the vector gridstore plus the deleted-flags bitmap.
    fn open(path: &Path) -> OperationResult<Self> {
        // Storage
        let storage_dir = path.join(STORAGE_DIRNAME);
        let storage = Gridstore::open(storage_dir).map_err(|err| {
            OperationError::service_error(format!(
                "Failed to open mmap sparse vector storage: {err}"
            ))
        })?;
        // Payload storage does not need to be populated
        // as it is not required in the index search step
        let populate = false;
        // Deleted flags
        let deleted_path = path.join(DELETED_DIRNAME);
        let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
        let deleted_count = deleted.count_trues();
        // Recover the next point offset from whichever source saw the highest id:
        // the last set deleted flag or the gridstore itself.
        // NOTE(review): unlike `update_stored`, which maintains this value as
        // "max key + 1", no `+ 1` is applied here — confirm `max_point_id()`
        // semantics keep both code paths consistent.
        let next_point_offset = deleted
            .get_bitslice()
            .last_one()
            .max(Some(storage.max_point_id() as usize))
            .unwrap_or_default();
        Ok(Self {
            storage,
            deleted,
            deleted_count,
            next_point_offset,
        })
    }

    /// Create a fresh, empty storage at `path`.
    fn create(path: &Path) -> OperationResult<Self> {
        let path = path.to_path_buf();
        // Storage
        let storage_dir = path.join(STORAGE_DIRNAME);
        fs::create_dir_all(&storage_dir)?;
        let storage_config = StorageOptions {
            // Don't use built-in compression, as we will use bitpacking instead
            compression: Some(Compression::None),
            ..Default::default()
        };
        let storage = Gridstore::new(storage_dir, storage_config).map_err(|err| {
            OperationError::service_error(format!(
                "Failed to create storage for mmap sparse vectors: {err}"
            ))
        })?;
        // Payload storage does not need to be populated
        // as it is not required in the index search step
        let populate = false;
        // Deleted flags
        let deleted_path = path.join(DELETED_DIRNAME);
        let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
        Ok(Self {
            storage,
            deleted,
            deleted_count: 0,
            next_point_offset: 0,
        })
    }

    /// Set the deleted flag for `key`, keeping `deleted_count` in sync.
    ///
    /// Returns the previous value of the flag. Un-deleting a key at or beyond
    /// `next_point_offset` is a no-op reported as "was not deleted".
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        if !deleted && key as usize >= self.next_point_offset {
            return false;
        }
        // set deleted flag
        let previous_value = self.deleted.set(key, deleted);
        // update deleted_count if it changed
        match (previous_value, deleted) {
            (false, true) => self.deleted_count += 1,
            (true, false) => self.deleted_count = self.deleted_count.saturating_sub(1),
            _ => {}
        }
        previous_value
    }

    /// Write (`Some`) or remove (`None`) the stored vector for `key`,
    /// advancing `next_point_offset` past `key`.
    fn update_stored(
        &mut self,
        key: PointOffsetType,
        vector: Option<&SparseVector>,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        if let Some(vector) = vector {
            // upsert vector
            self.storage.put_value(
                key,
                &StoredSparseVector::from(vector),
                hw_counter.ref_vector_io_write_counter(),
            )?;
        } else {
            // delete vector
            self.storage.delete_value(key);
        }
        self.next_point_offset = std::cmp::max(self.next_point_offset, key as usize + 1);
        Ok(())
    }

    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> OperationResult<()> {
        // deleted bitvec is already in-memory
        self.storage.populate()?;
        Ok(())
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.deleted.clear_cache()?;
        self.storage.clear_cache()?;
        Ok(())
    }
}
impl SparseVectorStorage for MmapSparseVectorStorage {
    /// Fetch the sparse vector for `key`, erroring if it does not exist.
    fn get_sparse<P: AccessPattern>(&self, key: PointOffsetType) -> OperationResult<SparseVector> {
        self.get_sparse_opt::<P>(key)?
            .ok_or_else(|| OperationError::service_error(format!("Key {key} not found")))
    }

    /// Fetch the sparse vector for `key`, or `None` if it does not exist.
    fn get_sparse_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> OperationResult<Option<SparseVector>> {
        // Dispatch the gridstore read's const-generic sequential hint
        // from the caller's access pattern.
        let result = if P::IS_SEQUENTIAL {
            self.storage
                .get_value::<true>(key, &HardwareCounterCell::disposable()) // Vector storage read IO not measured
        } else {
            self.storage
                .get_value::<false>(key, &HardwareCounterCell::disposable())
        };
        // Decompress the stored representation back into a `SparseVector`
        result.map(SparseVector::try_from).transpose()
    }
}
impl VectorStorage for MmapSparseVectorStorage {
    fn distance(&self) -> crate::types::Distance {
        super::SPARSE_VECTOR_DISTANCE
    }

    fn datatype(&self) -> crate::types::VectorStorageDatatype {
        VectorStorageDatatype::Float32
    }

    /// Mmap-backed storage always reads from disk.
    fn is_on_disk(&self) -> bool {
        true
    }

    /// Total number of point offsets covered by this storage (max key + 1),
    /// deleted ones included.
    fn total_vector_count(&self) -> usize {
        self.next_point_offset
    }

    /// Get the vector for `key`; absent vectors yield an empty sparse vector.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key)
            .unwrap_or_else(CowVector::default_sparse)
    }

    /// Get vector by key, if it exists.
    ///
    /// Ignore any error
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        match self.get_sparse_opt::<P>(key) {
            Ok(Some(vector)) => Some(CowVector::from(vector)),
            // Read errors are deliberately swallowed and reported as "no vector"
            _ => None,
        }
    }

    /// Insert (or overwrite) the vector at `key`, clearing its deleted flag.
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector = <&SparseVector>::try_from(vector)?;
        debug_assert!(vector.is_sorted(), "Vector is not sorted {vector:?}");
        self.set_deleted(key, false);
        self.update_stored(key, Some(vector), hw_counter)?;
        Ok(())
    }

    /// Append all vectors from `other_vectors` at the end of this storage.
    ///
    /// Returns the range of newly assigned point offsets.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let hw_counter = HardwareCounterCell::disposable(); // This function is only used for internal operations. No need to measure.
        let start_index = self.next_point_offset as PointOffsetType;
        for (other_vector, other_deleted) in other_vectors.stop_if(stopped) {
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = other_vector.as_vec_ref().try_into()?;
            let new_id = self.next_point_offset as PointOffsetType;
            self.next_point_offset += 1;
            self.set_deleted(new_id, other_deleted);
            // Vectors flagged as deleted are not persisted at all
            let vector = (!other_deleted).then_some(other_vector);
            self.update_stored(new_id, vector, &hw_counter)?;
        }
        // return cancelled error if stopped
        check_process_stopped(stopped)?;
        Ok(start_index..self.next_point_offset as PointOffsetType)
    }

    /// Build a flusher closure that persists both parts of the storage.
    fn flusher(&self) -> crate::common::Flusher {
        // Capture both flushers up front so the closure does not borrow `self`
        let storage_flusher = self.storage.flusher();
        let deleted_flags_flusher = self.deleted.flusher();
        Box::new(move || {
            // Deleted flags are flushed before the vector data
            deleted_flags_flusher()?;
            storage_flusher().map_err(|err| {
                OperationError::service_error(format!(
                    "Failed to flush mmap sparse vector gridstore: {err}"
                ))
            })?;
            Ok(())
        })
    }

    /// All files backing this storage: gridstore files plus deleted flags.
    fn files(&self) -> Vec<PathBuf> {
        let mut files = self.storage.files();
        files.extend(self.deleted.files());
        files
    }

    /// Only the gridstore contributes immutable files; the deleted flags stay mutable.
    fn immutable_files(&self) -> Vec<PathBuf> {
        self.storage.immutable_files()
    }

    /// Mark vector `key` as deleted and drop its stored data.
    ///
    /// Returns `true` if the vector was not already deleted before this call.
    fn delete_vector(
        &mut self,
        key: common::types::PointOffsetType,
    ) -> crate::common::operation_error::OperationResult<bool> {
        let was_deleted = !self.set_deleted(key, true);
        let hw_counter = HardwareCounterCell::disposable(); // Deletions not measured
        self.update_stored(key, None, &hw_counter)?;
        Ok(was_deleted)
    }

    fn is_deleted_vector(&self, key: common::types::PointOffsetType) -> bool {
        self.deleted.get(key)
    }

    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.get_bitslice()
    }
}
/// Find files related to this sparse vector storage
#[cfg(any(test, feature = "rocksdb"))]
pub(crate) fn find_storage_files(vector_storage_path: &Path) -> OperationResult<Vec<PathBuf>> {
    // The storage consists of exactly two subdirectories: vector data and deleted flags.
    let mut files = vec![];
    for dirname in [STORAGE_DIRNAME, DELETED_DIRNAME] {
        files.extend(common::disk::list_files(&vector_storage_path.join(dirname))?);
    }
    Ok(files)
}
#[cfg(test)]
mod test {
    use std::collections::HashSet;
    use std::path::{Path, PathBuf};
    use common::counter::hardware_counter::HardwareCounterCell;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use sparse::common::sparse_vector;
    use sparse::common::sparse_vector_fixture::random_sparse_vector;
    use tempfile::Builder;
    use super::*;
    use crate::vector_storage::sparse::mmap_sparse_vector_storage::{
        MmapSparseVectorStorage, VectorRef,
    };
    use crate::vector_storage::{Random, VectorStorage};

    // Fixed seed so randomly generated vectors are reproducible.
    const RAND_SEED: u64 = 42;

    /// Apply `cb` to every regular file under `dir`, recursing into subdirectories.
    fn visit_files_recursively(dir: &Path, cb: &mut impl FnMut(PathBuf)) -> std::io::Result<()> {
        if dir.is_dir() {
            for entry in fs::read_dir(dir)? {
                let entry = entry?;
                let path = entry.path();
                if path.is_dir() {
                    visit_files_recursively(&path, cb)?;
                } else {
                    cb(entry.path());
                }
            }
        }
        Ok(())
    }

    /// `files()` must report exactly the set of files a fresh storage creates on disk.
    #[test]
    fn test_files_consistency() {
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_storage")
            .tempdir()
            .unwrap();
        let storage = MmapSparseVectorStorage::open_or_create(tmp_dir.path()).unwrap();
        let mut existing_files = HashSet::new();
        visit_files_recursively(tmp_dir.path(), &mut |path| {
            existing_files.insert(path);
        })
        .unwrap();
        let storage_files = storage.files().into_iter().collect::<HashSet<_>>();
        assert_eq!(storage_files, existing_files);
    }

    /// Inserted and flushed vectors must survive a close/reopen cycle.
    #[test]
    fn test_create_insert_close_and_load() {
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_storage")
            .tempdir()
            .unwrap();
        let vector = sparse_vector::SparseVector {
            indices: vec![1, 2, 3],
            values: vec![0.1, 0.2, 0.3],
        };
        let hw_counter = HardwareCounterCell::new();
        {
            // Write into a fresh storage, flush, then drop it
            let mut storage = MmapSparseVectorStorage::open_or_create(tmp_dir.path()).unwrap();
            storage
                .insert_vector(0, VectorRef::from(&vector), &hw_counter)
                .unwrap();
            storage
                .insert_vector(2, VectorRef::from(&vector), &hw_counter)
                .unwrap();
            storage
                .insert_vector(4, VectorRef::from(&vector), &hw_counter)
                .unwrap();
            storage.flusher()().unwrap();
        }
        // Reopen from disk and verify the first vector round-tripped
        let storage = MmapSparseVectorStorage::open(tmp_dir.path()).unwrap();
        let result_vector = storage.get_vector::<Random>(0);
        match result_vector {
            crate::data_types::named_vectors::CowVector::Sparse(sparse) => {
                assert_eq!(sparse.values, vector.values);
            }
            _ => panic!("Expected sparse vector"),
        };
    }

    /// Test that `find_storage_files` finds all files that are reported by the storage.
    #[test]
    fn test_find_storage_files() {
        const POINT_COUNT: PointOffsetType = 1000;
        const DIM: usize = 1024;
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let mut storage = MmapSparseVectorStorage::open_or_create(dir.path()).unwrap();
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let hw_counter = HardwareCounterCell::disposable();
        // Insert points, delete 10% of it, and flush
        for internal_id in 0..POINT_COUNT {
            let vector = random_sparse_vector(&mut rng, DIM);
            storage
                .insert_vector(internal_id, VectorRef::from(&vector), &hw_counter)
                .unwrap();
        }
        for internal_id in 0..POINT_COUNT {
            if !rng.random_bool(0.1) {
                continue;
            }
            storage.delete_vector(internal_id).unwrap();
        }
        storage.flusher()().unwrap();
        let storage_files = storage.files().into_iter().collect::<HashSet<_>>();
        let found_files = find_storage_files(dir.path())
            .unwrap()
            .into_iter()
            .collect::<HashSet<_>>();
        assert_eq!(
            storage_files, found_files,
            "find_storage_files must find same files that storage reports",
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/sparse/volatile_sparse_vector_storage.rs | lib/segment/src/vector_storage/sparse/volatile_sparse_vector_storage.rs | use std::ops::Range;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::{DimId, DimWeight};
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::vectors::VectorRef;
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::{
AccessPattern, Random, SparseVectorStorage, VectorStorage, VectorStorageEnum,
};
pub const SPARSE_VECTOR_DISTANCE: Distance = Distance::Dot;
/// Volatile in-memory sparse vector storage.
///
/// Nothing is persisted: its flusher is a no-op and it reports no backing files,
/// so all data is lost when the storage is dropped.
#[derive(Default, Debug)]
pub struct VolatileSparseVectorStorage {
    // Sparse vectors indexed by point offset; `None` for deleted/absent slots.
    vectors: Vec<Option<SparseVector>>,
    /// BitVec for deleted flags. Grows dynamically upto last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
    // Total number of point offsets seen so far (max inserted key + 1), deleted included.
    total_vector_count: usize,
    /// Total number of non-zero elements in all vectors. Used to estimate average vector size.
    total_sparse_size: usize,
}
/// Construct an empty volatile sparse vector storage wrapped in the storage enum.
pub fn new_volatile_sparse_vector_storage() -> VectorStorageEnum {
    let storage = VolatileSparseVectorStorage::default();
    VectorStorageEnum::SparseVolatile(storage)
}
impl VolatileSparseVectorStorage {
    /// Set deleted flag for given key. Returns previous deleted state.
    ///
    /// Un-deleting a key at or beyond the known vector count is a no-op.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        if !deleted && key as usize >= self.total_vector_count {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Keep the cached deleted counter in sync with the flag transition
        if was_deleted != deleted {
            if !was_deleted {
                self.deleted_count += 1;
            } else {
                self.deleted_count = self.deleted_count.saturating_sub(1);
            }
        }
        was_deleted
    }

    /// Store (`deleted == false`) or drop (`deleted == true`) the vector for `key`,
    /// keeping the `total_sparse_size` bookkeeping consistent.
    fn update_stored(
        &mut self,
        key: PointOffsetType,
        deleted: bool,
        vector: Option<&SparseVector>,
    ) {
        // Resize sparse vector container if needed
        if key as usize >= self.vectors.len() {
            if deleted {
                // Nothing is stored beyond the current length, so there is nothing to drop
                return;
            }
            self.vectors.resize(key as usize + 1, None);
        }
        let entry = &mut self.vectors[key as usize];
        // Update bookkeeping of total sparse size:
        // subtract the old vector's element count, add the new one's (unless deleting)
        let elements_removed = entry.as_ref().map_or(0, |v| v.indices.len());
        let elements_added = vector
            .as_ref()
            .filter(|_| !deleted)
            .map_or(0, |v| v.indices.len());
        self.total_sparse_size = self
            .total_sparse_size
            .saturating_sub(elements_removed)
            .saturating_add(elements_added);
        if deleted {
            entry.take();
        } else {
            *entry = vector.cloned();
        }
    }

    /// Estimate the size (in bytes) of all non-deleted vectors, assuming deleted
    /// vectors have average size.
    pub fn size_of_available_vectors_in_bytes(&self) -> usize {
        if self.total_vector_count == 0 {
            return 0;
        }
        let available_fraction =
            (self.total_vector_count - self.deleted_count) as f32 / self.total_vector_count as f32;
        let available_size = (self.total_sparse_size as f32 * available_fraction) as usize;
        // Each sparse element stores one weight plus one index
        available_size * (std::mem::size_of::<DimWeight>() + std::mem::size_of::<DimId>())
    }
}
impl SparseVectorStorage for VolatileSparseVectorStorage {
    /// Fetch the sparse vector for `key`, failing if it is absent.
    fn get_sparse<P: AccessPattern>(&self, key: PointOffsetType) -> OperationResult<SparseVector> {
        match self.get_sparse_opt::<P>(key)? {
            Some(vector) => Ok(vector),
            None => Err(OperationError::service_error(
                "Sparse vector not found in storage",
            )),
        }
    }

    /// Fetch the sparse vector for `key`, returning `None` if it is absent.
    fn get_sparse_opt<P: AccessPattern>(
        &self,
        key: PointOffsetType,
    ) -> OperationResult<Option<SparseVector>> {
        // Vectors live in RAM, so the access pattern `P` makes no difference here.
        Ok(self.vectors.get(key as usize).and_then(|slot| slot.clone()))
    }
}
impl VectorStorage for VolatileSparseVectorStorage {
    fn distance(&self) -> Distance {
        SPARSE_VECTOR_DISTANCE
    }

    fn datatype(&self) -> VectorStorageDatatype {
        VectorStorageDatatype::Float32
    }

    /// Purely in-memory storage; nothing is read from disk.
    fn is_on_disk(&self) -> bool {
        false
    }

    /// Total number of point offsets seen so far, deleted ones included.
    fn total_vector_count(&self) -> usize {
        self.total_vector_count
    }

    /// Get the vector for `key`; absent vectors yield an empty sparse vector.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        let vector = self.get_vector_opt::<P>(key);
        vector.unwrap_or_else(CowVector::default_sparse)
    }

    /// Get vector by key, if it exists.
    ///
    /// ignore any error
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        // In memory, so no sequential read optimization.
        match self.get_sparse_opt::<P>(key) {
            Ok(Some(vector)) => Some(CowVector::from(vector)),
            _ => None,
        }
    }

    /// Insert (or overwrite) the vector at `key`, clearing its deleted flag.
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        _hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector: &SparseVector = vector.try_into()?;
        debug_assert!(vector.is_sorted());
        // Grow the logical size first so `set_deleted` accepts this key
        self.total_vector_count = std::cmp::max(self.total_vector_count, key as usize + 1);
        self.set_deleted(key, false);
        self.update_stored(key, false, Some(vector));
        Ok(())
    }

    /// Append all vectors from `other_vectors` at the end of this storage.
    ///
    /// Returns the range of newly assigned point offsets.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.total_vector_count as PointOffsetType;
        for (other_vector, other_deleted) in other_vectors {
            // Bail out with a cancellation error if the operation was stopped
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = other_vector.as_vec_ref().try_into()?;
            let new_id = self.total_vector_count as PointOffsetType;
            self.total_vector_count += 1;
            self.set_deleted(new_id, other_deleted);
            self.update_stored(new_id, other_deleted, Some(other_vector));
        }
        Ok(start_index..self.total_vector_count as PointOffsetType)
    }

    /// Nothing to persist: flushing is a no-op for volatile storage.
    fn flusher(&self) -> Flusher {
        Box::new(|| Ok(()))
    }

    /// Volatile storage is not backed by any files.
    fn files(&self) -> Vec<std::path::PathBuf> {
        vec![]
    }

    /// Mark vector `key` as deleted and drop its data.
    ///
    /// Returns `true` if the vector was not already deleted before this call.
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        let is_deleted = !self.set_deleted(key, true);
        if is_deleted {
            // Drop the stored vector so its elements leave the size bookkeeping
            let old_vector = self.get_sparse_opt::<Random>(key).ok().flatten();
            self.update_stored(key, true, old_vector.as_ref());
        }
        Ok(is_deleted)
    }

    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }

    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/sparse/mod.rs | lib/segment/src/vector_storage/sparse/mod.rs | pub mod mmap_sparse_vector_storage;
#[cfg(feature = "rocksdb")]
pub mod simple_sparse_vector_storage;
mod stored_sparse_vectors;
pub mod volatile_sparse_vector_storage;
use crate::types::Distance;
pub const SPARSE_VECTOR_DISTANCE: Distance = Distance::Dot;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/sparse/stored_sparse_vectors.rs | lib/segment/src/vector_storage/sparse/stored_sparse_vectors.rs | use common::delta_pack::{delta_pack, delta_unpack};
use gridstore::Blob;
use serde::{Deserialize, Serialize};
use sparse::common::sparse_vector::{SparseVector, double_sort};
use sparse::common::types::{DimId, DimId64, DimWeight};
use crate::common::operation_error::OperationError;
/// On-disk representation of a sparse vector: indices are widened to 64 bits,
/// then delta-encoded and bit-packed into a byte array.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct StoredSparseVector {
    /// Compressed u64 indices
    pub indices: Vec<u8>,
    /// Values and indices must be the same length
    pub values: Vec<DimWeight>,
}
impl StoredSparseVector {
    /// Convert indices into a byte array
    /// Use bitpacking and delta-encoding for additional compression
    ///
    /// NOTE: delta-encoding presumably expects ascending indices; the
    /// `From<&SparseVector>` conversion sorts before calling this.
    fn serialize_indices(indices: &[DimId64]) -> Vec<u8> {
        delta_pack(indices)
    }

    /// Recover indices from a byte array
    fn deserialize_indices(data: &[u8]) -> Vec<DimId64> {
        delta_unpack(data)
    }
}
impl From<&SparseVector> for StoredSparseVector {
fn from(vector: &SparseVector) -> Self {
let mut stored_indices: Vec<_> =
vector.indices.iter().copied().map(DimId64::from).collect();
let mut stored_values = vector.values.clone();
double_sort(&mut stored_indices, &mut stored_values);
let compressed_indices = StoredSparseVector::serialize_indices(&stored_indices);
Self {
indices: compressed_indices,
values: stored_values,
}
}
}
impl TryFrom<StoredSparseVector> for SparseVector {
    type Error = OperationError;

    /// Unpack the delta-encoded indices and narrow each one back to `DimId`,
    /// erroring on the first index that no longer fits.
    fn try_from(value: StoredSparseVector) -> Result<Self, Self::Error> {
        let unpacked = StoredSparseVector::deserialize_indices(&value.indices);
        let mut indices = Vec::with_capacity(unpacked.len());
        for wide_index in unpacked {
            let index = DimId::try_from(wide_index).map_err(|err| {
                OperationError::service_error(format!("Failed to convert indices: {err}"))
            })?;
            indices.push(index);
        }
        Ok(SparseVector {
            indices,
            values: value.values,
        })
    }
}
impl Blob for StoredSparseVector {
    /// Serialize with bincode; panics if serialization fails (treated as a bug).
    fn to_bytes(&self) -> Vec<u8> {
        bincode::serialize(&self).expect("Sparse vector serialization should not fail")
    }

    /// Deserialize bytes previously produced by `to_bytes`.
    /// Panics on malformed input, which would indicate corrupted storage.
    fn from_bytes(data: &[u8]) -> Self {
        bincode::deserialize(data).expect("Sparse vector deserialization should not fail")
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/memmap_dense_vector_storage.rs | lib/segment/src/vector_storage/dense/memmap_dense_vector_storage.rs | use std::borrow::Cow;
use std::io::{self, BufWriter, Write};
use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use fs_err as fs;
use fs_err::{File, OpenOptions};
use memory::fadvise::clear_disk_cache;
use memory::mmap_ops;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{VectorElementType, VectorRef};
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::common::get_async_scorer;
use crate::vector_storage::dense::mmap_dense_vectors::MmapDenseVectors;
use crate::vector_storage::{AccessPattern, DenseVectorStorage, VectorStorage, VectorStorageEnum};
/// File name for the raw vector data.
const VECTORS_PATH: &str = "matrix.dat";
/// File name for the deleted-flags data.
const DELETED_PATH: &str = "deleted.dat";
/// Stores all dense vectors in mem-mapped file
///
/// It is not possible to insert new vectors into mem-mapped storage,
/// but possible to mark some vectors as removed
///
/// Mem-mapped storage can only be constructed from another storage
#[derive(Debug)]
pub struct MemmapDenseVectorStorage<T: PrimitiveVectorElement> {
    /// Path to the file holding the vector data.
    vectors_path: PathBuf,
    /// Path to the file holding the deleted flags.
    deleted_path: PathBuf,
    /// Backing mmap store; `None` only transiently while `update_from` rebuilds it.
    mmap_store: Option<MmapDenseVectors<T>>,
    /// Distance metric reported for the stored vectors.
    distance: Distance,
}
impl<T: PrimitiveVectorElement> MemmapDenseVectorStorage<T> {
    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) {
        if let Some(store) = self.mmap_store.as_ref() {
            store.populate();
        }
    }

    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        // Both backing files may be cached by the OS; evict each of them.
        for path in [&self.vectors_path, &self.deleted_path] {
            clear_disk_cache(path)?;
        }
        Ok(())
    }
}
/// Open mmap-based dense vector storage, wrapped as `VectorStorageEnum::DenseMemmap`.
/// Async IO follows the global `get_async_scorer()` setting.
pub fn open_memmap_vector_storage(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    let storage = open_memmap_vector_storage_with_async_io_impl::<VectorElementType>(
        path,
        dim,
        distance,
        get_async_scorer(),
    )?;
    Ok(VectorStorageEnum::DenseMemmap(storage))
}

/// Open mmap-based dense vector storage, wrapped as `VectorStorageEnum::DenseMemmapByte`.
/// The element type is inferred from the enum variant's storage type.
pub fn open_memmap_vector_storage_byte(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    let storage =
        open_memmap_vector_storage_with_async_io_impl(path, dim, distance, get_async_scorer())?;
    Ok(VectorStorageEnum::DenseMemmapByte(storage))
}

/// Open mmap-based dense vector storage, wrapped as `VectorStorageEnum::DenseMemmapHalf`.
/// The element type is inferred from the enum variant's storage type.
pub fn open_memmap_vector_storage_half(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    let storage =
        open_memmap_vector_storage_with_async_io_impl(path, dim, distance, get_async_scorer())?;
    Ok(VectorStorageEnum::DenseMemmapHalf(storage))
}

/// Like `open_memmap_vector_storage`, but with explicit control over async IO.
pub fn open_memmap_vector_storage_with_async_io(
    path: &Path,
    dim: usize,
    distance: Distance,
    with_async_io: bool,
) -> OperationResult<VectorStorageEnum> {
    let storage = open_memmap_vector_storage_with_async_io_impl::<VectorElementType>(
        path,
        dim,
        distance,
        with_async_io,
    )?;
    Ok(VectorStorageEnum::DenseMemmap(storage))
}

/// Shared implementation: ensure the directory exists and open the vector and
/// deleted-flags files inside it.
fn open_memmap_vector_storage_with_async_io_impl<T: PrimitiveVectorElement>(
    path: &Path,
    dim: usize,
    distance: Distance,
    with_async_io: bool,
) -> OperationResult<Box<MemmapDenseVectorStorage<T>>> {
    fs::create_dir_all(path)?;
    let vectors_path = path.join(VECTORS_PATH);
    let deleted_path = path.join(DELETED_PATH);
    let mmap_store = MmapDenseVectors::open(&vectors_path, &deleted_path, dim, with_async_io)?;
    Ok(Box::new(MemmapDenseVectorStorage {
        vectors_path,
        deleted_path,
        mmap_store: Some(mmap_store),
        distance,
    }))
}
impl<T: PrimitiveVectorElement> MemmapDenseVectorStorage<T> {
    /// Borrow the underlying mmap vector store.
    ///
    /// Panics if the store has been taken out (only happens transiently inside `update_from`).
    pub fn get_mmap_vectors(&self) -> &MmapDenseVectors<T> {
        self.mmap_store.as_ref().unwrap()
    }

    /// Whether the underlying store was opened with an async reader.
    /// Reports `false` while the store is transiently absent.
    pub fn has_async_reader(&self) -> bool {
        self.mmap_store
            .as_ref()
            .is_some_and(|store| store.has_async_reader())
    }
}
impl<T: PrimitiveVectorElement> DenseVectorStorage<T> for MemmapDenseVectorStorage<T> {
    /// Dimensionality of the stored vectors.
    fn vector_dim(&self) -> usize {
        self.mmap_store.as_ref().unwrap().dim
    }

    /// Borrow the raw vector for `key`.
    ///
    /// Panics if the vector is not present in the store.
    fn get_dense<P: AccessPattern>(&self, key: PointOffsetType) -> &[T] {
        self.mmap_store
            .as_ref()
            .unwrap()
            .get_vector_opt::<P>(key)
            .unwrap_or_else(|| panic!("vector not found: {key}"))
    }

    /// Resolve several keys at once into vector slices, using `vectors` as scratch space.
    fn get_dense_batch<'a>(
        &'a self,
        keys: &[PointOffsetType],
        vectors: &'a mut [MaybeUninit<&'a [T]>],
    ) -> &'a [&'a [T]] {
        let mmap_store = self.mmap_store.as_ref().unwrap();
        mmap_store.get_vectors(keys, vectors)
    }
}
impl<T: PrimitiveVectorElement> VectorStorage for MemmapDenseVectorStorage<T> {
    fn distance(&self) -> Distance {
        self.distance
    }

    fn datatype(&self) -> VectorStorageDatatype {
        T::datatype()
    }

    /// Mmap-backed storage always reads from disk.
    fn is_on_disk(&self) -> bool {
        true
    }

    fn total_vector_count(&self) -> usize {
        self.mmap_store.as_ref().unwrap().num_vectors
    }

    /// Borrow the vector for `key`, converted to the float representation.
    ///
    /// Panics if the vector is not present.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.mmap_store
            .as_ref()
            .unwrap()
            .get_vector_opt::<P>(key)
            .map(|vector| T::slice_to_float_cow(vector.into()).into())
            .expect("Vector not found")
    }

    /// Like `get_vector`, but returns `None` instead of panicking when absent.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        self.mmap_store
            .as_ref()
            .unwrap()
            .get_vector_opt::<P>(key)
            .map(|vector| T::slice_to_float_cow(vector.into()).into())
    }

    /// Mmap storage only grows via `update_from`; direct inserts are unsupported.
    fn insert_vector(
        &mut self,
        _key: PointOffsetType,
        _vector: VectorRef,
        _hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        panic!("Can't directly update vector in mmap storage")
    }

    /// Append all vectors from `other_vectors` to the backing file, reopen the
    /// mmap over the grown file, then propagate deletion flags.
    ///
    /// Returns the range of newly assigned point offsets.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let dim = self.vector_dim();
        let start_index = self.mmap_store.as_ref().unwrap().num_vectors as PointOffsetType;
        let mut end_index = start_index;
        // Take (and drop) the current store before appending to its backing file,
        // remembering its async-IO setting for the reopen below.
        let with_async_io = self
            .mmap_store
            .take()
            .map(|x| x.has_async_reader())
            .unwrap_or(get_async_scorer());
        // Extend vectors file, write other vectors into it
        let mut vectors_file = BufWriter::new(open_append(&self.vectors_path)?);
        let mut deleted_ids = vec![];
        for (offset, (other_vector, other_deleted)) in other_vectors.enumerate() {
            check_process_stopped(stopped)?;
            let vector = T::slice_from_float_cow(Cow::try_from(other_vector)?);
            let raw_bites = mmap_ops::transmute_to_u8_slice(vector.as_ref());
            vectors_file.write_all(raw_bites)?;
            end_index += 1;
            // Remember deleted IDs so we can propagate deletions later
            if other_deleted {
                deleted_ids.push(start_index as PointOffsetType + offset as PointOffsetType);
            }
        }
        // Explicitly fsync file contents to ensure durability
        vectors_file.flush()?;
        vectors_file
            .into_inner()
            .map_err(io::IntoInnerError::into_error)?
            .sync_data()?;
        // Load store with updated files
        self.mmap_store.replace(MmapDenseVectors::open(
            &self.vectors_path,
            &self.deleted_path,
            dim,
            with_async_io,
        )?);
        // Flush deleted flags into store
        // We must do that in the updated store, and cannot do it in the previous loop. That is
        // because the file backing delete storage must be resized, and for that we'd need to know
        // the exact number of vectors beforehand. When opening the store it is done automatically.
        let store = self.mmap_store.as_mut().unwrap();
        for id in deleted_ids {
            check_process_stopped(stopped)?;
            store.delete(id);
        }
        store.flusher()()?;
        Ok(start_index..end_index)
    }

    fn flusher(&self) -> Flusher {
        match &self.mmap_store {
            Some(mmap_store) => {
                let mmap_flusher = mmap_store.flusher();
                Box::new(move || mmap_flusher().map_err(OperationError::from))
            }
            // Store is transiently absent (during `update_from`): nothing to flush
            None => Box::new(|| Ok(())),
        }
    }

    fn files(&self) -> Vec<PathBuf> {
        vec![self.vectors_path.clone(), self.deleted_path.clone()]
    }

    fn immutable_files(&self) -> Vec<PathBuf> {
        // Vector storage is initialized by `SegmentBuilder` during segment construction
        // and can't be changed after
        vec![self.vectors_path.clone()]
    }

    /// Mark `key` as deleted in the underlying store and return its result.
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        Ok(self.mmap_store.as_mut().unwrap().delete(key))
    }

    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.mmap_store.as_ref().unwrap().is_deleted_vector(key)
    }

    fn deleted_vector_count(&self) -> usize {
        self.mmap_store.as_ref().unwrap().deleted_count
    }

    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.mmap_store.as_ref().unwrap().deleted_vector_bitslice()
    }
}
/// Open a file shortly for appending
fn open_append<P: AsRef<Path>>(path: P) -> io::Result<File> {
    // Pass the borrowed path directly; `fs_err`'s `open` converts it into an
    // owned `PathBuf` internally, so the explicit `to_path_buf()` clone and
    // intermediate local the previous version used were unnecessary.
    OpenOptions::new().append(true).open(path.as_ref())
}
#[cfg(test)]
mod tests {
    use std::mem::transmute;
    use std::sync::Arc;
    use atomic_refcell::AtomicRefCell;
    use common::counter::hardware_counter::HardwareCounterCell;
    use itertools::Itertools;
    use memory::mmap_ops::transmute_to_u8_slice;
    use tempfile::Builder;
    use super::*;
    use crate::data_types::vectors::{DenseVector, QueryVector};
    use crate::fixtures::payload_context_fixture::FixtureIdTracker;
    use crate::id_tracker::id_tracker_base::IdTracker;
    use crate::index::hnsw_index::point_scorer::{BatchFilteredSearcher, FilteredScorer};
    use crate::types::{PointIdType, QuantizationConfig, ScalarQuantizationConfig};
    use crate::vector_storage::dense::volatile_dense_vector_storage::new_volatile_dense_vector_storage;
    use crate::vector_storage::quantized::quantized_vectors::{
        QuantizedVectors, QuantizedVectorsStorageType,
    };
    use crate::vector_storage::{DEFAULT_STOPPED, Random, new_raw_scorer};
    /// Fill the mmap storage in two `update_from` batches, then verify counts,
    /// vector contents, and that top-k search excludes a dropped point.
    #[test]
    fn test_basic_persistence() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let points = [
            vec![1.0, 0.0, 1.0, 1.0],
            vec![1.0, 0.0, 1.0, 0.0],
            vec![1.0, 1.0, 1.0, 1.0],
            vec![1.0, 1.0, 0.0, 1.0],
            vec![1.0, 0.0, 0.0, 0.0],
        ];
        let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
        let mut storage = open_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
        let mut borrowed_id_tracker = id_tracker.borrow_mut();
        // Assert this storage lists both the vector and deleted file
        let files = storage.files();
        for file_name in [VECTORS_PATH, DELETED_PATH] {
            files
                .iter()
                .find(|p| p.file_name().unwrap() == file_name)
                .expect("storage is missing required file");
        }
        let hw_counter = HardwareCounterCell::new();
        // First batch: copy points 0..3 from a volatile source storage.
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                storage2
                    .insert_vector(0, points[0].as_slice().into(), &hw_counter)
                    .unwrap();
                storage2
                    .insert_vector(1, points[1].as_slice().into(), &hw_counter)
                    .unwrap();
                storage2
                    .insert_vector(2, points[2].as_slice().into(), &hw_counter)
                    .unwrap();
            }
            let mut iter = (0..3).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        assert_eq!(storage.total_vector_count(), 3);
        let vector = storage.get_vector::<Random>(1).to_owned();
        let vector: DenseVector = vector.try_into().unwrap();
        assert_eq!(points[1], vector);
        // Drop point 2 from the id tracker so searches must skip it.
        borrowed_id_tracker.drop(PointIdType::NumId(2)).unwrap();
        // Second batch: append points 3..5 the same way.
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                storage2
                    .insert_vector(3, points[3].as_slice().into(), &hw_counter)
                    .unwrap();
                storage2
                    .insert_vector(4, points[4].as_slice().into(), &hw_counter)
                    .unwrap();
            }
            let mut iter = (0..2).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        assert_eq!(storage.total_vector_count(), 5);
        let stored_ids: Vec<PointOffsetType> = borrowed_id_tracker.iter_internal().collect();
        assert_eq!(stored_ids, [0, 1, 3, 4]);
        // Top-2 over all points: dropped point 2 must never be returned.
        let searcher = BatchFilteredSearcher::new_for_test(
            &[points[2].as_slice().into()],
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            2,
        );
        let res = searcher
            .peek_top_all(&DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert_eq!(res.len(), 2);
        assert_ne!(res[0].idx, 2);
        // Same check via an explicit candidate iterator.
        let searcher = BatchFilteredSearcher::new_for_test(
            &[points[2].as_slice().into()],
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            2,
        );
        let res = searcher
            .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert_eq!(res.len(), 2);
        assert_ne!(res[0].idx, 2);
    }
    /// Delete vectors in the storage itself (not the id tracker) and verify
    /// deletion counters and that searches skip deleted vectors.
    #[test]
    fn test_delete_points() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let points = [
            vec![1.0, 0.0, 1.0, 1.0],
            vec![1.0, 0.0, 1.0, 0.0],
            vec![1.0, 1.0, 1.0, 1.0],
            vec![1.0, 1.0, 0.0, 1.0],
            vec![1.0, 0.0, 0.0, 0.0],
        ];
        let delete_mask = [false, false, true, true, false];
        let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
        let mut storage = open_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
        let borrowed_id_tracker = id_tracker.borrow_mut();
        let hw_counter = HardwareCounterCell::new();
        // Populate the mmap storage from a volatile source.
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                points.iter().enumerate().for_each(|(i, vec)| {
                    storage2
                        .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
                        .unwrap();
                });
            }
            let mut iter = (0..points.len()).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        assert_eq!(storage.total_vector_count(), 5);
        assert_eq!(storage.deleted_vector_count(), 0);
        // Delete select number of points
        delete_mask
            .into_iter()
            .enumerate()
            .filter(|(_, d)| *d)
            .for_each(|(i, _)| {
                storage.delete_vector(i as PointOffsetType).unwrap();
            });
        assert_eq!(
            storage.deleted_vector_count(),
            2,
            "2 vectors must be deleted"
        );
        let vector = vec![0.0, 1.0, 1.1, 1.0];
        let query = vector.as_slice().into();
        let searcher = BatchFilteredSearcher::new_for_test(
            std::slice::from_ref(&query),
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            5,
        );
        let closest = searcher
            .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
        assert_eq!(closest[0].idx, 0);
        assert_eq!(closest[1].idx, 1);
        assert_eq!(closest[2].idx, 4);
        // Delete 1, redelete 2
        storage.delete_vector(1 as PointOffsetType).unwrap();
        storage.delete_vector(2 as PointOffsetType).unwrap();
        assert_eq!(
            storage.deleted_vector_count(),
            3,
            "3 vectors must be deleted"
        );
        let vector = vec![1.0, 0.0, 0.0, 0.0];
        let query = vector.as_slice().into();
        let searcher = BatchFilteredSearcher::new_for_test(
            std::slice::from_ref(&query),
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            5,
        );
        let closest = searcher
            .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert_eq!(closest.len(), 2, "must have 2 vectors, 3 are deleted");
        assert_eq!(closest[0].idx, 4);
        assert_eq!(closest[1].idx, 0);
        // Delete all
        storage.delete_vector(0 as PointOffsetType).unwrap();
        storage.delete_vector(4 as PointOffsetType).unwrap();
        assert_eq!(
            storage.deleted_vector_count(),
            5,
            "all vectors must be deleted"
        );
        let vector = vec![1.0, 0.0, 0.0, 0.0];
        let query = vector.as_slice().into();
        let searcher = BatchFilteredSearcher::new_for_test(
            std::slice::from_ref(&query),
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            5,
        );
        let closest = searcher
            .peek_top_all(&DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert!(closest.is_empty(), "must have no results, all deleted");
    }
    /// Test that deleted points are properly transferred when updating from other storage.
    #[test]
    fn test_update_from_delete_points() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let points = [
            vec![1.0, 0.0, 1.0, 1.0],
            vec![1.0, 0.0, 1.0, 0.0],
            vec![1.0, 1.0, 1.0, 1.0],
            vec![1.0, 1.0, 0.0, 1.0],
            vec![1.0, 0.0, 0.0, 0.0],
        ];
        let delete_mask = [false, false, true, true, false];
        let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
        let mut storage = open_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
        let borrowed_id_tracker = id_tracker.borrow_mut();
        let hw_counter = HardwareCounterCell::new();
        // Source storage already has some vectors deleted; the flags must
        // carry over through `update_from`.
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                points.iter().enumerate().for_each(|(i, vec)| {
                    storage2
                        .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
                        .unwrap();
                    if delete_mask[i] {
                        storage2.delete_vector(i as PointOffsetType).unwrap();
                    }
                });
            }
            let mut iter = (0..points.len()).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        assert_eq!(
            storage.deleted_vector_count(),
            2,
            "2 vectors must be deleted from other storage"
        );
        let vector = vec![0.0, 1.0, 1.1, 1.0];
        let query = vector.as_slice().into();
        let searcher = BatchFilteredSearcher::new_for_test(
            std::slice::from_ref(&query),
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
            5,
        );
        let closest = searcher
            .peek_top_iter(&mut [0, 1, 2, 3, 4].iter().cloned(), &DEFAULT_STOPPED)
            .unwrap()
            .into_iter()
            .exactly_one()
            .unwrap();
        assert_eq!(closest.len(), 3, "must have 3 vectors, 2 are deleted");
        assert_eq!(closest[0].idx, 0);
        assert_eq!(closest[1].idx, 1);
        assert_eq!(closest[2].idx, 4);
        // Delete all
        storage.delete_vector(0 as PointOffsetType).unwrap();
        storage.delete_vector(1 as PointOffsetType).unwrap();
        storage.delete_vector(4 as PointOffsetType).unwrap();
        assert_eq!(
            storage.deleted_vector_count(),
            5,
            "all vectors must be deleted"
        );
    }
    /// Score a fixed candidate set with a raw scorer over mmap storage and
    /// check order and one exact dot-product value.
    #[test]
    fn test_mmap_raw_scorer() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let points = [
            vec![1.0, 0.0, 1.0, 1.0],
            vec![1.0, 0.0, 1.0, 0.0],
            vec![1.0, 1.0, 1.0, 1.0],
            vec![1.0, 1.0, 0.0, 1.0],
            vec![1.0, 0.0, 0.0, 0.0],
        ];
        let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(points.len())));
        let mut storage = open_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
        let borrowed_id_tracker = id_tracker.borrow_mut();
        let hw_counter = HardwareCounterCell::new();
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                for (i, vec) in points.iter().enumerate() {
                    storage2
                        .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
                        .unwrap();
                }
            }
            let mut iter = (0..points.len()).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        let vector = vec![-1.0, -1.0, -1.0, -1.0];
        let query = vector.as_slice().into();
        let mut scorer = FilteredScorer::new_for_test(
            query,
            &storage,
            borrowed_id_tracker.deleted_point_bitslice(),
        );
        let mut query_points: Vec<PointOffsetType> = vec![0, 2, 4];
        let res = scorer
            .score_points(&mut query_points, 0)
            .collect::<Vec<_>>();
        assert_eq!(res.len(), 3);
        assert_eq!(res[0].idx, 0);
        assert_eq!(res[1].idx, 2);
        assert_eq!(res[2].idx, 4);
        // Dot of [-1,-1,-1,-1] with point 4 = [1,0,0,0] is exactly -1.
        assert_eq!(res[2].score, -1.0);
    }
    /// Sanity check of the raw byte-level reinterpretation used for mmap I/O.
    #[test]
    fn test_casts() {
        let data: DenseVector = vec![0.42, 0.069, 333.1, 100500.];
        let raw_data = transmute_to_u8_slice(&data);
        eprintln!("raw_data.len() = {:#?}", raw_data.len());
        // NOTE(review): transmuting `&[u8]` to `&[VectorElementType]` keeps the
        // byte count as the slice length, so only the `0..data.len()` prefix
        // taken below refers to valid elements — confirm this stays sound.
        let arr: &[VectorElementType] = unsafe { transmute(raw_data) };
        let slice = &arr[0..data.len()];
        eprintln!("slice.len() = {:#?}", slice.len());
        for (idx, element) in slice.iter().enumerate() {
            println!("slice[{idx}] = {element:?}");
        }
    }
    /// Build scalar-quantized vectors over the mmap storage and verify that
    /// quantized scores stay close to raw scores, before and after reload.
    #[test]
    fn test_mmap_quantization() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let points = [
            vec![1.0, 0.0, 1.0, 1.0],
            vec![1.0, 0.0, 1.0, 0.0],
            vec![1.0, 1.0, 1.0, 1.0],
            vec![1.0, 1.0, 0.0, 1.0],
            vec![1.0, 0.0, 0.0, 0.0],
        ];
        let mut storage = open_memmap_vector_storage(dir.path(), 4, Distance::Dot).unwrap();
        let hw_counter = HardwareCounterCell::new();
        {
            let mut storage2 = new_volatile_dense_vector_storage(4, Distance::Dot);
            {
                for (i, vec) in points.iter().enumerate() {
                    storage2
                        .insert_vector(i as PointOffsetType, vec.as_slice().into(), &hw_counter)
                        .unwrap();
                }
            }
            let mut iter = (0..points.len()).map(|i| {
                let i = i as PointOffsetType;
                let vector = storage2.get_vector::<Random>(i);
                let deleted = storage2.is_deleted_vector(i);
                (vector, deleted)
            });
            storage.update_from(&mut iter, &Default::default()).unwrap();
        }
        let config: QuantizationConfig = ScalarQuantizationConfig {
            r#type: Default::default(),
            quantile: None,
            always_ram: None,
        }
        .into();
        let stopped = Arc::new(AtomicBool::new(false));
        let hardware_counter = HardwareCounterCell::new();
        let quantized_vectors = QuantizedVectors::create(
            &storage,
            &config,
            QuantizedVectorsStorageType::Immutable,
            dir.path(),
            1,
            &stopped,
        )
        .unwrap();
        let query: QueryVector = [0.5, 0.5, 0.5, 0.5].into();
        let scorer_quant = quantized_vectors
            .raw_scorer(query.clone(), hardware_counter)
            .unwrap();
        let scorer_orig =
            new_raw_scorer(query.clone(), &storage, HardwareCounterCell::new()).unwrap();
        // Quantization error tolerance: 0.15 absolute difference.
        for i in 0..5 {
            let quant = scorer_quant.score_point(i);
            let orig = scorer_orig.score_point(i);
            assert!((orig - quant).abs() < 0.15);
            let quant = scorer_quant.score_internal(0, i);
            let orig = scorer_orig.score_internal(0, i);
            assert!((orig - quant).abs() < 0.15);
        }
        let files = storage.files();
        let quantization_files = quantized_vectors.files();
        // test save-load
        let quantized_vectors = QuantizedVectors::load(&config, &storage, dir.path(), &stopped)
            .unwrap()
            .unwrap();
        assert_eq!(files, storage.files());
        assert_eq!(quantization_files, quantized_vectors.files());
        let hardware_counter = HardwareCounterCell::new();
        let scorer_quant = quantized_vectors
            .raw_scorer(query.clone(), hardware_counter)
            .unwrap();
        let scorer_orig = new_raw_scorer(query, &storage, HardwareCounterCell::new()).unwrap();
        for i in 0..5 {
            let quant = scorer_quant.score_point(i);
            let orig = scorer_orig.score_point(i);
            assert!((orig - quant).abs() < 0.15);
            let quant = scorer_quant.score_internal(0, i);
            let orig = scorer_orig.score_internal(0, i);
            assert!((orig - quant).abs() < 0.15);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/simple_dense_vector_storage.rs | lib/segment/src/vector_storage/dense/simple_dense_vector_storage.rs | use std::borrow::Cow;
use std::mem::size_of;
use std::ops::Range;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use log::debug;
use parking_lot::RwLock;
use rocksdb::DB;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::common::rocksdb_wrapper::DatabaseColumnWrapper;
use crate::data_types::named_vectors::CowVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{VectorElementType, VectorRef};
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::chunked_vectors::ChunkedVectors;
use crate::vector_storage::common::StoredRecord;
use crate::vector_storage::{AccessPattern, DenseVectorStorage, VectorStorage, VectorStorageEnum};
/// Record layout persisted to RocksDB: a deleted flag plus the raw vector.
type StoredDenseVector<T> = StoredRecord<Vec<T>>;
/// In-memory vector storage with on-update persistence using `store`
#[derive(Debug)]
pub struct SimpleDenseVectorStorage<T: PrimitiveVectorElement> {
    // Dimensionality of every stored vector.
    dim: usize,
    // Distance function this storage was created for.
    distance: Distance,
    // Dense vector data, fully resident in RAM.
    vectors: ChunkedVectors<T>,
    // RocksDB column family used to persist every update.
    db_wrapper: DatabaseColumnWrapper,
    // Reusable record buffer, so serializing an update does not allocate
    // a fresh vector each time.
    update_buffer: StoredDenseVector<T>,
    /// BitVec for deleted flags. Grows dynamically upto last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
}
/// Rebuild a `SimpleDenseVectorStorage` from its RocksDB column family.
///
/// Every stored record is deserialized back into RAM; deleted flags are
/// restored alongside the vector data. Returns an error if any record
/// fails to deserialize, and aborts early when `stopped` is set.
fn open_simple_dense_vector_storage_impl<T: PrimitiveVectorElement>(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    stopped: &AtomicBool,
) -> OperationResult<SimpleDenseVectorStorage<T>> {
    let mut vectors = ChunkedVectors::new(dim);
    let (mut deleted, mut deleted_count) = (BitVec::new(), 0);
    let db_wrapper = DatabaseColumnWrapper::new(database, database_column_name);
    for (key, value) in db_wrapper.lock_db().iter()? {
        // Keys are bincode-encoded internal point offsets.
        let point_id: PointOffsetType = bincode::deserialize(&key)
            .map_err(|_| OperationError::service_error("cannot deserialize point id from db"))?;
        let stored_record: StoredDenseVector<T> = bincode::deserialize(&value)
            .map_err(|_| OperationError::service_error("cannot deserialize record from db"))?;
        // Propagate deleted flag
        if stored_record.deleted {
            bitvec_set_deleted(&mut deleted, point_id, true);
            deleted_count += 1;
        }
        vectors.insert(point_id as VectorOffsetType, &stored_record.vector)?;
        check_process_stopped(stopped)?;
    }
    debug!("Segment vectors: {}", vectors.len());
    debug!(
        "Estimated segment size {} MB",
        vectors.len() * dim * size_of::<T>() / 1024 / 1024
    );
    Ok(SimpleDenseVectorStorage {
        dim,
        distance,
        vectors,
        db_wrapper,
        // Pre-sized scratch record reused by `update_stored`.
        update_buffer: StoredRecord {
            deleted: false,
            vector: vec![T::default(); dim],
        },
        deleted,
        deleted_count,
    })
}
pub fn open_simple_dense_vector_storage(
storage_element_type: VectorStorageDatatype,
database: Arc<RwLock<DB>>,
database_column_name: &str,
dim: usize,
distance: Distance,
stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
match storage_element_type {
VectorStorageDatatype::Float32 => open_simple_dense_full_vector_storage(
database,
database_column_name,
dim,
distance,
stopped,
),
VectorStorageDatatype::Float16 => open_simple_dense_half_vector_storage(
database,
database_column_name,
dim,
distance,
stopped,
),
VectorStorageDatatype::Uint8 => open_simple_dense_byte_vector_storage(
database,
database_column_name,
dim,
distance,
stopped,
),
}
}
/// Open an f32 RocksDB-backed dense vector storage, wrapped in the enum.
pub fn open_simple_dense_full_vector_storage(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    open_simple_dense_vector_storage_impl::<VectorElementType>(
        database,
        database_column_name,
        dim,
        distance,
        stopped,
    )
    .map(VectorStorageEnum::DenseSimple)
}
/// Open a u8 RocksDB-backed dense vector storage, wrapped in the enum.
pub fn open_simple_dense_byte_vector_storage(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant constructor.
    open_simple_dense_vector_storage_impl(database, database_column_name, dim, distance, stopped)
        .map(VectorStorageEnum::DenseSimpleByte)
}
/// Open an f16 RocksDB-backed dense vector storage, wrapped in the enum.
pub fn open_simple_dense_half_vector_storage(
    database: Arc<RwLock<DB>>,
    database_column_name: &str,
    dim: usize,
    distance: Distance,
    stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant constructor.
    open_simple_dense_vector_storage_impl(database, database_column_name, dim, distance, stopped)
        .map(VectorStorageEnum::DenseSimpleHalf)
}
impl<T: PrimitiveVectorElement> SimpleDenseVectorStorage<T> {
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Clearing a flag for a key we hold no vector for is a no-op; this
        // also avoids growing the bitvec for out-of-range keys.
        if !deleted && key as usize >= self.vectors.len() {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Keep the cached counter in sync with actual flag transitions only.
        if was_deleted != deleted {
            if !was_deleted {
                self.deleted_count += 1;
            } else {
                self.deleted_count = self.deleted_count.saturating_sub(1);
            }
        }
        was_deleted
    }
    /// Serialize the record for `key` into the reusable buffer and persist
    /// it to RocksDB. When `vector` is `None`, only the deleted flag changes
    /// and the previously buffered vector bytes are written back unchanged.
    fn update_stored(
        &mut self,
        key: PointOffsetType,
        deleted: bool,
        vector: Option<&[T]>,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        // Write vector state to buffer record
        let record = &mut self.update_buffer;
        record.deleted = deleted;
        if let Some(vector) = vector {
            record.vector.copy_from_slice(vector);
        }
        let key_enc = bincode::serialize(&key).unwrap();
        let record_enc = bincode::serialize(&record).unwrap();
        // Account the write against the vector-IO hardware counter.
        hw_counter
            .vector_io_write_counter()
            .incr_delta(key_enc.len() + record_enc.len());
        // Store updated record
        self.db_wrapper.put(key_enc, record_enc)?;
        Ok(())
    }
    /// Destroy this vector storage, remove persisted data from RocksDB
    pub fn destroy(&self) -> OperationResult<()> {
        self.db_wrapper.remove_column_family()?;
        Ok(())
    }
}
impl<T: PrimitiveVectorElement> DenseVectorStorage<T> for SimpleDenseVectorStorage<T> {
    /// Dimensionality of every stored vector.
    fn vector_dim(&self) -> usize {
        self.dim
    }
    /// Borrow the raw dense vector under `key`.
    ///
    /// Data lives in RAM, so the access pattern `P` has no effect here.
    fn get_dense<P: AccessPattern>(&self, key: PointOffsetType) -> &[T] {
        self.vectors.get(key as VectorOffsetType)
    }
}
impl<T: PrimitiveVectorElement> VectorStorage for SimpleDenseVectorStorage<T> {
    fn distance(&self) -> Distance {
        self.distance
    }
    fn datatype(&self) -> VectorStorageDatatype {
        T::datatype()
    }
    // Vectors are held in RAM; RocksDB is only the persistence layer.
    fn is_on_disk(&self) -> bool {
        false
    }
    fn total_vector_count(&self) -> usize {
        self.vectors.len()
    }
    /// Get vector by key; panics if the key is absent.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }
    /// Get vector by key, if it exists.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        // In memory so no optimization to be done for access pattern.
        self.vectors
            .get_opt(key as VectorOffsetType)
            .map(|slice| CowVector::from(T::slice_to_float_cow(slice.into())))
    }
    /// Insert (or overwrite) the vector under `key`, clear its deleted flag,
    /// and persist the updated record.
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector: &[VectorElementType] = vector.try_into()?;
        let vector = T::slice_from_float_cow(Cow::from(vector));
        self.vectors
            .insert(key as VectorOffsetType, vector.as_ref())?;
        self.set_deleted(key, false);
        self.update_stored(key, false, Some(vector.as_ref()), hw_counter)?;
        Ok(())
    }
    /// Append all vectors (and their deleted flags) from another storage;
    /// returns the range of newly assigned internal ids.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.vectors.len() as PointOffsetType;
        let dispose_hw = HardwareCounterCell::disposable(); // This function is only used for internal operations.
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = T::slice_from_float_cow(Cow::try_from(other_vector)?);
            let new_id = self.vectors.push(other_vector.as_ref())? as PointOffsetType;
            self.set_deleted(new_id, other_deleted);
            self.update_stored(
                new_id,
                other_deleted,
                Some(other_vector.as_ref()),
                &dispose_hw,
            )?;
        }
        let end_index = self.vectors.len() as PointOffsetType;
        Ok(start_index..end_index)
    }
    fn flusher(&self) -> Flusher {
        self.db_wrapper.flusher()
    }
    // No standalone files; all persistence goes through RocksDB.
    fn files(&self) -> Vec<std::path::PathBuf> {
        vec![]
    }
    /// Flag `key` as deleted; returns `true` when the flag newly changed
    /// (`set_deleted` reports the *previous* state, hence the negation).
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        let is_deleted = !self.set_deleted(key, true);
        if is_deleted {
            // Not measuring deletions
            self.update_stored(key, true, None, &HardwareCounterCell::disposable())?;
        }
        Ok(is_deleted)
    }
    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }
    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }
    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
#[cfg(test)]
mod tests {
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use tempfile::Builder;
    use super::*;
    use crate::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
    use crate::segment_constructor::migrate_rocksdb_dense_vector_storage_to_mmap;
    use crate::vector_storage::Sequential;
    // Fixed seed so the same point data can be regenerated for verification.
    const RAND_SEED: u64 = 42;
    /// Create RocksDB based dense vector storage.
    ///
    /// Migrate it to the mmap based dense vector storage and assert vector data is correct.
    #[test]
    fn test_migrate_simple_to_mmap() {
        const POINT_COUNT: PointOffsetType = 128;
        const DIM: usize = 128;
        const DELETE_PROBABILITY: f64 = 0.1;
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let db_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let db = open_db(db_dir.path(), &[DB_VECTOR_CF]).unwrap();
        // Create simple dense vector storage, insert test points and delete some of them again
        let mut storage = open_simple_dense_full_vector_storage(
            db,
            DB_VECTOR_CF,
            DIM,
            Distance::Dot,
            &AtomicBool::new(false),
        )
        .unwrap();
        for internal_id in 0..POINT_COUNT {
            let point = std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                .take(DIM)
                .collect::<Vec<_>>();
            storage
                .insert_vector(
                    internal_id,
                    VectorRef::from(&point),
                    &HardwareCounterCell::disposable(),
                )
                .unwrap();
            if rng.random_bool(DELETE_PROBABILITY) {
                storage.delete_vector(internal_id).unwrap();
            }
        }
        let deleted_vector_count = storage.deleted_vector_count();
        let total_vector_count = storage.total_vector_count();
        // Migrate from RocksDB to mmap storage
        let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let new_storage =
            migrate_rocksdb_dense_vector_storage_to_mmap(&storage, DIM, storage_dir.path())
                .expect("failed to migrate from RocksDB to mmap");
        // Destroy persisted RocksDB dense vector data
        match storage {
            VectorStorageEnum::DenseSimple(storage) => storage.destroy().unwrap(),
            VectorStorageEnum::DenseSimpleByte(storage) => storage.destroy().unwrap(),
            VectorStorageEnum::DenseSimpleHalf(storage) => storage.destroy().unwrap(),
            _ => unreachable!("unexpected vector storage type"),
        }
        // We can drop RocksDB storage now
        db_dir.close().expect("failed to drop RocksDB storage");
        // Assert vector counts and data
        // Re-seeding the RNG replays the exact same point data and delete
        // decisions as the insertion loop above.
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        assert_eq!(new_storage.deleted_vector_count(), deleted_vector_count);
        assert_eq!(new_storage.total_vector_count(), total_vector_count);
        for internal_id in 0..POINT_COUNT {
            let point = std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                .take(DIM)
                .collect::<Vec<_>>();
            assert_eq!(
                new_storage.get_vector::<Sequential>(internal_id),
                CowVector::from(point),
            );
            assert_eq!(
                new_storage.is_deleted_vector(internal_id),
                rng.random_bool(DELETE_PROBABILITY)
            );
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/appendable_dense_vector_storage.rs | lib/segment/src/vector_storage/dense/appendable_dense_vector_storage.rs | use std::borrow::Cow;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_counter::HardwareCounterCell;
use common::maybe_uninit::maybe_uninit_fill_from;
use common::types::PointOffsetType;
use fs_err as fs;
use memory::madvise::AdviceSetting;
use crate::common::Flusher;
use crate::common::flags::bitvec_flags::BitvecFlags;
use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::operation_error::{OperationResult, check_process_stopped};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{VectorElementType, VectorRef};
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::chunked_mmap_vectors::ChunkedMmapVectors;
use crate::vector_storage::chunked_vector_storage::{ChunkedVectorStorage, VectorOffsetType};
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::in_ram_persisted_vectors::InRamPersistedVectors;
use crate::vector_storage::{AccessPattern, DenseVectorStorage, VectorStorage, VectorStorageEnum};
/// Sub-directory holding the chunked vector data.
const VECTORS_DIR_PATH: &str = "vectors";
/// Sub-directory holding the dynamic deleted-flags structure.
const DELETED_DIR_PATH: &str = "deleted";
#[derive(Debug)]
pub struct AppendableMmapDenseVectorStorage<T: PrimitiveVectorElement, S: ChunkedVectorStorage<T>> {
    // Chunked vector backend: mmap-backed or in-RAM persisted.
    vectors: S,
    /// Flags marking deleted vectors
    ///
    /// Structure grows dynamically, but may be smaller than actual number of vectors. Must not
    /// depend on its length.
    deleted: BitvecFlags,
    // Distance function this storage was created for.
    distance: Distance,
    // Cached number of set deleted flags.
    deleted_count: usize,
    // `T` only appears via `S`'s trait bound; marker keeps it in the type.
    _phantom: std::marker::PhantomData<T>,
}
impl<T: PrimitiveVectorElement, S: ChunkedVectorStorage<T>> AppendableMmapDenseVectorStorage<T, S> {
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Clearing a flag for a key beyond the stored range is a no-op —
        // presumably to avoid growing the flags structure needlessly; confirm.
        if !deleted && self.vectors.len() <= key as usize {
            return false;
        }
        // mark deletion
        let previous = self.deleted.set(key, deleted);
        // update counter
        if !previous && deleted {
            self.deleted_count += 1;
        } else if previous && !deleted {
            self.deleted_count -= 1;
        }
        previous
    }
    /// Populate all pages in the mmap.
    /// Block until all pages are populated.
    pub fn populate(&self) -> OperationResult<()> {
        // deleted bitvec is already loaded
        self.vectors.populate()?;
        Ok(())
    }
    /// Drop disk cache.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.deleted.clear_cache()?;
        self.vectors.clear_cache()?;
        Ok(())
    }
}
impl<T: PrimitiveVectorElement, S: ChunkedVectorStorage<T>> DenseVectorStorage<T>
    for AppendableMmapDenseVectorStorage<T, S>
{
    fn vector_dim(&self) -> usize {
        self.vectors.dim()
    }
    /// Borrow the raw dense vector under `key`; panics if it is absent.
    fn get_dense<P: AccessPattern>(&self, key: PointOffsetType) -> &[T] {
        self.vectors
            .get::<P>(key as VectorOffsetType)
            .expect("mmap vector not found")
    }
    /// Resolve a batch of keys to vector slices in one call.
    ///
    /// NOTE(review): the scratch array is fixed at `VECTOR_READ_BATCH_SIZE`
    /// entries, so this appears to assume `keys.len()` never exceeds that
    /// constant — confirm against callers.
    fn get_dense_batch<'a>(
        &'a self,
        keys: &[PointOffsetType],
        vectors: &'a mut [MaybeUninit<&'a [T]>],
    ) -> &'a [&'a [T]] {
        let mut vector_offsets = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
        let vector_offsets = maybe_uninit_fill_from(
            &mut vector_offsets,
            keys.iter().map(|key| *key as VectorOffsetType),
        )
        .0;
        self.vectors.get_batch(vector_offsets, vectors)
    }
}
impl<T: PrimitiveVectorElement, S: ChunkedVectorStorage<T>> VectorStorage
    for AppendableMmapDenseVectorStorage<T, S>
{
    fn distance(&self) -> Distance {
        self.distance
    }
    fn datatype(&self) -> VectorStorageDatatype {
        T::datatype()
    }
    // Delegated: depends on whether the chunked backend is mmap or in-RAM.
    fn is_on_disk(&self) -> bool {
        self.vectors.is_on_disk()
    }
    fn total_vector_count(&self) -> usize {
        self.vectors.len()
    }
    /// Get vector by key; panics if it is absent.
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.vectors
            .get::<P>(key as VectorOffsetType)
            .map(|slice| CowVector::from(T::slice_to_float_cow(slice.into())))
            .expect("Vector not found")
    }
    /// Get vector by key, if it exists.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        self.vectors
            .get::<P>(key as VectorOffsetType)
            .map(|slice| CowVector::from(T::slice_to_float_cow(slice.into())))
    }
    /// Insert (or overwrite) the vector under `key` and clear its deleted flag.
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector: &[VectorElementType] = vector.try_into()?;
        let vector = T::slice_from_float_cow(Cow::from(vector));
        self.vectors
            .insert(key as VectorOffsetType, vector.as_ref(), hw_counter)?;
        self.set_deleted(key, false);
        Ok(())
    }
    /// Append all vectors (and their deleted flags) from another storage;
    /// returns the range of newly assigned internal ids.
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.vectors.len() as PointOffsetType;
        let disposed_hw = HardwareCounterCell::disposable(); // This function is only used for internal operations.
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = T::slice_from_float_cow(Cow::try_from(other_vector)?);
            let new_id = self.vectors.push(other_vector.as_ref(), &disposed_hw)?;
            self.set_deleted(new_id as PointOffsetType, other_deleted);
        }
        let end_index = self.vectors.len() as PointOffsetType;
        Ok(start_index..end_index)
    }
    /// Combined flusher: persists vector data first, then deleted flags.
    fn flusher(&self) -> Flusher {
        Box::new({
            let vectors_flusher = self.vectors.flusher();
            let deleted_flusher = self.deleted.flusher();
            move || {
                vectors_flusher()?;
                deleted_flusher()?;
                Ok(())
            }
        })
    }
    fn files(&self) -> Vec<PathBuf> {
        let mut files = self.vectors.files();
        files.extend(self.deleted.files());
        files
    }
    fn immutable_files(&self) -> Vec<PathBuf> {
        self.vectors.immutable_files()
    }
    // NOTE(review): this returns the PREVIOUS deleted state, while
    // `SimpleDenseVectorStorage::delete_vector` returns the negation
    // (whether the flag newly changed) — confirm which contract the
    // `VectorStorage` trait expects.
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        Ok(self.set_deleted(key, true))
    }
    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get(key)
    }
    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }
    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.get_bitslice()
    }
}
/// Open an f32 appendable mmap storage and box it into the enum.
pub fn open_appendable_memmap_vector_storage(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    open_appendable_memmap_vector_storage_impl::<VectorElementType>(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableMemmap(Box::new(storage)))
}
/// Open a u8 appendable mmap storage and box it into the enum.
pub fn open_appendable_memmap_vector_storage_byte(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant constructor.
    open_appendable_memmap_vector_storage_impl(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableMemmapByte(Box::new(storage)))
}
/// Open an f16 appendable mmap storage and box it into the enum.
pub fn open_appendable_memmap_vector_storage_half(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant constructor.
    open_appendable_memmap_vector_storage_impl(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableMemmapHalf(Box::new(storage)))
}
/// Open (or create) the mmap-backed appendable storage under `path`.
///
/// Creates the directory if needed, opens the chunked vector data and the
/// dynamic deleted-flags structure, and recomputes the deleted counter from
/// the persisted flags.
pub fn open_appendable_memmap_vector_storage_impl<T: PrimitiveVectorElement>(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<AppendableMmapDenseVectorStorage<T, ChunkedMmapVectors<T>>> {
    fs::create_dir_all(path)?;
    let vectors_path = path.join(VECTORS_DIR_PATH);
    let deleted_path = path.join(DELETED_DIR_PATH);
    // Pages are not eagerly populated on open; see `populate()`.
    let populate = false;
    let vectors =
        ChunkedMmapVectors::<T>::open(&vectors_path, dim, AdviceSetting::Global, Some(populate))?;
    let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
    // Rebuild the cached deletion counter from the persisted flags.
    let deleted_count = deleted.count_trues();
    Ok(AppendableMmapDenseVectorStorage {
        vectors,
        deleted,
        distance,
        deleted_count,
        _phantom: Default::default(),
    })
}
pub fn open_appendable_in_ram_vector_storage(
storage_element_type: VectorStorageDatatype,
path: &Path,
dim: usize,
distance: Distance,
) -> OperationResult<VectorStorageEnum> {
match storage_element_type {
VectorStorageDatatype::Float32 => {
open_appendable_in_ram_vector_storage_full(path, dim, distance)
}
VectorStorageDatatype::Float16 => {
open_appendable_in_ram_vector_storage_half(path, dim, distance)
}
VectorStorageDatatype::Uint8 => {
open_appendable_in_ram_vector_storage_byte(path, dim, distance)
}
}
}
/// Open (or create) an appendable in-RAM dense vector storage for `f32` elements.
fn open_appendable_in_ram_vector_storage_full(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    open_appendable_in_ram_vector_storage_impl::<VectorElementType>(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableInRam(Box::new(storage)))
}
/// Open (or create) an appendable in-RAM dense vector storage for byte elements.
fn open_appendable_in_ram_vector_storage_byte(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant's payload type.
    open_appendable_in_ram_vector_storage_impl(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableInRamByte(Box::new(storage)))
}
/// Open (or create) an appendable in-RAM dense vector storage for half-precision elements.
fn open_appendable_in_ram_vector_storage_half(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<VectorStorageEnum> {
    // Element type is inferred from the enum variant's payload type.
    open_appendable_in_ram_vector_storage_impl(path, dim, distance)
        .map(|storage| VectorStorageEnum::DenseAppendableInRamHalf(Box::new(storage)))
}
/// Open (or create) the generic appendable in-RAM dense vector storage at `path`.
///
/// Backed by [`InRamPersistedVectors`], i.e. data is persisted to disk but
/// served from RAM.
pub fn open_appendable_in_ram_vector_storage_impl<T: PrimitiveVectorElement>(
    path: &Path,
    dim: usize,
    distance: Distance,
) -> OperationResult<AppendableMmapDenseVectorStorage<T, InRamPersistedVectors<T>>> {
    fs::create_dir_all(path)?;
    let vectors_path = path.join(VECTORS_DIR_PATH);
    let deleted_path = path.join(DELETED_DIR_PATH);
    let vectors = InRamPersistedVectors::<T>::open(&vectors_path, dim)?;
    // In-RAM variant: eagerly populate the deleted-flags mmap pages
    // (contrast with the pure mem-mapped variant, which uses `populate = false`).
    let populate = true;
    let deleted = BitvecFlags::new(DynamicMmapFlags::open(&deleted_path, populate)?);
    // Count once at load time; the counter is maintained incrementally afterwards.
    let deleted_count = deleted.count_trues();
    Ok(AppendableMmapDenseVectorStorage {
        vectors,
        deleted,
        distance,
        deleted_count,
        _phantom: Default::default(),
    })
}
/// Find files related to this dense vector storage
///
/// Lists everything inside both the vector data and the deletion flag directories.
#[cfg(any(test, feature = "rocksdb"))]
pub(crate) fn find_storage_files(vector_storage_path: &Path) -> OperationResult<Vec<PathBuf>> {
    let mut files = common::disk::list_files(&vector_storage_path.join(VECTORS_DIR_PATH))?;
    files.extend(common::disk::list_files(
        &vector_storage_path.join(DELETED_DIR_PATH),
    )?);
    Ok(files)
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use tempfile::Builder;
    use super::*;
    // Fixed seed keeps the test fully deterministic.
    const RAND_SEED: u64 = 42;
    /// Test that `find_storage_files` finds all files that are reported by the storage.
    #[test]
    fn test_find_storage_files() {
        // Numbers chosen so we get 3 data chunks, not just 1
        const POINT_COUNT: PointOffsetType = 2500;
        const DIM: usize = 128;
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let mut storage =
            open_appendable_memmap_vector_storage(dir.path(), DIM, Distance::Dot).unwrap();
        let mut rng = StdRng::seed_from_u64(RAND_SEED);
        let hw_counter = HardwareCounterCell::disposable();
        // Insert points, delete 10% of it, and flush
        for internal_id in 0..POINT_COUNT {
            let point = std::iter::repeat_with(|| rng.random_range(-1.0..1.0))
                .take(DIM)
                .collect::<Vec<_>>();
            storage
                .insert_vector(internal_id, VectorRef::from(&point), &hw_counter)
                .unwrap();
        }
        for internal_id in 0..POINT_COUNT {
            if !rng.random_bool(0.1) {
                continue;
            }
            storage.delete_vector(internal_id).unwrap();
        }
        // Flush before listing so everything the storage reports exists on disk.
        storage.flusher()().unwrap();
        // Compare as sets: only the file inventory matters, not the order.
        let storage_files = storage.files().into_iter().collect::<HashSet<_>>();
        let found_files = find_storage_files(dir.path())
            .unwrap()
            .into_iter()
            .collect::<HashSet<_>>();
        assert_eq!(
            storage_files, found_files,
            "find_storage_files must find same files that storage reports",
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/mod.rs | lib/segment/src/vector_storage/dense/mod.rs | pub mod appendable_dense_vector_storage;
pub mod memmap_dense_vector_storage;
pub mod mmap_dense_vectors;
#[cfg(feature = "rocksdb")]
pub mod simple_dense_vector_storage;
pub mod volatile_dense_vector_storage;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/volatile_dense_vector_storage.rs | lib/segment/src/vector_storage/dense/volatile_dense_vector_storage.rs | use std::borrow::Cow;
use std::ops::Range;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::{BitSlice, BitVec};
use common::counter::hardware_counter::HardwareCounterCell;
use common::ext::BitSliceExt as _;
use common::types::PointOffsetType;
use crate::common::Flusher;
use crate::common::operation_error::{OperationResult, check_process_stopped};
use crate::data_types::named_vectors::CowVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{VectorElementType, VectorRef};
use crate::types::{Distance, VectorStorageDatatype};
use crate::vector_storage::bitvec::bitvec_set_deleted;
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::chunked_vectors::ChunkedVectors;
use crate::vector_storage::{AccessPattern, DenseVectorStorage, VectorStorage, VectorStorageEnum};
/// In-memory vector storage that is volatile
///
/// This storage is not persisted and intended for temporary use in tests.
#[derive(Debug)]
pub struct VolatileDenseVectorStorage<T: PrimitiveVectorElement> {
    /// Dimensionality of every stored vector.
    dim: usize,
    /// Distance function this storage was created for.
    distance: Distance,
    /// Dense vector data, keyed by point offset.
    vectors: ChunkedVectors<T>,
    /// BitVec for deleted flags. Grows dynamically up to last set flag.
    deleted: BitVec,
    /// Current number of deleted vectors.
    deleted_count: usize,
}
/// Construct a new, empty volatile dense storage with `f32` elements.
pub fn new_volatile_dense_vector_storage(dim: usize, distance: Distance) -> VectorStorageEnum {
    VectorStorageEnum::DenseVolatile(VolatileDenseVectorStorage::new(dim, distance))
}
/// Construct a new, empty volatile dense storage with byte elements (tests only).
#[cfg(test)]
pub fn new_volatile_dense_byte_vector_storage(dim: usize, distance: Distance) -> VectorStorageEnum {
    VectorStorageEnum::DenseVolatileByte(VolatileDenseVectorStorage::new(dim, distance))
}
/// Construct a new, empty volatile dense storage with half-precision elements (tests only).
#[cfg(test)]
pub fn new_volatile_dense_half_vector_storage(dim: usize, distance: Distance) -> VectorStorageEnum {
    VectorStorageEnum::DenseVolatileHalf(VolatileDenseVectorStorage::new(dim, distance))
}
impl<T: PrimitiveVectorElement> VolatileDenseVectorStorage<T> {
    /// Create an empty storage for vectors of dimensionality `dim`.
    pub fn new(dim: usize, distance: Distance) -> Self {
        Self {
            dim,
            distance,
            vectors: ChunkedVectors::new(dim),
            deleted: BitVec::new(),
            deleted_count: 0,
        }
    }
    /// Set deleted flag for given key. Returns previous deleted state.
    #[inline]
    fn set_deleted(&mut self, key: PointOffsetType, deleted: bool) -> bool {
        // Un-deleting a key we never stored is a no-op; don't grow the bitvec for it.
        if !deleted && key as usize >= self.vectors.len() {
            return false;
        }
        let was_deleted = bitvec_set_deleted(&mut self.deleted, key, deleted);
        // Keep the cached counter in sync whenever the flag actually flipped.
        match (was_deleted, deleted) {
            (false, true) => self.deleted_count += 1,
            (true, false) => self.deleted_count = self.deleted_count.saturating_sub(1),
            _ => {}
        }
        was_deleted
    }
}
impl<T: PrimitiveVectorElement> DenseVectorStorage<T> for VolatileDenseVectorStorage<T> {
    fn vector_dim(&self) -> usize {
        self.dim
    }
    fn get_dense<P: AccessPattern>(&self, key: PointOffsetType) -> &[T] {
        // Purely in-memory, so the access pattern hint `P` has no effect here.
        self.vectors.get(key as VectorOffsetType)
    }
}
impl<T: PrimitiveVectorElement> VectorStorage for VolatileDenseVectorStorage<T> {
    fn distance(&self) -> Distance {
        self.distance
    }
    fn datatype(&self) -> VectorStorageDatatype {
        T::datatype()
    }
    fn is_on_disk(&self) -> bool {
        // Volatile storage lives entirely in RAM.
        false
    }
    fn total_vector_count(&self) -> usize {
        self.vectors.len()
    }
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> CowVector<'_> {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }
    /// Get vector by key, if it exists.
    fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<CowVector<'_>> {
        // In memory so no optimization to be done for access pattern
        self.vectors
            .get_opt(key as VectorOffsetType)
            .map(|slice| CowVector::from(T::slice_to_float_cow(slice.into())))
    }
    fn insert_vector(
        &mut self,
        key: PointOffsetType,
        vector: VectorRef,
        _hw_counter: &HardwareCounterCell,
    ) -> OperationResult<()> {
        let vector: &[VectorElementType] = vector.try_into()?;
        let vector = T::slice_from_float_cow(Cow::from(vector));
        self.vectors
            .insert(key as VectorOffsetType, vector.as_ref())?;
        // (Re)inserting a vector always clears its deleted flag.
        self.set_deleted(key, false);
        Ok(())
    }
    fn update_from<'a>(
        &mut self,
        other_vectors: &'a mut impl Iterator<Item = (CowVector<'a>, bool)>,
        stopped: &AtomicBool,
    ) -> OperationResult<Range<PointOffsetType>> {
        let start_index = self.vectors.len() as PointOffsetType;
        for (other_vector, other_deleted) in other_vectors {
            check_process_stopped(stopped)?;
            // Do not perform preprocessing - vectors should be already processed
            let other_vector = T::slice_from_float_cow(Cow::try_from(other_vector)?);
            let new_id = self.vectors.push(other_vector.as_ref())? as PointOffsetType;
            // Carry over the deleted flag of each copied vector.
            self.set_deleted(new_id, other_deleted);
        }
        let end_index = self.vectors.len() as PointOffsetType;
        // Range of ids appended by this call.
        Ok(start_index..end_index)
    }
    fn flusher(&self) -> Flusher {
        // Nothing to persist for volatile storage: flushing is a no-op.
        Box::new(|| Ok(()))
    }
    fn files(&self) -> Vec<std::path::PathBuf> {
        vec![]
    }
    fn delete_vector(&mut self, key: PointOffsetType) -> OperationResult<bool> {
        // `set_deleted` returns the previous state; negate to report whether
        // this call actually performed the deletion.
        let is_deleted = !self.set_deleted(key, true);
        Ok(is_deleted)
    }
    fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }
    fn deleted_vector_count(&self) -> usize {
        self.deleted_count
    }
    fn deleted_vector_bitslice(&self) -> &BitSlice {
        self.deleted.as_bitslice()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/dense/mmap_dense_vectors.rs | lib/segment/src/vector_storage/dense/mmap_dense_vectors.rs | use std::io::Write;
use std::mem::{self, MaybeUninit, size_of, transmute};
use std::path::Path;
use std::sync::Arc;
use bitvec::prelude::BitSlice;
use common::ext::BitSliceExt as _;
use common::maybe_uninit::maybe_uninit_fill_from;
use common::types::PointOffsetType;
use fs_err::{File, OpenOptions};
use memmap2::Mmap;
use memory::madvise::{Advice, AdviceSetting, Madviseable};
use memory::mmap_ops::{self, MULTI_MMAP_IS_SUPPORTED};
use memory::mmap_type::{MmapBitSlice, MmapFlusher};
use parking_lot::Mutex;
use crate::common::error_logging::LogError;
use crate::common::operation_error::OperationResult;
use crate::data_types::primitive::PrimitiveVectorElement;
#[cfg(target_os = "linux")]
use crate::vector_storage::async_io::UringReader;
#[cfg(not(target_os = "linux"))]
use crate::vector_storage::async_io_mock::UringReader;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query_scorer::is_read_with_prefetch_efficient_points;
use crate::vector_storage::{AccessPattern, Random, Sequential};
const HEADER_SIZE: usize = 4;
const VECTORS_HEADER: &[u8; HEADER_SIZE] = b"data";
const DELETED_HEADER: &[u8; HEADER_SIZE] = b"drop";
/// Mem-mapped file for dense vectors
#[derive(Debug)]
pub struct MmapDenseVectors<T: PrimitiveVectorElement> {
    /// Dimensionality of each stored vector.
    pub dim: usize,
    /// Total number of vectors; derived from the data file size on open.
    pub num_vectors: usize,
    /// Main vector data mmap for read/write
    ///
    /// Has an exact size to fit a header and `num_vectors` of vectors.
    /// Best suited for random reads.
    mmap: Arc<Mmap>,
    /// Read-only mmap best suited for sequential reads
    ///
    /// `None` on platforms that do not support multiple memory maps to the same file.
    /// Use [`mmap_seq`] utility function to access this mmap if available.
    _mmap_seq: Option<Arc<Mmap>>,
    /// Context for io_uring-base async IO
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))]
    uring_reader: Mutex<Option<UringReader<T>>>,
    /// Memory mapped deletion flags
    deleted: MmapBitSlice,
    /// Current number of deleted vectors.
    pub deleted_count: usize,
}
impl<T: PrimitiveVectorElement> MmapDenseVectors<T> {
    /// Open (or create) the vector data and deletion-flag files at the given
    /// paths and mem-map both. Optionally sets up an io_uring reader.
    pub fn open(
        vectors_path: &Path,
        deleted_path: &Path,
        dim: usize,
        with_async_io: bool,
    ) -> OperationResult<Self> {
        // Allocate/open vectors mmap
        ensure_mmap_file_size(vectors_path, VECTORS_HEADER, None)
            .describe("Create mmap data file")?;
        let mmap = mmap_ops::open_read_mmap(vectors_path, AdviceSetting::Global, false)
            .describe("Open mmap for reading")?;
        // Only open second mmap for sequential reads if supported
        let mmap_seq = if *MULTI_MMAP_IS_SUPPORTED {
            let mmap_seq = mmap_ops::open_read_mmap(
                vectors_path,
                AdviceSetting::Advice(Advice::Sequential),
                false,
            )
            .describe("Open mmap for sequential reading")?;
            Some(Arc::new(mmap_seq))
        } else {
            None
        };
        // Vector count is derived from the file size: everything after the
        // header is densely packed vectors of `dim` elements of `T`.
        let num_vectors = (mmap.len() - HEADER_SIZE) / dim / size_of::<T>();
        // Allocate/open deleted mmap
        let deleted_mmap_size = deleted_mmap_size(num_vectors);
        ensure_mmap_file_size(deleted_path, DELETED_HEADER, Some(deleted_mmap_size as u64))
            .describe("Create mmap deleted file")?;
        let deleted_mmap = mmap_ops::open_write_mmap(deleted_path, AdviceSetting::Global, false)
            .describe("Open mmap deleted for writing")?;
        // Advise kernel that we'll need this page soon so the kernel can prepare
        #[cfg(unix)]
        if let Err(err) = deleted_mmap.advise(memmap2::Advice::WillNeed) {
            log::error!("Failed to advise MADV_WILLNEED for deleted flags: {err}");
        }
        // Transform into mmap BitSlice
        let deleted = MmapBitSlice::try_from(deleted_mmap, deleted_mmap_data_start())?;
        // Count once at load time; `delete` maintains the counter afterwards.
        let deleted_count = deleted.count_ones();
        let uring_reader = if with_async_io {
            // Keep file handle open for async IO
            let vectors_file = File::open(vectors_path)?;
            let raw_size = dim * size_of::<T>();
            Some(UringReader::new(vectors_file, raw_size, HEADER_SIZE)?)
        } else {
            None
        };
        Ok(MmapDenseVectors {
            dim,
            num_vectors,
            mmap: mmap.into(),
            _mmap_seq: mmap_seq,
            uring_reader: Mutex::new(uring_reader),
            deleted,
            deleted_count,
        })
    }
    /// Whether an io_uring reader was set up on open.
    pub fn has_async_reader(&self) -> bool {
        self.uring_reader.lock().is_some()
    }
    pub fn flusher(&self) -> MmapFlusher {
        // Only the deletion flags are written through this struct, so only
        // they need flushing.
        self.deleted.flusher()
    }
    /// Byte offset of vector `key` inside the data mmap (header included),
    /// or `None` if `key` is out of range.
    pub fn data_offset(&self, key: PointOffsetType) -> Option<usize> {
        let vector_data_length = self.dim * size_of::<T>();
        let offset = (key as usize) * vector_data_length + HEADER_SIZE;
        if key >= (self.num_vectors as PointOffsetType) {
            return None;
        }
        Some(offset)
    }
    /// Size of one vector in bytes.
    pub fn raw_size(&self) -> usize {
        self.dim * size_of::<T>()
    }
    /// Helper to get a slice suited for sequential reads if available, otherwise use the main mmap
    #[inline]
    fn mmap_seq(&self) -> Arc<Mmap> {
        #[expect(clippy::used_underscore_binding)]
        self._mmap_seq.clone().unwrap_or_else(|| self.mmap.clone())
    }
    fn raw_vector_offset<P: AccessPattern>(&self, offset: usize) -> &[T] {
        let mmap = if P::IS_SEQUENTIAL {
            &self.mmap_seq()
        } else {
            &self.mmap
        };
        let byte_slice = &mmap[offset..(offset + self.raw_size())];
        // NOTE(review): the transmute keeps the *byte* length as the element
        // count; only the first `self.dim` elements are taken below, which
        // stays within the original byte range. Relies on `T` being
        // plain-old-data and the offset being suitably aligned for `T` --
        // confirm alignment guarantees of the data layout.
        let arr: &[T] = unsafe { transmute(byte_slice) };
        &arr[0..self.dim]
    }
    /// Returns reference to vector data by key
    fn get_vector<P: AccessPattern>(&self, key: PointOffsetType) -> &[T] {
        self.get_vector_opt::<P>(key).expect("vector not found")
    }
    /// Returns an optional reference to vector data by key
    pub fn get_vector_opt<P: AccessPattern>(&self, key: PointOffsetType) -> Option<&[T]> {
        self.data_offset(key)
            .map(|offset| self.raw_vector_offset::<P>(offset))
    }
    /// Resolve a batch of keys into vector slices, written into `vectors`.
    pub fn get_vectors<'a>(
        &'a self,
        keys: &[PointOffsetType],
        vectors: &'a mut [MaybeUninit<&'a [T]>],
    ) -> &'a [&'a [T]] {
        debug_assert_eq!(keys.len(), vectors.len());
        debug_assert!(keys.len() <= VECTOR_READ_BATCH_SIZE);
        // Pick the access-pattern hint once per batch: mostly-contiguous key
        // sets go through the sequential-read mmap.
        if is_read_with_prefetch_efficient_points(keys) {
            let iter = keys.iter().map(|key| self.get_vector::<Sequential>(*key));
            maybe_uninit_fill_from(vectors, iter).0
        } else {
            let iter = keys.iter().map(|key| self.get_vector::<Random>(*key));
            maybe_uninit_fill_from(vectors, iter).0
        }
    }
    /// Marks the key as deleted.
    ///
    /// Returns true if the key was not deleted before, and it is now deleted.
    pub fn delete(&mut self, key: PointOffsetType) -> bool {
        // `replace` returns the previous flag value.
        let is_deleted = !self.deleted.replace(key as usize, true);
        if is_deleted {
            self.deleted_count += 1;
        }
        is_deleted
    }
    pub fn is_deleted_vector(&self, key: PointOffsetType) -> bool {
        self.deleted.get_bit(key as usize).unwrap_or(false)
    }
    /// Get [`BitSlice`] representation for deleted vectors with deletion flags
    ///
    /// The size of this slice is not guaranteed. It may be smaller/larger than the number of
    /// vectors in this segment.
    pub fn deleted_vector_bitslice(&self) -> &BitSlice {
        &self.deleted
    }
    #[cfg(target_os = "linux")]
    fn process_points_uring(
        &self,
        points: impl Iterator<Item = PointOffsetType>,
        callback: impl FnMut(usize, PointOffsetType, &[T]),
    ) -> OperationResult<()> {
        self.uring_reader
            .lock()
            .as_mut()
            .expect("io_uring reader should be initialized")
            .read_stream(points, callback)
    }
    // Synchronous fallback used where io_uring is unavailable.
    #[cfg(not(target_os = "linux"))]
    fn process_points_simple(
        &self,
        points: impl Iterator<Item = PointOffsetType>,
        mut callback: impl FnMut(usize, PointOffsetType, &[T]),
    ) {
        for (idx, point) in points.enumerate() {
            let vector = self.get_vector::<Random>(point);
            callback(idx, point, vector);
        }
    }
    /// Reads vectors for the given ids and calls the callback for each vector.
    /// Tries to utilize asynchronous IO if possible.
    /// In particular, uses io_uring on Linux and simple synchronous IO otherwise.
    pub fn read_vectors_async(
        &self,
        points: impl Iterator<Item = PointOffsetType>,
        callback: impl FnMut(usize, PointOffsetType, &[T]),
    ) -> OperationResult<()> {
        #[cfg(target_os = "linux")]
        {
            self.process_points_uring(points, callback)
        }
        #[cfg(not(target_os = "linux"))]
        {
            self.process_points_simple(points, callback);
            Ok(())
        }
    }
    /// Touch the sequential mmap pages so later reads hit the page cache.
    pub fn populate(&self) {
        #[expect(clippy::used_underscore_binding)]
        if let Some(mmap_seq) = &self._mmap_seq {
            mmap_seq.populate();
        }
    }
}
/// Ensure the given mmap file exists and is the given size
///
/// # Arguments
/// * `path`: path of the file.
/// * `header`: header to set when the file is newly created.
/// * `size`: set the file size in bytes, filled with zeroes.
fn ensure_mmap_file_size(path: &Path, header: &[u8], size: Option<u64>) -> OperationResult<()> {
    if !path.exists() {
        // Fresh file: write the header first, then grow to the requested size
        // (only if that size actually exceeds the header just written).
        let mut file = File::create(path)?;
        file.write_all(header)?;
        if let Some(size) = size
            && size > header.len() as u64
        {
            file.set_len(size)?;
        }
        return Ok(());
    }
    // Existing file: never rewrite the header, only adjust the length.
    if let Some(size) = size {
        OpenOptions::new().write(true).open(path)?.set_len(size)?;
    }
    Ok(())
}
/// Get start position of flags `BitSlice` in deleted mmap.
#[inline]
const fn deleted_mmap_data_start() -> usize {
    // Round the header size up to the next `usize` boundary so the BitSlice
    // that follows it is word-aligned.
    HEADER_SIZE.next_multiple_of(mem::align_of::<usize>())
}
/// Calculate size for deleted mmap to hold the given number of vectors.
///
/// The mmap will hold a file header and an aligned `BitSlice`.
fn deleted_mmap_size(num: usize) -> usize {
    let unit_size = mem::size_of::<usize>();
    // One bit per vector, rounded up to whole bytes, then to whole words.
    let num_units = num.div_ceil(8).div_ceil(unit_size);
    deleted_mmap_data_start() + num_units * unit_size
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/custom_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/custom_query_scorer.rs | use std::borrow::Cow;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::True;
use common::types::{PointOffsetType, ScoreType};
use zerocopy::FromBytes;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{DenseVector, TypedDenseVector};
use crate::spaces::metric::Metric;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query::{Query, TransformInto};
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::{DenseVectorStorage, Random};
/// Scorer for a compound `Query` (one that may hold several example vectors)
/// against a dense vector storage, using metric `TMetric`.
pub struct CustomQueryScorer<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
    TStoredQuery: Query<TypedDenseVector<TElement>>,
> {
    vector_storage: &'a TVectorStorage,
    /// Query with all example vectors preprocessed and converted to `TElement` (see `new`).
    query: TStoredQuery,
    metric: PhantomData<TMetric>,
    _element: PhantomData<TElement>,
    /// Accounts CPU and IO usage of scoring; multipliers are configured in `new`.
    hardware_counter: HardwareCounterCell,
}
impl<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
    TStoredQuery: Query<TypedDenseVector<TElement>>,
> CustomQueryScorer<'a, TElement, TMetric, TVectorStorage, TStoredQuery>
{
    /// Build a scorer, preprocessing every example vector of `query` with the
    /// metric and converting it into the stored element type.
    pub fn new<TInputQuery>(
        query: TInputQuery,
        vector_storage: &'a TVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self
    where
        TInputQuery: Query<DenseVector>
            + TransformInto<TStoredQuery, DenseVector, TypedDenseVector<TElement>>,
    {
        let mut dim = 0;
        let query = query
            .transform(|vector| {
                // Capture dimensionality for the counter multipliers below;
                // assumes all example vectors share one length -- TODO confirm.
                dim = vector.len();
                let preprocessed_vector = TMetric::preprocess(vector);
                Ok(TypedDenseVector::from(TElement::slice_from_float_cow(
                    Cow::from(preprocessed_vector),
                )))
            })
            .unwrap();
        // One CPU unit per similarity call translates to `dim` elements of `TElement`.
        hardware_counter.set_cpu_multiplier(dim * size_of::<TElement>());
        if vector_storage.is_on_disk() {
            hardware_counter.set_vector_io_read_multiplier(dim * size_of::<TElement>());
        } else {
            // In-memory reads are not counted as IO.
            hardware_counter.set_vector_io_read_multiplier(0);
        }
        Self {
            query,
            vector_storage,
            metric: PhantomData,
            _element: PhantomData,
            hardware_counter,
        }
    }
}
impl<
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
    TStoredQuery: Query<TypedDenseVector<TElement>>,
> QueryScorer for CustomQueryScorer<'_, TElement, TMetric, TVectorStorage, TStoredQuery>
{
    type TVector = [TElement];
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let stored = self.vector_storage.get_dense::<Random>(idx);
        // One IO unit per vector read (multiplier configured in `new`).
        self.hardware_counter.vector_io_read().incr();
        self.score(stored)
    }
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert!(ids.len() <= VECTOR_READ_BATCH_SIZE);
        debug_assert_eq!(ids.len(), scores.len());
        // Fetch the whole batch first so the storage can prefetch efficiently.
        let mut vectors = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
        let vectors = self
            .vector_storage
            .get_dense_batch(ids, &mut vectors[..ids.len()]);
        self.hardware_counter.vector_io_read().incr_delta(ids.len());
        for idx in 0..ids.len() {
            scores[idx] = self.score(vectors[idx]);
        }
    }
    #[inline]
    fn score(&self, against: &[TElement]) -> ScoreType {
        let cpu_counter = self.hardware_counter.cpu_counter();
        // Aggregate similarity of `against` over every example vector of the query.
        self.query.score_by(|example| {
            cpu_counter.incr();
            TMetric::similarity(example, against)
        })
    }
    fn score_internal(&self, _point_a: PointOffsetType, _point_b: PointOffsetType) -> ScoreType {
        unimplemented!("Custom scorer can compare against multiple vectors, not just one")
    }
    type SupportsBytes = True;
    fn score_bytes(&self, _enabled: Self::SupportsBytes, bytes: &[u8]) -> ScoreType {
        // Reinterpret the raw bytes as a `[TElement]` slice (validated by zerocopy).
        self.score(<[TElement]>::ref_from_bytes(bytes).unwrap())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/sparse_custom_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/sparse_custom_query_scorer.rs | use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::{DimId, DimWeight};
use crate::vector_storage::query::{Query, TransformInto};
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::{Random, SparseVectorStorage};
/// Scorer for a compound `Query` of sparse example vectors against a sparse
/// vector storage.
pub struct SparseCustomQueryScorer<
    'a,
    TVectorStorage: SparseVectorStorage,
    TQuery: Query<SparseVector>,
> {
    vector_storage: &'a TVectorStorage,
    /// Query whose example vectors have their indices sorted (see `new`).
    query: TQuery,
    hardware_counter: HardwareCounterCell,
}
impl<
    'a,
    TVectorStorage: SparseVectorStorage,
    TQuery: Query<SparseVector> + TransformInto<TQuery, SparseVector, SparseVector>,
> SparseCustomQueryScorer<'a, TVectorStorage, TQuery>
{
    /// Build a scorer; sorts the indices of every example vector once up front.
    pub fn new(
        query: TQuery,
        vector_storage: &'a TVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        let query: TQuery = TransformInto::transform(query, |mut vector| {
            vector.sort_by_indices();
            Ok(vector)
        })
        .unwrap();
        // CPU units are counted per index comparison, weighted by value size.
        hardware_counter.set_cpu_multiplier(size_of::<DimWeight>());
        if vector_storage.is_on_disk() {
            hardware_counter.set_vector_io_read_multiplier(size_of::<DimId>());
        } else {
            // In-memory reads are not counted as IO.
            hardware_counter.set_vector_io_read_multiplier(0);
        }
        Self {
            vector_storage,
            query,
            hardware_counter,
        }
    }
}
impl<TVectorStorage: SparseVectorStorage, TQuery: Query<SparseVector>> QueryScorer
    for SparseCustomQueryScorer<'_, TVectorStorage, TQuery>
{
    type TVector = SparseVector;
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let stored = self
            .vector_storage
            .get_sparse::<Random>(idx)
            .expect("Failed to get sparse vector");
        // not exactly correct for Gridstore where the indices are compressed into u8
        self.hardware_counter
            .vector_io_read()
            .incr_delta(stored.indices.len() + stored.values.len());
        self.query.score_by(|example| {
            // Upper bound on index comparisons for scoring one pair.
            let cpu_units = example.indices.len() + stored.indices.len();
            self.hardware_counter.cpu_counter().incr_delta(cpu_units);
            stored.score(example).unwrap_or(0.0)
        })
    }
    fn score(&self, v: &SparseVector) -> ScoreType {
        self.query.score_by(|example| {
            let cpu_units = v.indices.len() + example.indices.len();
            self.hardware_counter.cpu_counter().incr_delta(cpu_units);
            example.score(v).unwrap_or(0.0)
        })
    }
    fn score_internal(&self, _point_a: PointOffsetType, _point_b: PointOffsetType) -> ScoreType {
        unimplemented!("Custom scorer can compare against multiple vectors, not just one")
    }
    type SupportsBytes = False;
    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this is statically unreachable.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/sparse_metric_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/sparse_metric_query_scorer.rs | use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use sparse::common::sparse_vector::SparseVector;
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::sparse::volatile_sparse_vector_storage::VolatileSparseVectorStorage;
use crate::vector_storage::{Random, SparseVectorStorage};
/// Scorer for a single sparse query vector against a volatile sparse storage.
pub struct SparseMetricQueryScorer<'a> {
    vector_storage: &'a VolatileSparseVectorStorage,
    /// The fixed query vector every stored vector is scored against.
    query: SparseVector,
    hardware_counter: HardwareCounterCell,
}
impl<'a> SparseMetricQueryScorer<'a> {
    pub fn new(
        query: SparseVector,
        vector_storage: &'a VolatileSparseVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        // We will count the number of intersections per pair of vectors.
        hardware_counter.set_cpu_multiplier(1);
        // We don't measure `vector_io_read` because we are dealing with a volatile storage,
        // which is always in memory.
        // If we refactor this into accepting on_disk storages, we would set `vector_io_read_multiplier`
        // to 0 or 1 here, and measure it accordingly.
        Self {
            vector_storage,
            query,
            hardware_counter,
        }
    }
    /// Score two sparse vectors, accounting the comparison work on the CPU counter.
    fn score_sparse(&self, a: &SparseVector, b: &SparseVector) -> ScoreType {
        self.hardware_counter
            .cpu_counter()
            // Calculate the amount of comparisons needed for sparse vector scoring.
            .incr_delta(std::cmp::min(a.len(), b.len()));
        a.score(b).unwrap_or_default()
    }
    /// Score `v2` against the fixed query vector.
    fn score_ref(&self, v2: &SparseVector) -> ScoreType {
        self.score_sparse(&self.query, v2)
    }
}
impl QueryScorer for SparseMetricQueryScorer<'_> {
    type TVector = SparseVector;
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let stored = self
            .vector_storage
            .get_sparse::<Random>(idx)
            .expect("Sparse vector not found")
        ;
        self.score_ref(&stored)
    }
    #[inline]
    fn score(&self, v2: &SparseVector) -> ScoreType {
        self.score_ref(v2)
    }
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert_eq!(ids.len(), scores.len());
        // No prefetch optimization here: the volatile storage is in memory,
        // so the batch is just scored one by one.
        for idx in 0..ids.len() {
            scores[idx] = self.score_ref(
                &self
                    .vector_storage
                    .get_sparse::<Random>(ids[idx])
                    .expect("Sparse vector not found"),
            );
        }
    }
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        let v1 = self
            .vector_storage
            .get_sparse::<Random>(point_a)
            .expect("Sparse vector not found");
        let v2 = self
            .vector_storage
            .get_sparse::<Random>(point_b)
            .expect("Sparse vector not found");
        self.score_sparse(&v1, &v2)
    }
    type SupportsBytes = False;
    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this is statically unreachable.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/mod.rs | lib/segment/src/vector_storage/query_scorer/mod.rs | use bytemuck::TransparentWrapper;
use common::typelevel::{TBool, TOption};
use common::types::{PointOffsetType, ScoreType};
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::TypedMultiDenseVectorRef;
use crate::spaces::metric::Metric;
use crate::types::{MultiVectorComparator, MultiVectorConfig};
use crate::vector_storage::chunked_vector_storage::VectorOffsetType;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
pub mod custom_query_scorer;
pub mod metric_query_scorer;
pub mod multi_custom_query_scorer;
pub mod multi_metric_query_scorer;
pub mod sparse_custom_query_scorer;
pub mod sparse_metric_query_scorer;
pub trait QueryScorer {
    /// Vector representation this scorer compares against (dense slice, sparse vector, ...).
    type TVector: ?Sized;
    /// Score a single stored vector by its internal offset.
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType;
    /// Score a batch of points
    ///
    /// Enable underlying storage to optimize pre-fetching of data
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert!(ids.len() <= VECTOR_READ_BATCH_SIZE);
        debug_assert_eq!(ids.len(), scores.len());
        // no specific implementation for batch scoring
        for (idx, id) in ids.iter().enumerate() {
            scores[idx] = self.score_stored(*id);
        }
    }
    /// Score an externally supplied vector against the query.
    fn score(&self, v2: &Self::TVector) -> ScoreType;
    /// Score one stored vector against another stored vector.
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType;
    /// Type-level flag: whether this scorer can score raw byte representations.
    type SupportsBytes: TBool;
    /// Score a raw byte representation of a vector. The first argument is a
    /// value-level proof; when `SupportsBytes` is the uninhabited false type,
    /// this method can never be called.
    fn score_bytes(&self, _: Self::SupportsBytes, bytes: &[u8]) -> ScoreType;
}
/// Object-safe facade over scorers that support scoring raw byte slices.
pub trait QueryScorerBytes {
    fn score_bytes(&self, bytes: &[u8]) -> ScoreType;
}
/// Transparent wrapper that can only be obtained (via [`Self::new`]) when the
/// underlying scorer's `SupportsBytes` flag is true.
// NOTE(review): `TOption`/`then_some_ref`/`get`/`is_some` come from
// `common::typelevel` -- semantics assumed from usage, confirm against that module.
#[derive(TransparentWrapper)]
#[repr(transparent)]
pub struct QueryScorerBytesImpl<TQueryScorer: QueryScorer>(
    <TQueryScorer::SupportsBytes as TBool>::TOption<TQueryScorer>,
);
impl<TQueryScorer: QueryScorer> QueryScorerBytesImpl<TQueryScorer> {
    /// Returns `Some` wrapper only if the scorer supports byte scoring.
    pub fn new(query_scorer: &TQueryScorer) -> Option<&Self> {
        TQueryScorer::SupportsBytes::then_some_ref(query_scorer).map(Self::wrap_ref)
    }
}
impl<TQueryScorer: QueryScorer> QueryScorerBytes for QueryScorerBytesImpl<TQueryScorer> {
    fn score_bytes(&self, bytes: &[u8]) -> ScoreType {
        self.0.get().score_bytes(self.0.is_some(), bytes)
    }
}
/// Colbert MaxSim metric, metric for multi-dense vectors
/// <https://arxiv.org/pdf/2112.01488.pdf>, figure 1
/// This metric is also implemented in `QuantizedMultivectorStorage` structure for quantized data.
///
/// Disclaimer: this score is not equivalent to the original Colbert metric for Euclidean space because we do not apply the post-processing step.
/// In that case the score value will be different but the ranking will be the same.
/// We do that because of performance reasons and complex sort ordering of the vectors when applying the post-processing step.
pub fn score_max_similarity<T: PrimitiveVectorElement, TMetric: Metric<T>>(
    multi_dense_a: TypedMultiDenseVectorRef<'_, T>,
    multi_dense_b: TypedMultiDenseVectorRef<'_, T>,
) -> ScoreType {
    // Both sides must be non-empty, otherwise `max_sim` below would stay -inf.
    debug_assert!(!multi_dense_a.is_empty());
    debug_assert!(!multi_dense_b.is_empty());
    let mut sum = 0.0;
    for dense_a in multi_dense_a.multi_vectors() {
        let mut max_sim = ScoreType::NEG_INFINITY;
        // manual `max_by` for performance
        for dense_b in multi_dense_b.multi_vectors() {
            let sim = TMetric::similarity(dense_a, dense_b);
            if sim > max_sim {
                max_sim = sim;
            }
        }
        // sum of max similarity
        sum += max_sim;
    }
    sum
}
/// Dispatch multi-vector scoring according to the configured comparator.
/// `MaxSim` is currently the only comparator variant.
fn score_multi<T: PrimitiveVectorElement, TMetric: Metric<T>>(
    multi_vector_config: &MultiVectorConfig,
    multi_dense_a: TypedMultiDenseVectorRef<'_, T>,
    multi_dense_b: TypedMultiDenseVectorRef<'_, T>,
) -> ScoreType {
    match multi_vector_config.comparator {
        MultiVectorComparator::MaxSim => {
            score_max_similarity::<T, TMetric>(multi_dense_a, multi_dense_b)
        }
    }
}
/// Check if ids are rather contiguous to enable further optimizations
/// TODO: this can be smarter, but requires experiments with actual mmap behaviour
/// TODO: For example
///
/// - If the whole batch is less than one page - don't use prefetch
/// - If one vector is bigger than the prefetch size - don't use prefetch
/// - ???
pub fn is_read_with_prefetch_efficient_points(ids: &[PointOffsetType]) -> bool {
    is_read_with_prefetch_efficient(ids.iter().map(|x| *x as usize))
}
/// Same contiguity heuristic as above, but for vector offsets.
pub fn is_read_with_prefetch_efficient_vectors(ids: &[VectorOffsetType]) -> bool {
    is_read_with_prefetch_efficient(ids.iter().copied())
}
/// Core contiguity heuristic: a batch is considered prefetch-friendly when
/// its ids span a range smaller than twice the number of ids ("dense enough").
/// Fewer than two ids never qualify.
fn is_read_with_prefetch_efficient(ids: impl IntoIterator<Item = usize>) -> bool {
    let mut count: usize = 0;
    // Running (min, max) over all seen ids; `None` until the first id.
    let mut bounds: Option<(usize, usize)> = None;
    for id in ids {
        count += 1;
        bounds = Some(match bounds {
            None => (id, id),
            Some((lo, hi)) => (lo.min(id), hi.max(id)),
        });
    }
    match (bounds, count) {
        (Some((lo, hi)), n) if n >= 2 => hi.saturating_sub(lo) < n * 2,
        _ => false,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::data_types::vectors::MultiDenseVectorInternal;
    use crate::spaces::simple::EuclidMetric;
    #[test]
    fn test_check_ids_rather_contiguous() {
        // Fewer than two ids never qualifies for prefetching.
        assert!(!is_read_with_prefetch_efficient_points(&[]));
        assert!(!is_read_with_prefetch_efficient_points(&[1]));
        assert!(is_read_with_prefetch_efficient_points(&[1, 2]));
        // Order does not matter — only the min/max span does.
        assert!(is_read_with_prefetch_efficient_points(&[2, 1]));
        assert!(is_read_with_prefetch_efficient_points(&[1, 2, 3, 9, 10]));
        assert!(is_read_with_prefetch_efficient_points(&[
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10
        ]));
        assert!(is_read_with_prefetch_efficient_points(&[
            1, 2, 3, 4, 5, 6, 7, 8, 9, 11
        ]));
        // A single far-away outlier (1000) makes the span too sparse.
        assert!(!is_read_with_prefetch_efficient_points(&[
            1, 2, 3, 4, 9, 1000, 12, 14
        ]));
    }
    #[test]
    fn test_score_multi_euclidean() {
        let a = MultiDenseVectorInternal::try_from(vec![
            vec![1.0, 2.0, 3.0],
            vec![3.0, 3.0, 3.0],
            vec![4.0, 5.0, 6.0],
        ])
        .unwrap();
        // distance to itself
        let score = score_max_similarity::<f32, EuclidMetric>((&a).into(), (&a).into());
        assert_eq!(score, -0.0);
        let b = MultiDenseVectorInternal::try_from(vec![vec![3.0, 3.0, 3.0], vec![4.0, 2.0, 1.0]])
            .unwrap();
        let score = score_max_similarity::<f32, EuclidMetric>((&a).into(), (&b).into());
        // proper value according to theory should be `5.9777255` but we do not apply post-processing step
        assert_eq!(score, -19.);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/multi_custom_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/multi_custom_query_scorer.rs | use std::marker::PhantomData;
use std::mem::MaybeUninit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use super::score_multi;
use crate::data_types::named_vectors::CowMultiVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
DenseVector, MultiDenseVectorInternal, TypedMultiDenseVector, TypedMultiDenseVectorRef,
};
use crate::spaces::metric::Metric;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query::{Query, TransformInto};
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::{MultiVectorStorage, Random};
/// Scorer for "custom" queries over multi-dense vector storages. The query
/// may hold several example vectors; per-example scores are combined by the
/// query's own `score_by` logic.
pub struct MultiCustomQueryScorer<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
    TQuery: Query<TypedMultiDenseVector<TElement>>,
> {
    vector_storage: &'a TVectorStorage,
    // Query examples, already metric-preprocessed and element-converted.
    query: TQuery,
    metric: PhantomData<TMetric>,
    element: PhantomData<TElement>,
    hardware_counter: HardwareCounterCell,
}
impl<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
    TQuery: Query<TypedMultiDenseVector<TElement>>,
> MultiCustomQueryScorer<'a, TElement, TMetric, TVectorStorage, TQuery>
{
    /// Build the scorer: metric-preprocess every example vector, convert it
    /// to the storage element type, and configure hardware counter multipliers.
    pub fn new<TInputQuery>(
        query: TInputQuery,
        vector_storage: &'a TVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self
    where
        TInputQuery: Query<MultiDenseVectorInternal>
            + TransformInto<TQuery, MultiDenseVectorInternal, TypedMultiDenseVector<TElement>>,
    {
        // `dim` is captured from the (last) transformed example; all examples
        // are assumed to share the same dimensionality — TODO confirm.
        let mut dim = 0;
        let query = query
            .transform(|vector| {
                dim = vector.dim;
                // Preprocess each sub-vector separately into a flattened
                // buffer, then re-assemble with the original dimensionality.
                let mut preprocessed = DenseVector::new();
                for slice in vector.multi_vectors() {
                    preprocessed.extend_from_slice(&TMetric::preprocess(slice.to_vec()));
                }
                let preprocessed = MultiDenseVectorInternal::new(preprocessed, vector.dim);
                let converted =
                    TElement::from_float_multivector(CowMultiVector::Owned(preprocessed))
                        .to_owned();
                Ok(converted)
            })
            .unwrap();
        // Multipliers turn per-sub-vector counter increments into bytes.
        hardware_counter.set_cpu_multiplier(dim * size_of::<TElement>());
        if vector_storage.is_on_disk() {
            hardware_counter.set_vector_io_read_multiplier(dim * size_of::<TElement>());
        } else {
            // In-memory storage: reads are not accounted as IO.
            hardware_counter.set_vector_io_read_multiplier(0);
        }
        Self {
            query,
            vector_storage,
            metric: PhantomData,
            element: PhantomData,
            hardware_counter,
        }
    }
}
impl<
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
    TQuery: Query<TypedMultiDenseVector<TElement>>,
> MultiCustomQueryScorer<'_, TElement, TMetric, TVectorStorage, TQuery>
{
    /// Score `against` versus every example of the query, combining the
    /// per-example results via `TQuery::score_by`.
    #[inline]
    fn score_ref(&self, against: TypedMultiDenseVectorRef<TElement>) -> ScoreType {
        let cpu_counter = self.hardware_counter.cpu_counter();
        let against_vector_count = against.vectors_count();
        self.query.score_by(|example| {
            // Every example sub-vector is compared with every `against` one.
            cpu_counter.incr_delta(example.vectors_count() * against_vector_count);
            score_multi::<TElement, TMetric>(
                self.vector_storage.multi_vector_config(),
                TypedMultiDenseVectorRef::from(example),
                against,
            )
        })
    }
}
impl<
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
    TQuery: Query<TypedMultiDenseVector<TElement>>,
> QueryScorer for MultiCustomQueryScorer<'_, TElement, TMetric, TVectorStorage, TQuery>
{
    type TVector = TypedMultiDenseVector<TElement>;
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let stored = self.vector_storage.get_multi::<Random>(idx);
        // IO is accounted per loaded sub-vector; the multiplier converts to bytes.
        self.hardware_counter
            .vector_io_read()
            .incr_delta(stored.vectors_count());
        self.score_ref(stored)
    }
    /// Batched variant: reads up to `VECTOR_READ_BATCH_SIZE` multi-vectors at
    /// once so the storage can prefetch, then scores each one.
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert!(ids.len() <= VECTOR_READ_BATCH_SIZE);
        debug_assert_eq!(ids.len(), scores.len());
        let mut vectors = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
        let vectors = self
            .vector_storage
            .get_batch_multi(ids, &mut vectors[..ids.len()]);
        let total_loaded_vectors: usize = vectors.iter().map(|v| v.vectors_count()).sum();
        self.hardware_counter
            .vector_io_read()
            .incr_delta(total_loaded_vectors);
        for idx in 0..ids.len() {
            scores[idx] = self.score_ref(vectors[idx]);
        }
    }
    #[inline]
    fn score(&self, against: &TypedMultiDenseVector<TElement>) -> ScoreType {
        self.score_ref(TypedMultiDenseVectorRef::from(against))
    }
    fn score_internal(&self, _point_a: PointOffsetType, _point_b: PointOffsetType) -> ScoreType {
        unimplemented!("Custom scorer can compare against multiple vectors, not just one")
    }
    type SupportsBytes = False;
    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this method can never actually be called.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/metric_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/metric_query_scorer.rs | use std::borrow::Cow;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::True;
use common::types::{PointOffsetType, ScoreType};
use zerocopy::FromBytes;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{TypedDenseVector, VectorElementType};
use crate::spaces::metric::Metric;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::{DenseVectorStorage, Random};
/// Scorer for plain dense vectors: compares one preprocessed query vector
/// against stored vectors via `TMetric::similarity`.
pub struct MetricQueryScorer<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
> {
    vector_storage: &'a TVectorStorage,
    // Query vector, already metric-preprocessed and element-converted.
    query: TypedDenseVector<TElement>,
    metric: PhantomData<TMetric>,
    hardware_counter: HardwareCounterCell,
}
impl<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
> MetricQueryScorer<'a, TElement, TMetric, TVectorStorage>
{
    /// Build the scorer: preprocess the query for the metric, convert it to
    /// the storage element type, and configure hardware counter multipliers.
    pub fn new(
        query: TypedDenseVector<VectorElementType>,
        vector_storage: &'a TVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        let dim = query.len();
        let preprocessed_vector = TMetric::preprocess(query);
        // Multipliers turn per-vector counter increments into bytes.
        hardware_counter.set_cpu_multiplier(dim * size_of::<TElement>());
        if vector_storage.is_on_disk() {
            hardware_counter.set_vector_io_read_multiplier(dim * size_of::<TElement>());
        } else {
            // In-memory storage: reads are not accounted as IO.
            hardware_counter.set_vector_io_read_multiplier(0);
        }
        Self {
            query: TypedDenseVector::from(TElement::slice_from_float_cow(Cow::from(
                preprocessed_vector,
            ))),
            vector_storage,
            metric: PhantomData,
            hardware_counter,
        }
    }
}
impl<
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: DenseVectorStorage<TElement>,
> QueryScorer for MetricQueryScorer<'_, TElement, TMetric, TVectorStorage>
{
    type TVector = [TElement];
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        // One CPU and one IO unit per scored vector (multiplier = bytes/vector).
        self.hardware_counter.cpu_counter().incr();
        self.hardware_counter.vector_io_read().incr();
        TMetric::similarity(&self.query, self.vector_storage.get_dense::<Random>(idx))
    }
    /// Batched variant: reads all vectors at once so the storage can prefetch.
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert!(ids.len() <= VECTOR_READ_BATCH_SIZE);
        debug_assert_eq!(ids.len(), scores.len());
        let mut vectors = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
        let vectors = self
            .vector_storage
            .get_dense_batch(ids, &mut vectors[..ids.len()]);
        self.hardware_counter.cpu_counter().incr_delta(ids.len());
        self.hardware_counter.vector_io_read().incr_delta(ids.len());
        for idx in 0..ids.len() {
            scores[idx] = TMetric::similarity(&self.query, vectors[idx]);
        }
    }
    #[inline]
    fn score(&self, v2: &[TElement]) -> ScoreType {
        self.hardware_counter.cpu_counter().incr();
        TMetric::similarity(&self.query, v2)
    }
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        self.hardware_counter.cpu_counter().incr();
        let v1 = self.vector_storage.get_dense::<Random>(point_a);
        let v2 = self.vector_storage.get_dense::<Random>(point_b);
        TMetric::similarity(v1, v2)
    }
    type SupportsBytes = True;
    fn score_bytes(&self, _enabled: Self::SupportsBytes, bytes: &[u8]) -> ScoreType {
        // Zero-copy reinterpretation of the bytes as `[TElement]`; panics if
        // `bytes` has the wrong size/alignment for `TElement`.
        self.score(<[TElement]>::ref_from_bytes(bytes).unwrap())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/vector_storage/query_scorer/multi_metric_query_scorer.rs | lib/segment/src/vector_storage/query_scorer/multi_metric_query_scorer.rs | use std::marker::PhantomData;
use std::mem::MaybeUninit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::False;
use common::types::{PointOffsetType, ScoreType};
use super::score_multi;
use crate::data_types::named_vectors::CowMultiVector;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::{
DenseVector, MultiDenseVectorInternal, TypedMultiDenseVector, TypedMultiDenseVectorRef,
};
use crate::spaces::metric::Metric;
use crate::vector_storage::common::VECTOR_READ_BATCH_SIZE;
use crate::vector_storage::query_scorer::QueryScorer;
use crate::vector_storage::{MultiVectorStorage, Random};
/// Scorer comparing stored multi-dense vectors against a single multi-vector
/// query, using the storage's configured comparator (MaxSim).
pub struct MultiMetricQueryScorer<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
> {
    vector_storage: &'a TVectorStorage,
    // Query multi-vector, already metric-preprocessed and element-converted.
    query: TypedMultiDenseVector<TElement>,
    metric: PhantomData<TMetric>,
    hardware_counter: HardwareCounterCell,
}
impl<
    'a,
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
> MultiMetricQueryScorer<'a, TElement, TMetric, TVectorStorage>
{
    /// Build the scorer: preprocess each query sub-vector for the metric,
    /// convert to the storage element type, and set counter multipliers.
    pub fn new(
        query: &MultiDenseVectorInternal,
        vector_storage: &'a TVectorStorage,
        mut hardware_counter: HardwareCounterCell,
    ) -> Self {
        // Preprocess sub-vectors one by one into a flattened buffer, then
        // re-assemble with the original dimensionality.
        let mut preprocessed = DenseVector::new();
        for slice in query.multi_vectors() {
            preprocessed.extend_from_slice(&TMetric::preprocess(slice.to_vec()));
        }
        let preprocessed = MultiDenseVectorInternal::new(preprocessed, query.dim);
        // Multipliers turn per-sub-vector counter increments into bytes.
        hardware_counter.set_cpu_multiplier(query.dim * size_of::<TElement>());
        if vector_storage.is_on_disk() {
            hardware_counter.set_vector_io_read_multiplier(query.dim * size_of::<TElement>());
        } else {
            // In-memory storage: reads are not accounted as IO.
            hardware_counter.set_vector_io_read_multiplier(0);
        }
        Self {
            query: TElement::from_float_multivector(CowMultiVector::Owned(preprocessed)).to_owned(),
            vector_storage,
            metric: PhantomData,
            hardware_counter,
        }
    }
    fn score_multi(
        &self,
        multi_dense_a: TypedMultiDenseVectorRef<TElement>,
        multi_dense_b: TypedMultiDenseVectorRef<TElement>,
    ) -> ScoreType {
        self.hardware_counter
            .cpu_counter()
            // Calculate the amount of comparisons needed for multi vector scoring.
            .incr_delta(multi_dense_a.vectors_count() * multi_dense_b.vectors_count());
        score_multi::<TElement, TMetric>(
            self.vector_storage.multi_vector_config(),
            multi_dense_a,
            multi_dense_b,
        )
    }
    /// Score `v2` against the stored (preprocessed) query.
    fn score_ref(&self, v2: TypedMultiDenseVectorRef<TElement>) -> ScoreType {
        self.score_multi(TypedMultiDenseVectorRef::from(&self.query), v2)
    }
}
impl<
    TElement: PrimitiveVectorElement,
    TMetric: Metric<TElement>,
    TVectorStorage: MultiVectorStorage<TElement>,
> QueryScorer for MultiMetricQueryScorer<'_, TElement, TMetric, TVectorStorage>
{
    type TVector = TypedMultiDenseVector<TElement>;
    #[inline]
    fn score_stored(&self, idx: PointOffsetType) -> ScoreType {
        let stored = self.vector_storage.get_multi::<Random>(idx);
        // IO is accounted per loaded sub-vector; the multiplier converts to bytes.
        self.hardware_counter
            .vector_io_read()
            .incr_delta(stored.vectors_count());
        self.score_multi(TypedMultiDenseVectorRef::from(&self.query), stored)
    }
    #[inline]
    fn score(&self, v2: &TypedMultiDenseVector<TElement>) -> ScoreType {
        self.score_multi(
            TypedMultiDenseVectorRef::from(&self.query),
            TypedMultiDenseVectorRef::from(v2),
        )
    }
    /// Batched variant: reads up to `VECTOR_READ_BATCH_SIZE` multi-vectors at
    /// once so the storage can prefetch, then scores each one.
    fn score_stored_batch(&self, ids: &[PointOffsetType], scores: &mut [ScoreType]) {
        debug_assert!(ids.len() <= VECTOR_READ_BATCH_SIZE);
        debug_assert_eq!(ids.len(), scores.len());
        let mut vectors = [MaybeUninit::uninit(); VECTOR_READ_BATCH_SIZE];
        let vectors = self
            .vector_storage
            .get_batch_multi(ids, &mut vectors[..ids.len()]);
        let total_read = vectors.iter().map(|v| v.vectors_count()).sum();
        self.hardware_counter
            .vector_io_read()
            .incr_delta(total_read);
        for idx in 0..ids.len() {
            scores[idx] = self.score_ref(vectors[idx]);
        }
    }
    fn score_internal(&self, point_a: PointOffsetType, point_b: PointOffsetType) -> ScoreType {
        let v1 = self.vector_storage.get_multi::<Random>(point_a);
        let v2 = self.vector_storage.get_multi::<Random>(point_b);
        self.hardware_counter
            .vector_io_read()
            .incr_delta(v1.vectors_count() + v2.vectors_count());
        self.score_multi(v1, v2)
    }
    type SupportsBytes = False;
    fn score_bytes(&self, enabled: Self::SupportsBytes, _: &[u8]) -> ScoreType {
        // `False` is uninhabited, so this method can never actually be called.
        match enabled {}
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/maybe_arc.rs | lib/segment/src/utils/maybe_arc.rs | use std::ops::Deref;
use std::sync::Arc;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
// Structure that acts as `T` most of the time but allows to interchange being wrapped within an `Arc` or not.
// This is helpful when a variable can become memory-intensive but must retain the ability to get cloned.
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)]
#[serde(untagged)] // Make this type transparent when de/serializing and always deserialize as `NoArc`, since it's the first enum kind that matches.
pub enum MaybeArc<T> {
    NoArc(T),
    Arc(Arc<T>),
}
impl<T> MaybeArc<T> {
    /// Create a new `MaybeArc` wrapper that uses an `Arc` internally.
    #[inline]
    pub fn arc(t: T) -> Self {
        Self::Arc(Arc::new(t))
    }
    /// Create a new `MaybeArc` wrapper that doesn't use an `Arc` internally.
    #[inline]
    pub fn no_arc(t: T) -> Self {
        Self::NoArc(t)
    }
    /// Returns `true` if the value is wrapped around an `Arc`.
    pub fn is_arc(&self) -> bool {
        matches!(self, Self::Arc(..))
    }
}
impl<T> AsRef<T> for MaybeArc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        // Relies on the `Deref` impl below to reach the inner `T`.
        self
    }
}
impl<T: Clone> MaybeArc<T> {
    /// Converts the `MaybeArc` back to `T`, potentially cloning the inner value
    /// in case it's an `Arc` that has existing references.
    #[inline]
    pub fn into_inner(self) -> T {
        match self {
            Self::Arc(a) => Arc::unwrap_or_clone(a),
            Self::NoArc(a) => a,
        }
    }
}
impl<T: Clone> Clone for MaybeArc<T> {
    // Cloning the `Arc` variant only bumps the refcount; only `NoArc`
    // performs a deep clone of `T`.
    fn clone(&self) -> Self {
        match self {
            Self::Arc(a) => Self::Arc(a.clone()),
            Self::NoArc(a) => Self::NoArc(a.clone()),
        }
    }
}
impl<T> Deref for MaybeArc<T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        match self {
            Self::Arc(a) => a,
            Self::NoArc(a) => a,
        }
    }
}
impl<T, I> FromIterator<I> for MaybeArc<T>
where
    T: FromIterator<I>,
{
    fn from_iter<U: IntoIterator<Item = I>>(iter: U) -> Self {
        let inner = T::from_iter(iter);
        // Using `NoArc` as default implementation to stay as close as possible to the type `T` and
        // to avoid accidentally introducing overhead.
        // A caller can always manually create the `MaybeArc` if using an `Arc` is preferred.
        MaybeArc::NoArc(inner)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn test_serializing() {
        let original = String::from("42");
        let ma_original = MaybeArc::arc(original.clone());
        let encoded = serde_json::to_string(&ma_original).unwrap();
        let decoded: MaybeArc<String> = serde_json::from_str(&encoded).unwrap();
        assert_eq!(decoded.as_ref(), &original);
        assert!(!decoded.is_arc()); // Always using `NoArc` to deserialize.
        // `MaybeArc` can be deserialized as inner type, since information about arc is not serialized.
        let decoded: String = serde_json::from_str(&encoded).unwrap();
        assert_eq!(decoded, original);
    }
    #[test]
    fn test_deserializing() {
        let original = String::from("42");
        let encoded = serde_json::to_string(&original).unwrap();
        // Any type can be deserialized as `MaybeArc`, defaulting to `NoArc`.
        let decoded: MaybeArc<String> = serde_json::from_str(&encoded).unwrap();
        assert_eq!(decoded.as_ref(), &original);
        assert!(!decoded.is_arc());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/path.rs | lib/segment/src/utils/path.rs | use std::path::Path;
use crate::common::operation_error::{OperationError, OperationResult};
/// Strip `prefix` from `path`, converting a failure into a descriptive
/// `OperationError::service_error` instead of the bare `StripPrefixError`.
pub fn strip_prefix<'a>(path: &'a Path, prefix: &Path) -> OperationResult<&'a Path> {
    match path.strip_prefix(prefix) {
        Ok(stripped) => Ok(stripped),
        Err(err) => Err(OperationError::service_error(format!(
            "failed to strip {prefix:?} prefix from {path:?}: {err}"
        ))),
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/fs.rs | lib/segment/src/utils/fs.rs | use std::fmt;
use std::path::Path;
use fs_err as fs;
use crate::common::operation_error::{OperationError, OperationResult};
/// Move all files and directories from the `dir` directory to the `dest_dir` directory.
///
/// - `<dir>/child/directory` will be merged with `<dest-dir>/child/directory` if one already exists
/// - `<dir>/some/file` will overwrite `<dest-dir>/some/file` if one already exists
pub fn move_all(dir: &Path, dest_dir: &Path) -> OperationResult<()> {
    assert_is_dir(dir)?;
    assert_is_dir(dest_dir)?;
    move_all_impl(dir, dir, dest_dir).map_err(|err| failed_to_move_error(dir, dest_dir, err))
}
/// Recursive worker for [`move_all`]. `base` is the original source root:
/// read errors on `base` itself are passed through (the caller adds context),
/// while nested directories get per-directory context attached here.
fn move_all_impl(base: &Path, dir: &Path, dest_dir: &Path) -> OperationResult<()> {
    let entries = fs::read_dir(dir).map_err(|err| {
        if base != dir {
            failed_to_read_dir_error(dir, err)
        } else {
            err.into()
        }
    })?;
    for entry in entries {
        let entry = entry.map_err(|err| {
            if base != dir {
                failed_to_read_dir_error(dir, err)
            } else {
                err.into()
            }
        })?;
        let path = entry.path();
        let name = path
            .file_name()
            .ok_or_else(|| failed_to_move_error(&path, dest_dir, "source path ends with .."))?;
        let dest_path = dest_dir.join(name);
        if path.is_dir() && dest_path.exists() {
            // Destination directory already exists: merge contents recursively,
            // then remove the (now empty) source directory.
            move_all_impl(base, &path, &dest_path)?;
            fs::remove_dir(path)?;
        } else {
            // Plain file, or a directory with no existing counterpart: rename it over.
            if let Some(dir) = dest_path.parent()
                && !dir.exists()
            {
                fs::create_dir_all(dir).map_err(|err| {
                    failed_to_move_error(
                        &path,
                        &dest_path,
                        format!("failed to create {dir:?} directory: {err}"),
                    )
                })?;
            }
            fs::rename(&path, &dest_path)
                .map_err(|err| failed_to_move_error(&path, &dest_path, err))?;
        }
    }
    Ok(())
}
/// Ensure `dir` exists and is a directory, erroring out otherwise.
fn assert_is_dir(dir: &Path) -> OperationResult<()> {
    if !dir.is_dir() {
        return Err(not_a_dir_error(dir));
    }
    Ok(())
}
/// Error for a path that is missing or not a directory.
fn not_a_dir_error(dir: &Path) -> OperationError {
    OperationError::service_error(format!(
        "path {dir:?} is not a directory (or does not exist)"
    ))
}
/// Error context for a failed directory listing.
fn failed_to_read_dir_error(dir: &Path, err: impl fmt::Display) -> OperationError {
    OperationError::service_error(format!("failed to read {dir:?} directory: {err}"))
}
/// Error context for a failed move/rename.
fn failed_to_move_error(path: &Path, dest: &Path, err: impl fmt::Display) -> OperationError {
    OperationError::service_error(format!("failed to move {path:?} to {dest:?}: {err}"))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/mod.rs | lib/segment/src/utils/mod.rs | pub mod fmt;
pub mod fs;
pub mod maybe_arc;
pub mod mem;
pub mod path;
pub mod scored_point_ties;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/scored_point_ties.rs | lib/segment/src/utils/scored_point_ties.rs | use std::cmp::Ordering;
use crate::types::ScoredPoint;
// Newtype to provide alternative comparator for ScoredPoint which breaks ties by id
pub struct ScoredPointTies<'a>(pub &'a ScoredPoint);
impl<'a> From<&'a ScoredPoint> for ScoredPointTies<'a> {
    fn from(scored_point: &'a ScoredPoint) -> Self {
        ScoredPointTies(scored_point)
    }
}
impl Ord for ScoredPointTies<'_> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0
            .cmp(other.0)
            // for identical scores, we fallback to sorting by ids to have a stable output
            .then_with(|| self.0.id.cmp(&other.0.id))
    }
}
impl PartialOrd for ScoredPointTies<'_> {
    // Delegates to `cmp`, keeping `PartialOrd` consistent with `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Eq for ScoredPointTies<'_> {}
impl PartialEq for ScoredPointTies<'_> {
    // NOTE(review): equality compares the whole `ScoredPoint`, while `cmp`
    // uses `ScoredPoint`'s `Ord` plus an id tie-break — confirm that
    // `cmp == Equal` implies `eq` so `Ord`'s consistency contract holds.
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/mem.rs | lib/segment/src/utils/mem.rs | #[derive(Debug)]
/// Memory information provider.
///
/// On Linux, cgroup limits/usage (container-aware) take precedence when
/// available; otherwise system-wide numbers from `sysinfo` are used.
pub struct Mem {
    #[cfg(target_os = "linux")]
    cgroups: Option<cgroups_mem::CgroupsMem>,
    sysinfo: sysinfo_mem::SysinfoMem,
}
impl Mem {
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        Self {
            #[cfg(target_os = "linux")]
            cgroups: cgroups_mem::CgroupsMem::new(),
            sysinfo: sysinfo_mem::SysinfoMem::new(),
        }
    }
    /// Re-read memory numbers from both sources.
    pub fn refresh(&mut self) {
        #[cfg(target_os = "linux")]
        if let Some(cgroups) = &mut self.cgroups {
            cgroups.refresh();
        }
        self.sysinfo.refresh();
    }
    /// Total memory: the cgroup limit if one is set, otherwise system RAM.
    pub fn total_memory_bytes(&self) -> u64 {
        #[cfg(target_os = "linux")]
        if let Some(cgroups) = &self.cgroups
            && let Some(memory_limit_bytes) = cgroups.memory_limit_bytes()
        {
            return memory_limit_bytes;
        }
        self.sysinfo.total_memory_bytes()
    }
    /// Available memory: `limit - used` within the cgroup if limited,
    /// otherwise the system-wide available memory.
    pub fn available_memory_bytes(&self) -> u64 {
        #[cfg(target_os = "linux")]
        if let Some(cgroups) = &self.cgroups
            && let Some(memory_limit_bytes) = cgroups.memory_limit_bytes()
        {
            return memory_limit_bytes.saturating_sub(cgroups.used_memory_bytes());
        }
        self.sysinfo.available_memory_bytes()
    }
}
/// cgroup-based (v1 or v2) memory accounting, Linux only.
#[cfg(target_os = "linux")]
mod cgroups_mem {
    use cgroups_rs::{Cgroup, hierarchies, memory};
    use procfs::process::Process;
    #[derive(Clone, Debug)]
    pub struct CgroupsMem {
        mem_controller: memory::MemController,
        // Cached values, updated by `refresh()`.
        memory_limit_bytes: Option<u64>,
        used_memory_bytes: u64,
    }
    impl CgroupsMem {
        /// Returns `None` if the current process' memory cgroup cannot be
        /// determined or exposes no memory controller; procfs errors are
        /// logged rather than propagated.
        pub fn new() -> Option<Self> {
            let memory_cgroup_path = match get_current_process_memory_cgroup_path() {
                Ok(memory_cgroup_path) => memory_cgroup_path?,
                Err(err) => {
                    log::error!(
                        "Failed to query current process info \
                         while initializing CgroupsMem: {err}"
                    );
                    return None;
                }
            };
            let cgroup = Cgroup::load(
                hierarchies::auto(),
                memory_cgroup_path.trim_start_matches('/'),
            );
            let mut mem = Self {
                mem_controller: cgroup.controller_of::<memory::MemController>()?.clone(),
                memory_limit_bytes: None,
                used_memory_bytes: 0,
            };
            mem.refresh();
            Some(mem)
        }
        /// Re-read limit and usage from the memory controller.
        pub fn refresh(&mut self) {
            let stat = self.mem_controller.memory_stat();
            // A limit that doesn't fit `u64` (e.g. a negative "unlimited"
            // sentinel) becomes `None`.
            self.memory_limit_bytes = stat.limit_in_bytes.try_into().ok();
            self.used_memory_bytes = stat.usage_in_bytes;
        }
        pub fn memory_limit_bytes(&self) -> Option<u64> {
            self.memory_limit_bytes
        }
        pub fn used_memory_bytes(&self) -> u64 {
            self.used_memory_bytes
        }
    }
    /// Find the memory-relevant cgroup path of the current process: either a
    /// v2 cgroup (empty controller list) or a v1 cgroup that has a `memory`
    /// controller.
    fn get_current_process_memory_cgroup_path() -> procfs::ProcResult<Option<String>> {
        let process = Process::myself()?;
        let cgroups = process.cgroups()?;
        for cgroup in cgroups {
            // TODO: Can a process belong to multiple v2 cgroups!?
            let is_v2_cgroup = cgroup.controllers.is_empty()
                || cgroup
                    .controllers
                    .iter()
                    .all(|controller| controller.is_empty());
            // TODO: Can a process belong to multiple v1 cgroups, with some of these cgroups having the same controllers (e.g., memory)!?
            let is_v1_memory_cgroup = cgroup
                .controllers
                .iter()
                .any(|controller| controller == "memory");
            if is_v2_cgroup || is_v1_memory_cgroup {
                return Ok(Some(cgroup.pathname));
            }
        }
        Ok(None)
    }
}
/// System-wide memory numbers via the `sysinfo` crate (all platforms).
mod sysinfo_mem {
    use sysinfo::{MemoryRefreshKind, RefreshKind, System};
    #[derive(Debug)]
    pub struct SysinfoMem {
        system: System,
    }
    impl SysinfoMem {
        pub fn new() -> Self {
            // Refresh memory stats only; all other system probes stay off.
            let system = System::new_with_specifics(
                RefreshKind::nothing().with_memory(MemoryRefreshKind::everything()),
            );
            Self { system }
        }
        pub fn refresh(&mut self) {
            self.system.refresh_memory();
        }
        pub fn total_memory_bytes(&self) -> u64 {
            self.system.total_memory()
        }
        pub fn available_memory_bytes(&self) -> u64 {
            self.system.available_memory()
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/utils/fmt.rs | lib/segment/src/utils/fmt.rs | use std::fmt;
#[derive(Copy, Clone, Debug)]
/// Thin `Display` adapter for `serde_value::Value`: scalar variants are
/// printed via their own `Display`; compound/opaque variants fall back to
/// their `Debug` representation.
pub struct SerdeValue<'a>(pub &'a serde_value::Value);
impl fmt::Display for SerdeValue<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Borrow a `&dyn Display` for scalar variants; non-scalar variants
        // return early with `Debug` formatting.
        let val: &dyn fmt::Display = match &self.0 {
            serde_value::Value::Bool(val) => val,
            serde_value::Value::U8(val) => val,
            serde_value::Value::U16(val) => val,
            serde_value::Value::U32(val) => val,
            serde_value::Value::U64(val) => val,
            serde_value::Value::I8(val) => val,
            serde_value::Value::I16(val) => val,
            serde_value::Value::I32(val) => val,
            serde_value::Value::I64(val) => val,
            serde_value::Value::F32(val) => val,
            serde_value::Value::F64(val) => val,
            serde_value::Value::Char(val) => val,
            serde_value::Value::String(val) => val,
            serde_value::Value::Unit => &"Unit",
            serde_value::Value::Option(val) => return write!(f, "{val:?}"),
            serde_value::Value::Newtype(val) => return write!(f, "{val:?}"),
            serde_value::Value::Seq(val) => return write!(f, "{val:?}"),
            serde_value::Value::Map(val) => return write!(f, "{val:?}"),
            serde_value::Value::Bytes(val) => return write!(f, "{val:?}"),
        };
        val.fmt(f)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/operation_time_statistics.rs | lib/segment/src/common/operation_time_statistics.rs | use std::sync::Arc;
use std::time::{Duration, Instant};
use chrono::{DateTime, SubsecRound, Utc};
use common::types::DetailsLevel::Level1;
use common::types::TelemetryDetail;
use is_sorted::IsSorted;
use itertools::Itertools as _;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::Serialize;
use smallvec::SmallVec;
use crate::common::anonymize::Anonymize;
const AVG_DATASET_LEN: usize = 128;
const SLIDING_WINDOW_LEN: usize = 8;
#[derive(Serialize, Clone, Default, Debug, JsonSchema, Anonymize)]
pub struct OperationDurationStatistics {
pub count: usize,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
pub fail_count: Option<usize>,
/// The average time taken by 128 latest operations, calculated as a weighted mean.
#[serde(skip_serializing_if = "Option::is_none")]
#[anonymize(false)]
pub avg_duration_micros: Option<f32>,
/// The minimum duration of the operations across all the measurements.
#[serde(skip_serializing_if = "Option::is_none")]
#[anonymize(false)]
pub min_duration_micros: Option<f32>,
/// The maximum duration of the operations across all the measurements.
#[serde(skip_serializing_if = "Option::is_none")]
#[anonymize(false)]
pub max_duration_micros: Option<f32>,
/// The total duration of all operations in microseconds.
#[serde(skip_serializing_if = "Option::is_none")]
#[anonymize(false)]
pub total_duration_micros: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_responded: Option<DateTime<Utc>>,
/// The cumulative histogram of the operation durations. Consists of a list of pairs of
/// [upper_boundary, cumulative_count], sorted by the upper boundary. Note that the last bucket
/// (aka `{le="+Inf"}` in Prometheus terms) is not stored in this list, and `count` should be
/// used instead.
#[serde(skip)] // openapi-generator-cli crashes on this field
#[anonymize(with = anonymize_histogram)]
pub duration_micros_histogram: Vec<(f32, usize)>,
}
pub const DEFAULT_BUCKET_BOUNDARIES_MICROS: [f32; 11] = [
// Milliseconds
1_000.0,
5_000.0,
10_000.0,
20_000.0,
50_000.0,
100_000.0,
500_000.0,
// Seconds
1_000_000.0,
5_000_000.0,
10_000_000.0,
50_000_000.0,
];
/// Accumulates duration measurements for one operation type; snapshots are
/// exported via `get_statistics()`.
#[derive(Debug)]
pub struct OperationDurationsAggregator {
    // Number of successful operations recorded (also serves as the `+Inf` bucket).
    ok_count: usize,
    // Number of failed operations; failures do not contribute to timings.
    fail_count: usize,
    // Ring buffer of the latest successful durations, used for the average.
    timings: [f32; AVG_DATASET_LEN],
    // Next write position in `timings`.
    timing_index: usize,
    // How many times `timing_index` has wrapped around (> 0 means buffer is full).
    timing_loops: usize,
    min_value: Option<f32>,
    max_value: Option<f32>,
    // Sum of all successful durations, in microseconds.
    total_value: u64,
    last_response_date: Option<DateTime<Utc>>,
    /// The non-cumulative count of operations in each bucket.
    /// The total operations count (aka the last bucket, or `{le="+Inf"}` in Prometheus terms) is
    /// not stored in this vector, and `ok_count` should be used instead.
    buckets: SmallVec<[usize; 16]>,
}
/// A wrapper around [`OperationDurationsAggregator`] that calls
/// [`OperationDurationsAggregator::add_operation_result()`] on drop.
pub struct ScopeDurationMeasurer<'a> {
    aggregator: &'a Mutex<OperationDurationsAggregator>,
    // Start time of the measured scope; elapsed time is taken on drop.
    instant: Instant,
    // Whether the operation is recorded as successful on drop (defaults to true).
    success: bool,
}
fn anonymize_histogram(histogram: &[(f32, usize)]) -> Vec<(f32, usize)> {
histogram
.iter()
.map(|(le, count)| (*le, count.anonymize()))
.collect()
}
impl std::ops::Add for OperationDurationStatistics {
    type Output = Self;
    /// Combine statistics from two sources (e.g. two shards) into one summary.
    fn add(self, other: Self) -> Self {
        Self {
            count: self.count + other.count,
            // Sum when both sides report failures, otherwise keep whichever is present.
            fail_count: match (self.fail_count, other.fail_count) {
                (Some(a), Some(b)) => Some(a + b),
                _ => self.fail_count.or(other.fail_count),
            },
            // Means are merged weighted by each side's operation count.
            avg_duration_micros: Self::weighted_mean_duration(
                self.avg_duration_micros,
                self.count,
                other.avg_duration_micros,
                other.count,
            ),
            min_duration_micros: Self::compared_duration(
                self.min_duration_micros,
                other.min_duration_micros,
                |a, b| a < b,
            ),
            max_duration_micros: Self::compared_duration(
                self.max_duration_micros,
                other.max_duration_micros,
                |a, b| a > b,
            ),
            total_duration_micros: match (self.total_duration_micros, other.total_duration_micros) {
                (Some(a), Some(b)) => Some(a + b),
                _ => self.total_duration_micros.or(other.total_duration_micros),
            },
            // Most recent response wins; None compares smaller than any Some.
            last_responded: std::cmp::max(self.last_responded, other.last_responded),
            duration_micros_histogram: merge_histograms(
                &self.duration_micros_histogram,
                &other.duration_micros_histogram,
                self.count,
                other.count,
            ),
        }
    }
}
impl OperationDurationStatistics {
    /// True when no successful operations have been recorded.
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Mean of two optional means, weighted by their respective sample counts.
    /// A missing side is ignored; both missing yields `None`.
    fn weighted_mean_duration(
        duration1: Option<f32>,
        count1: usize,
        duration2: Option<f32>,
        count2: usize,
    ) -> Option<f32> {
        match (duration1, duration2) {
            (Some(d1), Some(d2)) => {
                let (w1, w2) = (count1 as f32, count2 as f32);
                Some((d1 * w1 + d2 * w2) / (w1 + w2))
            }
            (Some(d1), None) => Some(d1),
            (None, d2) => d2,
        }
    }

    /// Pick one of two optional durations using `compare` (e.g. `<` for min,
    /// `>` for max); a lone present value wins by default.
    fn compared_duration(
        duration1: Option<f32>,
        duration2: Option<f32>,
        compare: impl Fn(f32, f32) -> bool,
    ) -> Option<f32> {
        match (duration1, duration2) {
            (Some(d1), Some(d2)) => Some(if compare(d1, d2) { d1 } else { d2 }),
            (Some(d1), None) => Some(d1),
            (None, d2) => d2,
        }
    }
}
impl<'a> ScopeDurationMeasurer<'a> {
    /// Start measuring from this moment.
    pub fn new(aggregator: &'a Mutex<OperationDurationsAggregator>) -> Self {
        Self::new_with_instant(aggregator, Instant::now())
    }

    /// Start measuring from an externally captured `instant`.
    pub fn new_with_instant(
        aggregator: &'a Mutex<OperationDurationsAggregator>,
        instant: Instant,
    ) -> Self {
        Self {
            aggregator,
            instant,
            success: true,
        }
    }

    /// Mark whether the measured operation succeeded (defaults to `true`).
    pub fn set_success(&mut self, success: bool) {
        self.success = success
    }
}
impl Drop for ScopeDurationMeasurer<'_> {
    // Record the elapsed time and success flag into the aggregator when the
    // measurer goes out of scope, so callers cannot forget to report.
    fn drop(&mut self) {
        self.aggregator
            .lock()
            .add_operation_result(self.success, self.instant.elapsed());
    }
}
impl OperationDurationsAggregator {
    /// Create a fresh aggregator, shared behind `Arc<Mutex<..>>` so that
    /// `ScopeDurationMeasurer` and telemetry readers can access it concurrently.
    pub fn new() -> Arc<Mutex<Self>> {
        Arc::new(Mutex::new(Self {
            ok_count: 0,
            fail_count: 0,
            timings: [0.; AVG_DATASET_LEN],
            timing_index: 0,
            timing_loops: 0,
            min_value: None,
            max_value: None,
            total_value: 0,
            last_response_date: Some(Utc::now().round_subsecs(2)),
            buckets: smallvec::smallvec![0; DEFAULT_BUCKET_BOUNDARIES_MICROS.len()],
        }))
    }

    /// Record a single operation. Successful operations update all duration
    /// statistics; failures only bump `fail_count`. Both refresh
    /// `last_response_date`.
    pub fn add_operation_result(&mut self, success: bool, duration: Duration) {
        if success {
            self.total_value += duration.as_micros() as u64;
            let duration = duration.as_micros() as f32;
            self.min_value = Some(match self.min_value {
                Some(min_value) => min_value.min(duration),
                None => duration,
            });
            self.max_value = Some(match self.max_value {
                Some(max_value) => max_value.max(duration),
                None => duration,
            });
            // Buckets are non-cumulative: only the first boundary >= duration is
            // incremented. Durations above the largest boundary fall into the
            // implicit `+Inf` bucket, which is represented by `ok_count`.
            if let Some(bucket_no) = DEFAULT_BUCKET_BOUNDARIES_MICROS
                .iter()
                .position(|&b| duration <= b)
            {
                self.buckets[bucket_no] += 1;
            }
            self.ok_count += 1;
            // Ring buffer of the latest AVG_DATASET_LEN timings for the average.
            self.timings[self.timing_index] = duration;
            self.timing_index += 1;
            if self.timing_index >= AVG_DATASET_LEN {
                self.timing_index = 0;
                self.timing_loops += 1;
            }
        } else {
            self.fail_count += 1;
        }
        self.last_response_date = Some(Utc::now().round_subsecs(2));
    }

    /// Build a statistics snapshot. Detailed fields (min/max/total/last
    /// responded) require `detail.level >= Level1`; the histogram additionally
    /// requires `detail.histograms`.
    pub fn get_statistics(&self, detail: TelemetryDetail) -> OperationDurationStatistics {
        let duration_micros_histogram = if detail.histograms {
            // `convert_histogram` turns the non-cumulative `buckets` into the
            // cumulative form expected by `OperationDurationStatistics`.
            convert_histogram(&DEFAULT_BUCKET_BOUNDARIES_MICROS, &self.buckets)
        } else {
            Vec::new()
        };
        let detailed = detail.level >= Level1;
        OperationDurationStatistics {
            count: self.ok_count,
            fail_count: (self.fail_count > 0).then_some(self.fail_count),
            avg_duration_micros: (self.ok_count > 0).then(|| self.calculate_avg()),
            min_duration_micros: detailed.then_some(self.min_value).flatten(),
            max_duration_micros: detailed.then_some(self.max_value).flatten(),
            total_duration_micros: detailed.then_some(self.total_value),
            last_responded: detailed.then_some(self.last_response_date).flatten(),
            duration_micros_histogram,
        }
    }

    /// Average of the buffered timings, smoothed with a sliding-window mean.
    /// Only called when `ok_count > 0`, so `data` is never empty.
    fn calculate_avg(&self) -> f32 {
        // Reconstruct the timings in chronological order from the ring buffer.
        let data: Vec<f32> = if self.timing_loops > 0 {
            let mut result = Vec::new();
            result.extend_from_slice(&self.timings[self.timing_index..]);
            result.extend_from_slice(&self.timings[..self.timing_index]);
            result
        } else {
            self.timings[..self.timing_index].to_vec()
        };
        let mut sliding_window_avg = vec![0.; data.len()];
        for i in 0..data.len() {
            let from = i.saturating_sub(SLIDING_WINDOW_LEN);
            sliding_window_avg[i] = Self::simple_moving_average(&data[from..i + 1]);
        }
        Self::simple_moving_average(&sliding_window_avg)
    }

    fn simple_moving_average(data: &[f32]) -> f32 {
        data.iter().sum::<f32>() / data.len() as f32
    }
}
/// Convert a fixed-size non-cumulative histogram to a cumulative histogram.
///
/// `counts[i]` is the number of samples in bucket `i`; the result pairs every
/// boundary with the running total of all buckets up to and including it.
/// Counts missing at the tail (when `counts` is shorter than `le_boundaries`)
/// are treated as zero.
fn convert_histogram(le_boundaries: &[f32], counts: &[usize]) -> Vec<(f32, usize)> {
    // NOTE: a previous revision carried a `prev` variable intended for sparse
    // output, but it was only ever assigned `None`, making both of its
    // branches dead code. The output is dense: one entry per boundary.
    let mut result = Vec::with_capacity(le_boundaries.len());
    let mut cumulative_count = 0;
    for (idx, &le) in le_boundaries.iter().enumerate() {
        cumulative_count += counts.get(idx).copied().unwrap_or(0);
        result.push((le, cumulative_count));
    }
    result
}
/// Merge two sparse cumulative histograms, summing the counts of the same boundaries.
/// If one boundary is missing in one of the vectors, assume its value to be the same as the next
/// boundary in the same vector. NOTE: This assumption should be correct when merging histograms
/// produced by `convert_histogram` with the same set of boundaries, but it's not always the case.
fn merge_histograms(
    a: &[(f32, usize)],
    b: &[(f32, usize)],
    total_a: usize,
    total_b: usize,
) -> Vec<(f32, usize)> {
    // TODO: drop is_sorted crate and use Iterator::is_sorted once it's stable
    debug_assert!(
        IsSorted::is_sorted(&mut a.iter().map(|(le, _)| le)),
        "Boundaries are not sorted"
    );
    debug_assert!(
        IsSorted::is_sorted(&mut b.iter().map(|(le, _)| le)),
        "Boundaries are not sorted"
    );
    // Pre-size the output: the merged histogram has one entry per distinct boundary.
    let unique_boundaries =
        itertools::merge(a.iter().map(|(le, _)| le), b.iter().map(|(le, _)| le))
            .dedup()
            .count();
    let mut result = Vec::with_capacity(unique_boundaries);
    let mut it_a = a.iter().copied().peekable();
    let mut it_b = b.iter().copied().peekable();
    // Two-pointer merge. An exhausted side contributes its grand total — the
    // implicit `+Inf` bucket of a cumulative histogram — for every remaining
    // boundary of the other side.
    while it_a.peek().is_some() || it_b.peek().is_some() {
        let (a_le, a_count) = it_a.peek().copied().unwrap_or((f32::INFINITY, total_a));
        let (b_le, b_count) = it_b.peek().copied().unwrap_or((f32::INFINITY, total_b));
        match a_le.partial_cmp(&b_le) {
            Some(std::cmp::Ordering::Less) => {
                // `b` has no entry at `a_le`; its cumulative value at the next
                // boundary (`b_count`) stands in, per the doc-comment assumption.
                result.push((a_le, a_count + b_count));
                it_a.next();
            }
            Some(std::cmp::Ordering::Equal) => {
                result.push((a_le, a_count + b_count));
                it_a.next();
                it_b.next();
            }
            Some(std::cmp::Ordering::Greater) => {
                result.push((b_le, a_count + b_count));
                it_b.next();
            }
            None => {
                // One of the boundaries is NaN, which is not supposed to happen.
                // Skip NaN entries so the loop still terminates.
                if a_le.is_nan() {
                    it_a.next();
                }
                if b_le.is_nan() {
                    it_b.next();
                }
            }
        }
    }
    result
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_convert_histogram() {
        // With all zeroes
        assert_eq!(
            convert_histogram(&[0., 1., 2., 3., 4., 5.], &[0, 0, 0, 0, 0, 0]),
            vec![(0., 0), (1., 0), (2., 0), (3., 0), (4., 0), (5., 0)],
        );
        // Full
        assert_eq!(
            convert_histogram(&[0., 1., 2., 3.], &[1, 20, 300, 4000]),
            vec![(0., 1), (1., 21), (2., 321), (3., 4321)],
        );
        // Sparse
        assert_eq!(
            convert_histogram(&[0., 1., 2., 3., 4., 5., 6.], &[0, 0, 1, 0, 0, 1, 0]),
            vec![
                (0.0, 0),
                (1.0, 0),
                (2.0, 1),
                (3.0, 1),
                (4.0, 1),
                (5.0, 2),
                (6.0, 2)
            ],
        );
    }
    #[test]
    fn test_merge_histograms() {
        // Empty vectors
        assert_eq!(merge_histograms(&[], &[], 9, 90), &[]);
        // Simple case
        #[rustfmt::skip]
        let (a, b, result) = (
            &[(0.0, 1), (1.0, 2), (2.0, 3)],
            &[(0.0, 10), (1.0, 20), (2.0, 30)],
            &[(0.0, 11), (1.0, 22), (2.0, 33)],
        );
        assert_eq!(merge_histograms(a, b, 9, 90), result);
        // Missing boundary in the middle
        #[rustfmt::skip]
        let (a, b, result) = (
            &[(0.0, 1), (1.0, 2), (3.0, 3), (4.0, 4)],
            &[(0.0, 10), (1.0, 20), (2.0, 30), (4.0, 40)],
            &[(0.0, 11), (1.0, 22), (2.0, 33), (3.0, 43), (4.0, 44)],
        );
        assert_eq!(merge_histograms(a, b, 9, 90), result);
        // Missing boundary at the end: the exhausted side contributes its
        // grand total (9) for boundary 1.0, hence 20 + 9 = 29.
        #[rustfmt::skip]
        let (a, b, result) = (
            &[(0.0, 1), ],
            &[(0.0, 10), (1.0, 20)],
            &[(0.0, 11), (1.0, 29)],
        );
        assert_eq!(merge_histograms(a, b, 9, 90), result);
    }
    /// Check that convert-then-merge produces the same result as merge-then-convert, i.e. both
    /// functions play well together.
    #[test]
    fn test_convert_and_merge_histograms() {
        case(&[33, 23, 86, 39, 75], &[86, 50, 47, 84, 52], 256, 319);
        case(&[00, 00, 00, 00, 00], &[86, 50, 47, 84, 52], 256, 319);
        case(&[00, 23, 00, 00, 00], &[00, 00, 00, 84, 00], 30, 90);
        case(&[00, 00, 00, 00, 00], &[86, 50, 47, 84, 52], 0, 319);
        // Helper: merging the converted histograms of `a` and `b` must equal
        // converting their element-wise sum.
        fn case(a: &[usize], b: &[usize], total_a: usize, total_b: usize) {
            assert_eq!(
                merge_histograms(
                    &convert_histogram(&DEFAULT_BUCKET_BOUNDARIES_MICROS, a),
                    &convert_histogram(&DEFAULT_BUCKET_BOUNDARIES_MICROS, b),
                    total_a,
                    total_b,
                ),
                convert_histogram(
                    &DEFAULT_BUCKET_BOUNDARIES_MICROS,
                    &std::iter::zip(a, b).map(|(a, b)| a + b).collect::<Vec<_>>(),
                ),
            );
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/rocksdb_buffered_delete_wrapper.rs | lib/segment/src/common/rocksdb_buffered_delete_wrapper.rs | use std::sync::Arc;
use ahash::AHashSet;
use parking_lot::{Mutex, RwLock};
use rocksdb::DB;
use super::rocksdb_wrapper::DatabaseColumnIterator;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
use crate::common::rocksdb_wrapper::{DatabaseColumnWrapper, LockedDatabaseColumnWrapper};
/// Wrapper around `DatabaseColumnWrapper` that ensures, that keys that were removed from the
/// database are only persisted on flush explicitly.
///
/// This might be required to guarantee consistency of the database component.
/// E.g. copy-on-write implementation should guarantee that data in the `write` component is
/// persisted before it is removed from the `copy` component.
///
/// WARN: this structure is expected to be write-only.
#[derive(Debug)]
pub struct DatabaseColumnScheduledDeleteWrapper {
db: DatabaseColumnWrapper,
deleted_pending_persistence: Arc<Mutex<AHashSet<Vec<u8>>>>,
}
impl Clone for DatabaseColumnScheduledDeleteWrapper {
fn clone(&self) -> Self {
Self {
db: self.db.clone(),
deleted_pending_persistence: self.deleted_pending_persistence.clone(),
}
}
}
impl DatabaseColumnScheduledDeleteWrapper {
pub fn new(db: DatabaseColumnWrapper) -> Self {
Self {
db,
deleted_pending_persistence: Arc::new(Mutex::new(AHashSet::new())),
}
}
pub fn put<K, V>(&self, key: K, value: V) -> OperationResult<()>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.deleted_pending_persistence.lock().remove(key.as_ref());
self.db.put(key, value)
}
pub fn remove<K>(&self, key: K) -> OperationResult<()>
where
K: AsRef<[u8]>,
{
self.deleted_pending_persistence
.lock()
.insert(key.as_ref().to_vec());
Ok(())
}
fn is_pending_removal<K>(&self, key: K) -> bool
where
K: AsRef<[u8]>,
{
self.deleted_pending_persistence
.lock()
.contains(key.as_ref())
}
pub fn flusher(&self) -> Flusher {
let ids_to_delete = self.deleted_pending_persistence.lock().clone();
let wrapper = self.db.clone();
let deleted_pending_persistence = Arc::downgrade(&self.deleted_pending_persistence);
Box::new(move || {
let Some(deleted_pending_persistence_arc) = deleted_pending_persistence.upgrade()
else {
return Ok(());
};
for id in &ids_to_delete {
wrapper.remove(id)?;
}
wrapper.flusher()()?;
Self::reconcile_persisted_deletes(ids_to_delete, &deleted_pending_persistence_arc);
Ok(())
})
}
/// Removes from `deleted_pending_persistence` all results that are flushed.
fn reconcile_persisted_deletes(
persisted: AHashSet<Vec<u8>>,
pending_operations: &Mutex<AHashSet<Vec<u8>>>,
) {
pending_operations
.lock()
.retain(|pending| !persisted.contains(pending));
}
pub fn lock_db(&self) -> LockedDatabaseColumnScheduledDeleteWrapper<'_> {
LockedDatabaseColumnScheduledDeleteWrapper {
base: self.db.lock_db(),
deleted_pending_persistence: &self.deleted_pending_persistence,
}
}
pub fn get_pinned<T, F>(&self, key: &[u8], f: F) -> OperationResult<Option<T>>
where
F: FnOnce(&[u8]) -> T,
{
if self.is_pending_removal(key) {
return Ok(None);
}
self.db.get_pinned(key, f)
}
pub fn recreate_column_family(&self) -> OperationResult<()> {
self.db.recreate_column_family()
}
pub fn get_database(&self) -> Arc<RwLock<DB>> {
self.db.get_database()
}
pub fn get_column_name(&self) -> &str {
self.db.get_column_name()
}
pub fn has_column_family(&self) -> OperationResult<bool> {
self.db.has_column_family()
}
pub fn remove_column_family(&self) -> OperationResult<()> {
self.db.remove_column_family()
}
pub fn get_storage_size_bytes(&self) -> OperationResult<usize> {
self.db.get_storage_size_bytes()
}
}
pub struct LockedDatabaseColumnScheduledDeleteWrapper<'a> {
base: LockedDatabaseColumnWrapper<'a>,
deleted_pending_persistence: &'a Mutex<AHashSet<Vec<u8>>>,
}
impl LockedDatabaseColumnScheduledDeleteWrapper<'_> {
pub fn iter(&self) -> OperationResult<DatabaseColumnScheduledDeleteIterator<'_>> {
Ok(DatabaseColumnScheduledDeleteIterator {
base: self.base.iter()?,
deleted_pending_persistence: self.deleted_pending_persistence,
})
}
}
pub struct DatabaseColumnScheduledDeleteIterator<'a> {
base: DatabaseColumnIterator<'a>,
deleted_pending_persistence: &'a Mutex<AHashSet<Vec<u8>>>,
}
impl Iterator for DatabaseColumnScheduledDeleteIterator<'_> {
type Item = (Box<[u8]>, Box<[u8]>);
fn next(&mut self) -> Option<Self::Item> {
loop {
let (key, value) = self.base.next()?;
if !self
.deleted_pending_persistence
.lock()
.contains(key.as_ref())
{
return Some((key, value));
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/error_logging.rs | lib/segment/src/common/error_logging.rs | use log::debug;
/// Extension trait for attaching a human-readable action description to a
/// result, logged when the result is an error.
pub trait LogError {
    fn describe(self, msg: &str) -> Self;
}
impl<T, E> LogError for Result<T, E> {
    // Logs at debug level and returns the result unchanged, so it can be
    // chained inline. The error value itself is not printed because `E` has no
    // `Debug` bound — `msg` should describe the attempted action.
    fn describe(self, msg: &str) -> Self {
        if self.is_err() {
            debug!("Error while: {msg}");
        }
        self
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/reciprocal_rank_fusion.rs | lib/segment/src/common/reciprocal_rank_fusion.rs | //! Reciprocal Rank Fusion (RRF) is a method for combining rankings from multiple sources.
//! See <https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf>
use std::collections::hash_map::Entry;
use ahash::AHashMap;
use ordered_float::OrderedFloat;
use crate::types::{ExtendedPointId, ScoredPoint};
/// Mitigates the impact of high rankings by outlier systems
pub const DEFAULT_RRF_K: usize = 2;
/// Compute the RRF contribution of a result at zero-based `position`:
/// `1 / (position + k)`, where `k` dampens the influence of top ranks.
fn position_score(position: usize, k: usize) -> f32 {
    let denominator = position as f32 + k as f32;
    1.0 / denominator
}
/// Compute RRF scores for multiple results from different sources.
/// Each response can have a different length.
/// The input scores are irrelevant, only the order matters.
///
/// The output is a single sorted list of ScoredPoint.
/// Does not break ties.
pub fn rrf_scoring(
    responses: impl IntoIterator<Item = Vec<ScoredPoint>>,
    k: usize,
) -> Vec<ScoredPoint> {
    // track scored points by id
    let mut points_by_id: AHashMap<ExtendedPointId, ScoredPoint> = AHashMap::new();
    for response in responses {
        for (pos, mut point) in response.into_iter().enumerate() {
            // Each source contributes 1 / (rank + k) for this point.
            let rrf_score = position_score(pos, k);
            match points_by_id.entry(point.id) {
                Entry::Occupied(mut entry) => {
                    // accumulate score
                    entry.get_mut().score += rrf_score;
                }
                Entry::Vacant(entry) => {
                    point.score = rrf_score;
                    // init score
                    entry.insert(point);
                }
            }
        }
    }
    let mut scores: Vec<_> = points_by_id.into_values().collect();
    scores.sort_unstable_by(|a, b| {
        // sort by score descending
        // OrderedFloat provides a total order, so NaN scores cannot panic the sort.
        OrderedFloat(b.score).cmp(&OrderedFloat(a.score))
    });
    scores
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::ScoredPoint;
fn make_scored_point(id: u64, score: f32) -> ScoredPoint {
ScoredPoint {
id: id.into(),
version: 0,
score,
payload: None,
vector: None,
shard_key: None,
order_value: None,
}
}
#[test]
fn test_rrf_scoring_empty() {
let responses = vec![];
let scored_points = rrf_scoring(responses, DEFAULT_RRF_K);
assert_eq!(scored_points.len(), 0);
}
#[test]
fn test_rrf_scoring_one() {
let responses = vec![vec![make_scored_point(1, 0.9)]];
let scored_points = rrf_scoring(responses, DEFAULT_RRF_K);
assert_eq!(scored_points.len(), 1);
assert_eq!(scored_points[0].id, 1.into());
assert_eq!(scored_points[0].score, 0.5); // 1 / (0 + 2)
}
#[test]
fn test_rrf_scoring() {
let responses = vec![
vec![make_scored_point(2, 0.9), make_scored_point(1, 0.8)],
vec![
make_scored_point(1, 0.7),
make_scored_point(2, 0.6),
make_scored_point(3, 0.5),
],
vec![
make_scored_point(5, 0.9),
make_scored_point(3, 0.5),
make_scored_point(1, 0.4),
],
];
// top 10
let scored_points = rrf_scoring(responses, DEFAULT_RRF_K);
assert_eq!(scored_points.len(), 4);
// assert that the list is sorted
assert!(scored_points.windows(2).all(|w| w[0].score >= w[1].score));
assert_eq!(scored_points.len(), 4);
assert_eq!(scored_points[0].id, 1.into());
assert_eq!(scored_points[0].score, 1.0833334);
assert_eq!(scored_points[1].id, 2.into());
assert_eq!(scored_points[1].score, 0.8333334);
assert_eq!(scored_points[2].id, 3.into());
assert_eq!(scored_points[2].score, 0.5833334);
assert_eq!(scored_points[3].id, 5.into());
assert_eq!(scored_points[3].score, 0.5);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/mmap_bitslice_buffered_update_wrapper.rs | lib/segment/src/common/mmap_bitslice_buffered_update_wrapper.rs | use std::sync::Arc;
use ahash::AHashMap;
use common::ext::BitSliceExt as _;
use common::is_alive_lock::IsAliveLock;
use memory::mmap_type::MmapBitSlice;
use parking_lot::{Mutex, RwLock};
use crate::common::Flusher;
/// A wrapper around `MmapBitSlice` that delays writing changes to the underlying file until they get
/// flushed manually.
/// This expects the underlying MmapBitSlice not to grow in size.
#[derive(Debug)]
pub struct MmapBitSliceBufferedUpdateWrapper {
bitslice: Arc<RwLock<MmapBitSlice>>,
len: usize,
pending_updates: Arc<Mutex<AHashMap<usize, bool>>>,
/// Lock to prevent concurrent flush and drop
is_alive_flush_lock: IsAliveLock,
}
impl MmapBitSliceBufferedUpdateWrapper {
pub fn new(bitslice: MmapBitSlice) -> Self {
let len = bitslice.len();
Self {
bitslice: Arc::new(RwLock::new(bitslice)),
len,
pending_updates: Arc::new(Mutex::new(AHashMap::new())),
is_alive_flush_lock: IsAliveLock::new(),
}
}
/// Sets the bit at `index` to `value` buffered.
///
/// ## Panics
/// Panics if the index is out of bounds.
pub fn set(&self, index: usize, value: bool) {
assert!(index < self.len, "index {index} out of range: {}", self.len);
self.pending_updates.lock().insert(index, value);
}
pub fn get(&self, index: usize) -> Option<bool> {
if index >= self.len {
return None;
}
if let Some(value) = self.pending_updates.lock().get(&index) {
Some(*value)
} else {
self.bitslice.read().get_bit(index)
}
}
pub fn len(&self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Removes from `pending_updates` all results that are flushed.
/// If values in `pending_updates` are changed, do not remove them.
fn reconcile_persisted_updates(
pending_updates: &Mutex<AHashMap<usize, bool>>,
persisted: AHashMap<usize, bool>,
) {
pending_updates
.lock()
.retain(|point_id, a| persisted.get(point_id).is_none_or(|b| a != b));
}
pub fn flusher(&self) -> Flusher {
let updates = {
let updates_guard = self.pending_updates.lock();
if updates_guard.is_empty() {
return Box::new(|| Ok(()));
}
updates_guard.clone()
};
let bitslice = Arc::downgrade(&self.bitslice);
let pending_updates_weak = Arc::downgrade(&self.pending_updates);
let is_alive_flush_lock = self.is_alive_flush_lock.handle();
Box::new(move || {
let (Some(is_alive_flush_guard), Some(bitslice), Some(pending_updates_arc)) = (
is_alive_flush_lock.lock_if_alive(),
bitslice.upgrade(),
pending_updates_weak.upgrade(),
) else {
log::debug!(
"Aborted flushing on a dropped MmapBitSliceBufferedUpdateWrapper instance"
);
return Ok(());
};
let mut mmap_slice_write = bitslice.write();
for (index, value) in updates.iter() {
mmap_slice_write.set(*index, *value);
}
mmap_slice_write.flusher()()?;
Self::reconcile_persisted_updates(&pending_updates_arc, updates);
// Keep the guard till the end of the flush to prevent concurrent drop/flushes
drop(is_alive_flush_guard);
Ok(())
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/rocksdb_buffered_update_wrapper.rs | lib/segment/src/common/rocksdb_buffered_update_wrapper.rs | use std::sync::Arc;
use ahash::{AHashMap, AHashSet};
use parking_lot::Mutex;
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::rocksdb_wrapper::{DatabaseColumnWrapper, LockedDatabaseColumnWrapper};
/// Wrapper around `DatabaseColumnWrapper` that ensures,
/// that all changes are only persisted on flush explicitly.
///
/// This might be required to guarantee consistency of the database component.
/// E.g. copy-on-write implementation should guarantee that data in the `write` component is
/// persisted before it is removed from the `copy` component.
#[derive(Debug)]
pub struct DatabaseColumnScheduledUpdateWrapper {
db: DatabaseColumnWrapper,
pending_operations: Arc<Mutex<PendingOperations>>, // in-flight operations persisted on flush
}
#[derive(Debug, Default, Clone)]
struct PendingOperations {
deleted: AHashSet<Vec<u8>>,
inserted: AHashMap<Vec<u8>, Vec<u8>>,
}
impl DatabaseColumnScheduledUpdateWrapper {
pub fn new(db: DatabaseColumnWrapper) -> Self {
Self {
db,
pending_operations: Arc::new(Mutex::new(PendingOperations::default())),
}
}
pub fn put<K, V>(&self, key: K, value: V) -> OperationResult<()>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let mut pending_guard = self.pending_operations.lock();
pending_guard
.inserted
.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
pending_guard.deleted.remove(key.as_ref());
Ok(())
}
pub fn remove<K>(&self, key: K) -> OperationResult<()>
where
K: AsRef<[u8]>,
{
let key = key.as_ref();
let mut pending_guard = self.pending_operations.lock();
pending_guard.inserted.remove(key);
pending_guard.deleted.insert(key.to_vec());
Ok(())
}
/// Removes from `pending_updates` all results that are flushed.
/// If values in `pending_updates` are changed, do not remove them.
fn reconcile_persisted_updates(
flushed: PendingOperations,
pending_operations: Arc<Mutex<PendingOperations>>,
) {
let mut pending_guard = pending_operations.lock();
for id in flushed.deleted {
pending_guard.deleted.remove(&id);
}
pending_guard
.inserted
.retain(|point_id, a| flushed.inserted.get(point_id).is_none_or(|b| a != b));
}
pub fn flusher(&self) -> Flusher {
let PendingOperations { deleted, inserted } = self.pending_operations.lock().clone();
debug_assert!(
inserted.keys().all(|key| !deleted.contains(key)),
"Key to marked for insertion is also marked for deletion!"
);
let wrapper = self.db.clone();
let pending_operations_arc = self.pending_operations.clone();
Box::new(move || {
for id in deleted.iter() {
wrapper.remove(id)?;
}
for (id, value) in inserted.iter() {
wrapper.put(id, value)?;
}
wrapper.flusher()()?;
Self::reconcile_persisted_updates(
PendingOperations { deleted, inserted },
pending_operations_arc,
);
Ok(())
})
}
pub fn lock_db(&self) -> LockedDatabaseColumnWrapper<'_> {
self.db.lock_db()
}
pub fn get<K>(&self, key: K) -> OperationResult<Vec<u8>>
where
K: AsRef<[u8]>,
{
let pending_guard = self.pending_operations.lock();
if let Some(value) = pending_guard.inserted.get(key.as_ref()) {
return Ok(value.clone());
}
if pending_guard.deleted.contains(key.as_ref()) {
return Err(OperationError::service_error(
"RocksDB get_cf error: key not found",
));
}
self.db.get(key)
}
pub fn get_opt<K>(&self, key: K) -> OperationResult<Option<Vec<u8>>>
where
K: AsRef<[u8]>,
{
let pending_guard = self.pending_operations.lock();
if let Some(value) = pending_guard.inserted.get(key.as_ref()) {
return Ok(Some(value.clone()));
}
if pending_guard.deleted.contains(key.as_ref()) {
return Ok(None);
}
self.db.get_opt(key)
}
pub fn remove_column_family(&self) -> OperationResult<()> {
self.db.remove_column_family()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/anonymize.rs | lib/segment/src/common/anonymize.rs | use std::collections::hash_map::DefaultHasher;
use std::collections::{BTreeMap, HashMap};
use std::hash::{Hash, Hasher};
use chrono::{DateTime, Utc};
use ecow::{EcoString, eco_format};
pub use macros::Anonymize;
use uuid::Uuid;
/// This trait provides a derive macro.
///
/// # Usage example
///
/// ```ignore
/// #[derive(Anonymize)]
/// struct Test {
/// foo: Foo,
/// bar: Bar,
/// baz: Baz,
/// }
/// ```
///
/// This will generate code that calls `anonymize()` recursively on each field:
/// ```ignore
/// impl Anonymize for Test {
/// fn anonymize(&self) -> Self {
/// Self {
/// foo: Anonymize::anonymize(&self.foo),
/// bar: Anonymize::anonymize(&self.bar),
/// baz: Anonymize::anonymize(&self.baz),
/// }
/// }
/// }
/// ```
///
/// # Attributes
///
/// The following attributes can be used to customize the behavior:
/// - `#[anonymize(true)]` to enable anonymization for a field (default).
/// - `#[anonymize(false)]` to disable anonymization for a field.
/// An equivalent of `#[anonymize(with = Clone::clone)]`.
/// - `#[anonymize(value = None)]` to specify a value to replace the field with.
/// - `#[anonymize(with = path:to:function)]` to specify a custom function.
pub trait Anonymize {
fn anonymize(&self) -> Self;
}
impl<T: Anonymize> Anonymize for Option<T> {
fn anonymize(&self) -> Self {
self.as_ref().map(|t| t.anonymize())
}
}
impl<T: Anonymize> Anonymize for Vec<T> {
fn anonymize(&self) -> Self {
self.iter().map(|e| e.anonymize()).collect()
}
}
impl<T: Anonymize> Anonymize for Box<T> {
fn anonymize(&self) -> Self {
Box::new(self.as_ref().anonymize())
}
}
impl<K: Anonymize + Hash + Eq, V: Anonymize> Anonymize for HashMap<K, V> {
fn anonymize(&self) -> Self {
self.iter()
.map(|(k, v)| (k.anonymize(), v.anonymize()))
.collect()
}
}
impl<K: Anonymize + Eq + Ord, V: Anonymize> Anonymize for BTreeMap<K, V> {
fn anonymize(&self) -> Self {
self.iter()
.map(|(k, v)| (k.anonymize(), v.anonymize()))
.collect()
}
}
/// Anonymize the values of a collection, but keeps the keys intact.
pub fn anonymize_collection_values<C, K, V>(collection: &C) -> C
where
for<'a> &'a C: IntoIterator<Item = (&'a K, &'a V)>,
C: FromIterator<(K, V)>,
K: Clone,
V: Anonymize,
{
collection
.into_iter()
.map(|(k, v)| (k.clone(), v.anonymize()))
.collect()
}
/// Anonymize the values of a collection wrapped into an [`Option`], but keeps the keys intact.
///
/// Similar to [`anonymize_collection_values`].
pub fn anonymize_collection_values_opt<C, K, V>(collection_opt: &Option<C>) -> Option<C>
where
for<'a> &'a C: IntoIterator<Item = (&'a K, &'a V)>,
C: FromIterator<(K, V)>,
K: Clone,
V: Anonymize,
{
collection_opt
.as_ref()
.map(|c| anonymize_collection_values(c))
}
impl Anonymize for String {
fn anonymize(&self) -> Self {
let mut hasher = DefaultHasher::new();
self.hash(&mut hasher);
hasher.finish().to_string()
}
}
impl Anonymize for EcoString {
fn anonymize(&self) -> Self {
let mut hasher = DefaultHasher::new();
self.hash(&mut hasher);
eco_format!("{}", hasher.finish())
}
}
impl Anonymize for Uuid {
fn anonymize(&self) -> Self {
let mut hasher = DefaultHasher::new();
self.hash(&mut hasher);
Uuid::from_u128(u128::from(hasher.finish()))
}
}
impl Anonymize for usize {
fn anonymize(&self) -> Self {
let log10 = (*self as f32).log10().round() as u32;
if log10 > 4 {
let skip_digits = log10 - 4;
let coeff = 10usize.pow(skip_digits);
(*self / coeff) * coeff
} else {
*self
}
}
}
impl Anonymize for bool {
fn anonymize(&self) -> Self {
*self
}
}
impl Anonymize for DateTime<Utc> {
fn anonymize(&self) -> Self {
let coeff: f32 = rand::random();
*self + chrono::Duration::try_seconds(((coeff * 20.0) - 10.0) as i64).unwrap_or_default()
}
}
impl Anonymize for serde_json::Value {
fn anonymize(&self) -> Self {
match self {
serde_json::Value::Null => serde_json::Value::Null,
serde_json::Value::Bool(b) => serde_json::Value::Bool(b.anonymize()),
serde_json::Value::Number(n) => serde_json::Value::Number(n.clone()),
serde_json::Value::String(s) => serde_json::Value::String(s.anonymize()),
serde_json::Value::Array(a) => {
serde_json::Value::Array(a.iter().map(|v| v.anonymize()).collect())
}
serde_json::Value::Object(o) => serde_json::Value::Object(
o.iter()
.map(|(k, v)| (k.anonymize(), v.anonymize()))
.collect(),
),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/score_fusion.rs | lib/segment/src/common/score_fusion.rs | use std::iter;
use ahash::AHashMap;
use common::types::ScoreType;
use itertools::{Itertools, MinMaxResult};
use ordered_float::OrderedFloat;
use crate::types::{Order, PointIdType, ScoredPoint};
pub struct ScoreFusion {
/// Defines how to combine the scores of the same point in different lists
pub method: Aggregation,
/// Defines how to normalize the scores in each list
pub norm: Normalization,
/// Multipliers for each list of scores
pub weights: Vec<f32>,
/// Final ordering of the results
pub order: Order,
}
impl ScoreFusion {
/// Params for the distribution-based score fusion
pub fn dbsf() -> Self {
Self {
method: Aggregation::Sum,
norm: Normalization::Distr,
weights: vec![],
order: Order::LargeBetter,
}
}
}
/// Defines how to combine the scores of the same point in different lists
pub enum Aggregation {
/// Sums the scores
Sum,
}
pub enum Normalization {
/// Uses the minimum and maximum scores as extremes
MinMax,
/// Uses the 3rd standard deviation as extremes
Distr,
}
/// Fuse several result lists into a single ranking.
///
/// Each list is normalized (per `params.norm`), weighted (per `params.weights`,
/// missing weights default to 1.0), aggregated per point id (per `params.method`),
/// and finally sorted according to `params.order`.
pub fn score_fusion(
    all_results: impl IntoIterator<Item = Vec<ScoredPoint>>,
    params: ScoreFusion,
) -> Vec<ScoredPoint> {
    let ScoreFusion {
        method,
        norm,
        weights,
        order,
    } = params;
    // Pad the weights with 1.0 so extra result lists are taken as-is.
    let weights = weights.into_iter().chain(iter::repeat(1.0));
    all_results
        .into_iter()
        // normalize
        .map(|points| match norm {
            Normalization::MinMax => min_max_norm(points),
            Normalization::Distr => distr_norm(points),
        })
        // weight each list of points
        .zip(weights)
        .flat_map(|(points, weight)| {
            points.into_iter().map(move |p| ScoredPoint {
                score: p.score * weight,
                ..p
            })
        })
        // combine to deduplicate: the first occurrence of an id is kept,
        // later occurrences only contribute their score
        .fold(
            AHashMap::<PointIdType, ScoredPoint>::new(),
            |mut acc, point| {
                acc.entry(point.id)
                    .and_modify(|entry| match method {
                        Aggregation::Sum => entry.score += point.score,
                    })
                    .or_insert(point);
                acc
            },
        )
        // sort and return
        .into_values()
        .sorted_by(|a, b| match order {
            Order::SmallBetter => a.cmp(b),
            Order::LargeBetter => b.cmp(a),
        })
        .collect()
}
/// Normalizes the scores of the given points between 0.0 and 1.0, using the given minimum and maximum scores as extremes.
fn norm(mut points: Vec<ScoredPoint>, min: ScoreType, max: ScoreType) -> Vec<ScoredPoint> {
    if min == max {
        // Degenerate range: map every score to the midpoint to avoid dividing by zero.
        for point in &mut points {
            point.score = 0.5;
        }
    } else {
        let range = max - min;
        for point in &mut points {
            point.score = (point.score - min) / range;
        }
    }
    points
}
/// Normalizes scores to [0.0, 1.0] using the observed minimum and maximum as extremes.
///
/// Lists with fewer than two elements are returned unchanged (no meaningful range).
pub fn min_max_norm(points: Vec<ScoredPoint>) -> Vec<ScoredPoint> {
    // OrderedFloat provides the total order required by `minmax()`.
    let (min, max) = match points.iter().map(|p| OrderedFloat(p.score)).minmax() {
        MinMaxResult::NoElements | MinMaxResult::OneElement(_) => return points,
        MinMaxResult::MinMax(min, max) => (min.0, max.0),
    };
    norm(points, min, max)
}
/// Welford's method for stable one-pass mean and variance calculation.
/// <https://jonisalonen.com/2013/deriving-welfords-method-for-computing-variance/>
///
/// # Panics
///
/// Panics if the given vector of points has less than 2 elements.
fn welfords_mean_variance(points: &[ScoredPoint]) -> (f32, f32) {
    debug_assert!(
        points.len() > 1,
        "Not enough points to calculate mean and variance"
    );
    let mut mean = 0.0f32;
    // Running sum of squared deviations from the current mean.
    let mut m2 = 0.0f32;
    for (i, point) in points.iter().enumerate() {
        let count = (i + 1) as f32;
        let delta_before = point.score - mean;
        mean += delta_before / count;
        let delta_after = point.score - mean;
        m2 += delta_before * delta_after;
    }
    // Bessel's correction: divide by n - 1 for the sample variance.
    let sample_variance = m2 / (points.len() as f32 - 1.0);
    (mean, sample_variance)
}
/// Estimates the mean and variance of the given points and normalizes them between 0.0 and 1.0, using the 3rd
/// standard deviation as extremes.
pub fn distr_norm(mut points: Vec<ScoredPoint>) -> Vec<ScoredPoint> {
    match points.len() {
        0 => points,
        1 => {
            // A single score has no distribution; pin it to the midpoint.
            points[0].score = 0.5;
            points
        }
        _ => {
            let (mean, variance) = welfords_mean_variance(&points);
            let spread = 3.0 * variance.sqrt();
            norm(points, mean - spread, mean + spread)
        }
    }
}
#[cfg(test)]
mod tests {
    use proptest::prelude::*;
    use super::*;
    // Helper to build a ScoredPoint carrying only an id and a score.
    fn point(id: usize, score: ScoreType) -> ScoredPoint {
        ScoredPoint {
            id: PointIdType::NumId(id as u64),
            version: 0,
            score,
            payload: None,
            vector: None,
            shard_key: None,
            order_value: None,
        }
    }
    // Approximate float equality with mixed absolute/relative tolerance.
    fn assert_close(a: f32, b: f32) {
        // Choose the more relaxed tolerance, absolute or relative based on the values.
        let abs_tolerance = 1e-5f32;
        let rel_tolerance = 1e-4f32;
        let diff = (a - b).abs();
        let max_val = a.abs().max(b.abs());
        let tolerance = abs_tolerance.max(rel_tolerance * max_val);
        assert!(
            diff <= tolerance,
            "{a} is not close to {b}: difference {diff} exceeds tolerance {tolerance}"
        );
    }
    proptest! {
        #[test]
        fn welford_calc_vs_naive(scores in prop::collection::vec(-100.0..100.0f32, 2..1000)) {
            // Reference: straightforward two-pass mean/variance computation.
            let naive_mean = scores.iter().sum::<f32>() / scores.len() as f32;
            let naive_variance = scores.iter().map(|p| (p - naive_mean).powi(2)).sum::<f32>()
                / (scores.len() - 1) as f32;
            let points = scores
                .into_iter()
                .enumerate()
                .map(|(i, s)| point(i, s))
                .collect_vec();
            let (mean, variance) = welfords_mean_variance(&points);
            assert_close(mean, naive_mean);
            assert_close(variance, naive_variance);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/utils.rs | lib/segment/src/common/utils.rs | use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use smallvec::SmallVec;
use crate::data_types::named_vectors::NamedVectors;
use crate::data_types::vectors::VectorInternal;
use crate::index::field_index::FieldIndex;
use crate::types::{PayloadKeyType, VectorNameBuf};
pub type IndexesMap = HashMap<PayloadKeyType, Vec<FieldIndex>>;
/// A container for JSON values, optimized for the common case of a single value.
pub type MultiValue<T> = SmallVec<[T; 1]>;
pub fn check_is_empty<'a>(values: impl IntoIterator<Item = &'a Value>) -> bool {
values.into_iter().all(|x| match x {
serde_json::Value::Null => true,
serde_json::Value::Array(arr) => arr.is_empty(),
_ => false,
})
}
pub fn check_is_null<'a>(values: impl IntoIterator<Item = &'a Value>) -> bool {
values.into_iter().any(|x| x.is_null())
// { "a": [ { "b": null }, { "b": 1 } ] } => true
// { "a": [ { "b": 1 }, { "b": null } ] } => true
// { "a": [ { "b": 1 }, { "b": 2 } ] } => false
}
/// Iterates from `a` down to `b + 1` (inclusive); empty when `a <= b`.
pub fn rev_range(a: usize, b: usize) -> impl Iterator<Item = usize> {
    // Walk `b..a` in reverse and shift by one: equivalent to `(b + 1..=a).rev()`
    // but cannot overflow when `b == usize::MAX` (the range is empty instead).
    (b..a).rev().map(|i| i + 1)
}
/// Merge `source` into `dest` key by key.
/// A JSON `null` in the source acts as a deletion marker for that key.
pub fn merge_map(
    dest: &mut serde_json::Map<String, Value>,
    source: &serde_json::Map<String, Value>,
) {
    for (key, value) in source {
        if value.is_null() {
            dest.remove(key);
        } else {
            dest.insert(key.clone(), value.clone());
        }
    }
}
/// Transpose a map of `name -> column of vectors` into a row-wise list of
/// [`NamedVectors`], where entry `i` holds vector `i` of every name.
///
/// NOTE(review): callers appear to pass columns of equal length; with unequal
/// columns, rows beyond a shorter column simply lack that name.
pub fn transpose_map_into_named_vector<TVector: Into<VectorInternal>>(
    map: HashMap<VectorNameBuf, Vec<TVector>>,
) -> Vec<NamedVectors<'static>> {
    let mut result = Vec::new();
    for (key, values) in map {
        // Only ever grow the result: an unconditional `resize_with(values.len(), ..)`
        // would truncate rows already filled by a longer column iterated earlier.
        if values.len() > result.len() {
            result.resize_with(values.len(), NamedVectors::default);
        }
        for (i, value) in values.into_iter().enumerate() {
            result[i].insert(key.clone(), value.into());
        }
    }
    result
}
/// Hashes an iterator of unique items in an order-independent way.
///
/// The order of items does not affect the resulting hash.
/// Assumes that each item is unique. Suitable for hashing, e.g., sets and maps,
/// but not for vectors or multisets.
#[inline(always)]
pub fn unordered_hash_unique<T: Hash, I: Iterator<Item = T>, H: Hasher>(state: &mut H, iter: I) {
    let mut combined = 0u64;
    for item in iter {
        let mut hasher = DefaultHasher::new();
        item.hash(&mut hasher);
        // NOTE: xor is used, duplicates will cancel each other out
        combined ^= hasher.finish();
    }
    combined.hash(state);
}
/// Deserializer helper for `Option<Vec<T>>` that allows deserializing both single and an array of values.
///
/// Use via `#[serde(with = "MaybeOneOrMany")]` and `#[schemars(with="MaybeOneOrMany<T>")]` field attributes
pub struct MaybeOneOrMany<T>(pub Option<Vec<T>>);
impl<T: Serialize> MaybeOneOrMany<T> {
    /// Serialization keeps the plain `Option<Vec<T>>` shape (null or array).
    pub fn serialize<S>(value: &Option<Vec<T>>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        value.serialize(serializer)
    }
}
impl<'de, T: Deserialize<'de>> MaybeOneOrMany<T> {
    /// Accepts `null` -> `None`, a sequence -> `Some(vec)`, a single map -> `Some(vec![value])`.
    pub fn deserialize<D>(deserializer: D) -> Result<Option<Vec<T>>, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        use serde_untagged::UntaggedEnumVisitor;
        UntaggedEnumVisitor::new()
            .unit(|| Ok(None))
            .seq(|x| x.deserialize().map(Some))
            .map(|x| x.deserialize().map(|x| vec![x]).map(Some))
            .deserialize(deserializer)
    }
}
impl<T: JsonSchema> JsonSchema for MaybeOneOrMany<T> {
    fn json_schema(generator: &mut schemars::r#gen::SchemaGenerator) -> schemars::schema::Schema {
        use schemars::schema::SchemaObject;
        // Private untagged mirror enum yields the `anyOf [T, Vec<T>, null]` schema.
        #[derive(JsonSchema)]
        #[serde(untagged)]
        enum OneOrMany<T> {
            _One(T),
            _Many(Vec<T>),
            _None(()),
        }
        let schema: SchemaObject = <OneOrMany<T>>::json_schema(generator).into();
        schema.into()
    }
    fn schema_name() -> String {
        <Vec<T>>::schema_name()
    }
    fn is_referenceable() -> bool {
        false
    }
}
#[cfg(test)]
mod tests {
    use schemars::{JsonSchema, schema_for};
    use serde::{Deserialize, Serialize};
    use crate::common::utils::MaybeOneOrMany;
    #[test]
    fn test_deserialize_one_or_many() {
        #[derive(Serialize, Deserialize)]
        struct Test {
            #[serde(with = "MaybeOneOrMany")]
            data: Option<Vec<Inner>>,
        }
        #[derive(Serialize, Deserialize)]
        struct Inner {
            key: String,
        }
        // JSON null deserializes to None
        let res = serde_json::from_str::<Test>(
            r#"
        {
            "data": null
        }
        "#,
        )
        .unwrap();
        assert!(res.data.is_none());
        // A single object is wrapped into a one-element Vec
        let res = serde_json::from_str::<Test>(
            r#"
        {
            "data": {
                "key": "value"
            }
        }
        "#,
        )
        .unwrap();
        assert_eq!(res.data.as_ref().unwrap().len(), 1);
        assert_eq!(res.data.as_ref().unwrap()[0].key, "value".to_string());
        // An array deserializes as-is
        let res = serde_json::from_str::<Test>(
            r#"
        {
            "data": [
                {
                    "key": "value"
                }
            ]
        }
        "#,
        )
        .unwrap();
        assert_eq!(res.data.as_ref().unwrap().len(), 1);
        assert_eq!(res.data.as_ref().unwrap()[0].key, "value".to_string());
    }
    #[test]
    fn test_schema_one_or_many() {
        #[derive(JsonSchema)]
        struct Test {
            #[schemars(with = "MaybeOneOrMany<String>")]
            _field: Option<Vec<String>>,
        }
        let mut field_schema = dbg!(
            schemars::schema_for!(Test)
                .schema
                .object
                .unwrap()
                .properties
                .remove("_field")
                .unwrap()
                .into_object(),
        );
        // The generated schema must be an anyOf over: single value, array, null
        assert!(field_schema.subschemas.is_some());
        let any_of = field_schema.subschemas().any_of.clone().unwrap();
        assert_eq!(any_of.len(), 3);
        assert_eq!(
            any_of[0].clone().into_object().instance_type,
            schema_for!(String).schema.instance_type
        );
        assert_eq!(
            any_of[1].clone().into_object().array,
            schema_for!(Vec<String>).schema.array
        );
        assert_eq!(
            any_of[2].clone().into_object().instance_type,
            schema_for!(()).schema.instance_type
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/macros.rs | lib/segment/src/common/macros.rs | /// Similar to `#[derive(JsonSchema)]`, but allows to override `schema_name()`
/// for each generic specialization using the following syntax:
/// ```ignore
/// #[macro_rules_attribute::macro_rules_derive(schemars_rename_generics)]
/// #[derive_args(<i32> => "NewName", ...)]
/// ```
/// Workaround for <https://github.com/GREsau/schemars/issues/193>
macro_rules! schemars_rename_generics {
{
#[doc = $doc:literal]
#[derive_args(
$(,)*
<$($old_params:ident),*> => $new_name:literal
$( $rest:tt )*
)]
$( #[$attrs:meta] )*
$vis:vis struct $name:ident<$($param:ident),*> { $($body:tt)* }
} => {
impl ::schemars::JsonSchema for $name<$($old_params),*> {
fn schema_name() -> String {
$new_name.to_string()
}
fn json_schema(generator: &mut ::schemars::r#gen::SchemaGenerator) -> ::schemars::schema::Schema {
#[doc = $doc]
#[derive(::schemars::JsonSchema)]
$( #[$attrs] )*
struct Temp<$($param),*>{ $($body)* }
Temp::<$($old_params),*>::json_schema(generator)
}
}
$crate::common::macros::schemars_rename_generics! {
#[doc = $doc]
#[derive_args( $( $rest )* )]
$( #[$attrs] )*
$vis struct $name<$($param),*>
{ $($body)* }
}
};
{ #[doc = $doc:literal] #[derive_args()] $( $rest:tt )* } => {}
}
pub(crate) use schemars_rename_generics;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/mod.rs | lib/segment/src/common/mod.rs | pub mod anonymize;
pub mod error_logging;
pub mod flags;
pub mod macros;
pub mod mmap_bitslice_buffered_update_wrapper;
pub mod mmap_slice_buffered_update_wrapper;
pub mod operation_error;
pub mod operation_time_statistics;
pub mod reciprocal_rank_fusion;
#[cfg(feature = "rocksdb")]
pub mod rocksdb_buffered_delete_wrapper;
#[cfg(feature = "rocksdb")]
pub mod rocksdb_buffered_update_wrapper;
#[cfg(feature = "rocksdb")]
pub mod rocksdb_wrapper;
pub mod score_fusion;
pub mod utils;
pub mod validate_snapshot_archive;
pub mod vector_utils;
use std::sync::atomic::AtomicBool;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::data_types::named_vectors::NamedVectors;
use crate::data_types::vectors::{QueryVector, VectorRef};
use crate::types::{SegmentConfig, SparseVectorDataConfig, VectorDataConfig, VectorName};
pub type Flusher = Box<dyn FnOnce() -> OperationResult<()> + Send>;
/// Check that the given vector name is part of the segment config.
///
/// Returns an error if incompatible.
pub fn check_vector_name(
    vector_name: &VectorName,
    segment_config: &SegmentConfig,
) -> OperationResult<()> {
    // TODO(sparse) it's a wrong error check. We use the fact,
    // that get_vector_config_or_error can return only one type of error - VectorNameNotExists
    match get_vector_config_or_error(vector_name, segment_config) {
        Ok(_) => Ok(()),
        Err(_) => get_sparse_vector_config_or_error(vector_name, segment_config).map(|_| ()),
    }
}
/// Check that the given vector name and elements are compatible with the given segment config.
///
/// Returns an error if incompatible.
pub fn check_vector(
    vector_name: &VectorName,
    query_vector: &QueryVector,
    segment_config: &SegmentConfig,
) -> OperationResult<()> {
    match get_vector_config_or_error(vector_name, segment_config) {
        Ok(dense_config) => check_query_vector(query_vector, dense_config),
        Err(_) => {
            // Not a dense vector name; fall back to sparse, propagating the
            // name-not-exists error if it is not sparse either.
            let sparse_config = get_sparse_vector_config_or_error(vector_name, segment_config)?;
            check_query_sparse_vector(query_vector, sparse_config)
        }
    }
}
/// Validate every dense vector contained in the query against the dense vector config.
fn check_query_vector(
    query_vector: &QueryVector,
    vector_config: &VectorDataConfig,
) -> OperationResult<()> {
    match query_vector {
        QueryVector::Nearest(vector) => {
            check_vector_against_config(VectorRef::from(vector), vector_config)?
        }
        QueryVector::RecommendBestScore(reco_query)
        | QueryVector::RecommendSumScores(reco_query) => {
            reco_query.flat_iter().try_for_each(|vector| {
                check_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::Discovery(discovery_query) => {
            discovery_query.flat_iter().try_for_each(|vector| {
                check_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::Context(discovery_context_query) => {
            discovery_context_query.flat_iter().try_for_each(|vector| {
                check_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::FeedbackNaive(feedback_query) => {
            feedback_query.flat_iter().try_for_each(|vector| {
                check_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
    }
    Ok(())
}
/// Validate every sparse vector contained in the query against the sparse vector config.
fn check_query_sparse_vector(
    query_vector: &QueryVector,
    vector_config: &SparseVectorDataConfig,
) -> OperationResult<()> {
    match query_vector {
        QueryVector::Nearest(vector) => {
            check_sparse_vector_against_config(VectorRef::from(vector), vector_config)?
        }
        QueryVector::RecommendBestScore(reco_query)
        | QueryVector::RecommendSumScores(reco_query) => {
            reco_query.flat_iter().try_for_each(|vector| {
                check_sparse_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::Discovery(discovery_query) => {
            discovery_query.flat_iter().try_for_each(|vector| {
                check_sparse_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::Context(discovery_context_query) => {
            discovery_context_query.flat_iter().try_for_each(|vector| {
                check_sparse_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
        QueryVector::FeedbackNaive(feedback_query) => {
            feedback_query.flat_iter().try_for_each(|vector| {
                check_sparse_vector_against_config(VectorRef::from(vector), vector_config)
            })?
        }
    }
    Ok(())
}
/// Check that the given vector name and elements are compatible with the given segment config.
///
/// Returns an error if incompatible.
pub fn check_query_vectors(
    vector_name: &VectorName,
    query_vectors: &[&QueryVector],
    segment_config: &SegmentConfig,
) -> OperationResult<()> {
    match get_vector_config_or_error(vector_name, segment_config) {
        Ok(dense_config) => query_vectors
            .iter()
            .try_for_each(|query| check_query_vector(query, dense_config)),
        Err(_) => {
            let sparse_config = get_sparse_vector_config_or_error(vector_name, segment_config)?;
            query_vectors
                .iter()
                .try_for_each(|query| check_query_sparse_vector(query, sparse_config))
        }
    }
}
/// Check that the given named vectors are compatible with the given segment config.
///
/// Returns an error if incompatible.
pub fn check_named_vectors(
    vectors: &NamedVectors,
    segment_config: &SegmentConfig,
) -> OperationResult<()> {
    vectors
        .iter()
        .try_for_each(|(vector_name, vector_data)| {
            check_vector(vector_name, &vector_data.into(), segment_config)
        })
}
/// Get the dense vector config for the given name, or a `VectorNameNotExists` error.
fn get_vector_config_or_error<'a>(
    vector_name: &VectorName,
    segment_config: &'a SegmentConfig,
) -> OperationResult<&'a VectorDataConfig> {
    match segment_config.vector_data.get(vector_name) {
        Some(config) => Ok(config),
        None => Err(OperationError::vector_name_not_exists(vector_name)),
    }
}
/// Get the sparse vector config for the given name, or a `VectorNameNotExists` error.
fn get_sparse_vector_config_or_error<'a>(
    vector_name: &VectorName,
    segment_config: &'a SegmentConfig,
) -> OperationResult<&'a SparseVectorDataConfig> {
    match segment_config.sparse_vector_data.get(vector_name) {
        Some(config) => Ok(config),
        None => Err(OperationError::vector_name_not_exists(vector_name)),
    }
}
/// Check if the given dense vector data is compatible with the given configuration.
///
/// Returns an error if incompatible.
fn check_vector_against_config(
    vector: VectorRef,
    vector_config: &VectorDataConfig,
) -> OperationResult<()> {
    let expected_dim = vector_config.size;
    // Shared dimensionality check for single dense vectors and multi-vector rows.
    let check_dim = |received_dim: usize| {
        if received_dim == expected_dim {
            Ok(())
        } else {
            Err(OperationError::WrongVectorDimension {
                expected_dim,
                received_dim,
            })
        }
    };
    match vector {
        VectorRef::Dense(dense) => check_dim(dense.len()),
        VectorRef::Sparse(_) => Err(OperationError::WrongSparse),
        VectorRef::MultiDense(multi) => {
            for dense in multi.multi_vectors() {
                check_dim(dense.len())?;
            }
            Ok(())
        }
    }
}
/// Check if the given sparse vector data is compatible with the given configuration.
fn check_sparse_vector_against_config(
    vector: VectorRef,
    _vector_config: &SparseVectorDataConfig,
) -> OperationResult<()> {
    match vector {
        // TODO(sparse) check vector by config
        VectorRef::Sparse(_vector) => Ok(()),
        VectorRef::Dense(_) => Err(OperationError::WrongSparse),
        VectorRef::MultiDense(_) => Err(OperationError::WrongMulti),
    }
}
/// Return a `Cancelled` error when the external stop flag is set.
pub fn check_stopped(is_stopped: &AtomicBool) -> OperationResult<()> {
    if !is_stopped.load(std::sync::atomic::Ordering::Relaxed) {
        Ok(())
    } else {
        Err(OperationError::Cancelled {
            description: "Operation is stopped externally".to_string(),
        })
    }
}
/// Number of bytes in a kibibyte.
pub const BYTES_IN_KB: usize = 1024;
/// Number of bytes in a mebibyte (1024 * 1024).
pub const BYTES_IN_MB: usize = 1_048_576;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/vector_utils.rs | lib/segment/src/common/vector_utils.rs | use std::collections::TryReserveError;
/// Fallibly grow a collection's capacity to at least the given total slot count.
pub trait TrySetCapacity {
    fn try_set_capacity(&mut self, capacity: usize) -> Result<(), TryReserveError>;
}
/// Fallibly grow a collection's capacity to exactly the given total slot count.
pub trait TrySetCapacityExact {
    fn try_set_capacity_exact(&mut self, capacity: usize) -> Result<(), TryReserveError>;
}
impl<T> TrySetCapacity for Vec<T> {
    /// Reserve up to a total of `capacity` slots; no-op when already large enough.
    /// (`try_reserve` may over-allocate per Vec's amortized growth.)
    fn try_set_capacity(&mut self, capacity: usize) -> Result<(), TryReserveError> {
        self.try_reserve(capacity.saturating_sub(self.len()))
    }
}
impl<T> TrySetCapacityExact for Vec<T> {
    /// Reserve exactly a total of `capacity` slots; no-op when already large enough.
    fn try_set_capacity_exact(&mut self, capacity: usize) -> Result<(), TryReserveError> {
        self.try_reserve_exact(capacity.saturating_sub(self.len()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE(review): the exact capacity values asserted below rely on Vec's
    // current amortized growth behavior (doubling) — an implementation detail.
    #[test]
    fn test_try_set_capacity() {
        // Constructing uses exact capacity
        let mut v = vec![1, 2, 3];
        assert_eq!(v.capacity(), 3);
        // Set capacity to 5, but it will double to 6
        v.try_set_capacity(5).unwrap();
        assert_eq!(v.capacity(), 6);
        // Setting capacity again does nothing
        v.try_set_capacity(5).unwrap();
        assert_eq!(v.capacity(), 6);
        // Fill up to capacity
        v.extend([4, 5, 6]);
        assert_eq!(v.capacity(), 6);
        // Push over capacity will double it
        v.push(7);
        assert_eq!(v.capacity(), 12);
        // Set capacity to 100
        v.try_set_capacity(100).unwrap();
        assert_eq!(v.capacity(), 100);
        // Capacity will never shrink
        v.try_set_capacity(5).unwrap();
        assert_eq!(v.capacity(), 100);
    }
    #[test]
    fn test_try_set_capacity_exact() {
        // Constructing uses exact capacity
        let mut v = vec![1, 2, 3];
        assert_eq!(v.capacity(), 3);
        // Set capacity to exactly 5
        v.try_set_capacity_exact(5).unwrap();
        assert_eq!(v.capacity(), 5);
        // Setting capacity again does nothing
        v.try_set_capacity_exact(5).unwrap();
        assert_eq!(v.capacity(), 5);
        // Fill up to capacity
        v.extend([4, 5]);
        assert_eq!(v.capacity(), 5);
        // Push over capacity will double it
        v.push(6);
        assert_eq!(v.capacity(), 10);
        // Set capacity to exactly 100
        v.try_set_capacity_exact(100).unwrap();
        assert_eq!(v.capacity(), 100);
        // Capacity will never shrink
        v.try_set_capacity_exact(5).unwrap();
        assert_eq!(v.capacity(), 100);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/rocksdb_wrapper.rs | lib/segment/src/common/rocksdb_wrapper.rs | use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use parking_lot::RwLock;
//use atomic_refcell::{AtomicRef, AtomicRefCell};
use rocksdb::{ColumnFamily, DB, DBRecoveryMode, LogLevel, Options, WriteOptions};
use crate::common::Flusher;
//use crate::common::arc_rwlock_iterator::ArcRwLockIterator;
use crate::common::operation_error::{OperationError, OperationResult};
const DB_CACHE_SIZE: usize = 10 * 1024 * 1024; // 10 mb
const DB_MAX_LOG_SIZE: usize = 1024 * 1024; // 1 mb
const DB_MAX_OPEN_FILES: usize = 256;
const DB_DELETE_OBSOLETE_FILES_PERIOD: u64 = 3 * 60 * 1_000_000; // 3 minutes in microseconds
// Column family names used by segment storages.
pub const DB_VECTOR_CF: &str = "vector";
pub const DB_PAYLOAD_CF: &str = "payload";
pub const DB_MAPPING_CF: &str = "mapping";
pub const DB_VERSIONS_CF: &str = "version";
/// If there is no Column Family specified, key-value pair is associated with Column Family "default".
pub const DB_DEFAULT_CF: &str = "default";
/// Cheap-to-clone handle to a single column family of a shared RocksDB instance.
#[derive(Clone)]
pub struct DatabaseColumnWrapper {
    database: Arc<RwLock<DB>>,
    column_name: String,
    write_options: Arc<WriteOptions>,
    db_options: Arc<Options>,
}
impl Debug for DatabaseColumnWrapper {
    // Only the column name is printed; the DB handle itself is not Debug-friendly.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DatabaseColumnWrapper")
            .field("column_name", &self.column_name)
            .finish()
    }
}
/// Raw key-value iterator over one column family; see [`DatabaseColumnIterator::new`].
pub struct DatabaseColumnIterator<'a> {
    pub handle: &'a ColumnFamily,
    pub iter: rocksdb::DBRawIterator<'a>,
}
/// Read-locked view of the database, pinned to one column family.
pub struct LockedDatabaseColumnWrapper<'a> {
    guard: parking_lot::RwLockReadGuard<'a, DB>,
    column_name: &'a str,
}
/// RocksDB options (both global and for column families)
pub fn make_db_options() -> Options {
    let mut options: Options = Options::default();
    options.set_write_buffer_size(DB_CACHE_SIZE); // write_buffer_size is enforced per column family.
    options.create_if_missing(true);
    // Keep RocksDB's own logging minimal: errors only, one small recycled log file.
    options.set_log_level(LogLevel::Error);
    options.set_recycle_log_file_num(1);
    options.set_keep_log_file_num(1); // must be greater than zero
    options.set_max_log_file_size(DB_MAX_LOG_SIZE);
    options.set_delete_obsolete_files_period_micros(DB_DELETE_OBSOLETE_FILES_PERIOD);
    options.create_missing_column_families(true);
    options.set_max_open_files(DB_MAX_OPEN_FILES as i32);
    options.set_compression_type(rocksdb::DBCompressionType::Lz4);
    // Qdrant relies on it's own WAL for durability
    options.set_wal_recovery_mode(DBRecoveryMode::TolerateCorruptedTailRecords);
    // Extra consistency checks are cheap enough to keep on in debug builds.
    #[cfg(debug_assertions)]
    {
        options.set_paranoid_checks(true);
    }
    options
}
/// Open (or create) the RocksDB instance at `path`, attaching the payload/default
/// column families plus one column family per entry of `vector_paths`.
pub fn open_db<T: AsRef<str>>(
    path: &Path,
    vector_paths: &[T],
) -> Result<Arc<RwLock<DB>>, rocksdb::Error> {
    let options = make_db_options();
    let mut column_families = vec![DB_PAYLOAD_CF, DB_DEFAULT_CF];
    // We're using new ID tracker, only add RocksDB ID tracker column families if they already exist
    // Not adding them prevents older Qdrant versions from trying to load the unused RocksDB ID tracker
    {
        let exists = check_db_exists(path);
        let existing_column_families = if exists {
            DB::list_cf(&options, path)?
        } else {
            vec![]
        };
        // Add column families to create or open
        // - on database creation: always add CFs
        // - on database open: only add CFs if they already exist
        column_families.extend(
            [DB_MAPPING_CF, DB_VERSIONS_CF]
                .into_iter()
                .filter(|cf| !exists || existing_column_families.iter().any(|other| other == cf)),
        );
    }
    for vector_path in vector_paths {
        column_families.push(vector_path.as_ref());
    }
    // Make sure that all column families have the same options
    let column_with_options = column_families
        .into_iter()
        .map(|cf| (cf, options.clone()))
        .collect::<Vec<_>>();
    let db = DB::open_cf_with_opts(&options, path, column_with_options)?;
    Ok(Arc::new(RwLock::new(db)))
}
/// Whether an initialized RocksDB database exists at `path`,
/// detected via the `CURRENT` file RocksDB maintains in its directory.
pub fn check_db_exists(path: &Path) -> bool {
    path.join("CURRENT").exists()
}
/// Open the RocksDB at `path`, attaching only the column families that already exist.
pub fn open_db_with_existing_cf(path: &Path) -> Result<Arc<RwLock<DB>>, rocksdb::Error> {
    let options = make_db_options();
    let existing_column_families = if check_db_exists(path) {
        DB::list_cf(&options, path)?
    } else {
        vec![]
    };
    // Make sure that all column families have the same options
    let column_with_options = existing_column_families
        .into_iter()
        .map(|cf| (cf, options.clone()))
        .collect::<Vec<_>>();
    let db = DB::open_cf_with_opts(&options, path, column_with_options)?;
    Ok(Arc::new(RwLock::new(db)))
}
impl DatabaseColumnWrapper {
    /// Wrap one column family of a shared RocksDB handle.
    pub fn new(database: Arc<RwLock<DB>>, column_name: &str) -> Self {
        let write_options = Arc::new(Self::make_write_options());
        let db_options = Arc::new(make_db_options());
        Self {
            database,
            column_name: column_name.to_string(),
            write_options,
            db_options,
        }
    }
    /// Insert or overwrite `key` with `value` in this column family.
    pub fn put<K, V>(&self, key: K, value: V) -> OperationResult<()>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        db.put_cf_opt(cf_handle, key, value, &self.write_options)
            .map_err(|err| OperationError::service_error(format!("RocksDB put_cf error: {err}")))?;
        Ok(())
    }
    /// Read `key`; a missing key is reported as a service error.
    pub fn get<K>(&self, key: K) -> OperationResult<Vec<u8>>
    where
        K: AsRef<[u8]>,
    {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        db.get_cf(cf_handle, key)
            .map_err(|err| OperationError::service_error(format!("RocksDB get_cf error: {err}")))?
            .ok_or_else(|| OperationError::service_error("RocksDB get_cf error: key not found"))
    }
    /// Read `key`; a missing key yields `Ok(None)` instead of an error.
    pub fn get_opt<K>(&self, key: K) -> OperationResult<Option<Vec<u8>>>
    where
        K: AsRef<[u8]>,
    {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        db.get_cf(cf_handle, key)
            .map_err(|err| OperationError::service_error(format!("RocksDB get_cf error: {err}")))
    }
    /// Read `key` and map the pinned value in place, avoiding a copy into an owned buffer.
    pub fn get_pinned<T, F>(&self, key: &[u8], f: F) -> OperationResult<Option<T>>
    where
        F: FnOnce(&[u8]) -> T,
    {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        let result = db
            .get_pinned_cf(cf_handle, key)
            .map_err(|err| {
                OperationError::service_error(format!("RocksDB get_pinned_cf error: {err}"))
            })?
            .map(|value| f(&value));
        Ok(result)
    }
    /// Delete `key` from this column family.
    pub fn remove<K>(&self, key: K) -> OperationResult<()>
    where
        K: AsRef<[u8]>,
    {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        db.delete_cf_opt(cf_handle, key, &self.write_options)
            .map_err(|err| {
                OperationError::service_error(format!("RocksDB delete_cf error: {err}"))
            })?;
        Ok(())
    }
    /// Take a read lock on the database, pinned to this column family (for iteration).
    pub fn lock_db(&self) -> LockedDatabaseColumnWrapper<'_> {
        LockedDatabaseColumnWrapper {
            guard: self.database.read(),
            column_name: &self.column_name,
        }
    }
    /// Build a deferred flush closure; the column family is re-resolved when it runs.
    pub fn flusher(&self) -> Flusher {
        let database = self.database.clone();
        let column_name = self.column_name.clone();
        Box::new(move || {
            let db = database.read();
            let Some(column_family) = db.cf_handle(&column_name) else {
                return Err(OperationError::RocksDbColumnFamilyNotFound {
                    name: column_name.clone(),
                });
            };
            db.flush_cf(column_family).map_err(|err| {
                OperationError::service_error(format!("RocksDB flush_cf error: {err}"))
            })?;
            Ok(())
        })
    }
    /// Create the column family if it is missing; no-op otherwise.
    pub fn create_column_family_if_not_exists(&self) -> OperationResult<()> {
        let mut db = self.database.write();
        if db.cf_handle(&self.column_name).is_none() {
            db.create_cf(&self.column_name, &self.db_options)
                .map_err(|err| {
                    OperationError::service_error(format!("RocksDB create_cf error: {err}"))
                })?;
        }
        Ok(())
    }
    /// Drop and re-create the column family, discarding all of its data.
    pub fn recreate_column_family(&self) -> OperationResult<()> {
        self.remove_column_family()?;
        self.create_column_family_if_not_exists()
    }
    /// Drop the column family if it exists; no-op otherwise.
    pub fn remove_column_family(&self) -> OperationResult<()> {
        let mut db = self.database.write();
        if db.cf_handle(&self.column_name).is_some() {
            db.drop_cf(&self.column_name).map_err(|err| {
                OperationError::service_error(format!("RocksDB drop_cf error: {err}"))
            })?;
        }
        Ok(())
    }
    pub fn has_column_family(&self) -> OperationResult<bool> {
        let db = self.database.read();
        Ok(db.cf_handle(&self.column_name).is_some())
    }
    fn make_write_options() -> WriteOptions {
        let mut write_options = WriteOptions::default();
        // No fsync per write; durability comes from the WAL below.
        write_options.set_sync(false);
        // RocksDB WAL is required for durability even if data is flushed
        write_options.disable_wal(false);
        write_options
    }
    /// Resolve the column family handle under an already-held read lock.
    fn get_column_family<'a>(
        &self,
        db: &'a parking_lot::RwLockReadGuard<'_, DB>,
    ) -> OperationResult<&'a ColumnFamily> {
        db.cf_handle(&self.column_name)
            .ok_or_else(|| OperationError::RocksDbColumnFamilyNotFound {
                name: self.column_name.clone(),
            })
    }
    pub fn get_database(&self) -> Arc<RwLock<DB>> {
        self.database.clone()
    }
    pub fn get_column_name(&self) -> &str {
        &self.column_name
    }
    /// Get the size of the storage in bytes
    ///
    /// The size of this column family in bytes, which is equal to the sum of the file size of its "levels"
    pub fn get_storage_size_bytes(&self) -> OperationResult<usize> {
        let db = self.database.read();
        let cf_handle = self.get_column_family(&db)?;
        let size = db.get_column_family_metadata_cf(cf_handle).size;
        Ok(size as usize)
    }
}
impl LockedDatabaseColumnWrapper<'_> {
    /// Iterate over all key-value pairs of the locked column family.
    pub fn iter(&self) -> OperationResult<DatabaseColumnIterator<'_>> {
        DatabaseColumnIterator::new(&self.guard, self.column_name)
    }
}
impl<'a> DatabaseColumnIterator<'a> {
    /// Build a raw iterator over `column_name`, positioned at the first entry.
    ///
    /// Returns a service error when the column family does not exist.
    pub fn new(db: &'a DB, column_name: &str) -> OperationResult<DatabaseColumnIterator<'a>> {
        let handle = db.cf_handle(column_name).ok_or_else(|| {
            OperationError::service_error(format!(
                "RocksDB cf_handle error: Cannot find column family {column_name}"
            ))
        })?;
        let mut iter = db.raw_iterator_cf(&handle);
        iter.seek_to_first();
        Ok(DatabaseColumnIterator { handle, iter })
    }
}
impl Iterator for DatabaseColumnIterator<'_> {
    type Item = (Box<[u8]>, Box<[u8]>);
    /// Yield the current key-value pair (as owned boxed slices) and advance the cursor.
    fn next(&mut self) -> Option<Self::Item> {
        // `valid()` turns false once the cursor ran past the end or hit an error.
        if !self.iter.valid() {
            return None;
        }
        let key = Box::from(self.iter.key().unwrap());
        let value = Box::from(self.iter.value().unwrap());
        // Advance so the following call observes the next entry.
        self.iter.next();
        Some((key, value))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/mmap_slice_buffered_update_wrapper.rs | lib/segment/src/common/mmap_slice_buffered_update_wrapper.rs | use std::sync::Arc;
use ahash::AHashMap;
use common::is_alive_lock::IsAliveLock;
use common::types::PointOffsetType;
use memory::mmap_type::MmapSlice;
use parking_lot::{Mutex, RwLock};
use crate::common::Flusher;
/// A wrapper around `MmapSlice` that delays writing changes to the underlying file until they get
/// flushed manually.
/// This expects the underlying MmapSlice not to grow in size.
///
/// WARN: this structure is expected to be write-only.
#[derive(Debug)]
pub struct MmapSliceBufferedUpdateWrapper<T>
where
    T: 'static,
{
    // Backing mmap'ed slice; only written during flush.
    mmap_slice: Arc<RwLock<MmapSlice<T>>>,
    // Length of the slice at construction time; `set` rejects indices beyond it.
    len: usize,
    // Buffered writes keyed by slot index, applied to the mmap on flush.
    pending_updates: Arc<Mutex<AHashMap<PointOffsetType, T>>>,
    is_alive_lock: IsAliveLock,
}
impl<T> MmapSliceBufferedUpdateWrapper<T>
where
    T: 'static,
{
    /// Wrap the given slice; its current length becomes the fixed bound for updates.
    pub fn new(mmap_slice: MmapSlice<T>) -> Self {
        let len = mmap_slice.len();
        Self {
            mmap_slice: Arc::new(RwLock::new(mmap_slice)),
            len,
            pending_updates: Arc::new(Mutex::new(AHashMap::new())),
            is_alive_lock: IsAliveLock::new(),
        }
    }
    /// Sets the item at `index` to `value` buffered.
    ///
    /// ## Panics
    /// Panics if the index is out of bounds.
    pub fn set(&self, index: PointOffsetType, value: T) {
        assert!(
            (index as usize) < self.len,
            "index {index} out of range: {}",
            self.len
        );
        self.pending_updates.lock().insert(index, value);
    }
}
impl<T> MmapSliceBufferedUpdateWrapper<T>
where
    T: 'static + Sync + Send + Clone + PartialEq,
{
    /// Builds a flusher that applies the currently pending updates to the
    /// mmap slice, persists it, and removes the successfully persisted
    /// entries from the pending buffer.
    ///
    /// The returned closure holds only weak references plus an alive-handle,
    /// so it degrades to a logged no-op if the wrapper is dropped before the
    /// flush runs.
    pub fn flusher(&self) -> Flusher {
        // Snapshot pending updates at flusher-creation time; updates arriving
        // later are left for a subsequent flush.
        let updates = {
            let updates_guard = self.pending_updates.lock();
            if updates_guard.is_empty() {
                // Nothing pending: return a trivial flusher.
                return Box::new(|| Ok(()));
            }
            updates_guard.clone()
        };
        let pending_updates_weak = Arc::downgrade(&self.pending_updates);
        let slice = Arc::downgrade(&self.mmap_slice);
        let is_alive_handle = self.is_alive_lock.handle();
        Box::new(move || {
            // Abort quietly if the wrapper (or its storage) is already gone.
            let (Some(is_alive_guard), Some(pending_updates_arc), Some(slice)) = (
                is_alive_handle.lock_if_alive(),
                pending_updates_weak.upgrade(),
                slice.upgrade(),
            ) else {
                log::debug!(
                    "Aborted flushing on a dropped MmapSliceBufferedUpdateWrapper instance"
                );
                return Ok(());
            };
            // Write the snapshot into the slice, then persist it.
            let mut mmap_slice_write = slice.write();
            for (&index, value) in &updates {
                mmap_slice_write[index as usize] = value.clone();
            }
            mmap_slice_write.flusher()()?;
            Self::reconcile_persisted_changes(&pending_updates_arc, updates);
            // Hold the alive-guard until the very end so a concurrent drop
            // cannot race with this flush.
            drop(is_alive_guard);
            Ok(())
        })
    }

    /// Removes the persisted updates from the pending ones.
    ///
    /// An entry stays pending if it was overwritten with a *different* value
    /// after the flushed snapshot was taken.
    fn reconcile_persisted_changes(
        pending: &Mutex<AHashMap<PointOffsetType, T>>,
        persisted: AHashMap<PointOffsetType, T>,
    ) {
        pending.lock().retain(|point_offset, pending_value| {
            persisted
                .get(point_offset)
                .is_none_or(|persisted_value| pending_value != persisted_value)
        });
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/operation_error.rs | lib/segment/src/common/operation_error.rs | use std::backtrace::Backtrace;
use std::collections::TryReserveError;
use std::io::{Error as IoError, ErrorKind};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use atomicwrites::Error as AtomicIoError;
use gridstore::error::GridstoreError;
use io::file_operations::FileStorageError;
use memory::mmap_type::Error as MmapError;
use rayon::ThreadPoolBuildError;
use thiserror::Error;
use crate::types::{PayloadKeyType, PointIdType, SeqNumberType, VectorNameBuf};
use crate::utils::mem::Mem;
pub const PROCESS_CANCELLED_BY_SERVICE_MESSAGE: &str = "process cancelled by service";
/// All errors the segment layer can produce.
///
/// NOTE(review): every variant below carries its own `#[error(...)]` message,
/// so the enum-level `#[error("{0}")]` fallback appears unused — confirm
/// against the `thiserror` docs before removing it.
#[derive(Error, Debug, Clone, PartialEq)]
#[error("{0}")]
pub enum OperationError {
    #[error("Vector dimension error: expected dim: {expected_dim}, got {received_dim}")]
    WrongVectorDimension {
        expected_dim: usize,
        received_dim: usize,
    },
    #[error("Not existing vector name error: {received_name}")]
    VectorNameNotExists { received_name: VectorNameBuf },
    #[error("No point with id {missed_point_id}")]
    PointIdError { missed_point_id: PointIdType },
    #[error(
        "Payload type does not match with previously given for field {field_name}. Expected: {expected_type}"
    )]
    TypeError {
        field_name: PayloadKeyType,
        expected_type: String,
    },
    #[error("Unable to infer type for the field '{field_name}'. Please specify `field_type`")]
    TypeInferenceError { field_name: PayloadKeyType },
    /// Service Error prevents further update of the collection until it is fixed.
    /// Should only be used for hardware, data corruption, IO, or other unexpected internal errors.
    #[error("Service runtime error: {description}")]
    ServiceError {
        description: String,
        // Rendered backtrace, captured only by `service_error` (not the `_light` variant).
        backtrace: Option<String>,
    },
    #[error("Inconsistent storage: {description}")]
    InconsistentStorage { description: String },
    #[error("Out of memory, free: {free}, {description}")]
    OutOfMemory { description: String, free: u64 },
    #[error("Operation cancelled: {description}")]
    Cancelled { description: String },
    #[error("Timeout error: {description}")]
    Timeout { description: String },
    #[error("Validation failed: {description}")]
    ValidationError { description: String },
    #[error("Wrong usage of sparse vectors")]
    WrongSparse,
    #[error("Wrong usage of multi vectors")]
    WrongMulti,
    #[error(
        "No range index for `order_by` key: `{key}`. Please create one to use `order_by`. Check https://qdrant.tech/documentation/concepts/indexing/#payload-index to see which payload schemas support Range conditions"
    )]
    MissingRangeIndexForOrderBy { key: String },
    #[error(
        "No appropriate index for faceting: `{key}`. Please create one to facet on this field. Check https://qdrant.tech/documentation/concepts/indexing/#payload-index to see which payload schemas support Match conditions"
    )]
    MissingMapIndexForFacet { key: String },
    #[error(
        "Expected {expected_type} value for {field_name} in the payload and/or in the formula defaults. Error: {description}"
    )]
    VariableTypeError {
        field_name: PayloadKeyType,
        expected_type: String,
        description: String,
    },
    #[error("The expression {expression} produced a non-finite number")]
    NonFiniteNumber { expression: String },
    // ToDo: Remove after RocksDB is deprecated
    #[error("RocksDB column family {name} not found")]
    RocksDbColumnFamilyNotFound { name: String },
}
impl OperationError {
/// Create a new service error with a description and a backtrace
/// Warning: capturing a backtrace can be an expensive operation on some platforms, so this should be used with caution in performance-sensitive parts of code.
pub fn service_error(description: impl Into<String>) -> Self {
Self::ServiceError {
description: description.into(),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
/// Create a new service error with a description and no backtrace
pub fn service_error_light(description: impl Into<String>) -> Self {
Self::ServiceError {
description: description.into(),
backtrace: None,
}
}
pub fn validation_error(description: impl Into<String>) -> Self {
Self::ValidationError {
description: description.into(),
}
}
pub fn inconsistent_storage(description: impl Into<String>) -> Self {
Self::InconsistentStorage {
description: description.into(),
}
}
pub fn vector_name_not_exists(vector_name: impl Into<String>) -> Self {
Self::VectorNameNotExists {
received_name: vector_name.into(),
}
}
pub fn timeout(timeout: Duration, operation: impl Into<String>) -> Self {
Self::Timeout {
description: format!(
"Operation '{}' timed out after {timeout:?}",
operation.into(),
),
}
}
}
/// Contains information regarding last operation error, which should be fixed before next operation could be processed
#[derive(Debug, Clone)]
pub struct SegmentFailedState {
    /// Sequence number associated with the failed operation.
    pub version: SeqNumberType,
    /// Point being processed when the failure occurred, if any.
    pub point_id: Option<PointIdType>,
    /// The error that put the segment into the failed state.
    pub error: OperationError,
}
impl From<ThreadPoolBuildError> for OperationError {
fn from(error: ThreadPoolBuildError) -> Self {
OperationError::ServiceError {
description: format!("{error}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<FileStorageError> for OperationError {
fn from(err: FileStorageError) -> Self {
Self::service_error(err.to_string())
}
}
impl From<MmapError> for OperationError {
fn from(err: MmapError) -> Self {
Self::service_error(err.to_string())
}
}
impl From<serde_cbor::Error> for OperationError {
    /// CBOR decoding failures indicate corrupted stored data.
    fn from(err: serde_cbor::Error) -> Self {
        Self::service_error(format!("Failed to parse data: {err}"))
    }
}
impl<E> From<AtomicIoError<E>> for OperationError {
    fn from(err: AtomicIoError<E>) -> Self {
        match err {
            // Route the inner I/O error through `From<IoError>` so that
            // out-of-memory detection applies.
            AtomicIoError::Internal(io_err) => io_err.into(),
            // The user-error payload `E` is unconstrained, so it cannot be formatted.
            AtomicIoError::User(_) => {
                OperationError::service_error("Unknown atomic write error")
            }
        }
    }
}
impl From<IoError> for OperationError {
fn from(err: IoError) -> Self {
match err.kind() {
ErrorKind::OutOfMemory => {
let free_memory = Mem::new().available_memory_bytes();
OperationError::OutOfMemory {
description: format!("IO Error: {err}"),
free: free_memory,
}
}
_ => OperationError::service_error(format!("IO Error: {err}")),
}
}
}
impl From<serde_json::Error> for OperationError {
    /// JSON (de)serialization failures are internal faults.
    fn from(err: serde_json::Error) -> Self {
        Self::service_error(format!("Json error: {err}"))
    }
}
impl From<fs_extra::error::Error> for OperationError {
    /// Filesystem-utility failures are internal faults.
    fn from(err: fs_extra::error::Error) -> Self {
        Self::service_error(format!("File system error: {err}"))
    }
}
impl From<geohash::GeohashError> for OperationError {
    /// Geohash encode/decode failures are internal faults.
    fn from(err: geohash::GeohashError) -> Self {
        Self::service_error(format!("Geohash error: {err}"))
    }
}
impl From<quantization::EncodingError> for OperationError {
    fn from(err: quantization::EncodingError) -> Self {
        match err {
            // An externally requested stop is a cancellation, not a failure.
            quantization::EncodingError::Stopped => OperationError::Cancelled {
                description: PROCESS_CANCELLED_BY_SERVICE_MESSAGE.to_string(),
            },
            // All genuine encoding failures collapse into one service error.
            quantization::EncodingError::IOError(err)
            | quantization::EncodingError::EncodingError(err)
            | quantization::EncodingError::ArgumentsError(err) => {
                OperationError::service_error(format!("Quantization encoding error: {err}"))
            }
        }
    }
}
impl From<TryReserveError> for OperationError {
    /// A failed fallible allocation maps to `OutOfMemory`, annotated with the
    /// memory currently reported as available.
    fn from(err: TryReserveError) -> Self {
        OperationError::OutOfMemory {
            description: format!("Failed to reserve memory: {err}"),
            free: Mem::new().available_memory_bytes(),
        }
    }
}
impl From<GridstoreError> for OperationError {
fn from(err: GridstoreError) -> Self {
match err {
GridstoreError::ServiceError { description } => {
Self::service_error(format!("Gridstore error: {description}"))
}
GridstoreError::FlushCancelled => Self::Cancelled {
description: "Gridstore flushing was cancelled".to_string(),
},
GridstoreError::Io(_) | GridstoreError::Mmap(_) | GridstoreError::SerdeJson(_) => {
Self::service_error(err.to_string())
}
GridstoreError::ValidationError { message } => Self::validation_error(message),
}
}
}
#[cfg(feature = "gpu")]
impl From<gpu::GpuError> for OperationError {
    /// GPU failures are internal faults; rendered via `Debug` formatting.
    fn from(err: gpu::GpuError) -> Self {
        let description = format!("GPU error: {err:?}");
        OperationError::service_error(description)
    }
}
/// Result alias for operations that may fail with [`OperationError`].
pub type OperationResult<T> = Result<T, OperationError>;
pub fn get_service_error<T>(err: &OperationResult<T>) -> Option<OperationError> {
match err {
Ok(_) => None,
Err(error) => match error {
OperationError::ServiceError { .. } => Some(error.clone()),
_ => None,
},
}
}
/// Zero-sized marker error: the operation was cancelled (e.g. a stop flag was raised).
#[derive(Debug, Copy, Clone)]
pub struct CancelledError;

/// Result of a cancellable operation; converts into `OperationResult` via `From<CancelledError>`.
pub type CancellableResult<T> = Result<T, CancelledError>;
impl From<CancelledError> for OperationError {
    /// Maps cancellation into the user-facing `Cancelled` variant with the
    /// standard service-cancellation message.
    fn from(_: CancelledError) -> Self {
        // The original parameter pattern `CancelledError: CancelledError`
        // (matching the unit struct) read like a binding shadowing the type
        // name; `_` states the intent plainly.
        OperationError::Cancelled {
            description: PROCESS_CANCELLED_BY_SERVICE_MESSAGE.to_string(),
        }
    }
}
/// Returns `Err(CancelledError)` when the shared stop flag has been raised,
/// `Ok(())` otherwise. Uses a relaxed load: the flag is a simple on/off signal.
pub fn check_process_stopped(stopped: &AtomicBool) -> CancellableResult<()> {
    match stopped.load(Ordering::Relaxed) {
        true => Err(CancelledError),
        false => Ok(()),
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::*;

    /// `Duration`'s humanized `Debug` formatting must surface in timeout
    /// error messages: sub-second, exact-second, fractional, and large values.
    #[test]
    fn test_timeout_error_formatting() {
        let cases = [
            (500u64, "500ms"),
            (1000, "1s"),
            (2500, "2.5s"),
            (60000, "60s"),
        ];
        for (millis, expected) in cases {
            let error = OperationError::timeout(Duration::from_millis(millis), "test operation");
            let error_msg = format!("{error}");
            assert!(
                error_msg.contains(expected),
                "Expected '{expected}' but got: {error_msg}"
            );
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/validate_snapshot_archive.rs | lib/segment/src/common/validate_snapshot_archive.rs | use std::io;
use std::path::Path;
use fs_err as fs;
use crate::common::operation_error::{OperationError, OperationResult};
/// Opens the snapshot tar archive at `path`, but only after validating that it
/// contains no unsupported entry types (see [`validate_snapshot_archive`]).
///
/// Note: the archive file is opened twice — once for validation, once for the
/// returned handle — because validation consumes its reader.
pub fn open_snapshot_archive_with_validation(
    path: &Path,
) -> OperationResult<tar::Archive<impl io::Read + io::Seek>> {
    validate_snapshot_archive(path)?;
    open_snapshot_archive(path)
}
/// Walks every entry of the tar snapshot at `path`, rejecting archives that
/// contain anything other than directories, regular files, or GNU sparse files.
///
/// # Errors
/// - Service error if the archive cannot be opened or read.
/// - Validation error if an unsupported entry type is found.
pub fn validate_snapshot_archive(path: &Path) -> OperationResult<()> {
    let mut ar = open_snapshot_archive(path)?;
    let entries = ar.entries_with_seek().map_err(|err| {
        OperationError::service_error(format!(
            "failed to read snapshot archive {}: {err}",
            path.display()
        ))
    })?;
    for entry in entries {
        let entry = entry.map_err(|err| {
            log::error!("Failed to read snapshot archive {}: {err}", path.display());
            // Deliberately mask underlying error from API users, because it can expose arbitrary file contents
            OperationError::service_error(format!(
                "failed to read snapshot archive {}",
                path.display(),
            ))
        })?;
        let entry_type = entry.header().entry_type();
        let allowed = matches!(
            entry_type,
            tar::EntryType::Directory | tar::EntryType::Regular | tar::EntryType::GNUSparse
        );
        if !allowed {
            return Err(OperationError::validation_error(format!(
                "malformed snapshot archive {}: archive contains {entry_type:?} entry",
                path.display(),
            )));
        }
    }
    Ok(())
}
/// Opens the tar archive at `path` over a buffered reader, with overwrite
/// disabled and sync-on-unpack enabled.
pub fn open_snapshot_archive(
    path: &Path,
) -> OperationResult<tar::Archive<impl io::Read + io::Seek>> {
    let file = fs::File::open(path).map_err(|err| {
        OperationError::service_error(format!(
            "failed to open snapshot archive {}: {err}",
            path.display()
        ))
    })?;
    let reader = io::BufReader::new(file);
    let mut archive = tar::Archive::new(reader);
    archive.set_overwrite(false);
    archive.set_sync(true);
    Ok(archive)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/flags/buffered_dynamic_flags.rs | lib/segment/src/common/flags/buffered_dynamic_flags.rs | use std::path::PathBuf;
use std::sync::Arc;
use ahash::AHashMap;
use common::is_alive_lock::IsAliveLock;
use common::types::PointOffsetType;
use parking_lot::{Mutex, RwLock};
use super::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
/// A buffered wrapper around `DynamicMmapFlags` that provides manual flushing,
/// without an interface for reading.
///
/// Changes are buffered until explicitly flushed.
#[derive(Debug)]
pub(crate) struct BufferedDynamicFlags {
    /// Persisted flags.
    storage: Arc<Mutex<DynamicMmapFlags>>,
    /// Pending changes to the storage flags (offset -> value).
    buffer: Arc<RwLock<AHashMap<PointOffsetType, bool>>>,
    /// Lock to prevent concurrent flush and drop
    is_alive_flush_lock: IsAliveLock,
}
impl BufferedDynamicFlags {
    /// Wraps `mmap_flags` with an empty update buffer.
    pub fn new(mmap_flags: DynamicMmapFlags) -> Self {
        let buffer = Arc::new(RwLock::new(AHashMap::new()));
        let is_alive_flush_lock = IsAliveLock::new();
        Self {
            storage: Arc::new(Mutex::new(mmap_flags)),
            buffer,
            is_alive_flush_lock,
        }
    }

    /// Records `value` for `index`; it reaches the mmap on the next flush.
    pub fn buffer_set(&self, index: PointOffsetType, value: bool) {
        // queue write in buffer
        self.buffer.write().insert(index, value);
    }

    /// Drops the OS disk cache for the underlying flags storage.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.storage.lock().clear_cache()?;
        Ok(())
    }

    /// Paths of the files backing the flags storage.
    pub fn files(&self) -> Vec<PathBuf> {
        self.storage.lock().files()
    }

    /// Builds a flusher over a snapshot of the current buffer: it grows the
    /// storage if needed, writes the buffered values, persists them, and
    /// removes the persisted entries from the buffer.
    ///
    /// The closure holds only weak references, so it becomes a logged no-op
    /// if this wrapper is dropped before the flush runs.
    pub fn flusher(&self) -> Flusher {
        // Snapshot the buffer; later writes are left for a subsequent flush.
        let updates = {
            let buffer_guard = self.buffer.read();
            if buffer_guard.is_empty() {
                return Box::new(|| Ok(()));
            }
            buffer_guard.clone()
        };
        // Highest buffered offset determines the length the storage must have.
        // (The buffer is non-empty here, so `max()` cannot actually be `None`.)
        let Some(required_len) = updates.keys().max().map(|&max_id| max_id as usize + 1) else {
            return Box::new(|| Ok(()));
        };
        // Weak reference to detect when the storage has been deleted
        let flags_arc = Arc::downgrade(&self.storage);
        let buffer = Arc::downgrade(&self.buffer);
        let is_alive_flush_lock = self.is_alive_flush_lock.handle();
        Box::new(move || {
            let (Some(is_alive_flush_guard), Some(flags_arc), Some(buffer_arc)) = (
                is_alive_flush_lock.lock_if_alive(),
                flags_arc.upgrade(),
                buffer.upgrade(),
            ) else {
                log::debug!("skipping flushing on deleted storage");
                return Ok(());
            };
            // lock for the entire flushing process
            let mut flags_guard = flags_arc.lock();
            // resize if needed
            if required_len > flags_guard.len() {
                flags_guard.set_len(required_len)?;
            }
            for (&index, &value) in &updates {
                flags_guard.set(index as usize, value);
            }
            flags_guard.flusher()()?;
            reconcile_persisted_buffer(&buffer_arc, updates);
            // Keep the guard till the end of the flush to prevent concurrent drop/flushes
            drop(is_alive_flush_guard);
            Ok(())
        })
    }
}
/// Drops from `buffer` every entry whose value was just persisted unchanged.
/// Entries overwritten with a different value during the flush stay pending.
fn reconcile_persisted_buffer(
    buffer: &RwLock<AHashMap<u32, bool>>,
    persisted: AHashMap<u32, bool>,
) {
    let mut pending = buffer.write();
    pending.retain(|point_id, pending_value| match persisted.get(point_id) {
        // Persisted with the same value -> no longer pending.
        Some(persisted_value) => *pending_value != *persisted_value,
        // Never persisted -> keep it buffered.
        None => true,
    });
}
#[cfg(test)]
mod tests {
    use common::types::PointOffsetType;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};

    use crate::common::flags::buffered_dynamic_flags::BufferedDynamicFlags;
    use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;

    /// Buffered writes beyond the current length must grow the storage on
    /// flush, and the grown state must survive a reopen.
    #[test]
    fn test_buffered_flags_growth_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_growth")
            .tempdir()
            .unwrap();

        // Start with smaller flags
        {
            let mut mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            mmap_flags.set_len(3).unwrap();
            mmap_flags.set(0, true);
            mmap_flags.set(2, true);
            mmap_flags.flusher()().unwrap();
        }

        // Grow and update with BufferedDynamicFlags
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            let flags = buffered_flags.storage.lock();

            // Initial state should match
            assert_eq!(flags.count_flags(), 2);
            assert_eq!(flags.len(), 3);
            drop(flags);

            // Set flags beyond current length - this should grow the length
            buffered_flags.buffer_set(5, true);
            buffered_flags.buffer_set(7, true);
            buffered_flags.buffer_set(8, false); // Also grows on false flag.
            buffered_flags.buffer_set(1, true); // Update existing

            // For this test, we need to simulate growth by setting flags beyond current length
            // The flusher will handle the growth when it's called

            // Flush changes
            let flusher = buffered_flags.flusher();
            flusher().unwrap();
        }

        // Verify growth persisted
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            // Length grows to max buffered index + 1 (the false write at 8).
            assert_eq!(mmap_flags.len(), 9);
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            let flags = buffered_flags.storage.lock();

            let expected_trues = vec![0, 1, 2, 5, 7];
            let actual_trues: Vec<_> = flags.iter_trues().collect();
            assert_eq!(actual_trues, expected_trues);

            assert_eq!(flags.count_flags(), 5);
            assert_eq!(flags.len() - flags.count_flags(), 4);
        }
    }

    /// Randomized updates over a large flag set must persist exactly.
    #[test]
    fn test_buffered_flags_large_dataset_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_large")
            .tempdir()
            .unwrap();

        let num_flags = 1000000;
        let mut rng = StdRng::seed_from_u64(42);

        // Generate random initial state
        let initial_flags: Vec<bool> = (0..num_flags).map(|_| rng.random()).collect();

        // Create initial flags
        {
            let mut mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            mmap_flags.set_len(num_flags).unwrap();
            for (i, &value) in initial_flags.iter().enumerate() {
                mmap_flags.set(i, value);
            }
            mmap_flags.flusher()().unwrap();
        }

        // Generate random updates
        let num_updates = 1000;
        let updates: Vec<(PointOffsetType, bool)> = (0..num_updates)
            .map(|_| {
                (
                    rng.random_range(0..num_flags) as PointOffsetType,
                    rng.random(),
                )
            })
            .collect();

        // Apply updates and flush
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

            // Verify initial state loaded correctly
            let initial_true_count = initial_flags.iter().filter(|&&b| b).count();
            assert_eq!(
                buffered_flags.storage.lock().count_flags(),
                initial_true_count
            );

            // Apply updates
            for &(index, value) in &updates {
                buffered_flags.buffer_set(index, value);
            }

            // Flush
            let flusher = buffered_flags.flusher();
            flusher().unwrap();
        }

        // Verify persistence
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

            // Calculate expected final state
            // (later updates at the same index win, matching map semantics)
            let mut expected_state = initial_flags.clone();
            for &(index, value) in &updates {
                expected_state[index as usize] = value;
            }

            let expected_true_count = expected_state.iter().filter(|&&b| b).count();
            let flags = buffered_flags.storage.lock();
            assert_eq!(flags.count_flags(), expected_true_count);
            assert_eq!(flags.len(), num_flags);

            // Verify specific values for a sample
            for i in (0..num_flags).step_by(100) {
                let expected = expected_state[i];
                let actual = flags.get(i);
                assert_eq!(actual, expected, "Mismatch at index {i}");
            }
        }
    }

    /// Repeated buffer/flush/reopen cycles must accumulate state correctly.
    #[test]
    fn test_buffered_flags_multiple_flush_cycles() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_cycles")
            .tempdir()
            .unwrap();

        // Initial empty state
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            mmap_flags.flusher()().unwrap();
        }

        let cycles = [
            vec![(0, true), (1, true), (2, false)],
            vec![(1, false), (3, true), (4, true)],
            vec![(0, false), (2, true), (5, true)],
        ];

        let mut expected_state = [false; 6];

        for (cycle_num, updates) in cycles.iter().enumerate() {
            // Apply updates and flush
            {
                let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
                let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

                // The flusher will handle length expansion as needed
                for &(index, value) in updates {
                    buffered_flags.buffer_set(index, value);
                    expected_state[index as usize] = value;
                }

                let flusher = buffered_flags.flusher();
                flusher().unwrap();
            }

            // Verify state after each cycle
            {
                let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
                let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

                for (i, &expected) in expected_state.iter().enumerate() {
                    let actual = buffered_flags.storage.lock().get(i);
                    assert_eq!(
                        actual, expected,
                        "Cycle {cycle_num}, index {i}: expected {expected}, got {actual}"
                    );
                }

                let expected_true_count = expected_state.iter().filter(|&&b| b).count();
                assert_eq!(
                    buffered_flags.storage.lock().count_flags(),
                    expected_true_count
                );
            }
        }
    }

    /// Minimal case: a single flag at index 0 persists and reads back.
    #[test]
    fn test_buffered_flags_single_element_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_single")
            .tempdir()
            .unwrap();

        // Test with single true flag
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            buffered_flags.buffer_set(0, true);
            let flusher = buffered_flags.flusher();
            flusher().unwrap();
        }

        // Verify single flag persisted
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            let flags = buffered_flags.storage.lock();
            assert_eq!(flags.len(), 1);
            assert_eq!(flags.count_flags(), 1);
            assert_eq!(flags.len() - flags.count_flags(), 0);
            assert!(flags.get(0));

            let trues: Vec<_> = flags.iter_trues().collect();
            assert_eq!(trues, vec![0]);
        }
    }

    /// Widely spaced indices must grow the storage and leave gaps false.
    #[test]
    fn test_buffered_flags_sparse_indices_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_sparse")
            .tempdir()
            .unwrap();

        // Test with very sparse indices (large gaps)
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

            // Set flags at sparse indices
            buffered_flags.buffer_set(0, true);
            buffered_flags.buffer_set(1000, true);
            buffered_flags.buffer_set(50000, true);
            buffered_flags.buffer_set(100000, true);

            let flusher = buffered_flags.flusher();
            flusher().unwrap();
        }

        // Verify sparse indices persisted correctly
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            let flags = buffered_flags.storage.lock();
            assert_eq!(flags.len(), 100001);
            assert_eq!(flags.count_flags(), 4);

            // Verify specific indices
            assert!(flags.get(0));
            assert!(flags.get(1000));
            assert!(flags.get(50000));
            assert!(flags.get(100000));

            // Verify some gaps are false
            assert!(!flags.get(500));
            assert!(!flags.get(25000));
            assert!(!flags.get(75000));

            let trues: Vec<_> = flags.iter_trues().collect();
            assert_eq!(trues, vec![0, 1000, 50000, 100000]);
        }
    }

    /// Only the last buffered value per index survives a flush (map semantics).
    #[test]
    fn test_buffered_flags_overwrite_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("buffered_flags_overwrite")
            .tempdir()
            .unwrap();

        // Create initial state
        {
            let mut mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            mmap_flags.set_len(10).unwrap();
            for i in 0..10 {
                mmap_flags.set(i, i % 2 == 0); // Even indices true
            }
            mmap_flags.flusher()().unwrap();
        }

        // Test overwriting existing flags multiple times
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);

            // Initial state: [true, false, true, false, true, false, true, false, true, false]
            assert_eq!(buffered_flags.storage.lock().count_flags(), 5);

            // First overwrite: flip all values
            for i in 0..10 {
                buffered_flags.buffer_set(i, i % 2 == 1); // Odd indices true
            }

            // Second overwrite: set all to true
            for i in 0..10 {
                buffered_flags.buffer_set(i, true);
            }

            // Third overwrite: set all to false
            for i in 0..10 {
                buffered_flags.buffer_set(i, false);
            }

            let flusher = buffered_flags.flusher();
            flusher().unwrap();
        }

        // Verify final state (all false) persisted
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let buffered_flags = BufferedDynamicFlags::new(mmap_flags);
            let flags = buffered_flags.storage.lock();
            assert_eq!(flags.count_flags(), 0);
            assert_eq!(flags.len() - flags.count_flags(), 10);

            for i in 0..10 {
                assert!(!flags.get(i), "Index {i} should be false");
            }

            let trues: Vec<_> = flags.iter_trues().collect();
            assert!(trues.is_empty());
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/flags/dynamic_mmap_flags.rs | lib/segment/src/common/flags/dynamic_mmap_flags.rs | use std::cmp::max;
use std::fmt;
use std::path::{Path, PathBuf};
use bitvec::prelude::BitSlice;
use common::counter::referenced_counter::HwMetricRefCounter;
use common::types::PointOffsetType;
use fs_err as fs;
use memmap2::MmapMut;
use memory::fadvise::clear_disk_cache;
use memory::madvise::{self, AdviceSetting, Madviseable as _};
use memory::mmap_ops::{create_and_ensure_length, open_write_mmap};
use memory::mmap_type::{MmapBitSlice, MmapType};
use crate::common::Flusher;
use crate::common::operation_error::{OperationError, OperationResult};
// Minimal allocation for the flags file; smaller in debug builds so tests
// exercise the resize path sooner.
#[cfg(debug_assertions)]
const MINIMAL_MMAP_SIZE: usize = 128; // 128 bytes -> 1024 flags
#[cfg(not(debug_assertions))]
const MINIMAL_MMAP_SIZE: usize = 1024 * 1024; // 1Mb

/// Current flags file.
const FLAGS_FILE: &str = "flags_a.dat";
/// Legacy second flags file; only read during migration (see `DynamicMmapFlags::open`).
const FLAGS_FILE_LEGACY: &str = "flags_b.dat";
const STATUS_FILE_NAME: &str = "status.dat";

/// Path of the status file inside `directory`.
fn status_file(directory: &Path) -> PathBuf {
    directory.join(STATUS_FILE_NAME)
}
/// Fixed-layout header persisted (mmap'ed) in `status.dat`.
#[repr(C)]
struct DynamicMmapStatus {
    /// Amount of flags (bits)
    len: usize,
    /// Should be 0 in the current version. Old versions used it to indicate which flags file
    /// (flags_a.dat or flags_b.dat) is currently in use.
    current_file_id: usize,
}
/// Creates the status file (sized for `DynamicMmapStatus`) if it does not
/// exist yet, then memory-maps it for writing.
fn ensure_status_file(directory: &Path) -> OperationResult<MmapMut> {
    let path = status_file(directory);
    if !path.exists() {
        create_and_ensure_length(&path, std::mem::size_of::<DynamicMmapStatus>())?;
    }
    let mmap = open_write_mmap(&path, AdviceSetting::Global, false)?;
    Ok(mmap)
}
/// Mmap-backed, growable bit vector of flags, persisted in `flags_a.dat`
/// with its length tracked in `status.dat`.
pub struct DynamicMmapFlags {
    /// Current mmap'ed BitSlice for flags
    flags: MmapBitSlice,
    /// Mmap'ed length/version header (see `DynamicMmapStatus`).
    status: MmapType<DynamicMmapStatus>,
    /// Directory holding the flags and status files.
    directory: PathBuf,
}
impl fmt::Debug for DynamicMmapFlags {
    /// Manual impl: `MmapType` fields prevent a plain `#[derive(Debug)]`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            flags,
            status,
            directory,
        } = self;
        f.debug_struct("DynamicMmapFlags")
            .field("flags", flags)
            .field("status", status)
            .field("directory", directory)
            .finish_non_exhaustive()
    }
}
/// Based on the number of flags determines the size of the mmap file.
fn mmap_capacity_bytes(num_flags: usize) -> usize {
let number_of_bytes = num_flags.div_ceil(u8::BITS as usize);
max(MINIMAL_MMAP_SIZE, number_of_bytes.next_power_of_two())
}
/// How many flags fit into the mmap file currently allocated for `len` flags,
/// i.e. the number of bits the file can hold without a resize.
fn mmap_max_current_size(len: usize) -> usize {
    mmap_capacity_bytes(len) * u8::BITS as usize
}
impl DynamicMmapFlags {
pub fn len(&self) -> usize {
self.status.len
}
pub fn is_empty(&self) -> bool {
self.status.len == 0
}
pub fn open(directory: &Path, populate: bool) -> OperationResult<Self> {
fs::create_dir_all(directory)?;
let status_mmap = ensure_status_file(directory)?;
let mut status: MmapType<DynamicMmapStatus> = unsafe { MmapType::try_from(status_mmap)? };
if status.current_file_id != 0 {
// Migrate
fs::copy(
directory.join(FLAGS_FILE_LEGACY),
directory.join(FLAGS_FILE),
)?;
status.current_file_id = 0;
status.flusher()()?;
}
// Open first mmap
let flags = Self::open_mmap(status.len, directory, populate)?;
Ok(Self {
flags,
status,
directory: directory.to_owned(),
})
}
fn open_mmap(
num_flags: usize,
directory: &Path,
populate: bool,
) -> OperationResult<MmapBitSlice> {
let capacity_bytes = mmap_capacity_bytes(num_flags);
let file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(false)
.open(directory.join(FLAGS_FILE))?;
file.set_len(capacity_bytes as u64)?;
let flags_mmap = unsafe { MmapMut::map_mut(&file)? };
drop(file);
flags_mmap.madvise(madvise::get_global())?;
if populate {
flags_mmap.populate();
} else {
#[cfg(unix)]
if let Err(err) = flags_mmap.advise(memmap2::Advice::WillNeed) {
log::error!("Failed to advise MADV_WILLNEED for deleted flags: {err}");
}
}
let flags = MmapBitSlice::try_from(flags_mmap, 0)?;
Ok(flags)
}
/// Set the length of the vector to the given value.
/// If the vector is grown, the new elements will be set to `false`.
///
/// NOTE: capacity can be up to 2x the current length.
///
/// Errors if the vector is shrunk.
pub fn set_len(&mut self, new_len: usize) -> OperationResult<()> {
debug_assert!(new_len >= self.status.len);
if new_len == self.status.len {
return Ok(());
}
if new_len < self.status.len {
return Err(OperationError::service_error(format!(
"Cannot shrink the mmap flags from {} to {new_len}",
self.status.len,
)));
}
// Capacity can be up to 2x the current length
let current_capacity = mmap_max_current_size(self.status.len);
if new_len > current_capacity {
// Flush the current mmaps before resizing
self.flags.flusher()()?;
// Don't read the whole file on resize
let populate = false;
let flags = Self::open_mmap(new_len, &self.directory, populate)?;
// Swap operation. It is important this section is not interrupted by errors.
self.flags = flags;
}
self.status.len = new_len;
Ok(())
}
pub fn get<TKey>(&self, key: TKey) -> bool
where
TKey: num_traits::cast::AsPrimitive<usize>,
{
let key: usize = key.as_();
if key >= self.status.len {
return false;
}
self.flags[key]
}
/// Count number of set flags
pub fn count_flags(&self) -> usize {
// Take a bitslice of our set length, count ones in it
// This uses bit-indexing, returning a new bitslice, extra bits within capacity are not counted
self.flags[..self.status.len].count_ones()
}
/// Set the `true` value of the flag at the given index.
/// Ignore the call if the index is out of bounds.
///
/// Returns previous value of the flag.
pub fn set<TKey>(&mut self, key: TKey, value: bool) -> bool
where
TKey: num_traits::cast::AsPrimitive<usize>,
{
let key: usize = key.as_();
debug_assert!(key < self.status.len);
if key >= self.status.len {
return false;
}
self.flags.replace(key, value)
}
/// This method will set the flag at the given index to the given value.
/// If current length is not enough, it will resize the flags with amortized cost (x2).
/// All new flags will be set to false.
///
/// Returns previous value of the flag.
pub fn set_with_resize<TKey>(
    &mut self,
    key: TKey,
    value: bool,
    hw_counter_ref: HwMetricRefCounter,
) -> OperationResult<bool>
where
    TKey: num_traits::cast::AsPrimitive<usize>,
{
    // Measure write of single bool.
    hw_counter_ref.incr_delta(size_of::<bool>());
    let key: usize = key.as_();
    if key >= self.status.len {
        if value {
            let new_len = key + 1;
            // Also account for the flags newly initialized by the resize.
            hw_counter_ref.incr_delta(new_len - self.status.len);
            self.set_len(new_len)?;
        } else {
            // Default value is false, so we don't need to resize
            return Ok(false);
        }
    }
    Ok(self.flags.replace(key, value))
}
/// Build a [`Flusher`] closure that persists both the flag bits and the
/// length status when invoked.
pub fn flusher(&self) -> Flusher {
    Box::new({
        let flags_flusher = self.flags.flusher();
        let status_flusher = self.status.flusher();
        move || {
            // Flush flag data first, then the recorded length.
            flags_flusher()?;
            status_flusher()?;
            Ok(())
        }
    })
}
/// Borrow the flags as a [`BitSlice`], truncated to the logical length.
pub fn get_bitslice(&self) -> &BitSlice {
    // Take subslice with actual length, bitslice may be larger due to extra allocated capacity
    // See `mmap_capacity_bytes`
    &self.flags[..self.len()]
}
// no immutable files, everything is mutable
/// Paths of the files backing this structure: the status file and the flags file.
pub fn files(&self) -> Vec<PathBuf> {
    vec![
        status_file(&self.directory),
        self.directory.join(FLAGS_FILE),
    ]
}
/// Iterate over all "true" flags, in ascending index order.
pub fn iter_trues(&self) -> impl Iterator<Item = PointOffsetType> + '_ {
    self.flags.iter_ones().map(|x| x as PointOffsetType)
}
/// Populate all pages in the mmap.
/// Block until all pages are populated.
pub fn populate(&self) -> OperationResult<()> {
    self.flags.populate()?;
    Ok(())
}
/// Drop disk cache for the flags file.
pub fn clear_cache(&self) -> OperationResult<()> {
    let flags_file = self.directory.join(FLAGS_FILE);
    clear_disk_cache(&flags_file)?;
    Ok(())
}
}
#[cfg(test)]
mod tests {
    use std::iter;
    use rand::prelude::StdRng;
    use rand::{Rng, SeedableRng};
    use tempfile::Builder;
    use super::*;

    /// Write a random flag pattern, grow the container, flush, and verify
    /// everything reads back identically after reopening.
    #[test]
    fn test_bitflags_saving() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let num_flags = 5000;
        let mut rng = StdRng::seed_from_u64(42);
        let random_flags: Vec<bool> = iter::repeat_with(|| rng.random()).take(num_flags).collect();
        {
            let mut dynamic_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            dynamic_flags.set_len(num_flags).unwrap();
            // First half: store the random pattern (set() returns previous value, must be false).
            random_flags
                .iter()
                .enumerate()
                .filter(|(_, flag)| **flag)
                .for_each(|(i, _)| assert!(!dynamic_flags.set(i, true)));
            // Second half: grow and store the inverted pattern.
            dynamic_flags.set_len(num_flags * 2).unwrap();
            random_flags
                .iter()
                .enumerate()
                .filter(|(_, flag)| !*flag)
                .for_each(|(i, _)| assert!(!dynamic_flags.set(num_flags + i, true)));
            dynamic_flags.flusher()().unwrap();
        }
        {
            let dynamic_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            assert_eq!(dynamic_flags.status.len, num_flags * 2);
            for (i, flag) in random_flags.iter().enumerate() {
                assert_eq!(dynamic_flags.get(i), *flag);
                assert_eq!(dynamic_flags.get(num_flags + i), !*flag);
            }
        }
    }

    /// `count_flags` must agree with counting every flag one by one.
    #[test]
    fn test_bitflags_counting() {
        let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
        let num_flags = 5003; // Prime number, not byte aligned
        let mut rng = StdRng::seed_from_u64(42);
        // Create randomized dynamic mmap flags to test counting
        let mut dynamic_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
        dynamic_flags.set_len(num_flags).unwrap();
        let random_flags: Vec<bool> = iter::repeat_with(|| rng.random()).take(num_flags).collect();
        random_flags
            .iter()
            .enumerate()
            .filter(|(_, flag)| **flag)
            .for_each(|(i, _)| assert!(!dynamic_flags.set(i, true)));
        dynamic_flags.flusher()().unwrap();
        // Test count flags method
        let count = dynamic_flags.count_flags();
        // Compare against manually counting every flag
        let mut manual_count = 0;
        for i in 0..num_flags {
            if dynamic_flags.get(i) {
                manual_count += 1;
            }
        }
        assert_eq!(count, manual_count);
    }

    /// Capacity has a 128-byte floor and grows in powers of two.
    #[test]
    fn test_capacity() {
        assert_eq!(mmap_capacity_bytes(0), 128);
        assert_eq!(mmap_capacity_bytes(1), 128);
        assert_eq!(mmap_capacity_bytes(1023), 128);
        assert_eq!(mmap_capacity_bytes(1024), 128);
        assert_eq!(mmap_capacity_bytes(1025), 256);
        assert_eq!(mmap_capacity_bytes(10000), 2048);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/flags/bitvec_flags.rs | lib/segment/src/common/flags/bitvec_flags.rs | use std::path::PathBuf;
use bitvec::slice::BitSlice;
use bitvec::vec::BitVec;
use common::types::PointOffsetType;
use super::buffered_dynamic_flags::BufferedDynamicFlags;
use super::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
/// A buffered, growable, and persistent bitslice with a separate in-memory bitvec.
///
/// Use [`RoaringFlags`][1] if you need a reference to a bitmap.
///
/// Changes are buffered until explicitly flushed.
///
/// [1]: super::roaring_flags::RoaringFlags
#[derive(Debug)]
pub struct BitvecFlags {
    /// Buffered persisted flags.
    storage: BufferedDynamicFlags,
    /// In-memory bitvec of true and false flags; reads are served from here.
    bitvec: BitVec,
    /// Total length of the flags, including the trailing ones which have been set to false
    len: usize,
}
impl BitvecFlags {
    /// Load the persisted flags into an in-memory bitvec and wrap the mmap
    /// storage in a write buffer. The mmap's disk cache is dropped afterwards,
    /// since reads are served from memory from now on.
    pub fn new(mmap_flags: DynamicMmapFlags) -> Self {
        // load flags into memory
        let bitvec = BitVec::from_bitslice(mmap_flags.get_bitslice());
        if let Err(err) = mmap_flags.clear_cache() {
            log::warn!("Failed to clear bitslice cache: {err}");
        }
        Self {
            len: mmap_flags.len(),
            storage: BufferedDynamicFlags::new(mmap_flags),
            bitvec,
        }
    }

    /// Total number of flags, including trailing `false` ones.
    pub fn len(&self) -> usize {
        self.len
    }

    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Borrow the in-memory bitslice (may be ahead of what is persisted).
    pub fn get_bitslice(&self) -> &BitSlice {
        &self.bitvec
    }

    /// Get the flag at `index`; out-of-bounds reads return `false`.
    pub fn get(&self, index: PointOffsetType) -> bool {
        self.bitvec.get(index as usize).is_some_and(|bit| *bit)
    }

    /// Iterate indices of all `true` flags in ascending order.
    pub fn iter_trues(&self) -> impl Iterator<Item = PointOffsetType> {
        self.bitvec
            .iter_ones()
            .map(|index| index as PointOffsetType)
    }

    /// Iterate indices of all `false` flags in ascending order.
    pub fn iter_falses(&self) -> impl Iterator<Item = PointOffsetType> {
        self.bitvec
            .iter_zeros()
            .map(|index| index as PointOffsetType)
    }

    #[inline]
    pub fn count_trues(&self) -> usize {
        self.bitvec.count_ones()
    }

    #[inline]
    pub fn count_falses(&self) -> usize {
        self.bitvec.count_zeros()
    }

    /// Set the value of a flag at the given index, grows the bitvec if needed.
    /// Returns the previous value of the flag.
    pub fn set(&mut self, index: PointOffsetType, value: bool) -> bool {
        // queue write in buffer
        self.storage.buffer_set(index, value);
        // update length if needed
        let index_usize = index as usize;
        if index_usize >= self.len {
            self.len = index_usize + 1;
            // New positions default to false; the target bit is set below.
            self.bitvec.resize(self.len, false);
        }
        // update bitmap
        self.bitvec.replace(index_usize, value)
    }

    /// Drop the disk cache of the persisted storage.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.storage.clear_cache()?;
        Ok(())
    }

    /// Files backing the persisted flags.
    pub fn files(&self) -> Vec<PathBuf> {
        self.storage.files()
    }

    /// Flusher that persists all buffered changes.
    pub fn flusher(&self) -> Flusher {
        self.storage.flusher()
    }
}
#[cfg(test)]
mod tests {
    use common::types::PointOffsetType;
    use crate::common::flags::bitvec_flags::BitvecFlags;
    use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;

    /// End-to-end check that `BitvecFlags` stays consistent across a flush
    /// and a reload from disk.
    ///
    /// Fixes: the test function and tempdir prefix were copy-pasted from the
    /// roaring_flags module and misleadingly named "roaring"; a pre-flush
    /// comment claimed "after reload".
    #[test]
    fn test_bitvec_flags_consistency_after_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("bitvec_flags_consistency")
            .tempdir()
            .unwrap();
        // Create and update flags
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            let mut bitvec_flags = BitvecFlags::new(mmap_flags);
            // Set various flags - we'll set up to index 19 to have a length of 20
            for i in 16..20 {
                bitvec_flags.set(i, false); // Ensure we have length 20
            }
            bitvec_flags.set(0, true);
            bitvec_flags.set(5, true);
            bitvec_flags.set(10, true);
            bitvec_flags.set(15, true);
            bitvec_flags.set(7, false); // This should be no-op since default is false
            // Verify iteration consistency before flushing
            let iter_trues: Vec<_> = bitvec_flags.iter_trues().collect();
            // Verify expected values
            assert_eq!(iter_trues, vec![0, 5, 10, 15]);
            // Verify count consistency
            assert_eq!(bitvec_flags.count_trues(), 4);
            // Flush
            let flusher = bitvec_flags.flusher();
            flusher().unwrap();
        }
        // Verify bitmap consistency after reload
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let bitvec_flags = BitvecFlags::new(mmap_flags);
            // Verify iteration consistency after reload
            let iter_trues: Vec<_> = bitvec_flags.iter_trues().collect();
            // Verify expected values
            assert_eq!(iter_trues, vec![0, 5, 10, 15]);
            // Verify count consistency
            assert_eq!(bitvec_flags.count_trues(), 4);
            assert_eq!(
                bitvec_flags.count_falses(),
                bitvec_flags.len() - bitvec_flags.count_trues()
            );
            // Verify iteration covers all indices
            let all_trues: Vec<_> = bitvec_flags.iter_trues().collect();
            let all_falses: Vec<_> = bitvec_flags.iter_falses().collect();
            let mut all_indices = all_trues;
            all_indices.extend(all_falses);
            all_indices.sort();
            let expected_all: Vec<_> = (0..bitvec_flags.len() as PointOffsetType).collect();
            assert_eq!(all_indices, expected_all);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/flags/roaring_flags.rs | lib/segment/src/common/flags/roaring_flags.rs | use std::path::PathBuf;
use common::types::PointOffsetType;
use roaring::RoaringBitmap;
use super::buffered_dynamic_flags::BufferedDynamicFlags;
use super::dynamic_mmap_flags::DynamicMmapFlags;
use crate::common::Flusher;
use crate::common::operation_error::OperationResult;
/// A buffered, growable, and persistent bitslice with fast in-memory roaring bitmap.
///
/// Use [`BitvecFlags`][1] if you need a reference to a bitslice.
///
/// Changes are buffered until explicitly flushed.
///
/// [1]: super::bitvec_flags::BitvecFlags
pub struct RoaringFlags {
    /// Buffered persisted flags.
    storage: BufferedDynamicFlags,
    /// In-memory bitmap of true flags.
    // Potential optimization: add a secondary bitmap for false values for faster iter_falses implementation.
    bitmap: RoaringBitmap,
    /// Total length of the flags, including the trailing ones which have been set to false
    len: usize,
}
impl RoaringFlags {
    /// Load the persisted true-flags into a roaring bitmap and wrap the mmap
    /// storage in a write buffer. The mmap's disk cache is dropped afterwards,
    /// since reads are served from the bitmap from now on.
    pub fn new(mmap_flags: DynamicMmapFlags) -> Self {
        // load flags into memory
        let bitmap = RoaringBitmap::from_sorted_iter(mmap_flags.iter_trues())
            .expect("iter_trues iterates in sorted order");
        if let Err(err) = mmap_flags.clear_cache() {
            log::warn!("Failed to clear bitslice cache: {err}");
        }
        Self {
            len: mmap_flags.len(),
            storage: BufferedDynamicFlags::new(mmap_flags),
            bitmap,
        }
    }

    /// Total number of flags, including trailing `false` ones.
    pub fn len(&self) -> usize {
        self.len
    }

    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Borrow the in-memory bitmap of `true` flags.
    pub fn get_bitmap(&self) -> &RoaringBitmap {
        &self.bitmap
    }

    /// Get the flag at `index`; indices absent from the bitmap read as `false`.
    pub fn get(&self, index: PointOffsetType) -> bool {
        self.bitmap.contains(index)
    }

    /// Iterate indices of all `true` flags in ascending order.
    pub fn iter_trues(&self) -> impl Iterator<Item = PointOffsetType> {
        self.bitmap.iter()
    }

    /// Iterate indices of all `false` flags in ascending order. O(len).
    pub fn iter_falses(&self) -> impl Iterator<Item = PointOffsetType> {
        // potential optimization:
        // Create custom iterator which leverages bitmap's iterator for knowing ranges where the flags are false.
        // This will help by not checking the bitmap for indices that are already known to be false.
        (0..self.len as PointOffsetType).filter(|&i| !self.bitmap.contains(i))
    }

    pub fn count_trues(&self) -> usize {
        self.bitmap.len() as usize
    }

    pub fn count_falses(&self) -> usize {
        self.len.saturating_sub(self.count_trues())
    }

    /// Set the value of a flag at the given index.
    /// Returns the previous value of the flag.
    pub fn set(&mut self, index: PointOffsetType, value: bool) -> bool {
        // queue write in buffer
        self.storage.buffer_set(index, value);
        // update length if needed
        let index_usize = index as usize;
        if index_usize >= self.len {
            self.len = index_usize + 1;
        }
        // update bitmap
        if value {
            // `insert` returns true if newly inserted, so previous value is the negation
            !self.bitmap.insert(index)
        } else {
            // `remove` returns true if the value was present, i.e. the previous value
            self.bitmap.remove(index)
        }
    }

    /// Drop the disk cache of the persisted storage.
    pub fn clear_cache(&self) -> OperationResult<()> {
        self.storage.clear_cache()?;
        Ok(())
    }

    /// Files backing the persisted flags.
    pub fn files(&self) -> Vec<PathBuf> {
        self.storage.files()
    }

    /// Flusher that persists all buffered changes.
    pub fn flusher(&self) -> Flusher {
        self.storage.flusher()
    }
}
#[cfg(test)]
mod tests {
    use common::types::PointOffsetType;
    use crate::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
    use crate::common::flags::roaring_flags::RoaringFlags;

    /// End-to-end check that `RoaringFlags` stays consistent across a flush
    /// and a reload from disk.
    #[test]
    fn test_roaring_flags_consistency_after_persistence() {
        let dir = tempfile::Builder::new()
            .prefix("roaring_flags_consistency")
            .tempdir()
            .unwrap();
        // Create and update flags
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
            let mut roaring_flags = RoaringFlags::new(mmap_flags);
            // Set various flags - we'll set up to index 19 to have a length of 20
            for i in 16..20 {
                roaring_flags.set(i, false); // Ensure we have length 20
            }
            roaring_flags.set(0, true);
            roaring_flags.set(5, true);
            roaring_flags.set(10, true);
            roaring_flags.set(15, true);
            roaring_flags.set(7, false); // This should be no-op since default is false
            // Flush
            let flusher = roaring_flags.flusher();
            flusher().unwrap();
        }
        // Verify bitmap consistency after reload
        {
            let mmap_flags = DynamicMmapFlags::open(dir.path(), true).unwrap();
            let roaring_flags = RoaringFlags::new(mmap_flags);
            // Verify iteration consistency after reload
            let iter_trues: Vec<_> = roaring_flags.iter_trues().collect();
            // Verify expected values
            assert_eq!(iter_trues, vec![0, 5, 10, 15]);
            // Verify count consistency
            assert_eq!(roaring_flags.count_trues(), 4);
            assert_eq!(
                roaring_flags.count_falses(),
                roaring_flags.len() - roaring_flags.count_trues()
            );
            // Verify iteration covers all indices
            let all_trues: Vec<_> = roaring_flags.iter_trues().collect();
            let all_falses: Vec<_> = roaring_flags.iter_falses().collect();
            let mut all_indices = all_trues;
            all_indices.extend(all_falses);
            all_indices.sort();
            let expected_all: Vec<_> = (0..roaring_flags.len() as PointOffsetType).collect();
            assert_eq!(all_indices, expected_all);
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/common/flags/mod.rs | lib/segment/src/common/flags/mod.rs | //! Different flavors of flags structures, akin to a Vec<bool>, but persistent and efficient.
//!
//! Here's a brief overview of the different flavors of flags structures:
//! - `dynamic_mmap_flags`: Base implementation of storage in mmapped files.
//! - `buffered_dynamic_flags`: Builds on top of `dynamic_mmap_flags` to provide buffered writes.
//! - `bitvec_flags`: `buffered_dynamic_flags` with in-memory bitvec for reads.
//! - `roaring_flags`: `buffered_dynamic_flags` with in-memory roaring bitmap for reads.
pub mod bitvec_flags;
mod buffered_dynamic_flags;
pub mod dynamic_mmap_flags;
pub mod roaring_flags;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/simple_neon.rs | lib/segment/src/spaces/simple_neon.rs | #[cfg(target_feature = "neon")]
use std::arch::aarch64::*;
#[cfg(target_feature = "neon")]
use common::types::ScoreType;
use super::tools::is_length_zero_or_normalized;
use crate::data_types::vectors::DenseVector;
#[cfg(target_feature = "neon")]
use crate::data_types::vectors::VectorElementType;
/// Negated squared Euclidean distance between `v1` and `v2`, NEON-accelerated.
///
/// Processes 16 floats per iteration using four independent FMA accumulators;
/// the remainder is handled with scalar code. Callers must ensure NEON is
/// available and that `v2.len() >= v1.len()` (indices up to `v1.len()` are
/// read from both slices).
#[cfg(target_feature = "neon")]
pub(crate) unsafe fn euclid_similarity_neon(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum1 = vdupq_n_f32(0.);
        let mut sum2 = vdupq_n_f32(0.);
        let mut sum3 = vdupq_n_f32(0.);
        let mut sum4 = vdupq_n_f32(0.);
        let mut i: usize = 0;
        while i < m {
            let sub1 = vsubq_f32(vld1q_f32(ptr1), vld1q_f32(ptr2));
            sum1 = vfmaq_f32(sum1, sub1, sub1);
            let sub2 = vsubq_f32(vld1q_f32(ptr1.add(4)), vld1q_f32(ptr2.add(4)));
            sum2 = vfmaq_f32(sum2, sub2, sub2);
            let sub3 = vsubq_f32(vld1q_f32(ptr1.add(8)), vld1q_f32(ptr2.add(8)));
            sum3 = vfmaq_f32(sum3, sub3, sub3);
            let sub4 = vsubq_f32(vld1q_f32(ptr1.add(12)), vld1q_f32(ptr2.add(12)));
            sum4 = vfmaq_f32(sum4, sub4, sub4);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = vaddvq_f32(sum1) + vaddvq_f32(sum2) + vaddvq_f32(sum3) + vaddvq_f32(sum4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).powi(2);
        }
        -result
    }
}
/// Negated Manhattan (L1) distance between `v1` and `v2`, NEON-accelerated.
///
/// Processes 16 floats per iteration using four independent accumulators;
/// the remainder is handled with scalar code. Callers must ensure NEON is
/// available and that `v2.len() >= v1.len()`.
#[cfg(target_feature = "neon")]
pub(crate) unsafe fn manhattan_similarity_neon(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum1 = vdupq_n_f32(0.);
        let mut sum2 = vdupq_n_f32(0.);
        let mut sum3 = vdupq_n_f32(0.);
        let mut sum4 = vdupq_n_f32(0.);
        let mut i: usize = 0;
        while i < m {
            let sub1 = vsubq_f32(vld1q_f32(ptr1), vld1q_f32(ptr2));
            sum1 = vaddq_f32(sum1, vabsq_f32(sub1));
            let sub2 = vsubq_f32(vld1q_f32(ptr1.add(4)), vld1q_f32(ptr2.add(4)));
            sum2 = vaddq_f32(sum2, vabsq_f32(sub2));
            let sub3 = vsubq_f32(vld1q_f32(ptr1.add(8)), vld1q_f32(ptr2.add(8)));
            sum3 = vaddq_f32(sum3, vabsq_f32(sub3));
            let sub4 = vsubq_f32(vld1q_f32(ptr1.add(12)), vld1q_f32(ptr2.add(12)));
            sum4 = vaddq_f32(sum4, vabsq_f32(sub4));
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = vaddvq_f32(sum1) + vaddvq_f32(sum2) + vaddvq_f32(sum3) + vaddvq_f32(sum4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).abs();
        }
        -result
    }
}
/// L2-normalize `vector` for cosine similarity, NEON-accelerated.
///
/// Computes the squared norm 16 floats at a time and returns the input
/// unchanged if it is zero-length or already normalized (within tolerance,
/// see `is_length_zero_or_normalized`); otherwise divides every component
/// by the norm. Callers must ensure NEON is available.
#[cfg(target_feature = "neon")]
pub(crate) unsafe fn cosine_preprocess_neon(vector: DenseVector) -> DenseVector {
    unsafe {
        let n = vector.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr: *const f32 = vector.as_ptr();
        let mut sum1 = vdupq_n_f32(0.);
        let mut sum2 = vdupq_n_f32(0.);
        let mut sum3 = vdupq_n_f32(0.);
        let mut sum4 = vdupq_n_f32(0.);
        let mut i: usize = 0;
        while i < m {
            let d1 = vld1q_f32(ptr);
            sum1 = vfmaq_f32(sum1, d1, d1);
            let d2 = vld1q_f32(ptr.add(4));
            sum2 = vfmaq_f32(sum2, d2, d2);
            let d3 = vld1q_f32(ptr.add(8));
            sum3 = vfmaq_f32(sum3, d3, d3);
            let d4 = vld1q_f32(ptr.add(12));
            sum4 = vfmaq_f32(sum4, d4, d4);
            ptr = ptr.add(16);
            i += 16;
        }
        // `length` is the squared norm at this point.
        let mut length = vaddvq_f32(sum1) + vaddvq_f32(sum2) + vaddvq_f32(sum3) + vaddvq_f32(sum4);
        // Scalar tail for the remaining n - m elements.
        for v in vector.iter().take(n).skip(m) {
            length += v.powi(2);
        }
        if is_length_zero_or_normalized(length) {
            return vector;
        }
        let length = length.sqrt();
        vector.into_iter().map(|x| x / length).collect()
    }
}
/// Dot product of `v1` and `v2`, NEON-accelerated.
///
/// Processes 16 floats per iteration using four independent FMA accumulators;
/// the remainder is handled with scalar code. Callers must ensure NEON is
/// available and that `v2.len() >= v1.len()`.
#[cfg(target_feature = "neon")]
pub(crate) unsafe fn dot_similarity_neon(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum1 = vdupq_n_f32(0.);
        let mut sum2 = vdupq_n_f32(0.);
        let mut sum3 = vdupq_n_f32(0.);
        let mut sum4 = vdupq_n_f32(0.);
        let mut i: usize = 0;
        while i < m {
            sum1 = vfmaq_f32(sum1, vld1q_f32(ptr1), vld1q_f32(ptr2));
            sum2 = vfmaq_f32(sum2, vld1q_f32(ptr1.add(4)), vld1q_f32(ptr2.add(4)));
            sum3 = vfmaq_f32(sum3, vld1q_f32(ptr1.add(8)), vld1q_f32(ptr2.add(8)));
            sum4 = vfmaq_f32(sum4, vld1q_f32(ptr1.add(12)), vld1q_f32(ptr2.add(12)));
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = vaddvq_f32(sum1) + vaddvq_f32(sum2) + vaddvq_f32(sum3) + vaddvq_f32(sum4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i)) * (*ptr2.add(i));
        }
        result
    }
}
#[cfg(test)]
mod tests {
    /// Compare every NEON kernel against the scalar reference implementation
    /// on a 22-element input (exercises both the SIMD loop and the tail).
    #[cfg(target_feature = "neon")]
    #[test]
    fn test_spaces_neon() {
        use super::*;
        use crate::spaces::simple::*;
        if std::arch::is_aarch64_feature_detected!("neon") {
            let v1: Vec<f32> = vec![
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                26., 27., 28., 29., 30., 31.,
            ];
            let v2: Vec<f32> = vec![
                40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55.,
                56., 57., 58., 59., 60., 61.,
            ];
            let euclid_simd = unsafe { euclid_similarity_neon(&v1, &v2) };
            let euclid = euclid_similarity(&v1, &v2);
            assert_eq!(euclid_simd, euclid);
            let manhattan_simd = unsafe { manhattan_similarity_neon(&v1, &v2) };
            let manhattan = manhattan_similarity(&v1, &v2);
            assert_eq!(manhattan_simd, manhattan);
            let dot_simd = unsafe { dot_similarity_neon(&v1, &v2) };
            let dot = dot_similarity(&v1, &v2);
            assert_eq!(dot_simd, dot);
            let cosine_simd = unsafe { cosine_preprocess_neon(v1.clone()) };
            let cosine = cosine_preprocess(v1);
            assert_eq!(cosine_simd, cosine);
        } else {
            println!("neon test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/simple.rs | lib/segment/src/spaces/simple.rs | use common::types::ScoreType;
use super::metric::{Metric, MetricPostProcessing};
#[cfg(target_arch = "x86_64")]
use super::simple_avx::*;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use super::simple_neon::*;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use super::simple_sse::*;
use super::tools::is_length_zero_or_normalized;
use crate::data_types::vectors::{DenseVector, VectorElementType};
use crate::types::Distance;
// Dimensionality thresholds below which the SIMD paths are skipped in favor
// of the scalar implementations.
#[cfg(target_arch = "x86_64")]
pub(crate) const MIN_DIM_SIZE_AVX: usize = 32;
#[cfg(any(
    target_arch = "x86",
    target_arch = "x86_64",
    all(target_arch = "aarch64", target_feature = "neon")
))]
pub(crate) const MIN_DIM_SIZE_SIMD: usize = 16;

/// Dot-product (inner product) similarity.
#[derive(Clone)]
pub struct DotProductMetric;

/// Cosine similarity; vectors are L2-normalized during preprocessing.
#[derive(Clone)]
pub struct CosineMetric;

/// Euclidean (L2) distance.
#[derive(Clone)]
pub struct EuclidMetric;

/// Manhattan (L1) distance.
#[derive(Clone)]
pub struct ManhattanMetric;
impl Metric<VectorElementType> for EuclidMetric {
    fn distance() -> Distance {
        Distance::Euclid
    }

    /// Dispatch to the fastest available SIMD kernel (AVX > SSE > NEON),
    /// falling back to the scalar implementation for short vectors or when
    /// the CPU features are missing.
    fn similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { euclid_similarity_avx(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { euclid_similarity_sse(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { euclid_similarity_neon(v1, v2) };
            }
        }
        euclid_similarity(v1, v2)
    }

    /// Euclidean distance needs no preprocessing.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
impl MetricPostProcessing for EuclidMetric {
    /// Convert the internal score (negated squared distance) back into a
    /// plain Euclidean distance for the user.
    fn postprocess(score: ScoreType) -> ScoreType {
        score.abs().sqrt()
    }
}
impl Metric<VectorElementType> for ManhattanMetric {
    fn distance() -> Distance {
        Distance::Manhattan
    }

    /// Dispatch to the fastest available SIMD kernel (AVX > SSE > NEON),
    /// falling back to the scalar implementation for short vectors or when
    /// the CPU features are missing.
    fn similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { manhattan_similarity_avx(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { manhattan_similarity_sse(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { manhattan_similarity_neon(v1, v2) };
            }
        }
        manhattan_similarity(v1, v2)
    }

    /// Manhattan distance needs no preprocessing.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
impl MetricPostProcessing for ManhattanMetric {
    /// Convert the internal score (negated distance) back into a plain
    /// Manhattan distance for the user.
    fn postprocess(score: ScoreType) -> ScoreType {
        score.abs()
    }
}
impl Metric<VectorElementType> for DotProductMetric {
    fn distance() -> Distance {
        Distance::Dot
    }

    /// Dispatch to the fastest available SIMD kernel (AVX > SSE > NEON),
    /// falling back to the scalar implementation for short vectors or when
    /// the CPU features are missing.
    fn similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { dot_similarity_avx(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { dot_similarity_sse(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { dot_similarity_neon(v1, v2) };
            }
        }
        dot_similarity(v1, v2)
    }

    /// Dot product needs no preprocessing.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
impl MetricPostProcessing for DotProductMetric {
    /// Dot-product scores are used as-is; no transformation needed.
    fn postprocess(score: ScoreType) -> ScoreType {
        score
    }
}
/// Equivalent to DotProductMetric with normalization of the vectors in preprocessing.
impl Metric<VectorElementType> for CosineMetric {
    fn distance() -> Distance {
        Distance::Cosine
    }

    /// Scoring is a plain dot product; vectors are assumed to have been
    /// normalized by `preprocess` beforehand.
    fn similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
        DotProductMetric::similarity(v1, v2)
    }

    /// L2-normalize the vector, dispatching to the fastest available SIMD
    /// kernel (AVX > SSE > NEON) and falling back to the scalar version.
    fn preprocess(vector: DenseVector) -> DenseVector {
        #[cfg(target_arch = "x86_64")]
        {
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("fma")
                && vector.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { cosine_preprocess_avx(vector) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse") && vector.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { cosine_preprocess_sse(vector) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && vector.len() >= MIN_DIM_SIZE_SIMD
            {
                return unsafe { cosine_preprocess_neon(vector) };
            }
        }
        cosine_preprocess(vector)
    }
}
impl MetricPostProcessing for CosineMetric {
    /// Cosine scores are used as-is; no transformation needed.
    fn postprocess(score: ScoreType) -> ScoreType {
        score
    }
}
/// Negated squared Euclidean distance between two dense vectors.
///
/// Larger scores mean closer vectors (0.0 for identical ones). If the slices
/// differ in length, only the common prefix is compared.
pub fn euclid_similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
    let mut sum_sq: ScoreType = 0.0;
    for (a, b) in v1.iter().zip(v2) {
        let diff = a - b;
        sum_sq += diff * diff;
    }
    -sum_sq
}
/// Negated Manhattan (L1) distance between two dense vectors.
///
/// Larger scores mean closer vectors (0.0 for identical ones). If the slices
/// differ in length, only the common prefix is compared.
pub fn manhattan_similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
    let mut total: ScoreType = 0.0;
    for (a, b) in v1.iter().zip(v2) {
        total += (a - b).abs();
    }
    -total
}
/// L2-normalize a vector for cosine similarity (scalar reference version).
///
/// Returns the input unchanged when it has zero length or is already
/// normalized (within tolerance, see `is_length_zero_or_normalized`);
/// otherwise divides every component by the norm.
pub fn cosine_preprocess(vector: DenseVector) -> DenseVector {
    let norm_sq = vector.iter().map(|x| x * x).sum::<f32>();
    if is_length_zero_or_normalized(norm_sq) {
        return vector;
    }
    let norm = norm_sq.sqrt();
    vector.iter().map(|x| x / norm).collect()
}
/// Dot product of two dense vectors (scalar reference version).
///
/// If the slices differ in length, only the common prefix is used.
pub fn dot_similarity(v1: &[VectorElementType], v2: &[VectorElementType]) -> ScoreType {
    v1.iter()
        .zip(v2.iter())
        .fold(0.0, |acc, (a, b)| acc + a * b)
}
#[cfg(test)]
mod tests {
    use rand::Rng;
    use super::*;

    /// The all-zero vector cannot be normalized and must pass through unchanged.
    #[test]
    fn test_cosine_preprocessing() {
        let res = <CosineMetric as Metric<VectorElementType>>::preprocess(vec![0.0, 0.0, 0.0, 0.0]);
        assert_eq!(res, vec![0.0, 0.0, 0.0, 0.0]);
    }

    /// If we preprocess a vector multiple times, we expect the same result.
    /// Renormalization should not produce something different.
    #[test]
    fn test_cosine_stable_preprocessing() {
        const DIM: usize = 1500;
        const ATTEMPTS: usize = 100;
        let mut rng = rand::rng();
        for attempt in 0..ATTEMPTS {
            // Random per-attempt value range straddling zero.
            let range = rng.random_range(-2.5..=0.0)..=rng.random_range(0.0..2.5);
            let vector: Vec<_> = (0..DIM).map(|_| rng.random_range(range.clone())).collect();
            // Preprocess and re-preprocess
            let preprocess1 = <CosineMetric as Metric<VectorElementType>>::preprocess(vector);
            let preprocess2: DenseVector =
                <CosineMetric as Metric<VectorElementType>>::preprocess(preprocess1.clone());
            // All following preprocess attempts must be the same
            assert_eq!(
                preprocess1, preprocess2,
                "renormalization is not stable (vector #{attempt})"
            );
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/simple_sse.rs | lib/segment/src/spaces/simple_sse.rs | #[cfg(target_arch = "x86")]
use std::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
use common::types::ScoreType;
use super::tools::is_length_zero_or_normalized;
use crate::data_types::vectors::{DenseVector, VectorElementType};
/// Horizontally sum the four `f32` lanes of an SSE register.
#[target_feature(enable = "sse")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn hsum128_ps_sse(x: __m128) -> f32 {
    // Fold the high pair onto the low pair, then lane 1 onto lane 0.
    let x64: __m128 = _mm_add_ps(x, _mm_movehl_ps(x, x));
    let x32: __m128 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
    _mm_cvtss_f32(x32)
}
/// Negated squared Euclidean distance between `v1` and `v2`, SSE-accelerated.
///
/// Processes 16 floats per iteration using four independent accumulators;
/// the remainder is handled with scalar code. Callers must ensure SSE is
/// available and that `v2.len() >= v1.len()`.
#[target_feature(enable = "sse")]
pub(crate) unsafe fn euclid_similarity_sse(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum128_1: __m128 = _mm_setzero_ps();
        let mut sum128_2: __m128 = _mm_setzero_ps();
        let mut sum128_3: __m128 = _mm_setzero_ps();
        let mut sum128_4: __m128 = _mm_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            let sub128_1 = _mm_sub_ps(_mm_loadu_ps(ptr1), _mm_loadu_ps(ptr2));
            sum128_1 = _mm_add_ps(_mm_mul_ps(sub128_1, sub128_1), sum128_1);
            let sub128_2 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(4)), _mm_loadu_ps(ptr2.add(4)));
            sum128_2 = _mm_add_ps(_mm_mul_ps(sub128_2, sub128_2), sum128_2);
            let sub128_3 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(8)), _mm_loadu_ps(ptr2.add(8)));
            sum128_3 = _mm_add_ps(_mm_mul_ps(sub128_3, sub128_3), sum128_3);
            let sub128_4 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(12)), _mm_loadu_ps(ptr2.add(12)));
            sum128_4 = _mm_add_ps(_mm_mul_ps(sub128_4, sub128_4), sum128_4);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = hsum128_ps_sse(sum128_1)
            + hsum128_ps_sse(sum128_2)
            + hsum128_ps_sse(sum128_3)
            + hsum128_ps_sse(sum128_4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).powi(2);
        }
        -result
    }
}
/// Negated Manhattan (L1) distance between `v1` and `v2`, SSE-accelerated.
///
/// Processes 16 floats per iteration using four independent accumulators;
/// the remainder is handled with scalar code. Callers must ensure SSE is
/// available and that `v2.len() >= v1.len()`.
#[target_feature(enable = "sse")]
pub(crate) unsafe fn manhattan_similarity_sse(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        // -0.0 has only the sign bit set; `andnot` with it clears the sign bit,
        // which mimics abs() on each lane.
        let mask: __m128 = _mm_set1_ps(-0.0f32);
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum128_1: __m128 = _mm_setzero_ps();
        let mut sum128_2: __m128 = _mm_setzero_ps();
        let mut sum128_3: __m128 = _mm_setzero_ps();
        let mut sum128_4: __m128 = _mm_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            let sub128_1 = _mm_sub_ps(_mm_loadu_ps(ptr1), _mm_loadu_ps(ptr2));
            sum128_1 = _mm_add_ps(_mm_andnot_ps(mask, sub128_1), sum128_1);
            let sub128_2 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(4)), _mm_loadu_ps(ptr2.add(4)));
            sum128_2 = _mm_add_ps(_mm_andnot_ps(mask, sub128_2), sum128_2);
            let sub128_3 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(8)), _mm_loadu_ps(ptr2.add(8)));
            sum128_3 = _mm_add_ps(_mm_andnot_ps(mask, sub128_3), sum128_3);
            let sub128_4 = _mm_sub_ps(_mm_loadu_ps(ptr1.add(12)), _mm_loadu_ps(ptr2.add(12)));
            sum128_4 = _mm_add_ps(_mm_andnot_ps(mask, sub128_4), sum128_4);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = hsum128_ps_sse(sum128_1)
            + hsum128_ps_sse(sum128_2)
            + hsum128_ps_sse(sum128_3)
            + hsum128_ps_sse(sum128_4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).abs();
        }
        -result
    }
}
/// L2-normalize `vector` for cosine similarity, SSE-accelerated.
///
/// Computes the squared norm 16 floats at a time and returns the input
/// unchanged if it is zero-length or already normalized (within tolerance,
/// see `is_length_zero_or_normalized`); otherwise divides every component
/// by the norm. Callers must ensure SSE is available.
#[target_feature(enable = "sse")]
pub(crate) unsafe fn cosine_preprocess_sse(vector: DenseVector) -> DenseVector {
    unsafe {
        let n = vector.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr: *const f32 = vector.as_ptr();
        let mut sum128_1: __m128 = _mm_setzero_ps();
        let mut sum128_2: __m128 = _mm_setzero_ps();
        let mut sum128_3: __m128 = _mm_setzero_ps();
        let mut sum128_4: __m128 = _mm_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            let m128_1 = _mm_loadu_ps(ptr);
            sum128_1 = _mm_add_ps(_mm_mul_ps(m128_1, m128_1), sum128_1);
            let m128_2 = _mm_loadu_ps(ptr.add(4));
            sum128_2 = _mm_add_ps(_mm_mul_ps(m128_2, m128_2), sum128_2);
            let m128_3 = _mm_loadu_ps(ptr.add(8));
            sum128_3 = _mm_add_ps(_mm_mul_ps(m128_3, m128_3), sum128_3);
            let m128_4 = _mm_loadu_ps(ptr.add(12));
            sum128_4 = _mm_add_ps(_mm_mul_ps(m128_4, m128_4), sum128_4);
            ptr = ptr.add(16);
            i += 16;
        }
        // `length` is the squared norm at this point.
        let mut length = hsum128_ps_sse(sum128_1)
            + hsum128_ps_sse(sum128_2)
            + hsum128_ps_sse(sum128_3)
            + hsum128_ps_sse(sum128_4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            length += (*ptr.add(i)).powi(2);
        }
        if is_length_zero_or_normalized(length) {
            return vector;
        }
        length = length.sqrt();
        vector.into_iter().map(|x| x / length).collect()
    }
}
/// Dot product of `v1` and `v2`, SSE-accelerated.
///
/// Processes 16 floats per iteration using four independent accumulators;
/// the remainder is handled with scalar code. Callers must ensure SSE is
/// available and that `v2.len() >= v1.len()`.
#[target_feature(enable = "sse")]
pub(crate) unsafe fn dot_similarity_sse(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 16 that fits; the SIMD loop stops there.
        let m = n - (n % 16);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum128_1: __m128 = _mm_setzero_ps();
        let mut sum128_2: __m128 = _mm_setzero_ps();
        let mut sum128_3: __m128 = _mm_setzero_ps();
        let mut sum128_4: __m128 = _mm_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            sum128_1 = _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(ptr1), _mm_loadu_ps(ptr2)), sum128_1);
            sum128_2 = _mm_add_ps(
                _mm_mul_ps(_mm_loadu_ps(ptr1.add(4)), _mm_loadu_ps(ptr2.add(4))),
                sum128_2,
            );
            sum128_3 = _mm_add_ps(
                _mm_mul_ps(_mm_loadu_ps(ptr1.add(8)), _mm_loadu_ps(ptr2.add(8))),
                sum128_3,
            );
            sum128_4 = _mm_add_ps(
                _mm_mul_ps(_mm_loadu_ps(ptr1.add(12)), _mm_loadu_ps(ptr2.add(12))),
                sum128_4,
            );
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            i += 16;
        }
        // Horizontal reduction of the four vector accumulators.
        let mut result = hsum128_ps_sse(sum128_1)
            + hsum128_ps_sse(sum128_2)
            + hsum128_ps_sse(sum128_3)
            + hsum128_ps_sse(sum128_4);
        // Scalar tail for the remaining n - m elements.
        for i in 0..n - m {
            result += (*ptr1.add(i)) * (*ptr2.add(i));
        }
        result
    }
}
#[cfg(test)]
mod tests {
    /// Checks each SSE kernel against its scalar reference implementation.
    #[test]
    fn test_spaces_sse() {
        use super::*;
        use crate::spaces::simple::*;
        // Skip at runtime on hardware without SSE support.
        if is_x86_feature_detected!("sse") {
            // 54 elements: three full 16-float SIMD iterations plus a
            // 6-element scalar tail, so both code paths are exercised.
            let v1: Vec<f32> = vec![
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                26., 27., 28., 29., 30., 31.,
            ];
            let v2: Vec<f32> = vec![
                40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                56., 57., 58., 59., 60., 61.,
            ];
            // Results are compared with `assert_eq!`: the SIMD versions are
            // expected to match the scalar ones exactly for these inputs.
            let euclid_simd = unsafe { euclid_similarity_sse(&v1, &v2) };
            let euclid = euclid_similarity(&v1, &v2);
            assert_eq!(euclid_simd, euclid);
            let manhattan_simd = unsafe { manhattan_similarity_sse(&v1, &v2) };
            let manhattan = manhattan_similarity(&v1, &v2);
            assert_eq!(manhattan_simd, manhattan);
            let dot_simd = unsafe { dot_similarity_sse(&v1, &v2) };
            let dot = dot_similarity(&v1, &v2);
            assert_eq!(dot_simd, dot);
            let cosine_simd = unsafe { cosine_preprocess_sse(v1.clone()) };
            let cosine = cosine_preprocess(v1);
            assert_eq!(cosine_simd, cosine);
        } else {
            println!("sse test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/simple_avx.rs | lib/segment/src/spaces/simple_avx.rs | use std::arch::x86_64::*;
use common::types::ScoreType;
use super::tools::is_length_zero_or_normalized;
use crate::data_types::vectors::{DenseVector, VectorElementType};
/// Horizontally sums all eight `f32` lanes of a 256-bit register.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX.
#[target_feature(enable = "avx")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn hsum256_ps_avx(x: __m256) -> f32 {
    // Fold the upper 128-bit half onto the lower half: 8 lanes -> 4 lanes.
    let lr_sum: __m128 = _mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
    // Horizontal pairwise add: lanes 0 and 1 now hold the two partial sums.
    let hsum = _mm_hadd_ps(lr_sum, lr_sum);
    // `_mm_extract_ps` returns the raw lane bits as i32; reinterpret as f32.
    let p1 = _mm_extract_ps(hsum, 0);
    let p2 = _mm_extract_ps(hsum, 1);
    f32::from_bits(p1 as u32) + f32::from_bits(p2 as u32)
}
/// Calculates the hsum (horizontal sum) of four 32-byte registers.
///
/// The registers are first combined pairwise with vector adds, then a single
/// horizontal sum reduces the combined register to a scalar.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX.
#[target_feature(enable = "avx")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn four_way_hsum(a: __m256, b: __m256, c: __m256, d: __m256) -> f32 {
    unsafe {
        let sum1 = _mm256_add_ps(a, b);
        let sum2 = _mm256_add_ps(c, d);
        let total = _mm256_add_ps(sum1, sum2);
        hsum256_ps_avx(total)
    }
}
/// Negated squared Euclidean distance between `v1` and `v2` using AVX + FMA.
///
/// Processes 32 floats per iteration in four independent `__m256`
/// accumulators (squared differences folded in via fused multiply-add), then
/// finishes the remaining `n % 32` elements with a scalar loop. The distance
/// is negated so that a greater score means closer vectors.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX and FMA, and that `v2` is at
/// least as long as `v1` — both slices are read up to `v1.len()` elements.
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
pub(crate) unsafe fn euclid_similarity_avx(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 32 not exceeding n; the SIMD loop covers [0, m).
        let m = n - (n % 32);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        // Four independent accumulators so the FMAs can pipeline.
        let mut sum256_1: __m256 = _mm256_setzero_ps();
        let mut sum256_2: __m256 = _mm256_setzero_ps();
        let mut sum256_3: __m256 = _mm256_setzero_ps();
        let mut sum256_4: __m256 = _mm256_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            // (a - b)^2 accumulated via fused multiply-add, 8 lanes at a time.
            let sub256_1: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(0)), _mm256_loadu_ps(ptr2.add(0)));
            sum256_1 = _mm256_fmadd_ps(sub256_1, sub256_1, sum256_1);
            let sub256_2: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(8)), _mm256_loadu_ps(ptr2.add(8)));
            sum256_2 = _mm256_fmadd_ps(sub256_2, sub256_2, sum256_2);
            let sub256_3: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(16)), _mm256_loadu_ps(ptr2.add(16)));
            sum256_3 = _mm256_fmadd_ps(sub256_3, sub256_3, sum256_3);
            let sub256_4: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(24)), _mm256_loadu_ps(ptr2.add(24)));
            sum256_4 = _mm256_fmadd_ps(sub256_4, sub256_4, sum256_4);
            ptr1 = ptr1.add(32);
            ptr2 = ptr2.add(32);
            i += 32;
        }
        let mut result = four_way_hsum(sum256_1, sum256_2, sum256_3, sum256_4);
        // Scalar tail for the remaining n - m (< 32) elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).powi(2);
        }
        // Negated so that a greater score means closer vectors.
        -result
    }
}
/// Negated Manhattan (L1) distance between `v1` and `v2` using AVX.
///
/// Processes 32 floats per iteration in four independent `__m256`
/// accumulators, then finishes the remaining `n % 32` elements with a scalar
/// loop. The distance is negated so that a greater score means closer vectors.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX and FMA, and that `v2` is at
/// least as long as `v1` — both slices are read up to `v1.len()` elements.
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
pub(crate) unsafe fn manhattan_similarity_avx(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        // -0.0 has only the sign bit set; `andnot` with it clears the sign
        // bit of each lane, i.e. a branch-free abs().
        let mask: __m256 = _mm256_set1_ps(-0.0f32); // 1 << 31 used to clear sign bit to mimic abs
        let n = v1.len();
        // Largest multiple of 32 not exceeding n; the SIMD loop covers [0, m).
        let m = n - (n % 32);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum256_1: __m256 = _mm256_setzero_ps();
        let mut sum256_2: __m256 = _mm256_setzero_ps();
        let mut sum256_3: __m256 = _mm256_setzero_ps();
        let mut sum256_4: __m256 = _mm256_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            // |a - b| accumulated 8 lanes at a time (unaligned loads).
            let sub256_1: __m256 = _mm256_sub_ps(_mm256_loadu_ps(ptr1), _mm256_loadu_ps(ptr2));
            sum256_1 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_1), sum256_1);
            let sub256_2: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(8)), _mm256_loadu_ps(ptr2.add(8)));
            sum256_2 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_2), sum256_2);
            let sub256_3: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(16)), _mm256_loadu_ps(ptr2.add(16)));
            sum256_3 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_3), sum256_3);
            let sub256_4: __m256 =
                _mm256_sub_ps(_mm256_loadu_ps(ptr1.add(24)), _mm256_loadu_ps(ptr2.add(24)));
            sum256_4 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_4), sum256_4);
            ptr1 = ptr1.add(32);
            ptr2 = ptr2.add(32);
            i += 32;
        }
        let mut result = four_way_hsum(sum256_1, sum256_2, sum256_3, sum256_4);
        // Scalar tail for the remaining n - m (< 32) elements.
        for i in 0..n - m {
            result += (*ptr1.add(i) - *ptr2.add(i)).abs();
        }
        // Negated so that a greater score means closer vectors.
        -result
    }
}
/// Normalizes `vector` for cosine similarity using AVX + FMA intrinsics.
///
/// Accumulates the sum of squared components 32 floats per iteration in four
/// independent `__m256` accumulators (via fused multiply-add), then finishes
/// the remaining `n % 32` elements with a scalar loop. If the squared length
/// is (near) zero or already within the normalization tolerance of
/// `is_length_zero_or_normalized`, the vector is returned unchanged;
/// otherwise every component is divided by the length.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX and FMA.
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
pub(crate) unsafe fn cosine_preprocess_avx(vector: DenseVector) -> DenseVector {
    unsafe {
        let n = vector.len();
        // Largest multiple of 32 not exceeding n; the SIMD loop covers [0, m).
        let m = n - (n % 32);
        let mut ptr: *const f32 = vector.as_ptr();
        let mut sum256_1: __m256 = _mm256_setzero_ps();
        let mut sum256_2: __m256 = _mm256_setzero_ps();
        let mut sum256_3: __m256 = _mm256_setzero_ps();
        let mut sum256_4: __m256 = _mm256_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            // x * x folded into the accumulator with one FMA per 8 lanes.
            let m256_1 = _mm256_loadu_ps(ptr);
            sum256_1 = _mm256_fmadd_ps(m256_1, m256_1, sum256_1);
            let m256_2 = _mm256_loadu_ps(ptr.add(8));
            sum256_2 = _mm256_fmadd_ps(m256_2, m256_2, sum256_2);
            let m256_3 = _mm256_loadu_ps(ptr.add(16));
            sum256_3 = _mm256_fmadd_ps(m256_3, m256_3, sum256_3);
            let m256_4 = _mm256_loadu_ps(ptr.add(24));
            sum256_4 = _mm256_fmadd_ps(m256_4, m256_4, sum256_4);
            ptr = ptr.add(32);
            i += 32;
        }
        let mut length = four_way_hsum(sum256_1, sum256_2, sum256_3, sum256_4);
        // Scalar tail for the remaining n - m (< 32) elements.
        for i in 0..n - m {
            length += (*ptr.add(i)).powi(2);
        }
        if is_length_zero_or_normalized(length) {
            return vector;
        }
        length = length.sqrt();
        vector.into_iter().map(|x| x / length).collect()
    }
}
/// Dot-product similarity of `v1` and `v2` using AVX + FMA intrinsics.
///
/// Processes 32 floats per iteration in four independent `__m256`
/// accumulators (one fused multiply-add per 8 lanes), then finishes the
/// remaining `n % 32` elements with a scalar loop. A greater result means
/// closer vectors.
///
/// # Safety
///
/// The caller must ensure the CPU supports AVX and FMA, and that `v2` is at
/// least as long as `v1` — both slices are read up to `v1.len()` elements.
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
pub(crate) unsafe fn dot_similarity_avx(
    v1: &[VectorElementType],
    v2: &[VectorElementType],
) -> ScoreType {
    unsafe {
        let n = v1.len();
        // Largest multiple of 32 not exceeding n; the SIMD loop covers [0, m).
        let m = n - (n % 32);
        let mut ptr1: *const f32 = v1.as_ptr();
        let mut ptr2: *const f32 = v2.as_ptr();
        let mut sum256_1: __m256 = _mm256_setzero_ps();
        let mut sum256_2: __m256 = _mm256_setzero_ps();
        let mut sum256_3: __m256 = _mm256_setzero_ps();
        let mut sum256_4: __m256 = _mm256_setzero_ps();
        let mut i: usize = 0;
        while i < m {
            // a * b folded into the accumulator with one FMA per 8 lanes.
            sum256_1 = _mm256_fmadd_ps(_mm256_loadu_ps(ptr1), _mm256_loadu_ps(ptr2), sum256_1);
            sum256_2 = _mm256_fmadd_ps(
                _mm256_loadu_ps(ptr1.add(8)),
                _mm256_loadu_ps(ptr2.add(8)),
                sum256_2,
            );
            sum256_3 = _mm256_fmadd_ps(
                _mm256_loadu_ps(ptr1.add(16)),
                _mm256_loadu_ps(ptr2.add(16)),
                sum256_3,
            );
            sum256_4 = _mm256_fmadd_ps(
                _mm256_loadu_ps(ptr1.add(24)),
                _mm256_loadu_ps(ptr2.add(24)),
                sum256_4,
            );
            ptr1 = ptr1.add(32);
            ptr2 = ptr2.add(32);
            i += 32;
        }
        let mut result = four_way_hsum(sum256_1, sum256_2, sum256_3, sum256_4);
        // Scalar tail for the remaining n - m (< 32) elements.
        for i in 0..n - m {
            result += (*ptr1.add(i)) * (*ptr2.add(i));
        }
        result
    }
}
#[cfg(test)]
mod tests {
    /// Checks each AVX kernel against its scalar reference implementation.
    #[test]
    fn test_spaces_avx() {
        use super::*;
        use crate::spaces::simple::*;
        // Skip at runtime on hardware without AVX and FMA support.
        if is_x86_feature_detected!("avx") && is_x86_feature_detected!("fma") {
            // 70 elements: two full 32-float SIMD iterations plus a
            // 6-element scalar tail, so both code paths are exercised.
            let v1: Vec<f32> = vec![
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                26., 27., 28., 29., 30., 31.,
            ];
            let v2: Vec<f32> = vec![
                40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                56., 57., 58., 59., 60., 61.,
            ];
            // Results are compared with `assert_eq!`: the SIMD versions are
            // expected to match the scalar ones exactly for these inputs.
            let euclid_simd = unsafe { euclid_similarity_avx(&v1, &v2) };
            let euclid = euclid_similarity(&v1, &v2);
            assert_eq!(euclid_simd, euclid);
            let manhattan_simd = unsafe { manhattan_similarity_avx(&v1, &v2) };
            let manhattan = manhattan_similarity(&v1, &v2);
            assert_eq!(manhattan_simd, manhattan);
            let dot_simd = unsafe { dot_similarity_avx(&v1, &v2) };
            let dot = dot_similarity(&v1, &v2);
            assert_eq!(dot_simd, dot);
            let cosine_simd = unsafe { cosine_preprocess_avx(v1.clone()) };
            let cosine = cosine_preprocess(v1);
            assert_eq!(cosine_simd, cosine);
        } else {
            println!("avx test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/mod.rs | lib/segment/src/spaces/mod.rs | pub mod metric;
pub mod simple;
pub mod tools;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub mod simple_sse;
#[cfg(target_arch = "x86_64")]
pub mod simple_avx;
pub mod metric_f16;
pub mod metric_uint;
#[cfg(target_arch = "aarch64")]
pub mod simple_neon;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/tools.rs | lib/segment/src/spaces/tools.rs | use std::cmp::Reverse;
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
/// Check if the length is zero or normalized enough.
///
/// When checking if normalized, we don't require exactly 1.0 but accept any
/// value close enough. This keeps repeated normalization passes stable in the
/// presence of floating point error.
///
/// The normalization tolerance is 1.0e-6: large enough to make renormalizing
/// stable, small enough not to affect regular normalizations.
#[inline]
pub fn is_length_zero_or_normalized(length: f32) -> bool {
    // Treat anything below machine epsilon as zero length.
    if length < f32::EPSILON {
        return true;
    }
    // Otherwise, accept lengths within the tolerance of 1.0.
    (length - 1.0).abs() <= 1.0e-6
}
/// Returns the `top` smallest elements of `elements`, smallest first.
pub fn peek_top_smallest_iterable<I, E: Ord>(elements: I, top: usize) -> Vec<E>
where
    I: IntoIterator<Item = E>,
{
    match top {
        0 => Vec::new(),
        _ => {
            // Wrapping every element in `Reverse` flips the ordering, turning
            // the fixed-length max-heap queue into a min-heap so that it
            // retains the `top` smallest values.
            let mut queue = FixedLengthPriorityQueue::new(top);
            elements.into_iter().for_each(|element| {
                queue.push(Reverse(element));
            });
            // Unwrap the `Reverse` wrappers before returning.
            queue
                .into_sorted_vec()
                .into_iter()
                .map(|Reverse(inner)| inner)
                .collect()
        }
    }
}
/// Returns the `top` largest elements of `elements`, largest first.
pub fn peek_top_largest_iterable<I, E: Ord>(elements: I, top: usize) -> Vec<E>
where
    I: IntoIterator<Item = E>,
{
    match top {
        0 => Vec::new(),
        _ => {
            // The fixed-length queue is a max-heap: once full it evicts the
            // smallest element, so only the `top` largest values survive.
            let mut queue = FixedLengthPriorityQueue::new(top);
            elements.into_iter().for_each(|element| {
                queue.push(element);
            });
            queue.into_sorted_vec()
        }
    }
}
/// Convenience wrapper: the `top` largest entries of a slice, cloned out.
pub fn peek_top_scores<E: Ord + Clone>(scores: &[E], top: usize) -> Vec<E> {
    let owned_scores = scores.iter().cloned();
    peek_top_largest_iterable(owned_scores, top)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The three largest scores come back in descending order.
    #[test]
    fn test_peek_top() {
        let values = vec![10, 20, 40, 5, 100, 33, 84, 65, 20, 43, 44, 42];
        let top_three = peek_top_scores(&values, 3);
        assert_eq!(top_three, vec![100, 84, 65]);
    }

    /// The three smallest scores come back in ascending order.
    #[test]
    fn test_peek_top_rev() {
        let values = vec![10, 20, 40, 5, 100, 33, 84, 65, 20, 43, 44, 42];
        let bottom_three = peek_top_smallest_iterable(values, 3);
        assert_eq!(bottom_three, vec![5, 10, 20]);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric.rs | lib/segment/src/spaces/metric.rs | use common::types::ScoreType;
use crate::data_types::primitive::PrimitiveVectorElement;
use crate::data_types::vectors::DenseVector;
use crate::types::Distance;
/// Defines how vectors of element type `T` are compared.
///
/// Ties together the `Distance` identifier, the raw similarity computation,
/// and any preprocessing applied before a vector is stored.
pub trait Metric<T: PrimitiveVectorElement> {
    /// The `Distance` variant this metric implements.
    fn distance() -> Distance;
    /// Raw similarity of two vectors; the greater the value, the closer the
    /// vectors.
    ///
    /// NOTE(review): both slices appear to be assumed equal-length — confirm
    /// at call sites.
    fn similarity(v1: &[T], v2: &[T]) -> ScoreType;
    /// Necessary vector transformations performed before adding it to the
    /// collection (like normalization).
    /// If no transformation is needed, returns the same vector.
    fn preprocess(vector: DenseVector) -> DenseVector;
}
/// Converts an internal similarity score into the value shown to users.
pub trait MetricPostProcessing {
    /// Correct the raw metric score for displaying (e.g. undoing internal
    /// transformations such as negation used to make "greater is closer"
    /// hold — see the `-result` convention in the distance kernels).
    fn postprocess(score: ScoreType) -> ScoreType;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.