| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/actix_telemetry.rs | src/actix/actix_telemetry.rs | use std::future::{Ready, ready};
use std::sync::Arc;
use actix_web::Error;
use actix_web::dev::{Service, ServiceRequest, ServiceResponse, Transform};
use futures_util::future::LocalBoxFuture;
use parking_lot::Mutex;
use crate::common::telemetry_ops::requests_telemetry::{
ActixTelemetryCollector, ActixWorkerTelemetryCollector,
};
pub struct ActixTelemetryService<S> {
service: S,
telemetry_data: Arc<Mutex<ActixWorkerTelemetryCollector>>,
}
pub struct ActixTelemetryTransform {
telemetry_collector: Arc<Mutex<ActixTelemetryCollector>>,
}
/// Actix telemetry service. It hooks into every request and records the response status code.
///
/// More about actix services, with a similar example:
/// <https://actix.rs/docs/middleware/>
impl<S, B> Service<ServiceRequest> for ActixTelemetryService<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<B>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_web::dev::forward_ready!(service);
fn call(&self, request: ServiceRequest) -> Self::Future {
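// Group telemetry by the matched route pattern (e.g. `/collections/{name}/points`)
// rather than the raw path, falling back to "unknown" for unmatched routes.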
let match_pattern = request
.match_pattern()
.unwrap_or_else(|| "unknown".to_owned());
let request_key = format!("{} {}", request.method(), match_pattern);
let future = self.service.call(request);
let telemetry_data = self.telemetry_data.clone();
Box::pin(async move {
let instant = std::time::Instant::now();
let response = future.await?;
let status = response.response().status().as_u16();
telemetry_data
.lock()
.add_response(request_key, status, instant);
Ok(response)
})
}
}
impl ActixTelemetryTransform {
pub fn new(telemetry_collector: Arc<Mutex<ActixTelemetryCollector>>) -> Self {
Self {
telemetry_collector,
}
}
}
/// Actix telemetry transform. It's a builder for an actix telemetry service.
///
/// More about actix transforms, with a similar example:
/// <https://actix.rs/docs/middleware/>
impl<S, B> Transform<S, ServiceRequest> for ActixTelemetryTransform
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<B>;
type Error = Error;
type Transform = ActixTelemetryService<S>;
type InitError = ();
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(ActixTelemetryService {
service,
telemetry_data: self
.telemetry_collector
.lock()
.create_web_worker_telemetry(),
}))
}
}
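// Illustrative sketch (not part of the original file): how this transform could be
// attached to an actix-web `App`, assuming a shared `ActixTelemetryCollector`
// wrapped in `Arc<Mutex<...>>` and named `collector`:
//
// App::new()
//     .wrap(ActixTelemetryTransform::new(collector.clone()))
//     .service(...);
//
// `new_transform` is then invoked per worker, and each resulting
// `ActixTelemetryService` reports into its own worker-level collector.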
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/count_api.rs | src/actix/api/count_api.rs | use actix_web::{Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::CountRequest;
use storage::content_manager::collection_verification::check_strict_mode;
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use super::CollectionPath;
use crate::actix::api::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, get_request_hardware_counter, process_response_error};
use crate::common::query::do_count_points;
use crate::settings::ServiceConfig;
#[post("/collections/{name}/points/count")]
async fn count_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<CountRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let CountRequest {
count_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&count_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selector = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => ShardSelectorInternal::from(shard_keys),
};
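// Per-request hardware usage accounting; whether usage is reported back over REST
// is controlled by the service's hardware reporting setting.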
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_count_points(
dispatcher.toc(&access, &pass),
&collection.name,
count_request,
params.consistency,
params.timeout(),
shard_selector,
access,
request_hw_counter.get_counter(),
)
.await;
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/cluster_api.rs | src/actix/api/cluster_api.rs | use std::future::Future;
use actix_web::{HttpResponse, delete, get, post, put, web};
use actix_web_validator::Query;
use collection::operations::verification::new_unchecked_verification_pass;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use storage::content_manager::consensus_ops::ConsensusOperations;
use storage::content_manager::errors::StorageError;
use storage::dispatcher::Dispatcher;
use storage::rbac::AccessRequirements;
use validator::Validate;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers;
#[derive(Debug, Deserialize, Validate)]
struct QueryParams {
#[serde(default)]
force: bool,
#[serde(default)]
#[validate(range(min = 1))]
timeout: Option<u64>,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct MetadataParams {
#[serde(default)]
pub wait: bool,
}
#[get("/cluster")]
fn cluster_status(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> impl Future<Output = HttpResponse> {
helpers::time(async move {
access.check_global_access(AccessRequirements::new())?;
Ok(dispatcher.cluster_status())
})
}
#[post("/cluster/recover")]
fn recover_current_peer(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> impl Future<Output = HttpResponse> {
// Not a collection level request.
let pass = new_unchecked_verification_pass();
helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
dispatcher.toc(&access, &pass).request_snapshot()?;
Ok(true)
})
}
#[delete("/cluster/peer/{peer_id}")]
fn remove_peer(
dispatcher: web::Data<Dispatcher>,
peer_id: web::Path<u64>,
Query(params): Query<QueryParams>,
ActixAccess(access): ActixAccess,
) -> impl Future<Output = HttpResponse> {
// Not a collection level request.
let pass = new_unchecked_verification_pass();
helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
let dispatcher = dispatcher.into_inner();
let toc = dispatcher.toc(&access, &pass);
let peer_id = peer_id.into_inner();
let has_shards = toc.peer_has_shards(peer_id).await;
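// Refuse to remove a peer that still has shards placed on it unless `force` is set.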
if !params.force && has_shards {
return Err(StorageError::BadRequest {
description: format!("Cannot remove peer {peer_id} as there are shards on it"),
});
}
match dispatcher.consensus_state() {
Some(consensus_state) => {
consensus_state
.propose_consensus_op_with_await(
ConsensusOperations::RemovePeer(peer_id),
params.timeout.map(std::time::Duration::from_secs),
)
.await
}
None => Err(StorageError::BadRequest {
description: "Distributed mode disabled.".to_string(),
}),
}
})
}
#[get("/cluster/metadata/keys")]
async fn get_cluster_metadata_keys(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
helpers::time(async move {
access.check_global_access(AccessRequirements::new())?;
let keys = dispatcher
.consensus_state()
.ok_or_else(|| StorageError::service_error("Qdrant is running in standalone mode"))?
.persistent
.read()
.get_cluster_metadata_keys();
Ok(keys)
})
.await
}
#[get("/cluster/metadata/keys/{key}")]
async fn get_cluster_metadata_key(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
key: web::Path<String>,
) -> HttpResponse {
helpers::time(async move {
access.check_global_access(AccessRequirements::new())?;
let value = dispatcher
.consensus_state()
.ok_or_else(|| StorageError::service_error("Qdrant is running in standalone mode"))?
.persistent
.read()
.get_cluster_metadata_key(key.as_ref());
Ok(value)
})
.await
}
#[put("/cluster/metadata/keys/{key}")]
async fn update_cluster_metadata_key(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
key: web::Path<String>,
params: Query<MetadataParams>,
value: web::Json<serde_json::Value>,
) -> HttpResponse {
// Not a collection level request.
let pass = new_unchecked_verification_pass();
helpers::time(async move {
let toc = dispatcher.toc(&access, &pass);
access.check_global_access(AccessRequirements::new().write())?;
toc.update_cluster_metadata(key.into_inner(), value.into_inner(), params.wait)
.await?;
Ok(true)
})
.await
}
#[delete("/cluster/metadata/keys/{key}")]
async fn delete_cluster_metadata_key(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
key: web::Path<String>,
params: Query<MetadataParams>,
) -> HttpResponse {
// Not a collection level request.
let pass = new_unchecked_verification_pass();
helpers::time(async move {
let toc = dispatcher.toc(&access, &pass);
access.check_global_access(AccessRequirements::new().write())?;
toc.update_cluster_metadata(key.into_inner(), serde_json::Value::Null, params.wait)
.await?;
Ok(true)
})
.await
}
// Configure services
pub fn config_cluster_api(cfg: &mut web::ServiceConfig) {
cfg.service(cluster_status)
.service(remove_peer)
.service(recover_current_peer)
.service(get_cluster_metadata_keys)
.service(get_cluster_metadata_key)
.service(update_cluster_metadata_key)
.service(delete_cluster_metadata_key);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/facet_api.rs | src/actix/api/facet_api.rs | use actix_web::{Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use api::rest::{FacetRequest, FacetResponse};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use storage::content_manager::collection_verification::check_strict_mode;
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use crate::actix::api::CollectionPath;
use crate::actix::api::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{
get_request_hardware_counter, process_response, process_response_error,
};
use crate::settings::ServiceConfig;
#[post("/collections/{name}/facet")]
async fn facet(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<FacetRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let FacetRequest {
facet_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&facet_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, timing, None),
};
let facet_params = From::from(facet_request);
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let response = dispatcher
.toc(&access, &pass)
.facet(
&collection.name,
facet_params,
shard_selection,
params.consistency,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(FacetResponse::from);
process_response(response, timing, request_hw_counter.to_rest_api())
}
pub fn config_facet_api(cfg: &mut web::ServiceConfig) {
cfg.service(facet);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/debug_api.rs | src/actix/api/debug_api.rs | use actix_web::{Responder, get, patch, web};
use storage::rbac::AccessRequirements;
use crate::actix::auth::ActixAccess;
use crate::common::debugger::{DebugConfigPatch, DebuggerState};
#[get("/debugger")]
async fn get_debugger_config(
ActixAccess(access): ActixAccess,
debugger_state: web::Data<DebuggerState>,
) -> impl Responder {
crate::actix::helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
Ok(debugger_state.get_config())
})
.await
}
#[patch("/debugger")]
async fn update_debugger_config(
ActixAccess(access): ActixAccess,
debugger_state: web::Data<DebuggerState>,
debug_patch: web::Json<DebugConfigPatch>,
) -> impl Responder {
crate::actix::helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
Ok(debugger_state.apply_config_patch(debug_patch.into_inner()))
})
.await
}
// Configure services
pub fn config_debugger_api(cfg: &mut web::ServiceConfig) {
cfg.service(get_debugger_config);
cfg.service(update_debugger_config);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/read_params.rs | src/actix/api/read_params.rs | use std::num::NonZeroU64;
use std::time::Duration;
use collection::operations::consistency_params::ReadConsistency;
use schemars::JsonSchema;
use serde::Deserialize;
use validator::Validate;
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Deserialize, JsonSchema, Validate)]
pub struct ReadParams {
#[serde(default, deserialize_with = "deserialize_read_consistency")]
#[validate(nested)]
pub consistency: Option<ReadConsistency>,
/// If set, overrides global timeout for this request. Unit is seconds.
pub timeout: Option<NonZeroU64>,
}
impl ReadParams {
pub fn timeout(&self) -> Option<Duration> {
self.timeout.map(|num| Duration::from_secs(num.get()))
}
pub(crate) fn timeout_as_secs(&self) -> Option<usize> {
self.timeout.map(|i| i.get() as usize)
}
}
fn deserialize_read_consistency<'de, D>(
deserializer: D,
) -> Result<Option<ReadConsistency>, D::Error>
where
D: serde::Deserializer<'de>,
{
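// Accept either a valid read consistency value, or an empty string meaning "not set";
// any other string value is rejected with an error.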
#[derive(Deserialize)]
#[serde(untagged)]
enum Helper<'a> {
ReadConsistency(ReadConsistency),
Str(&'a str),
}
match Helper::deserialize(deserializer)? {
Helper::ReadConsistency(read_consistency) => Ok(Some(read_consistency)),
Helper::Str("") => Ok(None),
Helper::Str(x) => Err(serde::de::Error::custom(format!(
"failed to deserialize read consistency query parameter value '{x}'"
))),
}
}
#[cfg(test)]
mod test {
use collection::operations::consistency_params::ReadConsistencyType;
use super::*;
#[test]
fn deserialize_empty_string() {
test_str("", ReadParams::default());
}
#[test]
fn deserialize_empty_value() {
test("", ReadParams::default());
}
#[test]
fn deserialize_type() {
test("all", from_type(ReadConsistencyType::All));
test("majority", from_type(ReadConsistencyType::Majority));
test("quorum", from_type(ReadConsistencyType::Quorum));
}
#[test]
fn deserialize_factor() {
for factor in 1..42 {
test(&factor.to_string(), from_factor(factor));
}
}
#[test]
fn try_deserialize_factor_0() {
assert!(try_deserialize(&str("0")).is_err());
}
fn test(value: &str, params: ReadParams) {
test_str(&str(value), params);
}
fn test_str(str: &str, params: ReadParams) {
assert_eq!(deserialize(str), params);
}
fn deserialize(str: &str) -> ReadParams {
try_deserialize(str).unwrap()
}
fn try_deserialize(str: &str) -> Result<ReadParams, serde_urlencoded::de::Error> {
serde_urlencoded::from_str(str)
}
fn str(value: &str) -> String {
format!("consistency={value}")
}
fn from_type(r#type: ReadConsistencyType) -> ReadParams {
ReadParams {
consistency: Some(ReadConsistency::Type(r#type)),
..Default::default()
}
}
fn from_factor(factor: usize) -> ReadParams {
ReadParams {
consistency: Some(ReadConsistency::Factor(factor)),
..Default::default()
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/retrieve_api.rs | src/actix/api/retrieve_api.rs | use std::time::Duration;
use actix_web::{Responder, get, post, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{PointRequest, PointRequestInternal, ScrollRequest};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::TryFutureExt;
use itertools::Itertools;
use segment::types::{PointIdType, WithPayloadInterface};
use serde::Deserialize;
use shard::retrieve::record_internal::RecordInternal;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_timeout,
};
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use tokio::time::Instant;
use validator::Validate;
use super::CollectionPath;
use super::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{
get_request_hardware_counter, process_response, process_response_error,
};
use crate::common::query::do_get_points;
use crate::settings::ServiceConfig;
#[derive(Deserialize, Validate)]
struct PointPath {
#[validate(length(min = 1))]
// TODO: validate this is a valid ID type (usize or UUID)? Currently it errors on deserialization.
id: String,
}
async fn do_get_point(
toc: &TableOfContent,
collection_name: &str,
point_id: PointIdType,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
access: Access,
hw_counter: HwMeasurementAcc,
) -> Result<Option<RecordInternal>, StorageError> {
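// Single-point lookups always request the payload and all vectors, and query all shards.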
let request = PointRequestInternal {
ids: vec![point_id],
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: true.into(),
};
let shard_selection = ShardSelectorInternal::All;
toc.retrieve(
collection_name,
request,
read_consistency,
timeout,
shard_selection,
access,
hw_counter,
)
.await
.map(|points| points.into_iter().next())
}
#[get("/collections/{name}/points/{id}")]
async fn get_point(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
point: Path<PointPath>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let pass = match check_strict_mode_timeout(
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(p) => p,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let Ok(point_id) = point.id.parse::<PointIdType>() else {
let err = StorageError::BadInput {
description: format!("Can not recognize \"{}\" as point id", point.id),
};
return process_response_error(err, Instant::now(), None);
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let res = do_get_point(
dispatcher.toc(&access, &pass),
&collection.name,
point_id,
params.consistency,
params.timeout(),
access,
request_hw_counter.get_counter(),
)
.await
.and_then(|i| {
i.ok_or_else(|| StorageError::NotFound {
description: format!("Point with id {point_id} does not exists!"),
})
})
.map(api::rest::Record::from);
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points")]
async fn get_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<PointRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let pass = match check_strict_mode_timeout(
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(p) => p,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let PointRequest {
point_request,
shard_key,
} = request.into_inner();
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => ShardSelectorInternal::from(shard_keys),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let res = do_get_points(
dispatcher.toc(&access, &pass),
&collection.name,
point_request,
params.consistency,
params.timeout(),
shard_selection,
access,
request_hw_counter.get_counter(),
)
.map_ok(|response| {
response
.into_iter()
.map(api::rest::Record::from)
.collect_vec()
})
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/scroll")]
async fn scroll_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<ScrollRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let ScrollRequest {
scroll_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&scroll_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => ShardSelectorInternal::from(shard_keys),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let res = dispatcher
.toc(&access, &pass)
.scroll(
&collection.name,
scroll_request,
params.consistency,
params.timeout(),
shard_selection,
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/snapshot_api.rs | src/actix/api/snapshot_api.rs | use std::path::Path;
use ::common::tempfile_ext::MaybeTempPath;
use actix_multipart::form::MultipartForm;
use actix_multipart::form::tempfile::TempFile;
use actix_web::{Responder, Result, delete, get, post, put, web};
use collection::common::file_utils::move_file;
use collection::common::sha_256;
use collection::common::snapshot_stream::SnapshotStream;
use collection::operations::snapshot_ops::{
ShardSnapshotRecover, SnapshotPriority, SnapshotRecover,
};
use collection::operations::verification::new_unchecked_verification_pass;
use collection::shards::replica_set::snapshots::RecoveryType;
use collection::shards::shard::ShardId;
use collection::shards::shard_holder::shard_not_found_error;
use fs_err::tokio as tokio_fs;
use futures::{FutureExt as _, StreamExt as _, TryFutureExt as _};
use reqwest::Url;
use schemars::JsonSchema;
use segment::common::BYTES_IN_MB;
use segment::data_types::manifest::SnapshotManifest;
use serde::{Deserialize, Serialize};
use storage::content_manager::errors::{StorageError, StorageResult};
use storage::content_manager::snapshots::recover::do_recover_from_snapshot;
use storage::content_manager::snapshots::{
do_create_full_snapshot, do_delete_collection_snapshot, do_delete_full_snapshot,
do_list_full_snapshots,
};
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use tokio::io::AsyncWriteExt as _;
use uuid::Uuid;
use validator::Validate;
use {actix_web_validator as valid, fs_err as fs};
use super::{CollectionPath, StrictCollectionPath};
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, HttpError};
use crate::common;
use crate::common::collections::*;
use crate::common::http_client::HttpClient;
use crate::common::snapshots::try_take_partial_snapshot_recovery_lock;
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct SnapshotUploadingParam {
pub wait: Option<bool>,
pub priority: Option<SnapshotPriority>,
/// Optional SHA256 checksum to verify snapshot integrity before recovery.
#[serde(default)]
#[validate(custom(function = "::common::validation::validate_sha256_hash"))]
pub checksum: Option<String>,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct SnapshottingParam {
pub wait: Option<bool>,
}
#[derive(MultipartForm)]
pub struct SnapshottingForm {
snapshot: TempFile,
}
// Actix specific code
pub async fn do_get_full_snapshot(
toc: &TableOfContent,
access: Access,
snapshot_name: &str,
) -> Result<SnapshotStream, HttpError> {
access.check_global_access(AccessRequirements::new())?;
let snapshots_storage_manager = toc.get_snapshots_storage_manager()?;
let snapshot_path =
snapshots_storage_manager.get_full_snapshot_path(toc.snapshots_path(), snapshot_name)?;
let snapshot_stream = snapshots_storage_manager
.get_snapshot_stream(&snapshot_path)
.await?;
Ok(snapshot_stream)
}
pub async fn do_save_uploaded_snapshot(
toc: &TableOfContent,
collection_name: &str,
snapshot: TempFile,
) -> Result<Url, StorageError> {
let filename = snapshot
.file_name
// Sanitize the file name:
// - only take the top level path (no directories such as ../)
// - require the file name to be valid UTF-8
.and_then(|x| {
Path::new(&x)
.file_name()
.map(|filename| filename.to_owned())
})
.and_then(|x| x.to_str().map(|x| x.to_owned()))
.unwrap_or_else(|| Uuid::new_v4().to_string());
let collection_snapshot_path = toc.snapshots_path_for_collection(collection_name);
if !collection_snapshot_path.exists() {
log::debug!("Creating missing collection snapshots directory for {collection_name}");
toc.create_snapshots_path(collection_name).await?;
}
let path = collection_snapshot_path.join(filename);
move_file(snapshot.file.path(), &path).await?;
let absolute_path = fs::canonicalize(&path)?;
let snapshot_location = Url::from_file_path(&absolute_path).map_err(|_| {
StorageError::service_error(format!(
"Failed to convert path to URL: {}",
absolute_path.display()
))
})?;
Ok(snapshot_location)
}
// Actix specific code
pub async fn do_get_snapshot(
toc: &TableOfContent,
access: Access,
collection_name: &str,
snapshot_name: &str,
) -> Result<SnapshotStream, HttpError> {
let collection_pass =
access.check_collection_access(collection_name, AccessRequirements::new().extras())?;
let collection: tokio::sync::RwLockReadGuard<collection::collection::Collection> =
toc.get_collection(&collection_pass).await?;
let snapshot_storage_manager = collection.get_snapshots_storage_manager()?;
let snapshot_path =
snapshot_storage_manager.get_snapshot_path(collection.snapshots_path(), snapshot_name)?;
let snapshot_stream = snapshot_storage_manager
.get_snapshot_stream(&snapshot_path)
.await?;
Ok(snapshot_stream)
}
#[get("/collections/{name}/snapshots")]
async fn list_snapshots(
dispatcher: web::Data<Dispatcher>,
path: web::Path<String>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// Nothing to verify.
let pass = new_unchecked_verification_pass();
helpers::time(do_list_snapshots(
dispatcher.toc(&access, &pass),
access,
&path,
))
.await
}
#[post("/collections/{name}/snapshots")]
async fn create_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<String>,
params: valid::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// Nothing to verify.
let pass = new_unchecked_verification_pass();
let collection_name = path.into_inner();
let future = async move {
do_create_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
&collection_name,
)
.await
};
helpers::time_or_accept(future, params.wait.unwrap_or(true)).await
}
#[post("/collections/{name}/snapshots/upload")]
async fn upload_snapshot(
dispatcher: web::Data<Dispatcher>,
http_client: web::Data<HttpClient>,
collection: valid::Path<StrictCollectionPath>,
MultipartForm(form): MultipartForm<SnapshottingForm>,
params: valid::Query<SnapshotUploadingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let wait = params.wait;
// Nothing to verify.
let pass = new_unchecked_verification_pass();
let future = async move {
let snapshot = form.snapshot;
access.check_global_access(AccessRequirements::new().manage())?;
if let Some(checksum) = ¶ms.checksum {
let snapshot_checksum = sha_256::hash_file(snapshot.file.path()).await?;
if !sha_256::hashes_equal(&snapshot_checksum, checksum) {
return Err(StorageError::checksum_mismatch(snapshot_checksum, checksum));
}
}
let snapshot_location =
do_save_uploaded_snapshot(dispatcher.toc(&access, &pass), &collection.name, snapshot)
.await?;
// The snapshot is a local file, so we do not need an API key for it
let http_client = http_client.client(None)?;
let snapshot_recover = SnapshotRecover {
location: snapshot_location,
priority: params.priority,
checksum: None,
api_key: None,
};
do_recover_from_snapshot(
dispatcher.get_ref(),
&collection.name,
snapshot_recover,
access,
http_client,
)
.await
};
helpers::time_or_accept(future, wait.unwrap_or(true)).await
}
#[put("/collections/{name}/snapshots/recover")]
async fn recover_from_snapshot(
dispatcher: web::Data<Dispatcher>,
http_client: web::Data<HttpClient>,
collection: valid::Path<CollectionPath>,
request: valid::Json<SnapshotRecover>,
params: valid::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let future = async move {
let snapshot_recover = request.into_inner();
let http_client = http_client.client(snapshot_recover.api_key.as_deref())?;
do_recover_from_snapshot(
dispatcher.get_ref(),
&collection.name,
snapshot_recover,
access,
http_client,
)
.await
};
helpers::time_or_accept(future, params.wait.unwrap_or(true)).await
}
#[get("/collections/{name}/snapshots/{snapshot_name}")]
async fn get_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, String)>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// Nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection_name, snapshot_name) = path.into_inner();
do_get_snapshot(
dispatcher.toc(&access, &pass),
access,
&collection_name,
&snapshot_name,
)
.await
}
#[get("/snapshots")]
async fn list_full_snapshots(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
helpers::time(do_list_full_snapshots(
dispatcher.toc(&access, &pass),
access,
))
.await
}
#[post("/snapshots")]
async fn create_full_snapshot(
dispatcher: web::Data<Dispatcher>,
params: valid::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let future = async move { do_create_full_snapshot(dispatcher.get_ref(), access).await };
helpers::time_or_accept(future, params.wait.unwrap_or(true)).await
}
#[get("/snapshots/{snapshot_name}")]
async fn get_full_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<String>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let snapshot_name = path.into_inner();
do_get_full_snapshot(dispatcher.toc(&access, &pass), access, &snapshot_name).await
}
#[delete("/snapshots/{snapshot_name}")]
async fn delete_full_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<String>,
params: valid::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let future = async move {
let snapshot_name = path.into_inner();
do_delete_full_snapshot(dispatcher.get_ref(), access, &snapshot_name).await
};
helpers::time_or_accept(future, params.wait.unwrap_or(true)).await
}
#[delete("/collections/{name}/snapshots/{snapshot_name}")]
async fn delete_collection_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, String)>,
params: valid::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let future = async move {
let (collection_name, snapshot_name) = path.into_inner();
do_delete_collection_snapshot(
dispatcher.get_ref(),
access,
&collection_name,
&snapshot_name,
)
.await
};
helpers::time_or_accept(future, params.wait.unwrap_or(true)).await
}
#[get("/collections/{collection}/shards/{shard}/snapshots")]
async fn list_shard_snapshots(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard) = path.into_inner();
let future = common::snapshots::list_shard_snapshots(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
)
.map_err(Into::into);
helpers::time(future).await
}
#[post("/collections/{collection}/shards/{shard}/snapshots")]
async fn create_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
query: web::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard) = path.into_inner();
let future = common::snapshots::create_shard_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
);
helpers::time_or_accept(future, query.wait.unwrap_or(true)).await
}
#[get("/collections/{collection}/shards/{shard}/snapshot")]
async fn stream_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
ActixAccess(access): ActixAccess,
) -> Result<SnapshotStream, HttpError> {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard) = path.into_inner();
Ok(common::snapshots::stream_shard_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
None,
)
.await?)
}
// TODO: `PUT` (same as `recover_from_snapshot`) or `POST`!?
#[put("/collections/{collection}/shards/{shard}/snapshots/recover")]
async fn recover_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
http_client: web::Data<HttpClient>,
path: web::Path<(String, ShardId)>,
query: web::Query<SnapshottingParam>,
web::Json(request): web::Json<ShardSnapshotRecover>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let future = async move {
let (collection, shard) = path.into_inner();
common::snapshots::recover_shard_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
request.location,
request.priority.unwrap_or_default(),
request.checksum,
http_client.as_ref().clone(),
request.api_key,
)
.await?;
Ok(true)
};
helpers::time_or_accept(future, query.wait.unwrap_or(true)).await
}
// TODO: `POST` (same as `upload_snapshot`) or `PUT`!?
#[post("/collections/{collection}/shards/{shard}/snapshots/upload")]
async fn upload_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
query: web::Query<SnapshotUploadingParam>,
MultipartForm(form): MultipartForm<SnapshottingForm>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard) = path.into_inner();
let SnapshotUploadingParam {
wait,
priority,
checksum,
} = query.into_inner();
// - `recover_shard_snapshot_impl` is *not* cancel safe
// - but the task is *spawned* on the runtime and won't be cancelled if the request is cancelled
let future = cancel::future::spawn_cancel_on_drop(async move |cancel| {
// TODO: Run this check before the multipart blob is uploaded
let collection_pass = access
.check_global_access(AccessRequirements::new().manage())?
.issue_pass(&collection);
let cancel_safe = async {
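// If the client supplied a checksum, verify the uploaded file against it before doing any recovery work.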
if let Some(checksum) = checksum {
let snapshot_checksum = sha_256::hash_file(form.snapshot.file.path()).await?;
if !sha_256::hashes_equal(&snapshot_checksum, &checksum) {
return Err(StorageError::checksum_mismatch(snapshot_checksum, checksum));
}
}
let collection = dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?;
collection.assert_shard_exists(shard).await?;
Ok(collection)
};
let collection = cancel::future::cancel_on_token(cancel.clone(), cancel_safe).await??;
// `recover_shard_snapshot_impl` is *not* cancel safe
common::snapshots::recover_shard_snapshot_impl(
dispatcher.toc(&access, &pass),
&collection,
shard,
MaybeTempPath::from(form.snapshot.file.into_temp_path()),
priority.unwrap_or_default(),
RecoveryType::Full,
cancel,
)
.await?;
Ok(())
})
.map(|res| res.map_err(Into::into).and_then(|res| res));
helpers::time_or_accept(future, wait.unwrap_or(true)).await
}
#[get("/collections/{collection}/shards/{shard}/snapshots/{snapshot}")]
async fn download_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId, String)>,
ActixAccess(access): ActixAccess,
) -> Result<impl Responder, HttpError> {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard, snapshot) = path.into_inner();
let collection_pass =
access.check_collection_access(&collection, AccessRequirements::new().extras())?;
let collection = dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?;
let snapshots_storage_manager = collection.get_snapshots_storage_manager()?;
let snapshot_path = collection
.shards_holder()
.read()
.await
.get_shard_snapshot_path(collection.snapshots_path(), shard, &snapshot)
.await?;
let snapshot_stream = snapshots_storage_manager
.get_snapshot_stream(&snapshot_path)
.await?;
Ok(snapshot_stream)
}
#[delete("/collections/{collection}/shards/{shard}/snapshots/{snapshot}")]
async fn delete_shard_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId, String)>,
query: web::Query<SnapshottingParam>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// nothing to verify.
let pass = new_unchecked_verification_pass();
let (collection, shard, snapshot) = path.into_inner();
let future = common::snapshots::delete_shard_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
snapshot,
)
.map_ok(|_| true)
.map_err(Into::into);
helpers::time_or_accept(future, query.wait.unwrap_or(true)).await
}
#[post("/collections/{collection}/shards/{shard}/snapshot/partial/create")]
async fn create_partial_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
manifest: web::Json<SnapshotManifest>,
ActixAccess(access): ActixAccess,
) -> Result<SnapshotStream, HttpError> {
let (collection, shard) = path.into_inner();
let manifest = manifest.into_inner();
// nothing to verify.
let pass = new_unchecked_verification_pass();
let snapshot_stream = common::snapshots::stream_shard_snapshot(
dispatcher.toc(&access, &pass).clone(),
access,
collection,
shard,
Some(manifest),
)
.await?;
Ok(snapshot_stream)
}
#[post("/collections/{collection}/shards/{shard}/snapshot/partial/recover")]
async fn recover_partial_snapshot(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
query: web::Query<SnapshotUploadingParam>,
MultipartForm(form): MultipartForm<SnapshottingForm>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let (collection, shard) = path.into_inner();
let SnapshotUploadingParam {
wait,
priority,
checksum,
} = query.into_inner();
// nothing to verify.
let pass = new_unchecked_verification_pass();
let try_take_recovery_lock_future =
try_take_partial_snapshot_recovery_lock(&dispatcher, &collection, shard, &access, &pass);
let recovery_lock = match try_take_recovery_lock_future.await {
Ok(recovery_lock) => recovery_lock,
Err(StorageError::ShardUnavailable { .. }) => {
return helpers::already_in_progress_response();
}
Err(err) => {
return helpers::process_response_error(err, tokio::time::Instant::now(), None);
}
};
let future = cancel::future::spawn_cancel_on_drop(async move |cancel| {
let _recovery_lock = recovery_lock;
// TODO: Run this check before the multipart blob is uploaded
let collection_pass = access
.check_global_access(AccessRequirements::new().manage())?
.issue_pass(&collection);
let cancel_safe = async {
if let Some(checksum) = checksum {
let snapshot_checksum = sha_256::hash_file(form.snapshot.file.path()).await?;
if !sha_256::hashes_equal(&snapshot_checksum, &checksum) {
return Err(StorageError::checksum_mismatch(snapshot_checksum, checksum));
}
}
let collection = dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?;
collection.assert_shard_exists(shard).await?;
Ok(collection)
};
let collection = cancel::future::cancel_on_token(cancel.clone(), cancel_safe).await??;
// `recover_shard_snapshot_impl` is *not* cancel safe
common::snapshots::recover_shard_snapshot_impl(
dispatcher.toc(&access, &pass),
&collection,
shard,
MaybeTempPath::from(form.snapshot.file.into_temp_path()),
priority.unwrap_or_default(),
RecoveryType::Partial,
cancel,
)
.await?;
Ok(())
})
.map(|res| res.map_err(Into::into).and_then(|res| res));
helpers::time_or_accept(future, wait.unwrap_or(true)).await
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
pub struct PartialSnapshotRecoverFrom {
peer_url: Url,
api_key: Option<String>,
}
#[post("/collections/{collection}/shards/{shard}/snapshot/partial/recover_from")]
async fn recover_partial_snapshot_from(
dispatcher: web::Data<Dispatcher>,
http_client: web::Data<HttpClient>,
path: web::Path<(String, ShardId)>,
query: web::Query<SnapshottingParam>,
web::Json(request): web::Json<PartialSnapshotRecoverFrom>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let (collection_name, shard_id) = path.into_inner();
let PartialSnapshotRecoverFrom { peer_url, api_key } = request;
let SnapshottingParam { wait } = query.into_inner();
// nothing to verify
let pass = new_unchecked_verification_pass();
let try_take_recovery_lock_future = try_take_partial_snapshot_recovery_lock(
&dispatcher,
&collection_name,
shard_id,
&access,
&pass,
);
let recovery_lock = match try_take_recovery_lock_future.await {
Ok(recovery_lock) => recovery_lock,
Err(StorageError::ShardUnavailable { .. }) => {
return helpers::already_in_progress_response();
}
Err(err) => {
return helpers::process_response_error(err, tokio::time::Instant::now(), None);
}
};
let future = cancel::future::spawn_cancel_on_drop(async move |cancel| {
let _recovery_lock = recovery_lock;
let download_start_time = tokio::time::Instant::now();
let cancel_safe = async {
let toc = dispatcher.toc(&access, &pass);
let collection_pass = access
.check_global_access(AccessRequirements::new().manage())?
.issue_pass(&collection_name)
.into_static();
let collection = toc.get_collection(&collection_pass).await?;
collection.assert_shard_exists(shard_id).await?;
let http_client = http_client.client(api_key.as_deref())?;
let encoded_collection_name = urlencoding::encode(&collection_name);
let create_snapshot_url = format!(
"{peer_url}/collections/{encoded_collection_name}/shards/{shard_id}/snapshot/partial/create"
);
let snapshot_manifest = collection.get_partial_snapshot_manifest(shard_id).await?;
let download_dir = toc.optional_temp_or_snapshot_temp_path()?;
let (partial_snapshot_file, partial_snapshot_temp_path) = tempfile::Builder::new()
.prefix("partial-snapshot")
.suffix(".download")
.tempfile_in(&download_dir)?
.into_parts();
let partial_snapshot_file = fs::File::from_parts::<&Path>(
partial_snapshot_file,
partial_snapshot_temp_path.as_ref(),
);
let response = http_client
.post(create_snapshot_url)
.json(&snapshot_manifest)
.send()
.await?
.error_for_status()?;
if response.status() == reqwest::StatusCode::NOT_MODIFIED {
let shard_holder = collection.shards_holder();
let shard_holder = shard_holder.read().await;
let replica_set = shard_holder
.get_shard(shard_id)
.ok_or_else(|| shard_not_found_error(shard_id))?;
// The replica is up to date so we bump the recovered timestamp
// This prevents CM from immediately trying to recover again
replica_set.partial_snapshot_meta.snapshot_recovered();
return Err(StorageError::EmptyPartialSnapshot { shard_id });
}
let mut partial_snapshot_file =
tokio::io::BufWriter::new(tokio_fs::File::from_std(partial_snapshot_file));
let mut partial_snapshot_stream = response.bytes_stream();
let mut total_bytes_downloaded = 0u64;
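// Stream the snapshot body into the temp file chunk by chunk, tracking the total size for the log message below.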
while let Some(chunk) = partial_snapshot_stream.next().await {
let chunk = chunk?;
total_bytes_downloaded += chunk.len() as u64;
partial_snapshot_file.write_all(&chunk).await?;
}
partial_snapshot_file.flush().await?;
StorageResult::Ok((collection, partial_snapshot_temp_path, total_bytes_downloaded))
};
let create_partial_snapshot_result =
cancel::future::cancel_on_token(cancel.clone(), cancel_safe).await?;
let (collection, partial_snapshot_temp_path, bytes_downloaded) =
match create_partial_snapshot_result {
Ok(output) => output,
Err(StorageError::EmptyPartialSnapshot { .. }) => return Ok(false),
Err(err) => return Err(err),
};
let download_duration = download_start_time.elapsed();
let total_size_mb = bytes_downloaded as f64 / BYTES_IN_MB as f64;
let download_speed_mbps = total_size_mb / download_duration.as_secs_f64();
log::debug!(
"Partial snapshot download completed: path={}, size={:.2} MB, duration={:.2}s, speed={:.2} MB/s, shard_id={}",
partial_snapshot_temp_path.display(),
total_size_mb,
download_duration.as_secs_f64(),
download_speed_mbps,
shard_id
);
common::snapshots::recover_shard_snapshot_impl(
dispatcher.toc(&access, &pass),
&collection,
shard_id,
MaybeTempPath::from(partial_snapshot_temp_path),
SnapshotPriority::NoSync,
RecoveryType::Partial,
cancel,
)
.await?;
Ok(true)
})
.map(|res| res.map_err(Into::into).and_then(|res| res));
helpers::time_or_accept(future, wait.unwrap_or(true)).await
}
#[get("/collections/{collection}/shards/{shard}/snapshot/partial/manifest")]
async fn get_partial_snapshot_manifest(
dispatcher: web::Data<Dispatcher>,
path: web::Path<(String, ShardId)>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let (collection, shard) = path.into_inner();
let pass = new_unchecked_verification_pass();
let future = async move {
let collection_pass = access
.check_global_access(AccessRequirements::new().extras())?
.issue_pass(&collection);
dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?
.get_partial_snapshot_manifest(shard)
.await
.map_err(StorageError::from)
};
helpers::time(future).await
}
// Configure services
pub fn config_snapshots_api(cfg: &mut web::ServiceConfig) {
cfg.service(list_snapshots)
.service(create_snapshot)
.service(upload_snapshot)
.service(recover_from_snapshot)
.service(get_snapshot)
.service(list_full_snapshots)
.service(create_full_snapshot)
.service(get_full_snapshot)
.service(delete_full_snapshot)
.service(delete_collection_snapshot)
.service(list_shard_snapshots)
.service(create_shard_snapshot)
.service(stream_shard_snapshot)
.service(recover_shard_snapshot)
.service(upload_shard_snapshot)
.service(download_shard_snapshot)
.service(delete_shard_snapshot)
.service(create_partial_snapshot)
.service(recover_partial_snapshot)
.service(recover_partial_snapshot_from)
.service(get_partial_snapshot_manifest);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/profiler_api.rs | src/actix/api/profiler_api.rs | use actix_web::{Responder, get, web};
use actix_web_validator::Query;
use collection::profiling::interface::get_requests_profile_log;
use collection::profiling::slow_requests_log::LogEntry;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use storage::rbac::AccessRequirements;
use validator::Validate;
use crate::actix::auth::ActixAccess;
#[derive(Deserialize, Validate)]
struct LogParams {
limit: Option<usize>,
/// Optional filter by request name (substring match)
request: Option<String>,
}
#[derive(Serialize, JsonSchema)]
struct SlowRequestsResponse {
requests: Vec<LogEntry>,
}
const DEFAULT_SLOW_REQUESTS_LIMIT: usize = 10;
#[get("/profiler/slow_requests")]
async fn get_slow_requests(
ActixAccess(access): ActixAccess,
params: Query<LogParams>,
) -> impl Responder {
crate::actix::helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
let LogParams { limit, request } = params.into_inner();
let slow_requests = get_requests_profile_log(
limit.unwrap_or(DEFAULT_SLOW_REQUESTS_LIMIT),
request.as_deref(),
)
.await;
Ok(SlowRequestsResponse {
requests: slow_requests,
})
})
.await
}
pub fn config_profiler_api(cfg: &mut web::ServiceConfig) {
cfg.service(get_slow_requests);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/service_api.rs | src/actix/api/service_api.rs | use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use actix_web::http::StatusCode;
use actix_web::http::header::ContentType;
use actix_web::rt::time::Instant;
use actix_web::web::Data;
use actix_web::{HttpResponse, Responder, get, post, web};
use actix_web_validator::Query;
use common::types::{DetailsLevel, TelemetryDetail};
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::{Deserialize, Serialize};
use storage::content_manager::errors::StorageError;
use storage::rbac::AccessRequirements;
use tokio::sync::Mutex;
use validator::Validate;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, process_response_error};
use crate::common::health;
use crate::common::metrics::MetricsData;
use crate::common::stacktrace::get_stack_trace;
use crate::common::telemetry::TelemetryCollector;
use crate::settings::ServiceConfig;
use crate::tracing;
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct TelemetryParam {
pub anonymize: Option<bool>,
pub details_level: Option<usize>,
#[validate(range(min = 1))]
pub timeout: Option<u64>,
}
impl TelemetryParam {
pub fn timeout(&self) -> Option<Duration> {
self.timeout.map(Duration::from_secs)
}
}
#[get("/telemetry")]
fn telemetry(
telemetry_collector: Data<Mutex<TelemetryCollector>>,
params: Query<TelemetryParam>,
ActixAccess(access): ActixAccess,
) -> impl Future<Output = HttpResponse> {
helpers::time(async move {
let anonymize = params.anonymize.unwrap_or(false);
let details_level = params
.details_level
.map_or(DetailsLevel::Level0, Into::into);
let detail = TelemetryDetail {
level: details_level,
histograms: false,
};
let telemetry_data = telemetry_collector
.lock()
.await
.prepare_data(&access, detail, params.timeout())
.await?;
let telemetry_data = if anonymize {
telemetry_data.anonymize()
} else {
telemetry_data
};
Ok(telemetry_data)
})
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct MetricsParam {
pub anonymize: Option<bool>,
#[validate(range(min = 1))]
pub timeout: Option<u64>,
}
impl MetricsParam {
pub fn timeout(&self) -> Option<Duration> {
self.timeout.map(Duration::from_secs)
}
}
#[get("/metrics")]
async fn metrics(
telemetry_collector: Data<Mutex<TelemetryCollector>>,
params: Query<MetricsParam>,
config: Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
if let Err(err) = access.check_global_access(AccessRequirements::new()) {
return process_response_error(err, Instant::now(), None);
}
let anonymize = params.anonymize.unwrap_or(false);
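// The /metrics endpoint always collects the most detailed telemetry level (Level4), with histograms enabled.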
let telemetry_data = telemetry_collector
.lock()
.await
.prepare_data(
&access,
TelemetryDetail {
level: DetailsLevel::Level4,
histograms: true,
},
params.timeout(),
)
.await;
match telemetry_data {
Err(err) => process_response_error(err, Instant::now(), None),
Ok(telemetry_data) => {
let telemetry_data = if anonymize {
telemetry_data.anonymize()
} else {
telemetry_data
};
let metrics_prefix = config.metrics_prefix.as_deref();
HttpResponse::Ok()
.content_type(ContentType::plaintext())
.body(
MetricsData::new_from_telemetry(telemetry_data, metrics_prefix)
.format_metrics(),
)
}
}
}
#[get("/stacktrace")]
fn get_stacktrace(ActixAccess(access): ActixAccess) -> impl Future<Output = HttpResponse> {
helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
Ok(get_stack_trace())
})
}
#[get("/healthz")]
async fn healthz() -> impl Responder {
kubernetes_healthz()
}
#[get("/livez")]
async fn livez() -> impl Responder {
kubernetes_healthz()
}
#[get("/readyz")]
async fn readyz(health_checker: web::Data<Option<Arc<health::HealthChecker>>>) -> impl Responder {
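// Report ready only when all shards pass the readiness check; when no health checker is configured, always report ready.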
let is_ready = match health_checker.as_ref() {
Some(health_checker) => health_checker.check_ready().await,
None => true,
};
let (status, body) = if is_ready {
(StatusCode::OK, "all shards are ready")
} else {
(StatusCode::SERVICE_UNAVAILABLE, "some shards are not ready")
};
HttpResponse::build(status)
.content_type(ContentType::plaintext())
.body(body)
}
/// Basic Kubernetes healthz endpoint
fn kubernetes_healthz() -> impl Responder {
HttpResponse::Ok()
.content_type(ContentType::plaintext())
.body("healthz check passed")
}
#[get("/logger")]
async fn get_logger_config(
ActixAccess(access): ActixAccess,
handle: web::Data<tracing::LoggerHandle>,
) -> impl Responder {
let timing = Instant::now();
let future = async {
let _ = access.check_global_access(AccessRequirements::new())?;
let config = handle.get_config().await;
Ok(config)
};
helpers::process_response(future.await, timing, None)
}
#[post("/logger")]
async fn update_logger_config(
ActixAccess(access): ActixAccess,
handle: web::Data<tracing::LoggerHandle>,
mut config: web::Json<tracing::LoggerConfig>,
) -> impl Responder {
let timing = Instant::now();
let future = async {
let _ = access.check_global_access(AccessRequirements::new().manage())?;
// Log file can only be set in Qdrant config file
config.on_disk.log_file = None;
handle
.update_config(config.into_inner())
.await
.map_err(|err| StorageError::service_error(err.to_string()))?;
Ok(true)
};
helpers::process_response(future.await, timing, None)
}
// Configure services
pub fn config_service_api(cfg: &mut web::ServiceConfig) {
cfg.service(telemetry)
.service(metrics)
.service(get_stacktrace)
.service(healthz)
.service(livez)
.service(readyz)
.service(get_logger_config)
.service(update_logger_config);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/search_api.rs | src/actix/api/search_api.rs | use actix_web::{HttpResponse, Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use api::rest::{SearchMatrixOffsetsResponse, SearchMatrixPairsResponse, SearchMatrixRequest};
use collection::collection::distance_matrix::CollectionSearchMatrixRequest;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{
CoreSearchRequest, SearchGroupsRequest, SearchRequest, SearchRequestBatch,
};
use itertools::Itertools;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_batch,
};
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use super::CollectionPath;
use super::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{
get_request_hardware_counter, process_response, process_response_error,
};
use crate::common::query::{
do_core_search_points, do_search_batch_points, do_search_point_groups, do_search_points_matrix,
};
use crate::settings::ServiceConfig;
#[post("/collections/{name}/points/search")]
async fn search_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<SearchRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
let SearchRequest {
search_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&search_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_core_search_points(
dispatcher.toc(&access, &pass),
&collection.name,
search_request.into(),
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
});
process_response(result, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/search/batch")]
async fn batch_search_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<SearchRequestBatch>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
let requests = request
.into_inner()
.searches
.into_iter()
.map(|req| {
let SearchRequest {
search_request,
shard_key,
} = req;
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let core_request: CoreSearchRequest = search_request.into();
(core_request, shard_selection)
})
.collect::<Vec<_>>();
let pass = match check_strict_mode_batch(
requests.iter().map(|i| &i.0),
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_search_batch_points(
dispatcher.toc(&access, &pass),
&collection.name,
requests,
params.consistency,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|batch_scored_points| {
batch_scored_points
.into_iter()
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
})
.collect_vec()
});
process_response(result, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/search/groups")]
async fn search_point_groups(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<SearchGroupsRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
let SearchGroupsRequest {
search_group_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&search_group_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_search_point_groups(
dispatcher.toc(&access, &pass),
&collection.name,
search_group_request,
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await;
process_response(result, timing, request_hw_counter.to_rest_api())
}
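// The two matrix endpoints below issue the same `do_search_points_matrix` call and
// differ only in the response shape: pairwise scores vs. an offset-based representation.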
#[post("/collections/{name}/points/search/matrix/pairs")]
async fn search_points_matrix_pairs(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<SearchMatrixRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let SearchMatrixRequest {
search_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&search_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let response = do_search_points_matrix(
dispatcher.toc(&access, &pass),
&collection.name,
CollectionSearchMatrixRequest::from(search_request),
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(SearchMatrixPairsResponse::from);
process_response(response, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/search/matrix/offsets")]
async fn search_points_matrix_offsets(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<SearchMatrixRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let SearchMatrixRequest {
search_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&search_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let response = do_search_points_matrix(
dispatcher.toc(&access, &pass),
&collection.name,
CollectionSearchMatrixRequest::from(search_request),
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(SearchMatrixOffsetsResponse::from);
process_response(response, timing, request_hw_counter.to_rest_api())
}
// Configure services
pub fn config_search_api(cfg: &mut web::ServiceConfig) {
cfg.service(search_points)
.service(batch_search_points)
.service(search_point_groups)
.service(search_points_matrix_pairs)
.service(search_points_matrix_offsets);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/query_api.rs | src/actix/api/query_api.rs | use actix_web::{Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use api::rest::models::InferenceUsage;
use api::rest::{QueryGroupsRequest, QueryRequest, QueryRequestBatch, QueryResponse};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use itertools::Itertools;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_batch,
};
use storage::content_manager::errors::StorageError;
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use super::CollectionPath;
use super::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, get_request_hardware_counter};
use crate::common::inference::params::InferenceParams;
use crate::common::inference::query_requests_rest::{
CollectionQueryGroupsRequestWithUsage, CollectionQueryRequestWithUsage,
convert_query_groups_request_from_rest, convert_query_request_from_rest,
};
use crate::common::inference::token::InferenceToken;
use crate::common::query::do_query_point_groups;
use crate::settings::ServiceConfig;
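// Query handlers additionally perform inference: the REST request is converted via
// `convert_query_request_from_rest`, which resolves any inputs that require inference
// and reports the resulting usage. That usage is merged and returned alongside the
// hardware counter report in the response.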
#[post("/collections/{name}/points/query")]
async fn query_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<QueryRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let QueryRequest {
internal: query_request,
shard_key,
} = request.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let hw_measurement_acc = request_hw_counter.get_counter();
let mut inference_usage = InferenceUsage::default();
let inference_params = InferenceParams::new(inference_token, params.timeout());
let result = async {
let CollectionQueryRequestWithUsage { request, usage } =
convert_query_request_from_rest(query_request, &inference_params).await?;
inference_usage.merge_opt(usage);
let pass = check_strict_mode(
&request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await?;
let points = dispatcher
.toc(&access, &pass)
.query_batch(
&collection.name,
vec![(request, shard_selection)],
params.consistency,
access,
params.timeout(),
hw_measurement_acc,
)
.await?
.pop()
.ok_or_else(|| {
StorageError::service_error("Expected at least one response for one query")
})?
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec();
Ok(QueryResponse { points })
}
.await;
helpers::process_response_with_inference_usage(
result,
timing,
request_hw_counter.to_rest_api(),
inference_usage.into_non_empty(),
)
}
#[post("/collections/{name}/points/query/batch")]
async fn query_points_batch(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<QueryRequestBatch>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let QueryRequestBatch { searches } = request.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let hw_measurement_acc = request_hw_counter.get_counter();
let mut all_usages: InferenceUsage = InferenceUsage::default();
let inference_params = InferenceParams::new(inference_token, params.timeout());
let result = async {
let mut batch = Vec::with_capacity(searches.len());
for request_item in searches {
let QueryRequest {
internal,
shard_key,
} = request_item;
let CollectionQueryRequestWithUsage { request, usage } =
convert_query_request_from_rest(internal, &inference_params).await?;
all_usages.merge_opt(usage);
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
batch.push((request, shard_selection));
}
let pass = check_strict_mode_batch(
batch.iter().map(|i| &i.0),
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await?;
let res = dispatcher
.toc(&access, &pass)
.query_batch(
&collection.name,
batch,
params.consistency,
access,
params.timeout(),
hw_measurement_acc,
)
.await?
.into_iter()
.map(|response| QueryResponse {
points: response
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec(),
})
.collect_vec();
Ok(res)
}
.await;
helpers::process_response_with_inference_usage(
result,
timing,
request_hw_counter.to_rest_api(),
all_usages.into_non_empty(),
)
}
#[post("/collections/{name}/points/query/groups")]
async fn query_points_groups(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<QueryGroupsRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let QueryGroupsRequest {
search_group_request,
shard_key,
} = request.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let hw_measurement_acc = request_hw_counter.get_counter();
let mut inference_usage = InferenceUsage::default();
let inference_params = InferenceParams::new(inference_token, params.timeout());
let result = async {
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let CollectionQueryGroupsRequestWithUsage { request, usage } =
convert_query_groups_request_from_rest(search_group_request, inference_params).await?;
inference_usage.merge_opt(usage);
let pass = check_strict_mode(
&request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await?;
let query_result = do_query_point_groups(
dispatcher.toc(&access, &pass),
&collection.name,
request,
params.consistency,
shard_selection,
access,
params.timeout(),
hw_measurement_acc,
)
.await?;
Ok(query_result)
}
.await;
helpers::process_response_with_inference_usage(
result,
timing,
request_hw_counter.to_rest_api(),
inference_usage.into_non_empty(),
)
}
pub fn config_query_api(cfg: &mut web::ServiceConfig) {
cfg.service(query_points);
cfg.service(query_points_batch);
cfg.service(query_points_groups);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/update_api.rs | src/actix/api/update_api.rs | use actix_web::rt::time::Instant;
use actix_web::{Responder, delete, post, put, web};
use actix_web_validator::{Json, Path, Query};
use api::rest::UpdateVectors;
use api::rest::schema::PointInsertOperations;
use collection::operations::payload_ops::{DeletePayload, SetPayload};
use collection::operations::point_ops::PointsSelector;
use collection::operations::vector_ops::DeleteVectors;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::json_path::JsonPath;
use serde::Deserialize;
use storage::dispatcher::Dispatcher;
use validator::Validate;
use super::CollectionPath;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{
get_request_hardware_counter, process_response, process_response_with_inference_usage,
};
use crate::common::inference::params::InferenceParams;
use crate::common::inference::token::InferenceToken;
use crate::common::strict_mode::*;
use crate::common::update::*;
use crate::settings::ServiceConfig;
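/// Path parameters for the field-index endpoints: captures the `{field_name}` segment
/// and parses it as a `JsonPath` into the payload.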
#[derive(Deserialize, Validate)]
struct FieldPath {
#[serde(rename = "field_name")]
name: JsonPath,
}
#[put("/collections/{name}/points")]
async fn upsert_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<PointInsertOperations>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let inference_params = InferenceParams::new(inference_token, params.timeout);
let result_with_usage = do_upsert_points(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
inference_params,
request_hw_counter.get_counter(),
)
.await;
let (res, inference_usage) = match result_with_usage {
Ok((update_result, usage)) => (Ok(update_result), usage),
Err(err) => (Err(err), None),
};
process_response_with_inference_usage(
res,
timing,
request_hw_counter.to_rest_api(),
inference_usage,
)
}
#[post("/collections/{name}/points/delete")]
async fn delete_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<PointsSelector>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let res = do_delete_points(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[put("/collections/{name}/points/vectors")]
async fn update_vectors(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<UpdateVectors>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let inference_params = InferenceParams::new(inference_token, params.timeout);
let res = do_update_vectors(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
inference_params,
request_hw_counter.get_counter(),
)
.await;
let (res, inference_usage) = match res {
Ok((update_result, usage)) => (Ok(update_result), usage),
Err(err) => (Err(err), None),
};
process_response_with_inference_usage(
res,
timing,
request_hw_counter.to_rest_api(),
inference_usage,
)
}
#[post("/collections/{name}/points/vectors/delete")]
async fn delete_vectors(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<DeleteVectors>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let response = do_delete_vectors(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(response, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/payload")]
async fn set_payload(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<SetPayload>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let res = do_set_payload(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[put("/collections/{name}/points/payload")]
async fn overwrite_payload(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<SetPayload>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let res = do_overwrite_payload(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/payload/delete")]
async fn delete_payload(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<DeletePayload>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let res = do_delete_payload(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/payload/clear")]
async fn clear_payload(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<PointsSelector>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let timing = Instant::now();
let res = do_clear_payload(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(res, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/batch")]
async fn update_batch(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operations: Json<UpdateOperations>,
params: Query<UpdateParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
inference_token: InferenceToken,
) -> impl Responder {
let operations = operations.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let inference_params = InferenceParams::new(inference_token.clone(), params.timeout);
let timing = Instant::now();
let result_with_usage = do_batch_update_points(
StrictModeCheckedTocProvider::new(&dispatcher),
collection.into_inner().name,
operations.operations,
InternalUpdateParams::default(),
params.into_inner(),
access,
inference_params,
request_hw_counter.get_counter(),
)
.await;
let (response_data, inference_usage) = match result_with_usage {
Ok((update_results, usage)) => (Ok(update_results), usage),
Err(err) => (Err(err), None),
};
process_response_with_inference_usage(
response_data,
timing,
request_hw_counter.to_rest_api(),
inference_usage,
)
}
#[put("/collections/{name}/index")]
async fn create_field_index(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<CreateFieldIndex>,
params: Query<UpdateParams>,
ActixAccess(access): ActixAccess,
service_config: web::Data<ServiceConfig>,
) -> impl Responder {
let timing = Instant::now();
let operation = operation.into_inner();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
Some(params.wait),
);
let response = do_create_index(
dispatcher.into_inner(),
collection.into_inner().name,
operation,
InternalUpdateParams::default(),
params.into_inner(),
access,
request_hw_counter.get_counter(),
)
.await;
process_response(
response, timing,
None, // Do not report the hardware counter for index creation, as it might not be accurate due to consensus
)
}
#[delete("/collections/{name}/index/{field_name}")]
async fn delete_field_index(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
field: Path<FieldPath>,
params: Query<UpdateParams>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let response = do_delete_index(
dispatcher.into_inner(),
collection.into_inner().name,
field.name.clone(),
InternalUpdateParams::default(),
params.into_inner(),
access,
HwMeasurementAcc::disposable(), // API unmeasured
)
.await;
process_response(response, timing, None)
}
/// Request body for the staging test delay endpoint.
/// Only available when the `staging` feature is enabled.
#[cfg(feature = "staging")]
#[derive(Debug, Deserialize, Validate)]
pub struct TestDelayRequest {
/// Duration of the delay in seconds (default: 1.0, max: 300.0).
#[serde(default = "default_test_delay_duration")]
#[validate(range(min = 0.0, max = 300.0))]
pub duration: f64,
}
#[cfg(feature = "staging")]
fn default_test_delay_duration() -> f64 {
1.0
}
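// Illustrative request for the staging delay endpoint (feature-gated), assuming an
// existing collection named "demo"; the body follows `TestDelayRequest`:
//
//   POST /collections/demo/points/staging?wait=true
//   {"duration": 2.5}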
/// Staging endpoint that introduces an artificial delay for testing purposes.
/// Only available when the `staging` feature is enabled.
#[cfg(feature = "staging")]
#[post("/collections/{name}/points/staging")]
async fn staging_test_delay(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<TestDelayRequest>,
params: Query<UpdateParams>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
use collection::operations::point_ops::PointOperations;
use collection::operations::verification::new_unchecked_verification_pass;
use shard::operations::CollectionUpdateOperations;
use shard::operations::staging::TestDelayOperation;
let timing = Instant::now();
let operation = operation.into_inner();
let collection_name = collection.into_inner().name;
let point_operation = PointOperations::TestDelay(TestDelayOperation::new(operation.duration));
let collection_operation = CollectionUpdateOperations::PointOperation(point_operation);
// Get TOC with unchecked verification pass (staging operations don't need strict mode)
let pass = new_unchecked_verification_pass();
let toc = dispatcher.toc(&access, &pass);
let result = crate::common::update::update(
toc,
&collection_name,
collection_operation,
InternalUpdateParams::default(),
params.into_inner(),
None, // shard_key
access,
HwMeasurementAcc::disposable(),
)
.await;
process_response(result, timing, None)
}
// Configure services
pub fn config_update_api(cfg: &mut web::ServiceConfig) {
cfg.service(upsert_points)
.service(delete_points)
.service(update_vectors)
.service(delete_vectors)
.service(set_payload)
.service(overwrite_payload)
.service(delete_payload)
.service(clear_payload)
.service(create_field_index)
.service(delete_field_index)
.service(update_batch);
#[cfg(feature = "staging")]
cfg.service(staging_test_delay);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/mod.rs | src/actix/api/mod.rs | use common::validation::{validate_collection_name, validate_collection_name_legacy};
use serde::Deserialize;
use validator::Validate;
pub mod cluster_api;
pub mod collections_api;
pub mod count_api;
pub mod debug_api;
pub mod discovery_api;
pub mod facet_api;
pub mod issues_api;
pub mod local_shard_api;
pub mod profiler_api;
pub mod query_api;
pub mod read_params;
pub mod recommend_api;
pub mod retrieve_api;
pub mod search_api;
pub mod service_api;
pub mod shards_api;
pub mod snapshot_api;
pub mod update_api;
/// A collection path with stricter validation
///
/// Validation for collection paths has been made more strict over time.
/// To prevent breaking changes on existing collections, this is only enforced for newly created
/// collections. Basic validation is enforced everywhere else.
#[derive(Deserialize, Validate)]
struct StrictCollectionPath {
#[validate(
length(min = 1, max = 255),
custom(function = "validate_collection_name")
)]
name: String,
}
/// A collection path with basic validation
///
/// Validation for collection paths has been made more strict over time.
/// To prevent breaking changes on existing collections, this is only enforced for newly created
/// collections. Basic validation is enforced everywhere else.
#[derive(Deserialize, Validate)]
struct CollectionPath {
#[validate(
length(min = 1, max = 255),
custom(function = "validate_collection_name_legacy")
)]
name: String,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/collections_api.rs | src/actix/api/collections_api.rs | use std::time::Duration;
use actix_web::rt::time::Instant;
use actix_web::{HttpResponse, Responder, delete, get, patch, post, put, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::cluster_ops::ClusterOperations;
use collection::operations::verification::new_unchecked_verification_pass;
use serde::Deserialize;
use storage::content_manager::collection_meta_ops::{
ChangeAliasesOperation, CollectionMetaOperations, CreateCollection, CreateCollectionOperation,
DeleteCollectionOperation, UpdateCollection, UpdateCollectionOperation,
};
use storage::dispatcher::Dispatcher;
use storage::rbac::AccessRequirements;
use validator::Validate;
use super::CollectionPath;
use crate::actix::api::StrictCollectionPath;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, process_response};
use crate::common::collections::*;
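/// Optional `timeout` query parameter (in seconds) for collection meta operations;
/// when present, the request waits at most this long for the operation to be applied.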
#[derive(Debug, Deserialize, Validate)]
pub struct WaitTimeout {
#[validate(range(min = 1))]
timeout: Option<u64>,
}
impl WaitTimeout {
pub fn timeout(&self) -> Option<Duration> {
self.timeout.map(Duration::from_secs)
}
}
#[get("/collections")]
async fn get_collections(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_list_collections(dispatcher.toc(&access, &pass), access)).await
}
#[get("/aliases")]
async fn get_aliases(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_list_aliases(dispatcher.toc(&access, &pass), access)).await
}
#[get("/collections/{name}")]
async fn get_collection(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_get_collection(
dispatcher.toc(&access, &pass),
access,
&collection.name,
None,
))
.await
}
#[get("/collections/{name}/exists")]
async fn get_collection_existence(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_collection_exists(
dispatcher.toc(&access, &pass),
access,
&collection.name,
))
.await
}
#[get("/collections/{name}/aliases")]
async fn get_collection_aliases(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_list_collection_aliases(
dispatcher.toc(&access, &pass),
access,
&collection.name,
))
.await
}
#[put("/collections/{name}")]
async fn create_collection(
dispatcher: web::Data<Dispatcher>,
collection: Path<StrictCollectionPath>,
operation: Json<CreateCollection>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> HttpResponse {
let timing = Instant::now();
let create_collection_op =
CreateCollectionOperation::new(collection.name.clone(), operation.into_inner());
let Ok(create_collection_op) = create_collection_op else {
return process_response(create_collection_op, timing, None);
};
let response = dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::CreateCollection(create_collection_op),
access,
query.timeout(),
)
.await;
process_response(response, timing, None)
}
#[patch("/collections/{name}")]
async fn update_collection(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<UpdateCollection>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let name = collection.name.clone();
let response = dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::UpdateCollection(UpdateCollectionOperation::new(
name,
operation.into_inner(),
)),
access,
query.timeout(),
)
.await;
process_response(response, timing, None)
}
#[delete("/collections/{name}")]
async fn delete_collection(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let response = dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::DeleteCollection(DeleteCollectionOperation(
collection.name.clone(),
)),
access,
query.timeout(),
)
.await;
process_response(response, timing, None)
}
#[post("/collections/aliases")]
async fn update_aliases(
dispatcher: web::Data<Dispatcher>,
operation: Json<ChangeAliasesOperation>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let response = dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::ChangeAliases(operation.0),
access,
query.timeout(),
)
.await;
process_response(response, timing, None)
}
#[get("/collections/{name}/cluster")]
async fn get_cluster_info(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// No request to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_get_collection_cluster(
dispatcher.toc(&access, &pass),
access,
&collection.name,
))
.await
}
#[post("/collections/{name}/cluster")]
async fn update_collection_cluster(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
operation: Json<ClusterOperations>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let wait_timeout = query.timeout();
let response = do_update_collection_cluster(
&dispatcher.into_inner(),
collection.name.clone(),
operation.0,
access,
wait_timeout,
)
.await;
process_response(response, timing, None)
}
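/// Query parameters for the optimizations endpoint: `completed` toggles whether finished
/// optimizations are included, and `completed_limit` caps how many of them are returned
/// (defaults to `DEFAULT_OPTIMIZATIONS_COMPLETED_LIMIT`).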
#[derive(Deserialize, Copy, Clone, Validate)]
struct OptimizationsParam {
completed: Option<bool>,
completed_limit: Option<u64>,
}
const DEFAULT_OPTIMIZATIONS_COMPLETED_LIMIT: u64 = 16;
#[get("/collections/{name}/optimizations")]
fn get_optimizations(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
params: Query<OptimizationsParam>,
) -> impl Future<Output = HttpResponse> {
let completed_limit = params.completed.unwrap_or(false).then(|| {
params
.completed_limit
.unwrap_or(DEFAULT_OPTIMIZATIONS_COMPLETED_LIMIT) as usize
});
helpers::time(async move {
let pass = new_unchecked_verification_pass();
let collection_pass =
access.check_collection_access(&collection.name, AccessRequirements::new())?;
Ok(dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?
.optimizations(completed_limit)
.await?)
})
}
// Configure services
pub fn config_collections_api(cfg: &mut web::ServiceConfig) {
// Ordering of services is important for correct path pattern matching
// See: <https://github.com/qdrant/qdrant/issues/3543>
cfg.service(update_aliases)
.service(get_collections)
.service(get_collection)
.service(get_collection_existence)
.service(create_collection)
.service(update_collection)
.service(delete_collection)
.service(get_aliases)
.service(get_collection_aliases)
.service(get_cluster_info)
.service(get_optimizations)
.service(update_collection_cluster);
}
#[cfg(test)]
mod tests {
use actix_web::web::Query;
use super::WaitTimeout;
#[test]
fn timeout_is_deserialized() {
let timeout: WaitTimeout = Query::from_query("").unwrap().0;
assert!(timeout.timeout.is_none());
let timeout: WaitTimeout = Query::from_query("timeout=10").unwrap().0;
assert_eq!(timeout.timeout, Some(10))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/issues_api.rs | src/actix/api/issues_api.rs | use actix_web::{Responder, delete, get, web};
use collection::operations::types::IssuesReport;
use storage::rbac::{Access, AccessRequirements};
use crate::actix::auth::ActixAccess;
#[get("/issues")]
async fn get_issues(ActixAccess(access): ActixAccess) -> impl Responder {
crate::actix::helpers::time(async move {
match access {
Access::Global(_) => Ok(IssuesReport {
issues: issues::all_issues(),
}),
Access::Collection(collection_access_list) => {
let requirements = AccessRequirements::new();
let mut allowed_issues = Vec::new();
for collection_name in collection_access_list.meeting_requirements(requirements) {
let collection_issues = issues::all_collection_issues(collection_name);
allowed_issues.extend(collection_issues);
}
Ok(IssuesReport {
issues: allowed_issues,
})
}
}
})
.await
}
#[delete("/issues")]
async fn clear_issues(ActixAccess(access): ActixAccess) -> impl Responder {
crate::actix::helpers::time(async move {
access.check_global_access(AccessRequirements::new().manage())?;
issues::clear();
Ok(true)
})
.await
}
// Configure services
pub fn config_issues_api(cfg: &mut web::ServiceConfig) {
cfg.service(get_issues);
cfg.service(clear_issues);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/discovery_api.rs | src/actix/api/discovery_api.rs | use actix_web::{Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{DiscoverRequest, DiscoverRequestBatch};
use itertools::Itertools;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_batch,
};
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use crate::actix::api::CollectionPath;
use crate::actix::api::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, get_request_hardware_counter, process_response_error};
use crate::common::query::do_discover_batch_points;
use crate::settings::ServiceConfig;
#[post("/collections/{name}/points/discover")]
async fn discover_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<DiscoverRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let DiscoverRequest {
discover_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&discover_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = dispatcher
.toc(&access, &pass)
.discover(
&collection.name,
discover_request,
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
});
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/discover/batch")]
async fn discover_batch_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<DiscoverRequestBatch>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let request = request.into_inner();
let pass = match check_strict_mode_batch(
request.searches.iter().map(|i| &i.discover_request),
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_discover_batch_points(
dispatcher.toc(&access, &pass),
&collection.name,
request,
params.consistency,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|batch_scored_points| {
batch_scored_points
.into_iter()
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
})
.collect_vec()
});
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
pub fn config_discovery_api(cfg: &mut web::ServiceConfig) {
cfg.service(discover_points);
cfg.service(discover_batch_points);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/shards_api.rs | src/actix/api/shards_api.rs | use actix_web::{Responder, get, post, put, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::cluster_ops::{
ClusterOperations, CreateShardingKey, CreateShardingKeyOperation, DropShardingKey,
DropShardingKeyOperation,
};
use collection::operations::verification::new_unchecked_verification_pass;
use storage::dispatcher::Dispatcher;
use tokio::time::Instant;
use crate::actix::api::CollectionPath;
use crate::actix::api::collections_api::WaitTimeout;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, process_response};
use crate::common::collections::{do_get_collection_shard_keys, do_update_collection_cluster};
#[get("/collections/{name}/shards")]
async fn list_shard_keys(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
// No strict-mode checks to verify
let pass = new_unchecked_verification_pass();
helpers::time(do_get_collection_shard_keys(
dispatcher.toc(&access, &pass),
access,
&collection.name,
))
.await
}
#[put("/collections/{name}/shards")]
async fn create_shard_key(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<CreateShardingKey>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let wait_timeout = query.timeout();
let dispatcher = dispatcher.into_inner();
let request = request.into_inner();
let operation = ClusterOperations::CreateShardingKey(CreateShardingKeyOperation {
create_sharding_key: request,
});
let response = do_update_collection_cluster(
&dispatcher,
collection.name.clone(),
operation,
access,
wait_timeout,
)
.await;
process_response(response, timing, None)
}
#[post("/collections/{name}/shards/delete")]
async fn delete_shard_key(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<DropShardingKey>,
Query(query): Query<WaitTimeout>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let timing = Instant::now();
let wait_timeout = query.timeout();
let dispatcher = dispatcher.into_inner();
let request = request.into_inner();
let operation = ClusterOperations::DropShardingKey(DropShardingKeyOperation {
drop_sharding_key: request,
});
let response = do_update_collection_cluster(
&dispatcher,
collection.name.clone(),
operation,
access,
wait_timeout,
)
.await;
process_response(response, timing, None)
}
pub fn config_shards_api(cfg: &mut web::ServiceConfig) {
cfg.service(list_shard_keys)
.service(create_shard_key)
.service(delete_shard_key);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/local_shard_api.rs | src/actix/api/local_shard_api.rs | use std::num::NonZeroU64;
use std::sync::Arc;
use std::time::Duration;
use actix_web::{Responder, post, web};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{
CountRequestInternal, PointRequestInternal, ScrollRequestInternal,
};
use collection::operations::verification::{VerificationPass, new_unchecked_verification_pass};
use collection::shards::shard::ShardId;
use futures::FutureExt;
use segment::types::{Condition, Filter};
use serde::Deserialize;
use storage::content_manager::collection_verification::check_strict_mode;
use storage::content_manager::errors::{StorageError, StorageResult};
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use tokio::time::Instant;
use crate::actix::api::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{
self, get_request_hardware_counter, process_response, process_response_error,
};
use crate::common::query;
use crate::settings::ServiceConfig;
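// These endpoints address a single local shard directly (via `ShardSelectorInternal::ShardId`)
// and may carry an extra hash-ring filter in the request body, which is merged into the
// user filter so that only points belonging to the expected shard are read.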
// Configure services
pub fn config_local_shard_api(cfg: &mut web::ServiceConfig) {
cfg.service(get_points)
.service(scroll_points)
.service(count_points)
.service(cleanup_shard);
}
#[post("/collections/{collection}/shards/{shard}/points")]
async fn get_points(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
path: web::Path<CollectionShard>,
request: web::Json<PointRequestInternal>,
params: web::Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
) -> impl Responder {
// No strict mode verification needed
let pass = new_unchecked_verification_pass();
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
path.collection.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let records = query::do_get_points(
dispatcher.toc(&access, &pass),
&path.collection,
request.into_inner(),
params.consistency,
params.timeout(),
ShardSelectorInternal::ShardId(path.shard),
access,
request_hw_counter.get_counter(),
)
.await
.map(|records| {
records
.into_iter()
.map(api::rest::Record::from)
.collect::<Vec<_>>()
});
process_response(records, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{collection}/shards/{shard}/points/scroll")]
async fn scroll_points(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
path: web::Path<CollectionShard>,
request: web::Json<WithFilter<ScrollRequestInternal>>,
params: web::Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
) -> impl Responder {
let WithFilter {
mut request,
hash_ring_filter,
} = request.into_inner();
let path = path.into_inner();
let pass = match check_strict_mode(
&request,
params.timeout_as_secs(),
&path.collection,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
path.collection.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let hash_ring_filter = match hash_ring_filter {
Some(filter) => {
get_hash_ring_filter(
&dispatcher,
&access,
&path.collection.clone(),
AccessRequirements::new(),
filter.expected_shard_id,
&pass,
)
.map(|i| i.map(Some))
.await
}
None => Ok(None),
};
let res_future = hash_ring_filter.map(|hash_ring_filter| {
request.filter = merge_with_optional_filter(request.filter.take(), hash_ring_filter);
dispatcher.toc(&access, &pass).scroll(
&path.collection,
request,
params.consistency,
params.timeout(),
ShardSelectorInternal::ShardId(path.shard),
access,
request_hw_counter.get_counter(),
)
});
let result = match res_future {
Ok(e) => e.await,
Err(err) => Err(err),
};
process_response(result, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{collection}/shards/{shard}/points/count")]
async fn count_points(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
path: web::Path<CollectionShard>,
request: web::Json<WithFilter<CountRequestInternal>>,
params: web::Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
) -> impl Responder {
let WithFilter {
mut request,
hash_ring_filter,
} = request.into_inner();
let pass = match check_strict_mode(
&request,
params.timeout_as_secs(),
&path.collection,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
path.collection.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let hw_measurement_acc = request_hw_counter.get_counter();
let result = async move {
let hash_ring_filter = match hash_ring_filter {
Some(filter) => get_hash_ring_filter(
&dispatcher,
&access,
&path.collection,
AccessRequirements::new(),
filter.expected_shard_id,
&pass,
)
.await?
.into(),
None => None,
};
request.filter = merge_with_optional_filter(request.filter.take(), hash_ring_filter);
query::do_count_points(
dispatcher.toc(&access, &pass),
&path.collection,
request,
params.consistency,
params.timeout(),
ShardSelectorInternal::ShardId(path.shard),
access,
hw_measurement_acc,
)
.await
}
.await;
process_response(result, timing, request_hw_counter.to_rest_api())
}
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Deserialize)]
pub struct CleanParams {
/// Wait until cleanup is finished, or just acknowledge and return right away
#[serde(default)]
pub wait: bool,
/// Maximum time to wait, otherwise return acknowledged status
pub timeout: Option<NonZeroU64>,
}
#[post("/collections/{collection}/shards/{shard}/cleanup")]
async fn cleanup_shard(
dispatcher: web::Data<Dispatcher>,
ActixAccess(access): ActixAccess,
path: web::Path<CollectionShard>,
params: web::Query<CleanParams>,
) -> impl Responder {
// Nothing to verify here.
let pass = new_unchecked_verification_pass();
helpers::time(async move {
let path = path.into_inner();
let timeout = params.timeout.map(|sec| Duration::from_secs(sec.get()));
dispatcher
.toc(&access, &pass)
.cleanup_local_shard(&path.collection, path.shard, access, params.wait, timeout)
.await
})
.await
}
#[derive(serde::Deserialize, validator::Validate)]
struct CollectionShard {
#[validate(length(min = 1, max = 255))]
collection: String,
shard: ShardId,
}
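/// Wrapper that flattens the inner request and optionally attaches a hash-ring filter
/// (identified by the expected shard id) to restrict reads to that shard's points.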
#[derive(Clone, Debug, serde::Deserialize)]
struct WithFilter<T> {
#[serde(flatten)]
request: T,
#[serde(default)]
hash_ring_filter: Option<SerdeHelper>,
}
#[derive(Clone, Debug, serde::Deserialize)]
struct SerdeHelper {
expected_shard_id: ShardId,
}
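/// Resolve the hash-ring filter for `expected_shard_id` from the collection's shard holder
/// and wrap it as a `must` condition, failing if the shard does not exist.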
async fn get_hash_ring_filter(
dispatcher: &Dispatcher,
access: &Access,
collection: &str,
reqs: AccessRequirements,
expected_shard_id: ShardId,
verification_pass: &VerificationPass,
) -> StorageResult<Filter> {
let pass = access.check_collection_access(collection, reqs)?;
let shard_holder = dispatcher
.toc(access, verification_pass)
.get_collection(&pass)
.await?
.shards_holder();
let hash_ring_filter = shard_holder
.read()
.await
.hash_ring_filter(expected_shard_id)
.ok_or_else(|| {
StorageError::bad_request(format!(
"shard {expected_shard_id} does not exist in collection {collection}"
))
})?;
let condition = Condition::new_custom(Arc::new(hash_ring_filter));
let filter = Filter::new_must(condition);
Ok(filter)
}
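/// Merge the user-provided filter with the optional hash-ring filter; if both are present,
/// the hash-ring filter absorbs the user filter via `merge_owned`.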
fn merge_with_optional_filter(filter: Option<Filter>, hash_ring: Option<Filter>) -> Option<Filter> {
match (filter, hash_ring) {
(Some(filter), Some(hash_ring)) => hash_ring.merge_owned(filter).into(),
(Some(filter), None) => filter.into(),
(None, Some(hash_ring)) => hash_ring.into(),
_ => None,
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/api/recommend_api.rs | src/actix/api/recommend_api.rs | use std::time::Duration;
use actix_web::{Responder, post, web};
use actix_web_validator::{Json, Path, Query};
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{
RecommendGroupsRequest, RecommendRequest, RecommendRequestBatch,
};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use itertools::Itertools;
use segment::types::ScoredPoint;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_batch,
};
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use tokio::time::Instant;
use super::CollectionPath;
use super::read_params::ReadParams;
use crate::actix::auth::ActixAccess;
use crate::actix::helpers::{self, get_request_hardware_counter, process_response_error};
use crate::settings::ServiceConfig;
#[post("/collections/{name}/points/recommend")]
async fn recommend_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<RecommendRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let RecommendRequest {
recommend_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&recommend_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = dispatcher
.toc(&access, &pass)
.recommend(
&collection.name,
recommend_request,
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
});
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
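/// Convert each batch entry into a `(request, shard selector)` pair and delegate to
/// `TableOfContent::recommend_batch`.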
async fn do_recommend_batch_points(
toc: &TableOfContent,
collection_name: &str,
request: RecommendRequestBatch,
read_consistency: Option<ReadConsistency>,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<Vec<ScoredPoint>>, StorageError> {
let requests = request
.searches
.into_iter()
.map(|req| {
let shard_selector = match req.shard_key {
None => ShardSelectorInternal::All,
Some(shard_key) => ShardSelectorInternal::from(shard_key),
};
(req.recommend_request, shard_selector)
})
.collect();
toc.recommend_batch(
collection_name,
requests,
read_consistency,
access,
timeout,
hw_measurement_acc,
)
.await
}
#[post("/collections/{name}/points/recommend/batch")]
async fn recommend_batch_points(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<RecommendRequestBatch>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let pass = match check_strict_mode_batch(
request.searches.iter().map(|i| &i.recommend_request),
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = do_recommend_batch_points(
dispatcher.toc(&access, &pass),
&collection.name,
request.into_inner(),
params.consistency,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await
.map(|batch_scored_points| {
batch_scored_points
.into_iter()
.map(|scored_points| {
scored_points
.into_iter()
.map(api::rest::ScoredPoint::from)
.collect_vec()
})
.collect_vec()
});
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
#[post("/collections/{name}/points/recommend/groups")]
async fn recommend_point_groups(
dispatcher: web::Data<Dispatcher>,
collection: Path<CollectionPath>,
request: Json<RecommendGroupsRequest>,
params: Query<ReadParams>,
service_config: web::Data<ServiceConfig>,
ActixAccess(access): ActixAccess,
) -> impl Responder {
let RecommendGroupsRequest {
recommend_group_request,
shard_key,
} = request.into_inner();
let pass = match check_strict_mode(
&recommend_group_request,
params.timeout_as_secs(),
&collection.name,
&dispatcher,
&access,
)
.await
{
Ok(pass) => pass,
Err(err) => return process_response_error(err, Instant::now(), None),
};
let shard_selection = match shard_key {
None => ShardSelectorInternal::All,
Some(shard_keys) => shard_keys.into(),
};
let request_hw_counter = get_request_hardware_counter(
&dispatcher,
collection.name.clone(),
service_config.hardware_reporting(),
None,
);
let timing = Instant::now();
let result = crate::common::query::do_recommend_point_groups(
dispatcher.toc(&access, &pass),
&collection.name,
recommend_group_request,
params.consistency,
shard_selection,
access,
params.timeout(),
request_hw_counter.get_counter(),
)
.await;
helpers::process_response(result, timing, request_hw_counter.to_rest_api())
}
// Configure services
pub fn config_recommend_api(cfg: &mut web::ServiceConfig) {
cfg.service(recommend_points)
.service(recommend_batch_points)
.service(recommend_point_groups);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/lib.rs | lib/storage/src/lib.rs | //! Storage - is a crate which contains all service functions, abstracted from the external interface
//!
//! It provides all functions, which could be used from REST (or any other interface), but do not
//! implement any concrete interface.
use content_manager::collection_meta_ops::CollectionMetaOperations;
use content_manager::consensus_manager::ConsensusStateRef;
use content_manager::consensus_ops::ConsensusOperations;
use content_manager::errors::StorageError;
use content_manager::toc::TableOfContent;
use types::ClusterStatus;
pub mod content_manager;
pub mod dispatcher;
pub mod issues_subscribers;
pub mod rbac;
pub mod types;
pub mod serialize_peer_addresses {
use std::collections::HashMap;
use itertools::Itertools;
use serde::{self, Deserialize, Deserializer, Serialize, Serializer, de};
use crate::types::PeerAddressById;
pub fn serialize<S>(addresses: &PeerAddressById, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let addresses: HashMap<u64, String> = addresses
.clone()
.into_iter()
.map(|(id, address)| (id, format!("{address}")))
.collect();
addresses.serialize(serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<PeerAddressById, D::Error>
where
D: Deserializer<'de>,
{
let addresses: HashMap<u64, String> = HashMap::deserialize(deserializer)?;
addresses
.into_iter()
.map(|(id, address)| address.parse().map(|address| (id, address)))
.try_collect()
.map_err(|err| de::Error::custom(format!("Failed to parse uri: {err}")))
}
}
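// Illustrative sketch: this module is meant to be plugged into serde's `with`
// attribute on a field of type `PeerAddressById`, so that `Uri` values are written
// as plain strings and parsed back on load. `ExamplePeerState` and the address used
// below are hypothetical and exist only for illustration.
#[cfg(test)]
mod serialize_peer_addresses_example {
    use serde::{Deserialize, Serialize};

    use crate::types::PeerAddressById;

    #[derive(Serialize, Deserialize)]
    struct ExamplePeerState {
        // Serialize the `Uri` values as strings, parse them back on deserialization.
        #[serde(with = "crate::serialize_peer_addresses")]
        peer_address_by_id: PeerAddressById,
    }

    #[test]
    fn roundtrip_via_json() {
        let state = ExamplePeerState {
            peer_address_by_id: PeerAddressById::from([(
                1,
                "http://localhost:6335".parse().unwrap(),
            )]),
        };
        let json = serde_json::to_string(&state).unwrap();
        let parsed: ExamplePeerState = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed.peer_address_by_id.len(), 1);
    }
}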
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/dispatcher.rs | lib/storage/src/dispatcher.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use api::rest::models::HardwareUsage;
use collection::common::fetch_vectors::CollectionName;
use collection::config::ShardingMethod;
use collection::operations::verification::VerificationPass;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use common::counter::hardware_accumulator::HwSharedDrain;
use common::defaults::CONSENSUS_META_OP_WAIT;
use futures::StreamExt as _;
use futures::stream::FuturesUnordered;
use segment::types::ShardKey;
use crate::content_manager::collection_meta_ops::AliasOperations;
use crate::content_manager::shard_distribution::ShardDistributionProposal;
use crate::rbac::{Access, CollectionMultipass};
use crate::{
ClusterStatus, CollectionMetaOperations, ConsensusOperations, ConsensusStateRef, StorageError,
TableOfContent,
};
#[derive(Clone)]
pub struct Dispatcher {
toc: Arc<TableOfContent>,
consensus_state: Option<ConsensusStateRef>,
resharding_enabled: bool,
}
impl Dispatcher {
pub fn new(toc: Arc<TableOfContent>) -> Self {
Self {
toc,
consensus_state: None,
resharding_enabled: false,
}
}
pub fn with_consensus(self, state_ref: ConsensusStateRef, resharding_enabled: bool) -> Self {
Self {
consensus_state: Some(state_ref),
resharding_enabled,
..self
}
}
/// Get the table of content.
/// The `_access` and `_verification_pass` parameters are not used, but they are required to verify the caller's
/// possession of both objects.
pub fn toc(
&self,
_access: &Access,
_verification_pass: &VerificationPass,
) -> &Arc<TableOfContent> {
&self.toc
}
pub fn consensus_state(&self) -> Option<&ConsensusStateRef> {
self.consensus_state.as_ref()
}
pub fn is_resharding_enabled(&self) -> bool {
self.resharding_enabled
}
/// If `wait_timeout` is not supplied, the default duration will be used.
///
/// This function needs to be called from a runtime with timers enabled.
///
/// ## Cancel safety
///
/// This function is cancel safe.
///
/// On deployments without consensus - a submitted operation is always run to completion.
pub async fn submit_collection_meta_op(
&self,
operation: CollectionMetaOperations,
access: Access,
wait_timeout: Option<Duration>,
) -> Result<bool, StorageError> {
access.check_collection_meta_operation(&operation)?;
// if distributed deployment is enabled
if let Some(state) = self.consensus_state.as_ref() {
let start = Instant::now();
// List of operations to await for collection to be operational
let mut expect_operations: Vec<ConsensusOperations> = vec![];
let op = match operation {
CollectionMetaOperations::CreateCollection(mut op) => {
if !op.is_distribution_set() {
match op.create_collection.sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
// Suggest even distribution of shards across nodes
let number_of_peers = state.0.peer_count();
let collection_defaults =
self.toc.storage_config.collection.as_ref();
let shard_distribution = self.toc.suggest_shard_distribution(
&op,
collection_defaults,
number_of_peers,
);
// Expect all replicas to become active eventually
for (shard_id, peer_ids) in &shard_distribution.distribution {
for peer_id in peer_ids {
expect_operations.push(
ConsensusOperations::initialize_replica(
op.collection_name.clone(),
*shard_id,
*peer_id,
),
);
}
}
op.set_distribution(shard_distribution);
}
ShardingMethod::Custom => {
// If custom sharding is used - we don't create any shards in advance
let empty_distribution = ShardDistributionProposal::empty();
op.set_distribution(empty_distribution);
}
}
}
if let Some(uuid) = &op.create_collection.uuid {
log::warn!(
"Collection UUID {uuid} explicitly specified, \
when proposing create collection {} operation, \
new random UUID will be generated instead",
op.collection_name,
);
}
op.create_collection.uuid = Some(uuid::Uuid::new_v4());
CollectionMetaOperations::CreateCollection(op)
}
CollectionMetaOperations::CreateShardKey(op) => {
CollectionMetaOperations::CreateShardKey(op)
}
op => op,
};
let operation_awaiter =
// If explicit timeout is set - then we need to wait for all expected operations.
// E.g. in case of `CreateCollection` we will explicitly wait for all replicas to be activated.
// We need to register receivers (by calling the function) before submitting the operation.
if !expect_operations.is_empty() {
Some(state.await_for_multiple_operations(expect_operations, wait_timeout))
} else {
None
};
let do_sync_nodes = match &op {
// Sync nodes after collection or shard key creation
CollectionMetaOperations::CreateCollection(_)
| CollectionMetaOperations::CreateShardKey(_) => true,
// Sync nodes when creating or renaming collection aliases
CollectionMetaOperations::ChangeAliases(changes) => {
changes.actions.iter().any(|change| match change {
AliasOperations::CreateAlias(_) | AliasOperations::RenameAlias(_) => true,
AliasOperations::DeleteAlias(_) => false,
})
}
// TODO(resharding): Do we need/want to synchronize `Resharding` operations?
CollectionMetaOperations::Resharding(_, _) => false,
// No need to sync nodes for other operations
CollectionMetaOperations::UpdateCollection(_)
| CollectionMetaOperations::DeleteCollection(_)
| CollectionMetaOperations::TransferShard(_, _)
| CollectionMetaOperations::SetShardReplicaState(_)
| CollectionMetaOperations::DropShardKey(_)
| CollectionMetaOperations::CreatePayloadIndex(_)
| CollectionMetaOperations::DropPayloadIndex(_)
| CollectionMetaOperations::Nop { .. } => false,
#[cfg(feature = "staging")]
CollectionMetaOperations::TestSlowDown(_) => false,
};
// During creation of a shard key, we must ensure that all replicas are ready to accept
// write requests, so the client-side script can rely on the fact that the
// shard creation request is complete.
//
// To validate this, we explicitly perform the following checks:
//
// 1. Wait for consensus to accept shard create operation on current machine.
// ( here newly created shards should start to report state change from `Inactive` to `Active` )
// 2. Wait for all local shards to become active.
// ( At this stage we are sure, that all consensus operations are created, but might not be applied everywhere )
// 3. Wait for all remote peers to have at least the same state as the current peer.
// ( So we are sure, that all remote peers have also switched to `Active` state )
let create_shard_key = match &op {
CollectionMetaOperations::CreateShardKey(op) => {
let collection_name: CollectionName = op.collection_name.clone();
let shard_key = op.shard_key.clone();
let initial_state = op.initial_state;
Some((collection_name, shard_key, initial_state))
}
_ => None,
};
// Send operation to consensus and wait for it to be applied locally
let res = state
.propose_consensus_op_with_await(
ConsensusOperations::CollectionMeta(Box::new(op)),
wait_timeout,
)
.await?;
if let Some(operation_awaiter) = operation_awaiter {
// Actually await for expected operations to complete on the consensus
match operation_awaiter.await {
Ok(Ok(())) => {} // all good
Ok(Err(err)) => {
log::warn!("Not all expected operations were completed: {err}")
}
Err(err) => log::warn!("Awaiting for expected operations timed out: {err}"),
}
}
// Wait for shards activation
if let Some((collection_name, shard_key, initial_state)) = create_shard_key
&& initial_state.is_none()
{
// Only do this if the initial state is not set: since introducing the `Initial` state, which needs a
// transition to `Active`, we only want to wait for `Active` when no initial state was requested.
let remaining_timeout =
wait_timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
self.wait_for_shard_key_activation(collection_name, shard_key, remaining_timeout)
.await?;
};
// On some operations, synchronize all nodes to ensure all are ready for point operations
if do_sync_nodes {
let remaining_timeout =
wait_timeout.map(|timeout| timeout.saturating_sub(start.elapsed()));
if let Err(err) = self.await_consensus_sync(remaining_timeout).await {
log::warn!(
"Failed to synchronize all nodes after collection operation in time, some nodes may not be ready: {err}",
);
}
}
Ok(res)
} else {
let toc = self.toc.clone();
tokio::task::spawn(async move { toc.perform_collection_meta_op(operation).await })
.await?
}
}
pub fn cluster_status(&self) -> ClusterStatus {
match self.consensus_state.as_ref() {
Some(state) => state.cluster_status(),
None => ClusterStatus::Disabled,
}
}
pub async fn await_consensus_sync(
&self,
timeout: Option<Duration>,
) -> Result<(), StorageError> {
let timeout = timeout.unwrap_or(CONSENSUS_META_OP_WAIT);
let Some(state) = self.consensus_state.as_ref() else {
return Ok(());
};
let state = state.hard_state();
let term = state.term;
let commit = state.commit;
let channel_service = self.toc.get_channel_service();
let this_peer_id = self.toc.this_peer_id;
channel_service
.await_commit_on_all_peers(this_peer_id, commit, term, timeout)
.await?;
log::debug!("Consensus is synchronized with term: {term}, commit: {commit}");
Ok(())
}
/// Waits for all shards of a specific shard key to become active.
pub async fn wait_for_shard_key_activation(
&self,
collection_name: CollectionName,
shard_key: ShardKey,
timeout: Option<Duration>,
) -> Result<(), StorageError> {
let timeout = timeout.unwrap_or(CONSENSUS_META_OP_WAIT);
let mut wait_for_active = FuturesUnordered::new();
{
let shard_holder = self
.toc
.get_collection(&CollectionMultipass.issue_pass(&collection_name))
.await?
.shards_holder()
.read_owned()
.await;
for replica_set in shard_holder.all_shards() {
if replica_set.shard_key() != Some(&shard_key) {
continue;
}
for (peer_id, replica_state) in replica_set.peers() {
if replica_state == ReplicaState::Active {
continue;
}
wait_for_active.push(replica_set.wait_for_state(
peer_id,
ReplicaState::Active,
timeout,
));
}
}
}
while let Some(result) = wait_for_active.next().await {
result?;
}
Ok(())
}
pub fn all_hw_metrics(&self) -> HashMap<String, HardwareUsage> {
self.toc.all_hw_metrics()
}
#[must_use]
pub fn get_collection_hw_metrics(&self, collection: String) -> HwSharedDrain {
self.toc.get_collection_hw_metrics(collection)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/types.rs | lib/storage/src/types.rs | use std::collections::HashMap;
use std::num::NonZeroUsize;
use std::time::Duration;
use chrono::{DateTime, Utc};
use collection::common::snapshots_manager::SnapshotsConfig;
use collection::config::{WalConfig, default_on_disk_payload};
use collection::operations::config_diff::OptimizersConfigDiff;
use collection::operations::shared_storage_config::{
DEFAULT_IO_SHARD_TRANSFER_LIMIT, DEFAULT_SNAPSHOTS_PATH, SharedStorageConfig,
};
use collection::operations::types::{NodeType, PeerMetadata};
use collection::optimizers_builder::OptimizersConfig;
use collection::shards::shard::PeerId;
use collection::shards::transfer::ShardTransferMethod;
use memory::madvise;
use schemars::JsonSchema;
use segment::common::anonymize::{Anonymize, anonymize_collection_values};
use segment::data_types::collection_defaults::CollectionConfigDefaults;
use segment::types::{HnswConfig, HnswGlobalConfig};
use serde::{Deserialize, Serialize};
use tonic::transport::Uri;
use validator::Validate;
pub type PeerAddressById = HashMap<PeerId, Uri>;
pub type PeerMetadataById = HashMap<PeerId, PeerMetadata>;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct PerformanceConfig {
pub max_search_threads: usize,
#[serde(default)]
pub max_optimization_runtime_threads: usize,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub update_rate_limit: Option<usize>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub search_timeout_sec: Option<usize>,
/// CPU budget, how many CPUs (threads) to allocate for an optimization job.
/// If 0 - auto selection, keep 1 or more CPUs unallocated depending on CPU size
/// If negative - subtract this relative number of CPUs from the available CPUs.
/// If positive - use this absolute number of CPUs.
#[serde(default)]
pub optimizer_cpu_budget: isize,
/// IO budget, how many parallel IO operations to allow for an optimization job.
/// IO usage per optimization job is equivalent to number of indexing threads.
/// If 0 - auto selection, one IO operation per each CPU.
/// Otherwise - use this exact number of IO operations.
#[serde(default)]
pub optimizer_io_budget: usize,
#[serde(default = "default_io_shard_transfers_limit")]
pub incoming_shard_transfers_limit: Option<usize>,
#[serde(default = "default_io_shard_transfers_limit")]
pub outgoing_shard_transfers_limit: Option<usize>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub async_scorer: Option<bool>,
}
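// Hypothetical helper, for illustration only: one way the `optimizer_cpu_budget`
// semantics documented on the field above could be resolved against an available
// CPU count. The real resolution logic lives elsewhere and may differ in detail
// (e.g. how many CPUs are kept unallocated in the auto case).
#[cfg(test)]
mod optimizer_cpu_budget_example {
    /// Resolve a CPU budget setting to a concrete thread count:
    /// 0 = auto (keep at least one CPU unallocated), negative = subtract from the
    /// available CPUs, positive = absolute number of CPUs.
    fn resolve_cpu_budget(available_cpus: usize, optimizer_cpu_budget: isize) -> usize {
        match optimizer_cpu_budget {
            0 => available_cpus.saturating_sub(1).max(1),
            n if n < 0 => available_cpus.saturating_sub(n.unsigned_abs()).max(1),
            n => n as usize,
        }
    }

    #[test]
    fn resolves_documented_cases() {
        assert_eq!(resolve_cpu_budget(8, 0), 7); // auto: keep one CPU unallocated
        assert_eq!(resolve_cpu_budget(8, -2), 6); // relative: subtract two CPUs
        assert_eq!(resolve_cpu_budget(8, 3), 3); // absolute: use exactly three CPUs
    }
}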
const fn default_io_shard_transfers_limit() -> Option<usize> {
DEFAULT_IO_SHARD_TRANSFER_LIMIT
}
/// Global configuration of the storage, loaded at service launch; the default is stored in ./config
#[derive(Clone, Debug, Deserialize, Validate)]
pub struct StorageConfig {
#[validate(length(min = 1))]
pub storage_path: String,
#[serde(default = "default_snapshots_path")]
#[validate(length(min = 1))]
pub snapshots_path: String,
#[serde(default)]
pub snapshots_config: SnapshotsConfig,
#[validate(length(min = 1))]
#[serde(default)]
pub temp_path: Option<String>,
#[serde(default = "default_on_disk_payload")]
pub on_disk_payload: bool,
#[validate(nested)]
pub optimizers: OptimizersConfig,
#[validate(nested)]
#[serde(default)]
pub optimizers_overwrite: Option<OptimizersConfigDiff>,
#[validate(nested)]
pub wal: WalConfig,
pub performance: PerformanceConfig,
#[validate(nested)]
pub hnsw_index: HnswConfig,
#[validate(nested)]
#[serde(default)]
pub hnsw_global_config: HnswGlobalConfig,
#[serde(default = "default_mmap_advice")]
pub mmap_advice: madvise::Advice,
#[serde(default)]
pub node_type: NodeType,
#[serde(default)]
pub update_queue_size: Option<usize>,
#[serde(default)]
pub handle_collection_load_errors: bool,
/// If provided - qdrant will start in recovery mode, which means that it will not accept any new data.
/// Only collection metadata will be available, and it will only process collection delete requests.
/// The provided value will be used as the error message for unavailable requests.
#[serde(default)]
pub recovery_mode: Option<String>,
#[serde(default)]
pub update_concurrency: Option<NonZeroUsize>,
/// Default method used for transferring shards.
#[serde(default)]
pub shard_transfer_method: Option<ShardTransferMethod>,
/// Default values for collections.
#[validate(nested)]
#[serde(default)]
pub collection: Option<CollectionConfigDefaults>,
/// Maximum number of collections to allow in the cluster.
#[serde(default)]
pub max_collections: Option<usize>,
}
impl StorageConfig {
pub fn to_shared_storage_config(&self, is_distributed: bool) -> SharedStorageConfig {
SharedStorageConfig::new(
self.update_queue_size,
self.node_type,
self.handle_collection_load_errors,
self.recovery_mode.clone(),
self.performance
.search_timeout_sec
.map(|x| Duration::from_secs(x as u64)),
self.update_concurrency,
is_distributed,
self.shard_transfer_method,
self.performance.incoming_shard_transfers_limit,
self.performance.outgoing_shard_transfers_limit,
self.snapshots_path.clone(),
self.snapshots_config.clone(),
self.hnsw_global_config.clone(),
common::defaults::search_thread_count(self.performance.max_search_threads),
)
}
}
fn default_snapshots_path() -> String {
DEFAULT_SNAPSHOTS_PATH.to_string()
}
const fn default_mmap_advice() -> madvise::Advice {
madvise::Advice::Random
}
/// Information of a peer in the cluster
#[derive(Anonymize, Debug, Serialize, JsonSchema, Clone)]
pub struct PeerInfo {
pub uri: String,
// ToDo: How long ago was the last communication? In milliseconds
// pub last_responded_millis: usize
}
/// Summary information about the current raft state
#[derive(Debug, Serialize, JsonSchema, Anonymize, Clone)]
#[anonymize(false)]
pub struct RaftInfo {
/// Raft divides time into terms of arbitrary length, each beginning with an election.
/// If a candidate wins the election, it remains the leader for the rest of the term.
/// The term number increases monotonically.
/// Each server stores the current term number which is also exchanged in every communication.
pub term: u64,
/// The index of the latest committed (finalized) operation that this peer is aware of.
pub commit: u64,
/// Number of consensus operations pending to be applied on this peer
pub pending_operations: usize,
/// Leader of the current term
pub leader: Option<u64>,
/// Role of this peer in the current term
pub role: Option<StateRole>,
/// Is this peer a voter or a learner
pub is_voter: bool,
}
/// Role of the peer in the consensus
#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, JsonSchema, Anonymize)]
pub enum StateRole {
// The node is a follower of the leader.
Follower,
// The node could become a leader.
Candidate,
// The node is a leader.
Leader,
// The node could become a candidate, if `prevote` is enabled.
PreCandidate,
}
impl From<raft::StateRole> for StateRole {
fn from(role: raft::StateRole) -> Self {
match role {
raft::StateRole::Follower => Self::Follower,
raft::StateRole::Candidate => Self::Candidate,
raft::StateRole::Leader => Self::Leader,
raft::StateRole::PreCandidate => Self::PreCandidate,
}
}
}
/// Message send failures for a particular peer
#[derive(Debug, Serialize, JsonSchema, Clone, Default)]
pub struct MessageSendErrors {
pub count: usize,
pub latest_error: Option<String>,
/// Timestamp of the latest error
pub latest_error_timestamp: Option<chrono::DateTime<chrono::Utc>>,
}
/// Description of enabled cluster
#[derive(Debug, Serialize, JsonSchema, Clone, Anonymize)]
pub struct ClusterInfo {
/// ID of this peer
#[anonymize(false)]
pub peer_id: PeerId,
/// Peers composition of the cluster with main information
#[anonymize(with = anonymize_collection_values)]
pub peers: HashMap<PeerId, PeerInfo>,
/// Status of the Raft consensus
pub raft_info: RaftInfo,
/// Status of the thread that executes raft consensus
pub consensus_thread_status: ConsensusThreadStatus,
/// Consecutive failures of message send operations in consensus, by peer address.
/// On the first successful send to that peer, the entry is removed from this hashmap.
#[anonymize(false)]
pub message_send_failures: HashMap<String, MessageSendErrors>,
}
/// Information about current cluster status and structure
#[derive(Debug, Serialize, JsonSchema, Anonymize, Clone)]
#[serde(tag = "status")]
#[serde(rename_all = "snake_case")]
pub enum ClusterStatus {
Disabled,
Enabled(ClusterInfo),
}
/// Information about current consensus thread status
#[derive(Debug, Serialize, JsonSchema, Anonymize, Clone)]
#[serde(tag = "consensus_thread_status")]
#[serde(rename_all = "snake_case")]
#[anonymize(false)]
pub enum ConsensusThreadStatus {
Working { last_update: DateTime<Utc> },
Stopped,
StoppedWithErr { err: String },
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/issues_subscribers.rs | lib/storage/src/issues_subscribers.rs | use std::sync::Arc;
use collection::events::{CollectionDeletedEvent, IndexCreatedEvent, SlowQueryEvent};
use collection::problems::UnindexedField;
use issues::Code;
use issues::broker::Subscriber;
#[derive(Clone, Copy)]
pub struct UnindexedFieldSubscriber;
impl Subscriber<SlowQueryEvent> for UnindexedFieldSubscriber {
fn notify(&self, event: Arc<SlowQueryEvent>) {
if event.filters.is_empty() {
return;
}
for filter in &event.filters {
collection::problems::UnindexedField::submit_possible_suspects(
filter,
&event.schema,
event.collection_id.clone(),
)
}
}
}
impl Subscriber<CollectionDeletedEvent> for UnindexedFieldSubscriber {
fn notify(&self, event: Arc<CollectionDeletedEvent>) {
issues::solve_by_filter::<UnindexedField, _>(|code| {
UnindexedField::get_collection_name(code) == event.collection_id
});
}
}
impl Subscriber<IndexCreatedEvent> for UnindexedFieldSubscriber {
fn notify(&self, event: Arc<IndexCreatedEvent>) {
issues::solve(Code::new::<UnindexedField>(
UnindexedField::get_instance_id(&event.collection_id, &event.field_name),
));
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/errors.rs | lib/storage/src/content_manager/errors.rs | use std::backtrace::Backtrace;
use std::io::Error as IoError;
use std::time::Duration;
use collection::operations::types::CollectionError;
use collection::shards::shard::ShardId;
use io::file_operations::FileStorageError;
use tempfile::PersistError;
use thiserror::Error;
pub type StorageResult<T> = Result<T, StorageError>;
#[derive(Error, Debug, Clone)]
#[error("{0}")]
pub enum StorageError {
#[error("Wrong input: {description}")]
BadInput { description: String },
#[error("Wrong input: {description}")]
AlreadyExists { description: String },
#[error("Not found: {description}")]
NotFound { description: String },
#[error("Service internal error: {description}")]
ServiceError {
description: String,
backtrace: Option<String>,
},
#[error("Bad request: {description}")]
BadRequest { description: String },
#[error("Storage locked: {description}")]
Locked { description: String },
#[error("Timeout: {description}")]
Timeout { description: String },
#[error("Checksum mismatch: expected {expected}, actual {actual}")]
ChecksumMismatch { expected: String, actual: String },
#[error("Forbidden: {description}")]
Forbidden { description: String },
#[error("Pre-condition failure: {description}")]
PreconditionFailed { description: String }, // system is not in the state to perform the operation
#[error("{description}")]
InferenceError { description: String },
#[error("Rate limiting exceeded: {description}")]
RateLimitExceeded {
description: String,
retry_after: Option<Duration>,
},
#[error("Shard temporarily unavailable: {description}")]
ShardUnavailable { description: String },
#[error("Partial snapshot for shard {shard_id} contains no changes")]
EmptyPartialSnapshot { shard_id: ShardId },
}
impl StorageError {
pub fn inference_error(description: impl Into<String>) -> Self {
Self::InferenceError {
description: description.into(),
}
}
pub fn service_error(description: impl Into<String>) -> Self {
Self::ServiceError {
description: description.into(),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
pub fn bad_request(description: impl Into<String>) -> Self {
Self::BadRequest {
description: description.into(),
}
}
pub fn bad_input(description: impl Into<String>) -> Self {
Self::BadInput {
description: description.into(),
}
}
pub fn already_exists(description: impl Into<String>) -> Self {
Self::AlreadyExists {
description: description.into(),
}
}
pub fn not_found(description: impl Into<String>) -> Self {
Self::NotFound {
description: description.into(),
}
}
pub fn checksum_mismatch(expected: impl Into<String>, actual: impl Into<String>) -> Self {
Self::ChecksumMismatch {
expected: expected.into(),
actual: actual.into(),
}
}
pub fn forbidden(description: impl Into<String>) -> Self {
Self::Forbidden {
description: description.into(),
}
}
pub fn timeout(timeout: Duration, operation: impl Into<String>) -> Self {
Self::Timeout {
description: format!(
"Operation '{}' timed out after {timeout:?}",
operation.into(),
),
}
}
pub fn rate_limit_exceeded(
description: impl Into<String>,
retry_after: Option<Duration>,
) -> StorageError {
StorageError::RateLimitExceeded {
description: description.into(),
retry_after,
}
}
/// Used to override the `description` field of the resulting `StorageError`
pub fn from_inconsistent_shard_failure(
err: CollectionError,
overriding_description: String,
) -> StorageError {
match err {
CollectionError::BadInput { .. } => StorageError::BadInput {
description: overriding_description,
},
CollectionError::NotFound { .. } => StorageError::NotFound {
description: overriding_description,
},
CollectionError::PointNotFound { .. } => StorageError::NotFound {
description: overriding_description,
},
CollectionError::ServiceError { backtrace, .. } => StorageError::ServiceError {
description: overriding_description,
backtrace,
},
CollectionError::BadRequest { .. } => StorageError::BadRequest {
description: overriding_description,
},
CollectionError::Cancelled { .. } => StorageError::ServiceError {
description: format!("Operation cancelled: {overriding_description}"),
backtrace: None,
},
CollectionError::InconsistentShardFailure { ref first_err, .. } => {
StorageError::from_inconsistent_shard_failure(
*first_err.clone(),
overriding_description,
)
}
CollectionError::BadShardSelection { .. } => StorageError::BadRequest {
description: overriding_description,
},
CollectionError::ForwardProxyError { error, .. } => {
Self::from_inconsistent_shard_failure(*error, overriding_description)
}
CollectionError::OutOfMemory { .. } => StorageError::ServiceError {
description: overriding_description,
backtrace: None,
},
CollectionError::Timeout { .. } => StorageError::Timeout {
description: overriding_description,
},
CollectionError::PreConditionFailed { .. } => StorageError::PreconditionFailed {
description: overriding_description,
},
CollectionError::ObjectStoreError { .. } => StorageError::ServiceError {
description: overriding_description,
backtrace: None,
},
CollectionError::StrictMode { description } => StorageError::BadRequest { description },
CollectionError::InferenceError { description } => {
StorageError::InferenceError { description }
}
CollectionError::RateLimitExceeded {
description,
retry_after,
} => StorageError::RateLimitExceeded {
description,
retry_after,
},
CollectionError::ShardUnavailable { .. } => StorageError::ShardUnavailable {
description: overriding_description,
},
}
}
}
impl From<CollectionError> for StorageError {
fn from(err: CollectionError) -> Self {
match err {
CollectionError::BadInput { description } => StorageError::BadInput { description },
CollectionError::NotFound { .. } => StorageError::NotFound {
description: format!("{err}"),
},
CollectionError::PointNotFound { .. } => StorageError::NotFound {
description: format!("{err}"),
},
CollectionError::ServiceError { error, backtrace } => StorageError::ServiceError {
description: error,
backtrace,
},
CollectionError::BadRequest { description } => StorageError::BadRequest { description },
CollectionError::Cancelled { description } => StorageError::ServiceError {
description: format!("Operation cancelled: {description}"),
backtrace: None,
},
CollectionError::InconsistentShardFailure { ref first_err, .. } => {
let full_description = format!("{}", &err);
StorageError::from_inconsistent_shard_failure(*first_err.clone(), full_description)
}
CollectionError::BadShardSelection { description } => {
StorageError::BadRequest { description }
}
CollectionError::ForwardProxyError { error, .. } => {
let full_description = format!("{error}");
StorageError::from_inconsistent_shard_failure(*error, full_description)
}
CollectionError::OutOfMemory { .. } => StorageError::ServiceError {
description: format!("{err}"),
backtrace: None,
},
CollectionError::Timeout { .. } => StorageError::Timeout {
description: format!("{err}"),
},
CollectionError::PreConditionFailed { .. } => StorageError::PreconditionFailed {
description: format!("{err}"),
},
CollectionError::ObjectStoreError { .. } => StorageError::ServiceError {
description: format!("{err}"),
backtrace: None,
},
CollectionError::StrictMode { description } => StorageError::BadRequest { description },
CollectionError::InferenceError { description } => {
StorageError::InferenceError { description }
}
CollectionError::RateLimitExceeded {
description,
retry_after,
} => StorageError::RateLimitExceeded {
description,
retry_after,
},
CollectionError::ShardUnavailable { description } => {
StorageError::ShardUnavailable { description }
}
}
}
}
impl From<IoError> for StorageError {
fn from(err: IoError) -> Self {
StorageError::service_error(format!("{err}"))
}
}
impl From<FileStorageError> for StorageError {
fn from(err: FileStorageError) -> Self {
Self::service_error(err.to_string())
}
}
impl From<tempfile::PathPersistError> for StorageError {
fn from(err: tempfile::PathPersistError) -> Self {
Self::service_error(format!(
"failed to persist temporary file path {}: {}",
err.path.display(),
err.error,
))
}
}
impl<Guard> From<std::sync::PoisonError<Guard>> for StorageError {
fn from(err: std::sync::PoisonError<Guard>) -> Self {
StorageError::ServiceError {
description: format!("Mutex lock poisoned: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl<T> From<std::sync::mpsc::SendError<T>> for StorageError {
fn from(err: std::sync::mpsc::SendError<T>) -> Self {
StorageError::ServiceError {
description: format!("Channel closed: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<tokio::sync::oneshot::error::RecvError> for StorageError {
fn from(err: tokio::sync::oneshot::error::RecvError) -> Self {
StorageError::ServiceError {
description: format!("Oneshot channel sender dropped: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<tokio::sync::broadcast::error::RecvError> for StorageError {
fn from(err: tokio::sync::broadcast::error::RecvError) -> Self {
StorageError::ServiceError {
description: format!("Broadcast channel sender dropped: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<serde_cbor::Error> for StorageError {
fn from(err: serde_cbor::Error) -> Self {
StorageError::ServiceError {
description: format!("cbor (de)serialization error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<serde_json::Error> for StorageError {
fn from(err: serde_json::Error) -> Self {
StorageError::ServiceError {
description: format!("json (de)serialization error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<prost_for_raft::EncodeError> for StorageError {
fn from(err: prost_for_raft::EncodeError) -> Self {
StorageError::ServiceError {
description: format!("prost encode error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<prost_for_raft::DecodeError> for StorageError {
fn from(err: prost_for_raft::DecodeError) -> Self {
StorageError::ServiceError {
description: format!("prost decode error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<raft::Error> for StorageError {
fn from(err: raft::Error) -> Self {
StorageError::ServiceError {
description: format!("Error in Raft consensus: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl<E: std::fmt::Display> From<atomicwrites::Error<E>> for StorageError {
fn from(err: atomicwrites::Error<E>) -> Self {
StorageError::ServiceError {
description: format!("Failed to write file: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<tonic::transport::Error> for StorageError {
fn from(err: tonic::transport::Error) -> Self {
StorageError::ServiceError {
description: format!("Tonic transport error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<reqwest::Error> for StorageError {
fn from(err: reqwest::Error) -> Self {
StorageError::ServiceError {
description: format!("Http request error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<tokio::task::JoinError> for StorageError {
fn from(err: tokio::task::JoinError) -> Self {
StorageError::ServiceError {
description: format!("Tokio task join error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<PersistError> for StorageError {
fn from(err: PersistError) -> Self {
StorageError::ServiceError {
description: format!("Persist error: {err}"),
backtrace: Some(Backtrace::force_capture().to_string()),
}
}
}
impl From<cancel::Error> for StorageError {
fn from(err: cancel::Error) -> Self {
CollectionError::from(err).into()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/shard_distribution.rs | lib/storage/src/content_manager/shard_distribution.rs | use std::cmp::{self, Reverse};
use std::collections::BinaryHeap;
use std::iter::repeat_with;
use std::num::NonZeroU32;
use collection::shards::collection_shard_distribution::CollectionShardDistribution;
use collection::shards::shard::{PeerId, ShardId};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(PartialEq, Eq)]
struct PeerShardCount {
shard_count: usize,
/// Randomized bias value, to prevent having a consistent order of peers across multiple
/// generated distributions. This roughly balances shards across all nodes, if the number of
/// shards is less than the number of nodes.
bias: usize,
peer_id: PeerId,
}
impl PeerShardCount {
fn new(peer_id: PeerId) -> Self {
Self {
shard_count: 0,
bias: rand::random::<u32>() as usize,
peer_id,
}
}
fn get_and_inc_shard_count(&mut self) -> PeerId {
self.shard_count += 1;
self.peer_id
}
}
impl PartialOrd for PeerShardCount {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
/// Explicitly implement ordering to make sure we don't accidentally break this.
///
/// Ordering:
/// - shard_count: lowest number of shards first
/// - bias: randomize order of peers with same number of shards
/// - peer_id
impl Ord for PeerShardCount {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.shard_count
.cmp(&other.shard_count)
.then(self.bias.cmp(&other.bias))
// It is very unlikely that we need this, so `then_with` is a bit faster
.then_with(|| self.peer_id.cmp(&other.peer_id))
}
}
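// Illustrative example of the ordering documented above: a peer with fewer shards
// always sorts first, regardless of its randomized bias. The concrete field values
// are made up for the example.
#[cfg(test)]
mod peer_shard_count_ordering_example {
    use super::*;

    #[test]
    fn lower_shard_count_wins_regardless_of_bias() {
        let busy = PeerShardCount { shard_count: 3, bias: 0, peer_id: 1 };
        let idle = PeerShardCount { shard_count: 1, bias: usize::MAX, peer_id: 2 };
        // `idle` compares as smaller, so wrapped in `Reverse` it ends up on top of
        // the min-heap used by `ShardDistributionProposal::new`.
        assert!(idle < busy);
    }
}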
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
pub struct ShardDistributionProposal {
/// A shard can be located on several peers if it has replicas
pub distribution: Vec<(ShardId, Vec<PeerId>)>,
}
impl ShardDistributionProposal {
/// Suggest an empty shard distribution placement
/// This is useful when a collection is configured for custom sharding and
/// we don't want to create any shards in advance.
pub fn empty() -> Self {
Self {
distribution: Vec::new(),
}
}
/// Builds a proposal for the distribution of shards.
/// It will propose to allocate shards so that all peers have the same number of shards of this collection at the end.
pub fn new(
shard_number: NonZeroU32,
replication_factor: NonZeroU32,
known_peers: &[PeerId],
) -> Self {
// Min-heap: peer with lowest number of shards is on top
let mut min_heap: BinaryHeap<_> = known_peers
.iter()
.map(|peer| Reverse(PeerShardCount::new(*peer)))
.collect();
// There should not be more than 1 replica per peer
let replica_number = cmp::min(replication_factor.get() as usize, known_peers.len());
// Get fair distribution of shards on peers
let distribution = (0..shard_number.get())
.map(|shard_id| {
let replicas =
repeat_with(|| min_heap.peek_mut().unwrap().0.get_and_inc_shard_count())
.take(replica_number)
.collect();
(shard_id, replicas)
})
.collect();
Self { distribution }
}
}
impl From<ShardDistributionProposal> for CollectionShardDistribution {
fn from(proposal: ShardDistributionProposal) -> Self {
let ShardDistributionProposal { distribution } = proposal;
CollectionShardDistribution {
shards: distribution
.into_iter()
.map(|(shard_id, peers)| (shard_id, peers.into_iter().collect()))
.collect(),
}
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
#[test]
fn test_distribution() {
let known_peers = vec![1, 2, 3, 4];
let distribution = ShardDistributionProposal::new(
NonZeroU32::new(6).unwrap(),
NonZeroU32::new(1).unwrap(),
&known_peers,
);
// Check that the distribution is as even as possible
let mut shard_counts: Vec<usize> = vec![0; known_peers.len()];
for (_shard_id, peers) in &distribution.distribution {
for peer_id in peers {
let peer_offset = known_peers
.iter()
.enumerate()
.find(|(_, x)| *x == peer_id)
.unwrap()
.0;
shard_counts[peer_offset] += 1;
}
}
assert_eq!(shard_counts.iter().sum::<usize>(), 6);
assert_eq!(shard_counts.iter().min(), Some(&1));
assert_eq!(shard_counts.iter().max(), Some(&2));
}
#[test]
fn test_distribution_is_spread() {
let known_peers = vec![1, 2, 3, 4];
let shard_numbers = 1..=3;
let replication_factors = 1..=4;
let tries = 100;
// With 4 peers, for various shard number and replication factor ranges, always generate
// distributions that inhabit all peers across 100 retries.
for shard_number in shard_numbers {
for replication_factor in replication_factors.clone() {
let inhabited_peers = (0..tries)
// Generate distribution
.map(|_| {
ShardDistributionProposal::new(
NonZeroU32::new(shard_number).unwrap(),
NonZeroU32::new(replication_factor).unwrap(),
&known_peers,
)
})
// Take just the inhabited peer IDs
.flat_map(|proposal| {
proposal
.distribution
.into_iter()
.flat_map(|(_, peers)| peers)
})
.collect::<HashSet<_>>();
assert_eq!(
inhabited_peers.len(),
known_peers.len(),
"must inhabit all {} peers across {tries} distributions",
known_peers.len(),
);
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/collections_ops.rs | lib/storage/src/content_manager/collections_ops.rs | use std::collections::HashMap;
use collection::collection::Collection;
use collection::shards::CollectionId;
use crate::content_manager::errors::StorageError;
pub type Collections = HashMap<CollectionId, Collection>;
pub trait Checker {
fn collection_exists(&self, collection_name: &str) -> bool;
fn validate_collection_not_exists(&self, collection_name: &str) -> Result<(), StorageError> {
if self.collection_exists(collection_name) {
return Err(StorageError::AlreadyExists {
description: format!("Collection `{collection_name}` already exists!"),
});
}
Ok(())
}
fn validate_collection_exists(&self, collection_name: &str) -> Result<(), StorageError> {
if !self.collection_exists(collection_name) {
return Err(StorageError::NotFound {
description: format!("Collection `{collection_name}` doesn't exist!"),
});
}
Ok(())
}
}
impl Checker for Collections {
fn collection_exists(&self, collection_name: &str) -> bool {
self.contains_key(collection_name)
}
}
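// Illustrative example of how the `Checker` helpers behave on an empty collections
// map; the collection name is hypothetical.
#[cfg(test)]
mod checker_example {
    use super::*;

    #[test]
    fn validations_on_an_empty_collections_map() {
        let collections = Collections::new();
        // Creating a new collection is fine: the name is not taken yet.
        assert!(
            collections
                .validate_collection_not_exists("my_collection")
                .is_ok()
        );
        // Reading it is not: the collection does not exist.
        assert!(matches!(
            collections.validate_collection_exists("my_collection"),
            Err(StorageError::NotFound { .. })
        ));
    }
}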
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/mod.rs | lib/storage/src/content_manager/mod.rs | use collection::shards::shard::PeerId;
use self::collection_meta_ops::CollectionMetaOperations;
use self::consensus_manager::CollectionsSnapshot;
use self::errors::StorageError;
pub mod alias_mapping;
pub mod collection_meta_ops;
pub mod collection_verification;
mod collections_ops;
pub mod consensus;
pub mod consensus_manager;
pub mod conversions;
pub mod errors;
pub mod shard_distribution;
pub mod snapshots;
#[cfg(feature = "staging")]
pub mod staging;
pub mod toc;
pub mod consensus_ops {
use collection::operations::types::PeerMetadata;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::replica_set::replica_set_state::ReplicaState::Initializing;
use collection::shards::resharding::ReshardKey;
use collection::shards::shard::PeerId;
use collection::shards::transfer::ShardTransfer;
use collection::shards::{CollectionId, replica_set};
use raft::eraftpb::Entry as RaftEntry;
use serde::{Deserialize, Serialize};
use super::collection_meta_ops::ReshardingOperation;
use crate::content_manager::collection_meta_ops::{
CollectionMetaOperations, SetShardReplicaState, ShardTransferOperations, UpdateCollection,
UpdateCollectionOperation,
};
/// Operation that should pass consensus
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub enum ConsensusOperations {
CollectionMeta(Box<CollectionMetaOperations>),
AddPeer {
peer_id: PeerId,
uri: String,
},
RemovePeer(PeerId),
UpdatePeerMetadata {
peer_id: PeerId,
metadata: PeerMetadata,
},
UpdateClusterMetadata {
key: String,
value: serde_json::Value,
},
RequestSnapshot,
ReportSnapshot {
peer_id: PeerId,
status: SnapshotStatus,
},
}
impl TryFrom<&RaftEntry> for ConsensusOperations {
type Error = serde_cbor::Error;
fn try_from(entry: &RaftEntry) -> Result<Self, Self::Error> {
serde_cbor::from_slice(entry.get_data())
}
}
impl ConsensusOperations {
pub fn abort_transfer(
collection_id: CollectionId,
transfer: ShardTransfer,
reason: &str,
) -> Self {
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Abort {
transfer: transfer.key(),
reason: reason.to_string(),
},
)))
}
pub fn finish_transfer(collection_id: CollectionId, transfer: ShardTransfer) -> Self {
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Finish(transfer),
)))
}
pub fn abort_resharding(collection_id: CollectionId, reshard_key: ReshardKey) -> Self {
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::Resharding(
collection_id,
ReshardingOperation::Abort(reshard_key),
)))
}
pub fn finish_resharding(collection_id: CollectionId, reshard_key: ReshardKey) -> Self {
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::Resharding(
collection_id,
ReshardingOperation::Finish(reshard_key),
)))
}
pub fn set_replica_state(
collection_name: CollectionId,
shard_id: u32,
peer_id: PeerId,
state: ReplicaState,
from_state: Option<ReplicaState>,
) -> Self {
ConsensusOperations::CollectionMeta(
CollectionMetaOperations::SetShardReplicaState(SetShardReplicaState {
collection_name,
shard_id,
peer_id,
state,
from_state,
})
.into(),
)
}
pub fn remove_replica(
collection_name: CollectionId,
shard_id: u32,
peer_id: PeerId,
) -> Self {
let mut operation = UpdateCollectionOperation::new(
collection_name,
UpdateCollection {
vectors: None,
optimizers_config: None,
params: None,
hnsw_config: None,
quantization_config: None,
sparse_vectors: None,
strict_mode_config: None,
metadata: None,
},
);
operation
.set_shard_replica_changes(vec![replica_set::Change::Remove(shard_id, peer_id)]);
ConsensusOperations::CollectionMeta(
CollectionMetaOperations::UpdateCollection(operation).into(),
)
}
/// Report that a replica was initialized
pub fn initialize_replica(
collection_name: CollectionId,
shard_id: u32,
peer_id: PeerId,
) -> Self {
Self::set_replica_state(
collection_name,
shard_id,
peer_id,
ReplicaState::Active,
Some(Initializing),
)
}
pub fn start_transfer(collection_id: CollectionId, transfer: ShardTransfer) -> Self {
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Start(transfer),
)))
}
pub fn request_snapshot() -> Self {
Self::RequestSnapshot
}
pub fn report_snapshot(peer_id: PeerId, status: impl Into<SnapshotStatus>) -> Self {
Self::ReportSnapshot {
peer_id,
status: status.into(),
}
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub enum SnapshotStatus {
Finish,
Failure,
}
impl From<raft::SnapshotStatus> for SnapshotStatus {
fn from(status: raft::SnapshotStatus) -> Self {
match status {
raft::SnapshotStatus::Finish => Self::Finish,
raft::SnapshotStatus::Failure => Self::Failure,
}
}
}
impl From<SnapshotStatus> for raft::SnapshotStatus {
fn from(status: SnapshotStatus) -> Self {
match status {
SnapshotStatus::Finish => Self::Finish,
SnapshotStatus::Failure => Self::Failure,
}
}
}
}
/// Collection container abstraction for consensus
/// Used to mock ToC in consensus state tests
pub trait CollectionContainer {
fn perform_collection_meta_op(
&self,
operation: CollectionMetaOperations,
) -> Result<bool, StorageError>;
fn collections_snapshot(&self) -> CollectionsSnapshot;
fn apply_collections_snapshot(&self, data: CollectionsSnapshot) -> Result<(), StorageError>;
fn remove_peer(&self, peer_id: PeerId) -> Result<(), StorageError>;
fn sync_local_state(&self) -> Result<(), StorageError>;
}
#[cfg(test)]
mod test {
use serde_json::json;
// Consensus messages are serialized to CBOR when sent over the network and written into the WAL.
//
// We are using `serde_json::Value` in `ConsensusOperations::UpdateClusterMetadata`,
// but the way `serde` works, it is not *strictly* guaranteed that all possible JSON values
// can be serialized to CBOR, there might be some minor inconsistencies between formats.
//
// These tests check that `serde_json::Value` can be serialized to (and deserialized from) CBOR.
#[test]
fn serde_json_null_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!(null));
}
#[test]
fn serde_json_integer_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!(1337));
}
#[test]
fn serde_json_float_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!(42.69));
}
#[test]
fn serde_json_string_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!(
"Qdrant is the best vector search engine on the market 💪😎👍"
));
}
#[test]
fn serde_json_basic_array_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json_array());
}
#[test]
fn serde_json_basic_object_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json_object());
}
#[test]
fn serde_json_nested_array_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!([
json!([json_array(), json_object()]),
json!({ "array": json_array(), "object": json_object() }),
]));
}
#[test]
fn serde_json_nested_object_compatible_with_cbor() {
serde_json_value_compatible_with_cbor(json!({
"array": json!([ json_array(), json_object() ]),
"object": json!({ "array": json_array(), "object": json_object() }),
}))
}
fn serde_json_value_compatible_with_cbor(input: serde_json::Value) {
let cbor = serde_cbor::to_vec(&input)
.unwrap_or_else(|_| panic!("JSON value {input} can be serialized to CBOR"));
let output: serde_json::Value = serde_cbor::from_slice(&cbor)
.unwrap_or_else(|_| panic!("JSON value {input} can be deserialized from CBOR"));
assert_eq!(input, output);
}
fn json_array() -> serde_json::Value {
json!([null, 1337, 42.69, "string"])
}
fn json_object() -> serde_json::Value {
json!({
"null": null,
"integer": 1337,
"float": 42.69,
"string": "string",
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/staging.rs | lib/storage/src/content_manager/staging.rs | //! Staging-only operations for testing and debugging purposes.
//!
//! This module contains operations that are only available when the `staging` feature is enabled.
use collection::shards::shard::PeerId;
use serde::{Deserialize, Serialize};
/// Introduce an artificial delay on a specific peer node.
/// If no peer is provided, execute on all peers.
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct TestSlowDown {
pub peer_id: Option<PeerId>,
pub duration_ms: u64,
}
impl TestSlowDown {
pub fn should_execute_on(&self, peer_id: PeerId) -> bool {
self.peer_id.is_none_or(|target| target == peer_id)
}
pub async fn execute(&self, this_peer_id: PeerId) {
if self.should_execute_on(this_peer_id) {
let duration_ms = self.duration_ms;
log::debug!("TestSlowDown: sleeping for {duration_ms}ms on peer {this_peer_id}");
tokio::time::sleep(std::time::Duration::from_millis(self.duration_ms)).await;
log::debug!("TestSlowDown: finished sleeping on peer {this_peer_id}");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/collection_verification.rs | lib/storage/src/content_manager/collection_verification.rs | use std::iter;
use std::sync::Arc;
use collection::operations::verification::{
StrictModeVerification, VerificationPass, check_timeout, new_unchecked_verification_pass,
};
use super::errors::StorageError;
use super::toc::TableOfContent;
use crate::dispatcher::Dispatcher;
use crate::rbac::{Access, AccessRequirements};
/// Checks strict mode using `TableOfContent` instead of `Dispatcher`.
///
/// Note: Avoid this method if you can and use `check_strict_mode_batch` instead to retrieve TOC with the `VerificationPass` gained from the strict mode check.
/// This method should only be used if you only have `TableOfContent` without `Dispatcher`, like in internal API.
pub async fn check_strict_mode_toc_batch<'a, I>(
requests: impl Iterator<Item = &'a I>,
timeout: Option<usize>,
collection_name: &str,
toc: &TableOfContent,
access: &Access,
) -> Result<VerificationPass, StorageError>
where
I: StrictModeVerification + 'a,
{
// Check access here first since strict-mode gets checked before `access`.
// If we simply bypassed here, requests to a collection a user doesn't have access to could leak
// information, like existence, strict mode config, payload indices, ...
let collection_pass =
access.check_collection_access(collection_name, AccessRequirements::new())?;
let collection = toc.get_collection(&collection_pass).await?;
if let Some(strict_mode_config) = &collection.strict_mode_config().await
&& strict_mode_config.enabled.unwrap_or_default()
{
for request in requests {
request
.check_strict_mode(&collection, strict_mode_config)
.await?;
}
if let Some(timeout) = timeout {
check_timeout(timeout, strict_mode_config)?;
}
}
// It's checked now
Ok(new_unchecked_verification_pass())
}
pub async fn check_strict_mode_batch<'a, I>(
requests: impl Iterator<Item = &'a I>,
timeout: Option<usize>,
collection_name: &str,
dispatcher: &Dispatcher,
access: &Access,
) -> Result<VerificationPass, StorageError>
where
I: StrictModeVerification + 'a,
{
let toc = get_toc_without_verification_pass(dispatcher, access);
check_strict_mode_toc_batch(requests, timeout, collection_name, toc, access).await
}
pub async fn check_strict_mode(
request: &impl StrictModeVerification,
timeout: Option<usize>,
collection_name: &str,
dispatcher: &Dispatcher,
access: &Access,
) -> Result<VerificationPass, StorageError> {
check_strict_mode_batch(
iter::once(request),
timeout,
collection_name,
dispatcher,
access,
)
.await
}
/// Checks strict mode using `TableOfContent` instead of `Dispatcher`.
///
/// Note: Avoid this method if you can and use `check_strict_mode` instead to retrieve TOC with the `VerificationPass` gained from the strict mode check.
/// This method should only be used if you only have `TableOfContent` without `Dispatcher`, like in internal API.
pub async fn check_strict_mode_toc(
request: &impl StrictModeVerification,
timeout: Option<usize>,
collection_name: &str,
toc: &TableOfContent,
access: &Access,
) -> Result<VerificationPass, StorageError> {
check_strict_mode_toc_batch(iter::once(request), timeout, collection_name, toc, access).await
}
pub async fn check_strict_mode_timeout(
timeout: Option<usize>,
collection_name: &str,
dispatcher: &Dispatcher,
access: &Access,
) -> Result<VerificationPass, StorageError> {
let Some(timeout) = timeout else {
return Ok(new_unchecked_verification_pass());
};
let toc = get_toc_without_verification_pass(dispatcher, access);
// Check access here first since strict-mode gets checked before `access`.
// If we simply bypassed here, requests to a collection a user doesn't have access to could leak
// information, like existence, strict mode config, payload indices, ...
let collection_pass =
access.check_collection_access(collection_name, AccessRequirements::new())?;
let collection = toc.get_collection(&collection_pass).await?;
if let Some(strict_mode_config) = &collection.strict_mode_config().await
&& strict_mode_config.enabled.unwrap_or_default()
{
check_timeout(timeout, strict_mode_config)?;
}
// It's checked now
Ok(new_unchecked_verification_pass())
}
/// Returns the `TableOfContent` from `dispatcher` without needing a validity check.
/// Caution: Do only use this to obtain a `VerificationPass`!
/// Don't make public!
fn get_toc_without_verification_pass<'a>(
dispatcher: &'a Dispatcher,
access: &Access,
) -> &'a Arc<TableOfContent> {
let pass = new_unchecked_verification_pass();
dispatcher.toc(access, &pass)
}
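// Illustrative sketch of the call pattern described in the doc comments above:
// a strict-mode check yields a `VerificationPass`, which (together with `Access`)
// is required to obtain the `TableOfContent` from the dispatcher. The request type
// and the timeout value are hypothetical.
#[cfg(test)]
#[allow(dead_code)]
async fn example_verified_toc_access<R: StrictModeVerification>(
    request: &R,
    collection_name: &str,
    dispatcher: &Dispatcher,
    access: &Access,
) -> Result<(), StorageError> {
    // Strict-mode (and access) verification produces the pass.
    let pass = check_strict_mode(request, Some(30), collection_name, dispatcher, access).await?;
    // The pass unlocks the table of content for the actual operation.
    let _toc = dispatcher.toc(access, &pass);
    Ok(())
}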
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/conversions.rs | lib/storage/src/content_manager/conversions.rs | use std::collections::HashMap;
use std::str::FromStr;
use api::conversions::json;
use collection::operations::config_diff::{
CollectionParamsDiff, HnswConfigDiff, OptimizersConfigDiff, QuantizationConfigDiff,
};
use collection::operations::conversions::sharding_method_from_proto;
use collection::operations::types::{SparseVectorsConfig, VectorsConfigDiff};
use segment::types::{StrictModeConfig, StrictModeMultivectorConfig, StrictModeSparseConfig};
use tonic::Status;
use tonic::metadata::MetadataValue;
use crate::content_manager::collection_meta_ops::{
AliasOperations, ChangeAliasesOperation, CollectionMetaOperations, CreateAlias,
CreateAliasOperation, CreateCollection, CreateCollectionOperation, DeleteAlias,
DeleteAliasOperation, DeleteCollectionOperation, RenameAlias, RenameAliasOperation,
UpdateCollection, UpdateCollectionOperation,
};
use crate::content_manager::errors::StorageError;
impl From<StorageError> for Status {
fn from(error: StorageError) -> Self {
let mut metadata_headers = HashMap::new();
let error_code = match &error {
StorageError::BadInput { .. } => tonic::Code::InvalidArgument,
StorageError::NotFound { .. } => tonic::Code::NotFound,
StorageError::ServiceError { .. } => tonic::Code::Internal,
StorageError::BadRequest { .. } => tonic::Code::InvalidArgument,
StorageError::Locked { .. } => tonic::Code::FailedPrecondition,
StorageError::Timeout { .. } => tonic::Code::DeadlineExceeded,
StorageError::AlreadyExists { .. } => tonic::Code::AlreadyExists,
StorageError::ChecksumMismatch { .. } => tonic::Code::DataLoss,
StorageError::Forbidden { .. } => tonic::Code::PermissionDenied,
StorageError::PreconditionFailed { .. } => tonic::Code::FailedPrecondition,
StorageError::InferenceError { .. } => tonic::Code::InvalidArgument,
StorageError::RateLimitExceeded {
description: _,
retry_after,
} => {
if let Some(retry_after) = retry_after {
// Retry-After is expressed in seconds `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After`
// Ceil the value to the nearest second so clients don't retry too early
let retry_after_sec = retry_after.as_secs_f32().ceil() as u32;
metadata_headers.insert("retry-after", retry_after_sec.to_string());
}
tonic::Code::ResourceExhausted
}
StorageError::ShardUnavailable { .. } => tonic::Code::Unavailable,
StorageError::EmptyPartialSnapshot { .. } => tonic::Code::FailedPrecondition,
};
let mut status = Status::new(error_code, format!("{error}"));
// add metadata headers
for (header_key, header_value) in metadata_headers {
if let Ok(metadata) = MetadataValue::from_str(&header_value) {
status.metadata_mut().insert(header_key, metadata);
} else {
log::info!("Failed to parse metadata header value: {header_value}");
}
}
status
}
}
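// Illustrative note (added, not part of the original module): with the mapping above, a
// `StorageError::RateLimitExceeded` carrying `retry_after = 1.2s` becomes a gRPC status
// with `Code::ResourceExhausted` and a "retry-after" metadata entry of "2", because the
// duration is rounded up to whole seconds before being attached.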
impl TryFrom<api::grpc::qdrant::CreateCollection> for CollectionMetaOperations {
type Error = Status;
fn try_from(value: api::grpc::qdrant::CreateCollection) -> Result<Self, Self::Error> {
let api::grpc::qdrant::CreateCollection {
collection_name,
hnsw_config,
wal_config,
optimizers_config,
shard_number,
on_disk_payload,
timeout: _,
vectors_config,
replication_factor,
write_consistency_factor,
quantization_config,
sharding_method,
sparse_vectors_config,
strict_mode_config,
metadata,
} = value;
let op = CreateCollectionOperation::new(
collection_name,
CreateCollection {
vectors: match vectors_config.and_then(|config| config.config) {
Some(vector_config) => vector_config.try_into()?,
// TODO(sparse): sparse or dense vectors config is required
None => Default::default(),
},
sparse_vectors: sparse_vectors_config
.map(|v| SparseVectorsConfig::try_from(v).map(|SparseVectorsConfig(x)| x))
.transpose()?,
hnsw_config: hnsw_config.map(|v| v.into()),
wal_config: wal_config.map(|v| v.into()),
optimizers_config: optimizers_config.map(TryFrom::try_from).transpose()?,
shard_number,
on_disk_payload,
replication_factor,
write_consistency_factor,
quantization_config: quantization_config.map(TryInto::try_into).transpose()?,
sharding_method: sharding_method
.map(sharding_method_from_proto)
.transpose()?,
strict_mode_config: strict_mode_config.map(strict_mode_from_api),
uuid: None,
metadata: if metadata.is_empty() {
None
} else {
Some(json::proto_to_payloads(metadata)?)
},
},
)?;
Ok(CollectionMetaOperations::CreateCollection(op))
}
}
pub fn strict_mode_from_api(value: api::grpc::qdrant::StrictModeConfig) -> StrictModeConfig {
let api::grpc::qdrant::StrictModeConfig {
enabled,
max_query_limit,
max_timeout,
unindexed_filtering_retrieve,
unindexed_filtering_update,
search_max_hnsw_ef,
search_allow_exact,
search_max_oversampling,
upsert_max_batchsize,
max_collection_vector_size_bytes,
read_rate_limit,
write_rate_limit,
max_collection_payload_size_bytes,
max_points_count,
filter_max_conditions,
condition_max_size,
multivector_config,
sparse_config,
max_payload_index_count,
} = value;
StrictModeConfig {
enabled,
max_query_limit: max_query_limit.map(|i| i as usize),
max_timeout: max_timeout.map(|i| i as usize),
unindexed_filtering_retrieve,
unindexed_filtering_update,
search_max_hnsw_ef: search_max_hnsw_ef.map(|i| i as usize),
search_allow_exact,
search_max_oversampling: search_max_oversampling.map(f64::from),
upsert_max_batchsize: upsert_max_batchsize.map(|i| i as usize),
max_collection_vector_size_bytes: max_collection_vector_size_bytes.map(|i| i as usize),
read_rate_limit: read_rate_limit.map(|i| i as usize),
write_rate_limit: write_rate_limit.map(|i| i as usize),
max_collection_payload_size_bytes: max_collection_payload_size_bytes.map(|i| i as usize),
max_points_count: max_points_count.map(|i| i as usize),
filter_max_conditions: filter_max_conditions.map(|i| i as usize),
condition_max_size: condition_max_size.map(|i| i as usize),
multivector_config: multivector_config.map(StrictModeMultivectorConfig::from),
sparse_config: sparse_config.map(StrictModeSparseConfig::from),
max_payload_index_count: max_payload_index_count.map(|i| i as usize),
}
}
impl TryFrom<api::grpc::qdrant::UpdateCollection> for CollectionMetaOperations {
type Error = Status;
fn try_from(value: api::grpc::qdrant::UpdateCollection) -> Result<Self, Self::Error> {
let api::grpc::qdrant::UpdateCollection {
collection_name,
optimizers_config,
timeout: _,
params,
hnsw_config,
vectors_config,
quantization_config,
sparse_vectors_config,
strict_mode_config,
metadata,
} = value;
Ok(Self::UpdateCollection(UpdateCollectionOperation::new(
collection_name,
UpdateCollection {
vectors: vectors_config
.and_then(|config| config.config)
.map(VectorsConfigDiff::try_from)
.transpose()?,
hnsw_config: hnsw_config.map(HnswConfigDiff::from),
params: params.map(CollectionParamsDiff::try_from).transpose()?,
optimizers_config: optimizers_config
.map(OptimizersConfigDiff::try_from)
.transpose()?,
quantization_config: quantization_config
.map(QuantizationConfigDiff::try_from)
.transpose()?,
sparse_vectors: sparse_vectors_config
.map(SparseVectorsConfig::try_from)
.transpose()?,
strict_mode_config: strict_mode_config.map(StrictModeConfig::from),
metadata: if metadata.is_empty() {
None
} else {
Some(json::proto_to_payloads(metadata)?)
},
},
)))
}
}
impl TryFrom<api::grpc::qdrant::DeleteCollection> for CollectionMetaOperations {
type Error = Status;
fn try_from(value: api::grpc::qdrant::DeleteCollection) -> Result<Self, Self::Error> {
let api::grpc::qdrant::DeleteCollection {
collection_name,
timeout: _,
} = value;
Ok(Self::DeleteCollection(DeleteCollectionOperation(
collection_name,
)))
}
}
impl From<api::grpc::qdrant::CreateAlias> for AliasOperations {
fn from(value: api::grpc::qdrant::CreateAlias) -> Self {
let api::grpc::qdrant::CreateAlias {
collection_name,
alias_name,
} = value;
Self::CreateAlias(CreateAliasOperation {
create_alias: CreateAlias {
collection_name,
alias_name,
},
})
}
}
impl From<api::grpc::qdrant::DeleteAlias> for AliasOperations {
fn from(value: api::grpc::qdrant::DeleteAlias) -> Self {
let api::grpc::qdrant::DeleteAlias { alias_name } = value;
Self::DeleteAlias(DeleteAliasOperation {
delete_alias: DeleteAlias { alias_name },
})
}
}
impl From<api::grpc::qdrant::RenameAlias> for AliasOperations {
fn from(value: api::grpc::qdrant::RenameAlias) -> Self {
let api::grpc::qdrant::RenameAlias {
old_alias_name,
new_alias_name,
} = value;
Self::RenameAlias(RenameAliasOperation {
rename_alias: RenameAlias {
old_alias_name,
new_alias_name,
},
})
}
}
impl TryFrom<api::grpc::qdrant::AliasOperations> for AliasOperations {
type Error = Status;
fn try_from(value: api::grpc::qdrant::AliasOperations) -> Result<Self, Self::Error> {
let api::grpc::qdrant::AliasOperations { action } = value;
match action {
Some(api::grpc::qdrant::alias_operations::Action::CreateAlias(create)) => {
Ok(create.into())
}
Some(api::grpc::qdrant::alias_operations::Action::DeleteAlias(delete)) => {
Ok(delete.into())
}
Some(api::grpc::qdrant::alias_operations::Action::RenameAlias(rename)) => {
Ok(rename.into())
}
_ => Err(Status::invalid_argument("Malformed AliasOperation type")),
}
}
}
impl TryFrom<api::grpc::qdrant::ChangeAliases> for CollectionMetaOperations {
type Error = Status;
fn try_from(value: api::grpc::qdrant::ChangeAliases) -> Result<Self, Self::Error> {
let api::grpc::qdrant::ChangeAliases {
actions,
timeout: _,
} = value;
let actions: Vec<AliasOperations> = actions
.into_iter()
.map(|a| a.try_into())
.collect::<Result<_, _>>()?;
Ok(Self::ChangeAliases(ChangeAliasesOperation { actions }))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/alias_mapping.rs | lib/storage/src/content_manager/alias_mapping.rs | use std::collections::HashMap;
use std::path::{Path, PathBuf};
use collection::shards::CollectionId;
use fs_err as fs;
use io::file_operations::{atomic_save_json, read_json};
use serde::{Deserialize, Serialize};
use crate::content_manager::errors::StorageError;
pub const ALIAS_MAPPING_CONFIG_FILE: &str = "data.json";
type Alias = String;
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Default)]
pub struct AliasMapping(HashMap<Alias, CollectionId>);
impl AliasMapping {
pub fn load(path: &Path) -> Result<Self, StorageError> {
Ok(read_json(path)?)
}
pub fn save(&self, path: &Path) -> Result<(), StorageError> {
Ok(atomic_save_json(path, self)?)
}
}
/// Persists mapping between alias and collection name. The data is assumed to be relatively small.
/// - Reads are served from memory.
/// - Writes are durably saved.
#[derive(Debug)]
pub struct AliasPersistence {
data_path: PathBuf,
alias_mapping: AliasMapping,
}
impl AliasPersistence {
pub fn get_config_path(path: &Path) -> PathBuf {
path.join(ALIAS_MAPPING_CONFIG_FILE)
}
fn init_file(dir_path: &Path) -> Result<PathBuf, StorageError> {
let data_path = Self::get_config_path(dir_path);
if !data_path.exists() {
atomic_save_json(&data_path, &AliasMapping::default())?;
}
Ok(data_path)
}
pub fn open(dir_path: &Path) -> Result<Self, StorageError> {
if !dir_path.exists() {
fs::create_dir_all(dir_path)?;
}
let data_path = Self::init_file(dir_path)?;
let alias_mapping = AliasMapping::load(&data_path)?;
Ok(AliasPersistence {
data_path,
alias_mapping,
})
}
pub fn get(&self, alias: &str) -> Option<String> {
self.alias_mapping.0.get(alias).cloned()
}
pub fn insert(&mut self, alias: String, collection_name: String) -> Result<(), StorageError> {
self.alias_mapping.0.insert(alias, collection_name);
self.alias_mapping.save(&self.data_path)?;
Ok(())
}
pub fn remove(&mut self, alias: &str) -> Result<Option<String>, StorageError> {
let output = self.alias_mapping.0.remove(alias);
if output.is_some() {
self.alias_mapping.save(&self.data_path)?;
}
Ok(output)
}
/// Removes all aliases for a given collection.
pub fn remove_collection(&mut self, collection_name: &str) -> Result<(), StorageError> {
let prev_len = self.alias_mapping.0.len();
self.alias_mapping.0.retain(|_, v| v != collection_name);
if prev_len != self.alias_mapping.0.len() {
self.alias_mapping.save(&self.data_path)?;
}
Ok(())
}
pub fn rename_alias(
&mut self,
old_alias_name: &str,
new_alias_name: String,
) -> Result<(), StorageError> {
match self.get(old_alias_name) {
None => Err(StorageError::NotFound {
description: format!("Alias {old_alias_name} does not exists!"),
}),
Some(collection_name) => {
self.alias_mapping.0.remove(old_alias_name);
self.alias_mapping.0.insert(new_alias_name, collection_name);
// 'remove' & 'insert' saved atomically
self.alias_mapping.save(&self.data_path)?;
Ok(())
}
}
}
pub fn collection_aliases(&self, collection_name: &str) -> Vec<String> {
let mut result = vec![];
for (alias, target_collection) in self.alias_mapping.0.iter() {
if collection_name == target_collection {
result.push(alias.clone());
}
}
result
}
pub fn state(&self) -> &AliasMapping {
&self.alias_mapping
}
pub fn apply_state(&mut self, alias_mapping: AliasMapping) -> Result<(), StorageError> {
self.alias_mapping = alias_mapping;
self.alias_mapping.save(&self.data_path)?;
Ok(())
}
pub fn check_alias_exists(&self, alias: &str) -> bool {
self.alias_mapping.0.contains_key(alias)
}
}
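// Usage sketch (illustrative only, not part of the original module); the directory path,
// alias and collection names below are hypothetical:
//
//     let mut aliases = AliasPersistence::open(Path::new("./storage/aliases"))?;
//     aliases.insert("articles_alias".to_string(), "articles_v2".to_string())?;
//     assert_eq!(aliases.get("articles_alias"), Some("articles_v2".to_string()));
//     aliases.remove("articles_alias")?;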
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/collection_meta_ops.rs | lib/storage/src/content_manager/collection_meta_ops.rs | use std::collections::BTreeMap;
use collection::config::{CollectionConfigInternal, CollectionParams, ShardingMethod};
use collection::operations::config_diff::{
CollectionParamsDiff, HnswConfigDiff, OptimizersConfigDiff, QuantizationConfigDiff,
WalConfigDiff,
};
use collection::operations::types::{
SparseVectorParams, SparseVectorsConfig, VectorsConfig, VectorsConfigDiff,
};
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::resharding::ReshardKey;
use collection::shards::shard::{PeerId, ShardId, ShardsPlacement};
use collection::shards::transfer::{ShardTransfer, ShardTransferKey, ShardTransferRestart};
use collection::shards::{CollectionId, replica_set};
use schemars::JsonSchema;
use segment::types::{
Payload, PayloadFieldSchema, PayloadKeyType, QuantizationConfig, ShardKey, StrictModeConfig,
VectorNameBuf,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use validator::Validate;
// Re-export staging types when the feature is enabled
#[cfg(feature = "staging")]
pub use super::staging::TestSlowDown;
use crate::content_manager::errors::{StorageError, StorageResult};
use crate::content_manager::shard_distribution::ShardDistributionProposal;
// *Operation wrapper structure is only required for better OpenAPI generation
/// Create alternative name for a collection.
/// Collection will be available under both names for search, retrieve, and other operations.
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateAlias {
pub collection_name: String,
pub alias_name: String,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateAliasOperation {
pub create_alias: CreateAlias,
}
/// Delete alias if it exists
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DeleteAlias {
pub alias_name: String,
}
/// Delete alias if it exists
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DeleteAliasOperation {
pub delete_alias: DeleteAlias,
}
/// Change alias to a new one
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RenameAlias {
pub old_alias_name: String,
pub new_alias_name: String,
}
/// Change alias to a new one
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct RenameAliasOperation {
pub rename_alias: RenameAlias,
}
/// Group of all the possible operations related to collection aliases
#[derive(Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum AliasOperations {
CreateAlias(CreateAliasOperation),
DeleteAlias(DeleteAliasOperation),
RenameAlias(RenameAliasOperation),
}
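// Illustrative note (added, not part of the original module): because the enum is
// `#[serde(untagged)]`, the JSON shape alone selects the variant. For example,
// `{"create_alias": {"collection_name": "col", "alias_name": "al"}}` deserializes into the
// `CreateAlias` variant, `{"delete_alias": {"alias_name": "al"}}` into `DeleteAlias`, and
// `{"rename_alias": {"old_alias_name": "a", "new_alias_name": "b"}}` into `RenameAlias`.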
impl From<CreateAlias> for AliasOperations {
fn from(create_alias: CreateAlias) -> Self {
AliasOperations::CreateAlias(CreateAliasOperation { create_alias })
}
}
impl From<DeleteAlias> for AliasOperations {
fn from(delete_alias: DeleteAlias) -> Self {
AliasOperations::DeleteAlias(DeleteAliasOperation { delete_alias })
}
}
impl From<RenameAlias> for AliasOperations {
fn from(rename_alias: RenameAlias) -> Self {
AliasOperations::RenameAlias(RenameAliasOperation { rename_alias })
}
}
/// Operation for creating new collection and (optionally) specify index params
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateCollection {
/// Vector data config.
/// It is possible to provide one config for single vector mode and list of configs for multiple vectors mode.
#[serde(default)]
#[validate(nested)]
pub vectors: VectorsConfig,
/// For auto sharding:
/// Number of shards in collection.
/// - Default is 1 for standalone, otherwise equal to the number of nodes
/// - Minimum is 1
///
/// For custom sharding:
/// Number of shards in collection per shard group.
/// - Default is 1, meaning that each shard key will be mapped to a single shard
/// - Minimum is 1
#[serde(default)]
#[validate(range(min = 1))]
pub shard_number: Option<u32>,
/// Sharding method
/// Default is Auto - points are distributed across all available shards
/// Custom - points are distributed across shards according to shard key
#[serde(default)]
pub sharding_method: Option<ShardingMethod>,
/// Number of shards replicas.
/// Default is 1
/// Minimum is 1
#[serde(default)]
#[validate(range(min = 1))]
pub replication_factor: Option<u32>,
/// Defines how many replicas should apply the operation for us to consider it successful.
/// Increasing this number will make the collection more resilient to inconsistencies, but will
/// also make it fail if not enough replicas are available.
/// Does not have any performance impact.
#[serde(default)]
#[validate(range(min = 1))]
pub write_consistency_factor: Option<u32>,
/// If true - point's payload will not be stored in memory.
/// It will be read from the disk every time it is requested.
/// This setting saves RAM by (slightly) increasing the response time.
/// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
///
/// Default: true
#[serde(default)]
pub on_disk_payload: Option<bool>,
/// Custom params for HNSW index. If none - values from service configuration file are used.
#[validate(nested)]
pub hnsw_config: Option<HnswConfigDiff>,
/// Custom params for WAL. If none - values from service configuration file are used.
#[validate(nested)]
pub wal_config: Option<WalConfigDiff>,
/// Custom params for Optimizers. If none - values from service configuration file are used.
#[serde(alias = "optimizer_config")]
#[validate(nested)]
pub optimizers_config: Option<OptimizersConfigDiff>,
/// Quantization parameters. If none - quantization is disabled.
#[serde(default, alias = "quantization")]
#[validate(nested)]
pub quantization_config: Option<QuantizationConfig>,
/// Sparse vector data config.
#[validate(nested)]
pub sparse_vectors: Option<BTreeMap<VectorNameBuf, SparseVectorParams>>,
/// Strict-mode config.
#[validate(nested)]
pub strict_mode_config: Option<StrictModeConfig>,
#[serde(default)]
#[schemars(skip)]
pub uuid: Option<Uuid>,
/// Arbitrary JSON metadata for the collection
/// This can be used to store application-specific information
/// such as creation time, migration data, inference model info, etc.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<Payload>,
}
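// Illustrative note (added, not part of the original module): every field is either an
// `Option` or marked `#[serde(default)]`, so a minimal payload only needs to specify what it
// uses, e.g. `{"vectors": {...}, "shard_number": 2, "on_disk_payload": true}`; the shape of
// the `vectors` value is defined by `VectorsConfig`, which lives elsewhere.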
/// Operation for creating new collection and (optionally) specify index params
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct CreateCollectionOperation {
pub collection_name: String,
pub create_collection: CreateCollection,
distribution: Option<ShardDistributionProposal>,
}
impl CreateCollectionOperation {
pub fn new(
collection_name: String,
create_collection: CreateCollection,
) -> StorageResult<Self> {
// validate vector names are unique between dense and sparse vectors
if let Some(sparse_config) = &create_collection.sparse_vectors {
let mut dense_names = create_collection.vectors.params_iter().map(|p| p.0);
if let Some(duplicate_name) = dense_names.find(|name| sparse_config.contains_key(*name))
{
return Err(StorageError::bad_input(format!(
"Dense and sparse vector names must be unique - duplicate found with '{duplicate_name}'",
)));
}
}
Ok(Self {
collection_name,
create_collection,
distribution: None,
})
}
pub fn is_distribution_set(&self) -> bool {
self.distribution.is_some()
}
pub fn take_distribution(&mut self) -> Option<ShardDistributionProposal> {
self.distribution.take()
}
pub fn set_distribution(&mut self, distribution: ShardDistributionProposal) {
self.distribution = Some(distribution);
}
}
/// Operation for updating parameters of the existing collection
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct UpdateCollection {
/// Map of vector data parameters to update for each named vector.
/// To update parameters in a collection having a single unnamed vector, use an empty string as name.
#[validate(nested)]
pub vectors: Option<VectorsConfigDiff>,
/// Custom params for Optimizers. If none - it is left unchanged.
/// This operation is blocking, it will only proceed once all current optimizations are complete
#[serde(alias = "optimizer_config")]
#[validate(nested)]
pub optimizers_config: Option<OptimizersConfigDiff>, // TODO: Allow updates for other configuration params as well
/// Collection base params. If none - it is left unchanged.
pub params: Option<CollectionParamsDiff>,
/// HNSW parameters to update for the collection index. If none - it is left unchanged.
#[validate(nested)]
pub hnsw_config: Option<HnswConfigDiff>,
/// Quantization parameters to update. If none - it is left unchanged.
#[serde(default, alias = "quantization")]
#[validate(nested)]
pub quantization_config: Option<QuantizationConfigDiff>,
/// Map of sparse vector data parameters to update for each sparse vector.
#[validate(nested)]
pub sparse_vectors: Option<SparseVectorsConfig>,
#[validate(nested)]
pub strict_mode_config: Option<StrictModeConfig>,
/// Metadata to update for the collection. If provided, this will merge with existing metadata.
/// To remove metadata, set it to an empty object.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<Payload>,
}
/// Operation for updating parameters of the existing collection
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct UpdateCollectionOperation {
pub collection_name: String,
pub update_collection: UpdateCollection,
shard_replica_changes: Option<Vec<replica_set::Change>>,
}
impl UpdateCollectionOperation {
pub fn new_empty(collection_name: String) -> Self {
Self {
collection_name,
update_collection: UpdateCollection {
vectors: None,
hnsw_config: None,
params: None,
optimizers_config: None,
quantization_config: None,
sparse_vectors: None,
strict_mode_config: None,
metadata: None,
},
shard_replica_changes: None,
}
}
pub fn new(collection_name: String, update_collection: UpdateCollection) -> Self {
Self {
collection_name,
update_collection,
shard_replica_changes: None,
}
}
pub fn take_shard_replica_changes(&mut self) -> Option<Vec<replica_set::Change>> {
self.shard_replica_changes.take()
}
pub fn set_shard_replica_changes(&mut self, changes: Vec<replica_set::Change>) {
if changes.is_empty() {
self.shard_replica_changes = None;
} else {
self.shard_replica_changes = Some(changes);
}
}
}
/// Operation for performing changes of collection aliases.
/// Alias changes are atomic, meaning that no collection modifications can happen between
/// alias operations.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct ChangeAliasesOperation {
pub actions: Vec<AliasOperations>,
}
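// Illustrative sketch (added, not part of the original module; alias and collection names
// are hypothetical): an atomic alias switch can be expressed as a delete followed by a
// create within a single operation:
//
//     ChangeAliasesOperation {
//         actions: vec![
//             DeleteAlias { alias_name: "prod".to_string() }.into(),
//             CreateAlias {
//                 collection_name: "articles_v2".to_string(),
//                 alias_name: "prod".to_string(),
//             }
//             .into(),
//         ],
//     }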
/// Operation for deleting collection with given name
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub struct DeleteCollectionOperation(pub String);
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub enum ReshardingOperation {
Start(ReshardKey),
CommitRead(ReshardKey),
CommitWrite(ReshardKey),
Finish(ReshardKey),
Abort(ReshardKey),
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub enum ShardTransferOperations {
Start(ShardTransfer),
/// Restart an existing transfer with a new configuration
///
/// If the given transfer is ongoing, it is aborted and restarted with the new configuration.
Restart(ShardTransferRestart),
Finish(ShardTransfer),
/// Deprecated since Qdrant 1.9.0, used in Qdrant 1.7.0 and 1.8.0
///
/// Used in `ShardTransferMethod::Snapshot`
///
/// Called when the snapshot has successfully been recovered on the remote, brings the transfer
/// to the next stage.
SnapshotRecovered(ShardTransferKey),
/// Used in `ShardTransferMethod::Snapshot` and `ShardTransferMethod::WalDelta`
///
/// Called when the first stage of the transfer has been successfully finished, brings the
/// transfer to the next stage.
RecoveryToPartial(ShardTransferKey),
Abort {
transfer: ShardTransferKey,
reason: String,
},
}
/// Sets the state of shard replica
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct SetShardReplicaState {
pub collection_name: String,
pub shard_id: ShardId,
pub peer_id: PeerId,
/// If `Active` then the replica is up to date and can receive updates and answer requests
pub state: ReplicaState,
/// If `Some` then check that the replica is in this state before changing it
/// If `None` then the replica can be in any state
/// This is useful for example when we want to make sure
/// we only make transition from `Initializing` to `Active`, and not from `Dead` to `Active`.
/// If `from_state` does not match the current state of the replica, then the operation will be dismissed.
#[serde(default)]
pub from_state: Option<ReplicaState>,
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct CreateShardKey {
pub collection_name: String,
pub shard_key: ShardKey,
pub placement: ShardsPlacement,
pub initial_state: Option<ReplicaState>,
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct DropShardKey {
pub collection_name: String,
pub shard_key: ShardKey,
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct CreatePayloadIndex {
pub collection_name: String,
pub field_name: PayloadKeyType,
pub field_schema: PayloadFieldSchema,
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
pub struct DropPayloadIndex {
pub collection_name: String,
pub field_name: PayloadKeyType,
}
/// Enumeration of all possible collection update operations
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash, Clone)]
#[serde(rename_all = "snake_case")]
pub enum CollectionMetaOperations {
CreateCollection(CreateCollectionOperation),
UpdateCollection(UpdateCollectionOperation),
DeleteCollection(DeleteCollectionOperation),
ChangeAliases(ChangeAliasesOperation),
Resharding(CollectionId, ReshardingOperation),
TransferShard(CollectionId, ShardTransferOperations),
SetShardReplicaState(SetShardReplicaState),
CreateShardKey(CreateShardKey),
DropShardKey(DropShardKey),
CreatePayloadIndex(CreatePayloadIndex),
DropPayloadIndex(DropPayloadIndex),
Nop {
token: usize,
}, // Empty operation
/// Introduce artificial delay to a specific peer node
#[cfg(feature = "staging")]
TestSlowDown(TestSlowDown),
}
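// Illustrative note (added, not part of the original module): these operations are not
// applied directly; they are wrapped into `ConsensusOperations::CollectionMeta`, proposed
// through Raft consensus (see e.g. the `ShardTransferConsensus` impl in `toc/transfer.rs`),
// and then applied on each peer in `ConsensusManager::apply_normal_entry`.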
/// Use config of the existing collection to generate a create collection operation
/// for the new collection
impl From<CollectionConfigInternal> for CreateCollection {
fn from(value: CollectionConfigInternal) -> Self {
let CollectionConfigInternal {
params,
hnsw_config,
optimizer_config,
wal_config,
quantization_config,
strict_mode_config,
uuid,
metadata,
} = value;
let CollectionParams {
vectors,
shard_number,
sharding_method,
replication_factor,
write_consistency_factor,
read_fan_out_factor: _,
on_disk_payload,
sparse_vectors,
} = params;
Self {
vectors,
shard_number: Some(shard_number.get()),
sharding_method,
replication_factor: Some(replication_factor.get()),
write_consistency_factor: Some(write_consistency_factor.get()),
on_disk_payload: Some(on_disk_payload),
hnsw_config: Some(hnsw_config.into()),
wal_config: Some(wal_config.into()),
optimizers_config: Some(optimizer_config.into()),
quantization_config,
sparse_vectors,
strict_mode_config,
uuid,
metadata,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus_manager.rs | lib/storage/src/content_manager/consensus_manager.rs | use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt::Display;
use std::future::Future;
use std::ops::Deref;
use std::path::Path;
use std::str;
use std::sync::Arc;
use std::time::{Duration, Instant};
use anyhow::{Context, anyhow};
use chrono::Utc;
use collection::collection_state;
use collection::common::is_ready::IsReady;
use collection::operations::types::PeerMetadata;
use collection::shards::CollectionId;
use collection::shards::shard::PeerId;
use common::defaults;
use futures::future::join_all;
use parking_lot::{Mutex, RwLock};
use raft::eraftpb::{ConfChange, ConfChangeType, ConfChangeV2, Entry as RaftEntry, EntryType};
use raft::{GetEntriesContext, RaftState, RawNode, SoftState, Storage};
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast;
use tokio::sync::broadcast::Receiver;
use tokio::time::error::Elapsed;
use tokio_util::task::AbortOnDropHandle;
use tonic::transport::Uri;
use super::CollectionContainer;
use super::alias_mapping::AliasMapping;
use super::consensus_ops::{ConsensusOperations, SnapshotStatus};
use super::errors::StorageError;
use crate::content_manager::consensus::consensus_wal::ConsensusOpWal;
use crate::content_manager::consensus::entry_queue::EntryId;
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::consensus::persistent::Persistent;
use crate::types::{
ClusterInfo, ClusterStatus, ConsensusThreadStatus, MessageSendErrors, PeerAddressById,
PeerInfo, PeerMetadataById, RaftInfo,
};
pub mod prelude {
use crate::content_manager::toc::TableOfContent;
pub type ConsensusState = super::ConsensusManager<TableOfContent>;
}
/// Allow updating our peer metadata at most once every 60 seconds
const CONSENSUS_PEER_METADATA_UPDATE_INTERVAL: Duration = Duration::from_secs(60);
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SnapshotData {
pub collections_data: CollectionsSnapshot,
#[serde(with = "crate::serialize_peer_addresses")]
pub address_by_id: PeerAddressById,
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub metadata_by_id: PeerMetadataById,
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub cluster_metadata: HashMap<String, serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct CollectionsSnapshot {
pub collections: HashMap<CollectionId, collection_state::State>,
pub aliases: AliasMapping,
}
impl TryFrom<&[u8]> for SnapshotData {
type Error = serde_cbor::Error;
fn try_from(bytes: &[u8]) -> Result<SnapshotData, Self::Error> {
serde_cbor::from_slice(bytes)
}
}
pub struct ConsensusManager<C: CollectionContainer> {
pub persistent: RwLock<Persistent>,
    /// Notifies when the current node knows who the leader is and is not in the process of an election.
    /// Otherwise, proposals are not accepted.
pub is_leader_established: Arc<IsReady>,
wal: Mutex<ConsensusOpWal>,
    /// Raft consensus state which is not saved on disk.
    /// Its contents (role + leader id) change on restart anyway.
soft_state: RwLock<Option<SoftState>>,
/// Storage-related container. Should apply and persist changes not related to consensus
/// (user changes)
toc: Arc<C>,
/// Operation apply notifier.
/// Fires a signal if some specific operation is applied to the state machine.
/// Signal is changed on change proposal and triggered if the change was applied by consensus on this peer.
/// Also sends the result of the operation.
on_consensus_op_apply:
Mutex<HashMap<ConsensusOperations, broadcast::Sender<Result<bool, StorageError>>>>,
/// Propose operation to the consensus.
/// Sends messages to the consensus thread, which is defined externally, outside of the state.
/// (e.g. in the `src/consensus.rs`)
propose_sender: OperationSender,
/// Status of the consensus thread, changed by the consensus thread
consensus_thread_status: RwLock<ConsensusThreadStatus>,
/// Consensus thread errors, changed by the consensus thread
message_send_failures: RwLock<HashMap<String, MessageSendErrors>>,
    /// Earliest time at which the next peer metadata update may be attempted
next_peer_metadata_update_attempt: Mutex<Instant>,
}
impl<C: CollectionContainer> ConsensusManager<C> {
pub fn new(
persistent_state: Persistent,
toc: Arc<C>,
propose_sender: OperationSender,
storage_path: &Path,
) -> Result<Self, StorageError> {
let mut wal = ConsensusOpWal::new(storage_path);
// When our Raft index and last snapshot index match, the last thing we did is apply a Raft
// snapshot. It is possible that we crashed before clearing the WAL, so we still do it now.
// Specifically, if the last operation was applying a snapshot and our WAL does still have
// older Raft entries, we clear the whole WAL. Consensus will take care of us catching up
// with the rest.
// See `apply_snapshot` function and <https://github.com/qdrant/qdrant/pull/7577>.
let raft_index = persistent_state.state().hard_state.commit;
let snapshot_index = persistent_state.latest_snapshot_meta.index;
        let last_operation_was_snapshot = raft_index == snapshot_index;
if last_operation_was_snapshot
&& let Ok(Some(last)) = wal.last_entry()
&& last.index < snapshot_index
{
log::warn!(
"Consensus WAL was not cleared after applying consensus snapshot, clearing it now"
);
wal.clear()?;
}
Ok(Self {
persistent: RwLock::new(persistent_state),
is_leader_established: Arc::new(IsReady::default()),
wal: Mutex::new(wal),
soft_state: RwLock::new(None),
toc,
on_consensus_op_apply: Default::default(),
propose_sender,
consensus_thread_status: RwLock::new(ConsensusThreadStatus::Working {
last_update: Utc::now(),
}),
message_send_failures: Default::default(),
next_peer_metadata_update_attempt: Mutex::new(Instant::now()),
})
}
pub fn report_snapshot(
&self,
peer_id: u64,
status: impl Into<SnapshotStatus>,
) -> Result<(), StorageError> {
self.propose_sender
.send(ConsensusOperations::report_snapshot(peer_id, status))
.map_err(|_err| {
StorageError::service_error(
"failed to send ReportSnapshot message to consensus thread",
)
})
}
pub fn record_message_send_failure<E: Error>(&self, peer_address: &Uri, error: E) {
let mut message_send_failures = self.message_send_failures.write();
let entry = message_send_failures
.entry(peer_address.to_string())
.or_default();
// Log only first error
if entry.count == 0 {
log::warn!("Failed to send message to {peer_address} with error: {error}")
}
entry.count += 1;
entry.latest_error = Some(error.to_string());
entry.latest_error_timestamp = Some(Utc::now());
}
pub fn record_message_send_success(&self, peer_address: &Uri) {
self.message_send_failures
.write()
.remove(&peer_address.to_string());
}
pub fn record_consensus_working(&self) {
*self.consensus_thread_status.write() = ConsensusThreadStatus::Working {
last_update: Utc::now(),
}
}
pub fn on_consensus_stopped(&self) {
*self.consensus_thread_status.write() = ConsensusThreadStatus::Stopped
}
pub fn on_consensus_thread_err<E: Display>(&self, err: E) {
*self.consensus_thread_status.write() = ConsensusThreadStatus::StoppedWithErr {
err: err.to_string(),
}
}
pub fn set_raft_soft_state(&self, state: &SoftState) {
*self.soft_state.write() = Some(SoftState { ..*state });
}
pub fn this_peer_id(&self) -> PeerId {
self.persistent.read().this_peer_id
}
pub fn peers(&self) -> Vec<PeerId> {
self.persistent
.read()
.peer_address_by_id()
.keys()
.copied()
.collect()
}
pub fn first_voter(&self) -> PeerId {
let state = self.persistent.read();
match state.first_voter() {
Some(peer_id) if peer_id != PeerId::MAX => peer_id,
_ => state.this_peer_id(),
}
}
pub fn set_first_voter(&self, id: PeerId) -> Result<(), StorageError> {
self.persistent.write().set_first_voter(id)
}
pub fn recover_first_voter(&self) -> Result<(), StorageError> {
if self.persistent.read().first_voter().is_none() {
log::debug!("Recovering first voter peer...");
let wal = self.wal.lock();
let peers = self.peers();
if let Some(peer_id) = recover_first_voter(&wal, &peers)? {
log::debug!("Recovered first voter peer {peer_id}");
self.set_first_voter(peer_id)?;
}
}
Ok(())
}
/// Report aggregated information about the cluster.
/// Useful for API reporting.
pub fn cluster_status(&self) -> ClusterStatus {
let persistent = self.persistent.read();
let hard_state = &persistent.state.hard_state;
let peers = persistent
.peer_address_by_id()
.into_iter()
.map(|(peer_id, uri)| {
(
peer_id,
PeerInfo {
uri: uri.to_string(),
},
)
})
.collect();
let pending_operations = persistent.unapplied_entities_count();
let soft_state = self.soft_state.read();
let leader = soft_state.as_ref().map(|state| state.leader_id);
let role = soft_state.as_ref().map(|state| state.raft_state.into());
let peer_id = persistent.this_peer_id;
let is_voter = persistent.state.conf_state.get_voters().contains(&peer_id);
ClusterStatus::Enabled(ClusterInfo {
peer_id,
peers,
raft_info: RaftInfo {
term: hard_state.term,
commit: hard_state.commit,
pending_operations,
leader,
role,
is_voter,
},
consensus_thread_status: self.consensus_thread_status.read().clone(),
message_send_failures: self.message_send_failures.read().clone(),
})
}
/// Handle peer removal operation.
///
/// 1. Try to remove peer
/// 2. Handle peer removal error
/// 3. Report to the listeners
///
/// Return if consensus should be stopped.
pub fn on_peer_remove(&self, peer_id: PeerId) -> Result<bool, StorageError> {
let mut stop_consensus: bool = false;
let report = match self.remove_peer(peer_id) {
Ok(()) => {
if self.this_peer_id() == peer_id {
stop_consensus = true;
}
Ok(true)
}
Err(err) => match err {
err @ StorageError::ServiceError { .. } => {
return Err(err);
}
_ => Err(err),
},
};
let operation = ConsensusOperations::RemovePeer(peer_id);
let on_apply = self.on_consensus_op_apply.lock().remove(&operation);
if let Some(on_apply) = on_apply
&& on_apply.send(report).is_err()
{
log::warn!(
"Failed to notify on consensus operation completion: channel receiver is dropped",
)
}
Ok(stop_consensus)
}
pub fn set_unapplied_entries(
&self,
first_index: EntryId,
last_index: EntryId,
) -> Result<(), raft::Error> {
self.persistent
.write()
.set_unapplied_entries(first_index, last_index)
.map_err(raft_error_other)
}
    /// Process the consensus operations which are already committed.
    /// If an error is returned - consensus should be stopped with that error.
/// Return `true` if consensus should be stopped (peer removed)
/// Return `false` if everything is ok.
pub fn apply_entries<T: Storage>(&self, raw_node: &mut RawNode<T>) -> anyhow::Result<bool> {
use raft::eraftpb::EntryType;
self.persistent
.write()
.save_if_dirty()
.context("Failed to save new state of applied entries queue")?;
loop {
let unapplied_index = self.persistent.read().current_unapplied_entry();
let Some(entry_index) = unapplied_index else {
break;
};
log::debug!("Applying committed entry with index {entry_index}");
let entry = self
.wal
.lock()
.entry(entry_index)
.context(format!("Failed to get entry at index {entry_index}"))?;
let stop_consensus: bool = if entry.data.is_empty() {
// Empty entry, when the peer becomes Leader it will send an empty entry.
false
} else {
match entry.get_entry_type() {
EntryType::EntryNormal => {
let operation_result = self.apply_normal_entry(&entry);
match operation_result {
Ok(result) => {
log::debug!(
"Successfully applied consensus operation entry. Index: {}. Result: {result}",
entry.index,
);
false
}
Err(err @ StorageError::ServiceError { .. }) => {
// This is a service error - stop consensus. Peer can be restarted when the problem is fixed.
return Err(err)
.context("Failed to apply collection meta operation entry");
}
Err(err) => {
log::warn!(
"Failed to apply collection meta operation entry with user error: {err}",
);
// This is a user error so we can safely consider it applied but with error as it was incorrect.
false
}
}
}
EntryType::EntryConfChangeV2 => {
let stop_consensus = self
.apply_conf_change_entry(&entry, raw_node)
.context("Failed to apply configuration change entry")?;
log::debug!(
"Successfully applied configuration change entry. Index: {}. Stop consensus: {}",
entry.index,
stop_consensus
);
stop_consensus
}
ty @ EntryType::EntryConfChange => {
return Err(anyhow!("Unexpected entry type: {ty:?}"));
}
}
};
if stop_consensus {
return Ok(stop_consensus);
}
self.persistent
.write()
.entry_applied()
.context("Failed to save new state of applied entries queue")?;
}
Ok(false) // do not stop consensus
}
    /// Process consensus operations which are already committed.
/// In this particular function - operations related to the cluster topology change:
///
/// - AddPeer (different states)
/// - RemovePeer
pub fn apply_conf_change_entry<T: Storage>(
&self,
entry: &RaftEntry,
raw_node: &mut RawNode<T>,
) -> Result<bool, StorageError> {
let change: ConfChangeV2 = prost_for_raft::Message::decode(entry.get_data())?;
let conf_state = raw_node.apply_conf_change(&change)?;
log::debug!("Applied conf state {conf_state:?}");
self.persistent
.write()
.apply_state_update(|state| state.conf_state = conf_state)?;
let mut stop_consensus: bool = false;
for single_change in &change.changes {
match single_change.change_type() {
ConfChangeType::AddNode => {
let context = entry.get_context();
if !context.is_empty() {
let peer_uri = str::from_utf8(context)
.map_err(|err| {
StorageError::service_error(format!(
"failed to parse peer URI: {err}"
))
})?
.parse()
.map_err(|err| {
StorageError::service_error(format!(
"failed to parse peer URI: {err}"
))
})?;
self.add_peer(single_change.node_id, peer_uri)?;
} else {
debug_assert!(
self.peer_address_by_id()
.contains_key(&single_change.node_id),
"Peer should be already known"
)
}
}
ConfChangeType::RemoveNode => {
log::debug!("Removing node {}", single_change.node_id);
stop_consensus |= self.on_peer_remove(single_change.node_id)?;
}
ConfChangeType::AddLearnerNode => {
log::debug!("Adding learner node {}", single_change.node_id);
if let Ok(peer_uri) = String::from_utf8_lossy(entry.get_context())
.deref()
.try_into()
{
let peer_uri: Uri = peer_uri;
// Add peer to state
self.add_peer(single_change.node_id, peer_uri.clone())?;
// Notify the submitter, that operation was performed
{
let operation = ConsensusOperations::AddPeer {
peer_id: single_change.node_id,
uri: peer_uri.to_string(),
};
let on_apply = self.on_consensus_op_apply.lock().remove(&operation);
if let Some(on_apply) = on_apply
&& on_apply.send(Ok(true)).is_err()
{
log::warn!(
"Failed to notify on consensus operation completion: channel receiver is dropped",
)
}
}
} else if entry.get_context().is_empty() {
// Allow empty context for compatibility
log::warn!(
"Outdated peer addition entry found with index: {}",
entry.get_index()
)
} else {
// Should not be reachable as it is checked in API
return Err(StorageError::service_error("Failed to parse peer uri"));
}
}
}
}
Ok(stop_consensus)
}
    /// Process consensus operations which are already committed.
/// In this particular function - operations related to user data:
///
/// - CreateCollection
/// - DropCollection
/// - Update collection params
/// - Update collection aliases
/// - Shards operations (transfer, remove, sync)
    /// - etc.
///
pub fn apply_normal_entry(&self, entry: &RaftEntry) -> Result<bool, StorageError> {
let operation: ConsensusOperations = entry.try_into()?;
let on_apply = self.on_consensus_op_apply.lock().remove(&operation);
let result = match operation {
ConsensusOperations::CollectionMeta(operation) => {
self.toc.perform_collection_meta_op(*operation)
}
ConsensusOperations::AddPeer { .. } | ConsensusOperations::RemovePeer(_) => {
// RemovePeer or AddPeer should be converted into native ConfChangeV2 message before sending to the Raft.
// So we do not expect to receive these operations as a normal entry.
// This is a debug assert so production migrations should be ok.
// TODO: parse into CollectionMetaOperation as we will not handle other cases here, but this removes compatibility with previous entry storage
debug_assert!(
false,
"Do not expect RemovePeer or AddPeer to be directly proposed"
);
Ok(false)
}
ConsensusOperations::UpdatePeerMetadata { peer_id, metadata } => {
self.persistent
.write()
.update_peer_metadata(peer_id, metadata)?;
Ok(true)
}
ConsensusOperations::UpdateClusterMetadata { key, value } => {
self.persistent
.write()
.update_cluster_metadata_key(key, value);
Ok(true)
}
ConsensusOperations::RequestSnapshot | ConsensusOperations::ReportSnapshot { .. } => {
unreachable!()
}
};
if let Some(on_apply) = on_apply
&& on_apply.send(result.clone()).is_err()
{
log::warn!(
"Failed to notify on consensus operation completion: channel receiver is dropped",
)
}
result
}
// Outer `Result` is "fatal" error, inner `Result` is "transient"/"local" error.
pub fn apply_snapshot(
&self,
snapshot: &raft::eraftpb::Snapshot,
) -> Result<Result<(), StorageError>, StorageError> {
let meta = snapshot.get_metadata();
let SnapshotData {
collections_data,
address_by_id,
metadata_by_id,
cluster_metadata,
} = snapshot.get_data().try_into()?;
self.toc.apply_collections_snapshot(collections_data)?;
self.persistent.write().update_from_snapshot(
meta,
address_by_id,
metadata_by_id,
cluster_metadata,
)?;
// Clear now obsolete WAL entries after persisting new Raft state
// This way we prevent a crash due to an empty WAL if we crash right after clearing it,
// without bumping the Raft state. If we now crash after persisting the new state but
// before clearing the WAL, we will clear the WAL on next startup by truncating all entries
// above our commit.
self.wal.lock().clear()?;
Ok(Ok(()))
}
pub fn set_hard_state(&self, hard_state: raft::eraftpb::HardState) -> Result<(), StorageError> {
self.persistent
.write()
.apply_state_update(move |state| state.hard_state = hard_state)
}
pub fn set_conf_state(&self, conf_state: raft::eraftpb::ConfState) -> Result<(), StorageError> {
self.persistent
.write()
.apply_state_update(move |state| state.conf_state = conf_state)
}
/// Check if the consensus have empty operations log
pub fn is_new_deployment(&self) -> bool {
self.hard_state().term == 0
}
pub fn hard_state(&self) -> raft::eraftpb::HardState {
self.persistent.read().state().hard_state.clone()
}
pub fn conf_state(&self) -> raft::eraftpb::ConfState {
self.persistent.read().state().conf_state.clone()
}
pub fn set_commit_index(&self, index: u64) -> Result<(), StorageError> {
self.persistent
.write()
.apply_state_update(|state| state.hard_state.commit = index)
}
pub fn add_peer(&self, peer_id: PeerId, uri: Uri) -> Result<(), StorageError> {
self.persistent.write().insert_peer(peer_id, uri)
}
pub fn remove_peer(&self, peer_id: PeerId) -> Result<(), StorageError> {
// We sincerely apologize for this piece of code.
// The `id_to_address` is shared between `channel_pool` and `persistent`,
        // plus we need to do additional removal in the `channel_pool`.
// So we handle `remove_peer` inside the `toc` and persist changes in the `persistent` after that.
self.toc.remove_peer(peer_id)?;
let persistent = self.persistent.read();
persistent.peer_metadata_by_id.write().remove(&peer_id);
persistent.save()
}
async fn await_receiver(
&self,
mut receiver: Receiver<Result<bool, StorageError>>,
wait_timeout: Duration,
operation: &ConsensusOperations,
) -> Result<bool, StorageError> {
let timeout_res = tokio::time::timeout(wait_timeout, receiver.recv())
.await
.map_err(|_: Elapsed| {
self.on_consensus_op_apply.lock().remove(operation);
StorageError::service_error(format!(
"Waiting for consensus operation commit failed. Timeout set at: {} seconds",
wait_timeout.as_secs_f64(),
))
})?;
// 2 possible errors to forward: channel sender dropped OR operation failed
timeout_res.map_err(|err| {
StorageError::service_error(format!("Error occurred while waiting for consensus operation. Channel sender dropped ({err})"))
})?
}
pub fn await_for_multiple_operations(
&self,
operations: Vec<ConsensusOperations>,
wait_timeout: Option<Duration>,
) -> impl Future<Output = Result<Result<(), StorageError>, Elapsed>> {
let mut receivers = vec![];
for operation in operations {
// one-shot broadcast channel
let (sender, mut receiver) = broadcast::channel(1);
let mut on_apply_lock = self.on_consensus_op_apply.lock();
// check that the exact same operation is not already in-flight
match on_apply_lock.get(&operation) {
Some(existing_sender) => {
// subscribe to existing sender for faster feedback
receiver = existing_sender.subscribe()
}
None => {
// insert new sender
on_apply_lock.insert(operation, sender);
}
};
receivers.push(receiver);
}
async move {
let await_for_all = join_all(receivers.iter_mut().map(|receiver| receiver.recv()));
let results = tokio::time::timeout(
wait_timeout.unwrap_or(defaults::CONSENSUS_META_OP_WAIT),
await_for_all,
)
.await?;
for result in results {
match result {
Ok(response_res) => match response_res {
Ok(_) => {}
Err(err) => return Ok(Err(err)),
},
Err(recv_error) => return Ok(Err(recv_error.into())),
}
}
Ok(Ok(()))
}
}
/// Wait and block until consensus reaches a `term` and actually applies the `commit`.
///
/// # Errors
///
    /// Returns an error if, for example, we have a diverged commit/term.
pub async fn wait_for_consensus_commit(
&self,
commit: u64,
term: u64,
consensus_tick: Duration,
timeout: Duration,
) -> Result<(), ()> {
let start = Instant::now();
// TODO: naive approach with spinlock for waiting on commit/term, find better way
while start.elapsed() < timeout {
let (current_commit, current_term) = self.persistent.read().applied_commit_term();
// Okay if on the same term and have at least the specified commit
let is_ok = current_term == term && current_commit >= commit;
if is_ok {
return Ok(());
}
// Fail if on a newer term
let is_fail = current_term > term;
if is_fail {
return Err(());
}
tokio::time::sleep(consensus_tick).await
}
// Fail on timeout
Err(())
}
/// Send operation to the consensus thread and listen for the result.
///
/// # Arguments
///
/// * `operation` - operation to propose
/// * `wait_timeout` - How long do we need to wait for the confirmation
pub async fn propose_consensus_op_with_await(
&self,
operation: ConsensusOperations,
wait_timeout: Option<Duration>,
) -> Result<bool, StorageError> {
let wait_timeout = wait_timeout.unwrap_or(defaults::CONSENSUS_META_OP_WAIT);
let is_leader_established = self.is_leader_established.clone();
let await_ready_for_timeout_future =
AbortOnDropHandle::new(tokio::task::spawn_blocking(move || {
is_leader_established.await_ready_for_timeout(wait_timeout)
}));
let is_leader_established = await_ready_for_timeout_future
.await
.map_err(|err| StorageError::service_error(err.to_string()))?;
if !is_leader_established {
return Err(StorageError::service_error(format!(
"Failed to propose operation: leader is not established within {wait_timeout:?}"
)));
}
// one-shot broadcast channel
let (sender, mut receiver) = broadcast::channel(1);
{
// acquire lock to insert new operation to apply
let mut on_apply_lock = self.on_consensus_op_apply.lock();
// check that the exact same operation is not already in-flight
match on_apply_lock.get(&operation) {
Some(existing_sender) => {
// subscribe to existing sender for faster feedback
receiver = existing_sender.subscribe()
}
None => {
// propose operation to consensus thread
self.propose_sender.send(operation.clone())?;
// insert new sender
on_apply_lock.insert(operation.clone(), sender);
}
};
}
let res = self
.await_receiver(receiver, wait_timeout, &operation)
.await?;
Ok(res)
}
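    // Usage sketch (illustrative only, not part of the original module; `consensus_manager`
    // and `peer_id` are hypothetical bindings): propose a peer removal and wait for it to be
    // committed, using the default timeout:
    //
    //     let accepted = consensus_manager
    //         .propose_consensus_op_with_await(ConsensusOperations::RemovePeer(peer_id), None)
    //         .await?;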
pub fn peer_address_by_id(&self) -> PeerAddressById {
self.persistent.read().peer_address_by_id()
}
pub fn peer_count(&self) -> usize {
self.persistent.read().peer_address_by_id.read().len()
}
pub fn append_entries(&self, entries: Vec<RaftEntry>) -> Result<(), StorageError> {
self.wal.lock().append_entries(entries)
}
pub fn last_applied_entry(&self) -> Option<u64> {
self.persistent.read().last_applied_entry()
}
pub fn sync_local_state(&self) -> Result<(), StorageError> {
self.try_update_peer_metadata();
self.toc.sync_local_state()
}
pub fn clear_wal(&self) -> Result<(), StorageError> {
self.wal.lock().clear()
}
pub fn compact_wal(&self, min_entries_to_compact: u64) -> Result<bool, StorageError> {
if min_entries_to_compact == 0 {
return Ok(false);
}
let Some(first_entry) = self.wal.lock().first_entry()? else {
return Ok(false);
};
let Some(last_applied_index) = self.persistent.read().last_applied_entry() else {
return Ok(false);
};
debug_assert!(
first_entry.index <= last_applied_index + 1,
"Raft WAL is missing {} unapplied entries (last applied index: {}, first WAL entry index: {})",
first_entry.index - last_applied_index - 1,
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/transfer.rs | lib/storage/src/content_manager/toc/transfer.rs | use async_trait::async_trait;
use collection::operations::types::{CollectionError, CollectionResult};
use collection::shards::CollectionId;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::resharding::ReshardKey;
use collection::shards::shard::{PeerId, ShardId};
use collection::shards::transfer::{ShardTransfer, ShardTransferConsensus, ShardTransferKey};
use super::dispatcher::TocDispatcher;
use crate::content_manager::collection_meta_ops::{
CollectionMetaOperations, ReshardingOperation, ShardTransferOperations,
};
use crate::content_manager::consensus_ops::ConsensusOperations;
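// Note (added for clarity, not part of the original module): most methods in the
// `ShardTransferConsensus` impl below follow the same pattern - wrap a
// `CollectionMetaOperations` value into `ConsensusOperations::CollectionMeta` and submit it
// through consensus, either fire-and-forget via the proposal sender or awaiting
// confirmation via `propose_consensus_op_with_await`.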
#[async_trait]
impl ShardTransferConsensus for TocDispatcher {
fn this_peer_id(&self) -> PeerId {
self.consensus_state.this_peer_id()
}
fn peers(&self) -> Vec<PeerId> {
self.consensus_state.peers()
}
fn consensus_commit_term(&self) -> (u64, u64) {
self.consensus_state
.0
.persistent
.read()
.applied_commit_term()
}
fn recovered_switch_to_partial(
&self,
transfer_config: &ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()> {
let Some(toc) = self.toc.upgrade() else {
return Err(CollectionError::service_error(
"Can't set shard state, table of contents is dropped",
));
};
let Some(proposal_sender) = toc.consensus_proposal_sender.as_ref() else {
return Err(CollectionError::service_error(
"Can't set shard state, this is a single node deployment",
));
};
// Propose operation to progress transfer, setting shard state to partial
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::RecoveryToPartial(transfer_config.key()),
)));
proposal_sender.send(operation).map_err(|err| {
CollectionError::service_error(format!("Failed to submit consensus proposal: {err}"))
})?;
Ok(())
}
async fn start_shard_transfer(
&self,
transfer_config: ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()> {
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Start(transfer_config),
)));
self
.consensus_state
.propose_consensus_op_with_await(operation, None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm shard transfer start operation through consensus: {err}"))
})
}
async fn restart_shard_transfer(
&self,
transfer_config: ShardTransfer,
collection_id: CollectionId,
) -> CollectionResult<()> {
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Restart(transfer_config.into()),
)));
self
.consensus_state
.propose_consensus_op_with_await(operation, None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm shard transfer restart operation through consensus: {err}"))
})
}
async fn abort_shard_transfer(
&self,
transfer: ShardTransferKey,
collection_id: CollectionId,
reason: &str,
) -> CollectionResult<()> {
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::TransferShard(
collection_id,
ShardTransferOperations::Abort {
transfer,
reason: reason.into(),
},
)));
self
.consensus_state
.propose_consensus_op_with_await(operation, None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm shard transfer abort operation through consensus: {err}"))
})
}
async fn set_shard_replica_set_state(
&self,
peer_id: Option<PeerId>,
collection_id: CollectionId,
shard_id: ShardId,
state: ReplicaState,
from_state: Option<ReplicaState>,
) -> CollectionResult<()> {
let operation = ConsensusOperations::set_replica_state(
collection_id,
shard_id,
peer_id.unwrap_or_else(|| self.this_peer_id()),
state,
from_state,
);
self
.consensus_state
.propose_consensus_op_with_await(operation.clone(), None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm set replica set state operation through consensus: {err}"))
})
}
async fn commit_read_hashring(
&self,
collection_id: CollectionId,
reshard_key: ReshardKey,
) -> CollectionResult<()> {
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::Resharding(
collection_id,
ReshardingOperation::CommitRead(reshard_key),
)));
self
.consensus_state
.propose_consensus_op_with_await(operation, None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm commit read hashring operation through consensus: {err}"))
})
}
async fn commit_write_hashring(
&self,
collection_id: CollectionId,
reshard_key: ReshardKey,
) -> CollectionResult<()> {
let operation =
ConsensusOperations::CollectionMeta(Box::new(CollectionMetaOperations::Resharding(
collection_id,
ReshardingOperation::CommitWrite(reshard_key),
)));
self
.consensus_state
.propose_consensus_op_with_await(operation, None)
.await
.map(|_| ())
.map_err(|err| {
CollectionError::service_error(format!("Failed to propose and confirm commit write hasrhing operation through consensus: {err}"))
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/telemetry.rs | lib/storage/src/content_manager/toc/telemetry.rs | use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::time::Duration;
use collection::operations::types::CollectionResult;
use collection::telemetry::{
CollectionSnapshotTelemetry, CollectionTelemetry, CollectionsAggregatedTelemetry,
};
use common::scope_tracker::{ScopeTracker, ScopeTrackerGuard};
use common::types::TelemetryDetail;
use dashmap::DashMap;
use crate::content_manager::toc::TableOfContent;
use crate::rbac::Access;
/// Collects various telemetry handled by ToC.
#[derive(Default)]
pub(super) struct TocTelemetryCollector {
snapshots: DashMap<String, SnapshotTelemetryCollector>,
}
impl TocTelemetryCollector {
/// Initialize snapshot telemetry entry for a collection.
pub fn init_snapshot_telemetry(&self, collection_name: &str) {
self.snapshots
.entry(collection_name.to_string())
.or_default();
}
}
/// Collected telemetry data provided by ToC.
pub struct TocTelemetryData {
pub collection_telemetry: Vec<CollectionTelemetry>,
pub snapshot_telemetry: Vec<CollectionSnapshotTelemetry>,
}
/// Collects telemetry data for snapshots.
#[derive(Default, Clone)]
pub struct SnapshotTelemetryCollector {
// Counter for currently running snapshot tasks.
pub running_snapshots: ScopeTracker,
// Counter for currently running snapshot recovery tasks.
pub running_snapshot_recovery: ScopeTracker,
// Total number of snapshot creations since startup.
pub snapshots_total: Arc<AtomicUsize>,
}
impl TableOfContent {
pub async fn get_telemetry_data(
&self,
detail: TelemetryDetail,
access: &Access,
timeout: Duration,
is_stopped: &AtomicBool,
) -> CollectionResult<TocTelemetryData> {
let all_collections = self.all_collections_access(access).await;
let mut collection_telemetry = Vec::new();
for collection_pass in &all_collections {
if is_stopped.load(Ordering::Relaxed) {
break;
}
if let Ok(collection) = self.get_collection(collection_pass).await {
collection_telemetry.push(collection.get_telemetry_data(detail, timeout).await?);
}
}
let snapshot_telemetry: Vec<_> = self
.telemetry
.snapshots
.iter()
.map(|item| {
let snapshot = item.value();
CollectionSnapshotTelemetry {
id: item.key().clone(),
running_snapshots: Some(snapshot.running_snapshots.get(Ordering::Relaxed)),
running_snapshot_recovery: Some(
snapshot.running_snapshot_recovery.get(Ordering::Relaxed),
),
total_snapshot_creations: Some(
snapshot.snapshots_total.load(Ordering::Relaxed),
),
}
})
.collect();
Ok(TocTelemetryData {
collection_telemetry,
snapshot_telemetry,
})
}
pub async fn get_aggregated_telemetry_data(
&self,
access: &Access,
timeout: Duration,
is_stopped: &AtomicBool,
) -> CollectionResult<Vec<CollectionsAggregatedTelemetry>> {
let mut result = Vec::new();
let all_collections = self.all_collections_access(access).await;
for collection_pass in &all_collections {
if is_stopped.load(Ordering::Relaxed) {
break;
}
if let Ok(collection) = self.get_collection(collection_pass).await {
result.push(collection.get_aggregated_telemetry_data(timeout).await?);
}
}
Ok(result)
}
pub fn max_collections(&self) -> Option<usize> {
self.storage_config.max_collections
}
/// Returns the atomic snapshot telemetry counters for the given collection.
/// If no entry exists for the given collection name, empty counters are created and returned.
pub fn snapshot_telemetry_collector(&self, collection: &str) -> SnapshotTelemetryCollector {
self.telemetry
.snapshots
.entry(collection.to_string())
.or_default()
// We don't need a reference since all counters are atomic.
// To prevent potential deadlocks and locking the dashmap longer than necessary, we clone here.
.value()
.clone()
}
/// Increase the snapshot creation counter and the `currently-running` counter.
///
/// Returns a [`ScopeTrackerGuard`] to measure the scope of the snapshot creation.
/// It must therefore always be bound to a named variable in order to correctly account for the whole scope.
/// For more information see [`ScopeTracker`] and [`ScopeTrackerGuard`].
#[must_use]
pub fn count_snapshot_creation(&self, collection_name: &str) -> ScopeTrackerGuard {
// Increment current running counter.
let running_snapshots_guard = self
.snapshot_telemetry_collector(collection_name)
.running_snapshots
.measure_scope();
// Increment total counter.
self.snapshot_telemetry_collector(collection_name)
.snapshots_total
.fetch_add(1, Ordering::Relaxed);
running_snapshots_guard
}
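// Illustrative usage sketch (not part of the original source), assuming `toc: &TableOfContent`:
// the returned guard must be bound to a named variable so the running-snapshots counter
// stays incremented for the whole snapshot scope.
//
//     let _guard = toc.count_snapshot_creation("my_collection"); // running += 1, total += 1
//     // ... create the snapshot while the guard is alive ...
//     // `_guard` dropped here, so the running counter is decremented again
//
// By contrast, `let _ = toc.count_snapshot_creation(...)` would drop the guard immediately
// and the snapshot scope would not be measured.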
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/dispatcher.rs | lib/storage/src/content_manager/toc/dispatcher.rs | use std::sync::Weak;
use super::TableOfContent;
use crate::content_manager::consensus_manager::ConsensusStateRef;
#[derive(Clone)]
pub struct TocDispatcher {
/// Reference to table of contents
///
/// This dispatcher is stored inside the table of contents after construction. It therefore
/// uses a weak reference to avoid a reference cycle which would prevent dropping the table of
/// contents on exit.
pub(super) toc: Weak<TableOfContent>,
pub(super) consensus_state: ConsensusStateRef,
}
impl TocDispatcher {
pub fn new(toc: Weak<TableOfContent>, consensus_state: ConsensusStateRef) -> Self {
Self {
toc,
consensus_state,
}
}
pub fn consensus_state(&self) -> &ConsensusStateRef {
&self.consensus_state
}
}
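// Illustrative sketch (not part of the original source): since `toc` is only a `Weak`
// reference, code with access to this field would typically upgrade it before use and
// bail out if the table of contents has already been dropped (e.g. during shutdown):
//
//     let Some(toc) = dispatcher.toc.upgrade() else {
//         return; // ToC is already gone, nothing to do
//     };
//     // `toc` is now an `Arc<TableOfContent>` and can be used as usual.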
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/collection_container.rs | lib/storage/src/content_manager/toc/collection_container.rs | use std::collections::HashMap;
use std::sync::Arc;
use collection::collection::Collection;
use collection::collection_state;
use collection::shards::CollectionId;
use collection::shards::collection_shard_distribution::CollectionShardDistribution;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::shard::PeerId;
use super::TableOfContent;
use crate::content_manager::collection_meta_ops::*;
use crate::content_manager::collections_ops::Checker as _;
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::consensus_ops::ConsensusOperations;
use crate::content_manager::errors::StorageError;
use crate::content_manager::{CollectionContainer, consensus_manager};
impl CollectionContainer for TableOfContent {
fn perform_collection_meta_op(
&self,
operation: CollectionMetaOperations,
) -> Result<bool, StorageError> {
self.perform_collection_meta_op_sync(operation)
}
fn collections_snapshot(&self) -> consensus_manager::CollectionsSnapshot {
self.collections_snapshot_sync()
}
fn apply_collections_snapshot(
&self,
data: consensus_manager::CollectionsSnapshot,
) -> Result<(), StorageError> {
self.apply_collections_snapshot(data)
}
fn remove_peer(&self, peer_id: PeerId) -> Result<(), StorageError> {
self.general_runtime.block_on(async {
// Validation:
// 1. Check that we are not removing some unique shards (removed)
// Validation passed
self.remove_shards_at_peer(peer_id).await?;
if self.this_peer_id == peer_id {
// We are detaching the current peer, so we need to remove all connections
// Remove all peers from the channel service
let ids_to_drop: Vec<_> = self
.channel_service
.id_to_address
.read()
.keys()
.filter(|id| **id != self.this_peer_id)
.copied()
.collect();
for id in ids_to_drop {
self.channel_service.remove_peer(id).await;
}
} else {
self.channel_service.remove_peer(peer_id).await;
}
Ok(())
})
}
fn sync_local_state(&self) -> Result<(), StorageError> {
self.general_runtime.block_on(async {
let collections = self.collections.read().await;
let transfer_failure_callback =
Self::on_transfer_failure_callback(self.consensus_proposal_sender.clone());
let transfer_success_callback =
Self::on_transfer_success_callback(self.consensus_proposal_sender.clone());
for collection in collections.values() {
let finish_shard_initialize = Self::change_peer_state_callback(
self.consensus_proposal_sender.clone(),
collection.name().to_string(),
ReplicaState::Active,
Some(ReplicaState::Initializing),
);
let convert_to_listener_callback = Self::change_peer_state_callback(
self.consensus_proposal_sender.clone(),
collection.name().to_string(),
ReplicaState::Listener,
Some(ReplicaState::Active),
);
let convert_from_listener_to_active_callback = Self::change_peer_state_callback(
self.consensus_proposal_sender.clone(),
collection.name().to_string(),
ReplicaState::Active,
Some(ReplicaState::Listener),
);
collection
.sync_local_state(
transfer_failure_callback.clone(),
transfer_success_callback.clone(),
finish_shard_initialize,
convert_to_listener_callback,
convert_from_listener_to_active_callback,
)
.await?;
}
Ok(())
})
}
}
impl TableOfContent {
fn collections_snapshot_sync(&self) -> consensus_manager::CollectionsSnapshot {
self.general_runtime.block_on(self.collections_snapshot())
}
async fn collections_snapshot(&self) -> consensus_manager::CollectionsSnapshot {
let mut collections: HashMap<CollectionId, collection_state::State> = HashMap::new();
for (id, collection) in self.collections.read().await.iter() {
collections.insert(id.clone(), collection.state().await);
}
consensus_manager::CollectionsSnapshot {
collections,
aliases: self.alias_persistence.read().await.state().clone(),
}
}
fn apply_collections_snapshot(
&self,
data: consensus_manager::CollectionsSnapshot,
) -> Result<(), StorageError> {
self.general_runtime.block_on(async {
let mut collections = self.collections.write().await;
for (id, state) in &data.collections {
if let Some(collection) = collections.get(id) {
let collection_uuid = collection.uuid().await;
let recreate_collection = if collection_uuid != state.config.uuid {
log::warn!(
"Recreating collection {id}, because collection UUID is different: \
existing collection UUID: {collection_uuid:?}, \
Raft snapshot collection UUID: {:?}",
state.config.uuid,
);
true
} else if let Err(err) = collection.check_config_compatible(&state.config).await {
log::warn!(
"Recreating collection {id}, because collection config is incompatible: \
{err}",
);
true
} else {
false
};
if recreate_collection {
// Drop `collections` lock
drop(collections);
// Delete collection
self.delete_collection(id).await?;
// Re-acquire `collections` lock 🙄
collections = self.collections.write().await;
}
}
let collection_exists = collections.contains_key(id);
// Create collection if not present locally
if !collection_exists {
let collection_path = self.create_collection_path(id).await?;
let snapshots_path = self.create_snapshots_path(id).await?;
let shard_distribution =
CollectionShardDistribution::from_shards_info(state.shards.clone());
let collection = Collection::new(
id.clone(),
self.this_peer_id,
&collection_path,
&snapshots_path,
&state.config,
self.storage_config
.to_shared_storage_config(self.is_distributed())
.into(),
shard_distribution,
Some(state.shards_key_mapping.clone()),
self.channel_service.clone(),
Self::change_peer_from_state_callback(
self.consensus_proposal_sender.clone(),
id.clone(),
ReplicaState::Dead,
),
Self::request_shard_transfer_callback(
self.consensus_proposal_sender.clone(),
id.clone(),
),
Self::abort_shard_transfer_callback(
self.consensus_proposal_sender.clone(),
id.clone(),
),
Some(self.search_runtime.handle().clone()),
Some(self.update_runtime.handle().clone()),
self.optimizer_resource_budget.clone(),
self.storage_config.optimizers_overwrite.clone(),
)
.await?;
collections.validate_collection_not_exists(id)?;
collections.insert(id.clone(), collection);
}
let Some(collection) = collections.get(id) else {
unreachable!()
};
// Update collection state
if &collection.state().await != state {
if let Some(proposal_sender) = self.consensus_proposal_sender.clone() {
// In some cases on state application it might be needed to abort the transfer
let abort_transfer = |transfer| {
if let Err(error) =
proposal_sender.send(ConsensusOperations::abort_transfer(
id.clone(),
transfer,
"sender was not up to date",
))
{
log::error!(
"Can't report transfer progress to consensus: {error}"
)
};
};
collection
.apply_state(state.clone(), self.this_peer_id(), abort_transfer)
.await?;
} else {
log::error!("Can't apply state: single node mode");
}
}
// Mark local shards as dead (to initiate shard transfer),
// if collection has been created during snapshot application
if !collection_exists {
for shard_id in collection.get_local_shards().await {
let shard_holder = collection.shards_holder().read_owned().await;
let Some(replica_set) = shard_holder.get_shard(shard_id) else {
continue;
};
if replica_set.is_local().await {
replica_set.add_locally_disabled(None, self.this_peer_id, None);
}
}
}
}
// Collect names of collections that are present locally
let collection_names: Vec<_> = collections.keys().cloned().collect();
// Drop `collections` lock
drop(collections);
// Remove collections that are present locally, but are not in the snapshot state
for collection_name in &collection_names {
if !data.collections.contains_key(collection_name) {
log::debug!(
"Deleting collection {collection_name} \
because it is not part of the consensus snapshot",
);
self.delete_collection(collection_name).await?;
}
}
// Apply alias mapping
self.alias_persistence
.write()
.await
.apply_state(data.aliases)?;
Ok(())
})
}
async fn remove_shards_at_peer(&self, peer_id: PeerId) -> Result<(), StorageError> {
let collections = self.collections.read().await;
for collection in collections.values() {
collection.remove_shards_at_peer(peer_id).await?;
}
Ok(())
}
#[allow(dead_code)]
fn remove_shards_at_peer_sync(&self, peer_id: PeerId) -> Result<(), StorageError> {
self.general_runtime
.block_on(self.remove_shards_at_peer(peer_id))
}
fn on_transfer_failure_callback(
proposal_sender: Option<OperationSender>,
) -> collection::collection::OnTransferFailure {
Arc::new(move |transfer, collection_name, reason| {
if let Some(proposal_sender) = &proposal_sender {
let operation = ConsensusOperations::abort_transfer(
collection_name.clone(),
transfer.clone(),
reason,
);
if let Err(send_error) = proposal_sender.send(operation) {
log::error!(
"Can't send proposal to abort transfer of shard {} of collection {collection_name}. Error: {send_error}",
transfer.shard_id,
);
}
}
})
}
fn on_transfer_success_callback(
proposal_sender: Option<OperationSender>,
) -> collection::collection::OnTransferSuccess {
Arc::new(move |transfer, collection_name| {
if let Some(proposal_sender) = &proposal_sender {
let operation =
ConsensusOperations::finish_transfer(collection_name.clone(), transfer.clone());
if let Err(send_error) = proposal_sender.send(operation) {
log::error!(
"Can't send proposal to complete transfer of shard {} of collection {collection_name}. Error: {send_error}",
transfer.shard_id,
);
}
}
})
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/request_hw_counter.rs | lib/storage/src/content_manager/toc/request_hw_counter.rs | use common::counter::hardware_accumulator::{HwMeasurementAcc, HwSharedDrain};
use super::TableOfContent;
impl TableOfContent {
pub fn get_collection_hw_metrics(&self, collection_id: String) -> HwSharedDrain {
self.collection_hw_metrics
.entry(collection_id)
.or_default()
.clone()
}
}
#[derive(Clone)]
pub struct RequestHwCounter {
counter: HwMeasurementAcc,
/// If this flag is set, RequestHwCounter will be converted into non-None API representation.
/// Otherwise, it will be ignored.
report_to_api: bool,
}
impl RequestHwCounter {
pub fn new(counter: HwMeasurementAcc, report_to_api: bool) -> Self {
Self {
counter,
report_to_api,
}
}
pub fn get_counter(&self) -> HwMeasurementAcc {
self.counter.clone()
}
pub fn to_rest_api(self) -> Option<api::rest::models::HardwareUsage> {
if self.report_to_api {
Some(api::rest::models::HardwareUsage {
cpu: self.counter.get_cpu(),
payload_io_read: self.counter.get_payload_io_read(),
payload_io_write: self.counter.get_payload_io_write(),
payload_index_io_read: self.counter.get_payload_index_io_read(),
payload_index_io_write: self.counter.get_payload_index_io_write(),
vector_io_read: self.counter.get_vector_io_read(),
vector_io_write: self.counter.get_vector_io_write(),
})
} else {
None
}
}
pub fn to_grpc_api(self) -> Option<api::grpc::qdrant::HardwareUsage> {
if self.report_to_api {
Some(api::grpc::qdrant::HardwareUsage {
cpu: self.counter.get_cpu() as u64,
payload_io_read: self.counter.get_payload_io_read() as u64,
payload_io_write: self.counter.get_payload_io_write() as u64,
payload_index_io_read: self.counter.get_payload_index_io_read() as u64,
payload_index_io_write: self.counter.get_payload_index_io_write() as u64,
vector_io_read: self.counter.get_vector_io_read() as u64,
vector_io_write: self.counter.get_vector_io_write() as u64,
})
} else {
None
}
}
}
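// Illustrative usage sketch (not part of the original source), assuming the caller already
// has an accumulator `acc: HwMeasurementAcc` for the current request:
//
//     let hw = RequestHwCounter::new(acc, /* report_to_api: */ true);
//     // ... pass `hw.get_counter()` down into the collection layer ...
//     let usage = hw.to_rest_api(); // Some(HardwareUsage { .. }) because reporting is enabled
//
// With `report_to_api == false`, both `to_rest_api()` and `to_grpc_api()` return `None`,
// so the measured values are never exposed in the API response.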
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/mod.rs | lib/storage/src/content_manager/toc/mod.rs | mod collection_container;
mod collection_meta_ops;
mod create_collection;
pub mod dispatcher;
mod point_ops;
mod point_ops_internal;
pub mod request_hw_counter;
mod snapshots;
mod telemetry;
mod temp_directories;
pub mod transfer;
use std::cmp::max;
use std::collections::{HashMap, HashSet};
use std::num::NonZeroU32;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use api::rest::models::HardwareUsage;
use collection::collection::{Collection, RequestShardTransfer};
use collection::config::{
CollectionConfigInternal, default_replication_factor, default_shard_number,
};
use collection::operations::types::*;
use collection::shards::channel_service::ChannelService;
use collection::shards::replica_set::AbortShardTransfer;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::shard::{PeerId, ShardId};
use collection::shards::{CollectionId, replica_set};
use common::budget::ResourceBudget;
use common::counter::hardware_accumulator::HwSharedDrain;
use common::cpu::get_num_cpus;
use dashmap::DashMap;
use fs_err as fs;
use fs_err::tokio as tokio_fs;
use segment::data_types::collection_defaults::CollectionConfigDefaults;
use tokio::runtime::{Handle, Runtime};
use tokio::sync::{Mutex, RwLock, RwLockReadGuard, Semaphore};
use self::dispatcher::TocDispatcher;
use crate::ConsensusOperations;
use crate::content_manager::alias_mapping::AliasPersistence;
use crate::content_manager::collection_meta_ops::CreateCollectionOperation;
use crate::content_manager::collections_ops::{Checker, Collections};
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::errors::StorageError;
use crate::content_manager::shard_distribution::ShardDistributionProposal;
use crate::content_manager::toc::telemetry::TocTelemetryCollector;
use crate::rbac::{Access, AccessRequirements, CollectionPass};
use crate::types::StorageConfig;
pub const ALIASES_PATH: &str = "aliases";
pub const COLLECTIONS_DIR: &str = "collections";
pub const FULL_SNAPSHOT_FILE_NAME: &str = "full-snapshot";
/// The main object of the service. It holds all objects, required for proper functioning.
///
/// In most cases a single `TableOfContent` is enough for the whole service. It is created only once
/// during the launch of the service.
pub struct TableOfContent {
collections: Arc<RwLock<Collections>>,
pub(crate) storage_config: Arc<StorageConfig>,
search_runtime: Runtime,
update_runtime: Runtime,
general_runtime: Runtime,
/// Global CPU budget in number of cores for all optimization tasks.
/// Assigns CPU permits to tasks to limit overall resource utilization.
optimizer_resource_budget: ResourceBudget,
alias_persistence: RwLock<AliasPersistence>,
pub this_peer_id: PeerId,
channel_service: ChannelService,
/// Backlink to the consensus, if none - single node mode
consensus_proposal_sender: Option<OperationSender>,
/// Dispatcher for access to table of contents and consensus, if none - single node mode
toc_dispatcher: parking_lot::Mutex<Option<TocDispatcher>>,
/// Prevent DDoS of too many concurrent updates in distributed mode.
/// One external update usually triggers multiple internal updates, which breaks internal
/// timings. For example, the health check timing and consensus timing.
///
/// If not defined - no rate limiting is applied.
update_rate_limiter: Option<Semaphore>,
/// A lock to prevent concurrent collection creation.
/// Effectively, this lock ensures that `create_collection` is called sequentially.
collection_create_lock: Mutex<()>,
/// Aggregation of all hardware measurements for each alias or collection config.
collection_hw_metrics: DashMap<CollectionId, HwSharedDrain>,
/// Collector for various telemetry/metrics.
telemetry: TocTelemetryCollector,
}
impl TableOfContent {
/// PeerId does not change during execution so it is ok to copy it here.
#[allow(clippy::too_many_arguments)]
pub fn new(
storage_config: &StorageConfig,
search_runtime: Runtime,
update_runtime: Runtime,
general_runtime: Runtime,
optimizer_resource_budget: ResourceBudget,
channel_service: ChannelService,
this_peer_id: PeerId,
consensus_proposal_sender: Option<OperationSender>,
) -> Self {
let collections_path = Path::new(&storage_config.storage_path).join(COLLECTIONS_DIR);
fs::create_dir_all(&collections_path).expect("Can't create Collections directory");
if let Some(path) = storage_config.temp_path.as_deref() {
let temp_path = Path::new(path);
fs::create_dir_all(temp_path).expect("Can't create temporary files directory");
}
let collection_paths =
fs::read_dir(&collections_path).expect("Can't read Collections directory");
let mut collections: HashMap<String, Collection> = Default::default();
let is_distributed = consensus_proposal_sender.is_some();
for entry in collection_paths {
let collection_path = entry
.expect("Can't access of one of the collection files")
.path();
if !CollectionConfigInternal::check(&collection_path) {
log::warn!(
"Collection config is not found in the collection directory: {}, skipping",
collection_path.display(),
);
continue;
}
let collection_name = collection_path
.file_name()
.expect("Can't resolve a filename of one of the collection files")
.to_str()
.expect("A filename of one of the collection files is not a valid UTF-8")
.to_string();
let snapshots_path = Path::new(&storage_config.snapshots_path.clone()).to_owned();
let collection_snapshots_path =
Self::collection_snapshots_path(&snapshots_path, &collection_name);
log::info!("Loading collection: {collection_name}");
let collection = general_runtime.block_on(Collection::load(
collection_name.clone(),
this_peer_id,
&collection_path,
&collection_snapshots_path,
storage_config
.to_shared_storage_config(is_distributed)
.into(),
channel_service.clone(),
Self::change_peer_from_state_callback(
consensus_proposal_sender.clone(),
collection_name.clone(),
ReplicaState::Dead,
),
Self::request_shard_transfer_callback(
consensus_proposal_sender.clone(),
collection_name.clone(),
),
Self::abort_shard_transfer_callback(
consensus_proposal_sender.clone(),
collection_name.clone(),
),
Some(search_runtime.handle().clone()),
Some(update_runtime.handle().clone()),
optimizer_resource_budget.clone(),
storage_config.optimizers_overwrite.clone(),
));
collections.insert(collection_name.clone(), collection);
}
// Initialize snapshot telemetry for all loaded collections.
let telemetry = TocTelemetryCollector::default();
for collection_name in collections.keys() {
telemetry.init_snapshot_telemetry(collection_name);
}
let alias_path = Path::new(&storage_config.storage_path).join(ALIASES_PATH);
let alias_persistence = AliasPersistence::open(&alias_path)
.expect("Can't open database by the provided config");
let rate_limiter = match storage_config.performance.update_rate_limit {
Some(limit) => Some(Semaphore::new(limit)),
None => {
if consensus_proposal_sender.is_some() {
// Auto adjust the rate limit in distributed mode.
// Select number of working threads as a guess.
let limit = max(get_num_cpus(), 2);
log::debug!(
"Auto adjusting update rate limit to {limit} parallel update requests"
);
Some(Semaphore::new(limit))
} else {
None
}
}
};
TableOfContent {
collections: Arc::new(RwLock::new(collections)),
storage_config: Arc::new(storage_config.clone()),
search_runtime,
update_runtime,
general_runtime,
optimizer_resource_budget,
alias_persistence: RwLock::new(alias_persistence),
this_peer_id,
channel_service,
consensus_proposal_sender,
toc_dispatcher: Default::default(),
update_rate_limiter: rate_limiter,
collection_create_lock: Default::default(),
collection_hw_metrics: DashMap::new(),
telemetry,
}
}
/// Return `true` if service is working in distributed mode.
pub fn is_distributed(&self) -> bool {
self.consensus_proposal_sender.is_some()
}
pub fn storage_path(&self) -> &str {
&self.storage_config.storage_path
}
/// List of all collections to which the user has access
pub async fn all_collections(&self, access: &Access) -> Vec<CollectionPass<'static>> {
self.all_collections_with_access_requirements(access, AccessRequirements::new())
.await
}
pub async fn all_collections_access(&self, access: &Access) -> Vec<CollectionPass<'static>> {
self.all_collections_with_access_requirements(access, AccessRequirements::new())
.await
}
async fn all_collections_with_access_requirements(
&self,
access: &Access,
access_requirements: AccessRequirements,
) -> Vec<CollectionPass<'static>> {
self.collections
.read()
.await
.keys()
.filter_map(|name| {
access
.check_collection_access(name, access_requirements)
.ok()
.map(|pass| pass.into_static())
})
.collect()
}
/// List of all collections
pub fn all_collections_sync(&self) -> Vec<String> {
self.general_runtime
.block_on(self.collections.read())
.keys()
.cloned()
.collect()
}
/// Same as `get_collection`, but does not check access rights.
/// Intended for internal use only.
///
/// **Do not make public**
async fn get_collection_unchecked(
&self,
collection_name: &str,
) -> Result<RwLockReadGuard<'_, Collection>, StorageError> {
let read_collection = self.collections.read().await;
let real_collection_name = {
let alias_persistence = self.alias_persistence.read().await;
Self::resolve_name(collection_name, &read_collection, &alias_persistence)?
};
// resolve_name already checked collection existence, unwrap is safe here
Ok(RwLockReadGuard::map(read_collection, |collection| {
collection.get(&real_collection_name).unwrap() // TODO: WTF!?
}))
}
pub async fn get_collection(
&self,
collection: &CollectionPass<'_>,
) -> Result<RwLockReadGuard<'_, Collection>, StorageError> {
self.get_collection_unchecked(collection.name()).await
}
async fn get_collection_opt(
&self,
collection_name: String,
) -> Option<RwLockReadGuard<'_, Collection>> {
self.get_collection_unchecked(&collection_name).await.ok()
}
/// Finds the original name of the collection
///
/// # Arguments
///
/// * `collection_name` - Name of the collection or alias to resolve
/// * `collections` - A reference to the collections map
/// * `aliases` - A reference to the aliases storage
///
/// # Result
///
/// If the collection exists - returns its name.
/// If an alias exists - returns the original collection name.
/// If neither exists - returns a [`StorageError`].
fn resolve_name(
collection_name: &str,
collections: &Collections,
aliases: &AliasPersistence,
) -> Result<String, StorageError> {
let alias_collection_name = aliases.get(collection_name);
let resolved_name = match alias_collection_name {
None => collection_name.to_string(),
Some(resolved_alias) => resolved_alias,
};
collections.validate_collection_exists(&resolved_name)?;
Ok(resolved_name)
}
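// Illustrative sketch (not part of the original source), with hypothetical names, assuming
// an alias "prod" points at a collection "products_v2":
//
//     resolve_name("prod", &collections, &aliases)         // -> Ok("products_v2")
//     resolve_name("products_v2", &collections, &aliases)  // -> Ok("products_v2")
//     resolve_name("missing", &collections, &aliases)      // -> Err(StorageError { .. })
//
// Aliases are checked first, so a name is only treated as a plain collection name when no
// alias with that name exists.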
/// List of all aliases for a given collection
pub async fn collection_aliases(
&self,
collection_pass: &CollectionPass<'_>,
access: &Access,
) -> Result<Vec<String>, StorageError> {
let mut result = self
.alias_persistence
.read()
.await
.collection_aliases(collection_pass.name());
result.retain(|alias| {
access
.check_collection_access(alias, AccessRequirements::new())
.is_ok()
});
Ok(result)
}
/// List of all aliases across all collections
pub async fn list_aliases(
&self,
access: &Access,
) -> Result<Vec<AliasDescription>, StorageError> {
let all_collections = self.all_collections(access).await;
let mut aliases: Vec<AliasDescription> = Default::default();
for collection_pass in &all_collections {
for alias in self.collection_aliases(collection_pass, access).await? {
aliases.push(AliasDescription {
alias_name: alias.clone(),
collection_name: collection_pass.to_string(),
});
}
}
Ok(aliases)
}
pub fn suggest_shard_distribution(
&self,
op: &CreateCollectionOperation,
collection_defaults: Option<&CollectionConfigDefaults>,
number_of_peers: usize,
) -> ShardDistributionProposal {
let non_zero_number_of_peers =
NonZeroU32::new(number_of_peers as u32).expect("NUmber of peers must be at least 1");
let suggested_shard_number = collection_defaults
.map(|cd| cd.get_shard_number(number_of_peers as u32))
.map(|x| NonZeroU32::new(x).expect("Shard number must be at least 1"))
.unwrap_or_else(|| default_shard_number().saturating_mul(non_zero_number_of_peers));
let shard_number = op
.create_collection
.shard_number
.and_then(NonZeroU32::new)
.unwrap_or(suggested_shard_number);
let mut known_peers_set: HashSet<_> = self
.channel_service
.id_to_address
.read()
.keys()
.copied()
.collect();
known_peers_set.insert(self.this_peer_id());
let known_peers: Vec<_> = known_peers_set.into_iter().collect();
let suggested_replication_factor = collection_defaults
.and_then(|cd| cd.replication_factor)
.and_then(NonZeroU32::new)
.unwrap_or_else(default_replication_factor);
let replication_factor = op
.create_collection
.replication_factor
.and_then(NonZeroU32::new)
.unwrap_or(suggested_replication_factor);
let shard_distribution =
ShardDistributionProposal::new(shard_number, replication_factor, &known_peers);
log::debug!(
"Suggesting distribution for {} shards for collection '{}' among {} peers {:?}",
shard_number,
op.collection_name,
known_peers.len(),
shard_distribution.distribution
);
shard_distribution
}
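// Illustrative walk-through (not part of the original source), assuming the create request
// specifies neither `shard_number` nor `replication_factor` and no collection defaults are
// configured:
//
//     suggested_shard_number = default_shard_number() * number_of_peers
//     shard_number           = suggested_shard_number
//     replication_factor     = default_replication_factor()
//
// Explicit values from the request always take precedence, and configured collection
// defaults override the built-in defaults when computing the suggestions.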
/// Initiate receiving shard.
///
/// Fails if the collection does not exist
pub async fn initiate_receiving_shard(
&self,
collection_name: String,
shard_id: ShardId,
) -> Result<(), StorageError> {
// TODO: Ensure cancel safety!
log::info!("Initiating receiving shard {collection_name}:{shard_id}");
// TODO: Ensure cancel safety!
let initiate_shard_transfer_future = self
.get_collection_unchecked(&collection_name)
.await?
.initiate_shard_transfer(shard_id);
initiate_shard_transfer_future.await?;
Ok(())
}
pub fn request_snapshot(&self) -> Result<(), StorageError> {
self.get_consensus_proposal_sender()?
.send(ConsensusOperations::request_snapshot())?;
Ok(())
}
pub async fn update_cluster_metadata(
&self,
key: String,
value: serde_json::Value,
wait: bool,
) -> Result<(), StorageError> {
let operation = ConsensusOperations::UpdateClusterMetadata { key, value };
if wait {
let dispatcher = self.toc_dispatcher.lock().clone().ok_or_else(|| {
StorageError::service_error("Qdrant is running in standalone mode")
})?;
dispatcher
.consensus_state()
.propose_consensus_op_with_await(operation, None)
.await
.map_err(|err| {
StorageError::service_error(format!("Failed to propose and confirm metadata update operation through consensus: {err}"))
})?;
} else {
self.get_consensus_proposal_sender()?.send(operation)?;
}
Ok(())
}
pub async fn peer_has_shards(&self, peer_id: PeerId) -> bool {
for collection in self.collections.read().await.values() {
let state = collection.state().await;
if state
.shards
.into_values()
.flat_map(|shard_info| shard_info.replicas.into_keys())
.any(|x| x == peer_id)
{
return true;
}
}
false
}
/// Cancels all transfers related to the current peer.
///
/// Transfers where this peer is the source or the target will be cancelled.
pub async fn cancel_related_transfers(&self, reason: &str) -> Result<(), StorageError> {
let collections = self.collections.read().await;
if let Some(proposal_sender) = &self.consensus_proposal_sender {
for collection in collections.values() {
for transfer in collection.get_related_transfers(self.this_peer_id).await {
let cancel_transfer = ConsensusOperations::abort_transfer(
collection.name().to_string(),
transfer,
reason,
);
proposal_sender.send(cancel_transfer)?;
}
}
} else {
log::error!(
"Can't cancel transfers related to this node, this is a single node deployment"
);
}
Ok(())
}
fn change_peer_state_callback(
proposal_sender: Option<OperationSender>,
collection_name: String,
state: ReplicaState,
from_state: Option<ReplicaState>,
) -> replica_set::ChangePeerState {
let callback =
Self::change_peer_from_state_callback(proposal_sender, collection_name, state);
Arc::new(move |peer_id, shard_id| callback(peer_id, shard_id, from_state))
}
fn change_peer_from_state_callback(
proposal_sender: Option<OperationSender>,
collection_name: String,
state: ReplicaState,
) -> replica_set::ChangePeerFromState {
Arc::new(move |peer_id, shard_id, from_state| {
if let Some(proposal_sender) = &proposal_sender {
if let Err(send_error) = Self::send_set_replica_state_proposal_op(
proposal_sender,
collection_name.clone(),
peer_id,
shard_id,
state,
from_state,
) {
log::error!(
"Can't send proposal to deactivate replica on peer {peer_id} of shard {shard_id} of collection {collection_name}. Error: {send_error}",
);
}
} else {
log::error!(
"Can't send proposal to deactivate replica. Error: this is a single node deployment",
);
}
})
}
fn send_set_replica_state_proposal_op(
proposal_sender: &OperationSender,
collection_name: String,
peer_id: PeerId,
shard_id: ShardId,
state: ReplicaState,
from_state: Option<ReplicaState>,
) -> Result<(), StorageError> {
let operation = ConsensusOperations::set_replica_state(
collection_name,
shard_id,
peer_id,
state,
from_state,
);
proposal_sender.send(operation)
}
fn request_shard_transfer_callback(
proposal_sender: Option<OperationSender>,
collection_name: String,
) -> RequestShardTransfer {
Arc::new(move |shard_transfer| {
if let Some(proposal_sender) = &proposal_sender {
let collection_name = collection_name.clone();
let to_peer = shard_transfer.to;
let operation =
ConsensusOperations::start_transfer(collection_name.clone(), shard_transfer);
if let Err(send_error) = proposal_sender.send(operation) {
log::error!(
"Can't send proposal to request shard transfer to peer {to_peer} of collection {collection_name}. Error: {send_error}"
);
}
} else {
log::error!(
"Can't send proposal to request shard transfer. Error: this is a single node deployment",
);
}
})
}
fn abort_shard_transfer_callback(
proposal_sender: Option<OperationSender>,
collection_name: String,
) -> AbortShardTransfer {
Arc::new(move |shard_transfer, reason| {
if let Some(proposal_sender) = &proposal_sender {
let shard_id = shard_transfer.shard_id;
let from = shard_transfer.from;
let to = shard_transfer.to;
let operation = ConsensusOperations::abort_transfer(
collection_name.clone(),
shard_transfer,
reason,
);
if let Err(send_error) = proposal_sender.send(operation) {
log::error!(
"Can't send proposal to abort \
{collection_name}:{shard_id} / {from} -> {to} shard transfer: \
{send_error}",
);
}
} else {
log::error!(
"Can't send proposal to abort shard transfer: \
this is a single node deployment",
);
}
})
}
fn this_peer_id(&self) -> PeerId {
self.this_peer_id
}
async fn create_collection_path(&self, collection_name: &str) -> Result<PathBuf, StorageError> {
let path = self.get_collection_path(collection_name);
if path.exists() {
if CollectionConfigInternal::check(&path) {
return Err(StorageError::bad_input(format!(
"Can't create collection with name {collection_name}. Collection data already exists at {path}",
collection_name = collection_name,
path = path.display(),
)));
} else {
// Collection doesn't have a valid config, remove it
log::debug!(
"Removing invalid collection path {path} from storage",
path = path.display(),
);
tokio_fs::remove_dir_all(&path).await.map_err(|err| {
StorageError::service_error(format!(
"Can't clear directory for collection {collection_name}. Error: {err}"
))
})?;
}
}
tokio_fs::create_dir_all(&path).await.map_err(|err| {
StorageError::service_error(format!(
"Can't create directory for collection {collection_name}. Error: {err}"
))
})?;
Ok(path)
}
fn get_collection_path(&self, collection_name: &str) -> PathBuf {
Path::new(&self.storage_config.storage_path)
.join(COLLECTIONS_DIR)
.join(collection_name)
}
fn get_consensus_proposal_sender(&self) -> Result<&OperationSender, StorageError> {
self.consensus_proposal_sender
.as_ref()
.ok_or_else(|| StorageError::service_error("Qdrant is running in standalone mode"))
}
/// Insert dispatcher for access to table of contents and consensus.
pub fn with_toc_dispatcher(&self, dispatcher: TocDispatcher) {
self.toc_dispatcher.lock().replace(dispatcher);
}
pub fn get_channel_service(&self) -> &ChannelService {
&self.channel_service
}
/// Gets a copy of hardware metrics for all collections that have been collected from operations on this node.
/// The copy is intentional: it prevents 'uncontrolled' modifications of the DashMap, which does not need to be mutable to be modified.
pub fn all_hw_metrics(&self) -> HashMap<String, HardwareUsage> {
self.collection_hw_metrics
.iter()
.map(|i| {
let key = i.key().clone();
let hw_usage = HardwareUsage {
cpu: i.get_cpu(),
payload_io_read: i.get_payload_io_read(),
payload_io_write: i.get_payload_io_write(),
payload_index_io_read: i.get_payload_index_io_read(),
payload_index_io_write: i.get_payload_index_io_write(),
vector_io_read: i.get_vector_io_read(),
vector_io_write: i.get_vector_io_write(),
};
(key, hw_usage)
})
.collect()
}
pub fn general_runtime_handle(&self) -> &Handle {
self.general_runtime.handle()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/snapshots.rs | lib/storage/src/content_manager/toc/snapshots.rs | use std::path::{Path, PathBuf};
use collection::common::snapshots_manager::SnapshotStorageManager;
use collection::operations::snapshot_ops::SnapshotDescription;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::shard::{PeerId, ShardId};
use collection::shards::transfer::{ShardTransfer, ShardTransferMethod};
use fs_err::tokio as tokio_fs;
use super::TableOfContent;
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::consensus_ops::ConsensusOperations;
use crate::content_manager::errors::StorageError;
use crate::rbac::CollectionPass;
impl TableOfContent {
pub fn get_snapshots_storage_manager(&self) -> Result<SnapshotStorageManager, StorageError> {
SnapshotStorageManager::new(&self.storage_config.snapshots_config).map_err(|err| {
StorageError::service_error(format!(
"Can't create snapshot storage manager. Error: {err}"
))
})
}
pub fn snapshots_path(&self) -> &str {
&self.storage_config.snapshots_path
}
pub fn collection_snapshots_path(snapshots_path: &Path, collection_name: &str) -> PathBuf {
snapshots_path.join(collection_name)
}
pub fn snapshots_path_for_collection(&self, collection_name: &str) -> PathBuf {
Self::collection_snapshots_path(
Path::new(&self.storage_config.snapshots_path),
collection_name,
)
}
pub async fn create_snapshots_path(
&self,
collection_name: &str,
) -> Result<PathBuf, StorageError> {
let snapshots_path = self.snapshots_path_for_collection(collection_name);
tokio_fs::create_dir_all(&snapshots_path)
.await
.map_err(|err| {
StorageError::service_error(format!(
"Can't create directory for snapshots {collection_name}. Error: {err}"
))
})?;
Ok(snapshots_path)
}
pub async fn create_snapshot(
&self,
collection_pass: &CollectionPass<'_>,
) -> Result<SnapshotDescription, StorageError> {
// Increment the snapshot telemetry/metric counter and account for the whole scope.
// (This must be a named variable so it doesn't get dropped prematurely!)
let _running_snapshots_guard = self.count_snapshot_creation(collection_pass.name());
// Create all directories of the derived snapshot path for this collection.
self.create_snapshots_path(collection_pass.name()).await?;
let collection = self.get_collection(collection_pass).await?;
// We want to use a temp dir inside temp_path (or storage if not specified), because the
// snapshot directory may be mounted as a network share and multiple writes to it could be slow.
let temp_dir = self.optional_temp_or_storage_temp_path()?;
Ok(collection
.create_snapshot(&temp_dir, self.this_peer_id)
.await?)
}
pub fn send_set_replica_state_proposal(
&self,
collection_name: String,
peer_id: PeerId,
shard_id: ShardId,
state: ReplicaState,
from_state: Option<ReplicaState>,
) -> Result<(), StorageError> {
if let Some(operation_sender) = &self.consensus_proposal_sender {
Self::send_set_replica_state_proposal_op(
operation_sender,
collection_name,
peer_id,
shard_id,
state,
from_state,
)?;
}
Ok(())
}
pub fn request_remove_replica(
&self,
collection_name: String,
shard_id: ShardId,
peer_id: PeerId,
) -> Result<(), StorageError> {
if let Some(proposal_sender) = &self.consensus_proposal_sender {
Self::send_remove_replica_proposal_op(
proposal_sender,
collection_name,
peer_id,
shard_id,
)?;
}
Ok(())
}
fn send_remove_replica_proposal_op(
proposal_sender: &OperationSender,
collection_name: String,
peer_id: PeerId,
shard_id: ShardId,
) -> Result<(), StorageError> {
let operation = ConsensusOperations::remove_replica(collection_name, shard_id, peer_id);
proposal_sender.send(operation)
}
pub fn request_shard_transfer(
&self,
collection_name: String,
shard_id: ShardId,
from_peer: PeerId,
to_peer: PeerId,
sync: bool,
method: Option<ShardTransferMethod>,
) -> Result<(), StorageError> {
if let Some(proposal_sender) = &self.consensus_proposal_sender {
let transfer_request = ShardTransfer {
shard_id,
to_shard_id: None,
from: from_peer,
to: to_peer,
sync,
method,
filter: None,
};
let operation = ConsensusOperations::start_transfer(collection_name, transfer_request);
proposal_sender.send(operation)?;
}
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/point_ops.rs | lib/storage/src/content_manager/toc/point_ops.rs | use std::time::Duration;
use collection::collection::Collection;
use collection::collection::distance_matrix::{
CollectionSearchMatrixRequest, CollectionSearchMatrixResponse,
};
use collection::grouping::GroupBy;
use collection::grouping::group_by::GroupRequest;
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::point_ops::WriteOrdering;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::*;
use collection::operations::universal_query::collection_query::CollectionQueryRequest;
use collection::operations::{CollectionUpdateOperations, OperationWithClockTag};
use collection::{discovery, recommendations};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use futures::TryStreamExt as _;
use futures::stream::FuturesUnordered;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::types::{ScoredPoint, ShardKey};
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use super::TableOfContent;
use crate::content_manager::errors::{StorageError, StorageResult};
use crate::rbac::Access;
impl TableOfContent {
/// Recommend points using positive and negative examples from the request
///
/// # Arguments
///
/// * `collection_name` - for what collection do we recommend
/// * `request` - [`RecommendRequestInternal`]
///
/// # Result
///
/// Points with recommendation score
#[allow(clippy::too_many_arguments)]
pub async fn recommend(
&self,
collection_name: &str,
request: RecommendRequestInternal,
read_consistency: Option<ReadConsistency>,
shard_selector: ShardSelectorInternal,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<ScoredPoint>> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
recommendations::recommend_by(
request,
&collection,
|name| self.get_collection_opt(name),
read_consistency,
shard_selector,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
/// Recommend points in a batching fashion using positive and negative examples from the request
///
/// # Arguments
///
/// * `collection_name` - for what collection do we recommend
/// * `request` - [`RecommendRequestBatch`]
///
/// # Result
///
/// Points with recommendation score
pub async fn recommend_batch(
&self,
collection_name: &str,
mut requests: Vec<(RecommendRequestInternal, ShardSelectorInternal)>,
read_consistency: Option<ReadConsistency>,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<Vec<ScoredPoint>>> {
let mut collection_pass = None;
for (request, _shard_selector) in &mut requests {
collection_pass = Some(access.check_point_op(collection_name, request)?);
}
let Some(collection_pass) = collection_pass else {
return Ok(vec![]);
};
let collection = self.get_collection(&collection_pass).await?;
recommendations::recommend_batch_by(
requests,
&collection,
|name| self.get_collection_opt(name),
read_consistency,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
/// Search in a batching fashion for the closest points using vector similarity with given restrictions defined
/// in the request
///
/// # Arguments
///
/// * `collection_name` - in what collection do we search
/// * `request` - [`CoreSearchRequestBatch`]
/// * `shard_selection` - which local shard to use
/// * `timeout` - how long to wait for the response
/// * `read_consistency` - consistency level
///
/// # Result
///
/// Points with search score
#[allow(clippy::too_many_arguments)]
pub async fn core_search_batch(
&self,
collection_name: &str,
mut request: CoreSearchRequestBatch,
read_consistency: Option<ReadConsistency>,
shard_selection: ShardSelectorInternal,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<Vec<ScoredPoint>>> {
let mut collection_pass = None;
for request in &mut request.searches {
collection_pass = Some(access.check_point_op(collection_name, request)?);
}
let Some(collection_pass) = collection_pass else {
return Ok(vec![]);
};
let collection = self.get_collection(&collection_pass).await?;
collection
.core_search_batch(
request,
read_consistency,
shard_selection,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
/// Count points in the collection.
///
/// # Arguments
///
/// * `collection_name` - in what collection do we count
/// * `request` - [`CountRequestInternal`]
/// * `shard_selection` - which local shard to use
///
/// # Result
///
/// Number of points in the collection.
///
#[allow(clippy::too_many_arguments)]
pub async fn count(
&self,
collection_name: &str,
request: CountRequestInternal,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
shard_selection: ShardSelectorInternal,
access: Access,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<CountResult> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
collection
.count(
request,
read_consistency,
&shard_selection,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
/// Return specific points by IDs
///
/// # Arguments
///
/// * `collection_name` - select from this collection
/// * `request` - [`PointRequestInternal`]
/// * `shard_selection` - which local shard to use
///
/// # Result
///
/// List of points with specified information included
#[allow(clippy::too_many_arguments)]
pub async fn retrieve(
&self,
collection_name: &str,
request: PointRequestInternal,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
shard_selection: ShardSelectorInternal,
access: Access,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<RecordInternal>> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
collection
.retrieve(
request,
read_consistency,
&shard_selection,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
#[allow(clippy::too_many_arguments)]
pub async fn group(
&self,
collection_name: &str,
request: GroupRequest,
read_consistency: Option<ReadConsistency>,
shard_selection: ShardSelectorInternal,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<GroupsResult> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
let collection_by_name = |name| self.get_collection_opt(name);
let group_by = GroupBy::new(request, &collection, collection_by_name, hw_measurement_acc)
.set_read_consistency(read_consistency)
.set_shard_selection(shard_selection)
.set_timeout(timeout);
group_by
.execute()
.await
.map(|groups| GroupsResult { groups })
.map_err(|err| err.into())
}
#[allow(clippy::too_many_arguments)]
pub async fn discover(
&self,
collection_name: &str,
request: DiscoverRequestInternal,
read_consistency: Option<ReadConsistency>,
shard_selector: ShardSelectorInternal,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<ScoredPoint>> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
discovery::discover(
request,
&collection,
|name| self.get_collection_opt(name),
read_consistency,
shard_selector,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
pub async fn discover_batch(
&self,
collection_name: &str,
mut requests: Vec<(DiscoverRequestInternal, ShardSelectorInternal)>,
read_consistency: Option<ReadConsistency>,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<Vec<ScoredPoint>>> {
let mut collection_pass = None;
for (request, _shard_selector) in &mut requests {
collection_pass = Some(access.check_point_op(collection_name, request)?);
}
let Some(collection_pass) = collection_pass else {
return Ok(vec![]);
};
let collection = self.get_collection(&collection_pass).await?;
discovery::discover_batch(
requests,
&collection,
|name| self.get_collection_opt(name),
read_consistency,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
/// Paginate over all stored points with given filtering conditions
///
/// # Arguments
///
/// * `collection_name` - which collection to use
/// * `request` - [`ScrollRequestInternal`]
/// * `shard_selection` - which local shard to use
///
/// # Result
///
/// List of points with specified information included
#[allow(clippy::too_many_arguments)]
pub async fn scroll(
&self,
collection_name: &str,
request: ScrollRequestInternal,
read_consistency: Option<ReadConsistency>,
timeout: Option<Duration>,
shard_selection: ShardSelectorInternal,
access: Access,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<ScrollResult> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
collection
.scroll_by(
request,
read_consistency,
&shard_selection,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
pub async fn query_batch(
&self,
collection_name: &str,
mut requests: Vec<(CollectionQueryRequest, ShardSelectorInternal)>,
read_consistency: Option<ReadConsistency>,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<Vec<ScoredPoint>>> {
let mut collection_pass = None;
for (request, _shard_selector) in &mut requests {
collection_pass = Some(access.check_point_op(collection_name, request)?);
}
let Some(collection_pass) = collection_pass else {
// This can happen only if there are no requests
return Ok(vec![]);
};
let collection = self.get_collection(&collection_pass).await?;
collection
.query_batch(
requests,
|name| self.get_collection_opt(name),
read_consistency,
timeout,
hw_measurement_acc,
)
.await
.map_err(|err| err.into())
}
// Return unique values for a payload key, and a count of points for each value.
#[allow(clippy::too_many_arguments)]
pub async fn facet(
&self,
collection_name: &str,
request: FacetParams,
shard_selection: ShardSelectorInternal,
read_consistency: Option<ReadConsistency>,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<FacetResponse> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
collection
.facet(
request,
shard_selection,
read_consistency,
timeout,
hw_measurement_acc,
)
.await
.map_err(StorageError::from)
}
#[allow(clippy::too_many_arguments)]
pub async fn search_points_matrix(
&self,
collection_name: &str,
request: CollectionSearchMatrixRequest,
read_consistency: Option<ReadConsistency>,
shard_selection: ShardSelectorInternal,
access: Access,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> Result<CollectionSearchMatrixResponse, StorageError> {
let collection_pass = access.check_point_op(collection_name, &request)?;
let collection = self.get_collection(&collection_pass).await?;
collection
.search_points_matrix(
request,
shard_selection,
read_consistency,
timeout,
hw_measurement_acc,
)
.await
.map_err(StorageError::from)
}
/// # Cancel safety
///
/// This method is cancel safe.
///
/// When it is cancelled, the operation may not be applied to some shard keys, but all nodes
/// are guaranteed to remain consistent.
async fn _update_shard_keys(
collection: &Collection,
shard_keys: Vec<ShardKey>,
operation: CollectionUpdateOperations,
wait: bool,
ordering: WriteOrdering,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<UpdateResult> {
// `Collection::update_from_client` is cancel safe, so this method is cancel safe.
let updates: FuturesUnordered<_> = shard_keys
.into_iter()
.map(|shard_key| {
collection.update_from_client(
operation.clone(),
wait,
ordering,
Some(shard_key),
hw_measurement_acc.clone(),
)
})
.collect();
// `Collection::update_from_client` is cancel safe, so it's safe to use `TryStreamExt::try_collect`
let results: Vec<_> = updates.try_collect().await?;
results
.into_iter()
.next()
.ok_or_else(|| StorageError::bad_input("Empty shard keys selection"))
}
/// # Cancel safety
///
/// This method is cancel safe.
#[allow(clippy::too_many_arguments)]
pub async fn update(
&self,
collection_name: &str,
operation: OperationWithClockTag,
wait: bool,
ordering: WriteOrdering,
shard_selector: ShardSelectorInternal,
access: Access,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<UpdateResult> {
let collection_pass = access.check_point_op(collection_name, &operation.operation)?;
// `TableOfContent::_update_shard_keys` and `Collection::update_from_*` are cancel safe,
// so this method is cancel safe.
let collection = self.get_collection(&collection_pass).await?;
// Ordered operation flow:
//
// ┌───────────────────┐
// │ User │
// └┬──────────────────┘
// │ Shard: None
// │ Ordering: Strong
// │ ShardKey: Some("cats")
// │ ClockTag: None
// ┌▼──────────────────┐
// │ First Node │ <- update_from_client
// └┬──────────────────┘
// │ Shard: Some(N)
// │ Ordering: Strong
// │ ShardKey: None
// │ ClockTag: None
// ┌▼──────────────────┐
// │ Leader node │ <- update_from_peer
// └┬──────────────────┘
// │ Shard: Some(N)
// │ Ordering: None(Weak)
// │ ShardKey: None
// │ ClockTag: { peer_id: IdOf(Leader node), clock_id: 1, clock_tick: 123 }
// ┌▼──────────────────┐
// │ Updating node │ <- update_from_peer
// └───────────────────┘
let _update_rate_limiter = match &self.update_rate_limiter {
Some(update_rate_limiter) => {
// We only want to rate limit the first node in the chain
if !shard_selector.is_shard_id() {
Some(update_rate_limiter.acquire().await)
} else {
None
}
}
None => None,
};
// TODO: `debug_assert(operation.clock_tag.is_none())` for `_update_shard_keys`/`update_from_client`!?
let res = match shard_selector {
ShardSelectorInternal::Empty => {
collection
.update_from_client(
operation.operation,
wait,
ordering,
None,
hw_measurement_acc.clone(),
)
.await?
}
ShardSelectorInternal::All => {
let shard_keys = collection.get_shard_keys().await;
if shard_keys.is_empty() {
collection
.update_from_client(
operation.operation,
wait,
ordering,
None,
hw_measurement_acc.clone(),
)
.await?
} else {
Self::_update_shard_keys(
&collection,
shard_keys,
operation.operation,
wait,
ordering,
hw_measurement_acc.clone(),
)
.await?
}
}
ShardSelectorInternal::ShardKey(shard_key) => {
collection
.update_from_client(
operation.operation,
wait,
ordering,
Some(shard_key),
hw_measurement_acc.clone(),
)
.await?
}
ShardSelectorInternal::ShardKeys(shard_keys) => {
Self::_update_shard_keys(
&collection,
shard_keys,
operation.operation,
wait,
ordering,
hw_measurement_acc.clone(),
)
.await?
}
ShardSelectorInternal::ShardKeyWithFallback(key) => {
let shard_keys: Vec<_> = collection
.shards_holder()
.read()
.await
.route_with_fallback_for_write(key)?
.into_iter()
.map(|(_shard_ids, shard_key)| shard_key)
.collect();
Self::_update_shard_keys(
&collection,
shard_keys,
operation.operation,
wait,
ordering,
hw_measurement_acc.clone(),
)
.await?
}
ShardSelectorInternal::ShardId(shard_selection) => {
collection
.update_from_peer(
operation,
shard_selection,
wait,
ordering,
hw_measurement_acc.clone(),
)
.await?
}
};
Ok(res)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/point_ops_internal.rs | lib/storage/src/content_manager/toc/point_ops_internal.rs | //! Methods here are for distributed internal use only.
use std::time::Duration;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::UpdateResult;
use collection::operations::universal_query::shard_query::{ShardQueryRequest, ShardQueryResponse};
use collection::shards::shard::ShardId;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::facets::{FacetParams, FacetResponse};
use super::TableOfContent;
use crate::content_manager::errors::StorageResult;
use crate::rbac::{Access, AccessRequirements};
impl TableOfContent {
pub async fn query_batch_internal(
&self,
collection_name: &str,
requests: Vec<ShardQueryRequest>,
shard_selection: ShardSelectorInternal,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<Vec<ShardQueryResponse>> {
let collection = self.get_collection_unchecked(collection_name).await?;
let res = collection
.query_batch_internal(requests, &shard_selection, timeout, hw_measurement_acc)
.await?;
Ok(res)
}
pub async fn facet_internal(
&self,
collection_name: &str,
request: FacetParams,
shard_selection: ShardSelectorInternal,
timeout: Option<Duration>,
hw_measurement_acc: HwMeasurementAcc,
) -> StorageResult<FacetResponse> {
let collection = self.get_collection_unchecked(collection_name).await?;
let res = collection
.facet(request, shard_selection, None, timeout, hw_measurement_acc)
.await?;
Ok(res)
}
pub async fn cleanup_local_shard(
&self,
collection_name: &str,
shard_id: ShardId,
access: Access,
wait: bool,
timeout: Option<Duration>,
) -> StorageResult<UpdateResult> {
let collection_pass =
access.check_collection_access(collection_name, AccessRequirements::new().write())?;
self.get_collection(&collection_pass)
.await?
.cleanup_local_shard(shard_id, wait, timeout)
.await
.map_err(Into::into)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/collection_meta_ops.rs | lib/storage/src/content_manager/toc/collection_meta_ops.rs | use std::collections::HashSet;
use std::path::Path;
use std::sync::LazyLock;
use collection::collection_state;
use collection::config::ShardingMethod;
use collection::events::{CollectionDeletedEvent, IndexCreatedEvent};
use collection::shards::collection_shard_distribution::CollectionShardDistribution;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::transfer::ShardTransfer;
use collection::shards::{CollectionId, transfer};
use common::counter::hardware_accumulator::HwMeasurementAcc;
use fs_err::tokio as tokio_fs;
use tempfile::Builder;
use super::TableOfContent;
use crate::content_manager::collection_meta_ops::*;
use crate::content_manager::collections_ops::Checker as _;
use crate::content_manager::consensus_ops::ConsensusOperations;
use crate::content_manager::errors::StorageError;
use crate::content_manager::shard_distribution::ShardDistributionProposal;
static CREATE_CUSTOM_SHARDS_IN_INITIALIZING_STATE: LazyLock<semver::Version> =
LazyLock::new(|| semver::Version::parse("1.14.2-dev").unwrap());
impl TableOfContent {
pub(super) fn perform_collection_meta_op_sync(
&self,
operation: CollectionMetaOperations,
) -> Result<bool, StorageError> {
self.general_runtime
.block_on(self.perform_collection_meta_op(operation))
}
/// ## Cancel safety
///
/// This function is **not** cancel safe.
pub async fn perform_collection_meta_op(
&self,
operation: CollectionMetaOperations,
) -> Result<bool, StorageError> {
match operation {
CollectionMetaOperations::CreateCollection(mut operation) => {
log::info!("Creating collection {}", operation.collection_name);
let distribution = match operation.take_distribution() {
None => match operation
.create_collection
.sharding_method
.unwrap_or_default()
{
ShardingMethod::Auto => {
let collection_defaults = self.storage_config.collection.as_ref();
let number_of_peers = 1; // this is a single node deployment
let suggested_shard_number = collection_defaults
.map(|config| config.get_shard_number(number_of_peers));
let shard_number = operation
.create_collection
.shard_number
.or(suggested_shard_number);
CollectionShardDistribution::all_local(shard_number, self.this_peer_id)
}
ShardingMethod::Custom => ShardDistributionProposal::empty().into(),
},
Some(distribution) => distribution.into(),
};
self.create_collection(
&operation.collection_name,
operation.create_collection,
distribution,
)
.await
}
CollectionMetaOperations::UpdateCollection(operation) => {
log::info!("Updating collection {}", operation.collection_name);
self.update_collection(operation).await
}
CollectionMetaOperations::DeleteCollection(operation) => {
log::info!("Deleting collection {}", operation.0);
self.delete_collection(&operation.0).await
}
CollectionMetaOperations::ChangeAliases(operation) => {
log::debug!("Changing aliases");
self.update_aliases(operation).await
}
CollectionMetaOperations::Resharding(collection, operation) => {
log::debug!("Resharding {operation:?} of {collection}");
self.handle_resharding(collection, operation)
.await
.map(|_| true)
}
CollectionMetaOperations::TransferShard(collection, operation) => {
log::debug!("Transfer shard {operation:?} of {collection}");
self.handle_transfer(collection, operation)
.await
.map(|()| true)
}
CollectionMetaOperations::SetShardReplicaState(operation) => {
log::debug!("Set shard replica state {operation:?}");
self.set_shard_replica_state(operation).await.map(|()| true)
}
CollectionMetaOperations::Nop { .. } => Ok(true),
CollectionMetaOperations::CreateShardKey(create_shard_key) => {
log::debug!("Create shard key {create_shard_key:?}");
self.create_shard_key(create_shard_key).await.map(|()| true)
}
CollectionMetaOperations::DropShardKey(drop_shard_key) => {
log::debug!("Drop shard key {drop_shard_key:?}");
self.drop_shard_key(drop_shard_key).await.map(|()| true)
}
CollectionMetaOperations::CreatePayloadIndex(create_payload_index) => {
log::debug!("Create payload index {create_payload_index:?}");
self.create_payload_index(create_payload_index)
.await
.map(|()| true)
}
CollectionMetaOperations::DropPayloadIndex(drop_payload_index) => {
log::debug!("Drop payload index {drop_payload_index:?}");
self.drop_payload_index(drop_payload_index)
.await
.map(|()| true)
}
#[cfg(feature = "staging")]
CollectionMetaOperations::TestSlowDown(test_slow_down) => {
test_slow_down.execute(self.this_peer_id).await;
Ok(true)
}
}
}
async fn update_collection(
&self,
mut operation: UpdateCollectionOperation,
) -> Result<bool, StorageError> {
let replica_changes = operation.take_shard_replica_changes();
let UpdateCollection {
vectors,
hnsw_config,
params,
optimizers_config,
quantization_config,
sparse_vectors,
strict_mode_config: strict_mode,
metadata,
} = operation.update_collection;
let collection = self
.get_collection_unchecked(&operation.collection_name)
.await?;
let mut recreate_optimizers = false;
if let Some(diff) = optimizers_config {
collection.update_optimizer_params_from_diff(diff).await?;
recreate_optimizers = true;
}
if let Some(diff) = params {
collection.update_params_from_diff(diff).await?;
recreate_optimizers = true;
}
if let Some(diff) = hnsw_config {
collection.update_hnsw_config_from_diff(diff).await?;
recreate_optimizers = true;
}
if let Some(diff) = vectors {
collection.update_vectors_from_diff(&diff).await?;
recreate_optimizers = true;
}
if let Some(diff) = quantization_config {
collection
.update_quantization_config_from_diff(diff)
.await?;
recreate_optimizers = true;
}
if let Some(diff) = sparse_vectors {
collection.update_sparse_vectors_from_other(&diff).await?;
recreate_optimizers = true;
}
if let Some(changes) = replica_changes {
collection.handle_replica_changes(changes).await?;
}
if let Some(strict_mode) = strict_mode {
collection.update_strict_mode_config(strict_mode).await?;
}
if let Some(metadata) = metadata {
collection.update_metadata(metadata).await?;
}
collection.print_warnings().await;
// Recreate optimizers
if recreate_optimizers {
collection.recreate_optimizers_blocking().await?;
}
Ok(true)
}
pub(super) async fn delete_collection(
&self,
collection_name: &str,
) -> Result<bool, StorageError> {
let _collection_create_guard = self.collection_create_lock.lock().await;
self.alias_persistence
.write()
.await
.remove_collection(collection_name)?;
if let Some(removed) = self.collections.write().await.remove(collection_name) {
let path = self.get_collection_path(collection_name);
if let Some(state) = removed.resharding_state().await
&& let Err(err) = removed.abort_resharding(state.key(), true).await
{
log::error!(
"Failed to abort resharding {} when deleting collection {collection_name}: \
{err}",
state.key(),
);
}
removed.stop_gracefully().await;
            // Move the collection to the ".deleted" folder to prevent accidental reuse;
            // the original collection path will be moved atomically within this
            // directory.
let removed_collections_path =
Path::new(&self.storage_config.storage_path).join(".deleted");
tokio_fs::create_dir_all(&removed_collections_path).await?;
let deleted_path = Builder::new()
                // Keep the generated name short to avoid running into too-long file names.
                // Even if the chosen amount of randomness allows collisions, the library
                // prevents creating duplicate entries within the target directory.
.rand_bytes(8)
.prefix("")
.tempdir_in(removed_collections_path)?;
tokio_fs::rename(path, &deleted_path).await?;
// Solve all issues related to this collection
issues::publish(CollectionDeletedEvent {
collection_id: collection_name.to_string(),
});
            // At this point the collection is removed from memory and moved to the ".deleted" folder.
            // The next time the service starts, the collection will not appear in the list of
            // collections, so we can take our time to delete it from disk.
tokio::spawn(async move {
if let Err(error) = tokio_fs::remove_dir_all(&deleted_path).await {
log::error!(
"Can't delete collection {} from disk. Error: {}",
deleted_path.as_ref().display(),
error
);
}
});
Ok(true)
} else {
            // We hold the collection_create lock to make sure no one is creating this collection;
            // otherwise we could be deleting the contents of a collection that is being created right now
let path = self.get_collection_path(collection_name);
if path.exists() {
log::warn!(
"Collection {collection_name} is not loaded, but its directory still exists. Deleting it."
);
tokio_fs::remove_dir_all(path).await?;
}
Ok(false)
}
}
    /// Performs several alias changes in an atomic fashion
async fn update_aliases(
&self,
operation: ChangeAliasesOperation,
) -> Result<bool, StorageError> {
// Lock all collections for alias changes
// Prevent search on partially switched collections
let collection_lock = self.collections.write().await;
let mut alias_lock = self.alias_persistence.write().await;
for action in operation.actions {
match action {
AliasOperations::CreateAlias(CreateAliasOperation {
create_alias:
CreateAlias {
collection_name,
alias_name,
},
}) => {
collection_lock.validate_collection_exists(&collection_name)?;
collection_lock.validate_collection_not_exists(&alias_name)?;
alias_lock.insert(alias_name, collection_name)?;
}
AliasOperations::DeleteAlias(DeleteAliasOperation {
delete_alias: DeleteAlias { alias_name },
}) => {
alias_lock.remove(&alias_name)?;
}
AliasOperations::RenameAlias(RenameAliasOperation {
rename_alias:
RenameAlias {
old_alias_name,
new_alias_name,
},
}) => {
alias_lock.rename_alias(&old_alias_name, new_alias_name)?;
}
};
}
Ok(true)
}
/// # Cancel safety
///
/// This method is *not* cancel safe.
async fn handle_resharding(
&self,
collection_id: CollectionId,
operation: ReshardingOperation,
) -> Result<(), StorageError> {
let collection = self.get_collection_unchecked(&collection_id).await?;
let Some(proposal_sender) = self.consensus_proposal_sender.clone() else {
return Err(StorageError::service_error(
"Can't handle resharding, this is a single node deployment",
));
};
match operation {
ReshardingOperation::Start(key) => {
let consensus = match self.toc_dispatcher.lock().as_ref() {
Some(consensus) => Box::new(consensus.clone()),
None => {
return Err(StorageError::service_error(
"Can't handle transfer, this is a single node deployment",
));
}
};
let on_finish = {
let collection_id = collection_id.clone();
let key = key.clone();
let proposal_sender = proposal_sender.clone();
async move {
let operation = ConsensusOperations::finish_resharding(collection_id, key);
if let Err(error) = proposal_sender.send(operation) {
log::error!("Can't report resharding progress to consensus: {error}");
};
}
};
let on_failure = {
let collection_id = collection_id.clone();
let key = key.clone();
async move {
if let Err(error) = proposal_sender
.send(ConsensusOperations::abort_resharding(collection_id, key))
{
log::error!("Can't report resharding progress to consensus: {error}");
};
}
};
collection
.start_resharding(key, consensus, on_finish, on_failure)
.await?;
}
ReshardingOperation::CommitRead(key) => {
collection.commit_read_hashring(&key).await?;
}
ReshardingOperation::CommitWrite(key) => {
collection.commit_write_hashring(&key).await?;
}
ReshardingOperation::Finish(key) => {
collection.finish_resharding(key).await?;
}
ReshardingOperation::Abort(key) => {
collection.abort_resharding(key, false).await?;
}
}
Ok(())
}
async fn handle_transfer(
&self,
collection_id: CollectionId,
transfer_operation: ShardTransferOperations,
) -> Result<(), StorageError> {
let collection = self.get_collection_unchecked(&collection_id).await?;
let Some(proposal_sender) = self.consensus_proposal_sender.clone() else {
return Err(StorageError::service_error(
"Can't handle transfer, this is a single node deployment",
));
};
match transfer_operation {
ShardTransferOperations::Start(transfer) => {
let collection_state::State {
shards,
transfers,
shards_key_mapping,
..
} = collection.state().await;
let all_peers: HashSet<_> = self
.channel_service
.id_to_address
.read()
.keys()
.cloned()
.collect();
let source_replicas = shards.get(&transfer.shard_id).map(|info| &info.replicas);
let destination_replicas = transfer
.to_shard_id
.and_then(|to_shard_id| shards.get(&to_shard_id))
.map(|info| &info.replicas);
// Valid transfer:
// All peers: 123, 321, 111, 222, 333
// Peers: shard_id=1 - [{123: Active}]
// Transfer: {123 -> 321}, shard_id=1
// Invalid transfer:
// All peers: 123, 321, 111, 222, 333
// Peers: shard_id=1 - [{123: Active}]
// Transfer: {321 -> 123}, shard_id=1
transfer::helpers::validate_transfer(
&transfer,
&all_peers,
source_replicas,
destination_replicas,
&transfers,
&shards_key_mapping,
)?;
let on_finish = {
let collection_id = collection_id.clone();
let transfer = transfer.clone();
let proposal_sender = proposal_sender.clone();
async move {
let operation =
ConsensusOperations::finish_transfer(collection_id, transfer);
if let Err(error) = proposal_sender.send(operation) {
log::error!("Can't report transfer progress to consensus: {error}");
};
}
};
let on_failure = {
let collection_id = collection_id.clone();
let transfer = transfer.clone();
async move {
if let Err(error) =
proposal_sender.send(ConsensusOperations::abort_transfer(
collection_id,
transfer,
"transmission failed",
))
{
log::error!("Can't report transfer progress to consensus: {error}");
};
}
};
let shard_consensus = match self.toc_dispatcher.lock().as_ref() {
Some(consensus) => Box::new(consensus.clone()),
None => {
return Err(StorageError::service_error(
"Can't handle transfer, this is a single node deployment",
));
}
};
let temp_dir = self.optional_temp_or_storage_temp_path()?;
collection
.start_shard_transfer(
transfer,
shard_consensus,
temp_dir,
on_finish,
on_failure,
)
.await?;
}
ShardTransferOperations::Restart(transfer_restart) => {
let transfers: HashSet<transfer::ShardTransfer> =
collection.state().await.transfers;
let transfer_key = transfer_restart.key();
let Some(old_transfer) = transfer::helpers::get_transfer(&transfer_key, &transfers)
else {
return Err(StorageError::bad_request(format!(
"There is no transfer for shard {} from {} to {}",
transfer_key.shard_id, transfer_key.from, transfer_key.to,
)));
};
if old_transfer.method == Some(transfer_restart.method) {
return Err(StorageError::bad_request(format!(
"Cannot restart transfer for shard {} from {} to {}, its configuration did not change",
transfer_restart.shard_id, transfer_restart.from, transfer_restart.to,
)));
}
// Abort and start transfer
Box::pin(self.handle_transfer(
collection_id.clone(),
ShardTransferOperations::Abort {
transfer: transfer_restart.key(),
reason: "restart transfer".into(),
},
))
.await?;
let new_transfer = ShardTransfer {
shard_id: transfer_restart.shard_id,
to_shard_id: None,
from: transfer_restart.from,
to: transfer_restart.to,
sync: old_transfer.sync, // Preserve sync flag from the old transfer
method: Some(transfer_restart.method),
filter: None,
};
Box::pin(
self.handle_transfer(
collection_id,
ShardTransferOperations::Start(new_transfer),
),
)
.await?;
}
ShardTransferOperations::Finish(transfer) => {
// Validate transfer exists to prevent double handling
transfer::helpers::validate_transfer_exists(
&transfer.key(),
&collection.state().await.transfers,
)?;
collection.finish_shard_transfer(transfer, None).await?;
}
ShardTransferOperations::RecoveryToPartial(transfer)
| ShardTransferOperations::SnapshotRecovered(transfer) => {
// Validate transfer exists to prevent double handling
transfer::helpers::validate_transfer_exists(
&transfer,
&collection.state().await.transfers,
)?;
let collection = self.get_collection_unchecked(&collection_id).await?;
let current_state = collection
.state()
.await
.shards
.get(&transfer.shard_id)
.and_then(|info| info.replicas.get(&transfer.to))
.copied();
let Some(current_state) = current_state else {
return Err(StorageError::bad_input(format!(
"Replica {} of {collection_id}:{} does not exist",
transfer.to, transfer.shard_id,
)));
};
match current_state {
ReplicaState::PartialSnapshot | ReplicaState::Recovery => (),
_ => {
return Err(StorageError::bad_input(format!(
"Replica {} of {collection_id}:{} has unexpected {current_state:?} \
(expected {:?} or {:?})",
transfer.to,
transfer.shard_id,
ReplicaState::PartialSnapshot,
ReplicaState::Recovery,
)));
}
}
log::debug!(
"Set shard replica state from {current_state:?} to {:?}",
ReplicaState::Partial,
);
collection
.set_shard_replica_state(
transfer.shard_id,
transfer.to,
ReplicaState::Partial,
Some(current_state),
)
.await?;
}
ShardTransferOperations::Abort { transfer, reason } => {
// Validate transfer exists to prevent double handling
transfer::helpers::validate_transfer_exists(
&transfer,
&collection.state().await.transfers,
)?;
log::warn!("Aborting shard transfer: {reason}");
collection
.abort_shard_transfer_and_resharding(transfer, None)
.await?;
}
};
Ok(())
}
async fn set_shard_replica_state(
&self,
operation: SetShardReplicaState,
) -> Result<(), StorageError> {
self.get_collection_unchecked(&operation.collection_name)
.await?
.set_shard_replica_state(
operation.shard_id,
operation.peer_id,
operation.state,
operation.from_state,
)
.await?;
Ok(())
}
/// ## Cancel safety
///
/// This function is **not** cancel safe.
async fn create_shard_key(&self, operation: CreateShardKey) -> Result<(), StorageError> {
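        // Prefer an explicit initial state from the operation if provided. Otherwise create the
        // replicas in `Initializing` state, but only in distributed mode and when all peers are
        // recent enough (see `CREATE_CUSTOM_SHARDS_IN_INITIALIZING_STATE`); older clusters fall
        // back to `Active`.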
let use_initializing_state = self.is_distributed()
&& self
.get_channel_service()
.all_peers_at_version(&CREATE_CUSTOM_SHARDS_IN_INITIALIZING_STATE);
let init_state = if let Some(initial_state) = operation.initial_state {
initial_state
} else if use_initializing_state {
ReplicaState::Initializing
} else {
ReplicaState::Active
};
self.get_collection_unchecked(&operation.collection_name)
.await?
.create_shard_key(operation.shard_key, operation.placement, init_state)
.await?;
Ok(())
}
async fn drop_shard_key(&self, operation: DropShardKey) -> Result<(), StorageError> {
self.get_collection_unchecked(&operation.collection_name)
.await?
.drop_shard_key(operation.shard_key)
.await?;
Ok(())
}
async fn create_payload_index(
&self,
operation: CreatePayloadIndex,
) -> Result<(), StorageError> {
        // We measure hardware usage on the collection level here to avoid touching consensus for
        // measurements, while still accounting for payload index creation on all nodes.
let collection_hw_acc = HwMeasurementAcc::new_with_metrics_drain(
self.get_collection_hw_metrics(operation.collection_name.clone()),
);
self.get_collection_unchecked(&operation.collection_name)
.await?
.create_payload_index(
operation.field_name.clone(),
operation.field_schema,
collection_hw_acc,
)
.await?;
// We can solve issues related to this missing index
issues::publish(IndexCreatedEvent {
collection_id: operation.collection_name,
field_name: operation.field_name,
});
Ok(())
}
async fn drop_payload_index(&self, operation: DropPayloadIndex) -> Result<(), StorageError> {
self.get_collection_unchecked(&operation.collection_name)
.await?
.drop_payload_index(operation.field_name)
.await?;
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/create_collection.rs | lib/storage/src/content_manager/toc/create_collection.rs | use std::num::NonZeroU32;
use collection::collection::Collection;
use collection::config::{self, CollectionConfigInternal, CollectionParams, ShardingMethod};
use collection::operations::config_diff::DiffConfig as _;
use collection::operations::types::{CollectionResult, VectorsConfig};
use collection::shards::collection_shard_distribution::CollectionShardDistribution;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::shard::{PeerId, ShardId};
use super::TableOfContent;
use crate::content_manager::collection_meta_ops::*;
use crate::content_manager::collections_ops::Checker as _;
use crate::content_manager::consensus_ops::ConsensusOperations;
use crate::content_manager::errors::StorageError;
impl TableOfContent {
pub(super) async fn create_collection(
&self,
collection_name: &str,
operation: CreateCollection,
collection_shard_distribution: CollectionShardDistribution,
) -> Result<bool, StorageError> {
        // Collection creation requires multiple file operations before the collection can
        // actually be registered in the service. To prevent parallel writing of these files,
        // we use this lock.
let collection_create_guard = self.collection_create_lock.lock().await;
let CreateCollection {
mut vectors,
shard_number,
sharding_method,
on_disk_payload,
hnsw_config: hnsw_config_diff,
wal_config: wal_config_diff,
optimizers_config: optimizers_config_diff,
replication_factor,
write_consistency_factor,
quantization_config,
sparse_vectors,
strict_mode_config,
uuid,
metadata,
} = operation;
{
let collections = self.collections.read().await;
collections.validate_collection_not_exists(collection_name)?;
if let Some(max_collections) = self.storage_config.max_collections
&& collections.len() >= max_collections
{
return Err(StorageError::bad_request(format!(
"Can't create collection with name {collection_name}. Max collections limit reached: {max_collections}",
)));
}
}
if self
.alias_persistence
.read()
.await
.check_alias_exists(collection_name)
{
return Err(StorageError::bad_input(format!(
"Can't create collection with name {collection_name}. Alias with the same name already exists",
)));
}
let collection_path = self.create_collection_path(collection_name).await?;
        // Derive the snapshots path for the collection to be used across collection operations;
        // the directories for snapshots are only created when the create-snapshot API is invoked.
let snapshots_path = self.snapshots_path_for_collection(collection_name);
let collection_defaults_config = self.storage_config.collection.as_ref();
let default_shard_number = collection_defaults_config
.and_then(|x| x.shard_number)
.unwrap_or_else(|| config::default_shard_number().get());
let shard_number = match sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
if let Some(shard_number) = shard_number {
debug_assert_eq!(
shard_number as usize,
collection_shard_distribution.shard_count(),
"If shard number was supplied then this exact number should be used in a distribution",
);
shard_number
} else {
collection_shard_distribution.shard_count() as u32
}
}
ShardingMethod::Custom => {
if let Some(shard_number) = shard_number {
shard_number
} else {
default_shard_number
}
}
};
let replication_factor = replication_factor
.or_else(|| collection_defaults_config.and_then(|i| i.replication_factor))
.unwrap_or_else(|| config::default_replication_factor().get());
let write_consistency_factor = write_consistency_factor
.or_else(|| collection_defaults_config.and_then(|i| i.write_consistency_factor))
.unwrap_or_else(|| config::default_write_consistency_factor().get());
// Apply default vector config values if not set.
let vectors_defaults = collection_defaults_config.and_then(|i| i.vectors.as_ref());
if let Some(vectors_defaults) = vectors_defaults {
match &mut vectors {
VectorsConfig::Single(s) => {
if let Some(on_disk_default) = vectors_defaults.on_disk {
s.on_disk.get_or_insert(on_disk_default);
}
}
VectorsConfig::Multi(m) => {
for (_, vec_params) in m.iter_mut() {
if let Some(on_disk_default) = vectors_defaults.on_disk {
vec_params.on_disk.get_or_insert(on_disk_default);
}
}
}
};
}
let collection_params = CollectionParams {
vectors,
sparse_vectors,
shard_number: NonZeroU32::new(shard_number)
.ok_or_else(|| StorageError::bad_input("`shard_number` cannot be 0"))?,
sharding_method,
on_disk_payload: on_disk_payload.unwrap_or(self.storage_config.on_disk_payload),
replication_factor: NonZeroU32::new(replication_factor).ok_or_else(|| {
StorageError::BadInput {
description: "`replication_factor` cannot be 0".to_string(),
}
})?,
write_consistency_factor: NonZeroU32::new(write_consistency_factor).ok_or_else(
|| StorageError::BadInput {
description: "`write_consistency_factor` cannot be 0".to_string(),
},
)?,
read_fan_out_factor: None,
};
let wal_config = self.storage_config.wal.update_opt(wal_config_diff.as_ref());
let optimizer_config = self
.storage_config
.optimizers
.update_opt(optimizers_config_diff.as_ref());
let hnsw_config = self
.storage_config
.hnsw_index
.update_opt(hnsw_config_diff.as_ref());
let quantization_config = match quantization_config {
None => self
.storage_config
.collection
.as_ref()
.and_then(|i| i.quantization.clone()),
Some(diff) => Some(diff),
};
let strict_mode_config = match strict_mode_config {
Some(diff) => {
let default_config = self
.storage_config
.collection
.as_ref()
.and_then(|i| i.strict_mode.clone())
.unwrap_or_default();
Some(default_config.update(&diff))
}
None => self
.storage_config
.collection
.as_ref()
.and_then(|i| i.strict_mode.as_ref())
.cloned(),
};
let storage_config = self
.storage_config
.to_shared_storage_config(self.is_distributed())
.into();
let collection_config = CollectionConfigInternal {
wal_config,
params: collection_params,
optimizer_config,
hnsw_config,
quantization_config,
strict_mode_config,
uuid,
metadata,
};
        // No shard key mapping on creation; shard keys are set up after the collection is created
let shard_key_mapping = None;
let collection = Collection::new(
collection_name.to_string(),
self.this_peer_id,
&collection_path,
&snapshots_path,
&collection_config,
storage_config,
collection_shard_distribution,
shard_key_mapping,
self.channel_service.clone(),
Self::change_peer_from_state_callback(
self.consensus_proposal_sender.clone(),
collection_name.to_string(),
ReplicaState::Dead,
),
Self::request_shard_transfer_callback(
self.consensus_proposal_sender.clone(),
collection_name.to_string(),
),
Self::abort_shard_transfer_callback(
self.consensus_proposal_sender.clone(),
collection_name.to_string(),
),
Some(self.search_runtime.handle().clone()),
Some(self.update_runtime.handle().clone()),
self.optimizer_resource_budget.clone(),
self.storage_config.optimizers_overwrite.clone(),
)
.await?;
collection.print_warnings().await;
let local_shards = collection.get_local_shards().await;
{
let mut write_collections = self.collections.write().await;
write_collections.validate_collection_not_exists(collection_name)?;
let existing_collection =
write_collections.insert(collection_name.to_string(), collection);
if let Some(existing_collection) = existing_collection {
debug_assert!(
false,
"Collection `{collection_name}` was not expected to exist"
);
existing_collection.stop_gracefully().await;
}
self.telemetry.init_snapshot_telemetry(collection_name);
}
drop(collection_create_guard);
        // Notify that the collection is created and ready to use
for shard_id in local_shards {
self.on_peer_created(collection_name.to_string(), self.this_peer_id, shard_id)
.await?;
}
Ok(true)
}
async fn on_peer_created(
&self,
collection_name: String,
peer_id: PeerId,
shard_id: ShardId,
) -> CollectionResult<()> {
if let Some(proposal_sender) = &self.consensus_proposal_sender {
let operation =
ConsensusOperations::initialize_replica(collection_name.clone(), shard_id, peer_id);
if let Err(send_error) = proposal_sender.send(operation) {
log::error!(
"Can't send proposal to deactivate replica on peer {peer_id} of shard {shard_id} of collection {collection_name}. Error: {send_error}",
);
}
} else {
// Just activate the shard
let collections = self.collections.read().await;
if let Some(collection) = collections.get(&collection_name) {
collection
.set_shard_replica_state(
shard_id,
peer_id,
ReplicaState::Active,
Some(ReplicaState::Initializing),
)
.await?;
}
}
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/toc/temp_directories.rs | lib/storage/src/content_manager/toc/temp_directories.rs | use std::path::{Path, PathBuf};
use collection::operations::types::{CollectionError, CollectionResult};
use fs_err as fs;
use crate::content_manager::toc::TableOfContent;
const TEMP_SUBDIR_NAME: &str = "tmp";
const FILE_UPLOAD_SUBDIR_NAME: &str = "upload";
/// Functions for managing temporary storages of TOC.
///
/// The directory structure is as follows:
///
/// ./snapshots
/// └── tmp
/// └── (tempdirs)
/// ./optional_temp_path (if specified)
/// └── tmp
/// └── upload
/// └── (tempdirs)
/// ./storage
/// └── tmp
/// └── (tempdirs)
///
/// optional_temp_path can be used instead of `snapshots/tmp` or `storage/tmp`
/// to speed up processing.
///
/// Assume the temp directories may be located on different filesystems, so the choice between
/// them should be made based on performance considerations.
///
/// Subdirectories are required for simpler cleanup at the start of the process.
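///
/// For example, with the optional temp path unset, `optional_temp_or_snapshot_temp_path` resolves
/// to `./snapshots/tmp` and `optional_temp_or_storage_temp_path` to `./storage/tmp`; with it set,
/// both resolve to `<optional_temp_path>/tmp` instead.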
impl TableOfContent {
pub fn temp_path(&self) -> Option<&str> {
self.storage_config.temp_path.as_deref()
}
fn get_snapshots_temp_path(&self) -> PathBuf {
Path::new(self.snapshots_path()).join(TEMP_SUBDIR_NAME)
}
fn get_storage_temp_path(&self) -> PathBuf {
Path::new(self.storage_path()).join(TEMP_SUBDIR_NAME)
}
fn get_optional_temp_path(&self) -> Option<PathBuf> {
self.temp_path()
.map(|path| Path::new(path).join(TEMP_SUBDIR_NAME))
}
/// Get temporary storage path inside the `snapshots` directory.
pub fn snapshots_temp_path(&self) -> CollectionResult<PathBuf> {
let path = self.get_snapshots_temp_path();
if !path.exists() {
fs::create_dir_all(&path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to create snapshots temp directory at {}: {:?}",
path.display(),
e,
))
})?;
}
Ok(path)
}
/// Get temporary storage path inside the `storage` directory.
pub fn storage_temp_path(&self) -> CollectionResult<PathBuf> {
let path = self.get_storage_temp_path();
if !path.exists() {
fs::create_dir_all(&path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to create storage temp directory at {}: {:?}",
path.display(),
e,
))
})?;
}
Ok(path)
}
/// Get temporary storage path inside the `optional_temp_path` directory.
pub fn optional_temp_path(&self) -> CollectionResult<Option<PathBuf>> {
if let Some(path) = self.get_optional_temp_path() {
if !path.exists() {
fs::create_dir_all(&path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to create optional temp directory at {}: {:?}",
path.display(),
e,
))
})?;
}
Ok(Some(path))
} else {
Ok(None)
}
}
/// Get directory for snapshots-related temporary files.
/// If the optional_temp_path is specified, it will be used instead of snapshots_temp_path.
pub fn optional_temp_or_snapshot_temp_path(&self) -> CollectionResult<PathBuf> {
match self.optional_temp_path() {
Ok(Some(path)) => Ok(path),
Ok(None) => self.snapshots_temp_path(),
Err(err) => Err(err),
}
}
/// Get directory for storage-related temporary files.
/// If the optional_temp_path is specified, it will be used instead of storage_temp_path.
pub fn optional_temp_or_storage_temp_path(&self) -> CollectionResult<PathBuf> {
match self.optional_temp_path() {
Ok(Some(path)) => Ok(path),
Ok(None) => self.storage_temp_path(),
Err(err) => Err(err),
}
}
pub fn upload_dir(&self) -> CollectionResult<PathBuf> {
let tmp_storage_dir = match self.optional_temp_path() {
Ok(Some(path)) => path,
Ok(None) => self.snapshots_temp_path()?,
Err(err) => return Err(err),
};
let upload_dir = tmp_storage_dir.join(FILE_UPLOAD_SUBDIR_NAME);
if !upload_dir.exists() {
fs::create_dir_all(&upload_dir).map_err(|e| {
CollectionError::service_error(format!(
"Failed to create upload directory at {}: {:?}",
upload_dir.display(),
e,
))
})?;
}
Ok(upload_dir)
}
pub fn clear_all_tmp_directories(&self) -> CollectionResult<()> {
let snapshots_temp_path = self.get_snapshots_temp_path();
let storage_temp_path = self.get_storage_temp_path();
let optional_temp_path = self.get_optional_temp_path();
if snapshots_temp_path.exists() {
fs::remove_dir_all(&snapshots_temp_path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to remove snapshots temp directory at {}: {:?}",
snapshots_temp_path.display(),
e,
))
})?;
}
if storage_temp_path.exists() {
fs::remove_dir_all(&storage_temp_path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to remove storage temp directory at {}: {:?}",
storage_temp_path.display(),
e,
))
})?;
}
if let Some(path) = optional_temp_path
&& path.exists()
{
fs::remove_dir_all(&path).map_err(|e| {
CollectionError::service_error(format!(
"Failed to remove optional temp directory at {}: {:?}",
path.display(),
e,
))
})?;
}
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus/operation_sender.rs | lib/storage/src/content_manager/consensus/operation_sender.rs | use std::sync::mpsc::Sender;
use parking_lot::Mutex;
use crate::{ConsensusOperations, StorageError};
/// Structure used to notify consensus about an operation
pub struct OperationSender(Mutex<Sender<ConsensusOperations>>);
impl OperationSender {
pub fn new(sender: Sender<ConsensusOperations>) -> Self {
OperationSender(Mutex::new(sender))
}
pub fn send(&self, operation: ConsensusOperations) -> Result<(), StorageError> {
self.0.lock().send(operation)?;
Ok(())
}
}
impl Clone for OperationSender {
fn clone(&self) -> Self {
OperationSender::new(self.0.lock().clone())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus/entry_queue.rs | lib/storage/src/content_manager/consensus/entry_queue.rs | use serde::{Deserialize, Serialize};
pub type EntryId = u64;
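/// Progress of applying committed consensus entries on this peer.
///
/// Holds `Some((current, last))`, where `current` is the oldest entry that has not been applied
/// yet and `last` is the last committed entry that has to be applied. The queue is exhausted once
/// `current > last`.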
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
pub struct EntryApplyProgressQueue(Option<(EntryId, EntryId)>);
impl EntryApplyProgressQueue {
pub fn new(first: EntryId, last: EntryId) -> Self {
Self(Some((first, last)))
}
/// Return oldest un-applied entry id if any
pub fn current(&self) -> Option<EntryId> {
match self.0 {
Some((current_index, last_index)) => {
if current_index > last_index {
None
} else {
Some(current_index)
}
}
None => None,
}
}
pub fn applied(&mut self) {
if let Some((current_index, _)) = &mut self.0 {
*current_index += 1;
}
}
pub fn get_last_applied(&self) -> Option<EntryId> {
match &self.0 {
Some((0, _)) => None,
Some((current, _)) => Some(current - 1),
None => None,
}
}
pub fn set_from_snapshot(&mut self, snapshot_at_commit: EntryId) {
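        // After this, the queue is considered empty (`current > last`), while `get_last_applied`
        // reports `snapshot_at_commit` as the last applied entry.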
self.0 = Some((snapshot_at_commit + 1, snapshot_at_commit))
}
pub fn len(&self) -> usize {
match self.0 {
None => 0,
Some((current, last)) => (last as isize - current as isize + 1) as usize,
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus/mod.rs | lib/storage/src/content_manager/consensus/mod.rs | pub mod consensus_wal;
pub mod entry_queue;
pub mod operation_sender;
pub mod persistent;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus/consensus_wal.rs | lib/storage/src/content_manager/consensus/consensus_wal.rs | use std::cmp;
use std::path::Path;
use fs_err as fs;
use prost_for_raft::Message;
use protobuf::Message as _;
use raft::eraftpb::Entry as RaftEntry;
use wal::Wal;
use crate::StorageError;
use crate::content_manager::consensus_manager;
use crate::content_manager::consensus_ops::ConsensusOperations;
const COLLECTIONS_META_WAL_DIR: &str = "collections_meta_wal";
#[derive(Debug)]
pub struct ConsensusOpWal {
wal: Wal,
/// This value represents which entries are compacted in the WAL.
/// If the record is below this value, it is considered compacted.
/// If the record is equal to or greater than this value, it is considered not compacted.
///
/// Note: this value uses Raft index, not WAL index.
    /// Raft indices always start from 1, while WAL indices represent physical offsets in the file,
    /// so they can start at larger values if the WAL has actually been compacted.
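    ///
    /// For example, with `compacted_until_raft_index == 5`, reading an entry with Raft index below
    /// 5 yields `raft::StorageError::Compacted`, while entries at index 5 and above (if physically
    /// present) are still served.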
compacted_until_raft_index: u64,
}
impl ConsensusOpWal {
pub fn new(storage_path: &Path) -> Self {
let collections_meta_wal_path = storage_path.join(COLLECTIONS_META_WAL_DIR);
fs::create_dir_all(&collections_meta_wal_path)
.expect("Can't create consensus WAL directory");
let wal = Wal::open(collections_meta_wal_path).expect("Can't open consensus WAL");
Self {
wal,
// If we load WAL, we don't know if it was compacted or not.
// We can run `compact` to set this value correctly.
// But even if we don't, the worst thing that can happen is that we will read some
// entries that are already compacted.
compacted_until_raft_index: 0,
}
}
pub fn clear(&mut self) -> Result<(), StorageError> {
log::debug!("Clearing consensus WAL");
self.wal.clear()?;
self.wal.flush_open_segment()?;
Ok(())
}
pub fn entry(&self, raft_index: u64) -> raft::Result<RaftEntry> {
// Raft entries are expected to have index starting from 1
if raft_index < 1 {
return Err(raft::Error::Store(raft::StorageError::Unavailable));
}
if raft_index < self.compacted_until_raft_index {
return Err(raft::Error::Store(raft::StorageError::Compacted));
}
let wal_index = self
.index_offset()?
.try_raft_to_wal(raft_index)
.ok_or_else(|| raft::Error::Store(raft::StorageError::Compacted))?;
self.entry_by_wal_index(wal_index)
}
/// Get all entries in the given range
///
/// The end of the range is exclusive (`[from_raft_index, until_raft_index)`).
/// A specified `max_size_bytes` will be ignored for the first entry.
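    ///
    /// For example, `entries(2, 5, None)` returns the entries with Raft indices 2, 3 and 4,
    /// clamped to the range that is actually present (and not compacted) in the WAL.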
pub fn entries(
&self,
from_raft_index: u64,
until_raft_index: u64,
max_size_bytes: Option<u64>,
) -> raft::Result<Vec<RaftEntry>> {
let offset = self.index_offset()?;
// TODO
let from_raft_index = cmp::max(from_raft_index, self.compacted_until_raft_index);
// Map requested Raft indices to WAL indices
let from_wal_index = offset.raft_to_wal(from_raft_index);
let until_wal_index = offset.raft_to_wal(until_raft_index);
// Bound mapped WAL indices between first/last WAL indices
let from_wal_index = cmp::max(from_wal_index, offset.wal_index);
let until_wal_index = cmp::min(until_wal_index, offset.wal_index + self.wal.num_entries());
// `Some(u64::MAX)` and `None` are treated as "no max size"
// `Some(0)` is treated as "return single entry"
//
// See:
// - https://docs.rs/raft/latest/raft/storage/trait.Storage.html#tymethod.entries
// - https://github.com/tikv/raft-rs/blob/v0.7.0/src/util.rs#L56-L71
let max_size_bytes = match max_size_bytes {
Some(u64::MAX) | None => None,
Some(max_size_bytes) => Some(max_size_bytes),
};
let mut size_bytes = 0_u64;
let mut entries = Vec::with_capacity(until_wal_index.saturating_sub(from_wal_index) as _);
for wal_index in from_wal_index..until_wal_index {
let entry = self.entry_by_wal_index(wal_index)?;
if let Some(max_size_bytes) = max_size_bytes {
size_bytes = size_bytes.saturating_add(entry.compute_size().into());
if size_bytes >= max_size_bytes && !entries.is_empty() {
break;
}
}
entries.push(entry);
}
Ok(entries)
}
pub fn first_entry(&self) -> Result<Option<RaftEntry>, StorageError> {
let Some(entry) = self.entry_by_wal_index_impl(self.wal.first_index())? else {
return Ok(None);
};
if entry.index >= self.compacted_until_raft_index {
// If the first physical entry is not compacted, return it
Ok(Some(entry))
} else {
// If it is compacted, then we need to find the first non-compacted entry
let wal_index = IndexOffset::new(self.wal.first_index(), &entry)
.try_raft_to_wal(self.compacted_until_raft_index);
let Some(wal_index) = wal_index else {
return Ok(None);
};
self.entry_by_wal_index_impl(wal_index)
}
}
pub fn last_entry(&self) -> Result<Option<RaftEntry>, StorageError> {
let Some(entry) = self.entry_by_wal_index_impl(self.wal.last_index())? else {
return Ok(None);
};
if entry.index >= self.compacted_until_raft_index {
Ok(Some(entry))
} else {
Ok(None)
}
}
pub fn append_entries(&mut self, new_entries: Vec<RaftEntry>) -> Result<(), StorageError> {
if new_entries.is_empty() {
return Ok(());
}
// Calculate WAL to Raft index offset
let mut current_index_offset = self.index_offset_impl()?;
// Use single buffer to encode all new entries to reduce allocations
let mut buf = Vec::new();
for new_entry in new_entries {
// Check that new entry index was not already *logically* compacted
if new_entry.index < self.compacted_until_raft_index {
return Err(StorageError::service_error(format!(
"Can't append entry with Raft index {}, \
because WAL is already *logically* compacted at Raft index {}",
new_entry.index, self.compacted_until_raft_index,
)));
}
// If WAL is not empty, check that new entry index is within WAL bounds
if let Some(offset) = current_index_offset {
// Check that new entry index was not already *physically* compacted (it's not less
// than first WAL index)
let Some(new_entry_wal_index) = offset.try_raft_to_wal(new_entry.index) else {
return Err(StorageError::service_error(format!(
"Can't append entry with Raft index {}, \
because WAL is already *physically* compacted at Raft index {}",
new_entry.index, offset.raft_index,
)));
};
let next_wal_index = self.wal.last_index() + 1;
// Check that new entry index is sequential (it's not greater than next WAL index),
// or truncate entries at the tail of WAL, if it overwrites some
#[allow(clippy::comparison_chain)] // stupid ahh diagnostics 🙄
if new_entry_wal_index > next_wal_index {
return Err(StorageError::service_error(format!(
"Can't append entry with Raft index {} (expected WAL index {}), \
because last entry in WAL is at WAL index {}, \
and all entries have to be sequential",
new_entry.index,
new_entry_wal_index,
self.wal.last_index(),
)));
} else if new_entry_wal_index < next_wal_index {
log::debug!(
"Truncating conflicting WAL entries from Raft index {} \
(WAL index {new_entry_wal_index})",
new_entry.index,
);
self.wal.truncate(new_entry_wal_index)?;
}
}
if log::log_enabled!(log::Level::Debug) {
if let Ok(op) = ConsensusOperations::try_from(&new_entry) {
log::debug!(
"Appending operation, term: {}, index: {}, entry: {op:?}",
new_entry.term,
new_entry.index,
);
} else {
log::debug!("Appending entry: {new_entry:?}");
}
}
buf.clear();
new_entry.encode(&mut buf)?;
#[cfg_attr(not(debug_assertions), expect(unused_variables))]
let new_entry_wal_index = self.wal.append(&buf)?;
#[cfg(debug_assertions)]
{
// Assert that we calculated indices (and truncated WAL) correctly, and new entry
// was inserted at expected WAL index
let expected_new_entry_wal_index = current_index_offset
.map_or(Some(0), |offset| offset.try_raft_to_wal(new_entry.index))
.expect("new entry can't overwrite already compacted WAL entries");
debug_assert_eq!(
new_entry_wal_index, expected_new_entry_wal_index,
"WAL index of inserted entry does not match its expected WAL index, \
Raft index: {}, inserted at WAL index: {}, expected WAL index: {}",
new_entry.index, new_entry_wal_index, expected_new_entry_wal_index,
);
}
// Calculate WAL to Raft index offset, if we inserted first entry into empty WAL
if current_index_offset.is_none() {
current_index_offset = self.index_offset_impl()?;
}
}
// Flush WAL to disk
self.wal.flush_open_segment()?;
Ok(())
}
pub fn compact(&mut self, until_raft_index: u64) -> Result<(), StorageError> {
// Check if WAL is empty
let Some(offset) = self.index_offset_impl()? else {
return Ok(());
};
// Check if WAL is already *logically* compacted
if until_raft_index <= self.compacted_until_raft_index {
return Ok(());
}
// Check if WAL is already *physically* compacted (this should not happen, but we can handle
// it gracefully)
let Some(compact_until_wal_index) = offset.try_raft_to_wal(until_raft_index) else {
log::warn!(
"WAL logical/physical compaction mismatch: \
WAL is logically truncated at Raft index {}, \
but it's physically truncated at Raft index {} (WAL index {})",
self.compacted_until_raft_index,
offset.raft_index,
offset.wal_index,
);
self.compacted_until_raft_index = until_raft_index;
return Ok(());
};
// Bound compaction index, so that there's at least 1 entry available after compaction
// (compact *at most* until last WAL index)
let compact_until_wal_index = cmp::min(
compact_until_wal_index,
offset.wal_index + self.wal.num_entries() - 1, // there's always *at least* 1 entry, because WAL is not empty
);
log::debug!(
"Compacting WAL until Raft index {until_raft_index}/WAL index {compact_until_wal_index} \
(first WAL index {}, WAL size {})",
offset.wal_index,
self.wal.num_entries(),
);
// Compact WAL
self.compacted_until_raft_index = offset.wal_to_raft(compact_until_wal_index);
self.wal.prefix_truncate(compact_until_wal_index)?;
Ok(())
}
pub fn index_offset(&self) -> raft::Result<IndexOffset> {
let res = self.index_offset_impl();
into_raft_result(res)
}
pub fn index_offset_impl(&self) -> Result<Option<IndexOffset>, StorageError> {
let wal_index = self.wal.first_index();
let Some(entry) = self.entry_by_wal_index_impl(wal_index)? else {
return Ok(None);
};
Ok(Some(IndexOffset::new(wal_index, &entry)))
}
fn entry_by_wal_index(&self, wal_index: u64) -> raft::Result<RaftEntry> {
let res = self.entry_by_wal_index_impl(wal_index);
into_raft_result(res)
}
fn entry_by_wal_index_impl(&self, wal_index: u64) -> Result<Option<RaftEntry>, StorageError> {
let entry: Option<RaftEntry> = self
.wal
.entry(wal_index)
.map(|entry| prost_for_raft::Message::decode(entry.as_ref()))
.transpose()?;
if let Some(entry) = &entry {
// WAL index of an entry should always be *less* than its Raft index.
//
// - WAL index starts with 0
// - Raft index starts with 1
// - if WAL is compacted, then difference between Raft index and WAL index may be even greater
// - Raft indices in the WAL should always be *sequential*
//
// So, if at any point WAL index of an entry is *greater-or-equal* than its Raft index,
// it's an error in `ConsensusOpWal` logic.
debug_assert!(
wal_index < entry.index,
"WAL index of an entry is greater than (or equal to) its Raft index, \
WAL index: {wal_index}, Raft index: {}",
entry.index,
);
}
Ok(entry)
}
}
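/// Mapping between the physical WAL index of an entry and its logical Raft index.
///
/// The offset is derived from the first physical entry: e.g. if the entry at WAL index 3 carries
/// Raft index 10, then `wal_to_raft_offset` is 7 and Raft index 12 maps back to WAL index 5.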
#[derive(Copy, Clone, Debug)]
pub struct IndexOffset {
pub wal_index: u64,
pub raft_index: u64,
pub wal_to_raft_offset: u64,
}
impl IndexOffset {
pub fn new(wal_index: u64, entry: &RaftEntry) -> Self {
// WAL index of an entry should always be *less* than its Raft index, but this is already
// asserted in `entry_by_wal_index_impl`
Self {
wal_index,
raft_index: entry.index,
wal_to_raft_offset: entry.index - wal_index,
}
}
pub fn try_raft_to_wal(&self, raft_index: u64) -> Option<u64> {
raft_index.checked_sub(self.wal_to_raft_offset)
}
pub fn raft_to_wal(&self, raft_index: u64) -> u64 {
raft_index.saturating_sub(self.wal_to_raft_offset)
}
pub fn wal_to_raft(&self, wal_index: u64) -> u64 {
wal_index + self.wal_to_raft_offset
}
}
fn into_raft_result<T>(result: Result<Option<T>, StorageError>) -> raft::Result<T> {
result
.map_err(consensus_manager::raft_error_other)?
.ok_or(raft::Error::Store(raft::StorageError::Unavailable))
}
#[cfg(test)]
mod tests {
use raft::eraftpb::Entry;
use super::*;
fn init_logger() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_log_compaction_rewrite() {
init_logger();
let entries_orig = vec![
Entry {
entry_type: 0,
term: 1,
index: 1,
data: vec![1, 2, 3],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 2,
data: vec![1, 2, 3],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![1, 2, 3],
context: vec![],
sync_log: false,
},
];
let entries_new = vec![
Entry {
entry_type: 0,
term: 1,
index: 2,
data: vec![2, 2, 2],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![3, 3, 3],
context: vec![],
sync_log: false,
},
];
let temp_dir = tempfile::tempdir().unwrap();
let mut wal = ConsensusOpWal::new(temp_dir.path());
wal.append_entries(entries_orig).unwrap();
wal.append_entries(entries_new.clone()).unwrap();
let result_entries = wal.entries(1, 4, None).unwrap();
assert_eq!(result_entries.len(), 3);
assert_eq!(result_entries[0].data, vec![1, 2, 3]);
assert_eq!(result_entries[1].data, vec![2, 2, 2]);
assert_eq!(result_entries[2].data, vec![3, 3, 3]);
wal.clear().unwrap();
wal.append_entries(entries_new).unwrap();
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 2);
let broken_entry = vec![Entry {
entry_type: 0,
term: 1,
index: 1, // Index 1 can't be overwritten, because it is already compacted
data: vec![5, 5, 5],
context: vec![],
sync_log: false,
}];
// Some errors can't be corrected
assert!(matches!(
wal.append_entries(broken_entry),
Err(StorageError::ServiceError { .. })
));
}
#[test]
fn test_log_rewrite() {
init_logger();
let entries_orig = vec![
Entry {
entry_type: 0,
term: 1,
index: 1,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 2,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
];
let entries_new = vec![
Entry {
entry_type: 0,
term: 1,
index: 2,
data: vec![2, 2, 2],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![2, 2, 2],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 4,
data: vec![2, 2, 2],
context: vec![],
sync_log: false,
},
];
let temp_dir = tempfile::tempdir().unwrap();
let mut wal = ConsensusOpWal::new(temp_dir.path());
// append original entries
wal.append_entries(entries_orig).unwrap();
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 3);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 3);
let result_entries = wal.entries(1, 4, None).unwrap();
assert_eq!(result_entries.len(), 3);
assert_eq!(result_entries[0].data, vec![1, 1, 1]);
assert_eq!(result_entries[1].data, vec![1, 1, 1]);
assert_eq!(result_entries[2].data, vec![1, 1, 1]);
// drop wal to check persistence
drop(wal);
let mut wal = ConsensusOpWal::new(temp_dir.path());
// append overlapping entries
wal.append_entries(entries_new).unwrap();
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 4);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 4);
let result_entries = wal.entries(1, 5, None).unwrap();
assert_eq!(result_entries.len(), 4);
assert_eq!(result_entries[0].data, vec![1, 1, 1]); // survived the truncation
assert_eq!(result_entries[1].data, vec![2, 2, 2]);
assert_eq!(result_entries[2].data, vec![2, 2, 2]);
assert_eq!(result_entries[3].data, vec![2, 2, 2]);
// drop wal to check persistence
drop(wal);
let wal = ConsensusOpWal::new(temp_dir.path());
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 4);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 4);
}
#[test]
fn test_log_rewrite_last() {
init_logger();
let entries_orig = vec![
Entry {
entry_type: 0,
term: 1,
index: 1,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 2,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![1, 1, 1],
context: vec![],
sync_log: false,
},
];
// change only the last entry
let entries_new = vec![Entry {
entry_type: 0,
term: 1,
index: 3,
data: vec![2, 2, 2],
context: vec![],
sync_log: false,
}];
let temp_dir = tempfile::tempdir().unwrap();
let mut wal = ConsensusOpWal::new(temp_dir.path());
// append original entries
wal.append_entries(entries_orig).unwrap();
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 3);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 3);
let result_entries = wal.entries(1, 4, None).unwrap();
assert_eq!(result_entries.len(), 3);
assert_eq!(result_entries[0].data, vec![1, 1, 1]);
assert_eq!(result_entries[1].data, vec![1, 1, 1]);
assert_eq!(result_entries[2].data, vec![1, 1, 1]);
// drop wal to check persistence
drop(wal);
let mut wal = ConsensusOpWal::new(temp_dir.path());
// append overlapping entries
wal.append_entries(entries_new).unwrap();
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 3);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 3);
let result_entries = wal.entries(1, 4, None).unwrap();
assert_eq!(result_entries.len(), 3);
assert_eq!(result_entries[0].data, vec![1, 1, 1]);
assert_eq!(result_entries[1].data, vec![1, 1, 1]);
assert_eq!(result_entries[2].data, vec![2, 2, 2]); // value updated
// drop wal to check persistence
drop(wal);
let wal = ConsensusOpWal::new(temp_dir.path());
assert_eq!(wal.wal.num_segments(), 1);
assert_eq!(wal.wal.num_entries(), 3);
assert_eq!(wal.index_offset().unwrap().wal_to_raft_offset, 1);
assert_eq!(wal.first_entry().unwrap().unwrap().index, 1);
assert_eq!(wal.last_entry().unwrap().unwrap().index, 3);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/consensus/persistent.rs | lib/storage/src/content_manager/consensus/persistent.rs | use std::cmp;
use std::collections::HashMap;
use std::io::{BufReader, BufWriter, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use atomicwrites::{AllowOverwrite, AtomicFile};
use collection::operations::types::PeerMetadata;
use collection::shards::shard::PeerId;
use fs_err as fs;
use fs_err::File;
use http::Uri;
use parking_lot::RwLock;
use raft::RaftState;
use raft::eraftpb::{ConfState, HardState, SnapshotMetadata};
use serde::{Deserialize, Serialize};
use crate::StorageError;
use crate::content_manager::consensus::entry_queue::{EntryApplyProgressQueue, EntryId};
use crate::types::{PeerAddressById, PeerMetadataById};
// Deprecated, use `STATE_FILE_NAME` instead
const STATE_FILE_NAME_CBOR: &str = "raft_state";
const STATE_FILE_NAME: &str = "raft_state.json";
/// State of the Raft consensus, which should be saved between restarts.
/// The state of collections, aliases and transfers is stored as regular storage.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Persistent {
/// last known state of the Raft consensus
#[serde(with = "RaftStateDef")]
pub state: RaftState,
/// Store last applied snapshot index, required in case there is no raft change log except
/// for this last snapshot ID (term + commit)
#[serde(default)] // TODO quick fix to avoid breaking the compat. with 0.8.1
pub latest_snapshot_meta: SnapshotMetadataSer,
/// Operations to be applied, consensus considers them committed, but this peer didn't apply them yet
#[serde(default)]
pub apply_progress_queue: EntryApplyProgressQueue,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub first_voter: Option<PeerId>,
/// Last known cluster topology
#[serde(with = "serialize_peer_addresses")]
pub peer_address_by_id: Arc<RwLock<PeerAddressById>>,
#[serde(default)]
pub peer_metadata_by_id: Arc<RwLock<PeerMetadataById>>,
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub cluster_metadata: HashMap<String, serde_json::Value>,
pub this_peer_id: PeerId,
#[serde(skip)]
pub path: PathBuf,
/// Tracks if there are some unsaved changes due to the failure on save
#[serde(skip)]
pub dirty: AtomicBool,
}
impl Persistent {
pub fn state(&self) -> &RaftState {
&self.state
}
pub fn latest_snapshot_meta(&self) -> &SnapshotMetadataSer {
&self.latest_snapshot_meta
}
pub fn update_from_snapshot(
&mut self,
meta: &SnapshotMetadata,
address_by_id: PeerAddressById,
mut metadata_by_id: PeerMetadataById,
new_cluster_metadata: HashMap<String, serde_json::Value>,
) -> Result<(), StorageError> {
// IF YOU ADD NEW DATA INTO `PERSISTENT` STATE, DON'T FORGET TO ALSO ADD IT INTO RAFT SNAPSHOT!
let Self {
state,
latest_snapshot_meta,
apply_progress_queue,
first_voter: _,
peer_address_by_id,
peer_metadata_by_id,
cluster_metadata,
this_peer_id: _,
path: _,
dirty: _,
} = self;
state.conf_state = meta.get_conf_state().clone();
state.hard_state.term = cmp::max(state.hard_state.term, meta.term);
state.hard_state.commit = meta.index;
apply_progress_queue.set_from_snapshot(meta.index);
*latest_snapshot_meta = meta.into();
metadata_by_id.retain(|peer_id, _| address_by_id.contains_key(peer_id));
*peer_address_by_id.write() = address_by_id;
*peer_metadata_by_id.write() = metadata_by_id;
*cluster_metadata = new_cluster_metadata;
// Last Raft commit and last snapshot index must be equal and persisted in one operation
// Our `ConsensusManager::new` function relies on this for reconciling WAL clears
debug_assert_eq!(
state.hard_state.commit, latest_snapshot_meta.index,
"applied Raft commit and last snapshot index must be equal",
);
self.save()
}
/// Returns the persisted state, loading it from disk or initializing it for the first time
///
/// `peer_id` is used only when raft state is not found.
pub fn load_or_init(
storage_path: impl AsRef<Path>,
first_peer: bool,
reinit: bool,
peer_id: Option<PeerId>,
) -> Result<Self, StorageError> {
fs::create_dir_all(storage_path.as_ref())?;
let path_legacy = storage_path.as_ref().join(STATE_FILE_NAME_CBOR);
let path_json = storage_path.as_ref().join(STATE_FILE_NAME);
let mut state = if path_json.exists() {
log::info!("Loading raft state from {}", path_json.display());
Self::load_json(path_json.clone())?
} else if path_legacy.exists() {
log::info!("Loading raft state from {}", path_legacy.display());
let mut state = Self::load(path_legacy)?;
// migrate to json
state.path = path_json.clone();
state.save()?;
state
} else {
log::info!("Initializing new raft state at {}", path_json.display());
if let Some(peer_id) = peer_id {
log::debug!("Using peer ID: {peer_id}");
};
Self::init(path_json.clone(), first_peer, peer_id)?
};
let state = if reinit {
if first_peer {
// Re-initializing consensus for the first peer is different from the rest.
// Effectively, we should remove all other peers from voters and learners,
// assuming that other peers would need to join consensus again.
// The PeerId of the current peer should stay in the list of voters,
// so we can accept consensus operations.
state.state.conf_state.voters = vec![state.this_peer_id];
state.state.conf_state.learners = vec![];
state.state.hard_state.vote = state.this_peer_id;
state.save()?;
state
} else {
// We want to re-initialize consensus while preserving the peer ID
// which is needed for migration from one cluster to another
let keep_peer_id = state.this_peer_id;
Self::init(path_json, first_peer, Some(keep_peer_id))?
}
} else {
state
};
state.remove_unknown_peer_metadata()?;
log::debug!("State: {state:?}");
Ok(state)
}
fn remove_unknown_peer_metadata(&self) -> Result<(), StorageError> {
let is_updated = {
let mut peer_metadata = self.peer_metadata_by_id.write();
let peer_address = self.peer_address_by_id.read();
peer_metadata
.extract_if(|peer_id, _| !peer_address.contains_key(peer_id))
.count()
> 0
};
if is_updated {
self.save()?;
}
Ok(())
}
pub fn unapplied_entities_count(&self) -> usize {
self.apply_progress_queue.len()
}
pub fn apply_state_update(
&mut self,
update: impl FnOnce(&mut RaftState),
) -> Result<(), StorageError> {
let mut state = self.state.clone();
update(&mut state);
self.state = state;
self.save()
}
pub fn current_unapplied_entry(&self) -> Option<EntryId> {
self.apply_progress_queue.current()
}
pub fn entry_applied(&mut self) -> Result<(), StorageError> {
self.apply_progress_queue.applied();
self.save()
}
pub fn set_unapplied_entries(
&mut self,
first_index: EntryId,
last_index: EntryId,
) -> Result<(), StorageError> {
self.apply_progress_queue = EntryApplyProgressQueue::new(first_index, last_index);
self.save()
}
pub fn set_peer_address_by_id(
&mut self,
peer_address_by_id: PeerAddressById,
) -> Result<(), StorageError> {
*self.peer_address_by_id.write() = peer_address_by_id;
self.save()
}
pub fn insert_peer(&mut self, peer_id: PeerId, address: Uri) -> Result<(), StorageError> {
let address_display = address.to_string();
match self
.peer_address_by_id
.write()
.insert(peer_id, address.clone())
{
Some(prev_address) if prev_address != address => log::warn!(
"Replaced address of peer {peer_id} from {prev_address} to {address_display}"
),
Some(_) => log::debug!(
"Re-added peer with id {peer_id} with the same address {address_display}"
),
None => log::debug!("Added peer with id {peer_id} and address {address_display}"),
}
self.save()
}
pub fn update_peer_metadata(
&mut self,
peer_id: PeerId,
metadata: PeerMetadata,
) -> Result<(), StorageError> {
if let Some(prev_metadata) = self
.peer_metadata_by_id
.write()
.insert(peer_id, metadata.clone())
{
log::info!(
"Replaced metadata of peer {peer_id} from {prev_metadata:?} to {metadata:?}"
);
} else {
log::debug!("Added metadata for peer with id {peer_id}: {metadata:?}")
}
self.save()
}
pub fn get_cluster_metadata_keys(&self) -> Vec<String> {
self.cluster_metadata.keys().cloned().collect()
}
pub fn get_cluster_metadata_key(&self, key: &str) -> serde_json::Value {
self.cluster_metadata
.get(key)
.cloned()
.unwrap_or(serde_json::Value::Null)
}
pub fn update_cluster_metadata_key(&mut self, key: String, value: serde_json::Value) {
if !value.is_null() {
self.cluster_metadata.insert(key, value);
} else {
self.cluster_metadata.remove(&key);
}
}
pub fn last_applied_entry(&self) -> Option<u64> {
self.apply_progress_queue.get_last_applied()
}
/// Get the last applied commit and term, reflected in our current state.
pub fn applied_commit_term(&self) -> (u64, u64) {
let hard_state = &self.state().hard_state;
// Fall back to 0 because it's always less than any commit
let last_commit = self.last_applied_entry().unwrap_or(0);
(last_commit, hard_state.term)
}
pub fn first_voter(&self) -> Option<PeerId> {
self.first_voter
}
pub fn set_first_voter(&mut self, id: PeerId) -> Result<(), StorageError> {
self.first_voter = Some(id);
self.save()
}
pub fn peer_address_by_id(&self) -> PeerAddressById {
self.peer_address_by_id.read().clone()
}
pub fn peer_metadata_by_id(&self) -> PeerMetadataById {
self.peer_metadata_by_id.read().clone()
}
pub fn is_our_metadata_outdated(&self) -> bool {
self.peer_metadata_by_id
.read()
.get(&self.this_peer_id())
.is_none_or(|metadata| metadata.is_different_version())
}
pub fn this_peer_id(&self) -> PeerId {
self.this_peer_id
}
/// ## Arguments
/// `path` - full name of the file where state will be saved
///
/// `first_peer` - if this is the first peer in a new deployment (e.g. it does not bootstrap from anyone)
/// It is `None` if distributed deployment is disabled
fn init(
path: PathBuf,
first_peer: bool,
peer_id: Option<PeerId>,
) -> Result<Self, StorageError> {
// Do not generate too big peer ID, to avoid problems with serialization
// (especially in json format)
let this_peer_id = peer_id.unwrap_or_else(|| rand::random::<PeerId>() % (1 << 53) + 1);
let voters = if first_peer {
vec![this_peer_id]
} else {
// `Some(false)` - Leave the network topology empty for the peer, if it is not starting a network itself.
// This way it will not be able to become a leader and commit data
// until it joins an existing network.
vec![]
};
let state = Self {
state: RaftState {
hard_state: HardState::default(),
// For network with 1 node, set it as voter.
// First vec is voters, second is learners.
conf_state: ConfState::from((voters, vec![])),
},
apply_progress_queue: Default::default(),
first_voter: if first_peer { Some(this_peer_id) } else { None },
peer_address_by_id: Default::default(),
peer_metadata_by_id: Default::default(),
cluster_metadata: Default::default(),
this_peer_id,
path,
latest_snapshot_meta: Default::default(),
dirty: AtomicBool::new(false),
};
state.save()?;
Ok(state)
}
fn load(path: PathBuf) -> Result<Self, StorageError> {
let reader = BufReader::new(File::open(&path)?);
let mut state: Self = serde_cbor::from_reader(reader)?;
state.path = path;
Ok(state)
}
fn load_json(path: PathBuf) -> Result<Self, StorageError> {
let reader = BufReader::new(File::open(&path)?);
let mut state: Self = serde_json::from_reader(reader)?;
state.path = path;
Ok(state)
}
pub fn save(&self) -> Result<(), StorageError> {
let result = AtomicFile::new(&self.path, AllowOverwrite).write(|file| {
let mut writer = BufWriter::new(file);
serde_json::to_writer(&mut writer, self)?;
writer.flush()
});
log::trace!("Saved state: {self:?}");
self.dirty.store(result.is_err(), Ordering::Relaxed);
Ok(result?)
}
pub fn save_if_dirty(&mut self) -> Result<(), StorageError> {
if self.dirty.load(Ordering::Relaxed) {
self.save()?;
}
Ok(())
}
}
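// Hedged usage sketch (not part of the original file): a minimal test showing how
// `load_or_init` behaves on an empty directory and on a second load, plus the
// null-removes-key convention of `update_cluster_metadata_key`. It assumes `tempfile`
// is available to this crate (it is already used by the snapshot code in this crate).
#[cfg(test)]
mod persistent_state_sketch {
    use super::*;

    #[test]
    fn init_then_reload_keeps_peer_id() {
        let dir = tempfile::tempdir().unwrap();

        // First call finds no `raft_state.json`, so it initializes with the given peer ID.
        let state = Persistent::load_or_init(dir.path(), true, false, Some(42)).unwrap();
        assert_eq!(state.this_peer_id(), 42);
        assert_eq!(state.state().conf_state.voters, vec![42]);

        // Second call loads the persisted file; the explicit peer ID is ignored.
        let mut reloaded = Persistent::load_or_init(dir.path(), true, false, Some(7)).unwrap();
        assert_eq!(reloaded.this_peer_id(), 42);

        // Cluster metadata: setting a key to JSON null removes it again.
        reloaded.update_cluster_metadata_key("color".into(), serde_json::json!("red"));
        assert_eq!(reloaded.get_cluster_metadata_key("color"), serde_json::json!("red"));
        reloaded.update_cluster_metadata_key("color".into(), serde_json::Value::Null);
        assert_eq!(reloaded.get_cluster_metadata_key("color"), serde_json::Value::Null);
    }
}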
#[derive(Serialize, Deserialize, Default, Debug)]
pub struct SnapshotMetadataSer {
pub term: u64,
/// Aka: commit
pub index: u64,
}
impl From<&SnapshotMetadata> for SnapshotMetadataSer {
fn from(meta: &SnapshotMetadata) -> Self {
Self {
term: meta.term,
index: meta.index,
}
}
}
mod serialize_peer_addresses {
use std::collections::HashMap;
use std::sync::Arc;
use http::Uri;
use parking_lot::RwLock;
use serde::{self, Deserializer, Serializer};
use crate::serialize_peer_addresses;
use crate::types::PeerAddressById;
pub fn serialize<S>(
addresses: &Arc<RwLock<PeerAddressById>>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serialize_peer_addresses::serialize(&addresses.read(), serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Arc<RwLock<PeerAddressById>>, D::Error>
where
D: Deserializer<'de>,
{
let addresses: HashMap<u64, Uri> = serialize_peer_addresses::deserialize(deserializer)?;
Ok(Arc::new(RwLock::new(addresses)))
}
}
/// Definition of struct to help with serde serialization.
/// Should be used only in `[serde(with=...)]`
#[derive(Serialize, Deserialize)]
#[serde(remote = "RaftState")]
struct RaftStateDef {
#[serde(with = "HardStateDef")]
hard_state: HardState,
#[serde(with = "ConfStateDef")]
conf_state: ConfState,
}
/// Definition of struct to help with serde serialization.
/// Should be used only in `[serde(with=...)]`
#[derive(Serialize, Deserialize)]
#[serde(remote = "HardState")]
struct HardStateDef {
term: u64,
vote: u64,
commit: u64,
}
/// Definition of struct to help with serde serialization.
/// Should be used only in `[serde(with=...)]`
#[derive(Serialize, Deserialize)]
#[serde(remote = "ConfState")]
struct ConfStateDef {
voters: Vec<u64>,
learners: Vec<u64>,
voters_outgoing: Vec<u64>,
learners_next: Vec<u64>,
auto_leave: bool,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/snapshots/download.rs | lib/storage/src/content_manager/snapshots/download.rs | use std::ffi::OsString;
use std::path::Path;
use common::tempfile_ext::MaybeTempPath;
use fs_err::tokio as tokio_fs;
use futures::StreamExt;
use segment::common::BYTES_IN_MB;
use tap::Tap;
use tempfile::TempPath;
use tokio::io::AsyncWriteExt;
use url::Url;
use {fs_err as fs, reqwest};
use crate::StorageError;
fn snapshot_prefix(url: &Url) -> OsString {
Path::new(url.path())
.file_name()
.map(|x| OsString::from(x).tap_mut(|x| x.push("-")))
.unwrap_or_default()
}
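// Hedged sketch (not part of the original file): the temporary-file prefix is derived
// from the last path segment of the snapshot URL with a trailing "-" appended, and is
// empty when the URL has no file name.
#[cfg(test)]
mod snapshot_prefix_sketch {
    use super::*;

    #[test]
    fn prefix_comes_from_url_file_name() {
        let url = Url::parse("https://example.com/snapshots/my-collection.snapshot").unwrap();
        assert_eq!(
            snapshot_prefix(&url),
            OsString::from("my-collection.snapshot-")
        );

        // No file name in the path -> empty prefix, so only the ".download" suffix is used.
        let root = Url::parse("https://example.com/").unwrap();
        assert_eq!(snapshot_prefix(&root), OsString::new());
    }
}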
/// Download a remote file from `url` to `path`
///
/// Returns a `TempPath` that will delete the downloaded file once it is dropped.
/// To persist the file, use `download_file(...).keep()`.
#[must_use = "returns a TempPath, if dropped the downloaded file is deleted"]
async fn download_file(
client: &reqwest::Client,
url: &Url,
dir_path: &Path,
) -> Result<TempPath, StorageError> {
let download_start_time = tokio::time::Instant::now();
let (file, temp_path) = tempfile::Builder::new()
.prefix(&snapshot_prefix(url))
.suffix(".download")
.tempfile_in(dir_path)?
.into_parts();
let file = fs::File::from_parts::<&Path>(file, temp_path.as_ref());
log::debug!("Downloading snapshot from {url} to {temp_path:?}");
let mut file = tokio_fs::File::from_std(file);
let response = client.get(url.clone()).send().await?;
if !response.status().is_success() {
return Err(StorageError::bad_input(format!(
"Failed to download snapshot from {}: status - {}",
url,
response.status()
)));
}
let mut stream = response.bytes_stream();
let mut total_bytes_downloaded = 0u64;
while let Some(chunk_result) = stream.next().await {
let chunk = chunk_result?;
total_bytes_downloaded += chunk.len() as u64;
file.write_all(&chunk).await?;
}
file.flush().await?;
let download_duration = download_start_time.elapsed();
let total_size_mb = total_bytes_downloaded as f64 / BYTES_IN_MB as f64;
let download_speed_mbps = total_size_mb / download_duration.as_secs_f64();
log::debug!(
"Snapshot download completed: path={}, size={:.2} MB, duration={:.2}s, speed={:.2} MB/s",
temp_path.display(),
total_size_mb,
download_duration.as_secs_f64(),
download_speed_mbps
);
Ok(temp_path)
}
/// Download a snapshot from the given URI.
///
/// May return a `TempPath` if a file was downloaded from a remote source. If it is dropped, the
/// downloaded file is deleted automatically. To keep the file, `keep()` may be used.
pub async fn download_snapshot(
client: &reqwest::Client,
url: Url,
snapshots_dir: &Path,
) -> Result<MaybeTempPath, StorageError> {
match url.scheme() {
"file" => {
let local_path = url.to_file_path().map_err(|_| {
StorageError::bad_request(
"Invalid snapshot URI, file path must be absolute or on localhost",
)
})?;
if !local_path.exists() {
return Err(StorageError::bad_request(format!(
"Snapshot file {local_path:?} does not exist"
)));
}
Ok(MaybeTempPath::Persistent(local_path))
}
"http" | "https" => Ok(MaybeTempPath::Temporary(
download_file(client, &url, snapshots_dir).await?,
)),
_ => Err(StorageError::bad_request(format!(
"URL {} with schema {} is not supported",
url,
url.scheme()
))),
}
}
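// Hedged sketch (not part of the original file): the `file` scheme branch above relies
// on `Url::to_file_path`, which only succeeds for absolute local paths. The path layout
// shown here is for Unix; on Windows the resulting path would differ.
#[cfg(test)]
#[cfg(unix)]
mod file_url_sketch {
    use super::*;

    #[test]
    fn file_urls_map_to_absolute_paths() {
        let url = Url::parse("file:///tmp/my-collection.snapshot").unwrap();
        assert_eq!(url.scheme(), "file");
        assert_eq!(
            url.to_file_path().unwrap(),
            std::path::PathBuf::from("/tmp/my-collection.snapshot")
        );

        // Any other scheme (e.g. ftp) would hit the fallthrough error branch above.
        let ftp = Url::parse("ftp://example.com/snap").unwrap();
        assert_eq!(ftp.scheme(), "ftp");
    }
}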
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/snapshots/mod.rs | lib/storage/src/content_manager/snapshots/mod.rs | pub mod download;
pub mod recover;
use std::collections::HashMap;
use std::io::{BufWriter, Write};
use std::path::Path;
use collection::operations::snapshot_ops::SnapshotDescription;
use collection::operations::verification::new_unchecked_verification_pass;
use fs_err as fs;
use fs_err::tokio as tokio_fs;
use serde::{Deserialize, Serialize};
use tar::Builder as TarBuilder;
use tempfile::TempPath;
use tokio::io::AsyncWriteExt;
use tokio_util::task::AbortOnDropHandle;
use crate::content_manager::toc::FULL_SNAPSHOT_FILE_NAME;
use crate::dispatcher::Dispatcher;
use crate::rbac::{Access, AccessRequirements};
use crate::{StorageError, TableOfContent};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SnapshotConfig {
/// Map collection name to snapshot file name
pub collections_mapping: HashMap<String, String>,
/// Aliases for collections `<alias>:<collection_name>`
#[serde(default)]
pub collections_aliases: HashMap<String, String>,
}
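// Hedged sketch (not part of the original file): `SnapshotConfig` is the shape of the
// `config.json` entry written into a full snapshot archive below; `collections_aliases`
// is optional in the serialized form.
#[cfg(test)]
mod snapshot_config_sketch {
    use super::*;

    #[test]
    fn config_json_round_trip() {
        let parsed: SnapshotConfig =
            serde_json::from_str(r#"{"collections_mapping":{"col":"col-2024.snapshot"}}"#)
                .unwrap();
        assert_eq!(parsed.collections_mapping["col"], "col-2024.snapshot");
        // `#[serde(default)]` lets older configs omit the aliases map entirely.
        assert!(parsed.collections_aliases.is_empty());

        // Serializing back produces valid JSON containing the collections mapping.
        let json = serde_json::to_string(&parsed).unwrap();
        assert!(json.contains("collections_mapping"));
    }
}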
pub async fn do_delete_full_snapshot(
dispatcher: &Dispatcher,
access: Access,
snapshot_name: &str,
) -> Result<bool, StorageError> {
access.check_global_access(AccessRequirements::new().manage())?;
// All checks should've been done at this point.
let pass = new_unchecked_verification_pass();
let toc = dispatcher.toc(&access, &pass);
let snapshot_manager = toc.get_snapshots_storage_manager()?;
let snapshot_dir =
snapshot_manager.get_full_snapshot_path(toc.snapshots_path(), snapshot_name)?;
let res = tokio::spawn(async move {
log::info!("Deleting full storage snapshot {snapshot_dir:?}");
snapshot_manager.delete_snapshot(&snapshot_dir).await
})
.await??;
Ok(res)
}
pub async fn do_delete_collection_snapshot(
dispatcher: &Dispatcher,
access: Access,
collection_name: &str,
snapshot_name: &str,
) -> Result<bool, StorageError> {
let collection_pass = access
.check_collection_access(collection_name, AccessRequirements::new().write().extras())?;
// All checks should've been done at this point.
let pass = new_unchecked_verification_pass();
let toc = dispatcher.toc(&access, &pass);
let snapshot_name = snapshot_name.to_string();
let collection = toc.get_collection(&collection_pass).await?;
let snapshot_manager = toc.get_snapshots_storage_manager()?;
let file_name =
snapshot_manager.get_snapshot_path(collection.snapshots_path(), &snapshot_name)?;
let res = tokio::spawn(async move {
log::info!("Deleting collection snapshot {file_name:?}");
snapshot_manager.delete_snapshot(&file_name).await
})
.await??;
Ok(res)
}
pub async fn do_list_full_snapshots(
toc: &TableOfContent,
access: Access,
) -> Result<Vec<SnapshotDescription>, StorageError> {
access.check_global_access(AccessRequirements::new())?;
let snapshots_manager = toc.get_snapshots_storage_manager()?;
let snapshots_path = Path::new(toc.snapshots_path());
Ok(snapshots_manager.list_snapshots(snapshots_path).await?)
}
pub async fn do_create_full_snapshot(
dispatcher: &Dispatcher,
access: Access,
) -> Result<SnapshotDescription, StorageError> {
access.check_global_access(AccessRequirements::new().manage())?;
// All checks should've been done at this point.
let pass = new_unchecked_verification_pass();
let toc = dispatcher.toc(&access, &pass).clone();
let res = tokio::spawn(async move { _do_create_full_snapshot(&toc, access).await }).await??;
Ok(res)
}
async fn _do_create_full_snapshot(
toc: &TableOfContent,
access: Access,
) -> Result<SnapshotDescription, StorageError> {
let snapshot_dir = Path::new(toc.snapshots_path()).to_path_buf();
let all_collections = toc.all_collections(&access).await;
let mut created_snapshots: Vec<(&str, SnapshotDescription)> = vec![];
for collection_pass in &all_collections {
let snapshot_details = toc.create_snapshot(collection_pass).await?;
created_snapshots.push((collection_pass.name(), snapshot_details));
}
let current_time = chrono::Utc::now().format("%Y-%m-%d-%H-%M-%S").to_string();
let snapshot_name = format!("{FULL_SNAPSHOT_FILE_NAME}-{current_time}.snapshot");
let collection_name_to_snapshot_path: HashMap<_, _> = created_snapshots
.iter()
.map(|&(collection_name, ref snapshot_details)| {
(collection_name.to_string(), snapshot_details.name.clone())
})
.collect();
let mut alias_mapping: HashMap<String, String> = Default::default();
for collection_pass in &all_collections {
for alias in toc.collection_aliases(collection_pass, &access).await? {
alias_mapping.insert(alias.clone(), collection_pass.name().to_string());
}
}
let config_path = snapshot_dir.join(format!("config-{current_time}.json"));
{
let snapshot_config = SnapshotConfig {
collections_mapping: collection_name_to_snapshot_path,
collections_aliases: alias_mapping,
};
let mut config_file = tokio_fs::File::create(&config_path).await?;
config_file
.write_all(
serde_json::to_string_pretty(&snapshot_config)
.unwrap()
.as_bytes(),
)
.await?;
}
let full_snapshot_path = snapshot_dir.join(&snapshot_name);
let temp_full_snapshot_path = toc
.optional_temp_or_storage_temp_path()?
.join(&snapshot_name);
// Make sure temporary file is removed in case of error
let _temp_full_snapshot_path_file = TempPath::from_path(&temp_full_snapshot_path);
let config_path_clone = config_path.clone();
// (tempfile_with_snapshot, snapshot_name)
let mut temp_collection_snapshots = vec![];
let temp_storage_path = toc.optional_temp_or_storage_temp_path()?;
let snapshot_manager = toc.get_snapshots_storage_manager()?;
for (collection_name, snapshot_details) in &created_snapshots {
let snapshot_path = snapshot_dir
.join(collection_name)
.join(&snapshot_details.name);
let local_temp_collection_snapshot = temp_storage_path
.join(collection_name)
.join(&snapshot_details.name);
snapshot_manager
.get_stored_file(&snapshot_path, &local_temp_collection_snapshot)
.await?;
temp_collection_snapshots.push((
TempPath::from_path(local_temp_collection_snapshot),
snapshot_details.name.clone(),
));
}
let full_snapshot_path_clone = temp_full_snapshot_path.clone();
let archiving = tokio::task::spawn_blocking(move || {
// have to use std here, because TarBuilder is not async
let mut file = BufWriter::new(fs::File::create(&full_snapshot_path_clone)?);
let mut builder = TarBuilder::new(&mut file);
builder.sparse(true);
for (temp_file, snapshot_name) in temp_collection_snapshots {
builder.append_path_with_name(&temp_file, &snapshot_name)?;
}
builder.append_path_with_name(&config_path_clone, "config.json")?;
builder.finish()?;
// Explicitly flush write buffer so we can catch IO errors
drop(builder);
file.flush()?;
Ok::<(), StorageError>(())
});
AbortOnDropHandle::new(archiving).await??;
let snapshot_description = snapshot_manager
.store_file(&temp_full_snapshot_path, &full_snapshot_path)
.await?;
tokio_fs::remove_file(&config_path).await?;
Ok(snapshot_description)
}
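// Hedged sketch (not part of the original file): the full-snapshot archive name built
// above has the shape "<FULL_SNAPSHOT_FILE_NAME>-<timestamp>.snapshot" with a
// second-precision UTC timestamp.
#[cfg(test)]
mod snapshot_name_sketch {
    use super::*;

    #[test]
    fn full_snapshot_name_shape() {
        let current_time = chrono::Utc::now().format("%Y-%m-%d-%H-%M-%S").to_string();
        let snapshot_name = format!("{FULL_SNAPSHOT_FILE_NAME}-{current_time}.snapshot");
        assert!(snapshot_name.starts_with(FULL_SNAPSHOT_FILE_NAME));
        assert!(snapshot_name.ends_with(".snapshot"));
    }
}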
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/content_manager/snapshots/recover.rs | lib/storage/src/content_manager/snapshots/recover.rs | use collection::collection::Collection;
use collection::collection::payload_index_schema::{PAYLOAD_INDEX_CONFIG_FILE, PayloadIndexSchema};
use collection::common::sha_256::{hash_file, hashes_equal};
use collection::config::CollectionConfigInternal;
use collection::operations::snapshot_ops::{SnapshotPriority, SnapshotRecover};
use collection::operations::verification::new_unchecked_verification_pass;
use collection::shards::check_shard_path;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::replica_set::snapshots::RecoveryType;
use collection::shards::shard::{PeerId, ShardId};
use common::save_on_disk::SaveOnDisk;
use fs_err::tokio as tokio_fs;
use crate::content_manager::collection_meta_ops::{
CollectionMetaOperations, CreateCollectionOperation, CreatePayloadIndex,
};
use crate::content_manager::snapshots::download::download_snapshot;
use crate::dispatcher::Dispatcher;
use crate::rbac::{Access, AccessRequirements, CollectionPass};
use crate::{StorageError, TableOfContent};
pub async fn activate_shard(
toc: &TableOfContent,
collection: &Collection,
peer_id: PeerId,
shard_id: &ShardId,
) -> Result<(), StorageError> {
if toc.is_distributed() {
log::debug!(
"Activating shard {} of collection {} with consensus",
shard_id,
&collection.name()
);
toc.send_set_replica_state_proposal(
collection.name().to_string(),
peer_id,
*shard_id,
ReplicaState::Active,
None,
)?;
} else {
log::debug!(
"Activating shard {} of collection {} locally",
shard_id,
&collection.name()
);
collection
.set_shard_replica_state(*shard_id, peer_id, ReplicaState::Active, None)
.await?;
}
Ok(())
}
/// # Cancel safety
///
/// This method is cancel safe.
pub async fn do_recover_from_snapshot(
dispatcher: &Dispatcher,
collection_name: &str,
source: SnapshotRecover,
access: Access,
client: reqwest::Client,
) -> Result<bool, StorageError> {
let multipass = access.check_global_access(AccessRequirements::new().manage())?;
let dispatcher = dispatcher.clone();
let collection_pass = multipass.issue_pass(collection_name).into_static();
let toc = dispatcher
.toc(&access, &new_unchecked_verification_pass())
.clone();
let res = toc
.general_runtime_handle()
.spawn(async move {
_do_recover_from_snapshot(dispatcher, access, collection_pass, source, &client).await
})
.await??;
Ok(res)
}
/// # Cancel safety
///
/// This method is *not* cancel safe.
async fn _do_recover_from_snapshot(
dispatcher: Dispatcher,
access: Access,
collection_pass: CollectionPass<'static>,
source: SnapshotRecover,
client: &reqwest::Client,
) -> Result<bool, StorageError> {
let SnapshotRecover {
location,
priority,
checksum,
api_key: _,
} = source;
// All checks should've been done at this point.
let pass = new_unchecked_verification_pass();
let toc = dispatcher.toc(&access, &pass);
// Measure this scope for metrics/telemetry.
// (This must be a named variable so it doesn't get dropped prematurely!)
let _measure_guard = toc
.snapshot_telemetry_collector(collection_pass.name())
.running_snapshot_recovery
.measure_scope();
let this_peer_id = toc.this_peer_id;
let is_distributed = toc.is_distributed();
let snapshot_path = download_snapshot(
client,
location,
&toc.optional_temp_or_snapshot_temp_path()?,
)
.await?;
if let Some(checksum) = checksum {
let snapshot_checksum = hash_file(&snapshot_path).await?;
if !hashes_equal(&snapshot_checksum, &checksum) {
return Err(StorageError::bad_input(format!(
"Snapshot checksum mismatch: expected {checksum}, got {snapshot_checksum}"
)));
}
}
log::debug!("Snapshot downloaded to {}", snapshot_path.display());
let temp_storage_path = toc.optional_temp_or_storage_temp_path()?;
let tmp_collection_dir = tempfile::Builder::new()
.prefix(&format!("col-{collection_pass}-recovery-"))
.tempdir_in(temp_storage_path)?;
log::debug!(
"Recovering collection {collection_pass} from snapshot {}",
snapshot_path.display(),
);
log::debug!(
"Unpacking snapshot to {}",
tmp_collection_dir.path().display(),
);
let tmp_collection_dir_clone = tmp_collection_dir.path().to_path_buf();
let snapshot_path_clone = snapshot_path.to_path_buf();
let restoring = tokio::task::spawn_blocking(move || {
Collection::restore_snapshot(
&snapshot_path_clone,
&tmp_collection_dir_clone,
this_peer_id,
is_distributed,
)
});
restoring.await??;
let snapshot_config = CollectionConfigInternal::load(tmp_collection_dir.path())?;
snapshot_config.validate_and_warn();
let payload_index_file = tmp_collection_dir.path().join(PAYLOAD_INDEX_CONFIG_FILE);
let payload_schema: SaveOnDisk<PayloadIndexSchema> =
SaveOnDisk::load_or_init_default(&payload_index_file).map_err(|err| {
StorageError::service_error(format!(
"Failed to load payload index schema from {payload_index_file:?}: {err}"
))
})?;
let schema = payload_schema.read().schema.clone();
let collection = match toc.get_collection(&collection_pass).await.ok() {
Some(collection) => collection,
None => {
log::debug!("Collection {collection_pass} does not exist, creating it");
let operation =
CollectionMetaOperations::CreateCollection(CreateCollectionOperation::new(
collection_pass.to_string(),
snapshot_config.clone().into(),
)?);
dispatcher
.submit_collection_meta_op(operation, access.clone(), None)
.await?;
// Since we don't just copy files into a collection dir,
// but create the collection in consensus and then copy data into the recreated collection,
// we also need to register all associated payload indexes in consensus.
for (field_name, field_schema) in schema.iter() {
let consensus_op =
CollectionMetaOperations::CreatePayloadIndex(CreatePayloadIndex {
collection_name: collection_pass.to_string(),
field_name: field_name.clone(),
field_schema: field_schema.clone(),
});
dispatcher
.submit_collection_meta_op(consensus_op, access.clone(), None)
.await?;
}
toc.get_collection(&collection_pass).await?
}
};
let state = collection.state().await;
// Check config compatibility
// Check vectors config
if snapshot_config.params.vectors != state.config.params.vectors {
return Err(StorageError::bad_input(format!(
"Snapshot is not compatible with existing collection: Collection vectors: {:?} Snapshot Vectors: {:?}",
state.config.params.vectors, snapshot_config.params.vectors
)));
}
// Check shard number
if snapshot_config.params.shard_number != state.config.params.shard_number {
return Err(StorageError::bad_input(format!(
"Snapshot is not compatible with existing collection: Collection shard number: {:?} Snapshot shard number: {:?}",
state.config.params.shard_number, snapshot_config.params.shard_number
)));
}
// Deactivate collection local shards during recovery
for (shard_id, shard_info) in &state.shards {
let local_shard_state = shard_info.replicas.get(&this_peer_id);
match local_shard_state {
None => {} // Shard is not on this node, skip
Some(state) => {
if state != &ReplicaState::Partial {
toc.send_set_replica_state_proposal(
collection_pass.to_string(),
this_peer_id,
*shard_id,
ReplicaState::Partial,
None,
)?;
}
}
}
}
let priority = priority.unwrap_or_default();
// Recover shards from the snapshot
for (shard_id, shard_info) in &state.shards {
let snapshot_shard_path = check_shard_path(tmp_collection_dir.path(), *shard_id).await?;
log::debug!(
"Recovering shard {} from {}",
shard_id,
snapshot_shard_path.display(),
);
// TODO:
// `_do_recover_from_snapshot` is not *yet* analyzed/organized for cancel safety,
// but `recover_local_shard_from` requires `cancel::CancellationToken` argument *now*,
// so we provide a token that is never triggered (in this case `recover_local_shard_from`
// works *exactly* as before the `cancel::CancellationToken` parameter was added to it)
let recovered = collection
.recover_local_shard_from(
&snapshot_shard_path,
RecoveryType::Full,
*shard_id,
cancel::CancellationToken::new(),
)
.await?;
if !recovered {
log::debug!("Shard {shard_id} is not in snapshot");
continue;
}
// If this is the only replica, we can activate it
// If not - de-sync is possible, so we need to run synchronization
let other_active_replicas: Vec<_> = shard_info
.replicas
.iter()
.filter(|&(&peer_id, &state)| {
// Check if there are *other* active replicas, after recovering collection snapshot.
// This should include `ReshardingScaleDown` replicas.
let is_active = matches!(
state,
ReplicaState::Active | ReplicaState::ReshardingScaleDown
);
peer_id != this_peer_id && is_active
})
.collect();
if other_active_replicas.is_empty() {
// No other active replicas, we can activate this shard
// as there is no de-sync possible
activate_shard(toc, &collection, this_peer_id, shard_id).await?;
} else {
match priority {
SnapshotPriority::NoSync => {
activate_shard(toc, &collection, this_peer_id, shard_id).await?;
}
SnapshotPriority::Snapshot => {
// Snapshot is the source of truth, we need to deactivate all other replicas
activate_shard(toc, &collection, this_peer_id, shard_id).await?;
let replicas_to_keep = state.config.params.replication_factor.get() - 1;
let mut replicas_to_remove = other_active_replicas
.len()
.saturating_sub(replicas_to_keep as usize);
for (peer_id, _) in other_active_replicas {
if replicas_to_remove > 0 {
// Still more other active replicas than the replication factor allows, remove this one
replicas_to_remove -= 1;
toc.request_remove_replica(
collection_pass.to_string(),
*shard_id,
*peer_id,
)?;
} else {
toc.send_set_replica_state_proposal(
collection_pass.to_string(),
*peer_id,
*shard_id,
ReplicaState::Dead,
None,
)?;
}
}
}
SnapshotPriority::Replica => {
// Replica is the source of truth, we need to sync recovered data with this replica
let (replica_peer_id, _state) =
other_active_replicas.into_iter().next().unwrap();
log::debug!(
"Running synchronization for shard {shard_id} of collection {collection_pass} from {replica_peer_id}",
);
// assume that if there are other peers, the server is distributed
toc.request_shard_transfer(
collection_pass.to_string(),
*shard_id,
*replica_peer_id,
this_peer_id,
true,
None,
)?;
}
// `ShardTransfer` is only used during snapshot *shard transfer*.
// It is only exposed in internal gRPC API and only used for *shard* snapshot recovery.
SnapshotPriority::ShardTransfer => unreachable!(),
}
}
}
// Explicitly trigger optimizers for the collection we have recovered. This prevents them from
// remaining in grey state if the snapshot is not optimized.
// See: <https://github.com/qdrant/qdrant/issues/5139>
collection.trigger_optimizers().await;
// Remove tmp collection dir
tokio_fs::remove_dir_all(&tmp_collection_dir).await?;
// Remove snapshot after recovery if downloaded
if let Err(err) = snapshot_path.close() {
log::error!("Failed to remove downloaded collection snapshot after recovery: {err}");
}
Ok(true)
}
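// Hedged sketch (not part of the original file): which replica states count as "active"
// in the post-recovery decision above (whether to activate immediately, remove, or sync).
#[cfg(test)]
mod active_replica_sketch {
    use super::*;

    fn counts_as_other_active(state: ReplicaState) -> bool {
        matches!(
            state,
            ReplicaState::Active | ReplicaState::ReshardingScaleDown
        )
    }

    #[test]
    fn active_and_resharding_scale_down_count() {
        assert!(counts_as_other_active(ReplicaState::Active));
        assert!(counts_as_other_active(ReplicaState::ReshardingScaleDown));
        assert!(!counts_as_other_active(ReplicaState::Partial));
        assert!(!counts_as_other_active(ReplicaState::Dead));
    }
}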
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/rbac/mod.rs | lib/storage/src/rbac/mod.rs | use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::{Validate, ValidateArgs, ValidationError, ValidationErrors};
use crate::content_manager::errors::StorageError;
mod ops_checks;
/// A structure that defines access rights.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(untagged)]
pub enum Access {
/// Global access.
Global(GlobalAccessMode),
/// Access to specific collections.
Collection(CollectionAccessList),
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct CollectionAccessList(pub Vec<CollectionAccess>);
pub struct ExistingCollections {
inner: HashSet<String>,
}
#[derive(Serialize, Deserialize, Validate, PartialEq, Clone, Debug)]
#[validate(context = ExistingCollections, mutable)]
pub struct CollectionAccess {
/// Collection name that is allowed to be accessed
#[validate(custom(function = "validate_unique_collections", use_context))]
pub collection: String,
pub access: CollectionAccessMode,
/// Payload constraints.
/// An object where each key is a JSON path, and each value is JSON value.
///
/// Deprecation: this parameter is kept to prevent old keys from becoming valid after the parameter is removed.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[deprecated(since = "1.15.0")]
#[validate(custom(function = "validate_payload_empty"))]
pub payload: Option<Value>, // Value is a placeholder for a now removed type
}
fn validate_payload_empty(_payload: &Value) -> Result<(), ValidationError> {
Err(ValidationError {
code: Cow::from("deprecated"),
message: Some(Cow::from(
"The 'payload' constraint is deprecated and should not be used",
)),
params: HashMap::new(),
})
}
impl CollectionAccess {
fn view(&self) -> CollectionAccessView<'_> {
CollectionAccessView {
collection: &self.collection,
access: self.access,
}
}
}
#[derive(Serialize, Deserialize, Eq, PartialEq, Copy, Clone, Debug)]
pub enum GlobalAccessMode {
/// Read-only access
#[serde(rename = "r")]
Read,
/// Read and write access
#[serde(rename = "m")]
Manage,
}
#[derive(Serialize, Deserialize, Eq, PartialEq, Copy, Clone, Debug)]
pub enum CollectionAccessMode {
/// Read-only access to a collection.
#[serde(rename = "r")]
Read,
/// Read and write access to a collection, with some restrictions.
#[serde(rename = "rw")]
ReadWrite,
/// Points read and write - access to update and modify points in the collection,
/// but not snapshots or payload indexes.
#[serde(rename = "prw")]
PointsReadWrite,
}
impl Access {
/// Create an `Access` object with full access.
/// The ``_reason`` parameter is not used in the code, but serves as a mandatory commentary to
/// explain why the access is granted, e.g. ``Access::full("Internal API")`` or
/// ``Access::full("Test")``.
pub const fn full(_reason: &'static str) -> Self {
Self::Global(GlobalAccessMode::Manage)
}
pub const fn full_ro(_reason: &'static str) -> Self {
Self::Global(GlobalAccessMode::Read)
}
/// Check if the user has global access.
pub fn check_global_access(
&self,
requirements: AccessRequirements,
) -> Result<CollectionMultipass, StorageError> {
match self {
Access::Global(mode) => mode.meets_requirements(requirements)?,
Access::Collection(_) => {
return Err(StorageError::forbidden("Global access is required"));
}
}
Ok(CollectionMultipass)
}
/// Check if the user has access to a collection with given requirements.
pub fn check_collection_access<'a>(
&self,
collection_name: &'a str,
requirements: AccessRequirements,
) -> Result<CollectionPass<'a>, StorageError> {
match self {
Access::Global(mode) => mode.meets_requirements(requirements)?,
Access::Collection(list) => list
.find_view(collection_name)?
.meets_requirements(requirements)?,
}
Ok(CollectionPass(Cow::Borrowed(collection_name)))
}
}
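// Hedged sketch (not part of the original file): how the two check methods above behave
// for a collection-scoped, read-only token, using only types defined in this module.
#[cfg(test)]
mod access_check_sketch {
    use super::*;

    #[test]
    fn read_only_collection_token() {
        let access = Access::Collection(CollectionAccessList(vec![CollectionAccess {
            collection: "col".to_string(),
            access: CollectionAccessMode::Read,
            #[expect(deprecated)]
            payload: None,
        }]));

        // A collection-scoped token never satisfies a global access check.
        assert!(access.check_global_access(AccessRequirements::new()).is_err());

        // Read access to "col" is enough for a plain read requirement...
        assert!(access
            .check_collection_access("col", AccessRequirements::new())
            .is_ok());
        // ...but not for a write requirement, and unknown collections are rejected.
        assert!(access
            .check_collection_access("col", AccessRequirements::new().write())
            .is_err());
        assert!(access
            .check_collection_access("other", AccessRequirements::new())
            .is_err());
    }
}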
impl CollectionAccessList {
pub(self) fn find_view<'a>(
&'a self,
collection_name: &'a str,
) -> Result<CollectionAccessView<'a>, StorageError> {
let access = self
.0
.iter()
.find(|collections| collections.collection == collection_name)
.ok_or_else(|| {
StorageError::forbidden(format!(
"Access to collection {collection_name} is required"
))
})?;
Ok(access.view())
}
/// Lists the collections which fulfill the requirements.
pub fn meeting_requirements(&self, requirements: AccessRequirements) -> Vec<&String> {
self.0
.iter()
.filter(|access| access.view().meets_requirements(requirements).is_ok())
.map(|access| &access.collection)
.collect()
}
}
#[derive(Debug)]
struct CollectionAccessView<'a> {
pub collection: &'a str,
pub access: CollectionAccessMode,
}
impl CollectionAccessView<'_> {
fn meets_requirements(&self, requirements: AccessRequirements) -> Result<(), StorageError> {
let AccessRequirements {
write,
manage,
extras,
} = requirements;
if extras {
match self.access {
CollectionAccessMode::Read => {} // Ok
CollectionAccessMode::ReadWrite => {} // Ok
CollectionAccessMode::PointsReadWrite => {
return Err(StorageError::forbidden(format!(
"Only points access is allowed for collection {}",
self.collection,
)));
}
}
}
if write {
match self.access {
CollectionAccessMode::Read => {
return Err(StorageError::forbidden(format!(
"Write access to collection {} is required",
self.collection,
)));
}
CollectionAccessMode::ReadWrite => (),
CollectionAccessMode::PointsReadWrite => {
// Extras are checked above.
}
}
}
if manage {
// Don't specify collection name since the manage access could be enabled globally, and
// not per collection.
return Err(StorageError::forbidden(
"Manage access for this operation is required",
));
}
Ok(())
}
}
/// Creates [CollectionPass] objects for all collections
pub struct CollectionMultipass;
impl CollectionMultipass {
pub fn issue_pass<'a>(&self, name: &'a str) -> CollectionPass<'a> {
CollectionPass(Cow::Borrowed(name))
}
}
/// A pass that allows access to a specific collection.
#[derive(Debug)]
pub struct CollectionPass<'a>(pub(self) Cow<'a, str>);
impl<'a> CollectionPass<'a> {
pub fn name(&'a self) -> &'a str {
&self.0
}
pub fn into_static(self) -> CollectionPass<'static> {
CollectionPass(Cow::Owned(self.0.into_owned()))
}
}
impl std::fmt::Display for CollectionPass<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Default, Debug, Copy, Clone)]
pub struct AccessRequirements {
/// Write access is required.
pub write: bool,
/// Manage access is required, implies write access.
pub manage: bool,
/// Require access to collection extras, like snapshots, payload indexes, cluster info.
pub extras: bool,
}
impl AccessRequirements {
pub fn new() -> Self {
Self::default()
}
pub fn write(&self) -> Self {
Self {
write: true,
..*self
}
}
pub fn manage(&self) -> Self {
Self {
manage: true,
..*self
}
}
pub fn extras(&self) -> Self {
Self {
extras: true,
..*self
}
}
}
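// Hedged sketch (not part of the original file): the builder methods above are additive
// and operate on copies, so a base requirement can be extended without mutating it.
#[cfg(test)]
mod access_requirements_sketch {
    use super::*;

    #[test]
    fn builders_compose() {
        let read_only = AccessRequirements::new();
        let write_extras = read_only.write().extras();
        assert!(write_extras.write && write_extras.extras && !write_extras.manage);
        // The original value is unchanged because `AccessRequirements` is `Copy`.
        assert!(!read_only.write && !read_only.extras);
    }
}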
impl GlobalAccessMode {
fn meets_requirements(&self, requirements: AccessRequirements) -> Result<(), StorageError> {
let AccessRequirements {
write,
manage,
extras: _,
} = requirements;
if write || manage {
match self {
GlobalAccessMode::Read => {
return Err(StorageError::forbidden("Global manage access is required"));
}
GlobalAccessMode::Manage => (),
}
}
Ok(())
}
}
impl Access {
/// Return a list of validation errors in a format suitable for [ValidationErrors::merge_all].
pub fn validate(&self) -> Vec<Result<(), ValidationErrors>> {
match self {
Access::Global(_) => Vec::new(),
Access::Collection(list) => {
let mut used_collections = ExistingCollections {
inner: HashSet::new(),
};
list.0
.iter()
.map(|x| {
ValidationErrors::merge(
Ok(()),
"access",
x.validate_with_args(&mut used_collections),
)
})
.collect::<Vec<_>>()
}
}
}
}
fn validate_unique_collections(
collection: &str,
used_collections: &mut ExistingCollections,
) -> Result<(), ValidationError> {
let unique = used_collections.inner.insert(collection.to_owned());
if unique {
Ok(())
} else {
Err(ValidationError {
code: Cow::from("unique"),
message: Some(Cow::from("Collection name should be unique")),
params: HashMap::from([(Cow::from("collection"), collection.to_owned().into())]),
})
}
}
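// Hedged sketch (not part of the original file): `Access::validate` uses the uniqueness
// check above to reject tokens that mention the same collection twice.
#[cfg(test)]
mod validate_access_sketch {
    use super::{Access, AccessCollectionBuilder};

    #[test]
    fn duplicate_collections_fail_validation() {
        let duplicated: Access = AccessCollectionBuilder::new()
            .add("col", false)
            .add("col", true)
            .into();
        assert!(duplicated.validate().iter().any(|result| result.is_err()));

        let unique: Access = AccessCollectionBuilder::new()
            .add("a", false)
            .add("b", true)
            .into();
        assert!(unique.validate().iter().all(|result| result.is_ok()));
    }
}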
#[cfg(test)]
struct AccessCollectionBuilder(pub Vec<CollectionAccess>);
#[cfg(test)]
impl AccessCollectionBuilder {
pub(self) fn new() -> Self {
Self(Vec::new())
}
pub(self) fn add(mut self, name: &str, write: bool) -> Self {
self.0.push(CollectionAccess {
collection: name.to_string(),
access: if write {
CollectionAccessMode::ReadWrite
} else {
CollectionAccessMode::Read
},
#[expect(deprecated)]
payload: None,
});
self
}
}
#[cfg(test)]
impl From<AccessCollectionBuilder> for Access {
fn from(builder: AccessCollectionBuilder) -> Self {
Access::Collection(CollectionAccessList(builder.0))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/src/rbac/ops_checks.rs | lib/storage/src/rbac/ops_checks.rs | use std::borrow::Cow;
use api::rest::{LookupLocation, SearchRequestInternal};
use collection::collection::distance_matrix::CollectionSearchMatrixRequest;
use collection::grouping::group_by::{GroupRequest, SourceRequest};
use collection::lookup::WithLookup;
use collection::operations::CollectionUpdateOperations;
use collection::operations::types::{
CoreSearchRequest, CountRequestInternal, DiscoverRequestInternal, PointRequestInternal,
RecommendRequestInternal, ScrollRequestInternal,
};
use collection::operations::universal_query::collection_query::{
CollectionPrefetch, CollectionQueryRequest,
};
use segment::data_types::facets::FacetParams;
use super::{Access, AccessRequirements, CollectionAccessList, CollectionPass};
use crate::content_manager::collection_meta_ops::CollectionMetaOperations;
use crate::content_manager::errors::{StorageError, StorageResult};
impl Access {
#[allow(private_bounds)]
pub(crate) fn check_point_op<'a>(
&self,
collection_name: &'a str,
op: &impl CheckableCollectionOperation,
) -> Result<CollectionPass<'a>, StorageError> {
let requirements = op.access_requirements();
match self {
Access::Global(mode) => mode.meets_requirements(requirements)?,
Access::Collection(list) => {
let view = list.find_view(collection_name)?;
view.meets_requirements(requirements)?;
op.check_access(list)?;
}
}
Ok(CollectionPass(Cow::Borrowed(collection_name)))
}
pub(crate) fn check_collection_meta_operation(
&self,
operation: &CollectionMetaOperations,
) -> Result<(), StorageError> {
match operation {
CollectionMetaOperations::CreateCollection(_)
| CollectionMetaOperations::UpdateCollection(_)
| CollectionMetaOperations::DeleteCollection(_)
| CollectionMetaOperations::ChangeAliases(_)
| CollectionMetaOperations::Resharding(_, _)
| CollectionMetaOperations::TransferShard(_, _)
| CollectionMetaOperations::SetShardReplicaState(_)
| CollectionMetaOperations::CreateShardKey(_)
| CollectionMetaOperations::DropShardKey(_) => {
self.check_global_access(AccessRequirements::new().manage())?;
}
CollectionMetaOperations::CreatePayloadIndex(op) => {
self.check_collection_access(
&op.collection_name,
AccessRequirements::new().write().extras(),
)?;
}
CollectionMetaOperations::DropPayloadIndex(op) => {
self.check_collection_access(
&op.collection_name,
AccessRequirements::new().write().extras(),
)?;
}
CollectionMetaOperations::Nop { token: _ } => (),
#[cfg(feature = "staging")]
CollectionMetaOperations::TestSlowDown(_) => {
self.check_global_access(AccessRequirements::new().manage())?;
}
}
Ok(())
}
}
trait CheckableCollectionOperation {
/// Used to distinguish whether the operation is read-only or read-write.
fn access_requirements(&self) -> AccessRequirements;
fn check_access(&self, access: &CollectionAccessList) -> Result<(), StorageError>;
}
impl CollectionAccessList {
fn check_lookup_from(
&self,
lookup_location: &Option<LookupLocation>,
) -> Result<(), StorageError> {
if let Some(lookup_location) = lookup_location {
self.find_view(&lookup_location.collection)?;
}
Ok(())
}
fn check_with_lookup(&self, with_lookup: &Option<WithLookup>) -> Result<(), StorageError> {
if let Some(with_lookup) = with_lookup {
self.find_view(&with_lookup.collection_name)?;
}
Ok(())
}
}
impl CheckableCollectionOperation for SearchRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
impl CheckableCollectionOperation for RecommendRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, access: &CollectionAccessList) -> Result<(), StorageError> {
access.check_lookup_from(&self.lookup_from)?;
Ok(())
}
}
impl CheckableCollectionOperation for PointRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
impl CheckableCollectionOperation for CoreSearchRequest {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
impl CheckableCollectionOperation for CountRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
impl CheckableCollectionOperation for GroupRequest {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, access: &CollectionAccessList) -> Result<(), StorageError> {
match &self.source {
SourceRequest::Search(s) => s.check_access(access)?,
SourceRequest::Recommend(r) => r.check_access(access)?,
SourceRequest::Query(q) => q.check_access(access)?,
}
access.check_with_lookup(&self.with_lookup)?;
Ok(())
}
}
impl CheckableCollectionOperation for DiscoverRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, access: &CollectionAccessList) -> Result<(), StorageError> {
access.check_lookup_from(&self.lookup_from)?;
Ok(())
}
}
impl CheckableCollectionOperation for ScrollRequestInternal {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
impl CheckableCollectionOperation for CollectionQueryRequest {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, access: &CollectionAccessList) -> Result<(), StorageError> {
access.check_lookup_from(&self.lookup_from)?;
for prefetch_query in self.prefetch.iter() {
check_access_for_prefetch(prefetch_query, access)?;
}
Ok(())
}
}
fn check_access_for_prefetch(
prefetch: &CollectionPrefetch,
access: &CollectionAccessList,
) -> Result<(), StorageError> {
access.check_lookup_from(&prefetch.lookup_from)?;
// Recurse inner prefetches
for prefetch_query in prefetch.prefetch.iter() {
check_access_for_prefetch(prefetch_query, access)?;
}
Ok(())
}
impl CheckableCollectionOperation for FacetParams {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> StorageResult<()> {
Ok(())
}
}
impl CheckableCollectionOperation for CollectionSearchMatrixRequest {
fn access_requirements(&self) -> AccessRequirements {
AccessRequirements {
write: false,
manage: false,
extras: false,
}
}
fn check_access(&self, _access: &CollectionAccessList) -> StorageResult<()> {
Ok(())
}
}
impl CheckableCollectionOperation for CollectionUpdateOperations {
fn access_requirements(&self) -> AccessRequirements {
match self {
CollectionUpdateOperations::PointOperation(_)
| CollectionUpdateOperations::VectorOperation(_)
| CollectionUpdateOperations::PayloadOperation(_) => AccessRequirements {
write: true,
manage: false,
extras: false,
},
CollectionUpdateOperations::FieldIndexOperation(_) => AccessRequirements {
write: true,
manage: true,
extras: true,
},
}
}
fn check_access(&self, _access: &CollectionAccessList) -> Result<(), StorageError> {
Ok(())
}
}
#[cfg(test)]
mod tests_ops {
use std::fmt::Debug;
use api::rest::{
self, LookupLocation, OrderByInterface, RecommendStrategy, SearchRequestInternal,
};
use collection::operations::payload_ops::PayloadOpsDiscriminants;
use collection::operations::point_ops::{
BatchPersisted, BatchVectorStructPersisted, ConditionalInsertOperationInternal,
PointInsertOperationsInternal, PointInsertOperationsInternalDiscriminants,
PointOperationsDiscriminants, PointStructPersisted, PointSyncOperation,
VectorStructPersisted,
};
use collection::operations::query_enum::QueryEnum;
use collection::operations::types::{ContextExamplePair, RecommendExample, UsingVector};
use collection::operations::vector_ops::{
PointVectorsPersisted, UpdateVectorsOp, VectorOperationsDiscriminants,
};
use collection::operations::{
CollectionUpdateOperationsDiscriminants, CreateIndex, FieldIndexOperations,
FieldIndexOperationsDiscriminants,
};
use segment::data_types::vectors::NamedQuery;
use segment::types::{
Condition, ExtendedPointId, Filter, Payload, PointIdType, SearchParams,
WithPayloadInterface, WithVector,
};
use shard::operations::payload_ops::{DeletePayloadOp, PayloadOps, SetPayloadOp};
use shard::operations::point_ops::{PointIdsList, PointOperations};
use shard::operations::vector_ops::VectorOperations;
use strum::IntoEnumIterator as _;
use super::*;
use crate::rbac::{AccessCollectionBuilder, GlobalAccessMode};
/// Create a `must` filter from a list of point IDs.
#[cfg(test)]
fn make_filter_from_ids(ids: Vec<ExtendedPointId>) -> Filter {
let cond = ids.into_iter().collect::<ahash::AHashSet<_>>().into();
Filter {
must: Some(vec![Condition::HasId(cond)]),
..Default::default()
}
}
/// Operation is allowed with the given access, and no rewrite is expected.
fn assert_allowed<Op: Debug + Clone + PartialEq + CheckableCollectionOperation>(
op: &Op,
access: &Access,
) {
access.check_point_op("col", op).expect("Should be allowed");
}
/// Operation is forbidden with the given access.
fn assert_forbidden<Op: Clone + CheckableCollectionOperation + PartialEq>(
op: &Op,
access: &Access,
) {
access
.check_point_op("col", op)
.expect_err("should be forbidden");
}
/// Operation requires write + whole collection access.
fn assert_requires_whole_write_access<Op>(op: &Op)
where
Op: CheckableCollectionOperation + Clone + Debug + PartialEq,
{
assert_allowed(op, &Access::Global(GlobalAccessMode::Manage));
assert_forbidden(op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(op, &AccessCollectionBuilder::new().add("col", true).into());
assert_forbidden(op, &AccessCollectionBuilder::new().add("col", false).into());
}
#[test]
fn test_recommend_request_internal() {
let op = RecommendRequestInternal {
positive: vec![RecommendExample::Dense(vec![0.0, 1.0, 2.0])],
negative: vec![RecommendExample::Sparse(vec![(0, 0.0)].try_into().unwrap())],
strategy: Some(RecommendStrategy::AverageVector),
filter: None,
params: Some(SearchParams::default()),
limit: 100,
offset: Some(100),
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: Some(WithVector::Bool(true)),
score_threshold: Some(42.0),
using: Some(UsingVector::Name("vector".into())),
lookup_from: Some(LookupLocation {
collection: "col2".to_string(),
vector: Some("vector".into()),
shard_key: None,
}),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
// Point ID is used
assert_forbidden(
&RecommendRequestInternal {
positive: vec![RecommendExample::PointId(ExtendedPointId::NumId(12345))],
..op.clone()
},
&AccessCollectionBuilder::new().add("col2", false).into(),
);
// lookup_from requires read access
assert_forbidden(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
assert_allowed(
&RecommendRequestInternal {
lookup_from: None,
..op
},
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_point_request_internal() {
let op = PointRequestInternal {
ids: vec![PointIdType::NumId(12345)],
with_payload: None,
with_vector: WithVector::Bool(true),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_forbidden(&op, &AccessCollectionBuilder::new().into());
assert_allowed(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_core_search_request() {
let op = CoreSearchRequest {
query: QueryEnum::Nearest(NamedQuery::default_dense(vec![0.0, 1.0, 2.0])),
filter: None,
params: Some(SearchParams::default()),
limit: 100,
offset: 100,
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: Some(WithVector::Bool(true)),
score_threshold: Some(42.0),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_count_request_internal() {
let op = CountRequestInternal {
filter: None,
exact: false,
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_group_request_source() {
let op = GroupRequest {
// NOTE: SourceRequest::Recommend is already tested in test_recommend_request_internal
source: SourceRequest::Search(SearchRequestInternal {
vector: rest::NamedVectorStruct::Default(vec![0.0, 1.0, 2.0]),
filter: None,
params: Some(SearchParams::default()),
limit: 100,
offset: Some(100),
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: Some(WithVector::Bool(true)),
score_threshold: Some(42.0),
}),
group_by: "path".parse().unwrap(),
group_size: 100,
limit: 100,
with_lookup: Some(WithLookup {
collection_name: "col2".to_string(),
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vectors: Some(WithVector::Bool(true)),
}),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(
&op,
&AccessCollectionBuilder::new()
.add("col", false)
.add("col2", false)
.into(),
);
// with_lookup requires whole read access
assert_forbidden(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
assert_allowed(
&GroupRequest {
with_lookup: None,
..op.clone()
},
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_discover_request_internal() {
let op = DiscoverRequestInternal {
target: Some(RecommendExample::Dense(vec![0.0, 1.0, 2.0])),
context: Some(vec![ContextExamplePair {
positive: RecommendExample::Dense(vec![0.0, 1.0, 2.0]),
negative: RecommendExample::Dense(vec![0.0, 1.0, 2.0]),
}]),
filter: None,
params: Some(SearchParams::default()),
limit: 100,
offset: Some(100),
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: Some(WithVector::Bool(true)),
using: Some(UsingVector::Name("vector".into())),
lookup_from: Some(LookupLocation {
collection: "col2".to_string(),
vector: Some("vector".into()),
shard_key: None,
}),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(
&op,
&AccessCollectionBuilder::new()
.add("col", false)
.add("col2", false)
.into(),
);
// Point ID is used
assert_forbidden(
&DiscoverRequestInternal {
target: Some(RecommendExample::PointId(ExtendedPointId::NumId(12345))),
..op.clone()
},
&AccessCollectionBuilder::new().add("col2", false).into(),
);
assert_forbidden(
&DiscoverRequestInternal {
context: Some(vec![ContextExamplePair {
positive: RecommendExample::PointId(ExtendedPointId::NumId(12345)),
negative: RecommendExample::Dense(vec![0.0, 1.0, 2.0]),
}]),
..op.clone()
},
&AccessCollectionBuilder::new().add("col2", false).into(),
);
assert_forbidden(
&DiscoverRequestInternal {
context: Some(vec![ContextExamplePair {
positive: RecommendExample::Dense(vec![0.0, 1.0, 2.0]),
negative: RecommendExample::PointId(ExtendedPointId::NumId(12345)),
}]),
..op.clone()
},
&AccessCollectionBuilder::new().add("col2", false).into(),
);
// lookup_from requires read access
assert_forbidden(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
assert_allowed(
&DiscoverRequestInternal {
lookup_from: None,
..op
},
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_scroll_request_internal() {
let op = ScrollRequestInternal {
offset: Some(ExtendedPointId::NumId(12345)),
limit: Some(100),
filter: None,
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: WithVector::Bool(true),
order_by: Some(OrderByInterface::Key("path".parse().unwrap())),
};
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_allowed(&op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
#[test]
fn test_collection_update_operations() {
CollectionUpdateOperationsDiscriminants::iter().for_each(|discr| match discr {
CollectionUpdateOperationsDiscriminants::PointOperation => {
check_collection_update_operations_points()
}
CollectionUpdateOperationsDiscriminants::VectorOperation => {
check_collection_update_operations_update_vectors()
}
CollectionUpdateOperationsDiscriminants::PayloadOperation => {
check_collection_update_operations_payload()
}
CollectionUpdateOperationsDiscriminants::FieldIndexOperation => {
check_collection_update_operations_field_index()
}
});
}
/// Tests for [`CollectionUpdateOperations::PointOperation`].
fn check_collection_update_operations_points() {
PointOperationsDiscriminants::iter().for_each(|discr| match discr {
PointOperationsDiscriminants::UpsertPoints => {
for discr in PointInsertOperationsInternalDiscriminants::iter() {
let inner = match discr {
PointInsertOperationsInternalDiscriminants::PointsBatch => {
PointInsertOperationsInternal::PointsBatch(BatchPersisted {
ids: vec![ExtendedPointId::NumId(12345)],
vectors: BatchVectorStructPersisted::Single(vec![vec![
0.0, 1.0, 2.0,
]]),
payloads: None,
})
}
PointInsertOperationsInternalDiscriminants::PointsList => {
PointInsertOperationsInternal::PointsList(vec![PointStructPersisted {
id: ExtendedPointId::NumId(12345),
vector: VectorStructPersisted::Single(vec![0.0, 1.0, 2.0]),
payload: None,
}])
}
};
let op = CollectionUpdateOperations::PointOperation(
PointOperations::UpsertPoints(inner),
);
assert_requires_whole_write_access(&op);
}
}
PointOperationsDiscriminants::UpsertPointsConditional => {
let inner = PointInsertOperationsInternal::PointsList(vec![PointStructPersisted {
id: ExtendedPointId::NumId(12345),
vector: VectorStructPersisted::Single(vec![0.0, 1.0, 2.0]),
payload: None,
}]);
let filter = make_filter_from_ids(vec![ExtendedPointId::NumId(12345)]);
let op = CollectionUpdateOperations::PointOperation(
PointOperations::UpsertPointsConditional(ConditionalInsertOperationInternal {
points_op: inner,
condition: filter,
}),
);
assert_requires_whole_write_access(&op);
}
PointOperationsDiscriminants::DeletePoints => {
let op =
CollectionUpdateOperations::PointOperation(PointOperations::DeletePoints {
ids: vec![ExtendedPointId::NumId(12345)],
});
check_collection_update_operations_delete_points(&op);
}
PointOperationsDiscriminants::DeletePointsByFilter => {
let op = CollectionUpdateOperations::PointOperation(
PointOperations::DeletePointsByFilter(make_filter_from_ids(vec![
ExtendedPointId::NumId(12345),
])),
);
check_collection_update_operations_delete_points(&op);
}
PointOperationsDiscriminants::SyncPoints => {
let op = CollectionUpdateOperations::PointOperation(PointOperations::SyncPoints(
PointSyncOperation {
from_id: None,
to_id: None,
points: Vec::new(),
},
));
assert_requires_whole_write_access(&op);
}
#[cfg(feature = "staging")]
PointOperationsDiscriminants::TestDelay => {
use shard::operations::staging::TestDelayOperation;
let op = CollectionUpdateOperations::PointOperation(PointOperations::TestDelay(
TestDelayOperation::new(1.0),
));
assert_requires_whole_write_access(&op);
}
});
}
/// Tests for [`CollectionUpdateOperations::PointOperation`] with
/// [`PointOperations::DeletePoints`] and [`PointOperations::DeletePointsByFilter`].
fn check_collection_update_operations_delete_points(op: &CollectionUpdateOperations) {
assert_allowed(op, &Access::Global(GlobalAccessMode::Manage));
assert_forbidden(op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(op, &AccessCollectionBuilder::new().add("col", true).into());
assert_forbidden(op, &AccessCollectionBuilder::new().add("col", false).into());
}
/// Tests for [`CollectionUpdateOperations::VectorOperation`].
fn check_collection_update_operations_update_vectors() {
VectorOperationsDiscriminants::iter().for_each(|discr| match discr {
VectorOperationsDiscriminants::UpdateVectors => {
let op = CollectionUpdateOperations::VectorOperation(
VectorOperations::UpdateVectors(UpdateVectorsOp {
points: vec![PointVectorsPersisted {
id: ExtendedPointId::NumId(12345),
vector: VectorStructPersisted::Single(vec![0.0, 1.0, 2.0]),
}],
update_filter: None,
}),
);
assert_requires_whole_write_access(&op);
}
VectorOperationsDiscriminants::DeleteVectors => {
let op =
CollectionUpdateOperations::VectorOperation(VectorOperations::DeleteVectors(
PointIdsList {
points: vec![ExtendedPointId::NumId(12345)],
shard_key: None,
},
vec!["vector".into()],
));
check_collection_update_operations_delete_vectors(&op);
}
VectorOperationsDiscriminants::DeleteVectorsByFilter => {
let op = CollectionUpdateOperations::VectorOperation(
VectorOperations::DeleteVectorsByFilter(
make_filter_from_ids(vec![ExtendedPointId::NumId(12345)]),
vec!["vector".into()],
),
);
check_collection_update_operations_delete_vectors(&op);
}
});
}
/// Tests for [`CollectionUpdateOperations::VectorOperation`] with
/// [`VectorOperations::DeleteVectors`] and [`VectorOperations::DeleteVectorsByFilter`].
fn check_collection_update_operations_delete_vectors(op: &CollectionUpdateOperations) {
assert_allowed(op, &Access::Global(GlobalAccessMode::Manage));
assert_forbidden(op, &Access::Global(GlobalAccessMode::Read));
assert_allowed(op, &AccessCollectionBuilder::new().add("col", true).into());
assert_forbidden(op, &AccessCollectionBuilder::new().add("col", false).into());
}
/// Tests for [`CollectionUpdateOperations::PayloadOperation`].
fn check_collection_update_operations_payload() {
for discr in PayloadOpsDiscriminants::iter() {
let inner = match discr {
PayloadOpsDiscriminants::SetPayload => PayloadOps::SetPayload(SetPayloadOp {
payload: Payload::default(),
points: Some(vec![ExtendedPointId::NumId(12345)]),
filter: None,
key: None,
}),
PayloadOpsDiscriminants::DeletePayload => {
PayloadOps::DeletePayload(DeletePayloadOp {
keys: vec!["path".parse().unwrap()],
points: Some(vec![ExtendedPointId::NumId(12345)]),
filter: None,
})
}
PayloadOpsDiscriminants::ClearPayload => PayloadOps::ClearPayload {
points: vec![ExtendedPointId::NumId(12345)],
},
PayloadOpsDiscriminants::ClearPayloadByFilter => {
PayloadOps::ClearPayloadByFilter(make_filter_from_ids(vec![
ExtendedPointId::NumId(12345),
]))
}
PayloadOpsDiscriminants::OverwritePayload => {
PayloadOps::OverwritePayload(SetPayloadOp {
payload: Payload::default(),
points: Some(vec![ExtendedPointId::NumId(12345)]),
filter: None,
key: None,
})
}
};
let op = CollectionUpdateOperations::PayloadOperation(inner);
assert_requires_whole_write_access(&op);
}
}
/// Tests for [`CollectionUpdateOperations::FieldIndexOperation`].
fn check_collection_update_operations_field_index() {
for discr in FieldIndexOperationsDiscriminants::iter() {
let inner = match discr {
FieldIndexOperationsDiscriminants::CreateIndex => {
FieldIndexOperations::CreateIndex(CreateIndex {
field_name: "path".parse().unwrap(),
field_schema: None,
})
}
FieldIndexOperationsDiscriminants::DeleteIndex => {
FieldIndexOperations::DeleteIndex("path".parse().unwrap())
}
};
let op = CollectionUpdateOperations::FieldIndexOperation(inner);
assert_allowed(&op, &Access::Global(GlobalAccessMode::Manage));
assert_forbidden(&op, &Access::Global(GlobalAccessMode::Read));
assert_forbidden(&op, &AccessCollectionBuilder::new().add("col", true).into());
assert_forbidden(
&op,
&AccessCollectionBuilder::new().add("col", false).into(),
);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/tests/integration/alias_tests.rs | lib/storage/tests/integration/alias_tests.rs | use std::num::NonZeroUsize;
use std::sync::Arc;
use collection::operations::vector_params_builder::VectorParamsBuilder;
use collection::operations::verification::new_unchecked_verification_pass;
use collection::optimizers_builder::OptimizersConfig;
use collection::shards::channel_service::ChannelService;
use common::budget::ResourceBudget;
use memory::madvise;
use segment::types::Distance;
use storage::content_manager::collection_meta_ops::{
ChangeAliasesOperation, CollectionMetaOperations, CreateAlias, CreateCollection,
CreateCollectionOperation, DeleteAlias, RenameAlias,
};
use storage::content_manager::consensus::operation_sender::OperationSender;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use storage::types::{PerformanceConfig, StorageConfig};
use tempfile::Builder;
use tokio::runtime::Runtime;
const FULL_ACCESS: Access = Access::full("For test");
#[test]
fn test_alias_operation() {
let storage_dir = Builder::new().prefix("storage").tempdir().unwrap();
let config = StorageConfig {
storage_path: storage_dir.path().to_str().unwrap().to_string(),
snapshots_path: storage_dir
.path()
.join("snapshots")
.to_str()
.unwrap()
.to_string(),
snapshots_config: Default::default(),
temp_path: None,
on_disk_payload: false,
optimizers: OptimizersConfig {
deleted_threshold: 0.5,
vacuum_min_vector_number: 100,
default_segment_number: 2,
max_segment_size: None,
#[expect(deprecated)]
memmap_threshold: Some(100),
indexing_threshold: Some(100),
flush_interval_sec: 2,
max_optimization_threads: Some(2),
},
optimizers_overwrite: None,
wal: Default::default(),
performance: PerformanceConfig {
max_search_threads: 1,
max_optimization_runtime_threads: 1,
optimizer_cpu_budget: 0,
optimizer_io_budget: 0,
update_rate_limit: None,
search_timeout_sec: None,
incoming_shard_transfers_limit: Some(1),
outgoing_shard_transfers_limit: Some(1),
async_scorer: None,
},
hnsw_index: Default::default(),
hnsw_global_config: Default::default(),
mmap_advice: madvise::Advice::Random,
node_type: Default::default(),
update_queue_size: Default::default(),
handle_collection_load_errors: false,
recovery_mode: None,
update_concurrency: Some(NonZeroUsize::new(2).unwrap()),
// update_concurrency: None,
shard_transfer_method: None,
collection: None,
max_collections: None,
};
let search_runtime = Runtime::new().unwrap();
let handle = search_runtime.handle().clone();
let update_runtime = Runtime::new().unwrap();
let general_runtime = Runtime::new().unwrap();
let (propose_sender, _propose_receiver) = std::sync::mpsc::channel();
let propose_operation_sender = OperationSender::new(propose_sender);
let toc = Arc::new(TableOfContent::new(
&config,
search_runtime,
update_runtime,
general_runtime,
ResourceBudget::default(),
ChannelService::new(6333, None),
0,
Some(propose_operation_sender),
));
let dispatcher = Dispatcher::new(toc);
handle
.block_on(
dispatcher.submit_collection_meta_op(
CollectionMetaOperations::CreateCollection(
CreateCollectionOperation::new(
"test".to_string(),
CreateCollection {
vectors: VectorParamsBuilder::new(10, Distance::Cosine)
.build()
.into(),
sparse_vectors: None,
hnsw_config: None,
wal_config: None,
optimizers_config: None,
shard_number: Some(1),
on_disk_payload: None,
replication_factor: None,
write_consistency_factor: None,
quantization_config: None,
sharding_method: None,
strict_mode_config: None,
uuid: None,
metadata: None,
},
)
.unwrap(),
),
FULL_ACCESS,
None,
),
)
.unwrap();
handle
.block_on(dispatcher.submit_collection_meta_op(
CollectionMetaOperations::ChangeAliases(ChangeAliasesOperation {
actions: vec![CreateAlias {
collection_name: "test".to_string(),
alias_name: "test_alias".to_string(),
}
.into()],
}),
FULL_ACCESS,
None,
))
.unwrap();
handle
.block_on(dispatcher.submit_collection_meta_op(
CollectionMetaOperations::ChangeAliases(ChangeAliasesOperation {
actions: vec![
CreateAlias {
collection_name: "test".to_string(),
alias_name: "test_alias2".to_string(),
}
.into(),
DeleteAlias {
alias_name: "test_alias".to_string(),
}
.into(),
RenameAlias {
old_alias_name: "test_alias2".to_string(),
new_alias_name: "test_alias3".to_string(),
}
.into(),
],
}),
FULL_ACCESS,
None,
))
.unwrap();
// Nothing to verify here.
let pass = new_unchecked_verification_pass();
let _ = handle
.block_on(
dispatcher.toc(&FULL_ACCESS, &pass).get_collection(
&FULL_ACCESS
.check_collection_access("test_alias3", AccessRequirements::new())
.unwrap(),
),
)
.unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/storage/tests/integration/main.rs | lib/storage/tests/integration/main.rs | mod alias_tests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/macros/src/lib.rs | lib/macros/src/lib.rs | use proc_macro::TokenStream;
mod anonymize;
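/// Derive an `Anonymize` implementation that anonymizes every field recursively.
///
/// A minimal usage sketch (the `Anonymize` trait itself lives in the consuming crate and is
/// assumed to expose `fn anonymize(&self) -> Self`; the struct, field names, and helper
/// function below are illustrative only):
/// ```ignore
/// #[derive(Anonymize)]
/// struct CollectionTelemetry {
///     name: String,                 // anonymized via `Anonymize::anonymize`
///     #[anonymize(false)]
///     vectors_count: usize,         // copied as-is via `Clone`
///     #[anonymize(value = 0)]
///     request_counter: u64,         // replaced by a constant
///     #[anonymize(with = crate::anonymize_id)]
///     id: CollectionId,             // anonymized by a custom function
/// }
/// ```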
#[proc_macro_derive(Anonymize, attributes(anonymize))]
pub fn derive_anonymize(input: TokenStream) -> TokenStream {
match anonymize::derive_anonymize(input.into()) {
Ok(ts) => ts.into(),
Err(e) => e.to_compile_error().into(),
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/macros/src/anonymize.rs | lib/macros/src/anonymize.rs | use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::{ToTokens as _, format_ident, quote};
use syn::parse::{Parse, ParseStream};
use syn::spanned::Spanned as _;
use syn::{Attribute, Data, DeriveInput, Error, Expr, Index, LitBool, Path, Result, Token, parse2};
mod kw {
syn::custom_keyword!(value);
syn::custom_keyword!(with);
}
#[derive(Clone)]
enum AnonymizeAttr {
/// `#[anonymize(true)]`
True,
/// `#[anonymize(false)]`
False,
/// `#[anonymize(value = 42)]`
Value(Expr),
/// `#[anonymize(with = path::to::function)]`
With(Path),
}
impl Parse for AnonymizeAttr {
fn parse(input: ParseStream) -> Result<Self> {
let lookahead = input.lookahead1();
let result = if lookahead.peek(LitBool) {
if input.parse::<LitBool>()?.value {
AnonymizeAttr::True
} else {
AnonymizeAttr::False
}
} else if lookahead.peek(kw::value) {
input.parse::<kw::value>()?;
input.parse::<Token![=]>()?;
AnonymizeAttr::Value(input.parse()?)
} else if lookahead.peek(kw::with) {
input.parse::<kw::with>()?;
input.parse::<Token![=]>()?;
AnonymizeAttr::With(input.parse()?)
} else {
return Err(lookahead.error());
};
input.parse::<Option<Token![,]>>()?;
Ok(result)
}
}
fn parse_attrs<'a>(
default: &'a AnonymizeAttr,
attrs: &[Attribute],
) -> Result<Cow<'a, AnonymizeAttr>> {
let mut it = attrs.iter().filter(|a| a.path().is_ident("anonymize"));
match (it.next(), it.next()) {
(None, None) => Ok(Cow::Borrowed(default)),
(Some(attr), None) => attr.parse_args().map(Cow::Owned),
(_, Some(attr2)) => Err(Error::new(
attr2.span(),
"only one #[anonymize(...)] attribute is allowed",
)),
}
}
fn anonymize_expr(attr: &AnonymizeAttr, expr: TokenStream) -> TokenStream {
match attr {
AnonymizeAttr::True => quote! { Anonymize::anonymize(#expr) },
AnonymizeAttr::False => quote! { ::core::clone::Clone::clone(#expr) },
AnonymizeAttr::Value(expr) => quote! { #expr },
AnonymizeAttr::With(path) => quote! { #path(#expr) },
}
}
pub(crate) fn derive_anonymize(input: TokenStream) -> Result<TokenStream> {
let input_ast: DeriveInput = parse2(input)?;
let container_attr = parse_attrs(&AnonymizeAttr::True, &input_ast.attrs)?;
let function_body = match &input_ast.data {
Data::Struct(data_struct) => {
let field_initializers = data_struct
.fields
.iter()
.enumerate()
.map(|(idx, field)| {
let fi = match &field.ident {
Some(id) => quote! { #id },
None => Index::from(idx).into_token_stream(),
};
let field_attr = parse_attrs(&container_attr, &field.attrs)?;
let expr = anonymize_expr(&field_attr, quote! { &self.#fi });
Ok(quote! { #fi: #expr, })
})
.collect::<Result<Vec<_>>>()?;
quote! { Self { #(#field_initializers)* } }
}
Data::Enum(data_enum) => {
let arms = data_enum
.variants
.iter()
.map(|variant| {
let variant_attr = parse_attrs(&container_attr, &variant.attrs)?;
let mut pattern = Vec::with_capacity(variant.fields.iter().len());
let mut body = Vec::with_capacity(variant.fields.iter().len());
for (idx, field) in variant.fields.iter().enumerate() {
let fi = match &field.ident {
Some(id) => quote! { #id },
None => Index::from(idx).into_token_stream(),
};
let binding = format_ident!("__anonymize_binding_{idx}");
let field_attr = parse_attrs(&variant_attr, &field.attrs)?;
let expr = anonymize_expr(&field_attr, quote! { #binding });
pattern.push(quote! { #fi: #binding, });
body.push(quote! { #fi: #expr, });
}
let ident = &variant.ident;
Ok(quote! {
Self::#ident { #(#pattern)* } => Self::#ident { #(#body)* },
})
})
.collect::<Result<Vec<_>>>()?;
quote! { match self { #(#arms)* } }
}
Data::Union(data_union) => {
return Err(Error::new(
data_union.union_token.span,
"unions are not supported",
));
}
};
let ident = &input_ast.ident;
Ok(quote! {
impl Anonymize for #ident {
fn anonymize(&self) -> Self {
#function_body
}
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[track_caller]
fn check_derive(input: TokenStream, expected: TokenStream) {
let actual = derive_anonymize(input).unwrap();
assert_eq!(
actual.to_string(),
expected.to_string(),
"\n// actual\n{}\n\n// expected\n{}",
pretty(actual),
pretty(expected),
);
}
fn pretty(ts: TokenStream) -> String {
syn::parse_file(&ts.to_string())
.map_or_else(|e| e.to_string(), |f| prettyplease::unparse(&f))
}
#[test]
fn test_derive_anonymize() {
check_derive(
quote! {
struct Test {
foo: Foo,
#[anonymize(true)]
bar: Bar,
#[anonymize(false)]
baz: Baz,
#[anonymize(with = crate::anonymize_qux)]
qux: Qux,
#[anonymize(value = 42)]
quux: u32,
}
},
quote! {
impl Anonymize for Test {
fn anonymize(&self) -> Self {
Self {
foo: Anonymize::anonymize(&self.foo),
bar: Anonymize::anonymize(&self.bar),
baz: ::core::clone::Clone::clone(&self.baz),
qux: crate::anonymize_qux(&self.qux),
quux: 42,
}
}
}
},
);
check_derive(
quote! {
#[anonymize(false)]
struct Test {
foo: Foo,
#[anonymize(true)]
bar: Bar,
#[anonymize(false)]
baz: Baz,
#[anonymize(with = crate::anonymize_qux)]
qux: Qux,
#[anonymize(value = 42)]
quux: u32,
}
},
quote! {
impl Anonymize for Test {
fn anonymize(&self) -> Self {
Self {
foo: ::core::clone::Clone::clone(&self.foo),
bar: Anonymize::anonymize(&self.bar),
baz: ::core::clone::Clone::clone(&self.baz),
qux: crate::anonymize_qux(&self.qux),
quux: 42,
}
}
}
},
);
check_derive(
quote! {
struct Test(
Foo,
#[anonymize(with = crate::anonymize_bar)] Bar,
#[anonymize(false)] Baz,
#[anonymize(value = 42)] u32,
);
},
quote! {
impl Anonymize for Test {
fn anonymize(&self) -> Self {
Self {
0: Anonymize::anonymize(&self.0),
1: crate::anonymize_bar(&self.1),
2: ::core::clone::Clone::clone(&self.2),
3: 42,
}
}
}
},
);
check_derive(
quote! {
struct Test;
},
quote! {
impl Anonymize for Test {
fn anonymize(&self) -> Self {
Self {}
}
}
},
);
check_derive(
quote! {
enum Test {
A,
B(),
C {},
D(Foo, Bar),
E {
foo: Foo,
bar: Bar,
},
F(#[anonymize(with = crate::anonymize_foo)] Foo, Bar),
H(#[anonymize(false)] Foo, Bar),
I(#[anonymize(value = 42)] u32, Bar),
#[anonymize(with = crate::anonymize_bar)]
J(Foo, #[anonymize(true)] Bar, Baz),
}
},
quote! {
impl Anonymize for Test {
fn anonymize(&self) -> Self {
match self {
Self::A {} => Self::A {},
Self::B {} => Self::B {},
Self::C {} => Self::C {},
Self::D {
0: __anonymize_binding_0,
1: __anonymize_binding_1,
} => Self::D {
0: Anonymize::anonymize(__anonymize_binding_0),
1: Anonymize::anonymize(__anonymize_binding_1),
},
Self::E {
foo: __anonymize_binding_0,
bar: __anonymize_binding_1,
} => Self::E {
foo: Anonymize::anonymize(__anonymize_binding_0),
bar: Anonymize::anonymize(__anonymize_binding_1),
},
Self::F {
0: __anonymize_binding_0,
1: __anonymize_binding_1,
} => Self::F {
0: crate::anonymize_foo(__anonymize_binding_0),
1: Anonymize::anonymize(__anonymize_binding_1),
},
Self::H {
0: __anonymize_binding_0,
1: __anonymize_binding_1,
} => Self::H {
0: ::core::clone::Clone::clone(__anonymize_binding_0),
1: Anonymize::anonymize(__anonymize_binding_1),
},
Self::I {
0: __anonymize_binding_0,
1: __anonymize_binding_1,
} => Self::I {
0: 42,
1: Anonymize::anonymize(__anonymize_binding_1),
},
Self::J {
0: __anonymize_binding_0,
1: __anonymize_binding_1,
2: __anonymize_binding_2,
} => Self::J {
0: crate::anonymize_bar(__anonymize_binding_0),
1: Anonymize::anonymize(__anonymize_binding_1),
2: crate::anonymize_bar(__anonymize_binding_2),
},
}
}
}
},
);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/build.rs | lib/quantization/build.rs | use std::env;
fn main() {
println!("cargo:rerun-if-changed=cpp");
let mut builder = cc::Build::new();
let target_arch = env::var("CARGO_CFG_TARGET_ARCH")
.expect("CARGO_CFG_TARGET_ARCH env-var is not defined or is not UTF-8");
// TODO: Is `CARGO_CFG_TARGET_FEATURE` *always* defined?
//
// Cargo docs say that "boolean configurations are present if they are set,
// and not present otherwise", so, what about "target features"?
//
// https://doc.rust-lang.org/cargo/reference/environment-variables.html (Ctrl-F CARGO_CFG_<cfg>)
let target_feature = env::var("CARGO_CFG_TARGET_FEATURE")
.expect("CARGO_CFG_TARGET_FEATURE env-var is not defined or is not UTF-8");
if target_arch == "x86_64" {
builder.file("cpp/sse.c");
builder.file("cpp/avx2.c");
if builder.get_compiler().is_like_msvc() {
builder.flag("/arch:AVX");
builder.flag("/arch:AVX2");
builder.flag("/arch:SSE");
builder.flag("/arch:SSE2");
} else {
builder.flag("-march=haswell");
}
// O3 optimization level
builder.flag("-O3");
// Use popcnt instruction
builder.flag("-mpopcnt");
} else if target_arch == "aarch64" && target_feature.split(',').any(|feat| feat == "neon") {
builder.file("cpp/neon.c");
builder.flag("-O3");
}
builder.compile("simd_utils");
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/encoded_storage.rs | lib/quantization/src/encoded_storage.rs | #[cfg(feature = "testing")]
use std::io::{Read, Write};
#[cfg(feature = "testing")]
use std::num::NonZeroUsize;
#[cfg(feature = "testing")]
use std::path::Path;
use std::path::PathBuf;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
#[cfg(feature = "testing")]
use fs_err as fs;
#[cfg(feature = "testing")]
use fs_err::File;
#[cfg(feature = "testing")]
use memory::fadvise::OneshotFile;
use memory::mmap_type::MmapFlusher;
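/// Storage abstraction for quantized vector data, addressed by `PointOffsetType`.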
pub trait EncodedStorage {
fn get_vector_data(&self, index: PointOffsetType) -> &[u8];
fn is_on_disk(&self) -> bool;
fn upsert_vector(
&mut self,
id: PointOffsetType,
vector: &[u8],
hw_counter: &HardwareCounterCell,
) -> std::io::Result<()>;
fn vectors_count(&self) -> usize;
fn flusher(&self) -> MmapFlusher;
fn files(&self) -> Vec<PathBuf>;
fn immutable_files(&self) -> Vec<PathBuf>;
}
pub trait EncodedStorageBuilder {
type Storage: EncodedStorage;
fn build(self) -> std::io::Result<Self::Storage>;
fn push_vector_data(&mut self, other: &[u8]) -> std::io::Result<()>;
}
#[cfg(feature = "testing")]
pub struct TestEncodedStorage {
data: Vec<u8>,
quantized_vector_size: NonZeroUsize,
path: Option<PathBuf>,
}
#[cfg(feature = "testing")]
impl TestEncodedStorage {
pub fn from_file(path: &Path, quantized_vector_size: usize) -> std::io::Result<Self> {
let mut file = OneshotFile::open(path)?;
let mut buffer = Vec::new();
file.read_to_end(&mut buffer)?;
file.drop_cache()?;
if !buffer.len().is_multiple_of(quantized_vector_size) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!(
"TestEncodedStorage: buffer size ({}) not divisible by quantized_vector_size ({})",
buffer.len(),
quantized_vector_size,
),
));
}
let quantized_vector_size = NonZeroUsize::new(quantized_vector_size).ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"`quantized_vector_size` must be non-zero",
)
})?;
Ok(Self {
data: buffer,
quantized_vector_size,
path: Some(path.to_path_buf()),
})
}
}
#[cfg(feature = "testing")]
impl EncodedStorage for TestEncodedStorage {
fn get_vector_data(&self, index: PointOffsetType) -> &[u8] {
let start = self
.quantized_vector_size
.get()
.saturating_mul(index as usize);
let end = self
.quantized_vector_size
.get()
.saturating_mul(index as usize + 1);
self.data.get(start..end).unwrap_or(&[])
}
fn upsert_vector(
&mut self,
id: PointOffsetType,
vector: &[u8],
_hw_counter: &HardwareCounterCell,
) -> std::io::Result<()> {
if vector.len() != self.quantized_vector_size.get() {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"upsert_vector: payload length {} != quantized_vector_size {}",
vector.len(),
self.quantized_vector_size
),
));
}
// Skip hardware counter increment because it's a RAM storage.
let offset = id as usize * self.quantized_vector_size.get();
if id as usize >= self.vectors_count() {
self.data
.resize(offset + self.quantized_vector_size.get(), 0);
}
self.data[offset..offset + self.quantized_vector_size.get()].copy_from_slice(vector);
Ok(())
}
fn is_on_disk(&self) -> bool {
false
}
fn vectors_count(&self) -> usize {
self.data.len() / self.quantized_vector_size.get()
}
fn flusher(&self) -> MmapFlusher {
Box::new(|| Ok(()))
}
fn files(&self) -> Vec<PathBuf> {
if let Some(ref path) = self.path {
vec![path.clone()]
} else {
vec![]
}
}
fn immutable_files(&self) -> Vec<PathBuf> {
self.files()
}
}
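/// In-memory builder used in tests. A minimal usage sketch (sizes are illustrative):
/// ```ignore
/// let mut builder = TestEncodedStorageBuilder::new(None, 4);
/// builder.push_vector_data(&[1, 2, 3, 4]).unwrap();
/// let storage = builder.build().unwrap();
/// assert_eq!(storage.vectors_count(), 1);
/// ```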
#[cfg(feature = "testing")]
pub struct TestEncodedStorageBuilder {
data: Vec<u8>,
path: Option<PathBuf>,
quantized_vector_size: NonZeroUsize,
}
#[cfg(feature = "testing")]
impl TestEncodedStorageBuilder {
pub fn new(path: Option<&std::path::Path>, quantized_vector_size: usize) -> Self {
Self {
data: Vec::new(),
path: path.map(PathBuf::from),
quantized_vector_size: NonZeroUsize::new(quantized_vector_size).unwrap_or_else(|| {
panic!("quantized_vector_size must be non-zero");
}),
}
}
}
#[cfg(feature = "testing")]
impl EncodedStorageBuilder for TestEncodedStorageBuilder {
type Storage = TestEncodedStorage;
fn build(self) -> std::io::Result<Self::Storage> {
if let Some(path) = &self.path {
path.parent()
.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Path must have a parent directory",
)
})
.and_then(fs::create_dir_all)?;
let mut file = File::create(path)?;
file.write_all(&self.data)?;
file.sync_all()?;
}
Ok(TestEncodedStorage {
data: self.data,
quantized_vector_size: self.quantized_vector_size,
path: self.path,
})
}
fn push_vector_data(&mut self, other: &[u8]) -> std::io::Result<()> {
debug_assert_eq!(other.len(), self.quantized_vector_size.get());
self.data.extend_from_slice(other);
Ok(())
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/p_square.rs | lib/quantization/src/p_square.rs | use ordered_float::NotNan;
use crate::EncodingError;
/// Extended version of P-square one-quantile estimator by Jain & Chlamtac (1985).
///
/// <https://www.cse.wustl.edu/~jain/papers/ftp/psqr.pdf>
/// By default, P-square uses 5 markers to estimate a single quantile.
/// This implementation is extended to support an arbitrary odd number of markers N >= 5
///
/// Usage:
/// ```ignore
/// let mut p2 = P2Quantile::<7>::new(0.99).unwrap();
/// for x in data { p2.push(x); }
/// let q_hat = p2.estimate();
/// ```
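///
/// Internally the estimator starts in a `Linear` phase that simply collects the first `N`
/// observations and, once `N` observations have been seen, switches to the marker-based
/// `Impl` phase of the P-square algorithm.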
pub enum P2Quantile<const N: usize = 7> {
Linear(P2QuantileLinear<N>),
Impl(P2QuantileImpl<N>),
}
impl<const N: usize> P2Quantile<N> {
pub fn new(q: f64) -> Result<Self, EncodingError> {
const {
assert!(N >= 5, "P2Quantile requires at least 5 markers");
assert!(N % 2 == 1, "P2Quantile requires an odd number of markers");
};
if q <= 0.0 || q >= 1.0 {
return Err(EncodingError::EncodingError(
"Quantile q must be in (0, 1)".to_string(),
));
}
Ok(Self::Linear(P2QuantileLinear {
quantile: q,
observations: Default::default(),
}))
}
/// Push one observation.
pub fn push(&mut self, x: f64) {
let Ok(x) = NotNan::new(x) else {
return;
};
if !x.is_finite() {
return;
}
match self {
P2Quantile::Linear(linear) => {
// in linear case just collect observations until we have N of them
linear.observations.push(x);
if linear.observations.len() == N {
*self = P2Quantile::Impl(P2QuantileImpl::new_from_linear(linear));
}
}
P2Quantile::Impl(p2) => p2.push(*x),
}
}
/// Get resulting quantile estimation.
pub fn estimate(self) -> f64 {
match self {
P2Quantile::Linear(linear) => linear.estimate(),
P2Quantile::Impl(p2) => p2.estimate(),
}
}
}
pub struct P2QuantileImpl<const N: usize> {
count: usize,
markers: [Marker; N],
}
impl<const N: usize> P2QuantileImpl<N> {
fn new_from_linear(linear: &P2QuantileLinear<N>) -> Self {
assert_eq!(linear.observations.len(), N);
let mut buf = linear.observations.clone();
buf.sort_unstable();
let p = Self::generate_grid_probabilities(linear.quantile);
let mut markers = [Marker::default(); N];
for i in 0..N {
markers[i].height = *buf[i];
markers[i].target_probability = p[i];
markers[i].n_position = (i + 1) as f64;
markers[i].update_desired_position(N);
}
P2QuantileImpl { count: N, markers }
}
fn estimate(self) -> f64 {
// `N / 2` marker tracks the target quantile
self.markers[N / 2].height
}
fn push(&mut self, x: f64) {
self.count += 1;
// 1) Identify cell k and update extreme markers if needed
// k is the cell index in [0..N - 1]
let k = if x < self.markers[0].height {
// update minimum marker
self.markers[0].height = x;
0
} else if x > self.markers[N - 1].height {
// update maximum marker
self.markers[N - 1].height = x;
N - 1
} else {
// otherwise find the markers cell
self.find_marker(x)
};
// 2) Increment positions of markers above k
for i in (k + 1)..N {
self.markers[i].n_position += 1.0;
}
// 3) Update desired positions
for i in 0..N {
self.markers[i].update_desired_position(self.count);
}
// 4) Adjust interior markers
for i in 1..(N - 1) {
self.markers[i].adjust(self.markers[i - 1], self.markers[i + 1]);
}
}
fn find_marker(&self, x: f64) -> usize {
for i in 1..N {
if x <= self.markers[i].height {
return i - 1;
}
}
N - 1
}
/// Generate target probabilities for markers
/// In the original P-square with 5 markers, the target probabilities are:
/// p = [0, q/2, q, (1 + q)/2, 1]
/// This function generalizes this to N markers by placing additional markers
/// between the second and the middle, and between the middle and the second last.
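/// For example, with `q = 0.99` and `N = 7` this yields approximately
/// `[0.0, 0.495, 0.792, 0.99, 0.992, 0.995, 1.0]`.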
fn generate_grid_probabilities(q: f64) -> [f64; N] {
let mut p = [0.0; N];
let additional_markers_count = (N - 5) / 2;
p[0] = 0.0;
p[1] = q * 0.5;
// add extended marker probabilities
for i in 0..additional_markers_count {
// interpolate between q/2 and q, biased toward the middle marker
let factor = 0.7 + 0.3 * (i + 1) as f64 / (additional_markers_count as f64 + 2.0);
p[i + 2] = q * factor;
}
// middle marker, tracks the required quantile
p[N / 2] = q;
// add extended marker probabilities
for i in 0..additional_markers_count {
let factor = 0.7
+ 0.3 * (additional_markers_count - i) as f64
/ (additional_markers_count as f64 + 2.0);
p[N / 2 + 1 + i] = 1.0 + (q - 1.0) * factor;
}
p[N - 2] = 1.0 + (q - 1.0) * 0.5;
p[N - 1] = 1.0;
p
}
}
#[derive(Clone, Copy, Default)]
struct Marker {
height: f64,
n_position: f64,
n_desired: f64,
target_probability: f64,
}
impl Marker {
fn adjust(&mut self, prev: Marker, next: Marker) {
loop {
let di = self.n_desired - self.n_position;
if di >= 1.0 && (next.n_position - self.n_position) > 1.0 {
self.adjust_step(&prev, &next, 1.0);
} else if di <= -1.0 && (prev.n_position - self.n_position) < -1.0 {
self.adjust_step(&prev, &next, -1.0);
} else {
break;
}
}
}
fn adjust_step(&mut self, prev: &Marker, next: &Marker, dsign: f64) {
// Try parabolic prediction
let denom = next.n_position - prev.n_position;
let mut h_par = self.height;
if denom != 0.0 {
let a = (self.n_position - prev.n_position + dsign)
/ (next.n_position - self.n_position)
* (next.height - self.height);
let b = (next.n_position - self.n_position - dsign)
/ (self.n_position - prev.n_position)
* (self.height - prev.height);
h_par = self.height + (a + b) * dsign / denom;
}
// If parabolic result is within neighbors, use it; otherwise linear
self.height = if h_par > prev.height && h_par < next.height && h_par.is_finite() {
h_par
} else {
// Linear step toward neighbor indicated by dsign
if dsign > 0.0 {
self.height + (next.height - self.height) / (next.n_position - self.n_position)
} else {
self.height + (prev.height - self.height) / (prev.n_position - self.n_position)
}
};
self.n_position += dsign;
}
fn update_desired_position(&mut self, n: usize) {
self.n_desired = 1.0 + self.target_probability * (n as f64 - 1.0);
}
}
pub struct P2QuantileLinear<const N: usize> {
quantile: f64,
observations: arrayvec::ArrayVec<NotNan<f64>, N>,
}
impl<const N: usize> P2QuantileLinear<N> {
/// Simple linear-interpolated sample quantile
fn estimate(mut self) -> f64 {
estimate_quantile_from_slice(&mut self.observations, self.quantile)
}
}
fn estimate_quantile_from_slice(observations: &mut [NotNan<f64>], quantile: f64) -> f64 {
if observations.is_empty() {
// No data
return 0.0;
}
if observations.len() == 1 {
return *observations[0];
}
observations.sort_unstable();
let k = quantile * (observations.len() as f64 - 1.0);
let lo = k.floor() as usize;
let hi = k.ceil() as usize;
if lo == hi {
*observations[lo]
} else {
let frac = k - lo as f64;
*observations[lo] + frac * (*observations[hi] - *observations[lo])
}
}
#[cfg(test)]
mod tests {
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::{Poisson, StandardNormal, StudentT};
use super::*;
const N: usize = 7;
const COUNT: usize = 10_000;
#[test]
fn test_p_square() {
// Test P2 quantile estimator on uniformly distributed data
const QUANTILE: f64 = 0.99;
// For a uniform distribution, the theoretical value of the quantile is equal to the quantile level
const THEORETICAL_VALUE: f64 = QUANTILE;
const ERROR: f64 = 1e-2;
let mut p2 = P2Quantile::<N>::new(QUANTILE).unwrap();
let mut rng = StdRng::seed_from_u64(42);
let mut data = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let value = rng.random::<f64>();
data.push(value.try_into().unwrap());
p2.push(value);
}
// Take P square estimation
let p = p2.estimate();
// Compare with linear estimation
let linear_p = estimate_quantile_from_slice(data.as_mut_slice(), QUANTILE);
assert!((p - linear_p).abs() < ERROR);
// Compare with theoretical value
assert!((p - THEORETICAL_VALUE).abs() < ERROR);
}
#[test]
fn test_p_square_normal() {
// Test P2 quantile estimator on normally N(0, 1) distributed data
// Take percentile corresponding to 2 standard deviations (2 sigmas)
// It's approximately the 97.72 percentile
const QUANTILE: f64 = 0.9772;
// The theoretical value of 97.72 percentile for N(0, 1) is approximately 2 sigmas, i.e., 2.0
const THEORETICAL_VALUE: f64 = 2.0;
const ERROR: f64 = 0.1; // allow 5% error (0.1 / 2.0 = 0.05 = 5%)
let mut p2 = P2Quantile::<N>::new(QUANTILE).unwrap();
let mut rng = StdRng::seed_from_u64(42);
let mut data = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let value: f64 = rng.sample(StandardNormal);
data.push(value.try_into().unwrap());
p2.push(value);
}
// Take P square estimation
let p = p2.estimate();
// Compare with linear estimation
let linear_p = estimate_quantile_from_slice(data.as_mut_slice(), QUANTILE);
assert!((p - linear_p).abs() < ERROR);
// Compare with theoretical value
assert!((p - THEORETICAL_VALUE).abs() < ERROR);
}
#[test]
fn test_p_square_normal_low() {
// Same as test_p_square_normal but with 100 - 97.72 = 2.28 percentile
// Test P2 quantile estimator on normally N(0, 1) distributed data
// Take percentile corresponding to -2 standard deviations (-2 sigmas)
// It's approximately the 2.28 percentile
const QUANTILE: f64 = 0.0228;
// The theoretical value of 2.28 percentile for N(0, 1) is approximately -2 sigmas, i.e., -2.0
const THEORETICAL_VALUE: f64 = -2.0;
const ERROR: f64 = 0.1; // allow 5% error (0.1 / 2.0 = 0.05 = 5%)
let mut p2 = P2Quantile::<N>::new(QUANTILE).unwrap();
let mut rng = StdRng::seed_from_u64(42);
let mut data = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let value: f64 = rng.sample(StandardNormal);
data.push(value.try_into().unwrap());
p2.push(value);
}
// Take P square estimation
let p = p2.estimate();
// Compare with linear estimation
let linear_p = estimate_quantile_from_slice(data.as_mut_slice(), QUANTILE);
assert!((p - linear_p).abs() < ERROR);
// Compare with theoretical value
assert!((p - THEORETICAL_VALUE).abs() < ERROR);
}
#[test]
fn test_p_square_poisson() {
// Take Poisson-distributed data with mean 2. It's a case of a non-symmetric, non-normal distribution.
const QUANTILE: f64 = 0.99;
// The theoretical value of the 99th percentile is 6.0
const THEORETICAL_VALUE: f64 = 6.0;
const ERROR: f64 = 0.3; // allow 5% error (0.3 / 6.0 = 0.05 = 5%)
let mut p2 = P2Quantile::<N>::new(QUANTILE).unwrap();
let mut rng = StdRng::seed_from_u64(42);
let mut data = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let value = rng.sample(Poisson::new(2.0).unwrap());
data.push(value.try_into().unwrap());
p2.push(value);
}
// Take P square estimation
let p = p2.estimate();
// Compare with linear estimation
let linear_p = estimate_quantile_from_slice(data.as_mut_slice(), QUANTILE);
assert!((p - linear_p).abs() < ERROR);
// Compare with theoretical value
assert!((p - THEORETICAL_VALUE).abs() < ERROR);
}
#[test]
fn test_p_square_student() {
// Corner case test with Student t-distribution with low degrees of freedom (heavy tails)
// StudentT-distributed data with 2 degrees of freedom has heavy tails and infinite variance.
const QUANTILE: f64 = 0.99;
// The theoretical value of the 99th percentile is approximately 6.9646
const THEORETICAL_VALUE: f64 = 6.9646;
const ERROR: f64 = 0.69646; // 10% error because of heavy tails
let mut p2 = P2Quantile::<N>::new(QUANTILE).unwrap();
let mut rng = StdRng::seed_from_u64(42);
let mut data = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let value = rng.sample(StudentT::new(2.0).unwrap());
data.push(value.try_into().unwrap());
p2.push(value);
}
// Take P square estimation
let p = p2.estimate();
// Compare with linear estimation
let linear_p = estimate_quantile_from_slice(data.as_mut_slice(), QUANTILE);
assert!((p - linear_p).abs() < ERROR);
// Compare with theoretical value
assert!((p - THEORETICAL_VALUE).abs() < ERROR);
}
#[test]
fn test_p_square_zeros() {
let mut p2 = P2Quantile::<N>::new(0.99).unwrap();
for _ in 0..COUNT {
p2.push(0.0);
}
// Take P square estimation
let p = p2.estimate();
// Should be exactly zero
assert_eq!(p, 0.0);
}
#[test]
fn test_p_square_linear() {
let mut p2 = P2Quantile::<N>::new(0.99).unwrap();
p2.push(0.0);
p2.push(0.0);
p2.push(0.0);
// Take P square estimation
let p = p2.estimate();
// Should be exactly zero
assert_eq!(p, 0.0);
}
#[test]
fn test_p_square_extended_grid() {
// Check increasing order
let grid = P2QuantileImpl::<7>::generate_grid_probabilities(0.99);
for i in 1..grid.len() {
assert!(grid[i] > grid[i - 1]);
}
// Check increasing order
let grid = P2QuantileImpl::<9>::generate_grid_probabilities(0.99);
for i in 1..grid.len() {
assert!(grid[i] > grid[i - 1]);
}
// Check increasing order
let grid = P2QuantileImpl::<11>::generate_grid_probabilities(0.99);
for i in 1..grid.len() {
assert!(grid[i] > grid[i - 1]);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/kmeans.rs | lib/quantization/src/kmeans.rs | use std::sync::atomic::{AtomicBool, Ordering};
use rand::Rng;
use rayon::ThreadPool;
use rayon::prelude::*;
use crate::EncodingError;
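/// Run k-means over `data`, interpreted as `data.len() / dim` flattened vectors of length `dim`.
/// Returns `centroids_count * dim` flattened centroid values.
///
/// A minimal usage sketch (all parameter values are illustrative):
/// ```ignore
/// use std::sync::atomic::AtomicBool;
/// // four 2-dimensional vectors, flattened row by row
/// let data = vec![0.0, 0.0, 0.1, 0.1, 1.0, 1.0, 1.1, 1.1];
/// let stopped = AtomicBool::new(false);
/// let centroids = kmeans(&data, 2, 2, 10, 1, 1e-5, &stopped)?;
/// assert_eq!(centroids.len(), 2 * 2); // centroids_count * dim
/// ```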
pub fn kmeans(
data: &[f32],
centroids_count: usize,
dim: usize,
max_iterations: usize,
max_threads: usize,
accuracy: f32,
stopped: &AtomicBool,
) -> Result<Vec<f32>, EncodingError> {
let pool = rayon::ThreadPoolBuilder::new()
.thread_name(|idx| format!("kmeans-{idx}"))
.num_threads(max_threads)
.build()
.map_err(|e| {
EncodingError::EncodingError(format!("Failed PQ encoding while thread pool init: {e}"))
})?;
// initial centroid positions are taken from the first vectors of the data
let mut centroids = data[0..centroids_count * dim].to_vec();
let mut centroid_indexes = vec![0u32; data.len() / dim];
for _ in 0..max_iterations {
if stopped.load(Ordering::Relaxed) {
return Err(EncodingError::Stopped);
}
update_indexes(&pool, data, &mut centroid_indexes, ¢roids);
if update_centroids(
&pool,
data,
¢roid_indexes,
&mut centroids,
max_threads,
accuracy,
) {
break;
}
}
update_indexes(&pool, data, &mut centroid_indexes, ¢roids);
Ok(centroids)
}
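/// Recompute centroid positions as the mean of their assigned vectors (empty clusters are
/// re-seeded with a random vector). Returns `true` when the total centroid movement is
/// below `accuracy`, i.e. the iteration has converged.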
fn update_centroids(
pool: &ThreadPool,
data: &[f32],
centroid_indexes: &[u32],
centroids: &mut [f32],
max_threads: usize,
accuracy: f32,
) -> bool {
struct CentroidsCounter {
counter: Vec<usize>,
acc: Vec<f64>,
}
let dim = data.len() / centroid_indexes.len();
let centroids_count = centroids.len() / dim;
let mut counters = (0..max_threads)
.map(|_| CentroidsCounter {
counter: vec![0usize; centroids_count],
acc: vec![0.0_f64; centroids.len()],
})
.collect::<Vec<_>>();
pool.install(|| {
counters
.par_iter_mut()
.enumerate()
.for_each(|(i, counter)| {
let chunk_size = centroid_indexes.len() / max_threads;
let vector_data_range = if i + 1 == max_threads {
chunk_size * i..centroid_indexes.len()
} else {
chunk_size * i..chunk_size * (i + 1)
};
for i in vector_data_range {
let vector_data = &data[dim * i..dim * (i + 1)];
let centroid_index = centroid_indexes[i] as usize;
counter.counter[centroid_index] += 1;
let centroid_data =
&mut counter.acc[dim * centroid_index..dim * (centroid_index + 1)];
for (c, v) in centroid_data.iter_mut().zip(vector_data.iter()) {
*c += f64::from(*v);
}
}
})
});
let mut counter = CentroidsCounter {
counter: vec![0usize; centroids_count],
acc: vec![0.0_f64; centroids.len()],
};
for c in counters {
for (dst, src) in counter.counter.iter_mut().zip(c.counter.iter()) {
*dst += src;
}
for (dst, src) in counter.acc.iter_mut().zip(c.acc.iter()) {
*dst += src;
}
}
for (centroid_index, centroid_data) in counter.acc.chunks_exact_mut(dim).enumerate() {
if counter.counter[centroid_index] == 0 {
// the cluster is empty, so we take a random vector as the centroid
let data_index = rand::rng().random_range(0..centroid_indexes.len());
let vector = &data[dim * data_index..dim * (data_index + 1)];
centroid_data
.iter_mut()
.zip(vector.iter())
.for_each(|(c, v)| *c = f64::from(*v));
} else {
let count = counter.counter[centroid_index] as f64;
centroid_data.iter_mut().for_each(|c| *c /= count);
}
}
let diff = centroids
.iter_mut()
.zip(counter.acc.iter())
.map(|(c, c_acc)| {
let c_acc = *c_acc as f32;
let value = (*c - c_acc).abs();
*c = c_acc;
value
})
.sum::<f32>();
diff < accuracy
}
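/// Assign each vector to its nearest centroid using squared Euclidean distance.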
fn update_indexes(
pool: &ThreadPool,
data: &[f32],
centroid_indexes: &mut [u32],
centroids: &[f32],
) {
let dim = data.len() / centroid_indexes.len();
pool.install(|| {
centroid_indexes
.par_iter_mut()
.enumerate()
.for_each(|(i, c)| {
let vector_data = &data[dim * i..dim * (i + 1)];
let mut min_distance = f32::MAX;
let mut min_centroid_index = 0;
for (centroid_index, centroid_data) in centroids.chunks_exact(dim).enumerate() {
let distance = vector_data
.iter()
.zip(centroid_data.iter())
.map(|(a, b)| (a - b).powi(2))
.sum();
if distance < min_distance {
min_distance = distance;
min_centroid_index = centroid_index;
}
}
*c = min_centroid_index as u32;
})
});
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/encoded_vectors_pq.rs | lib/quantization/src/encoded_vectors_pq.rs | use std::alloc::Layout;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use std::arch::aarch64::*;
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
use std::iter::repeat_with;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::True;
use common::types::PointOffsetType;
use fs_err as fs;
use io::file_operations::atomic_save_json;
use memory::mmap_type::MmapFlusher;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use crate::encoded_storage::{EncodedStorage, EncodedStorageBuilder};
use crate::encoded_vectors::{EncodedVectors, VectorParameters, validate_vector_parameters};
use crate::kmeans::kmeans;
use crate::{ConditionalVariable, EncodingError};
pub const KMEANS_SAMPLE_SIZE: usize = 10_000;
pub const KMEANS_MAX_ITERATIONS: usize = 100;
pub const KMEANS_ACCURACY: f32 = 1e-5;
pub const CENTROIDS_COUNT: usize = 256;
pub struct EncodedVectorsPQ<TStorage: EncodedStorage> {
encoded_vectors: TStorage,
metadata: Metadata,
metadata_path: Option<PathBuf>,
}
/// PQ lookup table.
/// The lookup table stores the distance from each query chunk to
/// each centroid associated with that chunk.
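/// Entries are laid out chunk by chunk, so index `chunk_index * centroids_count + centroid_index`
/// addresses the distance for a given chunk/centroid pair.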
pub struct EncodedQueryPQ {
lut: Vec<f32>,
}
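/// PQ encoding parameters, saved as JSON when a `meta_path` is provided to `encode`.
/// Each entry of `centroids` is a full-dimensional centroid flattened chunk by chunk,
/// so `vector_division[i]` selects the sub-centroid used for chunk `i`.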
#[derive(Serialize, Deserialize)]
pub struct Metadata {
pub centroids: Vec<Vec<f32>>,
pub vector_division: Vec<Range<usize>>,
pub vector_parameters: VectorParameters,
}
impl<TStorage: EncodedStorage> EncodedVectorsPQ<TStorage> {
pub fn storage(&self) -> &TStorage {
&self.encoded_vectors
}
/// Encode vector data using product quantization.
///
/// # Arguments
/// * `data` - iterator over original vector data
/// * `storage_builder` - encoding result storage builder
/// * `vector_parameters` - parameters of original vector data (dimension, distance, etc)
/// * `count` - count of vectors provided by `data`
/// * `chunk_size` - Max size of the f32 chunk that is replaced by a centroid index (in original vector dimensions)
/// * `max_kmeans_threads` - Max allowed threads for the kmeans and encoding process
/// * `meta_path` - Optional path where the encoding metadata is saved as JSON
/// * `stopped` - Atomic bool that indicates if encoding should be stopped
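///
/// A minimal usage sketch with the in-memory test storage (all values are illustrative,
/// and `vectors` / `params: VectorParameters` are assumed to describe 8-dimensional data):
/// ```ignore
/// use std::sync::atomic::AtomicBool;
/// // 8 dims with chunk_size = 2 gives 4 chunks, i.e. 4 bytes per encoded vector
/// let builder = TestEncodedStorageBuilder::new(None, 4);
/// let pq = EncodedVectorsPQ::encode(
///     vectors.iter(),
///     builder,
///     &params,
///     vectors.len(),
///     2,                       // chunk_size
///     1,                       // max_kmeans_threads
///     None,                    // meta_path: skip persisting metadata
///     &AtomicBool::new(false),
/// )?;
/// ```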
#[allow(clippy::too_many_arguments)]
pub fn encode<'a>(
data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone + Send,
mut storage_builder: impl EncodedStorageBuilder<Storage = TStorage> + Send,
vector_parameters: &VectorParameters,
count: usize,
chunk_size: usize,
max_kmeans_threads: usize,
meta_path: Option<&Path>,
stopped: &AtomicBool,
) -> Result<Self, EncodingError> {
debug_assert!(validate_vector_parameters(data.clone(), vector_parameters).is_ok());
// first, divide vector into chunks
let vector_division = Self::get_vector_division(vector_parameters.dim, chunk_size);
// then, find flattened centroid positions
let centroids = Self::find_centroids(
data.clone(),
&vector_division,
vector_parameters,
count,
CENTROIDS_COUNT,
max_kmeans_threads,
stopped,
)?;
// finally, encode data
Self::encode_storage(
data,
&mut storage_builder,
&vector_division,
¢roids,
max_kmeans_threads,
stopped,
)?;
let encoded_vectors = storage_builder
.build()
.map_err(|e| EncodingError::EncodingError(format!("Failed to build storage: {e}",)))?;
let metadata = Metadata {
centroids,
vector_division,
vector_parameters: vector_parameters.clone(),
};
if let Some(meta_path) = meta_path {
meta_path
.parent()
.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Path must have a parent directory",
)
})
.and_then(fs::create_dir_all)
.map_err(|e| {
EncodingError::EncodingError(format!(
"Failed to create metadata directory: {e}",
))
})?;
atomic_save_json(meta_path, &metadata).map_err(|e| {
EncodingError::EncodingError(format!("Failed to save metadata: {e}",))
})?;
}
if !stopped.load(Ordering::Relaxed) {
Ok(Self {
encoded_vectors,
metadata,
metadata_path: meta_path.map(PathBuf::from),
})
} else {
Err(EncodingError::Stopped)
}
}
pub fn load(encoded_vectors: TStorage, meta_path: &Path) -> std::io::Result<Self> {
let contents = fs::read_to_string(meta_path)?;
let metadata: Metadata = serde_json::from_str(&contents)?;
let result = Self {
encoded_vectors,
metadata,
metadata_path: Some(meta_path.to_path_buf()),
};
Ok(result)
}
pub fn get_quantized_vector_size(
vector_parameters: &VectorParameters,
chunk_size: usize,
) -> usize {
(0..vector_parameters.dim).step_by(chunk_size).count()
}
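/// Split `dim` dimensions into consecutive chunk ranges of at most `chunk_size` elements.
/// For example, `dim = 10` and `chunk_size = 3` produce `[0..3, 3..6, 6..9, 9..10]`.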
fn get_vector_division(dim: usize, chunk_size: usize) -> Vec<Range<usize>> {
(0..dim)
.step_by(chunk_size)
.map(|i| i..std::cmp::min(i + chunk_size, dim))
.collect()
}
/// Encode whole storage
///
/// # Arguments
/// * `data` - Original vector data iterator
/// * `storage_builder` - Builder of encoded data container
/// * `vector_division` - Division of original vector into chunks
/// * `centroids` - Centroid positions (flattened by chunks; for similarity to vector data format)
/// * `max_threads` - Max allowed threads for encoding process
/// * `stopped` - Atomic bool that indicates if encoding should be stopped
///
/// # Lifetimes
/// 'a is the lifetime of a vector in the vector storage
/// 'b is the lifetime of the parent scope
fn encode_storage<'a: 'b, 'b>(
data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone + Send + 'b,
storage_builder: &'b mut (impl EncodedStorageBuilder<Storage = TStorage> + Send),
vector_division: &'b [Range<usize>],
centroids: &'b [Vec<f32>],
max_threads: usize,
stopped: &AtomicBool,
) -> Result<(), EncodingError> {
rayon::ThreadPoolBuilder::new()
.thread_name(|idx| format!("pq-encoding-{idx}"))
.num_threads(std::cmp::max(1, max_threads))
.build()
.map_err(|e| {
EncodingError::EncodingError(format!(
"Failed PQ encoding while thread pool init: {e}"
))
})?
.scope(|s| {
Self::encode_storage_rayon(
s,
data,
storage_builder,
vector_division,
centroids,
max_threads,
stopped,
)
})
}
/// Encode whole storage inside rayon context
/// This function should be called inside `rayon::scope`
fn encode_storage_rayon<'a: 'b, 'b>(
scope: &rayon::Scope<'b>,
data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone + Send + 'b,
storage_builder: &'b mut (impl EncodedStorageBuilder<Storage = TStorage> + Send),
vector_division: &'b [Range<usize>],
centroids: &'b [Vec<f32>],
max_threads: usize,
stopped: &'b AtomicBool,
) -> Result<(), EncodingError> {
let storage_builder = Arc::new(Mutex::new(storage_builder));
// Synchronization between threads. Each thread gets its own conditional variable;
// all of them start blocked except the first one.
// While encoding, thread `N` blocks itself after using `storage_builder` and
// unblocks thread `N+1`.
// In summary, access to `storage_builder` is ordered by `thread_index` below.
let mut condvars: Vec<ConditionalVariable> =
repeat_with(Default::default).take(max_threads).collect();
condvars[0].notify(); // Allow first thread to use storage
let error = Arc::new(Mutex::new(None));
for thread_index in 0..max_threads {
// This thread processes vectors `N` with `N % max_threads == thread_index`.
let data = data.clone().skip(thread_index);
let storage_builder = storage_builder.clone();
let condvar = condvars[thread_index].clone();
let next_condvar = condvars[(thread_index + 1) % max_threads].clone();
let error = error.clone();
scope.spawn(move |_| {
let mut encoded_vector = Vec::with_capacity(vector_division.len());
for vector in data.step_by(max_threads) {
if stopped.load(Ordering::Relaxed) {
return;
}
Self::encode_vector(
vector.as_ref(),
vector_division,
centroids,
&mut encoded_vector,
);
// wait for permission from prev thread to use storage
let is_disconnected = condvar.wait();
// push encoded vector to storage
let insert_result = storage_builder.lock().push_vector_data(&encoded_vector);
// Check for errors
if let Err(e) = insert_result {
let mut error = error.lock();
*error = Some(EncodingError::EncodingError(format!(
"Failed to push encoded vector: {e}",
)));
// Notify the next thread so it can exit as well
next_condvar.notify();
return;
}
// Notify next thread to use storage
next_condvar.notify();
if is_disconnected {
return;
}
}
});
}
// free condvars to allow threads to exit when panicking
condvars.clear();
if let Some(error) = error.lock().take() {
Err(error)
} else {
Ok(())
}
}
/// Encode a single vector from `&[f32]` into `&[u8]`.
/// This method divides `vector_data` into chunks and, for each chunk,
/// finds the nearest centroid and replaces the whole chunk with the nearest centroid index.
///
/// # Arguments
/// * `vector_data` - Original vector data
/// * `vector_division` - Division of original vector into chunks
/// * `centroids` - Centroid positions (flattened by chunks; for similarity to vector data format)
/// * `encoded_vector` - Encoded result as a preallocated vector
fn encode_vector(
vector_data: &[f32],
vector_division: &[Range<usize>],
centroids: &[Vec<f32>],
encoded_vector: &mut Vec<u8>,
) {
encoded_vector.clear();
for range in vector_division {
let subvector_data = &vector_data[range.clone()];
let mut min_distance = f32::MAX;
let mut min_centroid_index = 0;
for (centroid_index, centroid) in centroids.iter().enumerate() {
// because centroids are flattened by chunks, take centroid position using `range`
let centroid_data = ¢roid[range.clone()];
// per the product quantization algorithm, use the Euclidean metric regardless of the similarity function
let distance = subvector_data
.iter()
.zip(centroid_data)
.map(|(a, b)| (a - b).powi(2))
.sum();
if distance < min_distance {
min_distance = distance;
min_centroid_index = centroid_index;
}
}
// encoding: replace the whole chunk `range` with a single `u8` index of the nearest centroid
encoded_vector.push(min_centroid_index as u8);
}
}
/// Find centroids for each vector chunk by running k-means over a random sample of the data.
/// If there are fewer vectors than `centroids_count`, the vectors themselves are used as
/// centroids and the remaining slots are filled with zero vectors.
///
/// # Arguments
/// * `data` - Original vector data
/// * `vector_division` - Division of original vector into chunks
/// * `vector_parameters` - parameters of original vector data (dimension, distance, etc)
/// * `centroids_count` - Count of centroids for each chunk
/// * `max_kmeans_threads` - Max allowed threads for kmeans process
/// * `stopped` - Atomic bool that indicates if encoding should be stopped
fn find_centroids<'a>(
data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
vector_division: &[Range<usize>],
vector_parameters: &VectorParameters,
count: usize,
centroids_count: usize,
max_kmeans_threads: usize,
stopped: &AtomicBool,
) -> Result<Vec<Vec<f32>>, EncodingError> {
let sample_size = KMEANS_SAMPLE_SIZE.min(count);
let mut result = vec![vec![]; centroids_count];
// if there are not enough vectors, set centroids as point positions
if count <= centroids_count {
for (i, vector_data) in data.into_iter().enumerate() {
result[i] = vector_data.as_ref().to_vec();
}
// fill empty centroids just with zeros
result[count..centroids_count].fill(vec![0.0; vector_parameters.dim]);
return Ok(result);
}
// find random subset of data as random non-intersected indexes
let permutor = permutation_iterator::Permutor::new(count as u64);
let mut selected_vectors: Vec<usize> =
permutor.map(|i| i as usize).take(sample_size).collect();
if stopped.load(Ordering::Relaxed) {
return Err(EncodingError::Stopped);
}
selected_vectors.sort_unstable();
// find centroids for each chunk
for range in vector_division.iter() {
            // take data subset using the selected random indexes
let mut data_subset = Vec::with_capacity(sample_size * range.len());
let mut selected_index: usize = 0;
for (vector_index, vector_data) in data.clone().enumerate() {
let vector_data = vector_data.as_ref();
if vector_index == selected_vectors[selected_index] {
data_subset.extend_from_slice(&vector_data[range.clone()]);
selected_index += 1;
if selected_index == sample_size {
break;
}
}
}
let centroids = kmeans(
&data_subset,
centroids_count,
range.len(),
KMEANS_MAX_ITERATIONS,
max_kmeans_threads,
KMEANS_ACCURACY,
stopped,
)?;
// push found chunk centroids into result
for (centroid_index, centroid_data) in centroids.chunks_exact(range.len()).enumerate() {
result[centroid_index].extend_from_slice(centroid_data);
}
}
Ok(result)
}
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse4.1")]
unsafe fn score_point_sse(&self, query: &EncodedQueryPQ, centroids: &[u8]) -> f32 {
unsafe {
let len = centroids.len();
let centroids_count = self.metadata.centroids.len();
let mut centroids = centroids.as_ptr();
let mut lut = query.lut.as_ptr();
let mut sum128: __m128 = _mm_setzero_ps();
for _ in 0..len / 4 {
let buffer = [
*lut.add(*centroids as usize),
*lut.add(centroids_count + *centroids.add(1) as usize),
*lut.add(2 * centroids_count + *centroids.add(2) as usize),
*lut.add(3 * centroids_count + *centroids.add(3) as usize),
];
let c = _mm_loadu_ps(buffer.as_ptr());
sum128 = _mm_add_ps(sum128, c);
centroids = centroids.add(4);
lut = lut.add(4 * centroids_count);
}
let sum64: __m128 = _mm_add_ps(sum128, _mm_movehl_ps(sum128, sum128));
let sum32: __m128 = _mm_add_ss(sum64, _mm_shuffle_ps(sum64, sum64, 0x55));
let mut sum = _mm_cvtss_f32(sum32);
for _ in 0..len % 4 {
sum += *lut.add(*centroids as usize);
centroids = centroids.add(1);
lut = lut.add(centroids_count);
}
sum
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
unsafe fn score_point_neon(&self, query: &EncodedQueryPQ, centroids: &[u8]) -> f32 {
unsafe {
let len = centroids.len();
let centroids_count = self.metadata.centroids.len();
let mut centroids = centroids.as_ptr();
let mut lut = query.lut.as_ptr();
let mut sum128 = vdupq_n_f32(0.);
for _ in 0..len / 4 {
let buffer = [
*lut.add(*centroids as usize),
*lut.add(centroids_count + *centroids.add(1) as usize),
*lut.add(2 * centroids_count + *centroids.add(2) as usize),
*lut.add(3 * centroids_count + *centroids.add(3) as usize),
];
let c = vld1q_f32(buffer.as_ptr());
sum128 = vaddq_f32(sum128, c);
centroids = centroids.add(4);
lut = lut.add(4 * centroids_count);
}
let mut sum = vaddvq_f32(sum128);
for _ in 0..len % 4 {
sum += *lut.add(*centroids as usize);
centroids = centroids.add(1);
lut = lut.add(centroids_count);
}
sum
}
}
fn score_point_simple(&self, query: &EncodedQueryPQ, centroids: &[u8]) -> f32 {
let len = centroids.len();
let centroids_count = self.metadata.centroids.len();
let mut centroids = centroids.as_ptr();
let mut lut = query.lut.as_ptr();
(0..len)
.map(|_| unsafe {
let value = *lut.add(*centroids as usize);
centroids = centroids.add(1);
lut = lut.add(centroids_count);
value
})
.sum()
}
pub fn get_quantized_vector(&self, i: PointOffsetType) -> &[u8] {
self.encoded_vectors.get_vector_data(i)
}
pub fn layout(&self) -> Layout {
Layout::from_size_align(self.metadata.vector_division.len(), align_of::<u8>()).unwrap()
}
pub fn get_metadata(&self) -> &Metadata {
&self.metadata
}
}
impl<TStorage: EncodedStorage> EncodedVectors for EncodedVectorsPQ<TStorage> {
type EncodedQuery = EncodedQueryPQ;
fn is_on_disk(&self) -> bool {
self.encoded_vectors.is_on_disk()
}
fn encode_query(&self, query: &[f32]) -> EncodedQueryPQ {
let lut_capacity = self.metadata.vector_division.len() * self.metadata.centroids.len();
let mut lut = Vec::with_capacity(lut_capacity);
for range in &self.metadata.vector_division {
let subquery = &query[range.clone()];
for i in 0..self.metadata.centroids.len() {
let centroid = &self.metadata.centroids[i];
let subcentroid = ¢roid[range.clone()];
let distance = self
.metadata
.vector_parameters
.distance_type
.distance(subquery, subcentroid);
let distance = if self.metadata.vector_parameters.invert {
-distance
} else {
distance
};
lut.push(distance);
}
}
EncodedQueryPQ { lut }
}
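    // Note (illustrative, not from the original source): the LUT is laid out chunk-major, so
    // `lut[chunk_index * centroids_count + centroid_index]` holds the (possibly inverted)
    // distance between the query sub-vector of `chunk_index` and that centroid. Scoring a
    // stored code is then `sum over chunks of lut[chunk * centroids_count + code[chunk]]`,
    // which is what the `score_point_*` implementations above compute.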
fn score_point(
&self,
query: &EncodedQueryPQ,
i: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
let centroids = self.encoded_vectors.get_vector_data(i);
self.score_bytes(True, query, centroids, hw_counter)
}
    /// Score two points inside the encoded data by their indexes.
    /// To find the score, this method decodes both encoded vectors.
    /// Decoding in PQ means replacing each centroid index with its centroid position.
fn score_internal(
&self,
i: PointOffsetType,
j: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
let centroids_i = self.encoded_vectors.get_vector_data(i);
let centroids_j = self.encoded_vectors.get_vector_data(j);
hw_counter
.vector_io_read()
.incr_delta(self.metadata.vector_division.len() * 2);
hw_counter.cpu_counter().incr_delta(
centroids_i.len()
// Chunk size
* self
.metadata
.vector_division
.first()
.map(|i| i.len())
.unwrap_or(1),
);
let distance: f32 = centroids_i
.iter()
.zip(centroids_j)
.enumerate()
.map(|(range_index, (&c_i, &c_j))| {
let range = &self.metadata.vector_division[range_index];
// get centroid positions and calculate distance as distance between centroids
let data_i = &self.metadata.centroids[c_i as usize][range.clone()];
let data_j = &self.metadata.centroids[c_j as usize][range.clone()];
self.metadata
.vector_parameters
.distance_type
.distance(data_i, data_j)
})
.sum();
if self.metadata.vector_parameters.invert {
-distance
} else {
distance
}
}
    fn quantized_vector_size(&self) -> usize {
        // `vector_division` size is equal to the quantized vector size because each chunk
        // is replaced by one `u8` centroid index.
        self.metadata.vector_division.len()
}
fn encode_internal_vector(&self, _id: PointOffsetType) -> Option<EncodedQueryPQ> {
// We cannot create query in PQ from quantized vector without LUT accuracy loss
None
}
fn upsert_vector(
&mut self,
_id: PointOffsetType,
_vector: &[f32],
_hw_counter: &HardwareCounterCell,
) -> std::io::Result<()> {
debug_assert!(false, "PQ does not support upsert_vector",);
Err(std::io::Error::new(
std::io::ErrorKind::Unsupported,
"PQ does not support upsert_vector",
))
}
fn vectors_count(&self) -> usize {
self.encoded_vectors.vectors_count()
}
fn flusher(&self) -> MmapFlusher {
self.encoded_vectors.flusher()
}
fn files(&self) -> Vec<PathBuf> {
let mut files = self.encoded_vectors.files();
if let Some(meta_path) = &self.metadata_path {
files.push(meta_path.clone());
}
files
}
fn immutable_files(&self) -> Vec<PathBuf> {
let mut files = self.encoded_vectors.immutable_files();
if let Some(meta_path) = &self.metadata_path {
files.push(meta_path.clone());
}
files
}
type SupportsBytes = True;
fn score_bytes(
&self,
_: Self::SupportsBytes,
query: &Self::EncodedQuery,
bytes: &[u8],
hw_counter: &HardwareCounterCell,
) -> f32 {
hw_counter
.cpu_counter()
.incr_delta(self.metadata.vector_division.len());
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.1") {
return unsafe { self.score_point_sse(query, bytes) };
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
return unsafe { self.score_point_neon(query, bytes) };
}
self.score_point_simple(query, bytes)
}
}
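// A self-contained sketch (not part of the original file) of the per-chunk
// nearest-centroid assignment that `encode_vector` above performs, written as a
// standalone test so it does not depend on any storage backend.
#[cfg(test)]
mod pq_encoding_example {
    use std::ops::Range;

    /// Squared-Euclidean nearest centroid for one chunk, mirroring `encode_vector`.
    fn nearest_centroid(chunk: &[f32], centroids: &[Vec<f32>], range: &Range<usize>) -> u8 {
        let mut best = (f32::MAX, 0usize);
        for (idx, centroid) in centroids.iter().enumerate() {
            let dist: f32 = chunk
                .iter()
                .zip(&centroid[range.clone()])
                .map(|(a, b)| (a - b).powi(2))
                .sum();
            if dist < best.0 {
                best = (dist, idx);
            }
        }
        best.1 as u8
    }

    #[test]
    fn chunks_are_replaced_by_centroid_indices() {
        let vector = [0.9_f32, 1.1, 0.1, -0.2];
        let vector_division = [0..2_usize, 2..4];
        // Two centroids, flattened over the full dimension as in `Metadata::centroids`.
        let centroids = vec![vec![0.0_f32; 4], vec![1.0_f32; 4]];
        let encoded: Vec<u8> = vector_division
            .iter()
            .map(|range| nearest_centroid(&vector[range.clone()], &centroids, range))
            .collect();
        // Chunk [0.9, 1.1] is closest to centroid 1, chunk [0.1, -0.2] to centroid 0.
        assert_eq!(encoded, vec![1_u8, 0]);
    }
}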
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/lib.rs | lib/quantization/src/lib.rs | pub mod encoded_storage;
pub mod encoded_vectors;
pub mod encoded_vectors_binary;
pub mod encoded_vectors_pq;
pub mod encoded_vectors_u8;
pub mod kmeans;
pub mod p_square;
pub mod quantile;
pub mod vector_stats;
use std::fmt::Display;
use std::sync::{Arc, Condvar, Mutex};
pub use encoded_storage::{EncodedStorage, EncodedStorageBuilder};
pub use encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
pub use encoded_vectors_pq::{EncodedQueryPQ, EncodedVectorsPQ};
pub use encoded_vectors_u8::{EncodedQueryU8, EncodedVectorsU8};
#[derive(Debug, PartialEq, Eq)]
pub enum EncodingError {
IOError(String),
EncodingError(String),
ArgumentsError(String),
Stopped,
}
impl Display for EncodingError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EncodingError::IOError(description) => write!(f, "IOError: {description}"),
EncodingError::EncodingError(description) => {
write!(f, "EncodingError: {description}")
}
EncodingError::ArgumentsError(description) => {
write!(f, "ArgumentsError: {description}")
}
EncodingError::Stopped => write!(f, "Stopped"),
}
}
}
#[derive(Default, PartialEq, Clone, Copy)]
enum ConditionalVariableState {
#[default]
Waiting,
Notified,
}
// ConditionalVariable is a wrapper around a mutex and a condvar
#[derive(Default, Clone)]
pub struct ConditionalVariable {
mutex: Arc<Mutex<ConditionalVariableState>>,
condvar: Arc<Condvar>,
}
impl ConditionalVariable {
pub fn wait(&self) -> bool {
let mut guard = self.mutex.lock().unwrap();
while *guard == ConditionalVariableState::Waiting && Arc::strong_count(&self.mutex) > 1 {
guard = self.condvar.wait(guard).unwrap();
}
*guard = ConditionalVariableState::Waiting;
Arc::strong_count(&self.mutex) == 1
}
pub fn notify(&self) {
*self.mutex.lock().unwrap() = ConditionalVariableState::Notified;
self.condvar.notify_all();
}
}
impl Drop for ConditionalVariable {
fn drop(&mut self) {
self.condvar.notify_all();
}
}
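// A minimal sketch (not part of the original file) of the intended hand-off pattern:
// a worker waits on its own `ConditionalVariable` until another thread notifies it,
// then passes the token on. Note that `notify` is state-based, so notifying before
// the waiter starts waiting is fine.
#[cfg(test)]
mod conditional_variable_example {
    use super::ConditionalVariable;

    #[test]
    fn notify_unblocks_waiter() {
        let first = ConditionalVariable::default();
        let second = ConditionalVariable::default();
        // Allow the (only) worker to start, like `condvars[0].notify()` in the PQ encoder.
        first.notify();

        std::thread::scope(|scope| {
            let first_for_worker = first.clone();
            let second_for_worker = second.clone();
            scope.spawn(move || {
                // Wait for our turn, then hand the token to the next condvar.
                let disconnected = first_for_worker.wait();
                assert!(!disconnected);
                second_for_worker.notify();
            });
        });

        // The worker has finished, so `second` is already in the notified state
        // and waiting on it returns immediately.
        let _ = second.wait();
    }
}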
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/quantile.rs | lib/quantization/src/quantile.rs | use permutation_iterator::Permutor;
pub const QUANTILE_SAMPLE_SIZE: usize = 100_000;
pub(crate) fn find_min_max_from_iter<'a>(
iter: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
) -> (f32, f32) {
iter.fold((f32::MAX, f32::MIN), |(mut min, mut max), vector| {
for &value in vector.as_ref() {
if value < min {
min = value;
}
if value > max {
max = value;
}
}
(min, max)
})
}
pub(crate) fn find_quantile_interval<'a>(
vector_data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
dim: usize,
count: usize,
quantile: f32,
) -> Option<(f32, f32)> {
if count < 127 || quantile >= 1.0 {
return None;
}
let slice_size = std::cmp::min(count, QUANTILE_SAMPLE_SIZE);
let permutor = Permutor::new(count as u64);
let mut selected_vectors: Vec<usize> = permutor.map(|i| i as usize).take(slice_size).collect();
selected_vectors.sort_unstable();
let mut data_slice = Vec::with_capacity(slice_size * dim);
let mut selected_index: usize = 0;
for (vector_index, vector_data) in vector_data.into_iter().enumerate() {
if vector_index == selected_vectors[selected_index] {
data_slice.extend_from_slice(vector_data.as_ref());
selected_index += 1;
if selected_index == slice_size {
break;
}
}
}
let data_slice_len = data_slice.len();
if data_slice_len < 4 {
return None;
}
let cut_index = std::cmp::min(
(data_slice_len - 1) / 2,
(slice_size as f32 * (1.0 - quantile) / 2.0) as usize,
);
let cut_index = std::cmp::max(cut_index, 1);
let comparator = |a: &f32, b: &f32| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal);
let (selected_values, _, _) =
data_slice.select_nth_unstable_by(data_slice_len - cut_index, comparator);
let (_, _, selected_values) = selected_values.select_nth_unstable_by(cut_index, comparator);
if selected_values.len() < 2 {
return None;
}
let selected_values = [selected_values];
Some(find_min_max_from_iter(
selected_values.iter().map(|v| &v[..]),
))
}
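// A small illustrative test module (not part of the original file); it only exercises
// the helpers defined above on tiny, hand-checkable inputs.
#[cfg(test)]
mod quantile_example {
    use super::{find_min_max_from_iter, find_quantile_interval};

    #[test]
    fn min_max_over_all_vectors() {
        let data = [vec![0.5_f32, -1.0, 2.0], vec![3.0, 0.0, -0.5]];
        let (min, max) = find_min_max_from_iter(data.iter().map(|v| v.as_slice()));
        assert_eq!((min, max), (-1.0, 3.0));
    }

    #[test]
    fn quantile_interval_requires_enough_vectors() {
        // Fewer than 127 vectors (or `quantile >= 1.0`) short-circuits to `None`.
        let data = vec![vec![0.0_f32; 4]; 8];
        let interval =
            find_quantile_interval(data.iter().map(|v| v.as_slice()), 4, data.len(), 0.99);
        assert!(interval.is_none());
    }
}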
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/encoded_vectors.rs | lib/quantization/src/encoded_vectors.rs | use std::path::PathBuf;
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::TBool;
use common::types::PointOffsetType;
use memory::mmap_type::MmapFlusher;
use serde::{Deserialize, Serialize};
use crate::EncodingError;
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum DistanceType {
Dot,
L1,
L2,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct VectorParameters {
pub dim: usize,
pub distance_type: DistanceType,
pub invert: bool,
// Deprecated, use `EncodedVectors::vectors_count` from quantization instead.
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "count")]
pub deprecated_count: Option<usize>,
}
pub trait EncodedVectors: Sized {
type EncodedQuery;
fn is_on_disk(&self) -> bool;
fn encode_query(&self, query: &[f32]) -> Self::EncodedQuery;
fn score_point(
&self,
query: &Self::EncodedQuery,
i: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32;
fn score_internal(
&self,
i: PointOffsetType,
j: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32;
/// Return size in bytes of a quantized vector
fn quantized_vector_size(&self) -> usize;
/// Construct a query from stored vector, so it can be used for scoring.
/// Some implementations may not support this, in which case they should return `None`.
fn encode_internal_vector(&self, id: PointOffsetType) -> Option<Self::EncodedQuery>;
fn upsert_vector(
&mut self,
id: PointOffsetType,
vector: &[f32],
hw_counter: &HardwareCounterCell,
) -> std::io::Result<()>;
fn vectors_count(&self) -> usize;
fn flusher(&self) -> MmapFlusher;
fn files(&self) -> Vec<PathBuf>;
fn immutable_files(&self) -> Vec<PathBuf>;
type SupportsBytes: TBool;
fn score_bytes(
&self,
enabled: Self::SupportsBytes,
query: &Self::EncodedQuery,
bytes: &[u8],
hw_counter: &HardwareCounterCell,
) -> f32;
}
impl DistanceType {
pub fn distance(&self, a: &[f32], b: &[f32]) -> f32 {
match self {
DistanceType::Dot => a.iter().zip(b).map(|(a, b)| a * b).sum(),
DistanceType::L1 => a.iter().zip(b).map(|(a, b)| (a - b).abs()).sum(),
DistanceType::L2 => a.iter().zip(b).map(|(a, b)| (a - b) * (a - b)).sum(),
}
}
}
pub(crate) fn validate_vector_parameters<'a>(
data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
vector_parameters: &VectorParameters,
) -> Result<(), EncodingError> {
let mut count = 0;
for vector in data {
let vector = vector.as_ref();
if vector.len() != vector_parameters.dim {
return Err(EncodingError::ArgumentsError(format!(
"Vector length {} does not match vector parameters dim {}",
vector.len(),
vector_parameters.dim
)));
}
count += 1;
}
if let Some(vectors_count) = vector_parameters.deprecated_count
&& count != vectors_count
{
return Err(EncodingError::ArgumentsError(format!(
"Vector count {count} does not match vector parameters count {vectors_count}"
)));
}
Ok(())
}
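// A minimal sketch (not part of the original file) showing how `DistanceType::distance`
// behaves for the three supported metrics on a small, hand-checkable pair of vectors.
#[cfg(test)]
mod distance_type_example {
    use super::DistanceType;

    #[test]
    fn distances_on_a_small_pair() {
        let a = [1.0_f32, 2.0, 3.0];
        let b = [0.0_f32, 2.0, 5.0];
        assert_eq!(DistanceType::Dot.distance(&a, &b), 19.0); // 0 + 4 + 15
        assert_eq!(DistanceType::L1.distance(&a, &b), 3.0); // 1 + 0 + 2
        assert_eq!(DistanceType::L2.distance(&a, &b), 5.0); // 1 + 0 + 4
    }
}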
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/encoded_vectors_u8.rs | lib/quantization/src/encoded_vectors_u8.rs | use std::alloc::Layout;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::True;
use common::types::PointOffsetType;
use fs_err as fs;
use io::file_operations::atomic_save_json;
use memory::mmap_type::MmapFlusher;
use serde::{Deserialize, Serialize};
use crate::EncodingError;
use crate::encoded_storage::{EncodedStorage, EncodedStorageBuilder};
use crate::encoded_vectors::{
DistanceType, EncodedVectors, VectorParameters, validate_vector_parameters,
};
use crate::quantile::{find_min_max_from_iter, find_quantile_interval};
pub const ALIGNMENT: usize = 16;
// Each encoded vector stores an additional f32 at the beginning. Define its size here.
const ADDITIONAL_CONSTANT_SIZE: usize = std::mem::size_of::<f32>();
#[derive(Clone, PartialEq, Debug)]
pub enum ScalarQuantizationMethod {
Int8,
// Future methods can be added here
}
pub struct EncodedVectorsU8<TStorage: EncodedStorage> {
encoded_vectors: TStorage,
metadata: Metadata,
metadata_path: Option<PathBuf>,
}
pub struct EncodedQueryU8 {
offset: f32,
encoded_query: Vec<u8>,
}
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum Metadata {
Int8(MetadataInt8),
}
impl Metadata {
pub fn vector_parameters(&self) -> &VectorParameters {
match self {
Metadata::Int8(meta) => &meta.vector_parameters,
}
}
pub fn actual_dim(&self) -> usize {
match self {
Metadata::Int8(meta) => meta.actual_dim,
}
}
pub fn postprocess_score(&self, score: f32, query_offset: f32, vector_offset: f32) -> f32 {
match self {
Metadata::Int8(metadata) => {
metadata.postprocess_score(score, query_offset, vector_offset)
}
}
}
pub fn postprocess_internal_score(
&self,
score: f32,
query_offset: f32,
vector_offset: f32,
) -> f32 {
match self {
Metadata::Int8(metadata) => {
metadata.postprocess_internal_score(score, query_offset, vector_offset)
}
}
}
}
#[derive(Serialize, Deserialize)]
struct MetadataInt8 {
actual_dim: usize,
alpha: f32,
offset: f32,
multiplier: f32,
vector_parameters: VectorParameters,
}
impl MetadataInt8 {
#[inline]
pub fn encode_value(&self, value: f32) -> u8 {
let i = (value - self.offset) / self.alpha;
i.clamp(0.0, 127.0).round() as u8
}
#[inline]
fn postprocess_score(&self, score: f32, query_offset: f32, vector_offset: f32) -> f32 {
self.multiplier * score + query_offset + vector_offset
}
#[inline]
fn postprocess_internal_score(
&self,
score: f32,
vector_1_offset: f32,
vector_2_offset: f32,
) -> f32 {
let query_offset = vector_1_offset - self.get_shift();
self.postprocess_score(score, query_offset, vector_2_offset)
}
fn get_shift(&self) -> f32 {
        // The dot product of shifted values contains a term that depends on neither the vector nor the query:
        // (x - a)(y - a) = xy - ax - ay + a^2
        // this a^2 term, summed over all dimensions, is what is returned here
// L2 is handled the same way as Dot here
let shift = match self.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => {
self.actual_dim as f32 * self.offset * self.offset
}
DistanceType::L1 => 0.0,
};
if self.vector_parameters.invert {
-shift
} else {
shift
}
}
}
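// Worked decomposition (a sketch, not from the original source) of the Dot case, ignoring
// the ALIGNMENT padding. With encoded bytes `x_i`, `y_i`, scale `alpha` and offset `a`, the
// decoded values are `alpha * x_i + a`, so the original dot product splits into the terms
// that `postprocess_score` re-assembles:
//   sum_i (alpha * x_i + a) * (alpha * y_i + a)
//     = alpha^2 * sum_i x_i * y_i      // `multiplier * score`
//     + alpha * a * sum_i x_i          // query offset
//     + alpha * a * sum_i y_i          // per-vector offset computed in `encode`
//     + actual_dim * a^2               // `get_shift()`, folded into the stored vector offset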
impl<TStorage: EncodedStorage> EncodedVectorsU8<TStorage> {
pub fn storage(&self) -> &TStorage {
&self.encoded_vectors
}
#[allow(clippy::too_many_arguments)]
pub fn encode<'a>(
orig_data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
mut storage_builder: impl EncodedStorageBuilder<Storage = TStorage>,
vector_parameters: &VectorParameters,
count: usize,
quantile: Option<f32>,
method: ScalarQuantizationMethod,
meta_path: Option<&Path>,
stopped: &AtomicBool,
) -> Result<Self, EncodingError> {
assert_eq!(method, ScalarQuantizationMethod::Int8);
let actual_dim = Self::get_actual_dim(vector_parameters);
if count == 0 {
let metadata = Metadata::Int8(MetadataInt8 {
actual_dim,
alpha: 0.0,
offset: 0.0,
multiplier: 0.0,
vector_parameters: vector_parameters.clone(),
});
if let Some(meta_path) = meta_path {
meta_path
.parent()
.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Path must have a parent directory",
)
})
.and_then(fs::create_dir_all)
.map_err(|e| {
EncodingError::EncodingError(format!(
"Failed to create metadata directory: {e}",
))
})?;
atomic_save_json(meta_path, &metadata).map_err(|e| {
EncodingError::EncodingError(format!("Failed to save metadata: {e}",))
})?;
}
return Ok(EncodedVectorsU8 {
encoded_vectors: storage_builder.build().map_err(|e| {
EncodingError::EncodingError(format!("Failed to build storage: {e}",))
})?,
metadata,
metadata_path: meta_path.map(PathBuf::from),
});
}
debug_assert!(validate_vector_parameters(orig_data.clone(), vector_parameters).is_ok());
let (alpha, offset) = Self::find_alpha_offset_size_dim(orig_data.clone());
let (alpha, offset) = if let Some(quantile) = quantile {
if let Some((min, max)) =
find_quantile_interval(orig_data.clone(), vector_parameters.dim, count, quantile)
{
Self::alpha_offset_from_min_max(min, max)
} else {
(alpha, offset)
}
} else {
(alpha, offset)
};
let multiplier = match vector_parameters.distance_type {
// (alpha*x - offset) * (alpha*y - offset) = alpha^2*x*y - alpha*offset*x - alpha*offset*y + offset^2
// multiplier is applied to xy term only, so we need to multiply score by alpha^2
DistanceType::Dot => alpha * alpha,
// |(alpha*x - offset) - (alpha*y - offset)| = alpha*|x - y|
// multiplier is applied to |x - y| term only, so we need to multiply score by alpha
DistanceType::L1 => alpha,
// ((alpha*x - offset) - (alpha*y - offset))^2 = alpha^2*(x - y)^2 = alpha^2*x^2 - 2*alpha^2*xy + alpha^2*y^2
// multiplier is applied to (x - y)^2 term only, so we need to multiply score by -2*alpha^2
DistanceType::L2 => -2.0 * alpha * alpha,
};
let multiplier = if vector_parameters.invert {
-multiplier
} else {
multiplier
};
let metadata = MetadataInt8 {
actual_dim,
alpha,
offset,
multiplier,
vector_parameters: vector_parameters.clone(),
};
for vector in orig_data {
if stopped.load(Ordering::Relaxed) {
return Err(EncodingError::Stopped);
}
let mut encoded_vector = Vec::with_capacity(actual_dim + ADDITIONAL_CONSTANT_SIZE);
encoded_vector.extend_from_slice(&f32::default().to_ne_bytes());
for &value in vector.as_ref() {
let encoded = metadata.encode_value(value);
encoded_vector.push(encoded);
}
if !vector_parameters.dim.is_multiple_of(ALIGNMENT) {
for _ in 0..(ALIGNMENT - vector_parameters.dim % ALIGNMENT) {
let placeholder = match vector_parameters.distance_type {
DistanceType::Dot => 0.0,
DistanceType::L1 | DistanceType::L2 => offset,
};
let encoded = metadata.encode_value(placeholder);
encoded_vector.push(encoded);
}
}
let vector_offset = match vector_parameters.distance_type {
DistanceType::Dot => {
let elements_sum = encoded_vector.iter().map(|&x| f32::from(x)).sum::<f32>();
elements_sum * alpha * offset
}
DistanceType::L1 => 0.0,
DistanceType::L2 => {
let elements_sqr_sum = encoded_vector
.iter()
.map(|&x| f32::from(x) * f32::from(x))
.sum::<f32>();
elements_sqr_sum * alpha * alpha
}
};
let vector_offset = if vector_parameters.invert {
-vector_offset
} else {
vector_offset
};
// apply `a^2` shift
let vector_offset = metadata.get_shift() + vector_offset;
encoded_vector[0..ADDITIONAL_CONSTANT_SIZE]
.copy_from_slice(&vector_offset.to_ne_bytes());
storage_builder
.push_vector_data(&encoded_vector)
.map_err(|e| {
EncodingError::EncodingError(format!("Failed to push encoded vector: {e}",))
})?;
}
let encoded_vectors = storage_builder
.build()
.map_err(|e| EncodingError::EncodingError(format!("Failed to build storage: {e}",)))?;
let metadata = Metadata::Int8(metadata);
if let Some(meta_path) = meta_path {
meta_path
.parent()
.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Path must have a parent directory",
)
})
.and_then(fs::create_dir_all)
.map_err(|e| {
EncodingError::EncodingError(format!(
"Failed to create metadata directory: {e}",
))
})?;
atomic_save_json(meta_path, &metadata).map_err(|e| {
EncodingError::EncodingError(format!("Failed to save metadata: {e}",))
})?;
}
Ok(EncodedVectorsU8 {
encoded_vectors,
metadata,
metadata_path: meta_path.map(PathBuf::from),
})
}
pub fn load(encoded_vectors: TStorage, meta_path: &Path) -> std::io::Result<Self> {
let contents = fs::read_to_string(meta_path)?;
let metadata: Metadata = serde_json::from_str(&contents)?;
let result = Self {
encoded_vectors,
metadata,
metadata_path: Some(meta_path.to_path_buf()),
};
Ok(result)
}
pub fn score_point_simple(&self, query: &EncodedQueryU8, bytes: &[u8]) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (vector_offset, v_ptr) = Self::parse_vec_data(bytes);
let q_ptr = query.encoded_query.as_ptr();
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => {
impl_score_dot(q_ptr, v_ptr, metadata.actual_dim)
}
DistanceType::L1 => impl_score_l1(q_ptr, v_ptr, metadata.actual_dim),
};
self.metadata
.postprocess_score(score as f32, query.offset, vector_offset)
}
}
}
pub fn score_point_simple_internal(&self, i: PointOffsetType, j: PointOffsetType) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (query_offset, q_ptr) = self.get_vec_ptr(i);
let (vector_offset, v_ptr) = self.get_vec_ptr(j);
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => {
impl_score_dot(q_ptr, v_ptr, metadata.actual_dim)
}
DistanceType::L1 => impl_score_l1(q_ptr, v_ptr, metadata.actual_dim),
};
self.metadata
.postprocess_internal_score(score as f32, query_offset, vector_offset)
}
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
pub fn score_point_neon(&self, query: &EncodedQueryU8, bytes: &[u8]) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (vector_offset, v_ptr) = Self::parse_vec_data(bytes);
let q_ptr = query.encoded_query.as_ptr();
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_neon(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_neon(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_score(score as f32, query.offset, vector_offset)
}
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
pub fn score_point_neon_internal(&self, i: PointOffsetType, j: PointOffsetType) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (query_offset, q_ptr) = self.get_vec_ptr(i);
let (vector_offset, v_ptr) = self.get_vec_ptr(j);
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_neon(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_neon(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_internal_score(score as f32, query_offset, vector_offset)
}
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn score_point_sse(&self, query: &EncodedQueryU8, bytes: &[u8]) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (vector_offset, v_ptr) = Self::parse_vec_data(bytes);
let q_ptr = query.encoded_query.as_ptr();
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_sse(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_sse(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_score(score as f32, query.offset, vector_offset)
}
}
}
#[cfg(target_arch = "x86_64")]
pub fn score_point_sse_internal(&self, i: PointOffsetType, j: PointOffsetType) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (query_offset, q_ptr) = self.get_vec_ptr(i);
let (vector_offset, v_ptr) = self.get_vec_ptr(j);
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_sse(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_sse(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_internal_score(score as f32, query_offset, vector_offset)
}
}
}
#[cfg(target_arch = "x86_64")]
pub fn score_point_avx(&self, query: &EncodedQueryU8, bytes: &[u8]) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (vector_offset, v_ptr) = Self::parse_vec_data(bytes);
let q_ptr = query.encoded_query.as_ptr();
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_avx(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_avx(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_score(score as f32, query.offset, vector_offset)
}
}
}
#[cfg(target_arch = "x86_64")]
pub fn score_point_avx_internal(&self, i: PointOffsetType, j: PointOffsetType) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => {
let (query_offset, q_ptr) = self.get_vec_ptr(i);
let (vector_offset, v_ptr) = self.get_vec_ptr(j);
let score = match metadata.vector_parameters.distance_type {
DistanceType::Dot | DistanceType::L2 => unsafe {
impl_score_dot_avx(q_ptr, v_ptr, metadata.actual_dim as u32)
},
DistanceType::L1 => unsafe {
impl_score_l1_avx(q_ptr, v_ptr, metadata.actual_dim as u32)
},
};
self.metadata
.postprocess_internal_score(score as f32, query_offset, vector_offset)
}
}
}
fn find_alpha_offset_size_dim<'a>(
orig_data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
) -> (f32, f32) {
let (min, max) = find_min_max_from_iter(orig_data);
Self::alpha_offset_from_min_max(min, max)
}
fn alpha_offset_from_min_max(min: f32, max: f32) -> (f32, f32) {
let alpha = (max - min) / 127.0;
let offset = min;
(alpha, offset)
}
#[inline]
fn parse_vec_data(data: &[u8]) -> (f32, *const u8) {
debug_assert!(data.len() >= ADDITIONAL_CONSTANT_SIZE);
unsafe {
let offset = data.as_ptr().cast::<f32>().read_unaligned();
let v_ptr = data.as_ptr().add(ADDITIONAL_CONSTANT_SIZE);
(offset, v_ptr)
}
}
#[inline]
fn get_vec_ptr(&self, i: PointOffsetType) -> (f32, *const u8) {
let data = self.encoded_vectors.get_vector_data(i);
Self::parse_vec_data(data)
}
pub fn get_quantized_vector(&self, i: PointOffsetType) -> &[u8] {
self.encoded_vectors.get_vector_data(i)
}
pub fn layout(&self) -> Layout {
Layout::from_size_align(self.quantized_vector_size(), align_of::<u8>()).unwrap()
}
pub fn get_quantized_vector_offset_and_code(&self, i: PointOffsetType) -> (f32, &[u8]) {
let (offset, v_ptr) = self.get_vec_ptr(i);
let vector_data_size = self.metadata.actual_dim();
let code = unsafe { std::slice::from_raw_parts(v_ptr, vector_data_size) };
(offset, code)
}
pub fn get_quantized_vector_size(vector_parameters: &VectorParameters) -> usize {
let actual_dim = Self::get_actual_dim(vector_parameters);
actual_dim + ADDITIONAL_CONSTANT_SIZE
}
pub fn get_multiplier(&self) -> f32 {
match &self.metadata {
Metadata::Int8(meta) => meta.multiplier,
}
}
pub fn get_shift(&self) -> f32 {
match &self.metadata {
Metadata::Int8(metadata) => metadata.get_shift(),
}
}
pub fn get_actual_dim(vector_parameters: &VectorParameters) -> usize {
vector_parameters.dim + (ALIGNMENT - vector_parameters.dim % ALIGNMENT) % ALIGNMENT
}
fn encode_int8_query(metadata: &MetadataInt8, query: &[f32]) -> EncodedQueryU8 {
let dim = query.len();
let mut query: Vec<_> = query.iter().map(|&v| metadata.encode_value(v)).collect();
if !dim.is_multiple_of(ALIGNMENT) {
for _ in 0..(ALIGNMENT - dim % ALIGNMENT) {
let placeholder = match metadata.vector_parameters.distance_type {
DistanceType::Dot => 0.0,
DistanceType::L1 | DistanceType::L2 => metadata.offset,
};
let encoded = metadata.encode_value(placeholder);
query.push(encoded);
}
}
let offset = match metadata.vector_parameters.distance_type {
DistanceType::Dot => {
let query_elements_sum = query.iter().map(|&x| f32::from(x)).sum::<f32>();
query_elements_sum * metadata.alpha * metadata.offset
}
DistanceType::L1 => 0.0,
DistanceType::L2 => {
let query_elements_sqr_sum = query
.iter()
.map(|&x| f32::from(x) * f32::from(x))
.sum::<f32>();
query_elements_sqr_sum * metadata.alpha * metadata.alpha
}
};
let offset = if metadata.vector_parameters.invert {
-offset
} else {
offset
};
EncodedQueryU8 {
offset,
encoded_query: query,
}
}
}
impl<TStorage: EncodedStorage> EncodedVectors for EncodedVectorsU8<TStorage> {
type EncodedQuery = EncodedQueryU8;
fn is_on_disk(&self) -> bool {
self.encoded_vectors.is_on_disk()
}
fn encode_query(&self, query: &[f32]) -> EncodedQueryU8 {
match &self.metadata {
Metadata::Int8(meta) => Self::encode_int8_query(meta, query),
}
}
fn score_point(
&self,
query: &EncodedQueryU8,
i: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
let bytes = self.encoded_vectors.get_vector_data(i);
self.score_bytes(True, query, bytes, hw_counter)
}
fn score_internal(
&self,
i: PointOffsetType,
j: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
hw_counter
.cpu_counter()
.incr_delta(self.metadata.vector_parameters().dim);
hw_counter
.vector_io_read()
.incr_delta(self.metadata.vector_parameters().dim * 2);
#[cfg(target_arch = "x86_64")]
if is_x86_feature_detected!("avx2") && is_x86_feature_detected!("fma") {
return self.score_point_avx_internal(i, j);
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.1") {
return self.score_point_sse_internal(i, j);
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
return self.score_point_neon_internal(i, j);
}
self.score_point_simple_internal(i, j)
}
fn quantized_vector_size(&self) -> usize {
        // `actual_dim` rounds the vector dimension up to the next multiple of ALIGNMENT.
        // Also account for the extra f32 offset stored at the start of each encoded vector.
match &self.metadata {
Metadata::Int8(_) => self.metadata.actual_dim() + ADDITIONAL_CONSTANT_SIZE,
}
}
fn encode_internal_vector(&self, id: PointOffsetType) -> Option<EncodedQueryU8> {
match &self.metadata {
Metadata::Int8(metadata) => {
let (vector_offset, q_ptr) = self.get_vec_ptr(id);
// Remove shift from offset because encoded query should not have it, it's contained in vector data only.
let query_offset = vector_offset - metadata.get_shift();
Some(EncodedQueryU8 {
offset: query_offset,
encoded_query: unsafe {
std::slice::from_raw_parts(q_ptr, metadata.actual_dim).to_vec()
},
})
}
}
}
fn upsert_vector(
&mut self,
_id: PointOffsetType,
_vector: &[f32],
_hw_counter: &HardwareCounterCell,
) -> std::io::Result<()> {
debug_assert!(false, "SQ does not support upsert_vector",);
Err(std::io::Error::new(
std::io::ErrorKind::Unsupported,
"SQ does not support upsert_vector",
))
}
fn vectors_count(&self) -> usize {
self.encoded_vectors.vectors_count()
}
fn flusher(&self) -> MmapFlusher {
self.encoded_vectors.flusher()
}
fn files(&self) -> Vec<PathBuf> {
let mut files = self.encoded_vectors.files();
if let Some(meta_path) = &self.metadata_path {
files.push(meta_path.clone());
}
files
}
fn immutable_files(&self) -> Vec<PathBuf> {
let mut files = self.encoded_vectors.immutable_files();
if let Some(meta_path) = &self.metadata_path {
files.push(meta_path.clone());
}
files
}
type SupportsBytes = True;
fn score_bytes(
&self,
_: Self::SupportsBytes,
query: &Self::EncodedQuery,
bytes: &[u8],
hw_counter: &HardwareCounterCell,
) -> f32 {
hw_counter
.cpu_counter()
.incr_delta(self.metadata.vector_parameters().dim);
debug_assert!(bytes.len() >= ADDITIONAL_CONSTANT_SIZE + self.metadata.actual_dim());
#[cfg(target_arch = "x86_64")]
if is_x86_feature_detected!("avx2") && is_x86_feature_detected!("fma") {
return self.score_point_avx(query, bytes);
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.1") {
return self.score_point_sse(query, bytes);
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
return self.score_point_neon(query, bytes);
}
self.score_point_simple(query, bytes)
}
}
fn impl_score_dot(q_ptr: *const u8, v_ptr: *const u8, actual_dim: usize) -> i32 {
unsafe {
let mut score = 0i32;
for i in 0..actual_dim {
score += i32::from(*q_ptr.add(i)) * i32::from(*v_ptr.add(i));
}
score
}
}
fn impl_score_l1(q_ptr: *const u8, v_ptr: *const u8, actual_dim: usize) -> i32 {
unsafe {
let mut score = 0i32;
for i in 0..actual_dim {
score += i32::from(*q_ptr.add(i)).abs_diff(i32::from(*v_ptr.add(i))) as i32;
}
score
}
}
#[cfg(target_arch = "x86_64")]
unsafe extern "C" {
fn impl_score_dot_avx(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
fn impl_score_l1_avx(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
fn impl_score_dot_sse(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
fn impl_score_l1_sse(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
unsafe extern "C" {
fn impl_score_dot_neon(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
fn impl_score_l1_neon(query_ptr: *const u8, vector_ptr: *const u8, dim: u32) -> f32;
}
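// A small illustrative test (not part of the original file) checking the portable
// fallback scorers against hand-computed sums.
#[cfg(test)]
mod scalar_fallback_example {
    use super::{impl_score_dot, impl_score_l1};

    #[test]
    fn fallback_scorers_match_naive_sums() {
        let a: Vec<u8> = vec![1, 2, 3, 4];
        let b: Vec<u8> = vec![4, 3, 2, 1];
        // dot: 1*4 + 2*3 + 3*2 + 4*1 = 20; l1: 3 + 1 + 1 + 3 = 8
        assert_eq!(impl_score_dot(a.as_ptr(), b.as_ptr(), a.len()), 20);
        assert_eq!(impl_score_l1(a.as_ptr(), b.as_ptr(), a.len()), 8);
    }
}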
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/vector_stats.rs | lib/quantization/src/vector_stats.rs | use serde::{Deserialize, Serialize};
use crate::VectorParameters;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VectorStats {
pub elements_stats: Vec<VectorElementStats>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VectorElementStats {
pub min: f32,
pub max: f32,
pub mean: f32,
pub stddev: f32,
}
impl Default for VectorElementStats {
fn default() -> Self {
VectorElementStats {
min: f32::MAX,
max: f32::MIN,
mean: 0.0,
stddev: 0.0,
}
}
}
impl VectorStats {
pub fn build<'a>(
data: impl Iterator<Item = impl AsRef<[f32]> + 'a>,
vector_params: &VectorParameters,
) -> Self {
        // Welford's online algorithm for mean and variance.
let mut stats = VectorStats {
elements_stats: vec![VectorElementStats::default(); vector_params.dim],
};
// For internal calculations use higher precision.
let mut m2 = vec![0.0f64; vector_params.dim];
let mut means = vec![0.0f64; vector_params.dim];
let mut count = 0;
for vector in data {
let vector = vector.as_ref();
count += 1;
debug_assert_eq!(
vector.len(),
vector_params.dim,
"Vector length does not match the expected dimension"
);
for (((&value, element_stats), mean), m2) in vector
.iter()
.zip(stats.elements_stats.iter_mut())
.zip(means.iter_mut())
.zip(m2.iter_mut())
{
element_stats.min = if value < element_stats.min {
value
} else {
element_stats.min
};
element_stats.max = if value > element_stats.max {
value
} else {
element_stats.max
};
let delta = f64::from(value) - *mean;
*mean += delta / f64::from(count);
*m2 += delta * (f64::from(value) - *mean);
}
}
for ((element_stats, means), m2) in stats
.elements_stats
.iter_mut()
.zip(means.iter())
.zip(m2.iter())
{
element_stats.stddev = if count > 1 {
(*m2 / f64::from(count - 1)).sqrt() as f32
} else {
0.0
};
element_stats.mean = *means as f32;
}
stats
}
}
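// A minimal sketch (not part of the original file) comparing the streaming statistics
// against directly computed values for a tiny dataset.
#[cfg(test)]
mod vector_stats_example {
    use super::VectorStats;
    use crate::{DistanceType, VectorParameters};

    #[test]
    fn welford_matches_direct_computation() {
        let data = vec![vec![1.0_f32, 10.0], vec![2.0, 20.0], vec![3.0, 30.0]];
        let params = VectorParameters {
            dim: 2,
            distance_type: DistanceType::Dot,
            invert: false,
            deprecated_count: None,
        };
        let stats = VectorStats::build(data.iter().map(|v| v.as_slice()), &params);
        let first = &stats.elements_stats[0];
        assert_eq!((first.min, first.max), (1.0, 3.0));
        assert!((first.mean - 2.0).abs() < 1e-6);
        // Sample standard deviation (n - 1 denominator) of [1, 2, 3] is 1.0.
        assert!((first.stddev - 1.0).abs() < 1e-6);
    }
}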
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/src/encoded_vectors_binary.rs | lib/quantization/src/encoded_vectors_binary.rs | use std::alloc::Layout;
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use common::counter::hardware_counter::HardwareCounterCell;
use common::typelevel::True;
use common::types::PointOffsetType;
use fs_err as fs;
use io::file_operations::atomic_save_json;
use memory::mmap_ops::{transmute_from_u8_to_slice, transmute_to_u8_slice};
use memory::mmap_type::MmapFlusher;
use serde::{Deserialize, Serialize};
use strum::EnumIter;
use crate::encoded_vectors::validate_vector_parameters;
use crate::vector_stats::{VectorElementStats, VectorStats};
use crate::{
DistanceType, EncodedStorage, EncodedStorageBuilder, EncodedVectors, EncodingError,
VectorParameters,
};
pub struct EncodedVectorsBin<TBitsStoreType: BitsStoreType, TStorage: EncodedStorage> {
encoded_vectors: TStorage,
metadata: Metadata,
metadata_path: Option<PathBuf>,
bits_store_type: PhantomData<TBitsStoreType>,
}
#[derive(Clone, Copy, Eq, PartialEq, Debug, Serialize, Deserialize, Default, EnumIter)]
pub enum Encoding {
#[default]
OneBit,
TwoBits,
OneAndHalfBits,
}
impl Encoding {
pub fn is_one(&self) -> bool {
matches!(self, Encoding::OneBit)
}
}
#[derive(Clone, Copy, Eq, PartialEq, Debug, Serialize, Deserialize, Default, EnumIter)]
pub enum QueryEncoding {
#[default]
SameAsStorage,
Scalar4bits,
Scalar8bits,
}
impl QueryEncoding {
pub fn is_same_as_storage(&self) -> bool {
matches!(self, QueryEncoding::SameAsStorage)
}
}
pub enum EncodedQueryBQ<TBitsStoreType: BitsStoreType> {
Binary(EncodedBinVector<TBitsStoreType>),
Scalar4bits(EncodedScalarVector<TBitsStoreType>),
Scalar8bits(EncodedScalarVector<TBitsStoreType>),
}
pub struct EncodedBinVector<TBitsStoreType: BitsStoreType> {
encoded_vector: Vec<TBitsStoreType>,
}
/// Transposed Scalar Encoded Vector
///
/// This data structure represents a scalar-encoded vector optimized for efficient scoring
/// against Binary Quantized (BQ) vectors through bit-level transposition.
///
/// STANDARD ENCODING:
/// A regular scalar vector [float_1, float_2, ..., float_n] is quantized to
/// [scalar_1, scalar_2, ..., scalar_n], where each scalar_i is a u8 value.
///
/// PERFORMANCE ISSUE:
/// Standard encoding is inefficient for scoring because it requires extracting
/// individual bits from each BQ vector in the dataset to perform XOR operations
/// with the scalar vector.
///
/// TRANSPOSITION OPTIMIZATION:
/// To improve scoring efficiency, we reorganize the data using bit-level transposition:
///
/// 1. Take the encoded scalar vector [scalar_1, scalar_2, ..., scalar_n]
/// 2. Divide into batches of size `N = 8 * size_of::<TBitsStoreType>()` (the bit width of the store type):
///    [[scalar_1, scalar_2, ..., scalar_N], [scalar_N+1, ...], ...]
/// 3. Transpose bit positions within each batch:
/// - Store all first bits: [scalar_1[0], scalar_2[0], ..., scalar_N[0]]
/// - Store all second bits: [scalar_1[1], scalar_2[1], ..., scalar_N[1]]
/// - Continue for all bit positions...
///
/// SCORING ADVANTAGE:
/// This layout enables efficient batch operations:
/// - Extract a single TBitsStoreType value from the BQ vector
/// - Perform N parallel operations with corresponding scalar bits
/// - Use shift operations to compute the final score:
///     popcnt(bq_word ^ scalar_bits[0]) << 0 +
///     popcnt(bq_word ^ scalar_bits[1]) << 1 +
///     popcnt(bq_word ^ scalar_bits[2]) << 2 + ...
///   where `scalar_bits[b]` is the transposed word holding bit `b` of every scalar in the batch.
///
/// This eliminates the need to extract individual bits from BQ vectors during scoring.
/// This idea was taken from http://arxiv.org/pdf/2405.12497, see Figure 2.
pub struct EncodedScalarVector<TBitsStoreType: BitsStoreType> {
pub encoded_vector: Vec<TBitsStoreType>,
}
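// Illustrative sketch (not from the original source) of the transposed layout for a `u8`
// store type and 4-bit query quantization. One batch covers 8 scalars `s_0..s_7`; the
// encoded query stores 4 words per batch, where word `b` holds bit `b` of every scalar:
//
//   word_0 bits: [s_0 bit 0, s_1 bit 0, ..., s_7 bit 0]
//   word_1 bits: [s_0 bit 1, s_1 bit 1, ..., s_7 bit 1]
//   word_2 bits: [s_0 bit 2, s_1 bit 2, ..., s_7 bit 2]
//   word_3 bits: [s_0 bit 3, s_1 bit 3, ..., s_7 bit 3]
//
// Scoring one `u8` word `v` of a BQ vector then only needs whole-word operations:
//
//   score += popcnt(v ^ word_0) << 0
//          + popcnt(v ^ word_1) << 1
//          + popcnt(v ^ word_2) << 2
//          + popcnt(v ^ word_3) << 3;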
#[derive(Serialize, Deserialize)]
struct Metadata {
vector_parameters: VectorParameters,
#[serde(default)]
#[serde(skip_serializing_if = "Encoding::is_one")]
encoding: Encoding,
#[serde(default)]
#[serde(skip_serializing_if = "QueryEncoding::is_same_as_storage")]
query_encoding: QueryEncoding,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
vector_stats: Option<VectorStats>,
}
pub trait BitsStoreType:
Default
+ Copy
+ Clone
+ core::ops::BitOrAssign
+ std::ops::Shl<usize, Output = Self>
+ std::ops::Shr<usize, Output = Self>
+ std::ops::BitAnd<Output = Self>
+ num_traits::identities::One
+ num_traits::cast::FromPrimitive
+ num_traits::cast::ToPrimitive
+ bytemuck::Pod
+ std::fmt::Debug
{
/// Xor vectors and return the number of bits set to 1
///
    /// Assume that `v1` and `v2` are padded to `BITS_STORE_TYPE_SIZE` with zeros in both,
    /// so the padding does not affect the resulting number of bits set to 1
fn xor_popcnt(v1: &[Self], v2: &[Self]) -> usize;
/// Calculate score between BQ encoded vector and `EncodedScalarVector<Self>` query.
///
/// It calculates sum of XOR popcount between each bit of the `vector` and the corresponding scalar value in the `query`.
/// XOR between scalar and bit is a XOR for each bit of the scalar value.
/// See `EncodedScalarVector` docs for more details about the transposition optimization to avoid extracting bits from BQ vectors.
fn xor_popcnt_scalar(vector: &[Self], query: &[Self], query_bits_count: usize) -> usize;
/// Estimates how many `StorageType` elements are needed to store `size` bits
fn get_storage_size(size: usize) -> usize;
}
impl BitsStoreType for u8 {
fn xor_popcnt(v1: &[Self], v2: &[Self]) -> usize {
debug_assert!(v1.len() == v2.len());
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.2") {
unsafe {
if v1.len() > 16 {
return impl_xor_popcnt_sse_uint128(
v1.as_ptr(),
v2.as_ptr(),
(v1.len() as u32) / 16,
) as usize;
} else if v1.len() > 8 {
return impl_xor_popcnt_sse_uint64(
v1.as_ptr(),
v2.as_ptr(),
(v1.len() as u32) / 8,
) as usize;
} else if v1.len() > 4 {
return impl_xor_popcnt_sse_uint32(
v1.as_ptr(),
v2.as_ptr(),
(v1.len() as u32) / 4,
) as usize;
}
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
unsafe {
if v1.len() > 16 {
return impl_xor_popcnt_neon_uint128(
v1.as_ptr(),
v2.as_ptr(),
(v1.len() as u32) / 16,
) as usize;
} else if v1.len() > 8 {
return impl_xor_popcnt_neon_uint64(
v1.as_ptr(),
v2.as_ptr(),
(v1.len() as u32) / 8,
) as usize;
}
}
}
let mut result = 0;
for (&b1, &b2) in v1.iter().zip(v2.iter()) {
result += (b1 ^ b2).count_ones() as usize;
}
result
}
fn xor_popcnt_scalar(vector: &[Self], query: &[Self], query_bits_count: usize) -> usize {
debug_assert!(query.len() >= vector.len() * query_bits_count);
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
if query_bits_count == 8 {
unsafe {
return impl_xor_popcnt_scalar8_neon_u8(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
} else if query_bits_count == 4 {
unsafe {
return impl_xor_popcnt_scalar4_neon_u8(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.2") {
if query_bits_count == 8 {
unsafe {
return impl_xor_popcnt_scalar8_sse_u8(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
} else if query_bits_count == 4 {
unsafe {
return impl_xor_popcnt_scalar4_sse_u8(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
}
}
let mut result = 0;
for (&b1, b2_chunk) in vector.iter().zip(query.chunks_exact(query_bits_count)) {
for (i, &b2) in b2_chunk.iter().enumerate() {
result += (b1 ^ b2).count_ones() << i;
}
}
result as usize
}
fn get_storage_size(size: usize) -> usize {
let bytes_count = if size > 128 {
std::mem::size_of::<u128>()
} else if size > 64 {
std::mem::size_of::<u64>()
} else if size > 32 {
std::mem::size_of::<u32>()
} else {
std::mem::size_of::<u8>()
};
let bits_count = u8::BITS as usize * bytes_count;
let mut result = size / bits_count;
if !size.is_multiple_of(bits_count) {
result += 1;
}
result * bytes_count
}
}
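// An illustrative test (not part of the original file); inputs this short always take the
// portable fallback path of `xor_popcnt` for the `u8` store type, regardless of CPU features.
#[cfg(test)]
mod xor_popcnt_u8_example {
    use super::BitsStoreType;

    #[test]
    fn xor_popcnt_counts_differing_bits() {
        let v1: Vec<u8> = vec![0b1010_1010, 0b1111_0000];
        let v2: Vec<u8> = vec![0b0101_0101, 0b1111_0000];
        // The first pair differs in all 8 bits, the second pair in none.
        assert_eq!(<u8 as BitsStoreType>::xor_popcnt(&v1, &v2), 8);
    }
}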
impl BitsStoreType for u128 {
fn xor_popcnt(v1: &[Self], v2: &[Self]) -> usize {
debug_assert!(v1.len() == v2.len());
#[cfg(target_arch = "x86_64")]
if is_x86_feature_detected!("avx512vl")
&& is_x86_feature_detected!("avx512vpopcntdq")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("sse4.1")
&& is_x86_feature_detected!("sse2")
{
unsafe {
return impl_xor_popcnt_avx512_uint128(
v1.as_ptr().cast::<u8>(),
v2.as_ptr().cast::<u8>(),
v1.len() as u32,
) as usize;
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.2") {
unsafe {
return impl_xor_popcnt_sse_uint128(
v1.as_ptr().cast::<u8>(),
v2.as_ptr().cast::<u8>(),
v1.len() as u32,
) as usize;
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
unsafe {
return impl_xor_popcnt_neon_uint128(
v1.as_ptr().cast::<u8>(),
v2.as_ptr().cast::<u8>(),
v1.len() as u32,
) as usize;
}
}
let mut result = 0;
for (&b1, &b2) in v1.iter().zip(v2.iter()) {
result += (b1 ^ b2).count_ones() as usize;
}
result
}
fn xor_popcnt_scalar(vector: &[Self], query: &[Self], query_bits_count: usize) -> usize {
debug_assert!(query.len() >= vector.len() * query_bits_count);
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
if std::arch::is_aarch64_feature_detected!("neon") {
if query_bits_count == 8 {
unsafe {
return impl_xor_popcnt_scalar8_neon_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
} else if query_bits_count == 4 {
unsafe {
return impl_xor_popcnt_scalar4_neon_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
}
}
#[cfg(target_arch = "x86_64")]
if is_x86_feature_detected!("avx2") && is_x86_feature_detected!("sse4.2") {
if query_bits_count == 8 {
unsafe {
return impl_xor_popcnt_scalar8_avx_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
} else if query_bits_count == 4 {
unsafe {
return impl_xor_popcnt_scalar4_avx_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
if is_x86_feature_detected!("sse4.2") {
if query_bits_count == 8 {
unsafe {
return impl_xor_popcnt_scalar8_sse_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
} else if query_bits_count == 4 {
unsafe {
return impl_xor_popcnt_scalar4_sse_uint128(
query.as_ptr().cast::<u8>(),
vector.as_ptr().cast::<u8>(),
vector.len() as u32,
) as usize;
}
}
}
let mut result = 0;
for (&b1, b2_chunk) in vector.iter().zip(query.chunks_exact(query_bits_count)) {
for (i, &b2) in b2_chunk.iter().enumerate() {
result += (b1 ^ b2).count_ones() << i;
}
}
result as usize
}
fn get_storage_size(size: usize) -> usize {
let bits_count = u8::BITS as usize * std::mem::size_of::<Self>();
let mut result = size / bits_count;
if !size.is_multiple_of(bits_count) {
result += 1;
}
result
}
}
impl<TBitsStoreType: BitsStoreType, TStorage: EncodedStorage>
EncodedVectorsBin<TBitsStoreType, TStorage>
{
pub fn storage(&self) -> &TStorage {
&self.encoded_vectors
}
pub fn encode<'a>(
orig_data: impl Iterator<Item = impl AsRef<[f32]> + 'a> + Clone,
mut storage_builder: impl EncodedStorageBuilder<Storage = TStorage>,
vector_parameters: &VectorParameters,
encoding: Encoding,
query_encoding: QueryEncoding,
meta_path: Option<&Path>,
stopped: &AtomicBool,
) -> Result<Self, EncodingError> {
debug_assert!(validate_vector_parameters(orig_data.clone(), vector_parameters).is_ok());
        let storage_encoding_needs_stats = match encoding {
            Encoding::OneBit => false,
            // Requires stats for bit boundaries
            Encoding::TwoBits | Encoding::OneAndHalfBits => true,
        };
        let query_encoding_needs_stats = match query_encoding {
            QueryEncoding::SameAsStorage => storage_encoding_needs_stats,
            QueryEncoding::Scalar4bits | QueryEncoding::Scalar8bits => true,
        };
        let vector_stats = if storage_encoding_needs_stats || query_encoding_needs_stats {
Some(VectorStats::build(orig_data.clone(), vector_parameters))
} else {
None
};
for vector in orig_data {
if stopped.load(Ordering::Relaxed) {
return Err(EncodingError::Stopped);
}
let encoded_vector = Self::encode_vector(vector.as_ref(), &vector_stats, encoding);
let encoded_vector_slice = encoded_vector.encoded_vector.as_slice();
let bytes = transmute_to_u8_slice(encoded_vector_slice);
storage_builder.push_vector_data(bytes).map_err(|e| {
EncodingError::EncodingError(format!("Failed to push encoded vector: {e}",))
})?;
}
let encoded_vectors = storage_builder
.build()
.map_err(|e| EncodingError::EncodingError(format!("Failed to build storage: {e}",)))?;
let metadata = Metadata {
vector_parameters: vector_parameters.clone(),
encoding,
query_encoding,
vector_stats,
};
if let Some(meta_path) = meta_path {
meta_path
.parent()
.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Path must have a parent directory",
)
})
.and_then(fs::create_dir_all)
.map_err(|e| {
EncodingError::EncodingError(format!(
"Failed to create metadata directory: {e}",
))
})?;
atomic_save_json(meta_path, &metadata).map_err(|e| {
EncodingError::EncodingError(format!("Failed to save metadata: {e}",))
})?;
}
Ok(Self {
encoded_vectors,
metadata,
metadata_path: meta_path.map(PathBuf::from),
bits_store_type: PhantomData,
})
}
pub fn load(encoded_vectors: TStorage, meta_path: &Path) -> std::io::Result<Self> {
let contents = fs::read_to_string(meta_path)?;
let metadata: Metadata = serde_json::from_str(&contents)?;
let result = Self {
metadata,
metadata_path: Some(meta_path.to_path_buf()),
encoded_vectors,
bits_store_type: PhantomData,
};
Ok(result)
}
fn encode_vector(
vector: &[f32],
vector_stats: &Option<VectorStats>,
encoding: Encoding,
) -> EncodedBinVector<TBitsStoreType> {
let encoded_vector_size =
Self::get_quantized_vector_size_from_params(vector.len(), encoding)
/ std::mem::size_of::<TBitsStoreType>();
let mut encoded_vector = vec![Default::default(); encoded_vector_size];
match encoding {
Encoding::OneBit => Self::encode_one_bit_vector(vector, &mut encoded_vector),
Encoding::TwoBits => {
Self::encode_two_bits_vector(vector, &mut encoded_vector, vector_stats)
}
Encoding::OneAndHalfBits => {
Self::encode_one_and_half_bits_vector(vector, &mut encoded_vector, vector_stats)
}
}
EncodedBinVector { encoded_vector }
}
fn encode_one_bit_vector(vector: &[f32], encoded_vector: &mut [TBitsStoreType]) {
let bits_count = u8::BITS as usize * std::mem::size_of::<TBitsStoreType>();
let one = TBitsStoreType::one();
for (i, &v) in vector.iter().enumerate() {
// flag is true if the value is positive
// It's expected that the vector value is in range [-1; 1]
if v > 0.0 {
encoded_vector[i / bits_count] |= one << (i % bits_count);
}
}
}
fn encode_two_bits_vector(
vector: &[f32],
encoded_vector: &mut [TBitsStoreType],
vector_stats: &Option<VectorStats>,
) {
let bits_count = u8::BITS as usize * std::mem::size_of::<TBitsStoreType>();
let one = TBitsStoreType::one();
for i in 0..vector.len() {
let value = vector[i];
let stats = vector_stats.as_ref().map(|stats| &stats.elements_stats[i]);
let (b1, b2) = Self::encode_two_bits_value(value, stats);
if b1 {
encoded_vector[i / bits_count] |= one << (i % bits_count);
}
if b2 {
let j = vector.len() + i;
encoded_vector[j / bits_count] |= one << (j % bits_count);
}
}
}
fn encode_one_and_half_bits_vector(
vector: &[f32],
encoded_vector: &mut [TBitsStoreType],
vector_stats: &Option<VectorStats>,
) {
        // One-and-a-half-bit encoding is a 2-bit quantization where the second bit,
        // which marks that the value is above sigma,
        // is merged with the same bit of the next value using an OR operation.
// Scoring for 1.5bit quantization is the same as for 2bit and 1bit quantization.
//
// Example 1:
// `Value1` has `[1,0]` 2bits encoding, value `Value2` has `[1,1]` 2bits encoding.
// The resulting 1.5bit encoding will be `[value1[0], value2[0], value1[1] | value2[1]] = [1, 1, 1]`.
//
// Example 2:
// `Value1` has `[0,0]` 2bits encoding, value `Value2` has `[1,0]` 2bits encoding.
// The resulting 1.5bit encoding will be `[value1[0], value2[0], value1[1] | value2[1]] = [0, 1, 0]`.
let bits_count = u8::BITS as usize * std::mem::size_of::<TBitsStoreType>();
let one = TBitsStoreType::one();
for i in 0..vector.len() {
let value = vector[i];
let stats = vector_stats.as_ref().map(|stats| &stats.elements_stats[i]);
let (b1, b2) = Self::encode_two_bits_value(value, stats);
if b1 {
encoded_vector[i / bits_count] |= one << (i % bits_count);
}
if b2 {
let j = vector.len() + i / 2;
encoded_vector[j / bits_count] |= one << (j % bits_count);
}
}
}
fn encode_two_bits_value(
value: f32,
element_stats: Option<&VectorElementStats>,
) -> (bool, bool) {
// Two bit encoding is a regular BQ with "zero".
// It uses 2 bits per value and encodes values in the following way:
// 00 - if the value is in the range (-inf; -SIGMAS];
// 10 - if the value is in the range (-SIGMAS; SIGMAS);
// 11 - if the value is in the range [SIGMAS; +inf);
// where sigma is the standard deviation of the value.
//
// Scoring for 2bit quantization is the same as for 1bit quantization.
let Some(element_stats) = element_stats else {
return if value > 0.0 {
(true, true)
} else {
(false, false)
};
};
let VectorElementStats {
min: _,
max: _,
mean,
stddev,
} = element_stats;
let sd = *stddev;
if sd < f32::EPSILON {
            // If the standard deviation is zero,
            // we cannot compute a z-score, so fall back to regular BQ with a zero comparison.
return (value > 0.0, false);
}
// Calculate z-score for the value
let v_z = (value - mean) / sd;
        // Threshold in standard deviations that bounds the "zero" band for 2-bit encoding.
const SIGMAS: f32 = 2.0 / 3.0;
if v_z <= -SIGMAS {
(false, false) // (-inf; -SIGMAS]
} else if v_z < SIGMAS {
(true, false) // (-SIGMAS; SIGMAS)
} else {
(true, true) // [SIGMAS; +inf)
}
}
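    // Worked example for `encode_two_bits_value` above (illustrative): with
    // mean = 0.1, stddev = 0.3 and SIGMAS = 2/3, a value of -0.2 has a z-score of
    // -1.0 <= -SIGMAS and encodes as (false, false); 0.15 has a z-score of ~0.17
    // inside (-SIGMAS; SIGMAS) and encodes as (true, false); 0.5 has a z-score of
    // ~1.33 >= SIGMAS and encodes as (true, true). Without per-element stats the
    // sign of the raw value alone decides between (false, false) and (true, true).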
fn encode_query_vector(
query: &[f32],
vector_stats: &Option<VectorStats>,
encoding: Encoding,
query_encoding: QueryEncoding,
) -> EncodedQueryBQ<TBitsStoreType> {
match query_encoding {
QueryEncoding::SameAsStorage => {
EncodedQueryBQ::Binary(Self::encode_vector(query, vector_stats, encoding))
}
QueryEncoding::Scalar8bits => EncodedQueryBQ::Scalar8bits(
Self::encode_scalar_query_vector(query, encoding, u8::BITS as usize),
),
QueryEncoding::Scalar4bits => EncodedQueryBQ::Scalar4bits(
Self::encode_scalar_query_vector(query, encoding, (u8::BITS / 2) as usize),
),
}
}
fn encode_scalar_query_vector(
query: &[f32],
encoding: Encoding,
bits_count: usize,
) -> EncodedScalarVector<TBitsStoreType> {
match encoding {
Encoding::OneBit => Self::_encode_scalar_query_vector(query, bits_count),
Encoding::TwoBits => {
// For two bits encoding we need to extend the query vector
let mut extended_query = Vec::with_capacity(query.len() * 2);
// Copy the original query vector twice: for first and second bits in 2bit BQ encoding
extended_query.extend_from_slice(query);
extended_query.extend_from_slice(query);
Self::_encode_scalar_query_vector(&extended_query, bits_count)
}
Encoding::OneAndHalfBits => {
// For one and half bits encoding we need to extend the query vector
let mut extended_query = Vec::with_capacity(query.len() + query.len().div_ceil(2));
extended_query.extend_from_slice(query);
// For 1.5bit BQ use max of two consecutive values
extended_query.extend(
query
.chunks(2)
.map(|v| if v.len() == 2 { v[0].max(v[1]) } else { v[0] }),
);
Self::_encode_scalar_query_vector(&extended_query, bits_count)
}
}
}
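    // Worked example for `encode_scalar_query_vector` above (illustrative): for the
    // query [0.2, -0.4, 0.9, -0.1], OneBit quantizes the 4 values as-is; TwoBits
    // duplicates them into [0.2, -0.4, 0.9, -0.1, 0.2, -0.4, 0.9, -0.1] so that both
    // bit planes of the storage are matched; OneAndHalfBits appends the pairwise
    // maxima [0.2, 0.9], giving 4 + ceil(4 / 2) = 6 values to quantize.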
fn _encode_scalar_query_vector(
query: &[f32],
bits_count: usize,
) -> EncodedScalarVector<TBitsStoreType> {
let encoded_query_size = TBitsStoreType::get_storage_size(query.len().max(1)) * bits_count;
let mut encoded_query: Vec<TBitsStoreType> = vec![Default::default(); encoded_query_size];
let max_abs_value = query.iter().map(|x| x.abs()).fold(0.0, f32::max);
let (min, max) = (-max_abs_value, max_abs_value);
let ranges = (1usize << bits_count) - 1;
let delta = (max - min) / ranges as f32;
let storage_bits_count = std::mem::size_of::<TBitsStoreType>() * u8::BITS as usize;
for (chunk_index, chunk) in query.chunks(storage_bits_count).enumerate() {
for (shift, value) in chunk.iter().enumerate() {
let shifted_value = value - min;
                let scaled_value = if delta > f32::EPSILON {
                    shifted_value / delta
                } else {
                    0.0
                };
                let rounded_value = scaled_value.round() as usize;
let quantized = rounded_value % (ranges + 1);
let quantized = TBitsStoreType::from_usize(quantized).unwrap_or_default();
for b in 0..bits_count {
let bit_value = ((quantized >> b) & TBitsStoreType::one()) << shift;
encoded_query[bits_count * chunk_index + b] |= bit_value;
}
}
}
EncodedScalarVector {
encoded_vector: encoded_query,
}
}
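    // Worked arithmetic for `_encode_scalar_query_vector` above (illustrative):
    // if the largest absolute query value is 0.9 and bits_count = 4 (Scalar4bits),
    // then (min, max) = (-0.9, 0.9), ranges = 2^4 - 1 = 15 and
    // delta = 1.8 / 15 = 0.12. A value of 0.3 is shifted to 1.2 and quantized to
    // round(1.2 / 0.12) = 10 = 0b1010; each of its 4 bits is then written into a
    // separate bit plane (4 consecutive output words per chunk) at the bit
    // position of that value within the chunk.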
pub fn get_quantized_vector_size_from_params(dim: usize, encoding: Encoding) -> usize {
let extended_dim = match encoding {
Encoding::OneBit => dim,
Encoding::TwoBits => dim * 2,
Encoding::OneAndHalfBits => (dim * 3).div_ceil(2), // ceil(dim * 1.5)
};
TBitsStoreType::get_storage_size(extended_dim.max(1))
* std::mem::size_of::<TBitsStoreType>()
}
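    // Worked arithmetic for `get_quantized_vector_size_from_params` above
    // (illustrative, assuming `get_storage_size` rounds the bit count up to whole
    // `TBitsStoreType` words): for dim = 130 and u128 words, OneBit needs 130 bits
    // -> 2 words -> 32 bytes, TwoBits needs 260 bits -> 3 words -> 48 bytes, and
    // OneAndHalfBits needs ceil(130 * 1.5) = 195 bits -> 2 words -> 32 bytes.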
fn get_quantized_vector_size(&self) -> usize {
Self::get_quantized_vector_size_from_params(
self.metadata.vector_parameters.dim,
self.metadata.encoding,
)
}
fn calculate_metric(
&self,
vector: &[TBitsStoreType],
query: &[TBitsStoreType],
query_bits_count: usize,
) -> f32 {
// Dot product in a range [-1; 1] is approximated by NXOR in a range [0; 1]
// L1 distance in range [-1; 1] (alpha=2) is approximated by alpha*XOR in a range [0; 1]
// L2 distance in range [-1; 1] (alpha=2) is approximated by alpha*sqrt(XOR) in a range [0; 1]
// For example:
// | A | B | Dot product | L1 | L2 |
// | -0.5 | -0.5 | 0.25 | 0 | 0 |
// | -0.5 | 0.5 | -0.25 | 1 | 1 |
// | 0.5 | -0.5 | -0.25 | 1 | 1 |
// | 0.5 | 0.5 | 0.25 | 0 | 0 |
        // | A | B | NXOR | XOR |
        // | 0 | 0 | 1    | 0   |
        // | 0 | 1 | 0    | 1   |
        // | 1 | 0 | 0    | 1   |
        // | 1 | 1 | 1    | 0   |
let xor_product = if query_bits_count == 1 {
TBitsStoreType::xor_popcnt(vector, query) as f32
} else {
let xor_product = TBitsStoreType::xor_popcnt_scalar(vector, query, query_bits_count);
(xor_product as f32) / (((1 << query_bits_count) - 1) as f32)
};
let dim = self.metadata.vector_parameters.dim as f32;
let zeros_count = dim - xor_product;
match (
self.metadata.vector_parameters.distance_type,
self.metadata.vector_parameters.invert,
) {
            // For Dot the score grows with the number of matching bits,
            // so we return (matches - mismatches), negated when `invert` is true.
            (DistanceType::Dot, true) => xor_product - zeros_count,
            (DistanceType::Dot, false) => zeros_count - xor_product,
            // For L1 and L2 the XOR count is a distance, so the ordering is the
            // reverse of Dot; `invert` flips it back.
            (DistanceType::L1 | DistanceType::L2, true) => zeros_count - xor_product,
            (DistanceType::L1 | DistanceType::L2, false) => xor_product - zeros_count,
}
}
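    // Worked example for `calculate_metric` above (illustrative): for dim = 8 and
    // one-bit codes that disagree in 3 positions, xor_product = 3 and
    // zeros_count = 5. For Dot with invert = false the score is 5 - 3 = 2, which is
    // exactly the dot product of two ±1 vectors agreeing in 5 of 8 dimensions;
    // with invert = true the sign flips to -2.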
pub fn get_quantized_vector(&self, i: PointOffsetType) -> &[u8] {
self.encoded_vectors.get_vector_data(i as _)
}
pub fn layout(&self) -> Layout {
Layout::from_size_align(
self.get_quantized_vector_size(),
align_of::<TBitsStoreType>(),
)
.unwrap()
}
pub fn get_vector_parameters(&self) -> &VectorParameters {
&self.metadata.vector_parameters
}
pub fn encode_internal_query(&self, point_id: u32) -> EncodedQueryBQ<TBitsStoreType> {
// For internal queries we use the same encoding as for storage
EncodedQueryBQ::Binary(EncodedBinVector {
encoded_vector: bytemuck::cast_slice::<u8, TBitsStoreType>(
self.get_quantized_vector(point_id),
)
.to_vec(),
})
}
}
impl<TBitsStoreType: BitsStoreType, TStorage: EncodedStorage> EncodedVectors
for EncodedVectorsBin<TBitsStoreType, TStorage>
{
type EncodedQuery = EncodedQueryBQ<TBitsStoreType>;
fn is_on_disk(&self) -> bool {
self.encoded_vectors.is_on_disk()
}
fn encode_query(&self, query: &[f32]) -> EncodedQueryBQ<TBitsStoreType> {
debug_assert!(query.len() == self.metadata.vector_parameters.dim);
Self::encode_query_vector(
query,
&self.metadata.vector_stats,
self.metadata.encoding,
self.metadata.query_encoding,
)
}
fn score_point(
&self,
query: &EncodedQueryBQ<TBitsStoreType>,
i: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
let vector_data = self.encoded_vectors.get_vector_data(i);
        self.score_bytes(true, query, vector_data, hw_counter)
}
fn score_internal(
&self,
i: PointOffsetType,
j: PointOffsetType,
hw_counter: &HardwareCounterCell,
) -> f32 {
let vector_data_1 = self.encoded_vectors.get_vector_data(i);
let vector_data_2 = self.encoded_vectors.get_vector_data(j);
hw_counter
.vector_io_read()
.incr_delta(vector_data_1.len() + vector_data_2.len());
let vector_data_usize_1 = transmute_from_u8_to_slice(vector_data_1);
let vector_data_usize_2 = transmute_from_u8_to_slice(vector_data_2);
hw_counter
.cpu_counter()
.incr_delta(vector_data_usize_2.len());
self.calculate_metric(vector_data_usize_1, vector_data_usize_2, 1)
}
fn quantized_vector_size(&self) -> usize {
self.get_quantized_vector_size()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_simple.rs | lib/quantization/tests/integration/test_simple.rs | #[cfg(test)]
mod tests {
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use rand::{Rng, SeedableRng};
use rstest::rstest;
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l2_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random::<f32>()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random::<f32>()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = l2_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l1_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = l1_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_inverted_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = -dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l2_inverted_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random::<f32>()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random::<f32>()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: true,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = -l2_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l1_inverted_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: true,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = -l1_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_internal_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count: usize = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..vectors_count {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_inverted_internal_simple(#[case] method: ScalarQuantizationMethod) {
let vectors_count: usize = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..vectors_count {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = -dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_u8_large_quantile(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
Some(1.0 - f32::EPSILON), // almost 1.0 value, but not 1.0
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_simple(&query_u8, quantized_vector);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8, false)]
#[case(ScalarQuantizationMethod::Int8, true)]
fn test_sq_u8_encode_internal(#[case] method: ScalarQuantizationMethod, #[case] invert: bool) {
let vectors_count = 129;
let vector_dim = 70;
let error = 1e-3;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim)
.map(|_| 2.0 * rng.random::<f32>() - 1.0)
.collect();
vector_data.push(vector);
}
for distance_type in [DistanceType::Dot, DistanceType::L2, DistanceType::L1] {
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type,
invert,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
Some(1.0 - f32::EPSILON), // almost 1.0 value, but not 1.0
method.clone(),
None,
&AtomicBool::new(false),
)
.unwrap();
let hw = HardwareCounterCell::new();
for (i, vector) in vector_data.iter().enumerate() {
// encode vector using the encode_query method
let query = encoded.encode_query(vector);
// encode vector using the encode_internal_vector method
let query_internal = encoded.encode_internal_vector(i as u32).unwrap();
let score_query = encoded.score_point(&query, 0, &hw);
let score_internal_query = encoded.score_point(&query_internal, 0, &hw);
let score_internal = encoded.score_internal(i as u32, 0, &hw);
assert!((score_query - score_internal).abs() < error);
assert!((score_internal_query - score_internal).abs() < error);
assert!((score_query - score_internal_query).abs() < error);
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_binary_encodings.rs | lib/quantization/tests/integration/test_binary_encodings.rs | #[cfg(test)]
mod tests {
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_binary::{
BitsStoreType, EncodedVectorsBin, Encoding, QueryEncoding,
};
use rand::{Rng, SeedableRng};
use strum::IntoEnumIterator;
use crate::metrics::dot_similarity;
fn generate_number(rng: &mut rand::rngs::StdRng) -> f32 {
rng.random_range(-1.0..1.0)
}
fn generate_vector(dim: usize, rng: &mut rand::rngs::StdRng) -> Vec<f32> {
(0..dim)
.map(|_| generate_number(rng) / (dim as f32).sqrt())
.collect()
}
fn get_top(scores: &[f32], count: usize, invert: bool) -> Vec<usize> {
let mut indices: Vec<usize> = (0..scores.len()).collect();
indices.sort_by(|&a, &b| scores[b].partial_cmp(&scores[a]).unwrap());
if invert {
indices.reverse();
}
indices.into_iter().take(count).collect()
}
fn match_count(ids1: &[usize], ids2: &[usize]) -> usize {
ids1.iter().filter(|&&id| ids2.contains(&id)).count()
}
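    // Toy illustration of the accuracy measurement used by the tests below: the
    // quality of a quantized scoring pass is the overlap between the original
    // top-k ids and the quantized top-k ids (a recall@k style count). The scores
    // in this fixture are made up for the example.
    #[test]
    fn test_get_top_and_match_count_example() {
        let original_scores: [f32; 5] = [0.9, 0.1, 0.8, 0.3, 0.7];
        let quantized_scores: [f32; 5] = [0.85, 0.2, 0.9, 0.75, 0.1];
        let original_top = get_top(&original_scores, 3, false);
        assert_eq!(original_top, vec![0usize, 2, 4]);
        let quantized_top = get_top(&quantized_scores, 3, false);
        // Ids 0 and 2 survive quantization, id 4 is displaced by id 3.
        assert_eq!(match_count(&original_top, &quantized_top), 2);
    }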
#[test]
fn test_binary_dot() {
test_binary_dot_impl::<u128>(0, false);
test_binary_dot_impl::<u128>(600, false);
test_binary_dot_impl::<u128>(601, false);
test_binary_dot_impl::<u8>(600, false);
}
#[test]
fn test_binary_dot_inverted() {
test_binary_dot_impl::<u128>(700, true);
test_binary_dot_impl::<u8>(700, true);
}
fn test_binary_dot_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize, invert: bool) {
let vectors_count = 1000;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let encodings = [
Encoding::OneBit,
Encoding::OneAndHalfBits,
Encoding::TwoBits,
];
let encoded: Vec<_> = encodings
.iter()
.map(|&encoding| {
let quantized_vector_size = EncodedVectorsBin::<
TBitsStoreType,
TestEncodedStorage,
>::get_quantized_vector_size_from_params(
vector_dim, encoding
);
EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert,
},
encoding,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap()
})
.collect();
let top = 10;
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let orig_scores: Vec<f32> = vector_data
.iter()
.map(|vector| dot_similarity(&query, vector))
.collect();
let original_top = get_top(&orig_scores, top, invert);
let tops = encoded
.iter()
.map(|encoded| {
let query_encoded = encoded.encode_query(&query);
let scores: Vec<f32> = (0..vector_data.len())
.map(|index| {
encoded.score_point(
&query_encoded,
index as u32,
&HardwareCounterCell::new(),
)
})
.collect();
let tops = get_top(&scores, top, false);
match_count(&original_top, &tops)
})
.collect::<Vec<_>>();
        // Check that each encoding is at least as accurate as the previous one
for i in 1..tops.len() {
assert!(
tops[i] >= tops[i - 1],
"Encoding {} has less accuracy than encoding {}",
i,
i - 1
);
}
}
#[test]
    fn test_binary_dot_asymmetric() {
        test_binary_dot_asymmetric_impl::<u128>(0, Encoding::OneBit, false);
        test_binary_dot_asymmetric_impl::<u8>(0, Encoding::OneBit, false);
        test_binary_dot_asymmetric_impl::<u128>(1024, Encoding::OneBit, false);
        test_binary_dot_asymmetric_impl::<u128>(601, Encoding::OneBit, false);
        test_binary_dot_asymmetric_impl::<u8>(600, Encoding::OneBit, false);
        test_binary_dot_asymmetric_impl::<u128>(1024, Encoding::OneAndHalfBits, false);
        test_binary_dot_asymmetric_impl::<u128>(601, Encoding::OneAndHalfBits, false);
        test_binary_dot_asymmetric_impl::<u8>(600, Encoding::OneAndHalfBits, false);
        test_binary_dot_asymmetric_impl::<u128>(1024, Encoding::TwoBits, false);
        test_binary_dot_asymmetric_impl::<u128>(701, Encoding::TwoBits, false);
        test_binary_dot_asymmetric_impl::<u8>(700, Encoding::TwoBits, false);
    }
    #[test]
    fn test_binary_dot_inverted_asymmetric() {
        test_binary_dot_asymmetric_impl::<u128>(0, Encoding::OneBit, true);
        test_binary_dot_asymmetric_impl::<u8>(0, Encoding::OneBit, true);
        test_binary_dot_asymmetric_impl::<u128>(1024, Encoding::OneBit, true);
        test_binary_dot_asymmetric_impl::<u128>(601, Encoding::OneBit, true);
        test_binary_dot_asymmetric_impl::<u8>(600, Encoding::OneBit, true);
    }
    fn test_binary_dot_asymmetric_impl<TBitsStoreType: BitsStoreType>(
vector_dim: usize,
encoding: Encoding,
invert: bool,
) {
let vectors_count = 1000;
let mut rng = rand::rngs::StdRng::seed_from_u64(43);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let encoded: Vec<_> = QueryEncoding::iter()
.map(|query_encoding| {
let quantized_vector_size = EncodedVectorsBin::<
TBitsStoreType,
TestEncodedStorage,
>::get_quantized_vector_size_from_params(
vector_dim, encoding
);
EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert,
},
encoding,
query_encoding,
None,
&AtomicBool::new(false),
)
.unwrap()
})
.collect();
let top = 10;
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let orig_scores: Vec<f32> = vector_data
.iter()
.map(|vector| dot_similarity(&query, vector))
.collect();
let original_top = get_top(&orig_scores, top, invert);
let tops = encoded
.iter()
.map(|encoded| {
let query_encoded = encoded.encode_query(&query);
let scores: Vec<f32> = (0..vector_data.len())
.map(|index| {
encoded.score_point(
&query_encoded,
index as u32,
&HardwareCounterCell::new(),
)
})
.collect();
let tops = get_top(&scores, top, false);
match_count(&original_top, &tops)
})
.collect::<Vec<_>>();
        // Check that each query encoding is at least as accurate as the first (baseline) one
for i in 1..tops.len() {
assert!(
tops[i] >= tops[0],
"Encoding {i} has less accuracy than original encoding",
);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_binary.rs | lib/quantization/tests/integration/test_binary.rs | #[cfg(test)]
mod tests {
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_binary::{
BitsStoreType, EncodedVectorsBin, Encoding, QueryEncoding,
};
use rand::{Rng, SeedableRng};
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
fn generate_number(rng: &mut rand::rngs::StdRng) -> f32 {
let n = f32::signum(rng.random_range(-1.0..1.0));
if n == 0.0 { 1.0 } else { n }
}
fn generate_vector(dim: usize, rng: &mut rand::rngs::StdRng) -> Vec<f32> {
(0..dim).map(|_| generate_number(rng)).collect()
}
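    // For the ±1 vectors produced by `generate_vector`, one-bit quantization is
    // lossless: the dot product equals (agreeing dimensions) - (disagreeing
    // dimensions) = dim - 2 * hamming_distance, which is the same quantity the
    // binary scorer derives from the XOR popcount. A small sanity check of that
    // identity (illustrative only):
    #[test]
    fn test_sign_vectors_dot_matches_hamming_identity() {
        let mut rng = rand::rngs::StdRng::seed_from_u64(42);
        let dim = 64;
        let a = generate_vector(dim, &mut rng);
        let b = generate_vector(dim, &mut rng);
        let hamming = a
            .iter()
            .zip(b.iter())
            .filter(|(x, y)| (**x > 0.0) != (**y > 0.0))
            .count();
        let expected = dim as f32 - 2.0 * hamming as f32;
        assert!((dot_similarity(&a, &b) - expected).abs() < 1e-5);
    }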
#[test]
fn test_binary_dot() {
test_binary_dot_impl::<u8>(0);
test_binary_dot_impl::<u8>(1);
test_binary_dot_impl::<u8>(8);
test_binary_dot_impl::<u8>(33);
test_binary_dot_impl::<u8>(65);
test_binary_dot_impl::<u8>(3 * 129);
test_binary_dot_impl::<u128>(1);
test_binary_dot_impl::<u128>(3 * 129);
}
fn test_binary_dot_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let error = vector_dim as f32 * 0.01;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_encoded = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_encoded, index as u32, &counter);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() <= error);
}
}
#[test]
fn test_binary_dot_inverted() {
test_binary_dot_inverted_impl::<u8>(0);
test_binary_dot_inverted_impl::<u8>(1);
test_binary_dot_inverted_impl::<u8>(8);
test_binary_dot_inverted_impl::<u8>(33);
test_binary_dot_inverted_impl::<u8>(65);
test_binary_dot_inverted_impl::<u8>(3 * 129);
test_binary_dot_inverted_impl::<u128>(1);
test_binary_dot_inverted_impl::<u128>(3 * 129);
}
fn test_binary_dot_inverted_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let error = vector_dim as f32 * 0.01;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_encoded = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_encoded, index as u32, &counter);
let original_score = -dot_similarity(&query, vector);
assert!((score - original_score).abs() <= error);
}
}
#[test]
fn test_binary_dot_internal() {
test_binary_dot_internal_impl::<u8>(0);
test_binary_dot_internal_impl::<u8>(1);
test_binary_dot_internal_impl::<u8>(8);
test_binary_dot_internal_impl::<u8>(33);
test_binary_dot_internal_impl::<u8>(65);
test_binary_dot_internal_impl::<u8>(3 * 129);
test_binary_dot_internal_impl::<u128>(1);
test_binary_dot_internal_impl::<u128>(3 * 129);
}
fn test_binary_dot_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let error = vector_dim as f32 * 0.01;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..vectors_count {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() <= error);
}
}
#[test]
fn test_binary_dot_inverted_internal() {
test_binary_dot_inverted_internal_impl::<u8>(0);
test_binary_dot_inverted_internal_impl::<u8>(1);
test_binary_dot_inverted_internal_impl::<u8>(8);
test_binary_dot_inverted_internal_impl::<u8>(33);
test_binary_dot_inverted_internal_impl::<u8>(65);
test_binary_dot_inverted_internal_impl::<u8>(3 * 129);
test_binary_dot_inverted_internal_impl::<u128>(1);
test_binary_dot_inverted_internal_impl::<u128>(3 * 129);
}
fn test_binary_dot_inverted_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let error = vector_dim as f32 * 0.01;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..vectors_count {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = -dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() <= error);
}
}
#[test]
fn test_binary_l1() {
test_binary_l1_impl::<u8>(0);
test_binary_l1_impl::<u8>(1);
test_binary_l1_impl::<u8>(8);
test_binary_l1_impl::<u8>(33);
test_binary_l1_impl::<u8>(65);
test_binary_l1_impl::<u8>(3 * 129);
test_binary_l1_impl::<u128>(1);
test_binary_l1_impl::<u128>(3 * 129);
}
fn test_binary_l1_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_b = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_point(&query_b, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l1_similarity(&query, v), i))
.collect();
original_scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l1_inverted() {
test_binary_l1_inverted_impl::<u8>(0);
test_binary_l1_inverted_impl::<u8>(1);
test_binary_l1_inverted_impl::<u8>(8);
test_binary_l1_inverted_impl::<u8>(33);
test_binary_l1_inverted_impl::<u8>(65);
test_binary_l1_inverted_impl::<u8>(3 * 129);
test_binary_l1_inverted_impl::<u128>(1);
test_binary_l1_inverted_impl::<u128>(3 * 129);
}
fn test_binary_l1_inverted_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_b = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_point(&query_b, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l1_similarity(&query, v), i))
.collect();
original_scores.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l1_internal() {
test_binary_l1_internal_impl::<u8>(0);
test_binary_l1_internal_impl::<u8>(1);
test_binary_l1_internal_impl::<u8>(8);
test_binary_l1_internal_impl::<u8>(33);
test_binary_l1_internal_impl::<u8>(65);
test_binary_l1_internal_impl::<u8>(3 * 129);
test_binary_l1_internal_impl::<u128>(1);
test_binary_l1_internal_impl::<u128>(3 * 129);
}
fn test_binary_l1_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_internal(0, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l1_similarity(&vector_data[0], v), i))
.collect();
original_scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l1_inverted_internal() {
test_binary_l1_inverted_internal_impl::<u8>(0);
test_binary_l1_inverted_internal_impl::<u8>(1);
test_binary_l1_inverted_internal_impl::<u8>(8);
test_binary_l1_inverted_internal_impl::<u8>(33);
test_binary_l1_inverted_internal_impl::<u8>(65);
test_binary_l1_inverted_internal_impl::<u8>(3 * 129);
test_binary_l1_inverted_internal_impl::<u128>(1);
test_binary_l1_inverted_internal_impl::<u128>(3 * 129);
}
fn test_binary_l1_inverted_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_internal(0, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l1_similarity(&vector_data[0], v), i))
.collect();
original_scores.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l2() {
test_binary_l2_impl::<u8>(0);
test_binary_l2_impl::<u8>(1);
test_binary_l2_impl::<u8>(8);
test_binary_l2_impl::<u8>(33);
test_binary_l2_impl::<u8>(65);
test_binary_l2_impl::<u8>(3 * 129);
test_binary_l2_impl::<u128>(1);
test_binary_l2_impl::<u128>(3 * 129);
}
fn test_binary_l2_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_b = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_point(&query_b, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l2_similarity(&query, v), i))
.collect();
original_scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l2_inverted() {
test_binary_l2_inverted_impl::<u8>(0);
test_binary_l2_inverted_impl::<u8>(1);
test_binary_l2_inverted_impl::<u8>(8);
test_binary_l2_inverted_impl::<u8>(33);
test_binary_l2_inverted_impl::<u8>(65);
test_binary_l2_inverted_impl::<u8>(3 * 129);
test_binary_l2_inverted_impl::<u128>(1);
test_binary_l2_inverted_impl::<u128>(3 * 129);
}
fn test_binary_l2_inverted_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = generate_vector(vector_dim, &mut rng);
let query_b = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_point(&query_b, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l2_similarity(&query, v), i))
.collect();
original_scores.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l2_internal() {
test_binary_l2_internal_impl::<u8>(0);
test_binary_l2_internal_impl::<u8>(1);
test_binary_l2_internal_impl::<u8>(8);
test_binary_l2_internal_impl::<u8>(33);
test_binary_l2_internal_impl::<u8>(65);
test_binary_l2_internal_impl::<u8>(3 * 129);
test_binary_l2_internal_impl::<u128>(1);
test_binary_l2_internal_impl::<u128>(3 * 129);
}
fn test_binary_l2_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_internal(0, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, v)| (l2_similarity(&vector_data[0], v), i))
.collect();
original_scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
#[test]
fn test_binary_l2_inverted_internal() {
test_binary_l2_inverted_internal_impl::<u8>(0);
test_binary_l2_inverted_internal_impl::<u8>(1);
test_binary_l2_inverted_internal_impl::<u8>(8);
test_binary_l2_inverted_internal_impl::<u8>(33);
test_binary_l2_inverted_internal_impl::<u8>(65);
test_binary_l2_inverted_internal_impl::<u8>(3 * 129);
test_binary_l2_inverted_internal_impl::<u128>(1);
test_binary_l2_inverted_internal_impl::<u128>(3 * 129);
}
fn test_binary_l2_inverted_internal_impl<TBitsStoreType: BitsStoreType>(vector_dim: usize) {
let vectors_count = 128;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
vector_data.push(generate_vector(vector_dim, &mut rng));
}
let quantized_vector_size = EncodedVectorsBin::<TBitsStoreType, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded = EncodedVectorsBin::<TBitsStoreType, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: true,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
let mut scores: Vec<_> = vector_data
.iter()
.enumerate()
.map(|(i, _)| (encoded.score_internal(0, i as u32, &counter), i))
.collect();
scores.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let sorted_indices: Vec<_> = scores.into_iter().map(|(_, i)| i).collect();
let mut original_scores: Vec<_> = vector_data
.iter()
.enumerate()
            .map(|(i, v)| (l2_similarity(&vector_data[0], v), i))
.collect();
original_scores.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
let sorted_original_indices: Vec<_> = original_scores.into_iter().map(|(_, i)| i).collect();
assert_eq!(sorted_original_indices, sorted_indices);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_avx2.rs | lib/quantization/tests/integration/test_avx2.rs | #[cfg(test)]
#[cfg(target_arch = "x86_64")]
mod tests {
use std::sync::atomic::AtomicBool;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use rand::{Rng, SeedableRng};
use rstest::rstest;
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_avx(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_avx(&query_u8, quantized_vector);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l2_avx(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_avx(&query_u8, quantized_vector);
            let original_score = l2_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l1_avx(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim)
.map(|_| rng.random_range(-1.0..=1.0))
.collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_avx(&query_u8, quantized_vector);
            let original_score = l1_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/stop_condition.rs | lib/quantization/tests/integration/stop_condition.rs | #[cfg(test)]
mod tests {
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use quantization::{EncodedVectorsPQ, EncodingError};
#[test]
fn stop_condition_u8() {
let stopped = Arc::new(AtomicBool::new(false));
let stopped_clone = stopped.clone();
let stopped_ref = stopped.as_ref();
let stop_thread = std::thread::spawn(move || {
std::thread::sleep(std::time::Duration::from_millis(100));
stopped_clone.store(true, Ordering::Relaxed);
});
let vectors_count = 1_000_000;
let vector_dim = 8;
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let zero_vector = vec![0.0; vector_dim];
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
assert_eq!(
EncodedVectorsU8::encode(
(0..vectors_count).map(|_| &zero_vector),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
ScalarQuantizationMethod::Int8,
None,
stopped_ref,
)
.err(),
Some(EncodingError::Stopped)
);
stop_thread.join().unwrap();
}
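    // The tests in this module rely on cooperative cancellation: the encoder
    // periodically polls the shared `AtomicBool` and returns
    // `EncodingError::Stopped` once the flag flips. A minimal sketch of that
    // polling pattern (illustrative only, not the encoder's actual loop):
    #[test]
    fn stop_flag_is_observed_by_polling() {
        let stopped = Arc::new(AtomicBool::new(false));
        let stopped_clone = stopped.clone();
        let stop_thread = std::thread::spawn(move || {
            std::thread::sleep(std::time::Duration::from_millis(10));
            stopped_clone.store(true, Ordering::Relaxed);
        });
        // Stand-in for a long-running encoding loop that checks the flag each pass.
        while !stopped.load(Ordering::Relaxed) {
            std::hint::spin_loop();
        }
        assert!(stopped.load(Ordering::Relaxed));
        stop_thread.join().unwrap();
    }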
#[test]
fn stop_condition_pq() {
let stopped = Arc::new(AtomicBool::new(false));
let stopped_clone = stopped.clone();
let stopped_ref = stopped.as_ref();
let stop_thread = std::thread::spawn(move || {
std::thread::sleep(std::time::Duration::from_millis(300));
stopped_clone.store(true, Ordering::Relaxed);
});
let vectors_count = 1_000_000;
let vector_dim = 8;
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let zero_vector = vec![0.0; vector_dim];
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
2,
);
assert_eq!(
EncodedVectorsPQ::encode(
(0..vectors_count).map(|_| &zero_vector),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
2,
1,
None,
stopped_ref,
)
.err(),
Some(EncodingError::Stopped)
);
stop_thread.join().unwrap();
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_pq.rs | lib/quantization/tests/integration/test_pq.rs | #[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::time::Duration;
use common::counter::hardware_counter::HardwareCounterCell;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_pq::EncodedVectorsPQ;
use rand::{Rng, SeedableRng};
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
const VECTORS_COUNT: usize = 513;
const VECTOR_DIM: usize = 65;
const ERROR: f32 = VECTOR_DIM as f32 * 0.05;
#[test]
fn test_pq_dot() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_l2() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = l2_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_l1() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = l1_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_dot_inverted() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = -dot_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_l2_inverted() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: true,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = -l2_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_l1_inverted() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let query: Vec<_> = (0..VECTOR_DIM).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: true,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
let counter = HardwareCounterCell::new();
for (index, vector) in vector_data.iter().enumerate() {
let score = encoded.score_point(&query_u8, index as u32, &counter);
            let original_score = -l1_similarity(&query, vector);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_dot_internal() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..VECTORS_COUNT {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() < ERROR);
}
}
#[test]
fn test_pq_dot_inverted_internal() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
1,
None,
&AtomicBool::new(false),
)
.unwrap();
let counter = HardwareCounterCell::new();
for i in 1..VECTORS_COUNT {
let score = encoded.score_internal(0, i as u32, &counter);
            let original_score = -dot_similarity(&vector_data[0], &vector_data[i]);
            assert!((score - original_score).abs() < ERROR);
}
}
    // Ignore this test because it takes a long time.
    // It should be run separately or with the `--test-threads=1` flag,
    // because `num_threads::num_threads()` is used to check that all encode threads have finished.
#[ignore]
#[test]
fn test_encode_panic() {
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<_>> = vec![];
for _ in 0..VECTORS_COUNT {
vector_data.push((0..VECTOR_DIM).map(|_| rng.random()).collect());
}
for i in 0.. {
let counter = AtomicUsize::new(0);
let panic_index = i * VECTORS_COUNT / 3;
let start_num_threads = num_threads::num_threads();
let vector_data = vector_data.clone();
let vector_parameters = VectorParameters {
dim: VECTOR_DIM,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
1,
);
let result = std::thread::spawn(move || {
EncodedVectorsPQ::encode(
vector_data.iter().inspect(|_| {
let cnt = counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
if cnt == panic_index {
panic!("test panic")
}
if cnt > panic_index {
                            // after the panic, start sleeping to simulate a large amount of data
std::thread::sleep(Duration::from_micros(100));
}
}),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
VECTORS_COUNT,
1,
5,
None,
&AtomicBool::new(false),
)
.unwrap()
})
.join();
if result.is_ok() {
                // no panic occurred: panic_index is too big, so all panic cases have been handled
return;
}
            // give the encoding threads some time to finish
std::thread::sleep(Duration::from_millis(50));
// check that all threads are finished
assert_eq!(num_threads::num_threads(), start_num_threads);
println!("Finished iteration {i}");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/main.rs | lib/quantization/tests/integration/main.rs | #[cfg(test)]
pub mod empty_storage;
#[cfg(test)]
pub mod metrics;
#[cfg(test)]
pub mod stop_condition;
#[cfg(test)]
pub mod test_avx2;
#[cfg(test)]
pub mod test_binary;
#[cfg(test)]
pub mod test_binary_encodings;
#[cfg(test)]
pub mod test_neon;
#[cfg(test)]
pub mod test_pq;
#[cfg(test)]
pub mod test_simple;
#[cfg(test)]
pub mod test_sse;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/metrics.rs | lib/quantization/tests/integration/metrics.rs | pub fn dot_similarity(v1: &[f32], v2: &[f32]) -> f32 {
v1.iter().zip(v2).map(|(a, b)| a * b).sum()
}
pub fn l2_similarity(v1: &[f32], v2: &[f32]) -> f32 {
v1.iter().zip(v2).map(|(a, b)| (a - b).powi(2)).sum()
}
pub fn l1_similarity(v1: &[f32], v2: &[f32]) -> f32 {
v1.iter().zip(v2).map(|(a, b)| (a - b).abs()).sum()
}
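// A minimal hand-computed sanity check for the helpers above (illustrative sketch;
// the module and test names are arbitrary). Note that despite the "similarity" naming,
// `l2_similarity` is the squared Euclidean distance and `l1_similarity` is the
// Manhattan distance.
#[cfg(test)]
mod hand_computed_examples {
    use super::*;

    #[test]
    fn hand_computed_values() {
        let a: [f32; 3] = [1.0, 2.0, 3.0];
        let b: [f32; 3] = [4.0, 5.0, 6.0];
        assert_eq!(dot_similarity(&a, &b), 32.0); // 1*4 + 2*5 + 3*6
        assert_eq!(l2_similarity(&a, &b), 27.0); // (-3)^2 + (-3)^2 + (-3)^2
        assert_eq!(l1_similarity(&a, &b), 9.0); // 3 + 3 + 3
    }
}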
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/empty_storage.rs | lib/quantization/tests/integration/empty_storage.rs | #[cfg(test)]
mod tests {
use std::sync::atomic::AtomicBool;
use quantization::EncodedVectorsPQ;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, VectorParameters};
use quantization::encoded_vectors_binary::{EncodedVectorsBin, QueryEncoding};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use tempfile::Builder;
#[test]
fn empty_data_u8() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let vectors_count = 0;
let vector_dim = 256;
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let vector_data: Vec<Vec<f32>> = Default::default();
let data_path = dir.path().join("data.bin");
let meta_path = dir.path().join("meta.json");
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let _encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(Some(data_path.as_path()), quantized_vector_size),
&vector_parameters,
vectors_count,
None,
ScalarQuantizationMethod::Int8,
Some(meta_path.as_path()),
&AtomicBool::new(false),
)
.unwrap();
EncodedVectorsU8::<TestEncodedStorage>::load(
TestEncodedStorage::from_file(data_path.as_path(), quantized_vector_size).unwrap(),
meta_path.as_path(),
)
.unwrap();
}
#[test]
fn empty_data_pq() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let vectors_count = 0;
let vector_dim = 8;
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let vector_data: Vec<Vec<f32>> = Default::default();
let data_path = dir.path().join("data.bin");
let meta_path = dir.path().join("meta.json");
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(
&vector_parameters,
2,
);
let _encoded = EncodedVectorsPQ::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(Some(data_path.as_path()), quantized_vector_size),
&vector_parameters,
vectors_count,
2,
1,
Some(meta_path.as_path()),
&AtomicBool::new(false),
)
.unwrap();
EncodedVectorsPQ::<TestEncodedStorage>::load(
TestEncodedStorage::from_file(data_path.as_path(), quantized_vector_size).unwrap(),
meta_path.as_path(),
)
.unwrap();
}
#[test]
fn empty_data_bq() {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let vector_dim = 8;
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: true,
};
let vector_data: Vec<Vec<f32>> = Default::default();
let data_path = dir.path().join("data.bin");
let meta_path = dir.path().join("meta.json");
let quantized_vector_size =
EncodedVectorsBin::<u8, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
quantization::encoded_vectors_binary::Encoding::OneBit,
);
let _encoded = EncodedVectorsBin::<u8, _>::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(Some(data_path.as_path()), quantized_vector_size),
&vector_parameters,
quantization::encoded_vectors_binary::Encoding::OneBit,
QueryEncoding::SameAsStorage,
Some(meta_path.as_path()),
&AtomicBool::new(false),
)
.unwrap();
EncodedVectorsBin::<u8, TestEncodedStorage>::load(
TestEncodedStorage::from_file(data_path.as_path(), quantized_vector_size).unwrap(),
meta_path.as_path(),
)
.unwrap();
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_neon.rs | lib/quantization/tests/integration/test_neon.rs | #[cfg(test)]
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
mod tests {
use std::sync::atomic::AtomicBool;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use rand::{Rng, SeedableRng};
use rstest::rstest;
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_neon(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_neon(&query_u8, quantized_vector);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l2_neon(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_neon(&query_u8, quantized_vector);
            let original_score = l2_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l1_neon(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_neon(&query_u8, quantized_vector);
            let original_score = l1_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/tests/integration/test_sse.rs | lib/quantization/tests/integration/test_sse.rs | #[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod tests {
use std::sync::atomic::AtomicBool;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use rand::{Rng, SeedableRng};
use rstest::rstest;
use crate::metrics::{dot_similarity, l1_similarity, l2_similarity};
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_dot_sse(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_sse(&query_u8, quantized_vector);
            let original_score = dot_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l2_sse(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L2,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_sse(&query_u8, quantized_vector);
            let original_score = l2_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
#[rstest]
#[case(ScalarQuantizationMethod::Int8)]
fn test_l1_sse(#[case] method: ScalarQuantizationMethod) {
let vectors_count = 129;
let vector_dim = 65;
let error = vector_dim as f32 * 0.1;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vector_data: Vec<Vec<f32>> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vector_data.push(vector);
}
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let encoded = EncodedVectorsU8::encode(
vector_data.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
method,
None,
&AtomicBool::new(false),
)
.unwrap();
let query_u8 = encoded.encode_query(&query);
for (index, vector) in vector_data.iter().enumerate() {
let quantized_vector = encoded.get_quantized_vector(index as u32);
let score = encoded.score_point_sse(&query_u8, quantized_vector);
            let original_score = l1_similarity(&query, vector);
            assert!((score - original_score).abs() < error);
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/benches/p_square.rs | lib/quantization/benches/p_square.rs | use std::hint::black_box;
use criterion::{Criterion, criterion_group, criterion_main};
use quantization::p_square::P2Quantile;
fn p_square(c: &mut Criterion) {
let mut group = c.benchmark_group("p_square");
let count = 10_000;
let data = (0..count)
.map(|_| rand::random::<f64>())
.collect::<Vec<f64>>();
let quantile = 0.99;
group.bench_function("p_square_5", |b| {
b.iter(|| {
let mut p2 = P2Quantile::<5>::new(quantile).unwrap();
for &x in &data {
p2.push(x);
}
black_box(p2.estimate());
});
});
group.bench_function("p_square_7", |b| {
b.iter(|| {
let mut p2 = P2Quantile::<7>::new(quantile).unwrap();
for &x in &data {
p2.push(x);
}
black_box(p2.estimate());
});
});
group.bench_function("p_square_9", |b| {
b.iter(|| {
let mut p2 = P2Quantile::<9>::new(quantile).unwrap();
for &x in &data {
p2.push(x);
}
black_box(p2.estimate());
});
});
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = p_square,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/benches/encode.rs | lib/quantization/benches/encode.rs | use std::sync::atomic::AtomicBool;
use criterion::{Criterion, criterion_group, criterion_main};
use permutation_iterator::Permutor;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_u8::{EncodedVectorsU8, ScalarQuantizationMethod};
use rand::Rng;
fn encode_dot_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("encode dot");
let vectors_count = 100_000;
let vector_dim = 1024;
let mut rng = rand::rng();
let mut list: Vec<f32> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
list.extend_from_slice(&vector);
}
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let i8_encoded = EncodedVectorsU8::encode(
(0..vectors_count).map(|i| &list[i * vector_dim..(i + 1) * vector_dim]),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
ScalarQuantizationMethod::Int8,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let encoded_query = i8_encoded.encode_query(&query);
#[cfg(target_arch = "x86_64")]
group.bench_function("score all u8 avx", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_avx(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("score all u8 sse", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_sse(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("score all u8 neon", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_neon(&encoded_query, quantized_vector);
}
});
});
let permutor = Permutor::new(vectors_count as u64);
let permutation: Vec<u32> = permutor.map(|i| i as u32).collect();
#[cfg(target_arch = "x86_64")]
group.bench_function("score random access u8 avx", |b| {
b.iter(|| {
let mut _s = 0.0;
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_avx(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("score random access u8 sse", |b| {
let mut _s = 0.0;
b.iter(|| {
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_sse(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("score random access u8 neon", |b| {
let mut _s = 0.0;
b.iter(|| {
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_neon(&encoded_query, quantized_vector);
}
});
});
}
fn encode_l1_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("encode l1");
let vectors_count = 100_000;
let vector_dim = 1024;
let mut rng = rand::rng();
let mut list: Vec<f32> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
list.extend_from_slice(&vector);
}
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::L1,
invert: true,
};
let quantized_vector_size =
EncodedVectorsU8::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters);
let i8_encoded = EncodedVectorsU8::encode(
(0..vectors_count).map(|i| &list[i * vector_dim..(i + 1) * vector_dim]),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
None,
ScalarQuantizationMethod::Int8,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let encoded_query = i8_encoded.encode_query(&query);
#[cfg(target_arch = "x86_64")]
group.bench_function("score all u8 avx", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_avx(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("score all u8 sse", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_sse(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("score all u8 neon", |b| {
b.iter(|| {
let mut _s = 0.0;
for i in 0..vectors_count as u32 {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_neon(&encoded_query, quantized_vector);
}
});
});
let permutor = Permutor::new(vectors_count as u64);
let permutation: Vec<u32> = permutor.map(|i| i as u32).collect();
#[cfg(target_arch = "x86_64")]
group.bench_function("score random access u8 avx", |b| {
b.iter(|| {
let mut _s = 0.0;
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_avx(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "x86_64")]
group.bench_function("score random access u8 sse", |b| {
let mut _s = 0.0;
b.iter(|| {
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_sse(&encoded_query, quantized_vector);
}
});
});
#[cfg(target_arch = "aarch64")]
group.bench_function("score random access u8 neon", |b| {
let mut _s = 0.0;
b.iter(|| {
for &i in &permutation {
let quantized_vector = i8_encoded.get_quantized_vector(i);
_s = i8_encoded.score_point_neon(&encoded_query, quantized_vector);
}
});
});
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = encode_dot_bench, encode_l1_bench
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/benches/pq.rs | lib/quantization/benches/pq.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_pq::EncodedVectorsPQ;
use rand::Rng;
fn encode_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("encode");
let vectors_count = 100_000;
let vector_dim = 1024;
let mut rng = rand::rng();
let mut list: Vec<f32> = Vec::new();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
list.extend_from_slice(&vector);
}
let vector_parameters = VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
};
let quantized_vector_size =
EncodedVectorsPQ::<TestEncodedStorage>::get_quantized_vector_size(&vector_parameters, 2);
let pq_encoded = EncodedVectorsPQ::encode(
(0..vectors_count).map(|i| &list[i * vector_dim..(i + 1) * vector_dim]),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&vector_parameters,
vectors_count,
2,
2,
None,
&AtomicBool::new(false),
)
.unwrap();
let query: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
let encoded_query = pq_encoded.encode_query(&query);
let mut total = 0.0;
let hardware_counter = HardwareCounterCell::new();
group.bench_function("score random access pq", |b| {
b.iter(|| {
let random_idx = rand::random::<u32>() % vectors_count as u32;
total += pq_encoded.score_point(&encoded_query, random_idx, &hardware_counter);
});
});
println!("total: {total}");
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = encode_bench
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/quantization/benches/binary.rs | lib/quantization/benches/binary.rs | use std::hint::black_box;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use permutation_iterator::Permutor;
use quantization::encoded_storage::{TestEncodedStorage, TestEncodedStorageBuilder};
use quantization::encoded_vectors::{DistanceType, EncodedVectors, VectorParameters};
use quantization::encoded_vectors_binary::{
EncodedQueryBQ, EncodedVectorsBin, Encoding, QueryEncoding,
};
use rand::{Rng, SeedableRng};
fn generate_number(rng: &mut rand::rngs::StdRng) -> f32 {
let n = f32::signum(rng.random_range(-1.0..1.0));
if n == 0.0 { 1.0 } else { n }
}
fn generate_vector(dim: usize, rng: &mut rand::rngs::StdRng) -> Vec<f32> {
(0..dim).map(|_| generate_number(rng)).collect()
}
fn binary_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("encode");
let vectors_count = 100_000;
let vector_dim = 1024;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vectors: Vec<Vec<f32>> = (0..vectors_count)
.map(|_| generate_vector(vector_dim, &mut rng))
.collect();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vectors.push(vector);
}
let quantized_vector_size =
EncodedVectorsBin::<u128, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded_u128 = EncodedVectorsBin::<u128, _>::encode(
vectors.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query = generate_vector(vector_dim, &mut rng);
let encoded_query = encoded_u128.encode_query(&query);
let hardware_counter = HardwareCounterCell::new();
group.bench_function("score binary linear access u128", |b| {
b.iter(|| {
for i in 0..vectors_count as u32 {
let s = encoded_u128.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
let permutor = Permutor::new(vectors_count as u64);
let permutation: Vec<u32> = permutor.map(|i| i as u32).collect();
group.bench_function("score binary random access u128", |b| {
b.iter(|| {
for &i in &permutation {
let s = encoded_u128.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
let quantized_vector_size =
EncodedVectorsBin::<u8, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded_u8 = EncodedVectorsBin::<u8, _>::encode(
vectors.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::SameAsStorage,
None,
&AtomicBool::new(false),
)
.unwrap();
let query = generate_vector(vector_dim, &mut rng);
let encoded_query = encoded_u8.encode_query(&query);
group.bench_function("score binary linear access u8", |b| {
b.iter(|| {
for i in 0..vectors_count as u32 {
let s = encoded_u8.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
let permutor = Permutor::new(vectors_count as u64);
let permutation: Vec<u32> = permutor.map(|i| i as u32).collect();
group.bench_function("score binary random access u8", |b| {
b.iter(|| {
for &i in &permutation {
let s = encoded_u8.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
}
fn binary_scalar_query_bench_impl(c: &mut Criterion) {
let mut group = c.benchmark_group("binary_u128_scalar_query");
let vectors_count = 100_000;
let vector_dim = 1024;
let mut rng = rand::rngs::StdRng::seed_from_u64(42);
let mut vectors: Vec<Vec<f32>> = (0..vectors_count)
.map(|_| generate_vector(vector_dim, &mut rng))
.collect();
for _ in 0..vectors_count {
let vector: Vec<f32> = (0..vector_dim).map(|_| rng.random()).collect();
vectors.push(vector);
}
let quantized_vector_size =
EncodedVectorsBin::<u128, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded_u128 = EncodedVectorsBin::<u128, _>::encode(
vectors.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::Scalar8bits,
None,
&AtomicBool::new(false),
)
.unwrap();
let query = generate_vector(vector_dim, &mut rng);
let encoded_query = encoded_u128.encode_query(&query);
let hardware_counter = HardwareCounterCell::new();
let permutor = Permutor::new(vectors_count as u64);
let permutation: Vec<u32> = permutor.map(|i| i as u32).collect();
group.bench_function("binary u128 scalar query", |b| {
b.iter(|| {
for &i in &permutation {
let s = encoded_u128.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
let native_scorer = |query: &[u128], vector: &[u128]| {
let mut result = 0;
for (&b1, b2_chunk) in vector.iter().zip(query.chunks_exact(8)) {
for (i, &b2) in b2_chunk.iter().enumerate() {
result += (b1 ^ b2).count_ones() << i;
}
}
result as usize
};
let query = match &encoded_query {
EncodedQueryBQ::Scalar8bits(encoded) => &encoded.encoded_vector,
_ => panic!("Expected Scalar8bits"),
};
group.bench_function("binary u128 scalar query native", |b| {
b.iter(|| {
for &i in &permutation {
let vector = encoded_u128.get_quantized_vector(i);
let vector = bytemuck::cast_slice::<u8, u128>(vector);
let s = native_scorer(query, vector);
black_box(s);
}
});
});
let quantized_vector_size =
EncodedVectorsBin::<u8, TestEncodedStorage>::get_quantized_vector_size_from_params(
vector_dim,
Encoding::OneBit,
);
let encoded_u8 = EncodedVectorsBin::<u8, _>::encode(
vectors.iter(),
TestEncodedStorageBuilder::new(None, quantized_vector_size),
&VectorParameters {
dim: vector_dim,
deprecated_count: None,
distance_type: DistanceType::Dot,
invert: false,
},
Encoding::OneBit,
QueryEncoding::Scalar8bits,
None,
&AtomicBool::new(false),
)
.unwrap();
let query = generate_vector(vector_dim, &mut rng);
let encoded_query = encoded_u8.encode_query(&query);
group.bench_function("binary u8 scalar query", |b| {
b.iter(|| {
for &i in &permutation {
let s = encoded_u8.score_point(&encoded_query, i, &hardware_counter);
black_box(s);
}
});
});
let native_scorer = |query: &[u8], vector: &[u8]| {
let mut result = 0;
for (&b1, b2_chunk) in vector.iter().zip(query.chunks_exact(8)) {
for (i, &b2) in b2_chunk.iter().enumerate() {
result += (b1 ^ b2).count_ones() << i;
}
}
result as usize
};
let query = match &encoded_query {
EncodedQueryBQ::Scalar8bits(encoded) => &encoded.encoded_vector,
_ => panic!("Expected Scalar8bits"),
};
group.bench_function("binary u8 scalar query native", |b| {
b.iter(|| {
for &i in &permutation {
let vector = encoded_u8.get_quantized_vector(i);
let s = native_scorer(query, vector);
black_box(s);
}
});
});
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = binary_bench, binary_scalar_query_bench_impl
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/debug_messenger.rs | lib/gpu/src/debug_messenger.rs | use std::ffi::CStr;
use std::os::raw::c_void;
use ash::vk;
/// Trait for a debug messenger.
/// A debug messenger is used to handle Vulkan debug messages.
/// If one is provided, the Vulkan instance will be created with validation layers and the debug messenger attached.
/// Validation layers have a large performance cost, so they should only be used for tests and debugging.
pub trait DebugMessenger: Sync + Send {
fn callback(&self) -> vk::PFN_vkDebugUtilsMessengerCallbackEXT;
fn severity_flags(&self) -> vk::DebugUtilsMessageSeverityFlagsEXT;
fn message_type_flags(&self) -> vk::DebugUtilsMessageTypeFlagsEXT;
}
/// Log all messages from the Vulkan validation layer.
pub struct LogAllMessenger {}
impl DebugMessenger for LogAllMessenger {
fn callback(&self) -> vk::PFN_vkDebugUtilsMessengerCallbackEXT {
Some(vulkan_debug_callback_log)
}
fn severity_flags(&self) -> vk::DebugUtilsMessageSeverityFlagsEXT {
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
| vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE
| vk::DebugUtilsMessageSeverityFlagsEXT::INFO
| vk::DebugUtilsMessageSeverityFlagsEXT::ERROR
}
fn message_type_flags(&self) -> vk::DebugUtilsMessageTypeFlagsEXT {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
| vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE
| vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
}
}
unsafe extern "system" fn vulkan_debug_callback_log(
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_p_user_data: *mut c_void,
) -> vk::Bool32 {
let message = unsafe { CStr::from_ptr((*p_callback_data).p_message) };
let message_type_str = match message_type {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL => "[General]",
vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE => "[Performance]",
vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION => "[Validation]",
_ => "[Unknown]",
};
match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => {
log::info!("{message_type_str} {message:?}")
}
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => {
log::warn!("{message_type_str} {message:?}")
}
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => {
log::error!("{message_type_str} {message:?}")
}
vk::DebugUtilsMessageSeverityFlagsEXT::INFO => {
log::info!("{message_type_str} {message:?}")
}
_ => log::info!("{message_type_str} {message:?}"),
};
vk::FALSE
}
/// Panics if an error message is received from the Vulkan validation layer.
pub struct PanicIfErrorMessenger {}
impl DebugMessenger for PanicIfErrorMessenger {
fn callback(&self) -> vk::PFN_vkDebugUtilsMessengerCallbackEXT {
Some(vulkan_debug_callback_panic)
}
fn severity_flags(&self) -> vk::DebugUtilsMessageSeverityFlagsEXT {
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR
}
fn message_type_flags(&self) -> vk::DebugUtilsMessageTypeFlagsEXT {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
| vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE
| vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
}
}
unsafe extern "system" fn vulkan_debug_callback_panic(
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_p_user_data: *mut c_void,
) -> vk::Bool32 {
if std::thread::panicking() {
return vk::FALSE;
}
let message = unsafe { CStr::from_ptr((*p_callback_data).p_message) };
let message = message.to_str().unwrap();
let message_type = match message_type {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL => "General",
vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE => "Performance",
vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION => "Validation",
_ => "Unknown",
};
let severity = match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => "error",
_ => "info",
};
let backtrace = std::backtrace::Backtrace::force_capture().to_string();
panic!(
"Vulkan panic ({message_type} {severity}) \n\
With message: {message}, \n\
        Backtrace: {backtrace}",
)
}
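// Usage sketch, mirroring how `GPU_TEST_INSTANCE` in `lib.rs` wires a messenger in:
// an implementation is boxed and handed to the instance builder, which then creates
// the Vulkan instance with validation layers enabled and registers the callback.
//
// let instance = Instance::builder()
//     .with_debug_messenger(Box::new(PanicIfErrorMessenger {}))
//     .build()
//     .unwrap();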
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/lib.rs | lib/gpu/src/lib.rs | #![cfg(feature = "gpu")]
pub mod allocation_callbacks;
pub use allocation_callbacks::*;
pub mod context;
pub use context::*;
pub mod debug_messenger;
pub use debug_messenger::*;
pub mod descriptor_set;
pub use descriptor_set::*;
pub mod descriptor_set_layout;
pub use descriptor_set_layout::*;
pub mod buffer;
pub use buffer::*;
pub mod device;
pub use device::*;
pub mod instance;
pub use instance::*;
pub mod pipeline;
pub use pipeline::*;
pub mod shader;
use ash::vk;
pub use shader::*;
#[cfg(test)]
mod basic_test;
#[cfg(any(test, feature = "testing"))]
pub static GPU_TEST_INSTANCE: std::sync::LazyLock<std::sync::Arc<Instance>> =
std::sync::LazyLock::new(|| {
Instance::builder()
.with_debug_messenger(Box::new(PanicIfErrorMessenger {}))
.build()
.unwrap()
});
/// A trait for GPU resources.
/// It's used to keep GPU resources alive while they are in use by the GPU context.
pub trait Resource: Send + Sync {}
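// Minimal sketch (`ExampleResource` is purely illustrative and not used anywhere else):
// `Resource` is a marker trait, so any `Send + Sync` type can be kept alive by a GPU
// context simply by implementing it.
#[allow(dead_code)]
struct ExampleResource;
impl Resource for ExampleResource {}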
#[derive(Debug)]
pub enum GpuError {
/// Error during allocation. This can happen when the GPU runs out of memory.
    /// It can also happen when RAM is exhausted while allocating CpuToGpu or GpuToCpu buffers.
OutOfMemory,
/// Error during buffer access while mapping or command buffer recording.
    /// Warning: this error does not cover out-of-bounds access inside shaders.
OutOfBounds(String),
    /// Some of the required hardware features are not supported by the GPU.
NotSupported(String),
/// A fence or query has not yet completed.
NotReady,
/// A wait operation has not completed in the specified time.
Timeout,
    /// All other errors.
Other(String),
}
pub type GpuResult<T> = Result<T, GpuError>;
impl From<gpu_allocator::AllocationError> for GpuError {
fn from(error: gpu_allocator::AllocationError) -> GpuError {
match error {
gpu_allocator::AllocationError::OutOfMemory => GpuError::OutOfMemory,
_ => GpuError::Other(format!("GPU allocator error: {error:?}")),
}
}
}
impl From<vk::Result> for GpuError {
fn from(result: vk::Result) -> Self {
match result {
vk::Result::NOT_READY => GpuError::NotReady,
vk::Result::TIMEOUT => GpuError::Timeout,
vk::Result::ERROR_OUT_OF_HOST_MEMORY => GpuError::OutOfMemory,
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => GpuError::OutOfMemory,
vk::Result::ERROR_LAYER_NOT_PRESENT => {
GpuError::NotSupported("Layer is not present".to_string())
}
vk::Result::ERROR_EXTENSION_NOT_PRESENT => {
GpuError::NotSupported("Extension is not present".to_string())
}
vk::Result::ERROR_FEATURE_NOT_PRESENT => {
GpuError::NotSupported("Feature is not present".to_string())
}
vk::Result::ERROR_INCOMPATIBLE_DRIVER => {
GpuError::NotSupported("Unable to find a Vulkan driver".to_string())
}
vk::Result::ERROR_FORMAT_NOT_SUPPORTED => {
GpuError::NotSupported("Format is not supported".to_string())
}
_ => GpuError::Other(format!("Vulkan API error: {result:?}")),
}
}
}
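// Minimal usage sketch (`vk_result_to_gpu_result` is a hypothetical helper, shown only
// for illustration): raw Vulkan results can be mapped into `GpuResult` through the
// `From<vk::Result>` conversion above.
#[allow(dead_code)]
fn vk_result_to_gpu_result(result: vk::Result) -> GpuResult<()> {
    if result == vk::Result::SUCCESS {
        Ok(())
    } else {
        Err(GpuError::from(result))
    }
}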
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/device.rs | lib/gpu/src/device.rs | use std::ffi::CString;
use std::sync::Arc;
use ash::vk;
use gpu_allocator::vulkan::{Allocation, AllocationCreateDesc, Allocator, AllocatorCreateDesc};
use parking_lot::Mutex;
use crate::*;
/// GPU device structure.
/// It's a wrapper around a Vulkan device.
pub struct Device {
/// Instance that owns the device.
instance: Arc<Instance>,
/// Native Vulkan device handle.
vk_device: ash::Device,
/// Hardware device name.
name: String,
/// GPU memory allocator from `gpu-allocator` crate.
    /// It's an `Option` because of drop order: we need to drop it before the device,
    /// but `Allocator` is destroyed by its own drop.
gpu_allocator: Option<Mutex<Allocator>>,
/// All found compute queues.
compute_queues: Vec<Queue>,
/// All found transfer queues.
_transfer_queues: Vec<Queue>,
/// GPU subgroup (warp in CUDA terms) size.
subgroup_size: usize,
/// Is subgroup size (warp) dynamic.
    /// If true, we need to use additional subgroup size control in the pipeline
    /// and the Vulkan extension that allows setting the subgroup size.
is_dynamic_subgroup_size: bool,
    /// Maximum number of compute work groups per dimension.
/// It's used in bounds checking in Context.
max_compute_work_group_count: [usize; 3],
/// Maximum GPU buffer size.
max_buffer_size: usize,
/// Selected queue index to use.
queue_index: usize,
/// Does the device support half precision floats.
has_half_precision: bool,
}
// GPU execution queue.
#[derive(Clone)]
pub struct Queue {
    // Native Vulkan queue handle.
pub vk_queue: vk::Queue,
// Queue family index for the native Vulkan queue.
pub vk_queue_family_index: usize,
// Index in the family for the native Vulkan queue.
pub vk_queue_index: usize,
}
impl Device {
pub fn new(
instance: Arc<Instance>,
vk_physical_device: &PhysicalDevice,
) -> GpuResult<Arc<Device>> {
Self::new_with_params(instance, vk_physical_device, 0, false)
}
pub fn new_with_params(
instance: Arc<Instance>,
vk_physical_device: &PhysicalDevice,
queue_index: usize,
skip_half_precision: bool,
) -> GpuResult<Arc<Device>> {
#[allow(unused_mut)]
let mut extensions_cstr: Vec<CString> = vec![CString::from(ash::khr::maintenance1::NAME)];
#[cfg(target_os = "macos")]
{
extensions_cstr.push(CString::from(ash::khr::portability_subset::NAME));
}
let vk_queue_families = unsafe {
instance
.vk_instance()
.get_physical_device_queue_family_properties(vk_physical_device.vk_physical_device)
};
let max_queue_priorities_counts: Vec<Vec<f32>> = vk_queue_families
.iter()
.map(|vk_queue_family| vec![0.; vk_queue_family.queue_count as usize])
.collect();
let queue_create_infos: Vec<vk::DeviceQueueCreateInfo> = max_queue_priorities_counts
.iter()
.enumerate()
.map(|(queue_family_index, queue_priorities)| {
vk::DeviceQueueCreateInfo::default()
.flags(vk::DeviceQueueCreateFlags::empty())
.queue_family_index(queue_family_index as u32)
.queue_priorities(queue_priorities.as_slice())
})
.collect();
let physical_device_features = vk::PhysicalDeviceFeatures::default();
// Define Vulkan features that we need.
let mut enabled_physical_device_features_1_1 =
vk::PhysicalDeviceVulkan11Features::default();
let mut enabled_physical_device_features_1_2 =
vk::PhysicalDeviceVulkan12Features::default();
let mut enabled_physical_device_features_1_3 =
vk::PhysicalDeviceVulkan13Features::default();
let mut enabled_physical_devices_features = vk::PhysicalDeviceFeatures2::default()
.push_next(&mut enabled_physical_device_features_1_1)
.push_next(&mut enabled_physical_device_features_1_2)
.push_next(&mut enabled_physical_device_features_1_3);
unsafe {
instance.vk_instance().get_physical_device_features2(
vk_physical_device.vk_physical_device,
&mut enabled_physical_devices_features,
);
};
// From Vulkan 1.1 we need storage buffer 16 bit access.
        if enabled_physical_device_features_1_1.storage_buffer16_bit_access == 0 {
return Err(GpuError::NotSupported(
"Storage buffer 16 bit access is not supported".to_string(),
));
}
let mut physical_device_features_1_1 =
vk::PhysicalDeviceVulkan11Features::default().storage_buffer16_bit_access(true);
// From Vulkan 1.2 we need int8/float16 support.
        if enabled_physical_device_features_1_2.shader_int8 == 0 {
return Err(GpuError::NotSupported("Int8 is not supported".to_string()));
}
let has_half_precision =
!skip_half_precision && enabled_physical_device_features_1_2.shader_float16 == 1;
if !has_half_precision {
log::warn!("Half precision is not supported, falling back to full precision floats");
}
        if enabled_physical_device_features_1_2.storage_buffer8_bit_access == 0 {
return Err(GpuError::NotSupported(
"Storage buffer 8 bit access is not supported".to_string(),
));
}
let mut physical_device_features_1_2 = vk::PhysicalDeviceVulkan12Features::default()
.shader_int8(true)
.shader_float16(has_half_precision)
.storage_buffer8_bit_access(true);
// From Vulkan 1.3 we need subgroup size control if it's dynamic.
let mut physical_device_features_1_3 = vk::PhysicalDeviceVulkan13Features::default();
let max_compute_work_group_count;
let max_buffer_size;
let mut is_dynamic_subgroup_size = false;
let subgroup_size = {
let props = unsafe {
instance
.vk_instance()
.get_physical_device_properties(vk_physical_device.vk_physical_device)
};
max_compute_work_group_count = [
props.limits.max_compute_work_group_count[0] as usize,
props.limits.max_compute_work_group_count[1] as usize,
props.limits.max_compute_work_group_count[2] as usize,
];
max_buffer_size = props.limits.max_storage_buffer_range as usize;
let mut subgroup_properties = vk::PhysicalDeviceSubgroupProperties::default();
let mut vulkan_1_3_properties = vk::PhysicalDeviceVulkan13Properties::default();
let mut props2 = vk::PhysicalDeviceProperties2::default()
.push_next(&mut subgroup_properties)
.push_next(&mut vulkan_1_3_properties);
unsafe {
instance.vk_instance().get_physical_device_properties2(
vk_physical_device.vk_physical_device,
&mut props2,
);
}
let subgroup_size = if vulkan_1_3_properties.min_subgroup_size
!= vulkan_1_3_properties.max_subgroup_size
{
                if enabled_physical_device_features_1_3.subgroup_size_control == 0 {
return Err(GpuError::NotSupported(
"Subgroup size control is not supported".to_string(),
));
}
physical_device_features_1_3 =
physical_device_features_1_3.subgroup_size_control(true);
if !vulkan_1_3_properties
.required_subgroup_size_stages
.contains(vk::ShaderStageFlags::COMPUTE)
{
// A strange situation where subgroup size can be different but we cannot set it.
// We cannot handle this case (we have to know subgroup size), so skip device creation.
return Err(GpuError::NotSupported(
"Subgroup size is dynamic but not supported for compute stage".to_string(),
));
}
is_dynamic_subgroup_size = true;
// prefer max subgroup size
vulkan_1_3_properties.max_subgroup_size as usize
} else {
subgroup_properties.subgroup_size as usize
};
log::info!("Create GPU device {}", vk_physical_device.name);
log::debug!("GPU subgroup size: {subgroup_size}");
subgroup_size
};
// convert extension names to raw pointers to provide to Vulkan
Self::check_extensions_list(
&instance,
vk_physical_device.vk_physical_device,
&extensions_cstr,
)?;
let extension_names_raw: Vec<*const i8> = extensions_cstr
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
let device_create_info = vk::DeviceCreateInfo::default()
.flags(vk::DeviceCreateFlags::empty())
.queue_create_infos(&queue_create_infos)
.enabled_extension_names(&extension_names_raw)
.enabled_features(&physical_device_features)
.push_next(&mut physical_device_features_1_1)
.push_next(&mut physical_device_features_1_2)
.push_next(&mut physical_device_features_1_3);
let vk_device_result = unsafe {
instance.vk_instance().create_device(
vk_physical_device.vk_physical_device,
&device_create_info,
instance.cpu_allocation_callbacks(),
)
};
let vk_device = match vk_device_result {
Ok(vk_device) => vk_device,
Err(e) => return Err(GpuError::from(e)),
};
let mut compute_queues = Vec::new();
let mut transfer_queues = Vec::new();
for (vk_queue_family_index, vk_queue_family) in vk_queue_families.iter().enumerate() {
for vk_queue_index in 0..vk_queue_family.queue_count as usize {
let vk_queue = unsafe {
vk_device.get_device_queue(vk_queue_family_index as u32, vk_queue_index as u32)
};
let queue = Queue {
vk_queue,
vk_queue_family_index,
vk_queue_index,
};
let queue_flags = vk_queue_family.queue_flags;
if vk_queue != vk::Queue::null() {
if queue_flags.contains(vk::QueueFlags::TRANSFER) {
transfer_queues.push(queue.clone());
}
if queue_flags.contains(vk::QueueFlags::COMPUTE) {
compute_queues.push(queue);
}
}
}
}
let gpu_allocator_result = Allocator::new(&AllocatorCreateDesc {
instance: instance.vk_instance().clone(),
device: vk_device.clone(),
physical_device: vk_physical_device.vk_physical_device,
debug_settings: Default::default(),
buffer_device_address: false,
allocation_sizes: Default::default(),
});
let gpu_allocator = match gpu_allocator_result {
Ok(gpu_allocator) => Some(Mutex::new(gpu_allocator)),
Err(e) => {
unsafe {
vk_device.destroy_device(instance.cpu_allocation_callbacks());
}
return Err(GpuError::from(e));
}
};
Ok(Arc::new(Device {
instance: instance.clone(),
vk_device,
gpu_allocator,
compute_queues,
_transfer_queues: transfer_queues,
subgroup_size,
max_compute_work_group_count,
max_buffer_size,
is_dynamic_subgroup_size,
queue_index,
name: vk_physical_device.name.clone(),
has_half_precision,
}))
}
/// Get CPU allocation callbacks.
pub fn cpu_allocation_callbacks(&self) -> Option<&vk::AllocationCallbacks<'_>> {
self.instance.cpu_allocation_callbacks()
}
/// Allocate GPU memory.
pub fn allocate(&self, allocation_desc: &AllocationCreateDesc) -> GpuResult<Allocation> {
if let Some(gpu_allocator) = &self.gpu_allocator {
let mut gpu_allocator = gpu_allocator.lock();
gpu_allocator
.allocate(allocation_desc)
.map_err(GpuError::from)
} else {
Err(GpuError::Other(
"GPU allocator is not available".to_string(),
))
}
}
/// Free GPU memory.
pub fn free(&self, allocation: Allocation) {
if let Some(gpu_allocator) = &self.gpu_allocator {
let mut gpu_allocator = gpu_allocator.lock();
if let Err(e) = gpu_allocator.free(allocation) {
// Log error because free is called from Drop.
log::error!("Failed to free GPU memory: {e:?}");
}
} else {
log::error!("GPU allocator is not available");
}
}
/// Get subgroup size (warp in terms of CUDA).
pub fn subgroup_size(&self) -> usize {
self.subgroup_size
}
pub fn instance(&self) -> Arc<Instance> {
self.instance.clone()
}
pub fn vk_device(&self) -> &ash::Device {
&self.vk_device
}
pub fn is_dynamic_subgroup_size(&self) -> bool {
self.is_dynamic_subgroup_size
}
pub fn max_compute_work_group_count(&self) -> [usize; 3] {
self.max_compute_work_group_count
}
pub fn max_buffer_size(&self) -> usize {
self.max_buffer_size
}
pub fn has_half_precision(&self) -> bool {
self.has_half_precision
}
pub fn compute_queue(&self) -> &Queue {
&self.compute_queues[self.queue_index % self.compute_queues.len()]
}
pub fn name(&self) -> &str {
&self.name
}
fn check_extensions_list(
instance: &Instance,
vk_physical_device: vk::PhysicalDevice,
required_extensions: &[CString],
) -> GpuResult<()> {
let available_extensions = unsafe {
instance
.vk_instance()
.enumerate_device_extension_properties(vk_physical_device)?
};
for required_extension in required_extensions {
let is_extension_available = available_extensions.iter().any(|extension| {
let extension_name =
unsafe { std::ffi::CStr::from_ptr(extension.extension_name.as_ptr()) };
extension_name == required_extension.as_c_str()
});
if !is_extension_available {
return Err(GpuError::NotSupported(format!(
"Extension {required_extension:?} is not supported"
)));
}
}
Ok(())
}
}
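// A minimal usage sketch of the `Device` API, assuming an `Arc<Instance>` named
// `instance` is already available; the variable names are illustrative only.
// It mirrors the two-argument constructor call used in `basic_test.rs`:
//
// let physical_device = &instance.physical_devices()[0];
// let device = Device::new(instance.clone(), physical_device)?;
// log::debug!(
//     "GPU {}: subgroup size {}, max buffer size {} bytes, half precision: {}",
//     device.name(),
//     device.subgroup_size(),
//     device.max_buffer_size(),
//     device.has_half_precision(),
// );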
impl Drop for Device {
fn drop(&mut self) {
self.gpu_allocator = None;
unsafe {
// For now, we don't wait for the device to become idle.
// `device_wait_idle` has no timeout, so it could hang the application.
// Moreover, we control all execution through `Context` and catch timeouts there.
// If a shader has an infinite loop, we leak this device and let it run until the OS stops it.
// self.vk_device.device_wait_idle().unwrap();
self.vk_device
.destroy_device(self.cpu_allocation_callbacks());
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/descriptor_set_layout.rs | lib/gpu/src/descriptor_set_layout.rs | use std::sync::Arc;
use ash::vk;
use crate::*;
/// `DescriptorSetLayout` defines the linkage to the shader.
/// It describes which resources are defined in the shader and how they must be bound.
/// This structure does not need the shader directly; it only defines the linking rules.
/// It can be reused between different pipelines and shaders with the same layout.
#[derive(Clone)]
pub struct DescriptorSetLayout {
// Device that owns the descriptor set layout.
device: Arc<Device>,
// Bindings for uniform buffers.
// It contains index defined in the shader.
uniform_buffer_bindings: Vec<usize>,
// Bindings for storage buffers.
// It contains index defined in the shader.
storage_buffer_bindings: Vec<usize>,
// Native Vulkan descriptor set layout handle.
vk_descriptor_set_layout: vk::DescriptorSetLayout,
}
pub struct DescriptorSetLayoutBuilder {
uniform_buffer_bindings: Vec<usize>,
storage_buffer_bindings: Vec<usize>,
}
impl DescriptorSetLayoutBuilder {
pub fn add_uniform_buffer(mut self, binding: usize) -> Self {
self.uniform_buffer_bindings.push(binding);
self
}
pub fn add_storage_buffer(mut self, binding: usize) -> Self {
self.storage_buffer_bindings.push(binding);
self
}
pub fn build(&self, device: Arc<Device>) -> GpuResult<Arc<DescriptorSetLayout>> {
let mut descriptor_set_layout_bindings = Vec::new();
for binding in &self.uniform_buffer_bindings {
descriptor_set_layout_bindings.push(
vk::DescriptorSetLayoutBinding::default()
.binding(*binding as u32)
.descriptor_type(vk::DescriptorType::UNIFORM_BUFFER)
.descriptor_count(1)
.stage_flags(vk::ShaderStageFlags::COMPUTE),
);
}
for binding in &self.storage_buffer_bindings {
descriptor_set_layout_bindings.push(
vk::DescriptorSetLayoutBinding::default()
.binding(*binding as u32)
.descriptor_type(vk::DescriptorType::STORAGE_BUFFER)
.descriptor_count(1)
.stage_flags(vk::ShaderStageFlags::COMPUTE),
);
}
let descriptor_set_layout_create_info =
vk::DescriptorSetLayoutCreateInfo::default().bindings(&descriptor_set_layout_bindings);
let vk_descriptor_set_layout = unsafe {
device.vk_device().create_descriptor_set_layout(
&descriptor_set_layout_create_info,
device.cpu_allocation_callbacks(),
)?
};
Ok(Arc::new(DescriptorSetLayout {
device,
uniform_buffer_bindings: self.uniform_buffer_bindings.clone(),
storage_buffer_bindings: self.storage_buffer_bindings.clone(),
vk_descriptor_set_layout,
}))
}
}
impl Drop for DescriptorSetLayout {
fn drop(&mut self) {
if self.vk_descriptor_set_layout != vk::DescriptorSetLayout::null() {
unsafe {
self.device.vk_device().destroy_descriptor_set_layout(
self.vk_descriptor_set_layout,
self.device.cpu_allocation_callbacks(),
);
}
self.vk_descriptor_set_layout = vk::DescriptorSetLayout::null();
}
self.storage_buffer_bindings.clear();
self.uniform_buffer_bindings.clear();
}
}
impl DescriptorSetLayout {
pub fn builder() -> DescriptorSetLayoutBuilder {
DescriptorSetLayoutBuilder {
uniform_buffer_bindings: Vec::new(),
storage_buffer_bindings: Vec::new(),
}
}
pub fn vk_descriptor_set_layout(&self) -> vk::DescriptorSetLayout {
self.vk_descriptor_set_layout
}
pub fn device(&self) -> Arc<Device> {
self.device.clone()
}
}
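// A minimal usage sketch: declaring a layout with a storage buffer at binding 0 and a
// uniform buffer at binding 1, matching the bindings of a compute shader. It assumes an
// `Arc<Device>` named `device` is available and mirrors the usage in `basic_test.rs`:
//
// let descriptor_set_layout = DescriptorSetLayout::builder()
//     .add_storage_buffer(0)
//     .add_uniform_buffer(1)
//     .build(device.clone())?;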
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/allocation_callbacks.rs | lib/gpu/src/allocation_callbacks.rs | use ash::vk;
/// AllocationCallbacks is a trait that provides access to Vulkan allocation callbacks.
/// It's used to provide custom memory allocation and deallocation functions on the CPU side.
/// GPU memory allocation is managed by the `gpu-allocator` crate.
/// Even though Vulkan allocates CPU memory on its own when no callbacks are provided, custom callbacks are helpful at least for debugging purposes.
pub trait AllocationCallbacks: Send + Sync + 'static {
fn allocation_callbacks(&self) -> &vk::AllocationCallbacks<'_>;
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/pipeline.rs | lib/gpu/src/pipeline.rs | use std::collections::HashMap;
use std::sync::Arc;
use ash::vk;
use crate::*;
static SHADER_ENTRY_POINT: &std::ffi::CStr = c"main";
/// Pipeline is an abstraction over a Vulkan compute pipeline.
/// Pipeline is a GPU resource that defines how a shader should be executed on the GPU.
/// For compute pipelines it's a single shader with bound resources.
pub struct Pipeline {
// Device that owns the pipeline.
device: Arc<Device>,
// Shader that is executed by the pipeline.
// Keep a reference to the shader to prevent it from being dropped.
_shader: Arc<Shader>,
// Descriptor set layouts that are used by the pipeline.
// It describes how the resources are bound to the shader.
descriptor_set_layouts: Vec<Arc<DescriptorSetLayout>>,
// Native Vulkan pipeline layout handle.
vk_pipeline_layout: vk::PipelineLayout,
// Native Vulkan pipeline handle.
vk_pipeline: vk::Pipeline,
}
#[derive(Default)]
pub struct PipelineBuilder {
shader: Option<Arc<Shader>>,
descriptor_set_layouts: HashMap<usize, Arc<DescriptorSetLayout>>,
}
// Mark `Pipeline` as a GPU resource that should be kept alive while it's in use by the GPU context.
impl Resource for Pipeline {}
impl PipelineBuilder {
pub fn add_shader(mut self, shader: Arc<Shader>) -> Self {
self.shader = Some(shader);
self
}
pub fn add_descriptor_set_layout(
mut self,
set: usize,
descriptor_set_layout: Arc<DescriptorSetLayout>,
) -> Self {
self.descriptor_set_layouts
.insert(set, descriptor_set_layout);
self
}
pub fn build(&self, device: Arc<Device>) -> GpuResult<Arc<Pipeline>> {
Ok(Arc::new(Pipeline::new(device, self)?))
}
}
impl Pipeline {
pub fn builder() -> PipelineBuilder {
Default::default()
}
pub(crate) fn new(device: Arc<Device>, builder: &PipelineBuilder) -> GpuResult<Self> {
let vk_descriptor_set_layouts = (0..builder.descriptor_set_layouts.len())
.map(|descriptor_set_index| {
if let Some(descriptor_set_layouts) =
builder.descriptor_set_layouts.get(&descriptor_set_index)
{
Ok(descriptor_set_layouts.vk_descriptor_set_layout())
} else {
Err(GpuError::Other(format!(
"Descriptor set layout {descriptor_set_index} is missing"
)))
}
})
.collect::<GpuResult<Vec<_>>>()?;
// Create a Vulkan pipeline layout.
let vk_pipeline_layout_create_info = vk::PipelineLayoutCreateInfo::default()
.set_layouts(&vk_descriptor_set_layouts)
.push_constant_ranges(&[]);
let vk_pipeline_layout = unsafe {
device.vk_device().create_pipeline_layout(
&vk_pipeline_layout_create_info,
device.cpu_allocation_callbacks(),
)?
};
let shader = builder
.shader
.clone()
.ok_or_else(|| GpuError::Other("Pipeline shader is required".to_string()))?;
// Create a Vulkan compute pipeline.
// Before we start, we need to check if the device supports dynamic subgroup size.
// If it does, we need to set the required subgroup size for the shader.
// To do that, we need to create a `vk::PipelineShaderStageRequiredSubgroupSizeCreateInfo`,
// which is a Vulkan 1.3 API structure used to specify the required subgroup size for a shader.
let mut subgroup_size_create_info = if device.is_dynamic_subgroup_size() {
Some(
vk::PipelineShaderStageRequiredSubgroupSizeCreateInfo::default()
.required_subgroup_size(device.subgroup_size() as u32),
)
} else {
None
};
// Initialize the shader stage create info.
// It contains the shader module, entry point, mark the stage as compute etc.
let mut vk_pipeline_shader_stage_create_info = vk::PipelineShaderStageCreateInfo::default()
.stage(vk::ShaderStageFlags::COMPUTE)
.module(shader.vk_shader_module())
.name(SHADER_ENTRY_POINT);
// Append the subgroup size info to the shader stage create info if it's present.
if let Some(subgroup_size_info) = &mut subgroup_size_create_info {
vk_pipeline_shader_stage_create_info =
vk_pipeline_shader_stage_create_info.push_next(subgroup_size_info);
}
// Finally, create the pipeline.
let vk_compute_pipeline_create_info = vk::ComputePipelineCreateInfo::default()
.stage(vk_pipeline_shader_stage_create_info)
.layout(vk_pipeline_layout);
let vk_pipelines_result = unsafe {
device.vk_device().create_compute_pipelines(
vk::PipelineCache::null(),
&[vk_compute_pipeline_create_info],
device.cpu_allocation_callbacks(),
)
};
match vk_pipelines_result {
Ok(vk_pipelines) => {
let vk_pipeline = vk_pipelines.first().copied().ok_or_else(|| {
GpuError::Other("Failed to create compute pipeline".to_string())
})?;
Ok(Self {
device,
_shader: shader,
vk_pipeline_layout,
vk_pipeline,
descriptor_set_layouts: builder
.descriptor_set_layouts
.values()
.cloned()
.collect(),
})
}
Err(error) => {
// if we failed to create the pipeline, we need to destroy the pipeline layout.
unsafe {
device.vk_device().destroy_pipeline_layout(
vk_pipeline_layout,
device.cpu_allocation_callbacks(),
);
}
Err(GpuError::from(error.1))
}
}
}
pub fn vk_pipeline(&self) -> vk::Pipeline {
self.vk_pipeline
}
pub fn vk_pipeline_layout(&self) -> vk::PipelineLayout {
self.vk_pipeline_layout
}
}
impl Drop for Pipeline {
fn drop(&mut self) {
if self.vk_pipeline != vk::Pipeline::null() {
unsafe {
self.device
.vk_device()
.destroy_pipeline(self.vk_pipeline, self.device.cpu_allocation_callbacks());
}
self.vk_pipeline = vk::Pipeline::null();
}
if self.vk_pipeline_layout != vk::PipelineLayout::null() {
unsafe {
self.device.vk_device().destroy_pipeline_layout(
self.vk_pipeline_layout,
self.device.cpu_allocation_callbacks(),
);
}
self.vk_pipeline_layout = vk::PipelineLayout::null();
}
// Free bound resources.
self.descriptor_set_layouts.clear();
}
}
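// A minimal usage sketch: assembling a compute pipeline from a compiled shader and a
// descriptor set layout at set index 0. It assumes `shader: Arc<Shader>`,
// `descriptor_set_layout: Arc<DescriptorSetLayout>` and `device: Arc<Device>` already
// exist and mirrors the usage in `basic_test.rs`:
//
// let pipeline = Pipeline::builder()
//     .add_shader(shader)
//     .add_descriptor_set_layout(0, descriptor_set_layout)
//     .build(device.clone())?;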
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/basic_test.rs | lib/gpu/src/basic_test.rs | static SHADER_CODE: &str = "
#version 450
layout(set = 0, binding = 0) buffer Numbers {
float data[];
} numbers;
layout(set = 0, binding = 1) uniform Param {
float param;
} param;
void main() {
uint index = gl_GlobalInvocationID.x;
numbers.data[index] += param.param;
}
";
// Basic GPU test.
// It takes a list of numbers and adds a parameter to each number.
#[test]
fn basic_gpu_test() {
// Get Vulkan API instance.
let instance = crate::GPU_TEST_INSTANCE.clone();
// Choose any GPU hardware to use.
let physical_device = &instance.physical_devices()[0];
// Create GPU device.
let device = crate::Device::new(instance.clone(), physical_device).unwrap();
// Create a GPU context that records commands and runs them on the GPU.
let mut context = crate::Context::new(device.clone()).unwrap();
let context_timeout = std::time::Duration::from_secs(30);
// Second step: create GPU resources.
// Generate input numbers.
let numbers_count = 512;
let numbers = (0..numbers_count).map(|x| x as f32).collect::<Vec<_>>();
let param = 5f32;
// Create GPU buffers.
// Storage buffer that contains numbers.
let storage_buffer = crate::Buffer::new(
device.clone(),
"Storage buffer",
crate::BufferType::Storage,
numbers.len() * std::mem::size_of::<f32>(),
)
.unwrap();
// Uniform buffer that contains parameter.
let uniform_buffer = crate::Buffer::new(
device.clone(),
"Uniform buffer",
crate::BufferType::Uniform,
std::mem::size_of::<f32>(),
)
.unwrap();
// Upload data to the GPU.
// We cannot just call `memcpy` because the GPU cannot access RAM directly.
// To upload data we need to create an intermediate buffer
// which is visible to both CPU and GPU.
// We will copy data to this buffer and then copy it to the GPU buffer.
// Use one buffer for both data and parameter.
let upload_buffer = crate::Buffer::new(
device.clone(),
"Upload buffer",
crate::BufferType::CpuToGpu, // Mark buffer as buffer to copy from CPU to GPU.
storage_buffer.size() + uniform_buffer.size(),
)
.unwrap();
// Copy numbers.
upload_buffer.upload(numbers.as_slice(), 0).unwrap();
// Copy parameter to the end.
upload_buffer.upload(&param, storage_buffer.size()).unwrap();
// Upload from intermediate buffer to GPU buffers.
context
.copy_gpu_buffer(
upload_buffer.clone(),
storage_buffer.clone(),
0,
0,
storage_buffer.size(),
)
.unwrap();
context
.copy_gpu_buffer(
upload_buffer.clone(),
uniform_buffer.clone(),
storage_buffer.size(),
0,
uniform_buffer.size(),
)
.unwrap();
// run copy commands.
context.run().unwrap();
context.wait_finish(context_timeout).unwrap();
// Third step: create computation pipeline.
// Compile shader code to SPIR-V.
let spirv = crate::GPU_TEST_INSTANCE
.compile_shader(SHADER_CODE, "shader.glsl", None, None)
.unwrap();
// Create shader.
let shader = crate::Shader::new(device.clone(), &spirv).unwrap();
// Create linking to the shader.
let descriptor_set_layout = crate::DescriptorSetLayout::builder()
.add_storage_buffer(0)
.add_uniform_buffer(1)
.build(device.clone())
.unwrap();
let descriptor_set = crate::DescriptorSet::builder(descriptor_set_layout.clone())
.add_storage_buffer(0, storage_buffer.clone())
.add_uniform_buffer(1, uniform_buffer.clone())
.build()
.unwrap();
// Create computation pipeline.
let pipeline = crate::Pipeline::builder()
.add_descriptor_set_layout(0, descriptor_set_layout)
.add_shader(shader)
.build(device.clone())
.unwrap();
// Fourth step: run computation.
let descriptor_sets = [descriptor_set.clone()];
// Bind pipeline and descriptor sets.
context.bind_pipeline(pipeline, &descriptor_sets).unwrap();
// Run the computation command. The thread count equals the number of inputs.
context.dispatch(numbers_count, 1, 1).unwrap();
// Add barrier to ensure that all writes to the storage buffer are visible to the next command.
context
.barrier_buffers(std::slice::from_ref(&storage_buffer))
.unwrap();
// Run GPU and wait finish.
context.run().unwrap();
context.wait_finish(context_timeout).unwrap();
// Fifth step: download and check results.
// Like upload, we need to create intermediate buffer to download data from GPU.
let download_buffer = crate::Buffer::new(
device.clone(),
"Download buffer",
crate::BufferType::GpuToCpu, // Mark buffer as buffer to copy from GPU to CPU.
storage_buffer.size(),
)
.unwrap();
// Copy data from GPU to intermediate buffer.
context
.copy_gpu_buffer(
storage_buffer.clone(),
download_buffer.clone(),
0,
0,
storage_buffer.size(),
)
.unwrap();
// Run copy command.
context.run().unwrap();
context.wait_finish(context_timeout).unwrap();
// Download data from intermediate buffer.
let result = download_buffer
.download_vec::<f32>(0, numbers_count)
.unwrap();
// Check results.
for i in 0..numbers_count {
assert_eq!(result[i], numbers[i] + param);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/buffer.rs | lib/gpu/src/buffer.rs | use std::ops::Range;
use std::sync::Arc;
use ash::vk;
use gpu_allocator::MemoryLocation;
use gpu_allocator::vulkan::{Allocation, AllocationCreateDesc, AllocationScheme};
use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};
use zerocopy::{FromBytes, Immutable, IntoBytes};
use crate::*;
static DOWNLOAD_NOT_ALLOWED_ERROR: &str = "Download from the GPU buffer is not allowed";
static UPLOAD_NOT_ALLOWED_ERROR: &str = "Upload to the GPU buffer is not allowed";
/// Buffer is a GPU resource that represents a linear memory region.
pub struct Buffer {
/// Device that owns the buffer.
device: Arc<Device>,
/// Vulkan buffer handle.
vk_buffer: vk::Buffer,
/// Buffer type. It defines how the buffer can be used.
buffer_type: BufferType,
/// Buffer size in bytes.
size: usize,
/// GPU memory allocation that backs the buffer.
allocation: Mutex<Allocation>,
}
/// Buffer type defines how the buffer can be used.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum BufferType {
/// Uniform data for a shader.
Uniform,
/// Storage buffer can be used as a large read/write buffer.
Storage,
/// CpuToGpu buffer can be used as a source for transfer operations only.
CpuToGpu,
/// GpuToCpu buffer can be used as a destination for transfer operations only.
GpuToCpu,
}
// Mark `Buffer` as a GPU resource that should be kept alive while it's in use by the GPU context.
impl Resource for Buffer {}
impl Buffer {
pub fn new(
device: Arc<Device>, // Device that owns the buffer.
name: impl AsRef<str>, // Name of the buffer for tracking and debugging purposes.
buffer_type: BufferType,
size: usize,
) -> GpuResult<Arc<Self>> {
if size == 0 {
return Err(GpuError::NotSupported(
"Zero-sized GPU buffers are not supported".to_string(),
));
}
// Vulkan API requires buffer usage flags to be specified during the buffer creation.
let vk_usage_flags = match buffer_type {
BufferType::Uniform => {
vk::BufferUsageFlags::UNIFORM_BUFFER // mark as uniform buffer.
| vk::BufferUsageFlags::TRANSFER_DST // For uploading.
| vk::BufferUsageFlags::TRANSFER_SRC // For downloading.
}
BufferType::Storage => {
vk::BufferUsageFlags::STORAGE_BUFFER // mark as storage buffer.
| vk::BufferUsageFlags::TRANSFER_DST // For uploading.
| vk::BufferUsageFlags::TRANSFER_SRC // For downloading.
}
// CpuToGpu buffer can be used as a source for transfer operations only.
BufferType::CpuToGpu => vk::BufferUsageFlags::TRANSFER_SRC,
// GpuToCpu buffer can be used as a destination for transfer operations only.
BufferType::GpuToCpu => vk::BufferUsageFlags::TRANSFER_DST,
};
// Memory location depends on the buffer type.
let location = match buffer_type {
// Allocate Uniform/Storage buffers in GPU memory only.
BufferType::Uniform => MemoryLocation::GpuOnly,
BufferType::Storage => MemoryLocation::GpuOnly,
// Transfer buffers will be visible to both CPU and GPU.
BufferType::CpuToGpu => MemoryLocation::CpuToGpu,
BufferType::GpuToCpu => MemoryLocation::GpuToCpu,
};
// Create a Vulkan buffer.
let vk_create_buffer_info = vk::BufferCreateInfo::default()
.size(size as vk::DeviceSize)
.usage(vk_usage_flags)
.sharing_mode(vk::SharingMode::EXCLUSIVE);
let vk_buffer = unsafe {
device
.vk_device()
.create_buffer(&vk_create_buffer_info, device.cpu_allocation_callbacks())
.unwrap()
};
// Allocate memory for the buffer.
let buffer_allocation_requirements =
unsafe { device.vk_device().get_buffer_memory_requirements(vk_buffer) };
let allocation_result = device.allocate(&AllocationCreateDesc {
name: name.as_ref(),
requirements: buffer_allocation_requirements,
location,
linear: true, // Buffers are always linear.
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
});
// Check if the allocation was successful.
let allocation = match allocation_result {
Ok(allocation) => allocation,
Err(e) => {
unsafe {
// Because Vulkan buffer lifetimes are managed manually,
// we need to destroy the buffer in case of an allocation error.
device
.vk_device()
.destroy_buffer(vk_buffer, device.cpu_allocation_callbacks());
}
return Err(e);
}
};
// Bind the buffer to the allocated memory.
let bind_result = unsafe {
device.vk_device().bind_buffer_memory(
vk_buffer,
allocation.memory(),
allocation.offset(),
)
};
if let Err(e) = bind_result {
// Free the allocated memory in case of an error.
device.free(allocation);
unsafe {
// Destroy the buffer.
device
.vk_device()
.destroy_buffer(vk_buffer, device.cpu_allocation_callbacks());
}
return Err(GpuError::from(e));
}
Ok(Arc::new(Self {
device,
vk_buffer,
buffer_type,
size,
allocation: Mutex::new(allocation),
}))
}
pub fn size(&self) -> usize {
self.size
}
pub fn vk_buffer(&self) -> vk::Buffer {
self.vk_buffer
}
pub fn buffer_type(&self) -> BufferType {
self.buffer_type
}
/// Download a value from the buffer to the RAM.
pub fn download<T>(&self, data: &mut T, offset: usize) -> GpuResult<()>
where
T: FromBytes + IntoBytes + ?Sized,
{
if self.buffer_type != BufferType::GpuToCpu {
return Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()));
}
let data_bytes = data.as_mut_bytes();
let end = checked_add(offset, data_bytes.len())?;
self.view(offset..end)?.copy_from_slice(data_bytes);
Ok(())
}
/// Download a vector of `len` elements from the buffer to the RAM.
pub fn download_vec<T>(&self, offset: usize, len: usize) -> GpuResult<Vec<T>>
where
T: FromBytes + IntoBytes + Clone,
{
if self.buffer_type != BufferType::GpuToCpu {
return Err(GpuError::Other(DOWNLOAD_NOT_ALLOWED_ERROR.to_string()));
}
let end = len
.checked_mul(size_of::<T>())
.and_then(|total_bytes| offset.checked_add(total_bytes))
.ok_or_else(|| {
GpuError::OutOfBounds(format!(
"Size overflow while downloading from GPU: \
{len}*{} + {offset} overflows",
size_of::<T>()
))
})?;
let buffer_slice = self.view(offset..end)?;
let mut result = vec![T::new_zeroed(); len];
result.as_mut_bytes().copy_from_slice(&buffer_slice);
Ok(result)
}
/// Upload a value from the RAM to the buffer.
pub fn upload<T>(&self, data: &T, offset: usize) -> GpuResult<()>
where
T: IntoBytes + Immutable + ?Sized,
{
if self.buffer_type != BufferType::CpuToGpu {
return Err(GpuError::Other(UPLOAD_NOT_ALLOWED_ERROR.to_string()));
}
let bytes = data.as_bytes();
let end = checked_add(offset, bytes.len())?;
self.view(offset..end)?.copy_from_slice(bytes);
Ok(())
}
fn view(&self, range: Range<usize>) -> GpuResult<MappedMutexGuard<'_, [u8]>> {
let slice = MutexGuard::try_map(self.allocation.lock(), |allocation| {
allocation.mapped_slice_mut()
})
.map_err(|_| GpuError::Other("Accessing the GPU buffer is not allowed".to_string()))?;
MappedMutexGuard::try_map(slice, |slice| slice.get_mut(range.clone())).map_err(|slice| {
GpuError::OutOfBounds(format!(
"Out of bounds while accessing the buffer: \
range {range:?} exceeds the buffer size of {} bytes.",
slice.len()
))
})
}
}
fn checked_add(a: usize, b: usize) -> GpuResult<usize> {
a.checked_add(b).ok_or_else(|| {
GpuError::OutOfBounds(format!(
"Size overflow while accessing the buffer: {a} + {b} overflows"
))
})
}
impl Drop for Buffer {
fn drop(&mut self) {
if self.vk_buffer != vk::Buffer::null() {
// Drop the allocation and free the allocated memory.
let mut allocation = Mutex::new(Allocation::default());
std::mem::swap(&mut allocation, &mut self.allocation);
let allocation = allocation.into_inner();
self.device.free(allocation);
// Destroy the buffer.
unsafe {
self.device
.vk_device()
.destroy_buffer(self.vk_buffer, self.device.cpu_allocation_callbacks())
};
self.vk_buffer = vk::Buffer::null();
}
}
}
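// A minimal usage sketch: moving data from RAM into a GPU-only storage buffer through a
// CpuToGpu staging buffer. It assumes `device: Arc<Device>` and `context: Context` are
// available; offsets and sizes are in bytes, and the names are illustrative only.
//
// let numbers = vec![1.0f32; 128];
// let storage = Buffer::new(
//     device.clone(),
//     "storage",
//     BufferType::Storage,
//     numbers.len() * std::mem::size_of::<f32>(),
// )?;
// let staging = Buffer::new(device.clone(), "staging", BufferType::CpuToGpu, storage.size())?;
// staging.upload(numbers.as_slice(), 0)?;
// context.copy_gpu_buffer(staging.clone(), storage.clone(), 0, 0, storage.size())?;
// context.run()?;
// context.wait_finish(std::time::Duration::from_secs(30))?;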
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/context.rs | lib/gpu/src/context.rs | use std::sync::Arc;
use ash::vk;
use crate::*;
/// Timeout to wait for GPU execution in drop function.
static DROP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30 * 60);
/// GPU execution context.
/// It records commands and runs them on the GPU.
/// It keeps track of the resources used in the commands.
/// Warning!
/// Context is not thread safe.
/// Execution order is not guaranteed, so don't rely on it.
/// If you need to run commands in a specific order, use the `wait_finish` method
/// and start the next command only after the previous one has finished.
pub struct Context {
// Which device to execute on.
device: Arc<Device>,
// GPU execution handler.
vk_queue: vk::Queue,
// Command buffer is created using command pool.
vk_command_pool: vk::CommandPool,
// Command buffer is used to record commands to execute.
vk_command_buffer: vk::CommandBuffer,
// Synchronization fence to wait for GPU execution.
vk_fence: vk::Fence,
// Resources used in the context.
resources: Vec<Arc<dyn Resource>>,
}
impl Context {
pub fn new(device: Arc<Device>) -> GpuResult<Self> {
// Get GPU execution queue from device.
let queue = device.compute_queue();
// Create command pool.
let command_pool_create_info = vk::CommandPoolCreateInfo::default()
.queue_family_index(queue.vk_queue_family_index as u32)
.flags(vk::CommandPoolCreateFlags::default());
let vk_command_pool = unsafe {
device
.vk_device()
.create_command_pool(&command_pool_create_info, device.cpu_allocation_callbacks())?
};
// Create fence to wait for GPU execution.
// We create fence as signaled because we reset fence before start.
let fence_create_info =
vk::FenceCreateInfo::default().flags(vk::FenceCreateFlags::SIGNALED);
let vk_fence = unsafe {
device
.vk_device()
.create_fence(&fence_create_info, device.cpu_allocation_callbacks())
};
let vk_fence = match vk_fence {
Ok(fence) => fence,
Err(e) => {
// If fence creation failed, destroy created command pool and return error.
unsafe {
device
.vk_device()
.destroy_command_pool(vk_command_pool, device.cpu_allocation_callbacks());
}
return Err(GpuError::from(e));
}
};
let mut context = Self {
vk_queue: queue.vk_queue,
device,
vk_command_pool,
vk_command_buffer: vk::CommandBuffer::null(),
vk_fence,
resources: Vec::new(),
};
context.init_command_buffer()?;
Ok(context)
}
pub fn dispatch(&mut self, x: usize, y: usize, z: usize) -> GpuResult<()> {
if self.vk_command_buffer == vk::CommandBuffer::null() {
self.init_command_buffer()?;
}
let max_compute_work_group_count = self.device.max_compute_work_group_count();
if x > max_compute_work_group_count[0]
|| y > max_compute_work_group_count[1]
|| z > max_compute_work_group_count[2]
{
return Err(GpuError::OutOfBounds(
"Dispatch work group size is out of bounds".to_string(),
));
}
unsafe {
self.device.vk_device().cmd_dispatch(
self.vk_command_buffer,
x as u32,
y as u32,
z as u32,
);
}
Ok(())
}
/// Barrier for buffers.
/// It's used to synchronize access to buffers between different shaders.
/// By the Vulkan specification, resources shared between different shaders/transfers must be synchronized.
/// Example of compute shader sync:
/// <https://github.com/KhronosGroup/Vulkan-Docs/wiki/Synchronization-Examples#compute-to-compute-dependencies>
/// In practice, we use `context.wait_finish()` to synchronize between different shaders.
/// But, for additional safety, we have this `barrier_buffers` to synchronize access to buffers.
pub fn barrier_buffers(&mut self, buffers: &[Arc<Buffer>]) -> GpuResult<()> {
if self.vk_command_buffer == vk::CommandBuffer::null() {
self.init_command_buffer()?;
}
let buffer_memory_barriers = buffers
.iter()
.map(|buffer| {
self.resources.push(buffer.clone() as Arc<dyn Resource>);
vk::BufferMemoryBarrier::default()
.buffer(buffer.vk_buffer())
.offset(0)
.size(buffer.size() as vk::DeviceSize)
.src_queue_family_index(
self.device.compute_queue().vk_queue_family_index as u32,
)
.dst_queue_family_index(
self.device.compute_queue().vk_queue_family_index as u32,
)
.src_access_mask(vk::AccessFlags::SHADER_WRITE)
.dst_access_mask(
vk::AccessFlags::TRANSFER_READ
| vk::AccessFlags::SHADER_READ
| vk::AccessFlags::SHADER_WRITE,
)
})
.collect::<Vec<_>>();
unsafe {
self.device.vk_device().cmd_pipeline_barrier(
self.vk_command_buffer,
vk::PipelineStageFlags::COMPUTE_SHADER,
vk::PipelineStageFlags::ALL_COMMANDS,
vk::DependencyFlags::empty(),
&[],
&buffer_memory_barriers,
&[],
);
}
Ok(())
}
/// Bind pipeline to the context.
/// It selects which shader, and which resources bound to that shader, will be used.
/// It records a command that will run on the GPU after the `run` call.
pub fn bind_pipeline(
&mut self,
pipeline: Arc<Pipeline>,
descriptor_sets: &[Arc<DescriptorSet>],
) -> GpuResult<()> {
if self.vk_command_buffer == vk::CommandBuffer::null() {
self.init_command_buffer()?;
}
unsafe {
self.device.vk_device().cmd_bind_pipeline(
self.vk_command_buffer,
vk::PipelineBindPoint::COMPUTE,
pipeline.vk_pipeline(),
);
}
unsafe {
if !descriptor_sets.is_empty() {
let vk_descriptor_sets: Vec<_> = descriptor_sets
.iter()
.map(|set| set.as_ref().vk_descriptor_set())
.collect();
self.device.vk_device().cmd_bind_descriptor_sets(
self.vk_command_buffer,
vk::PipelineBindPoint::COMPUTE,
pipeline.vk_pipeline_layout(),
0,
&vk_descriptor_sets,
&[],
);
}
}
// Add resources to the list to keep them alive.
self.resources.extend(
descriptor_sets
.iter()
.map(|r| r.clone() as Arc<dyn Resource>),
);
self.resources.push(pipeline);
Ok(())
}
/// Copy data from one buffer to another. It records a command that will run on the GPU after the `run` call.
pub fn copy_gpu_buffer(
&mut self,
src: Arc<Buffer>,
dst: Arc<Buffer>,
src_offset: usize,
dst_offset: usize,
size: usize,
) -> GpuResult<()> {
if size == 0 {
return Ok(());
}
if self.vk_command_buffer == vk::CommandBuffer::null() {
self.init_command_buffer()?;
}
if src.size() < src_offset + size || dst.size() < dst_offset + size {
return Err(GpuError::OutOfBounds(
"Buffer copy out of bounds".to_string(),
));
}
let buffer_copy = vk::BufferCopy::default()
.src_offset(src_offset as vk::DeviceSize)
.dst_offset(dst_offset as vk::DeviceSize)
.size(size as vk::DeviceSize);
unsafe {
self.device.vk_device().cmd_copy_buffer(
self.vk_command_buffer,
src.vk_buffer(),
dst.vk_buffer(),
&[buffer_copy],
);
}
// Add resources to the list to keep them alive.
self.resources.push(src);
self.resources.push(dst);
Ok(())
}
/// Clear the buffer with zeros. It records a command that will run on the GPU after the `run` call.
pub fn clear_buffer(&mut self, buffer: Arc<Buffer>) -> GpuResult<()> {
if buffer.size() == 0 {
return Ok(());
}
if !buffer.size().is_multiple_of(std::mem::size_of::<u32>()) {
return Err(GpuError::OutOfBounds(
"Buffer size must be a multiple of `uint32` size to clear it".to_string(),
));
}
if self.vk_command_buffer == vk::CommandBuffer::null() {
self.init_command_buffer()?;
}
unsafe {
self.device.vk_device().cmd_fill_buffer(
self.vk_command_buffer,
buffer.vk_buffer(),
0,
buffer.size() as vk::DeviceSize,
0,
);
}
// Add resources to the list to keep them alive.
self.resources.push(buffer);
Ok(())
}
/// Run the recorded commands on GPU.
/// Warning: order of recorded commands is not guaranteed. Don't rely on it.
pub fn run(&mut self) -> GpuResult<()> {
if self.vk_command_buffer == vk::CommandBuffer::null() {
// Nothing to run.
return Ok(());
}
// Finish recording of command buffer.
let end_record_result = unsafe {
self.device
.vk_device()
.end_command_buffer(self.vk_command_buffer)
};
// If command buffer recording failed, destroy created command buffer and return error.
if let Err(e) = end_record_result {
self.destroy_command_buffer();
return Err(GpuError::from(e));
}
// Reset fence to unsignaled state.
let fence_reset_result = unsafe { self.device.vk_device().reset_fences(&[self.vk_fence]) };
if let Err(e) = fence_reset_result {
self.destroy_command_buffer();
return Err(GpuError::from(e));
}
// Start execution of recorded commands.
let submit_buffers = [self.vk_command_buffer];
let submit_info = vec![vk::SubmitInfo::default().command_buffers(&submit_buffers)];
let submit_result = unsafe {
self.device
.vk_device()
.queue_submit(self.vk_queue, &submit_info, self.vk_fence)
};
if let Err(e) = submit_result {
// If submit failed, destroy created command buffer and return error.
// It's important here to avoid waiting on the fence of a command buffer that never started.
self.destroy_command_buffer();
return Err(GpuError::from(e));
}
Ok(())
}
/// Wait for GPU execution to finish.
pub fn wait_finish(&mut self, timeout: std::time::Duration) -> GpuResult<()> {
if self.vk_command_buffer == vk::CommandBuffer::null() {
// Nothing to wait for.
return Ok(());
}
// Get the current status of fence.
let fence_status = unsafe {
self.device
.vk_device()
.get_fence_status(self.vk_fence)
.map_err(GpuError::from)
};
match fence_status {
Ok(true) => {
// GPU execution finished already, clear command buffer and return.
self.destroy_command_buffer();
Ok(())
}
Ok(false) => {
// GPU is processing. Wait for signal with timeout.
let wait_result = unsafe {
self.device
.vk_device()
.wait_for_fences(&[self.vk_fence], true, timeout.as_nanos() as u64)
.map_err(GpuError::from)
};
if matches!(wait_result, Err(GpuError::Timeout)) {
// If we detect timeout, don't clear command buffer, just return a timeout error.
Err(GpuError::Timeout)
} else {
// If the error is not a timeout, clear command buffer.
self.destroy_command_buffer();
wait_result
}
}
Err(e) => {
// By the Vulkan specification, an error while getting the fence status
// may happen in special cases like a lost hardware device.
// In these cases we don't care about the status of the GPU execution and just clear resources.
self.destroy_command_buffer();
Err(e)
}
}
}
fn init_command_buffer(&mut self) -> GpuResult<()> {
if self.vk_command_buffer != vk::CommandBuffer::null() {
return Err(GpuError::Other(
"Vulkan command buffer was already created".to_string(),
));
}
// Create new command buffer from pool.
let command_buffer_allocate_info = vk::CommandBufferAllocateInfo::default()
.command_pool(self.vk_command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(1);
self.vk_command_buffer = unsafe {
self.device
.vk_device()
.allocate_command_buffers(&command_buffer_allocate_info)?[0]
};
let command_buffer_begin_info =
vk::CommandBufferBeginInfo::default().flags(vk::CommandBufferUsageFlags::default());
//.inheritance_info(..);
let begin_result = unsafe {
self.device
.vk_device()
.begin_command_buffer(self.vk_command_buffer, &command_buffer_begin_info)
};
// If beginning the command buffer failed, destroy the created command buffer and return the error.
if let Err(e) = begin_result {
self.destroy_command_buffer();
return Err(GpuError::from(e));
}
Ok(())
}
fn destroy_command_buffer(&mut self) {
if self.vk_command_buffer != vk::CommandBuffer::null() {
unsafe {
self.device
.vk_device()
.free_command_buffers(self.vk_command_pool, &[self.vk_command_buffer]);
}
self.vk_command_buffer = vk::CommandBuffer::null();
}
self.resources.clear();
}
}
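// A minimal usage sketch of the record / run / wait cycle described in the struct docs.
// Dependent steps are separated by a barrier and `wait_finish` because the execution
// order of recorded commands is not guaranteed. It assumes `device`, `pipeline`,
// `descriptor_set` and `storage_buffer` were created beforehand; names are illustrative.
//
// let mut context = Context::new(device.clone())?;
// context.bind_pipeline(pipeline, &[descriptor_set])?;
// context.dispatch(work_groups, 1, 1)?;
// context.barrier_buffers(std::slice::from_ref(&storage_buffer))?;
// context.run()?;
// context.wait_finish(std::time::Duration::from_secs(30))?;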
impl Drop for Context {
fn drop(&mut self) {
let wait_result = self.wait_finish(DROP_TIMEOUT);
match wait_result {
Err(GpuError::Timeout) => {
// Timeout reached, resources are still in use.
// Vulkan API cannot stop GPU execution.
// This situation may appear if a shader has an infinite loop, etc.
// There is no good way to handle this error,
// so just log it and skip resource deallocation.
// This approach may leak memory and leave GPU kernels running,
// but it's better than a potential segfault.
log::error!("Failed to wait for GPU context to finish");
// The error was logged; leak the memory intentionally to keep the GPU resources alive.
let resources = self.resources.clone();
self.resources.clear();
for resource in resources {
// !!!!!!!!!
std::mem::forget(resource);
}
}
// If there is no timeout, we can safely deallocate resources.
wait_result => {
wait_result.unwrap_or_else(|e|
// Cannot return error from Drop trait.
// Log it instead.
log::error!("Error while clear GPU context: {e:?}"));
// If command buffer was not destroyed, destroy it.
// This situation may appear if `wait_finish` is an error.
self.destroy_command_buffer();
// Destroy fence.
if self.vk_fence != vk::Fence::null() {
unsafe {
self.device
.vk_device()
.destroy_fence(self.vk_fence, self.device.cpu_allocation_callbacks());
}
self.vk_fence = vk::Fence::null();
}
// Destroy command pool.
if self.vk_command_pool != vk::CommandPool::null() {
unsafe {
self.device.vk_device().destroy_command_pool(
self.vk_command_pool,
self.device.cpu_allocation_callbacks(),
);
}
self.vk_command_pool = vk::CommandPool::null();
}
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/shader.rs | lib/gpu/src/shader.rs | use std::sync::Arc;
use ash::vk;
use crate::*;
/// Struct that represents a compiled shader.
/// If you want to create a new shader, you have to compile it first using `shaderc`.
pub struct Shader {
/// Device that owns the shader.
device: Arc<Device>,
/// Vulkan shader module handle.
vk_shader_module: vk::ShaderModule,
}
// Mark `Shader` as a GPU resource that should be kept alive while it's in use by the GPU context.
impl Resource for Shader {}
impl Shader {
/// Create a new shader from the given compiled shader code.
/// `shader_code` is a compiled shader code in the binary SPIR-V format.
pub fn new(device: Arc<Device>, shader_code: &[u8]) -> GpuResult<Arc<Self>> {
// Decode SPIR-V from bytes with correct alignment.
let mut spv_file = std::io::Cursor::new(shader_code);
let shader_code = ash::util::read_spv(&mut spv_file)
.map_err(|_| GpuError::Other("Failed to read SPIR-V shader code".to_string()))?;
// Create shader.
let shader_module_create_info = vk::ShaderModuleCreateInfo::default().code(&shader_code);
let shader_module = unsafe {
device.vk_device().create_shader_module(
&shader_module_create_info,
device.cpu_allocation_callbacks(),
)?
};
Ok(Arc::new(Self {
device,
vk_shader_module: shader_module,
}))
}
pub fn vk_shader_module(&self) -> vk::ShaderModule {
self.vk_shader_module
}
}
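// A minimal usage sketch: compiling GLSL to SPIR-V with the instance's shader compiler
// and wrapping the binary in a `Shader`. It assumes `instance: Arc<Instance>`,
// `device: Arc<Device>` and a GLSL compute shader source in `SHADER_CODE` already exist,
// mirroring the usage in `basic_test.rs`:
//
// let spirv = instance.compile_shader(SHADER_CODE, "example.glsl", None, None)?;
// let shader = Shader::new(device.clone(), &spirv)?;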
impl Drop for Shader {
fn drop(&mut self) {
if self.vk_shader_module != vk::ShaderModule::null() {
unsafe {
self.device.vk_device().destroy_shader_module(
self.vk_shader_module,
self.device.cpu_allocation_callbacks(),
);
}
self.vk_shader_module = vk::ShaderModule::null();
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/descriptor_set.rs | lib/gpu/src/descriptor_set.rs | use std::sync::Arc;
use ash::vk;
use crate::*;
/// `DescriptorSet` is a collection of buffers that can be bound to a shader.
/// It depends on a `DescriptorSetLayout`, which defines the linkage to the shader.
/// This structure does not need the shader directly; the shader will be provided by `Pipeline`.
/// It can be reused between different pipelines and shaders with the same layout.
#[derive(Clone)]
pub struct DescriptorSet {
/// Device that owns the descriptor set.
device: Arc<Device>,
/// Descriptor set layout that defines linkage to the shader.
/// Keep it alive to prevent it from being dropped.
_layout: Arc<DescriptorSetLayout>,
/// Collection of uniform buffers.
uniform_buffers: Vec<(usize, Arc<Buffer>)>,
/// Collection of storage buffers.
storage_buffers: Vec<(usize, Arc<Buffer>)>,
/// Native Vulkan descriptor pool handle.
vk_descriptor_pool: vk::DescriptorPool,
/// Native Vulkan descriptor set handle.
vk_descriptor_set: vk::DescriptorSet,
}
pub struct DescriptorSetBuilder {
descriptor_set_layout: Arc<DescriptorSetLayout>,
uniform_buffers: Vec<(usize, Arc<Buffer>)>,
storage_buffers: Vec<(usize, Arc<Buffer>)>,
}
// Mark `DescriptorSet` as a GPU resource that should be kept alive while it's in use by the GPU context.
impl Resource for DescriptorSet {}
impl DescriptorSetBuilder {
pub fn add_uniform_buffer(mut self, binding: usize, uniform_buffer: Arc<Buffer>) -> Self {
self.uniform_buffers.push((binding, uniform_buffer));
self
}
pub fn add_storage_buffer(mut self, binding: usize, storage_buffer: Arc<Buffer>) -> Self {
self.storage_buffers.push((binding, storage_buffer));
self
}
pub fn build(&self) -> GpuResult<Arc<DescriptorSet>> {
DescriptorSet::new(
self.descriptor_set_layout.device().clone(),
self.descriptor_set_layout.clone(),
self.uniform_buffers.clone(),
self.storage_buffers.clone(),
)
}
}
impl DescriptorSet {
pub fn builder(descriptor_set_layout: Arc<DescriptorSetLayout>) -> DescriptorSetBuilder {
DescriptorSetBuilder {
descriptor_set_layout,
uniform_buffers: Vec::new(),
storage_buffers: Vec::new(),
}
}
fn new(
device: Arc<Device>,
layout: Arc<DescriptorSetLayout>,
uniform_buffers: Vec<(usize, Arc<Buffer>)>,
storage_buffers: Vec<(usize, Arc<Buffer>)>,
) -> GpuResult<Arc<Self>> {
// Vulkan descriptor pool is required to allocate descriptor sets.
let vk_descriptor_pool =
Self::create_vk_descriptor_pool(&device, &uniform_buffers, &storage_buffers)?;
// Create Vulkan descriptor set.
let vk_descriptor_set_layouts = vec![layout.vk_descriptor_set_layout()];
let vk_descriptor_set_result =
Self::create_vk_descriptor_set(&device, &vk_descriptor_set_layouts, vk_descriptor_pool);
let vk_descriptor_set = match vk_descriptor_set_result {
Ok(vk_descriptor_set) => vk_descriptor_set,
Err(error) => {
unsafe {
// Destroy descriptor pool if descriptor set creation failed.
device.vk_device().destroy_descriptor_pool(
vk_descriptor_pool,
device.cpu_allocation_callbacks(),
);
}
return Err(error);
}
};
let result = Arc::new(Self {
device,
_layout: layout,
uniform_buffers,
storage_buffers,
vk_descriptor_pool,
vk_descriptor_set,
});
result.update()?;
Ok(result)
}
pub fn vk_descriptor_set(&self) -> vk::DescriptorSet {
self.vk_descriptor_set
}
fn create_vk_descriptor_pool(
device: &Arc<Device>,
uniform_buffers: &[(usize, Arc<Buffer>)],
storage_buffers: &[(usize, Arc<Buffer>)],
) -> GpuResult<vk::DescriptorPool> {
let mut vk_descriptor_pool_sizes = Vec::new();
if !uniform_buffers.is_empty() {
vk_descriptor_pool_sizes.push(
vk::DescriptorPoolSize::default()
.ty(vk::DescriptorType::UNIFORM_BUFFER)
.descriptor_count(uniform_buffers.len() as u32),
);
}
if !storage_buffers.is_empty() {
vk_descriptor_pool_sizes.push(
vk::DescriptorPoolSize::default()
.ty(vk::DescriptorType::STORAGE_BUFFER)
.descriptor_count(storage_buffers.len() as u32),
);
}
if !vk_descriptor_pool_sizes.is_empty() {
let vk_descriptor_pool_create_info = vk::DescriptorPoolCreateInfo::default()
.pool_sizes(&vk_descriptor_pool_sizes)
.max_sets(1);
unsafe {
device
.vk_device()
.create_descriptor_pool(
&vk_descriptor_pool_create_info,
device.cpu_allocation_callbacks(),
)
.map_err(GpuError::from)
}
} else {
Err(GpuError::Other(
"DescriptorSet must have at least one uniform or storage buffer".to_string(),
))
}
}
fn create_vk_descriptor_set(
device: &Arc<Device>,
vk_descriptor_set_layout: &[vk::DescriptorSetLayout],
vk_descriptor_pool: vk::DescriptorPool,
) -> GpuResult<vk::DescriptorSet> {
let vk_descriptor_set_allocate_info = vk::DescriptorSetAllocateInfo::default()
.descriptor_pool(vk_descriptor_pool)
.set_layouts(vk_descriptor_set_layout);
unsafe {
Ok(device
.vk_device()
.allocate_descriptor_sets(&vk_descriptor_set_allocate_info)?[0])
}
}
fn update(&self) -> GpuResult<()> {
// Collected parameters for vk::update_descriptor_sets
let mut vk_write_descriptor_sets: Vec<vk::WriteDescriptorSet> = vec![];
// `vk_descriptor_uniform_buffer_infos` is a collection of uniform buffers.
// It should be alive because `vk_write_descriptor_sets` references pointer to it.
let mut vk_descriptor_uniform_buffer_infos = Vec::new();
for (_binding, uniform_buffer) in &self.uniform_buffers {
if uniform_buffer.buffer_type() != BufferType::Uniform {
return Err(GpuError::Other(
"Uniform buffer type must be `BufferType::Uniform`".to_string(),
));
}
let vk_descriptor_buffer_info = vk::DescriptorBufferInfo::default()
.buffer(uniform_buffer.vk_buffer())
.offset(0)
.range(uniform_buffer.size() as u64);
vk_descriptor_uniform_buffer_infos.push(vec![vk_descriptor_buffer_info]);
}
for ((binding, _uniform_buffer), buffer_infos) in self
.uniform_buffers
.iter()
.zip(vk_descriptor_uniform_buffer_infos.iter())
{
let vk_write_descriptor_set = vk::WriteDescriptorSet::default()
.dst_set(self.vk_descriptor_set)
.dst_binding(*binding as u32)
.dst_array_element(0)
.descriptor_type(vk::DescriptorType::UNIFORM_BUFFER)
.buffer_info(buffer_infos);
vk_write_descriptor_sets.push(vk_write_descriptor_set);
}
// `vk_descriptor_storage_buffer_infos` is a collection of storage buffers.
// It should be alive because `vk_write_descriptor_sets` references pointer to it.
let mut vk_descriptor_storage_buffer_infos = Vec::new();
for (_binding, storage_buffer) in &self.storage_buffers {
if storage_buffer.buffer_type() != BufferType::Storage {
return Err(GpuError::Other(
"Storage buffer type must be `BufferType::Storage`".to_string(),
));
}
let vk_descriptor_buffer_info = vk::DescriptorBufferInfo::default()
.buffer(storage_buffer.vk_buffer())
.offset(0)
.range(storage_buffer.size() as u64);
vk_descriptor_storage_buffer_infos.push(vec![vk_descriptor_buffer_info]);
}
for ((binding, _storage_buffer), buffer_info) in self
.storage_buffers
.iter()
.zip(vk_descriptor_storage_buffer_infos.iter())
{
let vk_write_descriptor_set = vk::WriteDescriptorSet::default()
.dst_set(self.vk_descriptor_set)
.dst_binding(*binding as u32)
.dst_array_element(0)
.descriptor_type(vk::DescriptorType::STORAGE_BUFFER)
.buffer_info(buffer_info);
vk_write_descriptor_sets.push(vk_write_descriptor_set);
}
unsafe {
self.device
.vk_device()
.update_descriptor_sets(&vk_write_descriptor_sets, &[]);
}
Ok(())
}
}
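// A minimal usage sketch: binding concrete buffers to the bindings declared in a
// `DescriptorSetLayout`. It assumes `descriptor_set_layout`, `storage_buffer` and
// `uniform_buffer` were created beforehand and mirrors the usage in `basic_test.rs`:
//
// let descriptor_set = DescriptorSet::builder(descriptor_set_layout.clone())
//     .add_storage_buffer(0, storage_buffer.clone())
//     .add_uniform_buffer(1, uniform_buffer.clone())
//     .build()?;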
impl Drop for DescriptorSet {
fn drop(&mut self) {
if self.vk_descriptor_pool != vk::DescriptorPool::null() {
unsafe {
self.device.vk_device().destroy_descriptor_pool(
self.vk_descriptor_pool,
self.device.cpu_allocation_callbacks(),
);
}
self.vk_descriptor_pool = vk::DescriptorPool::null()
}
self.uniform_buffers.clear();
self.storage_buffers.clear();
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/gpu/src/instance.rs | lib/gpu/src/instance.rs | use std::collections::HashMap;
use std::ffi::CString;
use std::ops::Deref;
use std::sync::Arc;
use ash::vk;
use parking_lot::Mutex;
use crate::*;
static APPLICATION_NAME: &std::ffi::CStr = c"qdrant";
/// `Instance` is a Vulkan instance wrapper.
/// It's the root structure for all Vulkan operations and provides access to the API.
/// It also manages all Vulkan API layers and API extensions.
pub struct Instance {
/// Vulkan API entry point. It should be kept alive while the instance is alive.
_entry: ash::Entry,
/// Native Vulkan instance handle.
vk_instance: ash::Instance,
/// List of physical devices found by the Vulkan instance.
vk_physical_devices: Vec<PhysicalDevice>,
/// CPU allocator.
allocation_callbacks: Option<Box<dyn AllocationCallbacks>>,
/// Validation layer handler.
vk_debug_utils_loader: Option<ash::ext::debug_utils::Instance>,
/// Validation layer messenger.
vk_debug_messenger: vk::DebugUtilsMessengerEXT,
/// Shader compiler.
compiler: Mutex<shaderc::Compiler>,
/// Debug messenger for the instance. It contains validation error callbacks.
/// Should be kept alive while the instance is alive because it contains raw pointers to callbacks.
_debug_messenger: Option<Box<dyn DebugMessenger>>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Hardware type of the physical device.
pub enum PhysicalDeviceType {
/// Discrete GPU like Nvidia or AMD.
Discrete,
/// Integrated graphics like Intel HD Graphics.
Integrated,
/// Other types of hardware like software emulated GPU.
Other,
}
#[derive(Clone)]
pub struct PhysicalDevice {
pub vk_physical_device: vk::PhysicalDevice,
pub name: String,
pub device_type: PhysicalDeviceType,
}
#[derive(Default)]
pub struct InstanceBuilder {
debug_messenger: Option<Box<dyn DebugMessenger>>,
allocation_callbacks: Option<Box<dyn AllocationCallbacks>>,
dump_api: bool,
}
impl InstanceBuilder {
pub fn new() -> Self {
Self::default()
}
/// Set debug messenger for the instance.
pub fn with_debug_messenger(mut self, debug_messenger: Box<dyn DebugMessenger>) -> Self {
self.debug_messenger = Some(debug_messenger);
self
}
/// Set CPU allocation callbacks for the instance.
pub fn with_allocation_callbacks(
mut self,
allocation_callbacks: Box<dyn AllocationCallbacks>,
) -> Self {
self.allocation_callbacks = Some(allocation_callbacks);
self
}
/// Enable the API dump layer.
pub fn with_dump_api(mut self, dump_api: bool) -> Self {
self.dump_api = dump_api;
self
}
pub fn build(self) -> GpuResult<Arc<Instance>> {
Instance::new(
self.debug_messenger,
self.allocation_callbacks,
self.dump_api,
)
}
}
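// A minimal usage sketch: creating an instance and picking a physical device. Preferring
// a discrete GPU over an integrated one is an illustrative policy choice here, not
// something prescribed by this crate.
//
// let instance = Instance::builder().build()?;
// let physical_device = instance
//     .physical_devices()
//     .iter()
//     .find(|device| device.device_type == PhysicalDeviceType::Discrete)
//     .or_else(|| instance.physical_devices().first())
//     .ok_or_else(|| GpuError::Other("No GPU devices found".to_string()))?;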
impl Instance {
pub fn builder() -> InstanceBuilder {
InstanceBuilder::new()
}
fn new(
debug_messenger: Option<Box<dyn DebugMessenger>>,
allocation_callbacks: Option<Box<dyn AllocationCallbacks>>,
dump_api: bool,
) -> GpuResult<Arc<Self>> {
// Create a shader compiler before we start.
// It's used to compile GLSL into SPIR-V.
let compiler = Mutex::new(
shaderc::Compiler::new()
.map_err(|_| GpuError::Other("Failed to create shaderc compiler".to_string()))?,
);
// Create Vulkan API entry point.
let entry = unsafe {
ash::Entry::load().map_err(|e| {
GpuError::Other(format!("Failed to create Vulkan API entry point {e:?}"))
})?
};
// Collect Vulkan application info.
// It contains application name and required Vulkan API version.
let app_info = vk::ApplicationInfo::default()
.application_name(APPLICATION_NAME)
.application_version(0)
.engine_name(APPLICATION_NAME)
.engine_version(0)
.api_version(vk::make_api_version(0, 1, 3, 0));
// Collect Vulkan API extensions and convert them into raw pointers.
let extensions = Self::extensions_list(debug_messenger.is_some());
// Check presence of all required extensions.
Self::check_extensions_list(&entry, &extensions)?;
let extensions_cstr: Vec<CString> = extensions
.iter()
.filter_map(|s| CString::new(s.clone().into_bytes()).ok())
.collect();
let extension_names_raw: Vec<*const i8> = extensions_cstr
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
// Collect Vulkan API layers and convert them into raw pointers.
let layers = Self::layers_list(debug_messenger.is_some(), dump_api);
// Check presence of all required layers.
Self::check_layers_list(&entry, &layers)?;
let layers_cstr: Vec<CString> = layers
.iter()
.filter_map(|s| CString::new(s.clone().into_bytes()).ok())
.collect();
let layers_raw: Vec<*const i8> = layers_cstr
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
// If we provide debug messenger, we need to create a debug messenger info.
let mut debug_utils_create_info = debug_messenger
.as_deref()
.map(Self::debug_messenger_create_info);
let create_flags = if cfg!(any(target_os = "macos")) {
// On MacOS we need to enable portability extension to enable MoltenVK.
vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR
} else {
vk::InstanceCreateFlags::default()
};
// Collect all parameters together and create Vulkan instance.
let mut create_info = vk::InstanceCreateInfo::default()
.flags(create_flags)
.application_info(&app_info)
.enabled_layer_names(&layers_raw)
.enabled_extension_names(&extension_names_raw);
if let Some(debug_utils_create_info) = &mut debug_utils_create_info {
create_info = create_info.push_next(debug_utils_create_info);
}
// Get CPU allocation callbacks if they are provided.
let vk_allocation_callbacks = allocation_callbacks
.as_ref()
.map(|a| a.allocation_callbacks());
// Finally, create Vulkan instance.
let vk_instance: ash::Instance =
unsafe { entry.create_instance(&create_info, vk_allocation_callbacks)? };
// Find all available physical GPU devices.
let vk_physical_devices_result = unsafe { vk_instance.enumerate_physical_devices() };
let vk_physical_devices = match vk_physical_devices_result {
Ok(vk_physical_devices) => vk_physical_devices,
Err(e) => {
// Don't forget to destroy the instance if we failed to find any physical devices.
unsafe {
vk_instance.destroy_instance(vk_allocation_callbacks);
}
return Err(GpuError::from(e));
}
};
let vk_physical_devices = vk_physical_devices
.iter()
.map(|&vk_physical_device| {
let device_properties =
unsafe { vk_instance.get_physical_device_properties(vk_physical_device) };
let device_name =
unsafe { ::std::ffi::CStr::from_ptr(device_properties.device_name.as_ptr()) };
let device_name = device_name.to_str().unwrap_or("Unnamed GPU").to_owned();
let device_type = match device_properties.device_type {
vk::PhysicalDeviceType::DISCRETE_GPU => PhysicalDeviceType::Discrete,
vk::PhysicalDeviceType::INTEGRATED_GPU => PhysicalDeviceType::Integrated,
_ => PhysicalDeviceType::Other,
};
log::info!("Found GPU device: {device_name}");
PhysicalDevice {
vk_physical_device,
name: device_name,
device_type,
}
})
.collect::<Vec<_>>();
if vk_physical_devices.is_empty() {
// Don't forget to destroy the instance if we failed to find any physical devices.
unsafe {
vk_instance.destroy_instance(vk_allocation_callbacks);
}
return Err(GpuError::Other(
"No Vulkan physical devices found".to_string(),
));
}
// If a debug messenger callback was provided, create the Vulkan debug messenger.
let (vk_debug_utils_loader, vk_debug_messenger) = if let Some(debug_messenger) =
debug_messenger.as_ref()
{
let debug_utils_loader = ash::ext::debug_utils::Instance::new(&entry, &vk_instance);
let messenger_create_info = Self::debug_messenger_create_info(debug_messenger.deref());
let utils_messenger_result = unsafe {
debug_utils_loader
.create_debug_utils_messenger(&messenger_create_info, vk_allocation_callbacks)
};
let utils_messenger = match utils_messenger_result {
Ok(messenger) => messenger,
Err(e) => {
// Don't forget to destroy the instance if we failed to create a debug messenger.
unsafe {
vk_instance.destroy_instance(vk_allocation_callbacks);
}
return Err(GpuError::from(e));
}
};
(Some(debug_utils_loader), utils_messenger)
} else {
(None, vk::DebugUtilsMessengerEXT::null())
};
Ok(Arc::new(Self {
_entry: entry,
vk_instance,
vk_physical_devices,
allocation_callbacks,
vk_debug_utils_loader,
vk_debug_messenger,
compiler,
_debug_messenger: debug_messenger,
}))
}
fn debug_messenger_create_info(
debug_messenger: &dyn DebugMessenger,
) -> vk::DebugUtilsMessengerCreateInfoEXT<'_> {
vk::DebugUtilsMessengerCreateInfoEXT::default()
.flags(vk::DebugUtilsMessengerCreateFlagsEXT::empty())
.message_severity(debug_messenger.severity_flags())
.message_type(debug_messenger.message_type_flags())
.pfn_user_callback(debug_messenger.callback())
}
pub fn cpu_allocation_callbacks(&self) -> Option<&vk::AllocationCallbacks<'_>> {
self.allocation_callbacks
.as_ref()
.map(|alloc| alloc.allocation_callbacks())
}
pub fn vk_instance(&self) -> &ash::Instance {
&self.vk_instance
}
pub fn physical_devices(&self) -> &[PhysicalDevice] {
&self.vk_physical_devices
}
pub fn compile_shader(
&self,
shader: &str,
shader_name: &str,
defines: Option<&HashMap<String, Option<String>>>,
includes: Option<&HashMap<String, String>>,
) -> GpuResult<Vec<u8>> {
let mut options = shaderc::CompileOptions::new()
.map_err(|_| GpuError::Other("Failed to create shaderc compile options".to_string()))?;
options.set_optimization_level(shaderc::OptimizationLevel::Performance);
options.set_target_env(
shaderc::TargetEnv::Vulkan,
shaderc::EnvVersion::Vulkan1_3 as u32,
);
options.set_target_spirv(shaderc::SpirvVersion::V1_3);
if let Some(defines) = defines {
for (define, value) in defines {
match value {
Some(value) => {
options.add_macro_definition(define, Some(value));
}
None => {
options.add_macro_definition(define, None);
}
}
}
}
if let Some(includes) = includes {
options.set_include_callback(|filename, _, _, _| {
if let Some(code) = includes.get(filename) {
Ok(shaderc::ResolvedInclude {
resolved_name: filename.to_string(),
content: code.to_owned(),
})
} else {
Err(format!("Include file not found: {filename}"))
}
});
}
let compiler = self.compiler.lock();
let result = compiler
.compile_into_spirv(
shader,
shaderc::ShaderKind::Compute,
shader_name,
"main",
Some(&options),
)
.map_err(|e| GpuError::Other(format!("Failed to compile shader: {e:?}")))?;
Ok(result.as_binary_u8().to_owned())
}
fn layers_list(validation: bool, dump_api: bool) -> Vec<String> {
let mut result = Vec::new();
if validation {
result.push("VK_LAYER_KHRONOS_validation".to_owned());
}
if dump_api {
result.push("VK_LAYER_LUNARG_api_dump".to_owned());
}
result
}
fn extensions_list(validation: bool) -> Vec<String> {
let mut extensions_list = Vec::new();
if validation && let Ok(ext) = ash::ext::debug_utils::NAME.to_str() {
extensions_list.push(ext.to_string());
}
#[cfg(target_os = "macos")]
{
if let Ok(ext) = ash::khr::portability_enumeration::NAME.to_str() {
extensions_list.push(ext.to_string());
}
if let Ok(ext) = ash::khr::get_physical_device_properties2::NAME.to_str() {
extensions_list.push(ext.to_string());
}
}
extensions_list
}
fn check_extensions_list(entry: &ash::Entry, extensions: &[String]) -> GpuResult<()> {
let extension_properties = unsafe { entry.enumerate_instance_extension_properties(None)? };
for extension in extensions {
let extension_found = extension_properties.iter().any(|ep| {
let name = unsafe { ::std::ffi::CStr::from_ptr(ep.extension_name.as_ptr()) };
name.to_str().unwrap_or("") == extension
});
if !extension_found {
return Err(GpuError::Other(format!("Extension {extension} not found")));
}
}
Ok(())
}
fn check_layers_list(entry: &ash::Entry, layers: &[String]) -> GpuResult<()> {
let layer_properties = unsafe { entry.enumerate_instance_layer_properties()? };
for layer in layers {
let layer_found = layer_properties.iter().any(|lp| {
let name = unsafe { ::std::ffi::CStr::from_ptr(lp.layer_name.as_ptr()) };
name.to_str().unwrap_or("") == layer
});
if !layer_found {
return Err(GpuError::Other(format!("Layer {layer} not found")));
}
}
Ok(())
}
}
impl Drop for Instance {
fn drop(&mut self) {
let allocation_callbacks = self.cpu_allocation_callbacks();
unsafe {
// Destroy the debug messenger first, if present.
if let Some(loader) = &self.vk_debug_utils_loader
&& self.vk_debug_messenger != vk::DebugUtilsMessengerEXT::null()
{
loader.destroy_debug_utils_messenger(self.vk_debug_messenger, allocation_callbacks);
}
// Last step after all GPU resources have been dropped: destroy the Vulkan instance.
self.vk_instance.destroy_instance(allocation_callbacks);
}
}
}
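// A minimal, illustrative sketch of how the public API above can be used: pick a
// physical device and compile a trivial compute shader. The `example_usage` name,
// the device-selection heuristic, and the GLSL snippet are hypothetical; only the
// `physical_devices` and `compile_shader` signatures come from this file.
#[allow(dead_code)]
fn example_usage(instance: &Instance) -> GpuResult<Vec<u8>> {
    // Prefer a discrete GPU, falling back to the first device found.
    let device = instance
        .physical_devices()
        .iter()
        .find(|d| matches!(d.device_type, PhysicalDeviceType::Discrete))
        .or_else(|| instance.physical_devices().first());
    if let Some(device) = device {
        log::debug!("Selected GPU: {}", device.name);
    }
    // Compile a no-op compute shader; defines and includes are optional.
    let mut defines = HashMap::new();
    defines.insert("WORKGROUP_SIZE".to_owned(), Some("128".to_owned()));
    instance.compile_shader(
        "#version 450\nvoid main() {}",
        "example.comp",
        Some(&defines),
        None,
    )
}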
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/visitor.rs | lib/posting_list/src/visitor.rs | use common::types::PointOffsetType;
use crate::iterator::PostingIterator;
use crate::value_handler::{PostingValue, ValueHandler};
use crate::view::PostingListView;
use crate::{CHUNK_LEN, PostingElement};
/// A visitor for a posting list which caches the latest decompressed chunk of ids.
pub struct PostingVisitor<'a, V: PostingValue> {
pub(crate) list: PostingListView<'a, V>,
/// Index of the decompressed chunk.
/// It is used to shorten the search range of chunk index for the next value.
decompressed_chunk_idx: Option<usize>,
/// Lazily decompressed chunk of ids. Never access this directly; prefer the [`Self::decompressed_chunk`] function.
decompressed_chunk: [PointOffsetType; CHUNK_LEN],
}
impl<'a, V: PostingValue> PostingVisitor<'a, V> {
pub(crate) fn new(view: PostingListView<'a, V>) -> Self {
Self {
list: view,
decompressed_chunk_idx: None,
decompressed_chunk: [0; CHUNK_LEN],
}
}
pub fn len(&self) -> usize {
self.list.len()
}
pub fn is_empty(&self) -> bool {
self.list.is_empty()
}
/// Returns the decompressed slice of ids for a chunk.
///
/// Assumes the chunk_idx is valid.
fn decompressed_chunk(&mut self, chunk_idx: usize) -> &[PointOffsetType; CHUNK_LEN] {
if self.decompressed_chunk_idx != Some(chunk_idx) {
self.list
.decompress_chunk(chunk_idx, &mut self.decompressed_chunk);
self.decompressed_chunk_idx = Some(chunk_idx);
}
&self.decompressed_chunk
}
/// Returns the first offset whose element id is greater than or equal to the given id.
///
/// Returns `None` if there is no such element in the posting list.
pub(crate) fn search_greater_or_equal(
&mut self,
id: PointOffsetType,
start_from_offset: Option<usize>,
) -> Option<usize> {
let start_chunk = start_from_offset
.map(|offset| offset / CHUNK_LEN)
.unwrap_or(0);
let ids_range = self.list.ids_range(start_chunk)?;
// Check if the first id in the chunk is already greater than or equal to the target id
if ids_range.start() >= &id {
return Some(start_chunk * CHUNK_LEN);
}
// check if the target id is already greater than the last id
if ids_range.end() < &id {
return None;
}
// Find the chunk that may contain the id and check if the id is in the chunk
let chunk_index = self.list.find_chunk(id, Some(start_chunk));
if let Some(chunk_index) = chunk_index {
let local_offset = match self.decompressed_chunk(chunk_index).binary_search(&id) {
Ok(found_local_offset) => found_local_offset,
Err(closest_local_offset) => {
// If the target id is bigger than all the values here, and smaller than the first id
// in the next chunk or remainders, then that next id is the closest greater id
if closest_local_offset >= CHUNK_LEN {
let next_offset = (chunk_index + 1) * CHUNK_LEN;
let next_offset_exists = next_offset < self.len();
return next_offset_exists.then_some(next_offset);
}
closest_local_offset
}
};
return Some(local_offset + (chunk_index * CHUNK_LEN));
}
// Check in remainders
let remainder_offset = match self.list.search_in_remainders(id) {
Ok(found_remainder_offset) => found_remainder_offset,
Err(closest_remainder_offset) => {
if closest_remainder_offset >= self.list.remainders_len() {
// There is no greater or equal id in the posting list
return None;
}
closest_remainder_offset
}
};
Some(remainder_offset + self.list.chunks_len() * CHUNK_LEN)
}
pub fn contains(&mut self, id: PointOffsetType) -> bool {
if self
.list
.ids_range(0)
.is_none_or(|range| !range.contains(&id))
{
return false;
}
// Find the chunk that may contain the id and check if the id is in the chunk
let chunk_index = self.list.find_chunk(id, None);
if let Some(chunk_idx) = chunk_index {
if self.list.get_chunk_unchecked(chunk_idx).initial_id == id {
return true;
}
self.decompressed_chunk(chunk_idx)
.binary_search(&id)
.is_ok()
} else {
self.list.search_in_remainders(id).is_ok()
}
}
pub(crate) fn get_by_offset(&mut self, offset: usize) -> Option<PostingElement<V>> {
let chunk_idx = offset / CHUNK_LEN;
let local_offset = offset % CHUNK_LEN;
// bound check
if offset >= self.list.len() {
return None;
}
// get from chunk
if chunk_idx < self.list.chunks_len() {
let id = self.decompressed_chunk(chunk_idx)[local_offset];
let chunk_sized_values = self.list.get_chunk_unchecked(chunk_idx).sized_values;
let sized_value = chunk_sized_values[local_offset];
let next_sized_value = || {
chunk_sized_values
.get(local_offset + 1)
.copied()
// or check first of the next chunk
.or_else(|| {
self.list
.get_chunk(chunk_idx + 1)
.map(|chunk| chunk.sized_values[0])
})
// or, if it is the last one, check first from remainders
.or_else(|| self.list.get_remainder(0).map(|e| e.value))
};
let value =
V::Handler::get_value(sized_value, next_sized_value, self.list.var_size_data);
return Some(PostingElement { id, value });
}
// else, get from remainder
self.list.get_remainder(local_offset).map(|e| {
let id = e.id;
let next_sized_value = || self.list.get_remainder(local_offset + 1).map(|r| r.value);
let value = V::Handler::get_value(e.value, next_sized_value, self.list.var_size_data);
PostingElement {
id: id.get(),
value,
}
})
}
}
impl<'a, V: PostingValue> IntoIterator for PostingVisitor<'a, V> {
type Item = PostingElement<V>;
type IntoIter = PostingIterator<'a, V>;
fn into_iter(self) -> Self::IntoIter {
PostingIterator::new(self)
}
}
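// A minimal, illustrative sketch of the visitor API defined above. Constructing a
// `PostingVisitor` from a posting list happens outside this file, so the visitor is
// taken as a parameter here; the `example_scan` name is hypothetical.
#[allow(dead_code)]
fn example_scan<V: PostingValue>(mut visitor: PostingVisitor<'_, V>) -> (bool, usize) {
    // Membership checks reuse the cached decompressed chunk when possible.
    let has_target = visitor.contains(42);
    // Iterating consumes the visitor and yields `PostingElement`s in ascending id order.
    let total = visitor.into_iter().count();
    (has_target, total)
}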
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/posting_list/src/builder.rs | lib/posting_list/src/builder.rs | use std::marker::PhantomData;
use bitpacking::BitPacker;
use common::types::PointOffsetType;
use zerocopy::little_endian::U32;
use crate::posting_list::{PostingChunk, PostingElement, PostingList, RemainderPosting};
use crate::value_handler::{PostingValue, ValueHandler};
use crate::{BitPackerImpl, CHUNK_LEN};
pub struct PostingBuilder<V> {
elements: Vec<PostingElement<V>>,
}
impl<V> Default for PostingBuilder<V> {
fn default() -> Self {
Self {
elements: Vec::new(),
}
}
}
impl<V> PostingBuilder<V> {
pub fn new() -> Self {
Self::default()
}
pub fn add(&mut self, id: PointOffsetType, value: V) {
self.elements.push(PostingElement { id, value });
}
/// Unified implementation that works for both fixed-size and variable-size values
///
/// This method uses the `ValueHandler::process_values` trait function to abstract the
/// differences between the two implementations, allowing us to share the common logic.
pub fn build(mut self) -> PostingList<V>
where
V: PostingValue,
{
self.elements.sort_unstable_by_key(|e| e.id);
let num_elements = self.elements.len();
// extract ids and values into separate lists
let (ids, values): (Vec<_>, Vec<_>) =
self.elements.into_iter().map(|e| (e.id, e.value)).unzip();
// process values
let (sized_values, var_size_data) = V::Handler::process_values(values);
let bitpacker = BitPackerImpl::new();
let mut chunks = Vec::with_capacity(ids.len() / CHUNK_LEN);
let mut id_data_size = 0;
// process full chunks
let ids_chunks_iter = ids.chunks_exact(CHUNK_LEN);
let values_chunks_iter = sized_values.chunks_exact(CHUNK_LEN);
let remainder_ids = ids_chunks_iter.remainder();
let remainder_values = values_chunks_iter.remainder();
for (chunk_ids, chunk_values) in ids_chunks_iter.zip(values_chunks_iter) {
let initial = chunk_ids[0];
let chunk_bits = bitpacker.num_bits_sorted(initial, chunk_ids);
let chunk_size = BitPackerImpl::compressed_block_size(chunk_bits);
chunks.push(PostingChunk {
initial_id: U32::from(initial),
offset: u32::try_from(id_data_size)
.expect("id_data_size should fit in u32, (smaller than 4GB)")
.into(),
sized_values: chunk_values
.try_into()
.expect("should be a valid chunk size"),
});
id_data_size += chunk_size;
}
// now process remainders
let mut remainders = Vec::with_capacity(num_elements % CHUNK_LEN);
for (&id, &value) in remainder_ids.iter().zip(remainder_values) {
remainders.push(RemainderPosting {
id: U32::from(id),
value,
});
}
// compress id_data
let mut id_data = vec![0u8; id_data_size];
for (chunk_index, chunk_ids) in ids.chunks_exact(CHUNK_LEN).enumerate() {
let chunk = &chunks[chunk_index];
let compressed_size = PostingChunk::get_compressed_size(&chunks, &id_data, chunk_index);
let chunk_bits = compressed_size * u8::BITS as usize / CHUNK_LEN;
bitpacker.compress_sorted(
chunk.initial_id.get(),
chunk_ids,
&mut id_data
[chunk.offset.get() as usize..chunk.offset.get() as usize + compressed_size],
chunk_bits as u8,
);
}
let last_id = ids.last().copied();
PostingList {
id_data,
var_size_data,
chunks,
remainders,
last_id,
_phantom: PhantomData,
}
}
}
impl PostingBuilder<()> {
/// Add an id without a value.
pub fn add_id(&mut self, id: PointOffsetType) {
self.add(id, ());
}
}
impl<V: PostingValue> From<PostingBuilder<V>> for PostingList<V> {
fn from(value: PostingBuilder<V>) -> Self {
value.build()
}
}
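// A minimal, illustrative sketch of the builder round-trip defined above: collect
// (id, value) pairs, then `build` a compressed `PostingList`. The `example_build`
// name and the `pairs` parameter are hypothetical.
#[allow(dead_code)]
fn example_build<V: PostingValue>(pairs: Vec<(PointOffsetType, V)>) -> PostingList<V> {
    let mut builder = PostingBuilder::new();
    // Elements may be added in any order; `build` sorts them by id before splitting
    // ids into fixed-size chunks and bit-packing them against each chunk's initial id.
    for (id, value) in pairs {
        builder.add(id, value);
    }
    builder.build()
}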
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |