repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/simple_dot.rs | lib/segment/src/spaces/metric_uint/simple_dot.rs | use common::types::ScoreType;
use crate::data_types::vectors::{DenseVector, VectorElementTypeByte};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_uint::avx2::dot::avx_dot_similarity_bytes;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::spaces::metric_uint::neon::dot::neon_dot_similarity_bytes;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_uint::sse2::dot::sse_dot_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{DotProductMetric, MIN_DIM_SIZE_SIMD};
use crate::types::Distance;
/// Dot-product metric over raw `u8` vectors.
impl Metric<VectorElementTypeByte> for DotProductMetric {
    fn distance() -> Distance {
        Distance::Dot
    }

    /// Dot-product similarity with runtime SIMD dispatch, trying the widest
    /// instruction set first: AVX2+FMA (x86_64, dim >= MIN_DIM_SIZE_AVX),
    /// then SSE2 (x86/x86_64, dim >= MIN_DIM_SIZE_SIMD), then NEON (aarch64,
    /// compile-time feature), and finally the scalar fallback.
    ///
    /// NOTE(review): assumes `v1.len() == v2.len()`; the SIMD kernels only
    /// debug-assert this — confirm callers guarantee equal dimensions.
    fn similarity(v1: &[VectorElementTypeByte], v2: &[VectorElementTypeByte]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            // AVX path is only worthwhile above the AVX dimension threshold.
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("avx2")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { avx_dot_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse")
                && is_x86_feature_detected!("sse2")
                && v1.len() >= MIN_DIM_SIZE_SIMD
            {
                return unsafe { sse_dot_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { neon_dot_similarity_bytes(v1, v2) };
            }
        }
        // Portable scalar fallback (also used for short vectors).
        dot_similarity_bytes(v1, v2)
    }

    /// Byte vectors are used as-is: no preprocessing is applied.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
/// Scalar dot product of two byte vectors.
///
/// Each byte is widened to `i32` before multiplying so no product can wrap;
/// the integer sum is converted to `ScoreType` only at the end.
pub fn dot_similarity_bytes(
    v1: &[VectorElementTypeByte],
    v2: &[VectorElementTypeByte],
) -> ScoreType {
    v1.iter()
        .zip(v2)
        .map(|(a, b)| i32::from(*a) * i32::from(*b))
        .sum::<i32>() as ScoreType
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/simple_cosine.rs | lib/segment/src/spaces/metric_uint/simple_cosine.rs | use common::types::ScoreType;
use crate::data_types::vectors::{DenseVector, VectorElementTypeByte};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_uint::avx2::cosine::avx_cosine_similarity_bytes;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::spaces::metric_uint::neon::cosine::neon_cosine_similarity_bytes;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_uint::sse2::cosine::sse_cosine_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{CosineMetric, MIN_DIM_SIZE_SIMD};
use crate::types::Distance;
/// Cosine metric over raw `u8` vectors.
impl Metric<VectorElementTypeByte> for CosineMetric {
    fn distance() -> Distance {
        Distance::Cosine
    }

    /// Cosine similarity with runtime SIMD dispatch, trying the widest
    /// instruction set first: AVX2+FMA (x86_64, dim >= MIN_DIM_SIZE_AVX),
    /// then SSE2 (x86/x86_64, dim >= MIN_DIM_SIZE_SIMD), then NEON (aarch64,
    /// compile-time feature), and finally the scalar fallback.
    ///
    /// NOTE(review): assumes `v1.len() == v2.len()`; the SIMD kernels only
    /// debug-assert this — confirm callers guarantee equal dimensions.
    fn similarity(v1: &[VectorElementTypeByte], v2: &[VectorElementTypeByte]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            // AVX path is only worthwhile above the AVX dimension threshold.
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("avx2")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { avx_cosine_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse")
                && is_x86_feature_detected!("sse2")
                && v1.len() >= MIN_DIM_SIZE_SIMD
            {
                return unsafe { sse_cosine_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { neon_cosine_similarity_bytes(v1, v2) };
            }
        }
        // Portable scalar fallback (also used for short vectors).
        cosine_similarity_bytes(v1, v2)
    }

    /// Byte vectors cannot be pre-normalized in `u8`, so the vector is kept
    /// as-is; normalization happens inside `similarity` by dividing by the
    /// norms.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
/// Scalar cosine similarity of two byte vectors.
///
/// Accumulates the dot product and both squared norms in `i32` (each byte is
/// widened before multiplying), then normalizes in floating point. Returns
/// `0.0` when either vector has zero norm, avoiding a NaN from `0 / 0`.
pub fn cosine_similarity_bytes(
    v1: &[VectorElementTypeByte],
    v2: &[VectorElementTypeByte],
) -> ScoreType {
    let mut dot = 0i32;
    let mut sq1 = 0i32;
    let mut sq2 = 0i32;
    for (&a, &b) in v1.iter().zip(v2.iter()) {
        let (a, b) = (i32::from(a), i32::from(b));
        dot += a * b;
        sq1 += a * a;
        sq2 += b * b;
    }
    if sq1 == 0 || sq2 == 0 {
        // Zero-norm vector: cosine is undefined, define it as 0.
        return 0.0;
    }
    dot as ScoreType / (sq1 as ScoreType * sq2 as ScoreType).sqrt()
}
/// An all-zero vector has zero norm; the similarity must be defined as 0.0
/// (explicit guard in `cosine_similarity_bytes`) rather than NaN from 0/0.
#[test]
fn test_zero() {
    let v1: Vec<u8> = vec![0, 0, 0, 0, 0, 0, 0, 0];
    let v2: Vec<u8> = vec![255, 255, 0, 254, 253, 252, 251, 250];
    assert_eq!(cosine_similarity_bytes(&v1, &v2), 0.0);
    assert_eq!(cosine_similarity_bytes(&v2, &v1), 0.0);
    assert_eq!(cosine_similarity_bytes(&v1, &v1), 0.0);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/simple_euclid.rs | lib/segment/src/spaces/metric_uint/simple_euclid.rs | use common::types::ScoreType;
use crate::data_types::vectors::{DenseVector, VectorElementTypeByte};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_uint::avx2::euclid::avx_euclid_similarity_bytes;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::spaces::metric_uint::neon::euclid::neon_euclid_similarity_bytes;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_uint::sse2::euclid::sse_euclid_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{EuclidMetric, MIN_DIM_SIZE_SIMD};
use crate::types::Distance;
/// Euclidean metric over raw `u8` vectors.
impl Metric<VectorElementTypeByte> for EuclidMetric {
    fn distance() -> Distance {
        Distance::Euclid
    }

    /// Euclidean similarity (negated squared distance) with runtime SIMD
    /// dispatch, trying the widest instruction set first: AVX2+FMA (x86_64,
    /// dim >= MIN_DIM_SIZE_AVX), then SSE2 (x86/x86_64,
    /// dim >= MIN_DIM_SIZE_SIMD), then NEON (aarch64, compile-time feature),
    /// and finally the scalar fallback.
    ///
    /// NOTE(review): assumes `v1.len() == v2.len()`; the SIMD kernels only
    /// debug-assert this — confirm callers guarantee equal dimensions.
    fn similarity(v1: &[VectorElementTypeByte], v2: &[VectorElementTypeByte]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            // AVX path is only worthwhile above the AVX dimension threshold.
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("avx2")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { avx_euclid_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse")
                && is_x86_feature_detected!("sse2")
                && v1.len() >= MIN_DIM_SIZE_SIMD
            {
                return unsafe { sse_euclid_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { neon_euclid_similarity_bytes(v1, v2) };
            }
        }
        // Portable scalar fallback (also used for short vectors).
        euclid_similarity_bytes(v1, v2)
    }

    /// Byte vectors are used as-is: no preprocessing is applied.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
/// Scalar Euclidean similarity of two byte vectors: the *negated* sum of
/// squared differences (no square root), so larger distances yield smaller
/// scores. Differences are computed in `i32` so they cannot wrap.
pub fn euclid_similarity_bytes(
    v1: &[VectorElementTypeByte],
    v2: &[VectorElementTypeByte],
) -> ScoreType {
    let mut sum_sq = 0i32;
    for (a, b) in v1.iter().zip(v2) {
        let diff = i32::from(*a) - i32::from(*b);
        sum_sq += diff * diff;
    }
    -(sum_sq as ScoreType)
}
#[cfg(test)]
mod tests {
    use std::borrow::Cow;

    use super::*;
    use crate::data_types::primitive::PrimitiveVectorElement;
    use crate::data_types::vectors::{TypedDenseVector, VectorElementType};

    /// Float -> byte conversion saturates out-of-range values: per the
    /// expected output, -10.0 clamps to 0 and 300.0 clamps to 255
    /// (presumably done inside `slice_from_float_cow` — verify there).
    #[test]
    fn test_conversion_to_bytes() {
        let dense_vector = DenseVector::from(vec![-10.0, 1.0, 2.0, 3.0, 255., 300.]);
        let preprocessed_vector =
            <EuclidMetric as Metric<VectorElementType>>::preprocess(dense_vector);
        let typed_dense_vector =
            VectorElementTypeByte::slice_from_float_cow(Cow::from(preprocessed_vector));
        let expected: TypedDenseVector<VectorElementTypeByte> = vec![0, 1, 2, 3, 255, 255];
        assert_eq!(typed_dense_vector, expected);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/mod.rs | lib/segment/src/spaces/metric_uint/mod.rs | pub mod simple_cosine;
pub mod simple_dot;
pub mod simple_euclid;
pub mod simple_manhattan;
#[cfg(target_arch = "x86_64")]
pub mod avx2;
#[cfg(target_arch = "aarch64")]
pub mod neon;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub mod sse2;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/simple_manhattan.rs | lib/segment/src/spaces/metric_uint/simple_manhattan.rs | use common::types::ScoreType;
use crate::data_types::vectors::{DenseVector, VectorElementTypeByte};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_uint::avx2::manhattan::avx_manhattan_similarity_bytes;
#[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
use crate::spaces::metric_uint::neon::manhattan::neon_manhattan_similarity_bytes;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_uint::sse2::manhattan::sse_manhattan_similarity_bytes;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{MIN_DIM_SIZE_SIMD, ManhattanMetric};
use crate::types::Distance;
/// Manhattan (L1) metric over raw `u8` vectors.
impl Metric<VectorElementTypeByte> for ManhattanMetric {
    fn distance() -> Distance {
        Distance::Manhattan
    }

    /// Manhattan similarity (negated L1 distance) with runtime SIMD dispatch,
    /// trying the widest instruction set first: AVX2+FMA (x86_64,
    /// dim >= MIN_DIM_SIZE_AVX), then SSE2 (x86/x86_64,
    /// dim >= MIN_DIM_SIZE_SIMD), then NEON (aarch64, compile-time feature),
    /// and finally the scalar fallback.
    ///
    /// NOTE(review): assumes `v1.len() == v2.len()`; the SIMD kernels only
    /// debug-assert this — confirm callers guarantee equal dimensions.
    fn similarity(v1: &[VectorElementTypeByte], v2: &[VectorElementTypeByte]) -> ScoreType {
        #[cfg(target_arch = "x86_64")]
        {
            // AVX path is only worthwhile above the AVX dimension threshold.
            if is_x86_feature_detected!("avx")
                && is_x86_feature_detected!("avx2")
                && is_x86_feature_detected!("fma")
                && v1.len() >= MIN_DIM_SIZE_AVX
            {
                return unsafe { avx_manhattan_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("sse")
                && is_x86_feature_detected!("sse2")
                && v1.len() >= MIN_DIM_SIZE_SIMD
            {
                return unsafe { sse_manhattan_similarity_bytes(v1, v2) };
            }
        }
        #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
        {
            if std::arch::is_aarch64_feature_detected!("neon") && v1.len() >= MIN_DIM_SIZE_SIMD {
                return unsafe { neon_manhattan_similarity_bytes(v1, v2) };
            }
        }
        // Portable scalar fallback (also used for short vectors).
        manhattan_similarity_bytes(v1, v2)
    }

    /// Byte vectors are used as-is: no preprocessing is applied.
    fn preprocess(vector: DenseVector) -> DenseVector {
        vector
    }
}
/// Scalar Manhattan similarity of two byte vectors: the *negated* sum of
/// absolute element-wise differences, so larger distances yield smaller
/// scores. Differences are computed in `i32` so they cannot wrap.
pub fn manhattan_similarity_bytes(
    v1: &[VectorElementTypeByte],
    v2: &[VectorElementTypeByte],
) -> ScoreType {
    let mut total = 0i32;
    for (a, b) in v1.iter().zip(v2) {
        total += (i32::from(*a) - i32::from(*b)).abs();
    }
    -(total as ScoreType)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/neon/cosine.rs | lib/segment/src/spaces/metric_uint/neon/cosine.rs | use std::arch::aarch64::*;
/// NEON cosine similarity for `u8` vectors.
///
/// Processes 16 bytes per iteration: `vmull_u8` widens u8*u8 products to u16,
/// and `vpadalq_u16` pairwise-adds them into u32 accumulator lanes, so lane
/// overflow is avoided for realistic vector lengths. The trailing
/// `len % 16` elements are handled by a scalar loop. Returns 0.0 when either
/// norm is zero (instead of NaN).
///
/// Safety: caller must ensure NEON is available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "neon")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn neon_cosine_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // u32x4 accumulators for the dot product and both squared norms;
        // two registers each so both halves of a 16-byte load are consumed.
        let mut mul1 = vdupq_n_u32(0);
        let mut mul2 = vdupq_n_u32(0);
        let mut norm11 = vdupq_n_u32(0);
        let mut norm12 = vdupq_n_u32(0);
        let mut norm21 = vdupq_n_u32(0);
        let mut norm22 = vdupq_n_u32(0);
        let len = v1.len();
        for _ in 0..len / 16 {
            let p1 = vld1q_u8(ptr1);
            let p2 = vld1q_u8(ptr2);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            let p1_low = vget_low_u8(p1);
            let p1_high = vget_high_u8(p1);
            let p2_low = vget_low_u8(p2);
            let p2_high = vget_high_u8(p2);
            // Dot product: widening u8 multiply -> u16, accumulate into u32.
            let mul_low = vmull_u8(p1_low, p2_low);
            let mul_high = vmull_u8(p1_high, p2_high);
            mul1 = vpadalq_u16(mul1, mul_low);
            mul2 = vpadalq_u16(mul2, mul_high);
            // Squared norm of v1.
            let mul_low = vmull_u8(p1_low, p1_low);
            let mul_high = vmull_u8(p1_high, p1_high);
            norm11 = vpadalq_u16(norm11, mul_low);
            norm12 = vpadalq_u16(norm12, mul_high);
            // Squared norm of v2.
            let mul_low = vmull_u8(p2_low, p2_low);
            let mul_high = vmull_u8(p2_high, p2_high);
            norm21 = vpadalq_u16(norm21, mul_low);
            norm22 = vpadalq_u16(norm22, mul_high);
        }
        // Horizontal sums of the u32 lanes.
        let mut dot_product = vaddvq_u32(vaddq_u32(mul1, mul2)) as f32;
        let mut norm1 = vaddvq_u32(vaddq_u32(norm11, norm12)) as f32;
        let mut norm2 = vaddvq_u32(vaddq_u32(norm21, norm22)) as f32;
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_dot_product = 0;
            let mut remainder_norm1 = 0;
            let mut remainder_norm2 = 0;
            for _ in 0..remainder {
                let v1 = *ptr1;
                let v2 = *ptr2;
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                remainder_dot_product += i32::from(v1) * i32::from(v2);
                remainder_norm1 += i32::from(v1) * i32::from(v1);
                remainder_norm2 += i32::from(v2) * i32::from(v2);
            }
            dot_product += remainder_dot_product as f32;
            norm1 += remainder_norm1 as f32;
            norm2 += remainder_norm2 as f32;
        }
        // Zero-norm guard: cosine is undefined, define it as 0.
        let denominator = norm1 * norm2;
        if denominator == 0.0 {
            return 0.0;
        }
        dot_product / denominator.sqrt()
    }
}
#[cfg(test)]
mod tests {
    use std::arch::is_aarch64_feature_detected;

    use super::*;
    use crate::spaces::metric_uint::simple_cosine::cosine_similarity_bytes;

    /// NEON result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_neon() {
        if is_aarch64_feature_detected!("neon") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { neon_cosine_similarity_bytes(&v1, &v2) };
            let dot = cosine_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("neon test skipped");
        }
    }

    /// Zero-norm inputs must yield 0.0 from the SIMD path too.
    #[test]
    fn test_zero_neon() {
        if is_aarch64_feature_detected!("neon") {
            let v1: Vec<u8> = vec![0, 0, 0, 0, 0, 0, 0, 0];
            let v2: Vec<u8> = vec![255, 255, 0, 254, 253, 252, 251, 250];
            let dot_simd = unsafe { neon_cosine_similarity_bytes(&v1, &v2) };
            assert_eq!(dot_simd, 0.0);
            let dot_simd = unsafe { neon_cosine_similarity_bytes(&v2, &v1) };
            assert_eq!(dot_simd, 0.0);
            let dot_simd = unsafe { neon_cosine_similarity_bytes(&v1, &v1) };
            assert_eq!(dot_simd, 0.0);
        } else {
            println!("neon test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/neon/euclid.rs | lib/segment/src/spaces/metric_uint/neon/euclid.rs | use std::arch::aarch64::*;
/// NEON Euclidean similarity (negated squared L2 distance) for `u8` vectors.
///
/// Processes 16 bytes per iteration: `vabdq_u8` computes per-byte absolute
/// differences, `vmull_u8` squares them into u16 products, and `vpadalq_u16`
/// pairwise-adds into u32 accumulator lanes. The trailing `len % 16` elements
/// are handled by a scalar loop.
///
/// Safety: caller must ensure NEON is available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "neon")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn neon_euclid_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // Two u32x4 accumulators, one per half of each 16-byte load.
        let mut mul1 = vdupq_n_u32(0);
        let mut mul2 = vdupq_n_u32(0);
        let len = v1.len();
        for _ in 0..len / 16 {
            let p1 = vld1q_u8(ptr1);
            let p2 = vld1q_u8(ptr2);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            // |p1 - p2| per byte, then square via widening multiply.
            let abs_diff = vabdq_u8(p1, p2);
            let abs_diff_low = vget_low_u8(abs_diff);
            let abs_diff_high = vget_high_u8(abs_diff);
            let mul_low = vmull_u8(abs_diff_low, abs_diff_low);
            let mul_high = vmull_u8(abs_diff_high, abs_diff_high);
            mul1 = vpadalq_u16(mul1, mul_low);
            mul2 = vpadalq_u16(mul2, mul_high);
        }
        // Horizontal sum of the u32 lanes.
        let mut score = vaddvq_u32(vaddq_u32(mul1, mul2)) as f32;
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_score = 0;
            for _ in 0..remainder {
                let v1 = i32::from(*ptr1);
                let v2 = i32::from(*ptr2);
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                let diff = v1 - v2;
                remainder_score += diff * diff;
            }
            score += remainder_score as f32;
        }
        // Negated so that larger distance => smaller similarity score.
        -score
    }
}
#[cfg(test)]
mod tests {
    use std::arch::is_aarch64_feature_detected;

    use super::*;
    use crate::spaces::metric_uint::simple_euclid::euclid_similarity_bytes;

    /// NEON result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_neon() {
        if is_aarch64_feature_detected!("neon") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { neon_euclid_similarity_bytes(&v1, &v2) };
            let dot = euclid_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("neon test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/neon/mod.rs | lib/segment/src/spaces/metric_uint/neon/mod.rs | pub mod cosine;
pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/neon/dot.rs | lib/segment/src/spaces/metric_uint/neon/dot.rs | use std::arch::aarch64::*;
/// NEON dot product for `u8` vectors.
///
/// Processes 16 bytes per iteration: `vmull_u8` widens u8*u8 products to u16
/// and `vpadalq_u16` pairwise-adds them into u32 accumulator lanes. The
/// trailing `len % 16` elements are handled by a scalar loop.
///
/// Safety: caller must ensure NEON is available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "neon")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn neon_dot_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // Two u32x4 accumulators, one per half of each 16-byte load.
        let mut mul1 = vdupq_n_u32(0);
        let mut mul2 = vdupq_n_u32(0);
        let len = v1.len();
        for _ in 0..len / 16 {
            let p1 = vld1q_u8(ptr1);
            let p2 = vld1q_u8(ptr2);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            // Widening u8 multiply -> u16, accumulate into u32.
            let mul_low = vmull_u8(vget_low_u8(p1), vget_low_u8(p2));
            let mul_high = vmull_u8(vget_high_u8(p1), vget_high_u8(p2));
            mul1 = vpadalq_u16(mul1, mul_low);
            mul2 = vpadalq_u16(mul2, mul_high);
        }
        // Horizontal sum of the u32 lanes.
        let mut score = vaddvq_u32(vaddq_u32(mul1, mul2)) as f32;
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_score = 0;
            for _ in 0..remainder {
                let v1 = *ptr1;
                let v2 = *ptr2;
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                remainder_score += i32::from(v1) * i32::from(v2);
            }
            score += remainder_score as f32;
        }
        score
    }
}
#[cfg(test)]
mod tests {
    use std::arch::is_aarch64_feature_detected;

    use super::*;
    use crate::spaces::metric_uint::simple_dot::dot_similarity_bytes;

    /// NEON result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_neon() {
        if is_aarch64_feature_detected!("neon") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { neon_dot_similarity_bytes(&v1, &v2) };
            let dot = dot_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("neon test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/neon/manhattan.rs | lib/segment/src/spaces/metric_uint/neon/manhattan.rs | use std::arch::aarch64::*;
/// NEON Manhattan similarity (negated L1 distance) for `u8` vectors.
///
/// Processes 16 bytes per iteration: `vabdq_u8` computes per-byte absolute
/// differences, which are widened (`vmovl_u8`) and pairwise-accumulated
/// (`vpadalq_u16`) into u32 lanes. The trailing `len % 16` elements are
/// handled by a scalar loop.
///
/// Fixes over the previous version, for consistency with the sibling
/// dot/cosine/euclid NEON kernels:
/// - accumulate into u32 lanes every iteration instead of summing raw diffs
///   in u16 lanes (`vaddq_u16`), which could wrap once a lane's running sum
///   exceeded 65535 (reachable for dimensions >= ~4128 with large diffs);
/// - the tail loop uses the already-computed `remainder` bound.
///
/// Safety: caller must ensure NEON is available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "neon")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn neon_manhattan_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // Two u32x4 accumulators, one per half of each 16-byte load.
        let mut sum32_low = vdupq_n_u32(0);
        let mut sum32_high = vdupq_n_u32(0);
        let len = v1.len();
        for _ in 0..len / 16 {
            let p1 = vld1q_u8(ptr1);
            let p2 = vld1q_u8(ptr2);
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            // |p1 - p2| per byte.
            let abs_diff = vabdq_u8(p1, p2);
            // Widen u8 -> u16 lanes, then pairwise-accumulate into u32 lanes
            // so the running sum cannot overflow for realistic lengths.
            let abs_diff16_low = vmovl_u8(vget_low_u8(abs_diff));
            let abs_diff16_high = vmovl_u8(vget_high_u8(abs_diff));
            sum32_low = vpadalq_u16(sum32_low, abs_diff16_low);
            sum32_high = vpadalq_u16(sum32_high, abs_diff16_high);
        }
        // Horizontal sum of the u32 lanes.
        let mut score = vaddvq_u32(vaddq_u32(sum32_low, sum32_high)) as f32;
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_score = 0;
            for _ in 0..remainder {
                let v1 = i32::from(*ptr1);
                let v2 = i32::from(*ptr2);
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                remainder_score += (v1 - v2).abs();
            }
            score += remainder_score as f32;
        }
        // Negated so that larger distance => smaller similarity score.
        -score
    }
}
#[cfg(test)]
mod tests {
    use std::arch::is_aarch64_feature_detected;

    use super::*;
    use crate::spaces::metric_uint::simple_manhattan::manhattan_similarity_bytes;

    /// NEON result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_neon() {
        if is_aarch64_feature_detected!("neon") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { neon_manhattan_similarity_bytes(&v1, &v2) };
            let dot = manhattan_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("neon test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/sse2/cosine.rs | lib/segment/src/spaces/metric_uint/sse2/cosine.rs | use std::arch::x86_64::*;
use crate::spaces::simple_sse::hsum128_ps_sse;
/// SSE2 cosine similarity for `u8` vectors.
///
/// Processes 16 bytes per iteration. Each 16-byte register is split into two
/// registers of eight 16-bit lanes (even-indexed bytes and odd-indexed
/// bytes), then `_mm_madd_epi16` multiplies lanes and horizontally adds
/// adjacent pairs into four i32 sums. Since every lane value is <= 255, the
/// products (<= 65025) and their pairwise sums fit i32 without overflow.
/// The trailing `len % 16` elements are handled by a scalar loop. Returns
/// 0.0 when either norm is zero (instead of NaN).
///
/// Safety: caller must ensure SSE/SSE2 are available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "sse")]
#[target_feature(enable = "sse2")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_cosine_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // i32x4 accumulators for the dot product and both squared norms.
        let mut dot_acc = _mm_setzero_si128();
        let mut norm1_acc = _mm_setzero_si128();
        let mut norm2_acc = _mm_setzero_si128();
        // Mask keeping only the low byte of each 16-bit lane.
        let mask_epu16_epu8 = _mm_set1_epi16(0xFF);
        let len = v1.len();
        for _ in 0..len / 16 {
            // Load 16 bytes (unaligned).
            let p1 = _mm_loadu_si128(ptr1.cast::<__m128i>());
            let p2 = _mm_loadu_si128(ptr2.cast::<__m128i>());
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            // De-interleave 16 u8 into two sets of eight u16 lanes:
            // *_low  = even-indexed bytes (mask the register directly),
            // *_high = odd-indexed bytes (shift right one byte, then mask).
            let p1_low = _mm_and_si128(p1, mask_epu16_epu8);
            let p1_high = _mm_and_si128(_mm_bsrli_si128(p1, 1), mask_epu16_epu8);
            let p2_low = _mm_and_si128(p2, mask_epu16_epu8);
            let p2_high = _mm_and_si128(_mm_bsrli_si128(p2, 1), mask_epu16_epu8);
            // madd: multiply 16-bit lanes, horizontally add adjacent pairs
            // into four i32 partial sums; accumulate.
            let norm1_low = _mm_madd_epi16(p1_low, p1_low);
            norm1_acc = _mm_add_epi32(norm1_acc, norm1_low);
            let norm2_low = _mm_madd_epi16(p2_low, p2_low);
            norm2_acc = _mm_add_epi32(norm2_acc, norm2_low);
            let dot_low = _mm_madd_epi16(p1_low, p2_low);
            dot_acc = _mm_add_epi32(dot_acc, dot_low);
            // Same for the odd-indexed bytes.
            let norm1_high = _mm_madd_epi16(p1_high, p1_high);
            norm1_acc = _mm_add_epi32(norm1_acc, norm1_high);
            let norm2_high = _mm_madd_epi16(p2_high, p2_high);
            norm2_acc = _mm_add_epi32(norm2_acc, norm2_high);
            let dot_high = _mm_madd_epi16(p1_high, p2_high);
            dot_acc = _mm_add_epi32(dot_acc, dot_high);
        }
        // Convert the four i32 lanes to f32 and horizontally sum.
        let dot_ps = _mm_cvtepi32_ps(dot_acc);
        let mut dot_product = hsum128_ps_sse(dot_ps);
        let norm1_ps = _mm_cvtepi32_ps(norm1_acc);
        let mut norm1 = hsum128_ps_sse(norm1_ps);
        let norm2_ps = _mm_cvtepi32_ps(norm2_acc);
        let mut norm2 = hsum128_ps_sse(norm2_ps);
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_dot_product = 0;
            let mut remainder_norm1 = 0;
            let mut remainder_norm2 = 0;
            for _ in 0..remainder {
                let v1 = *ptr1;
                let v2 = *ptr2;
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                remainder_dot_product += i32::from(v1) * i32::from(v2);
                remainder_norm1 += i32::from(v1) * i32::from(v1);
                remainder_norm2 += i32::from(v2) * i32::from(v2);
            }
            dot_product += remainder_dot_product as f32;
            norm1 += remainder_norm1 as f32;
            norm2 += remainder_norm2 as f32;
        }
        // Zero-norm guard: cosine is undefined, define it as 0.
        let denominator = norm1 * norm2;
        if denominator == 0.0 {
            return 0.0;
        }
        dot_product / denominator.sqrt()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::spaces::metric_uint::simple_cosine::cosine_similarity_bytes;

    /// SSE result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_sse2() {
        if is_x86_feature_detected!("sse2") && is_x86_feature_detected!("sse") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { sse_cosine_similarity_bytes(&v1, &v2) };
            let dot = cosine_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("sse2 test skipped");
        }
    }

    /// Zero-norm inputs must yield 0.0 from the SIMD path too.
    #[test]
    fn test_zero_sse2() {
        if is_x86_feature_detected!("sse2") && is_x86_feature_detected!("sse") {
            let v1: Vec<u8> = vec![0, 0, 0, 0, 0, 0, 0, 0];
            let v2: Vec<u8> = vec![255, 255, 0, 254, 253, 252, 251, 250];
            let dot_simd = unsafe { sse_cosine_similarity_bytes(&v1, &v2) };
            assert_eq!(dot_simd, 0.0);
            let dot_simd = unsafe { sse_cosine_similarity_bytes(&v2, &v1) };
            assert_eq!(dot_simd, 0.0);
            let dot_simd = unsafe { sse_cosine_similarity_bytes(&v1, &v1) };
            assert_eq!(dot_simd, 0.0);
        } else {
            println!("sse2 test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/sse2/euclid.rs | lib/segment/src/spaces/metric_uint/sse2/euclid.rs | use std::arch::x86_64::*;
use crate::spaces::simple_sse::hsum128_ps_sse;
/// SSE2 Euclidean similarity (negated squared L2 distance) for `u8` vectors.
///
/// Processes 16 bytes per iteration. Absolute byte differences are obtained
/// without a dedicated instruction by saturating-subtracting in both
/// directions and taking the per-byte maximum. The diffs are then split into
/// even/odd 16-bit lanes and squared-and-pair-summed via `_mm_madd_epi16`
/// into four i32 accumulator lanes. The trailing `len % 16` elements are
/// handled by a scalar loop.
///
/// Safety: caller must ensure SSE/SSE2 are available; lengths are only
/// debug-asserted equal.
#[target_feature(enable = "sse")]
#[target_feature(enable = "sse2")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_euclid_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
    debug_assert!(v1.len() == v2.len());
    let mut ptr1: *const u8 = v1.as_ptr();
    let mut ptr2: *const u8 = v2.as_ptr();
    unsafe {
        // i32x4 accumulator for the squared differences.
        let mut acc = _mm_setzero_si128();
        // Mask keeping only the low byte of each 16-bit lane.
        let mask_epu16_epu8 = _mm_set1_epi16(0xFF);
        let len = v1.len();
        for _ in 0..len / 16 {
            // Load 16 bytes (unaligned).
            let p1 = _mm_loadu_si128(ptr1.cast::<__m128i>());
            let p2 = _mm_loadu_si128(ptr2.cast::<__m128i>());
            ptr1 = ptr1.add(16);
            ptr2 = ptr2.add(16);
            // |p1 - p2| per byte: saturating subtraction in both directions
            // (one side saturates to 0), then the max is the absolute diff.
            let diff1 = _mm_subs_epu8(p1, p2);
            let diff2 = _mm_subs_epu8(p2, p1);
            let abs_diff = _mm_max_epu8(diff1, diff2);
            // De-interleave into even-indexed (mask) and odd-indexed
            // (byte-shift then mask) u16 lanes.
            let abs_diff_low = _mm_and_si128(abs_diff, mask_epu16_epu8);
            let abs_diff_high = _mm_and_si128(_mm_bsrli_si128(abs_diff, 1), mask_epu16_epu8);
            // madd: square 16-bit lanes and horizontally add adjacent pairs
            // into four i32 partial sums; accumulate.
            let mul16 = _mm_madd_epi16(abs_diff_low, abs_diff_low);
            acc = _mm_add_epi32(acc, mul16);
            let mul16 = _mm_madd_epi16(abs_diff_high, abs_diff_high);
            acc = _mm_add_epi32(acc, mul16);
        }
        // Convert the four i32 lanes to f32 and horizontally sum.
        let mul_ps = _mm_cvtepi32_ps(acc);
        let mut score = hsum128_ps_sse(mul_ps);
        // Scalar tail for the remaining len % 16 elements.
        let remainder = len % 16;
        if remainder != 0 {
            let mut remainder_score = 0;
            for _ in 0..remainder {
                let v1 = i32::from(*ptr1);
                let v2 = i32::from(*ptr2);
                ptr1 = ptr1.add(1);
                ptr2 = ptr2.add(1);
                let diff = v1 - v2;
                remainder_score += diff * diff;
            }
            score += remainder_score as f32;
        }
        // Negated so that larger distance => smaller similarity score.
        -score
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::spaces::metric_uint::simple_euclid::euclid_similarity_bytes;

    /// SSE result must exactly match the scalar reference: both accumulate
    /// in integers, so `assert_eq!` on the floats is valid (100 elements
    /// exercises both the 16-byte SIMD loop and the scalar tail).
    #[test]
    fn test_spaces_sse() {
        if is_x86_feature_detected!("sse2") && is_x86_feature_detected!("sse") {
            let v1: Vec<u8> = vec![
                255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
                4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                11, 12, 13, 14, 15, 16, 17,
            ];
            let v2: Vec<u8> = vec![
                255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
                244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
                248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
                252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
                255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
                240, 239, 238,
            ];
            let dot_simd = unsafe { sse_euclid_similarity_bytes(&v1, &v2) };
            let dot = euclid_similarity_bytes(&v1, &v2);
            assert_eq!(dot_simd, dot);
        } else {
            println!("sse2 test skipped");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/sse2/mod.rs | lib/segment/src/spaces/metric_uint/sse2/mod.rs | pub mod cosine;
pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/sse2/dot.rs | lib/segment/src/spaces/metric_uint/sse2/dot.rs | use std::arch::x86_64::*;
use crate::spaces::simple_sse::hsum128_ps_sse;
#[target_feature(enable = "sse")]
#[target_feature(enable = "sse2")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_dot_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulator for 4x32 bit integers
let mut dot_acc = _mm_setzero_si128();
// mask to take only lower 8 bits from 16 bits
let mask_epu16_epu8 = _mm_set1_epi16(0xFF);
let len = v1.len();
for _ in 0..len / 16 {
// load 16 bytes
let p1 = _mm_loadu_si128(ptr1.cast::<__m128i>());
let p2 = _mm_loadu_si128(ptr2.cast::<__m128i>());
ptr1 = ptr1.add(16);
ptr2 = ptr2.add(16);
// convert 16x8 bit integers into 8x16 bit integers using bitwise AND
// conversion is done by taking only lower 8 bits from 16 bits
// p1 = [byte0, byte1, byte2, byte3, ..]
// p1_low = [0, byte1, 0, byte3, ..]
// p1_high = [0, byte0, 0, byte2, ..]
let p1_low = _mm_and_si128(p1, mask_epu16_epu8);
let p1_high = _mm_and_si128(_mm_bsrli_si128(p1, 1), mask_epu16_epu8);
let p2_low = _mm_and_si128(p2, mask_epu16_epu8);
let p2_high = _mm_and_si128(_mm_bsrli_si128(p2, 1), mask_epu16_epu8);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_low = _mm_madd_epi16(p1_low, p2_low);
dot_acc = _mm_add_epi32(dot_acc, dot_low);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_high = _mm_madd_epi16(p1_high, p2_high);
dot_acc = _mm_add_epi32(dot_acc, dot_high);
}
// convert 4x32 bit integers into 8x32 bit floats and calculate horizontal sum
let dot_ps = _mm_cvtepi32_ps(dot_acc);
let mut score = hsum128_ps_sse(dot_ps);
let remainder = len % 16;
if remainder != 0 {
let mut remainder_score = 0;
for _ in 0..remainder {
let v1 = *ptr1;
let v2 = *ptr2;
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
remainder_score += i32::from(v1) * i32::from(v2);
}
score += remainder_score as f32;
}
score
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_dot::dot_similarity_bytes;
#[test]
fn test_spaces_sse() {
if is_x86_feature_detected!("sse2") && is_x86_feature_detected!("sse") {
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { sse_dot_similarity_bytes(&v1, &v2) };
let dot = dot_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("sse2 test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/sse2/manhattan.rs | lib/segment/src/spaces/metric_uint/sse2/manhattan.rs | use std::arch::x86_64::*;
use crate::spaces::simple_sse::hsum128_ps_sse;
#[target_feature(enable = "sse")]
#[target_feature(enable = "sse2")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_manhattan_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulator for 4x32 bit integers
let mut acc = _mm_setzero_si128();
let len = v1.len();
for _ in 0..len / 16 {
// load 16 bytes
let p1 = _mm_loadu_si128(ptr1.cast::<__m128i>());
let p2 = _mm_loadu_si128(ptr2.cast::<__m128i>());
ptr1 = ptr1.add(16);
ptr2 = ptr2.add(16);
// Computes the absolute differences of packed unsigned 8-bit integers in a and b
// with horizontal sum and adding to accumulator
let sad = _mm_sad_epu8(p1, p2);
acc = _mm_add_epi32(acc, sad);
}
// convert 4x32 bit integers into 8x32 bit floats and calculate horizontal sum
let mul_ps = _mm_cvtepi32_ps(acc);
let mut score = hsum128_ps_sse(mul_ps);
let remainder = len % 16;
if remainder != 0 {
let mut remainder_score = 0;
for _ in 0..len % 16 {
let v1 = i32::from(*ptr1);
let v2 = i32::from(*ptr2);
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
remainder_score += (v1 - v2).abs();
}
score += remainder_score as f32;
}
-score
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_manhattan::manhattan_similarity_bytes;
#[test]
fn test_spaces_sse() {
if is_x86_feature_detected!("sse2") && is_x86_feature_detected!("sse") {
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { sse_manhattan_similarity_bytes(&v1, &v2) };
let dot = manhattan_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("sse2 test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/avx2/cosine.rs | lib/segment/src/spaces/metric_uint/avx2/cosine.rs | use std::arch::x86_64::*;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "fma")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_cosine_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
debug_assert!(is_x86_feature_detected!("avx"));
debug_assert!(is_x86_feature_detected!("avx2"));
debug_assert!(is_x86_feature_detected!("fma"));
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulators for 8x32 bit integers
let mut dot_acc = _mm256_setzero_si256();
let mut norm1_acc = _mm256_setzero_si256();
let mut norm2_acc = _mm256_setzero_si256();
// mask to take only lower 8 bits from 16 bits
let mask_epu16_epu8 = _mm256_set1_epi16(0xFF);
let len = v1.len();
for _ in 0..len / 32 {
// load 32 bytes
let p1 = _mm256_loadu_si256(ptr1.cast::<__m256i>());
let p2 = _mm256_loadu_si256(ptr2.cast::<__m256i>());
ptr1 = ptr1.add(32);
ptr2 = ptr2.add(32);
// convert 32x8 bit integers into 16x16 bit integers using bitwise AND
// conversion is done by taking only lower 8 bits from 16 bits
// p1 = [byte0, byte1, byte2, byte3, ..]
// p1_low = [0, byte1, 0, byte3, ..]
// p1_high = [0, byte0, 0, byte2, ..]
let p1_low = _mm256_and_si256(p1, mask_epu16_epu8);
let p1_high = _mm256_and_si256(_mm256_bsrli_epi128(p1, 1), mask_epu16_epu8);
let p2_low = _mm256_and_si256(p2, mask_epu16_epu8);
let p2_high = _mm256_and_si256(_mm256_bsrli_epi128(p2, 1), mask_epu16_epu8);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let norm1_low = _mm256_madd_epi16(p1_low, p1_low);
norm1_acc = _mm256_add_epi32(norm1_acc, norm1_low);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let norm2_low = _mm256_madd_epi16(p2_low, p2_low);
norm2_acc = _mm256_add_epi32(norm2_acc, norm2_low);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_low = _mm256_madd_epi16(p1_low, p2_low);
dot_acc = _mm256_add_epi32(dot_acc, dot_low);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let norm1_high = _mm256_madd_epi16(p1_high, p1_high);
norm1_acc = _mm256_add_epi32(norm1_acc, norm1_high);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let norm2_high = _mm256_madd_epi16(p2_high, p2_high);
norm2_acc = _mm256_add_epi32(norm2_acc, norm2_high);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_high = _mm256_madd_epi16(p1_high, p2_high);
dot_acc = _mm256_add_epi32(dot_acc, dot_high);
}
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let dot_ps = _mm256_cvtepi32_ps(dot_acc);
let mut dot_product = hsum256_ps_avx(dot_ps);
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let norm1_ps = _mm256_cvtepi32_ps(norm1_acc);
let mut norm1 = hsum256_ps_avx(norm1_ps);
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let norm2_ps = _mm256_cvtepi32_ps(norm2_acc);
let mut norm2 = hsum256_ps_avx(norm2_ps);
let remainder = len % 32;
if remainder != 0 {
let mut remainder_dot_product = 0;
let mut remainder_norm1 = 0;
let mut remainder_norm2 = 0;
for _ in 0..remainder {
let v1 = *ptr1;
let v2 = *ptr2;
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
remainder_dot_product += i32::from(v1) * i32::from(v2);
remainder_norm1 += i32::from(v1) * i32::from(v1);
remainder_norm2 += i32::from(v2) * i32::from(v2);
}
dot_product += remainder_dot_product as f32;
norm1 += remainder_norm1 as f32;
norm2 += remainder_norm2 as f32;
}
let denominator = norm1 * norm2;
if denominator == 0.0 {
return 0.0;
}
dot_product / denominator.sqrt()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_cosine::cosine_similarity_bytes;
#[test]
fn test_spaces_avx() {
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("fma")
{
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { avx_cosine_similarity_bytes(&v1, &v2) };
let dot = cosine_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("avx2 test skipped");
}
}
#[test]
fn test_zero_avx() {
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("fma")
{
let v1: Vec<u8> = vec![0, 0, 0, 0, 0, 0, 0, 0];
let v2: Vec<u8> = vec![255, 255, 0, 254, 253, 252, 251, 250];
let dot_simd = unsafe { avx_cosine_similarity_bytes(&v1, &v2) };
assert_eq!(dot_simd, 0.0);
let dot_simd = unsafe { avx_cosine_similarity_bytes(&v2, &v1) };
assert_eq!(dot_simd, 0.0);
let dot_simd = unsafe { avx_cosine_similarity_bytes(&v1, &v1) };
assert_eq!(dot_simd, 0.0);
} else {
println!("avx2 test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/avx2/euclid.rs | lib/segment/src/spaces/metric_uint/avx2/euclid.rs | use std::arch::x86_64::*;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "fma")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_euclid_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
debug_assert!(is_x86_feature_detected!("avx"));
debug_assert!(is_x86_feature_detected!("avx2"));
debug_assert!(is_x86_feature_detected!("fma"));
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulator for 8x32 bit integers
let mut acc = _mm256_setzero_si256();
// mask to take only lower 8 bits from 16 bits
let mask_epu16_epu8 = _mm256_set1_epi16(0xFF);
let len = v1.len();
for _ in 0..len / 32 {
// load 32 bytes
let p1 = _mm256_loadu_si256(ptr1.cast::<__m256i>());
let p2 = _mm256_loadu_si256(ptr2.cast::<__m256i>());
ptr1 = ptr1.add(32);
ptr2 = ptr2.add(32);
// Compute the difference in both directions and take the maximum for abs
let diff1 = _mm256_subs_epu8(p1, p2);
let diff2 = _mm256_subs_epu8(p2, p1);
let abs_diff = _mm256_max_epu8(diff1, diff2);
let abs_diff_low = _mm256_and_si256(abs_diff, mask_epu16_epu8);
let abs_diff_high = _mm256_and_si256(_mm256_bsrli_epi128(abs_diff, 1), mask_epu16_epu8);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let mul16 = _mm256_madd_epi16(abs_diff_low, abs_diff_low);
acc = _mm256_add_epi32(acc, mul16);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let mul16 = _mm256_madd_epi16(abs_diff_high, abs_diff_high);
acc = _mm256_add_epi32(acc, mul16);
}
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let mul_ps = _mm256_cvtepi32_ps(acc);
let mut score = hsum256_ps_avx(mul_ps);
let remainder = len % 32;
if remainder != 0 {
let mut remainder_score = 0;
for _ in 0..remainder {
let v1 = i32::from(*ptr1);
let v2 = i32::from(*ptr2);
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
let diff = v1 - v2;
remainder_score += diff * diff;
}
score += remainder_score as f32;
}
-score
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_euclid::euclid_similarity_bytes;
#[test]
fn test_spaces_avx() {
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("fma")
{
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { avx_euclid_similarity_bytes(&v1, &v2) };
let dot = euclid_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("avx2 test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/avx2/mod.rs | lib/segment/src/spaces/metric_uint/avx2/mod.rs | pub mod cosine;
pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/avx2/dot.rs | lib/segment/src/spaces/metric_uint/avx2/dot.rs | use std::arch::x86_64::*;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "fma")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_dot_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
debug_assert!(is_x86_feature_detected!("avx"));
debug_assert!(is_x86_feature_detected!("avx2"));
debug_assert!(is_x86_feature_detected!("fma"));
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulator for 8x32 bit integers
let mut dot_acc = _mm256_setzero_si256();
// mask to take only lower 8 bits from 16 bits
let mask_epu16_epu8 = _mm256_set1_epi16(0xFF);
let len = v1.len();
for _ in 0..len / 32 {
// load 32 bytes
let p1 = _mm256_loadu_si256(ptr1.cast::<__m256i>());
let p2 = _mm256_loadu_si256(ptr2.cast::<__m256i>());
ptr1 = ptr1.add(32);
ptr2 = ptr2.add(32);
// convert 32x8 bit integers into 16x16 bit integers using bitwise AND
// conversion is done by taking only lower 8 bits from 16 bits
// p1 = [byte0, byte1, byte2, byte3, ..]
// p1_low = [0, byte1, 0, byte3, ..]
// p1_high = [0, byte0, 0, byte2, ..]
let p1_low = _mm256_and_si256(p1, mask_epu16_epu8);
let p1_high = _mm256_and_si256(_mm256_bsrli_epi128(p1, 1), mask_epu16_epu8);
let p2_low = _mm256_and_si256(p2, mask_epu16_epu8);
let p2_high = _mm256_and_si256(_mm256_bsrli_epi128(p2, 1), mask_epu16_epu8);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_low = _mm256_madd_epi16(p1_low, p2_low);
dot_acc = _mm256_add_epi32(dot_acc, dot_low);
// calculate 16bit multiplication with taking lower 16 bits and adding to accumulator
let dot_high = _mm256_madd_epi16(p1_high, p2_high);
dot_acc = _mm256_add_epi32(dot_acc, dot_high);
}
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let mul_ps = _mm256_cvtepi32_ps(dot_acc);
let mut score = hsum256_ps_avx(mul_ps);
let remainder = len % 32;
if remainder != 0 {
let mut remainder_dot = 0;
for _ in 0..remainder {
let v1 = *ptr1;
let v2 = *ptr2;
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
remainder_dot += i32::from(v1) * i32::from(v2);
}
score += remainder_dot as f32;
}
score
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_dot::dot_similarity_bytes;
#[test]
fn test_spaces_avx() {
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("fma")
{
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { avx_dot_similarity_bytes(&v1, &v2) };
let dot = dot_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("avx2 test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_uint/avx2/manhattan.rs | lib/segment/src/spaces/metric_uint/avx2/manhattan.rs | use std::arch::x86_64::*;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "fma")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_manhattan_similarity_bytes(v1: &[u8], v2: &[u8]) -> f32 {
debug_assert!(v1.len() == v2.len());
debug_assert!(is_x86_feature_detected!("avx"));
debug_assert!(is_x86_feature_detected!("avx2"));
debug_assert!(is_x86_feature_detected!("fma"));
let mut ptr1: *const u8 = v1.as_ptr();
let mut ptr2: *const u8 = v2.as_ptr();
unsafe {
// sum accumulator for 8x32 bit integers
let mut acc = _mm256_setzero_si256();
let len = v1.len();
for _ in 0..len / 32 {
// load 32 bytes
let p1 = _mm256_loadu_si256(ptr1.cast::<__m256i>());
let p2 = _mm256_loadu_si256(ptr2.cast::<__m256i>());
ptr1 = ptr1.add(32);
ptr2 = ptr2.add(32);
// Computes the absolute differences of packed unsigned 8-bit integers in a and b
// with horizontal sum and adding to accumulator
let sad = _mm256_sad_epu8(p1, p2);
acc = _mm256_add_epi32(acc, sad);
}
// convert 8x32 bit integers into 8x32 bit floats and calculate horizontal sum
let mul_ps = _mm256_cvtepi32_ps(acc);
let mut score = hsum256_ps_avx(mul_ps);
let remainder = len % 32;
if remainder != 0 {
let mut remainder_score = 0;
for _ in 0..len % 32 {
let v1 = i32::from(*ptr1);
let v2 = i32::from(*ptr2);
ptr1 = ptr1.add(1);
ptr2 = ptr2.add(1);
remainder_score += (v1 - v2).abs();
}
score += remainder_score as f32;
}
-score
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::spaces::metric_uint::simple_manhattan::manhattan_similarity_bytes;
#[test]
fn test_spaces_avx() {
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("avx2")
&& is_x86_feature_detected!("fma")
{
let v1: Vec<u8> = vec![
255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17,
];
let v2: Vec<u8> = vec![
255, 255, 0, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245,
244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253, 252, 251, 250, 249,
248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255, 255, 255, 254, 253,
252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 255,
255, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238,
];
let dot_simd = unsafe { avx_manhattan_similarity_bytes(&v1, &v2) };
let dot = manhattan_similarity_bytes(&v1, &v2);
assert_eq!(dot_simd, dot);
} else {
println!("avx test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/simple_dot.rs | lib/segment/src/spaces/metric_f16/simple_dot.rs | use common::types::ScoreType;
use half::f16;
use crate::data_types::vectors::{DenseVector, VectorElementTypeHalf};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_f16::avx::dot::avx_dot_similarity_half;
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
use crate::spaces::metric_f16::neon::dot::neon_dot_similarity_half;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_f16::sse::dot::sse_dot_similarity_half;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{DotProductMetric, MIN_DIM_SIZE_SIMD};
use crate::types::Distance;
impl Metric<VectorElementTypeHalf> for DotProductMetric {
fn distance() -> Distance {
Distance::Dot
}
fn similarity(v1: &[VectorElementTypeHalf], v2: &[VectorElementTypeHalf]) -> ScoreType {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
&& v1.len() >= MIN_DIM_SIZE_AVX
{
return unsafe { avx_dot_similarity_half(v1, v2) };
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
return unsafe { sse_dot_similarity_half(v1, v2) };
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
{
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
&& v1.len() >= MIN_DIM_SIZE_SIMD
{
return unsafe { neon_dot_similarity_half(v1, v2) };
}
}
dot_similarity_half(v1, v2)
}
fn preprocess(vector: DenseVector) -> DenseVector {
vector
}
}
pub fn dot_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
v1.iter()
.zip(v2)
.map(|(a, b)| f16::to_f32(a * b))
.sum::<f32>()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/simple_cosine.rs | lib/segment/src/spaces/metric_f16/simple_cosine.rs | use common::types::ScoreType;
use super::simple_dot::dot_similarity_half;
use crate::data_types::vectors::{DenseVector, VectorElementTypeHalf};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_f16::avx::dot::avx_dot_similarity_half;
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
use crate::spaces::metric_f16::neon::dot::neon_dot_similarity_half;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_f16::sse::dot::sse_dot_similarity_half;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{CosineMetric, MIN_DIM_SIZE_SIMD, cosine_preprocess};
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple_avx::*;
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
use crate::spaces::simple_neon::*;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::simple_sse::*;
use crate::types::Distance;
impl Metric<VectorElementTypeHalf> for CosineMetric {
fn distance() -> Distance {
Distance::Dot
}
fn similarity(v1: &[VectorElementTypeHalf], v2: &[VectorElementTypeHalf]) -> ScoreType {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
&& v1.len() >= MIN_DIM_SIZE_AVX
{
return unsafe { avx_dot_similarity_half(v1, v2) };
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
return unsafe { sse_dot_similarity_half(v1, v2) };
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
{
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
&& v1.len() >= MIN_DIM_SIZE_SIMD
{
return unsafe { neon_dot_similarity_half(v1, v2) };
}
}
dot_similarity_half(v1, v2)
}
fn preprocess(vector: DenseVector) -> DenseVector {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& vector.len() >= MIN_DIM_SIZE_AVX
{
return unsafe { cosine_preprocess_avx(vector) };
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("sse") && vector.len() >= MIN_DIM_SIZE_SIMD {
return unsafe { cosine_preprocess_sse(vector) };
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
{
if std::arch::is_aarch64_feature_detected!("neon") && vector.len() >= MIN_DIM_SIZE_SIMD
{
return unsafe { cosine_preprocess_neon(vector) };
}
}
cosine_preprocess(vector)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/simple_euclid.rs | lib/segment/src/spaces/metric_f16/simple_euclid.rs | use common::types::ScoreType;
use half::f16;
use num_traits::Float;
use crate::data_types::vectors::{DenseVector, VectorElementTypeHalf};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_f16::avx::euclid::avx_euclid_similarity_half;
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
use crate::spaces::metric_f16::neon::euclid::neon_euclid_similarity_half;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_f16::sse::euclid::sse_euclid_similarity_half;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{EuclidMetric, MIN_DIM_SIZE_SIMD};
use crate::types::Distance;
impl Metric<VectorElementTypeHalf> for EuclidMetric {
fn distance() -> Distance {
Distance::Euclid
}
fn similarity(v1: &[VectorElementTypeHalf], v2: &[VectorElementTypeHalf]) -> ScoreType {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
&& v1.len() >= MIN_DIM_SIZE_AVX
{
return unsafe { avx_euclid_similarity_half(v1, v2) };
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
return unsafe { sse_euclid_similarity_half(v1, v2) };
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
{
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
&& v1.len() >= MIN_DIM_SIZE_SIMD
{
return unsafe { neon_euclid_similarity_half(v1, v2) };
}
}
euclid_similarity_half(v1, v2)
}
fn preprocess(vector: DenseVector) -> DenseVector {
vector
}
}
pub fn euclid_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
-v1.iter()
.zip(v2)
.map(|(a, b)| f16::to_f32((a - b).powi(2)))
.sum::<f32>()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/mod.rs | lib/segment/src/spaces/metric_f16/mod.rs | pub mod simple_cosine;
pub mod simple_dot;
pub mod simple_euclid;
pub mod simple_manhattan;
#[cfg(target_arch = "x86_64")]
pub mod avx;
#[cfg(all(target_arch = "aarch64", not(windows)))]
pub mod neon;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub mod sse;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/simple_manhattan.rs | lib/segment/src/spaces/metric_f16/simple_manhattan.rs | use common::types::ScoreType;
use half::f16;
use num_traits::Float;
use crate::data_types::vectors::{DenseVector, VectorElementTypeHalf};
use crate::spaces::metric::Metric;
#[cfg(target_arch = "x86_64")]
use crate::spaces::metric_f16::avx::manhattan::avx_manhattan_similarity_half;
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
use crate::spaces::metric_f16::neon::manhattan::neon_manhattan_similarity_half;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::spaces::metric_f16::sse::manhattan::sse_manhattan_similarity_half;
#[cfg(target_arch = "x86_64")]
use crate::spaces::simple::MIN_DIM_SIZE_AVX;
use crate::spaces::simple::{MIN_DIM_SIZE_SIMD, ManhattanMetric};
use crate::types::Distance;
impl Metric<VectorElementTypeHalf> for ManhattanMetric {
fn distance() -> Distance {
Distance::Manhattan
}
fn similarity(v1: &[VectorElementTypeHalf], v2: &[VectorElementTypeHalf]) -> ScoreType {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
&& v1.len() >= MIN_DIM_SIZE_AVX
{
return unsafe { avx_manhattan_similarity_half(v1, v2) };
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("sse") && v1.len() >= MIN_DIM_SIZE_SIMD {
return unsafe { sse_manhattan_similarity_half(v1, v2) };
}
}
#[cfg(all(target_arch = "aarch64", target_feature = "neon", not(windows)))]
{
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
&& v1.len() >= MIN_DIM_SIZE_SIMD
{
return unsafe { neon_manhattan_similarity_half(v1, v2) };
}
}
manhattan_similarity_half(v1, v2)
}
fn preprocess(vector: DenseVector) -> DenseVector {
vector
}
}
pub fn manhattan_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
-v1.iter()
.zip(v2)
.map(|(a, b)| f16::to_f32((a - b).abs()))
.sum::<f32>()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/avx/euclid.rs | lib/segment/src/spaces/metric_f16/avx/euclid.rs | use std::arch::x86_64::*;
use common::types::ScoreType;
use half::f16;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
#[target_feature(enable = "f16c")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_euclid_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
unsafe {
let n = v1.len();
let m = n - (n % 32);
let mut ptr1: *const __m128i = v1.as_ptr().cast::<__m128i>();
let mut ptr2: *const __m128i = v2.as_ptr().cast::<__m128i>();
let mut sum256_1: __m256 = _mm256_setzero_ps();
let mut sum256_2: __m256 = _mm256_setzero_ps();
let mut sum256_3: __m256 = _mm256_setzero_ps();
let mut sum256_4: __m256 = _mm256_setzero_ps();
let mut addr1s: __m128i;
let mut addr2s: __m128i;
let mut i: usize = 0;
while i < m {
addr1s = _mm_loadu_si128(ptr1);
addr2s = _mm_loadu_si128(ptr2);
let sub256_1: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_1 = _mm256_fmadd_ps(sub256_1, sub256_1, sum256_1);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(1));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(1));
let sub256_2: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_2 = _mm256_fmadd_ps(sub256_2, sub256_2, sum256_2);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(2));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(2));
let sub256_3: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_3 = _mm256_fmadd_ps(sub256_3, sub256_3, sum256_3);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(3));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(3));
let sub256_4: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_4 = _mm256_fmadd_ps(sub256_4, sub256_4, sum256_4);
ptr1 = ptr1.wrapping_add(4);
ptr2 = ptr2.wrapping_add(4);
i += 32;
}
let ptr1_f16: *const f16 = ptr1.cast::<f16>();
let ptr2_f16: *const f16 = ptr2.cast::<f16>();
let mut result = hsum256_ps_avx(sum256_1)
+ hsum256_ps_avx(sum256_2)
+ hsum256_ps_avx(sum256_3)
+ hsum256_ps_avx(sum256_4);
for i in 0..n - m {
result += (f16::to_f32(*ptr1_f16.add(i)) - f16::to_f32(*ptr2_f16.add(i))).powi(2);
}
-result
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_avx() {
use super::*;
use crate::spaces::metric_f16::simple_euclid::*;
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let euclid_simd = unsafe { avx_euclid_similarity_half(&v1, &v2) };
let euclid = euclid_similarity_half(&v1, &v2);
assert!((euclid_simd - euclid).abs() / euclid.abs() < 0.0005);
} else {
println!("avx test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/avx/mod.rs | lib/segment/src/spaces/metric_f16/avx/mod.rs | pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/avx/dot.rs | lib/segment/src/spaces/metric_f16/avx/dot.rs | use std::arch::x86_64::*;
use common::types::ScoreType;
use half::f16;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
#[target_feature(enable = "f16c")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_dot_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
unsafe {
let n = v1.len();
let m = n - (n % 32);
let mut ptr1: *const __m128i = v1.as_ptr().cast::<__m128i>();
let mut ptr2: *const __m128i = v2.as_ptr().cast::<__m128i>();
let mut sum256_1: __m256 = _mm256_setzero_ps();
let mut sum256_2: __m256 = _mm256_setzero_ps();
let mut sum256_3: __m256 = _mm256_setzero_ps();
let mut sum256_4: __m256 = _mm256_setzero_ps();
let mut addr1s: __m128i;
let mut addr2s: __m128i;
let mut i: usize = 0;
while i < m {
addr1s = _mm_loadu_si128(ptr1);
addr2s = _mm_loadu_si128(ptr2);
sum256_1 = _mm256_fmadd_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s), sum256_1);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(1));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(1));
sum256_2 = _mm256_fmadd_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s), sum256_2);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(2));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(2));
sum256_3 = _mm256_fmadd_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s), sum256_3);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(3));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(3));
sum256_4 = _mm256_fmadd_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s), sum256_4);
ptr1 = ptr1.wrapping_add(4);
ptr2 = ptr2.wrapping_add(4);
i += 32;
}
let ptr1_f16: *const f16 = ptr1.cast::<f16>();
let ptr2_f16: *const f16 = ptr2.cast::<f16>();
let mut result = hsum256_ps_avx(sum256_1)
+ hsum256_ps_avx(sum256_2)
+ hsum256_ps_avx(sum256_3)
+ hsum256_ps_avx(sum256_4);
for i in 0..n - m {
result += f16::to_f32(*ptr1_f16.add(i)) * f16::to_f32(*ptr2_f16.add(i));
}
result
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_avx() {
use super::*;
use crate::spaces::metric_f16::simple_dot::*;
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let dot_simd = unsafe { avx_dot_similarity_half(&v1, &v2) };
let dot = dot_similarity_half(&v1, &v2);
assert!((dot_simd - dot).abs() / dot.abs() < 0.0005);
} else {
println!("avx test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/avx/manhattan.rs | lib/segment/src/spaces/metric_f16/avx/manhattan.rs | use std::arch::x86_64::*;
use common::types::ScoreType;
use half::f16;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_avx::hsum256_ps_avx;
#[target_feature(enable = "avx")]
#[target_feature(enable = "fma")]
#[target_feature(enable = "f16c")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn avx_manhattan_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
unsafe {
let mask: __m256 = _mm256_set1_ps(-0.0f32); // 1 << 31 used to clear sign bit to mimic abs
let n = v1.len();
let m = n - (n % 32);
let mut ptr1: *const __m128i = v1.as_ptr().cast::<__m128i>();
let mut ptr2: *const __m128i = v2.as_ptr().cast::<__m128i>();
let mut sum256_1: __m256 = _mm256_setzero_ps();
let mut sum256_2: __m256 = _mm256_setzero_ps();
let mut sum256_3: __m256 = _mm256_setzero_ps();
let mut sum256_4: __m256 = _mm256_setzero_ps();
let mut addr1s: __m128i;
let mut addr2s: __m128i;
let mut i: usize = 0;
while i < m {
addr1s = _mm_loadu_si128(ptr1);
addr2s = _mm_loadu_si128(ptr2);
let sub256_1: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_1 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_1), sum256_1);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(1));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(1));
let sub256_2: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_2 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_2), sum256_2);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(2));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(2));
let sub256_3: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_3 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_3), sum256_3);
addr1s = _mm_loadu_si128(ptr1.wrapping_add(3));
addr2s = _mm_loadu_si128(ptr2.wrapping_add(3));
let sub256_4: __m256 = _mm256_sub_ps(_mm256_cvtph_ps(addr1s), _mm256_cvtph_ps(addr2s));
sum256_4 = _mm256_add_ps(_mm256_andnot_ps(mask, sub256_4), sum256_4);
ptr1 = ptr1.wrapping_add(4);
ptr2 = ptr2.wrapping_add(4);
i += 32;
}
let ptr1_f16: *const f16 = ptr1.cast::<f16>();
let ptr2_f16: *const f16 = ptr2.cast::<f16>();
let mut result = hsum256_ps_avx(sum256_1)
+ hsum256_ps_avx(sum256_2)
+ hsum256_ps_avx(sum256_3)
+ hsum256_ps_avx(sum256_4);
for i in 0..n - m {
result += (f16::to_f32(*ptr1_f16.add(i)) - f16::to_f32(*ptr2_f16.add(i))).abs();
}
-result
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_avx() {
use super::*;
use crate::spaces::metric_f16::simple_manhattan::*;
if is_x86_feature_detected!("avx")
&& is_x86_feature_detected!("fma")
&& is_x86_feature_detected!("f16c")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let manhattan_simd = unsafe { avx_manhattan_similarity_half(&v1, &v2) };
let manhattan = manhattan_similarity_half(&v1, &v2);
assert!((manhattan_simd - manhattan).abs() / manhattan.abs() < 0.0005);
} else {
println!("avx test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/sse/euclid.rs | lib/segment/src/spaces/metric_f16/sse/euclid.rs | use common::types::ScoreType;
use half::f16;
use itertools::Itertools;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_sse;
#[target_feature(enable = "sse")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_euclid_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let v1_f32 = v1.iter().map(|x| f16::to_f32(*x)).collect_vec();
let v2_f32 = v2.iter().map(|x| f16::to_f32(*x)).collect_vec();
unsafe { simple_sse::euclid_similarity_sse(&v1_f32, &v2_f32) }
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_sse() {
use super::*;
use crate::spaces::metric_f16::simple_euclid::*;
if is_x86_feature_detected!("sse") {
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let euclid_simd = unsafe { sse_euclid_similarity_half(&v1, &v2) };
let euclid = euclid_similarity_half(&v1, &v2);
assert!((euclid_simd - euclid).abs() / euclid.abs() < 0.0005);
} else {
println!("sse test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/sse/mod.rs | lib/segment/src/spaces/metric_f16/sse/mod.rs | pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/sse/dot.rs | lib/segment/src/spaces/metric_f16/sse/dot.rs | use common::types::ScoreType;
use half::f16;
use itertools::Itertools;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_sse;
#[target_feature(enable = "sse")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_dot_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let v1_f32 = v1.iter().map(|x| f16::to_f32(*x)).collect_vec();
let v2_f32 = v2.iter().map(|x| f16::to_f32(*x)).collect_vec();
unsafe { simple_sse::dot_similarity_sse(&v1_f32, &v2_f32) }
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_sse() {
use super::*;
use crate::spaces::metric_f16::simple_dot::*;
if is_x86_feature_detected!("sse") {
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let dot_simd = unsafe { sse_dot_similarity_half(&v1, &v2) };
let dot = dot_similarity_half(&v1, &v2);
assert!((dot_simd - dot).abs() / dot.abs() < 0.0005);
} else {
println!("sse test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/sse/manhattan.rs | lib/segment/src/spaces/metric_f16/sse/manhattan.rs | use common::types::ScoreType;
use half::f16;
use itertools::Itertools;
use crate::data_types::vectors::VectorElementTypeHalf;
use crate::spaces::simple_sse;
#[target_feature(enable = "sse")]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn sse_manhattan_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let v1_f32 = v1.iter().map(|x| f16::to_f32(*x)).collect_vec();
let v2_f32 = v2.iter().map(|x| f16::to_f32(*x)).collect_vec();
unsafe { simple_sse::manhattan_similarity_sse(&v1_f32, &v2_f32) }
}
#[cfg(test)]
mod tests {
#[test]
fn test_spaces_sse() {
use super::*;
use crate::spaces::metric_f16::simple_manhattan::*;
if is_x86_feature_detected!("sse") {
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let manhattan_simd = unsafe { sse_manhattan_similarity_half(&v1, &v2) };
let manhattan = manhattan_similarity_half(&v1, &v2);
assert!((manhattan_simd - manhattan).abs() / manhattan.abs() < 0.0005);
} else {
println!("sse test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/neon/euclid.rs | lib/segment/src/spaces/metric_f16/neon/euclid.rs | #[cfg(target_feature = "neon")]
use common::types::ScoreType;
use half::f16;
#[cfg(target_feature = "neon")]
use crate::data_types::vectors::VectorElementTypeHalf;
#[cfg(target_feature = "neon")]
unsafe extern "C" {
fn euclideanDist_half_4x4(v1: *const f16, v2: *const f16, n: i32) -> f32;
}
#[allow(clippy::missing_safety_doc)]
#[cfg(target_feature = "neon")]
pub unsafe fn neon_euclid_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let n = v1.len();
unsafe { -euclideanDist_half_4x4(v1.as_ptr(), v2.as_ptr(), n.try_into().unwrap()) }
}
#[cfg(test)]
mod tests {
#[cfg(target_feature = "neon")]
#[test]
fn test_spaces_neon() {
use super::*;
use crate::spaces::metric_f16::simple_euclid::*;
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let euclid_simd = unsafe { neon_euclid_similarity_half(&v1, &v2) };
let euclid = euclid_similarity_half(&v1, &v2);
assert!((euclid_simd - euclid).abs() / euclid.abs() < 0.0005);
} else {
println!("neon test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/neon/mod.rs | lib/segment/src/spaces/metric_f16/neon/mod.rs | pub mod dot;
pub mod euclid;
pub mod manhattan;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/neon/dot.rs | lib/segment/src/spaces/metric_f16/neon/dot.rs | #[cfg(target_feature = "neon")]
use common::types::ScoreType;
use half::f16;
#[cfg(target_feature = "neon")]
use crate::data_types::vectors::VectorElementTypeHalf;
#[cfg(target_feature = "neon")]
unsafe extern "C" {
fn dotProduct_half_4x4(v1: *const f16, v2: *const f16, n: i32) -> f32;
}
#[allow(clippy::missing_safety_doc)]
#[cfg(target_feature = "neon")]
pub unsafe fn neon_dot_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let n: i32 = v1.len().try_into().unwrap();
unsafe { dotProduct_half_4x4(v1.as_ptr(), v2.as_ptr(), n) }
}
#[cfg(test)]
mod tests {
#[cfg(target_feature = "neon")]
#[test]
fn test_spaces_neon() {
use super::*;
use crate::spaces::metric_f16::simple_dot::*;
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let dot_simd = unsafe { neon_dot_similarity_half(&v1, &v2) };
let dot = dot_similarity_half(&v1, &v2);
assert!((dot_simd - dot).abs() / dot.abs() < 0.0005);
} else {
println!("neon test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/spaces/metric_f16/neon/manhattan.rs | lib/segment/src/spaces/metric_f16/neon/manhattan.rs | #[cfg(target_feature = "neon")]
use common::types::ScoreType;
use half::f16;
#[cfg(target_feature = "neon")]
use crate::data_types::vectors::VectorElementTypeHalf;
#[cfg(target_feature = "neon")]
unsafe extern "C" {
fn manhattanDist_half_4x4(v1: *const f16, v2: *const f16, n: i32) -> f32;
}
#[allow(clippy::missing_safety_doc)]
#[cfg(target_feature = "neon")]
pub unsafe fn neon_manhattan_similarity_half(
v1: &[VectorElementTypeHalf],
v2: &[VectorElementTypeHalf],
) -> ScoreType {
let n = v1.len();
unsafe { -manhattanDist_half_4x4(v1.as_ptr(), v2.as_ptr(), n.try_into().unwrap()) }
}
#[cfg(test)]
mod tests {
#[cfg(target_feature = "neon")]
#[test]
fn test_spaces_neon() {
use super::*;
use crate::spaces::metric_f16::simple_manhattan::*;
if std::arch::is_aarch64_feature_detected!("neon")
&& std::arch::is_aarch64_feature_detected!("fp16")
{
let v1_f32: Vec<f32> = vec![
3.7, 4.3, 5.6, 7.7, 7.6, 4.2, 4.2, 7.3, 4.1, 6., 6.4, 1., 2.4, 7., 2.4, 6.4, 4.8,
2.4, 2.9, 3.9, 3.9, 7.4, 6.9, 5.3, 6.2, 5.2, 5.2, 4.2, 5.9, 1.8, 4.5, 3.5, 3.1,
6.1, 6.5, 2.4, 2.1, 7.5, 2.3, 5.9, 3.6, 2.9, 6.1, 5.9, 3.3, 2.9, 3.7, 6.8, 7.2,
6.5, 3.1, 5.7, 1.1, 7.2, 5.6, 5.1, 7., 2.5, 6.2, 7.6, 7., 6.9, 7.5, 3.2, 5.4, 5.8,
1.9, 4.9, 7.7, 6.5, 3., 2., 6.9, 6.8, 3.3, 1.4, 4.7, 3.7, 1.9, 3.6, 3.9, 7.2, 7.7,
7., 6.9, 5.8, 4.4, 1.8, 4.9, 3.1, 7.9, 6.5, 7.5, 3.7, 4.6, 1.5, 3.4, 1.7, 6.4, 7.3,
4.7, 1.9, 7.7, 8., 4.3, 3.9, 1.5, 6.1, 2.1, 6.9, 2.5, 7.2, 4.1, 4.8, 1., 4.1, 6.3,
5.9, 6.2, 3.9, 4.1, 1.2, 7.3, 1., 4., 3.1, 6., 5.8, 6.8, 2.6, 5.1, 2.3, 1.2, 5.6,
3.3, 1.6, 4.7, 7., 4.7, 7.7, 1.5, 4.1, 4.1, 5.8, 7.5, 7.6, 5.2, 2.8, 6.9, 6.1, 4.3,
5.9, 5.2, 8., 2.1, 1.3, 3.2, 4.3, 5.5, 7.7, 6.8, 2.6, 5.2, 4.1, 4.9, 3.7, 6.2, 1.6,
4.9, 2.6, 6.9, 2.3, 3.9, 7.7, 6.6, 5.3, 3.1, 5.5, 3., 2.4, 1.9, 6.7, 7.1, 6.3, 7.4,
6.8, 2.3, 6.1, 3.6, 1.1, 2.8, 7., 3.5, 4.1, 3.4, 7.4, 1.4, 5.5, 6.3, 6.8, 2., 2.1,
2.7, 7.8, 6., 3.6, 5.9, 3.9, 3.6, 7.8, 5.4, 6.8, 4.6, 7.8, 2.3, 6.2, 7.6, 5.8, 3.3,
3.2, 6.2, 1.9, 6., 5.3, 3.2, 5.8, 7., 1.6, 1.3, 7.7, 6.1, 1.2, 2.8, 2., 2.2, 2.2,
5.4, 4.8, 1.8, 3.6, 1.9, 6., 3.3, 3.1, 4.9, 6.2, 2.9, 6.1, 6.6, 3.9, 3.8, 4.8, 6.1,
6.9, 6.7, 5.9, 6.3, 3.3, 3.2, 5.9,
];
let v2_f32: Vec<f32> = vec![
1.5, 1.3, 1.7, 6.4, 4.6, 6.2, 1.7, 2.6, 4.3, 6.1, 7.2, 3.7, 1.3, 7.3, 3.6, 5.6,
5.9, 5.6, 2.3, 3.7, 7.4, 3.6, 7.5, 7.6, 4.8, 5.6, 2.2, 4.3, 4.4, 4.9, 6.1, 2.9,
5.6, 1.6, 2.4, 7.6, 6., 6.3, 7.3, 1., 3.1, 7., 3.1, 5.5, 2.6, 6.7, 2.2, 1.8, 6.6,
7.1, 1.6, 3.7, 7.7, 6.3, 2.8, 3., 6.5, 3.3, 3.6, 2.7, 7., 4.2, 7.7, 5.6, 3., 7.4,
1.6, 4.2, 3.7, 2.7, 3.4, 7., 2.9, 6.6, 8., 5.7, 4.9, 3.8, 4.9, 7.1, 3.9, 4.8, 5.3,
4.2, 7.2, 6.3, 2.4, 1.5, 3.9, 5.5, 4.1, 6.2, 1., 2.8, 2.7, 6.8, 1.7, 6.7, 1.7, 7.2,
2.1, 6.3, 5.1, 7.3, 4.7, 1.1, 4.4, 6.4, 4.9, 5.8, 5., 7.6, 6.5, 4., 4., 5.9, 5.3,
2.1, 3., 7.9, 6.1, 6.1, 5.3, 5.8, 1.4, 3.2, 3.3, 1.2, 1., 6.2, 4.2, 4.5, 3.5, 5.1,
7., 6., 3.9, 5.5, 6.6, 6.9, 5., 1., 4.8, 4.2, 5.1, 1.1, 1.3, 1.5, 7.9, 7.7, 5.2,
5.4, 1.4, 1.4, 4.6, 4., 3.2, 2.2, 4.3, 7.1, 3.9, 4.5, 6.1, 5.3, 3.2, 1.4, 6.7, 1.6,
2.2, 2.8, 4.7, 6.1, 6.2, 6.1, 1.4, 7., 7.4, 7.3, 4.1, 1.5, 3.3, 7.4, 5.3, 7.9, 4.3,
2.6, 3.6, 4.1, 5.1, 6.4, 5.8, 2.4, 1.8, 4.8, 6.2, 3.5, 5.9, 6.3, 5.1, 4.9, 7.5,
7.1, 2.4, 1.9, 6.3, 4.2, 7.9, 7.4, 5.6, 4.7, 7.4, 7.9, 3.2, 4.8, 5.7, 5.9, 7.4,
2.8, 5.2, 6.4, 5.1, 4., 7.2, 3.6, 2., 3.1, 7.5, 3.7, 2.9, 3.4, 6.1, 1., 1.2, 1.3,
3.8, 2.7, 7.4, 6.6, 5.3, 4.6, 1.8, 3.7, 1.4, 1.1, 1.9, 5.9, 6.5, 4.1, 4.9, 5.7,
3.9, 4.1, 7.2, 5., 7.3, 2.8, 7.1, 7.2, 4., 2.7,
];
let v1: Vec<f16> = v1_f32.iter().map(|x| f16::from_f32(*x)).collect();
let v2: Vec<f16> = v2_f32.iter().map(|x| f16::from_f32(*x)).collect();
let manhattan_simd = unsafe { neon_manhattan_similarity_half(&v1, &v2) };
let manhattan = manhattan_similarity_half(&v1, &v2);
assert!((manhattan_simd - manhattan).abs() / manhattan.abs() < 0.0005);
} else {
println!("neon test skipped");
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/rocksdb_builder.rs | lib/segment/src/segment_constructor/rocksdb_builder.rs | use std::path::{Path, PathBuf};
use std::sync::Arc;
use parking_lot::RwLock;
use super::segment_constructor_base::get_vector_name_with_prefix;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::rocksdb_wrapper;
use crate::types::SegmentConfig;
/// Struct to optionally create and open a RocksDB instance in a lazy way.
/// Used as helper to eventually completely remove RocksDB.
#[derive(Debug)]
pub struct RocksDbBuilder {
path: PathBuf,
column_families: Vec<String>,
rocksdb: Option<Arc<RwLock<rocksdb::DB>>>,
is_required: bool,
}
impl RocksDbBuilder {
pub fn new(path: impl Into<PathBuf>, config: &SegmentConfig) -> OperationResult<Self> {
let path = path.into();
let vector_cfs = config.vector_data.keys().map(|vector_name| {
get_vector_name_with_prefix(rocksdb_wrapper::DB_VECTOR_CF, vector_name)
});
let sparse_vector_cfs =
config
.sparse_vector_data
.iter()
.filter_map(|(vector_name, config)| {
if matches!(
config.storage_type,
crate::types::SparseVectorStorageType::OnDisk
) {
return Some(get_vector_name_with_prefix(
rocksdb_wrapper::DB_VECTOR_CF,
vector_name,
));
}
let (_, _) = (vector_name, config);
None
});
let column_families: Vec<_> = vector_cfs.chain(sparse_vector_cfs).collect();
let rocksdb = if rocksdb_wrapper::check_db_exists(&path) {
Some(open_db(&path, &column_families)?)
} else {
None
};
Ok(Self {
path,
column_families,
rocksdb,
is_required: false,
})
}
pub fn read(&self) -> Option<parking_lot::RwLockReadGuard<'_, rocksdb::DB>> {
self.rocksdb.as_ref().map(|db| db.read())
}
pub fn require(&mut self) -> OperationResult<Arc<RwLock<rocksdb::DB>>> {
let db = match &self.rocksdb {
Some(db) => db,
None => self
.rocksdb
.insert(open_db(&self.path, &self.column_families)?),
};
self.is_required = true;
Ok(db.clone())
}
pub fn build(self) -> Option<Arc<RwLock<rocksdb::DB>>> {
self.rocksdb.filter(|_| self.is_required)
}
}
fn open_db(path: &Path, cfs: &[impl AsRef<str>]) -> OperationResult<Arc<RwLock<rocksdb::DB>>> {
rocksdb_wrapper::open_db(path, cfs).map_err(|err| {
OperationError::service_error(format!(
"failed to open RocksDB at {}: {err}",
path.display(),
))
})
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/batched_reader.rs | lib/segment/src/segment_constructor/batched_reader.rs | use std::cmp::min;
use std::iter::Iterator;
use ahash::AHashMap;
use atomic_refcell::AtomicRef;
use common::small_uint::U24;
use common::types::PointOffsetType;
use crate::data_types::named_vectors::CowVector;
use crate::types::CompactExtendedPointId;
use crate::vector_storage::{Sequential, VectorStorage, VectorStorageEnum};
const BATCH_SIZE: usize = 256;
/// Define location of the point source during segment construction.
pub struct PointData {
pub external_id: CompactExtendedPointId,
/// [`CompactExtendedPointId`] is 17 bytes, we reduce
/// `segment_index` to 3 bytes to avoid paddings and align nicely.
pub segment_index: U24,
pub internal_id: PointOffsetType,
pub version: u64,
pub ordering: u64,
}
/// Batched iterator over points to insert.
/// This structure should read `BATCH_SIZE` points into a buffer,
/// and then iterate over them.
pub struct BatchedVectorReader<'a> {
points_to_insert: &'a [PointData],
source_vector_storages: &'a [AtomicRef<'a, VectorStorageEnum>],
buffer: Vec<(CowVector<'a>, bool)>,
seg_to_points_buffer: AHashMap<U24, Vec<(&'a PointData, usize)>>,
/// Global position of the iterator.
/// From 0 to `points_to_insert.len()`.
position: usize,
}
impl<'a> BatchedVectorReader<'a> {
pub fn new(
points_to_insert: &'a [PointData],
source_vector_storages: &'a [AtomicRef<'a, VectorStorageEnum>],
) -> BatchedVectorReader<'a> {
// We need to allocate the buffer with the size of the batch,
// but we don't know the size of the vectors.
// So we use a placeholder vector with size 0.
let buffer = vec![(CowVector::default(), false); BATCH_SIZE];
BatchedVectorReader {
points_to_insert,
source_vector_storages,
buffer,
seg_to_points_buffer: AHashMap::default(),
position: 0,
}
}
/// Fills the buffer with the next batch of points.
///
/// Reading of a single point looks like this:
///
/// ```text
/// let source_vector_storage = &source_vector_storages[point_data.segment_index.get() as usize];
/// let vec = source_vector_storage.get_vector(point_data.internal_id);
/// let vector_deleted = source_vector_storage.is_deleted_vector(point_data.internal_id);
/// (vec, vector_deleted)
/// ```
fn refill_buffer(&mut self) {
let start_pos = self.position;
let end_pos = min(self.position + BATCH_SIZE, self.points_to_insert.len());
// Read by segments, as we want to localize reads as much as possible.
for pos in start_pos..end_pos {
let point_data = &self.points_to_insert[pos];
let offset_in_batch = pos - start_pos;
self.seg_to_points_buffer
.entry(point_data.segment_index)
.or_default()
.push((point_data, offset_in_batch))
}
for (segment_index, points) in self.seg_to_points_buffer.drain() {
let source_vector_storage = &self.source_vector_storages[segment_index.get() as usize];
for (point_data, offset_in_batch) in points {
let vec = source_vector_storage.get_vector::<Sequential>(point_data.internal_id);
let vector_deleted =
source_vector_storage.is_deleted_vector(point_data.internal_id);
self.buffer[offset_in_batch] = (vec, vector_deleted);
}
}
}
fn refill_buffer_if_needed(&mut self) {
if self.position.is_multiple_of(BATCH_SIZE) {
self.refill_buffer();
}
}
}
impl<'a> Iterator for BatchedVectorReader<'a> {
type Item = (CowVector<'a>, bool);
fn next(&mut self) -> Option<Self::Item> {
if self.position >= self.points_to_insert.len() {
return None;
}
self.refill_buffer_if_needed();
let item = self.buffer[self.position % BATCH_SIZE].clone();
self.position += 1;
Some(item)
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/segment_builder.rs | lib/segment/src/segment_constructor/segment_builder.rs | use std::cmp;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use ahash::AHasher;
use atomic_refcell::AtomicRefCell;
use bitvec::macros::internal::funty::Integral;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::feature_flags;
use common::progress_tracker::ProgressTracker;
use common::small_uint::U24;
use common::types::PointOffsetType;
use fs_err as fs;
use io::storage_version::StorageVersion;
use itertools::Itertools;
use rand::Rng;
use tempfile::TempDir;
use uuid::Uuid;
#[cfg(feature = "rocksdb")]
use super::rocksdb_builder::RocksDbBuilder;
use super::{
create_mutable_id_tracker, create_payload_storage, create_sparse_vector_index,
create_sparse_vector_storage, get_payload_index_path, get_vector_index_path,
get_vector_storage_path, new_segment_path, open_vector_storage,
};
use crate::common::error_logging::LogError;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::entry::entry_point::SegmentEntry;
use crate::id_tracker::compressed::compressed_point_mappings::CompressedPointMappings;
use crate::id_tracker::immutable_id_tracker::ImmutableIdTracker;
use crate::id_tracker::in_memory_id_tracker::InMemoryIdTracker;
use crate::id_tracker::{IdTracker, IdTrackerEnum, for_each_unique_point};
use crate::index::field_index::FieldIndex;
use crate::index::sparse_index::sparse_vector_index::SparseVectorIndexOpenArgs;
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::index::{PayloadIndex, VectorIndexEnum};
use crate::payload_storage::PayloadStorage;
use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
use crate::segment::{Segment, SegmentVersion};
use crate::segment_constructor::batched_reader::{BatchedVectorReader, PointData};
use crate::segment_constructor::{
VectorIndexBuildArgs, VectorIndexOpenArgs, build_vector_index, load_segment,
};
use crate::types::{
CompactExtendedPointId, ExtendedPointId, HnswGlobalConfig, PayloadFieldSchema, PayloadKeyType,
SegmentConfig, SegmentState, SeqNumberType, VectorNameBuf,
};
use crate::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use crate::vector_storage::{VectorStorage, VectorStorageEnum};
/// Structure for constructing segment out of several other segments
pub struct SegmentBuilder {
version: SeqNumberType,
id_tracker: IdTrackerEnum,
payload_storage: PayloadStorageEnum,
vector_data: HashMap<VectorNameBuf, VectorData>,
segment_config: SegmentConfig,
hnsw_global_config: HnswGlobalConfig,
// The path, where fully created segment will be moved
destination_path: PathBuf,
// The temporary segment directory
temp_dir: TempDir,
indexed_fields: HashMap<PayloadKeyType, PayloadFieldSchema>,
// Payload key to defragment data to
defragment_keys: Vec<PayloadKeyType>,
}
struct VectorData {
vector_storage: VectorStorageEnum,
old_indices: Vec<Arc<AtomicRefCell<VectorIndexEnum>>>,
}
impl SegmentBuilder {
pub fn new(
segments_path: &Path,
temp_dir: &Path,
segment_config: &SegmentConfig,
hnsw_global_config: &HnswGlobalConfig,
) -> OperationResult<Self> {
let temp_dir = create_temp_dir(temp_dir)?;
let id_tracker = if segment_config.is_appendable() {
IdTrackerEnum::MutableIdTracker(create_mutable_id_tracker(temp_dir.path())?)
} else {
IdTrackerEnum::InMemoryIdTracker(InMemoryIdTracker::new())
};
#[cfg(feature = "rocksdb")]
let mut db_builder = RocksDbBuilder::new(temp_dir.path(), segment_config)?;
let payload_storage = create_payload_storage(
#[cfg(feature = "rocksdb")]
&mut db_builder,
temp_dir.path(),
segment_config,
)?;
let mut vector_data = HashMap::new();
for (vector_name, vector_config) in &segment_config.vector_data {
let vector_storage_path = get_vector_storage_path(temp_dir.path(), vector_name);
let vector_storage = open_vector_storage(
#[cfg(feature = "rocksdb")]
&mut db_builder,
vector_config,
#[cfg(feature = "rocksdb")]
&Default::default(),
&vector_storage_path,
#[cfg(feature = "rocksdb")]
vector_name,
)?;
vector_data.insert(
vector_name.to_owned(),
VectorData {
vector_storage,
old_indices: Vec::new(),
},
);
}
for (vector_name, sparse_vector_config) in &segment_config.sparse_vector_data {
let vector_storage_path = get_vector_storage_path(temp_dir.path(), vector_name);
let vector_storage = create_sparse_vector_storage(
#[cfg(feature = "rocksdb")]
&mut db_builder,
&vector_storage_path,
#[cfg(feature = "rocksdb")]
vector_name,
&sparse_vector_config.storage_type,
#[cfg(feature = "rocksdb")]
&Default::default(),
)?;
vector_data.insert(
vector_name.to_owned(),
VectorData {
vector_storage,
old_indices: Vec::new(),
},
);
}
let destination_path = new_segment_path(segments_path);
Ok(SegmentBuilder {
version: Default::default(), // default version is 0
id_tracker,
payload_storage,
vector_data,
segment_config: segment_config.clone(),
hnsw_global_config: hnsw_global_config.clone(),
destination_path,
temp_dir,
indexed_fields: Default::default(),
defragment_keys: vec![],
})
}
pub fn set_defragment_keys(&mut self, keys: Vec<PayloadKeyType>) {
self.defragment_keys = keys;
}
pub fn remove_indexed_field(&mut self, field: &PayloadKeyType) {
self.indexed_fields.remove(field);
}
pub fn remove_index_field_if_incompatible(
&mut self,
field: &PayloadKeyType,
schema: &PayloadFieldSchema,
) {
if let Some(existing_schema) = self.indexed_fields.get(field)
&& existing_schema != schema
{
self.indexed_fields.remove(field);
}
}
pub fn add_indexed_field(&mut self, field: PayloadKeyType, schema: PayloadFieldSchema) {
self.indexed_fields.insert(field, schema);
}
/// Get ordering value from the payload index
///
/// Ordering value is used to sort points to keep points with the same payload together
/// Under the assumption that points are queried together, this will reduce the number of
/// random disk reads.
///
/// Note: This value doesn't guarantee strict ordering in ambiguous cases.
/// It should only be used in optimization purposes, not for correctness.
fn _get_ordering_value(internal_id: PointOffsetType, indices: &[FieldIndex]) -> u64 {
let mut ordering = 0;
for payload_index in indices {
match payload_index {
FieldIndex::IntMapIndex(index) => {
if let Some(numbers) = index.get_values(internal_id) {
for number in numbers {
ordering = ordering.wrapping_add(*number as u64);
}
}
break;
}
FieldIndex::KeywordIndex(index) => {
if let Some(keywords) = index.get_values(internal_id) {
for keyword in keywords {
let mut hasher = AHasher::default();
keyword.hash(&mut hasher);
ordering = ordering.wrapping_add(hasher.finish());
}
}
break;
}
FieldIndex::IntIndex(index) => {
if let Some(numbers) = index.get_values(internal_id) {
for number in numbers {
ordering = ordering.wrapping_add(number as u64);
}
}
break;
}
FieldIndex::FloatIndex(index) => {
if let Some(numbers) = index.get_values(internal_id) {
for number in numbers {
// Bit-level conversion of f64 to u64 preserves ordering
// (for positive numbers)
//
// 0.001 -> 4562254508917369340
// 0.01 -> 4576918229304087675
// 0.05 -> 4587366580439587226
// 0.1 -> 4591870180066957722
// 1 -> 4607182418800017408
// 2 -> 4611686018427387904
// 10 -> 4621819117588971520
ordering = ordering.wrapping_add(number.to_bits());
}
}
break;
}
FieldIndex::DatetimeIndex(index) => {
if let Some(dates) = index.get_values(internal_id) {
for date in dates {
ordering = ordering.wrapping_add(date as u64);
}
}
break;
}
FieldIndex::UuidMapIndex(index) => {
if let Some(ids) = index.get_values(internal_id) {
uuid_hash(&mut ordering, ids.copied());
}
break;
}
FieldIndex::UuidIndex(index) => {
if let Some(ids) = index.get_values(internal_id) {
uuid_hash(&mut ordering, ids);
}
break;
}
FieldIndex::GeoIndex(_) => {}
FieldIndex::FullTextIndex(_) => {}
FieldIndex::BoolIndex(_) => {}
FieldIndex::NullIndex(_) => {}
}
}
ordering
}
/// Update current segment builder with all (not deleted) vectors and payload from `segments`.
/// Also defragments if the `defragment_key` is set.
/// However only points in the same call get defragmented and grouped together.
/// Therefore this function should only be called once, unless this behavior is desired.
///
/// # Result
///
/// * `bool` - if `true` - data successfully added, if `false` - process was interrupted
///
pub fn update(&mut self, segments: &[&Segment], stopped: &AtomicBool) -> OperationResult<bool> {
if segments.is_empty() {
return Ok(true);
}
if segments.len() > U24::MAX as usize {
return Err(OperationError::service_error("Too many segments to update"));
}
let mut points_to_insert = Vec::new();
let locked_id_trackers = segments.iter().map(|s| s.id_tracker.borrow()).collect_vec();
for_each_unique_point(locked_id_trackers.iter().map(|i| i.deref()), |item| {
points_to_insert.push(PointData {
external_id: CompactExtendedPointId::from(item.external_id),
segment_index: U24::new_wrapped(item.tracker_index as u32),
internal_id: item.internal_id,
version: item.version,
ordering: 0,
});
});
drop(locked_id_trackers);
let payloads: Vec<_> = segments.iter().map(|i| i.payload_index.borrow()).collect();
for defragment_key in &self.defragment_keys {
for point_data in &mut points_to_insert {
let Some(payload_indices) = payloads[point_data.segment_index.get() as usize]
.field_indexes
.get(defragment_key)
else {
continue;
};
point_data.ordering = point_data.ordering.wrapping_add(Self::_get_ordering_value(
point_data.internal_id,
payload_indices,
));
}
}
if !self.defragment_keys.is_empty() {
points_to_insert.sort_unstable_by_key(|i| i.ordering);
}
let src_segment_max_version = segments.iter().map(|i| i.version()).max().unwrap();
self.version = cmp::max(self.version, src_segment_max_version);
let vector_storages: Vec<_> = segments.iter().map(|i| &i.vector_data).collect();
let internal_range_start = self.id_tracker.available_point_count() as PointOffsetType;
let internal_range_end = internal_range_start + points_to_insert.len() as PointOffsetType;
let new_internal_range = internal_range_start..internal_range_end;
for (vector_name, vector_data) in &mut self.vector_data {
check_process_stopped(stopped)?;
let other_vector_storages = vector_storages
.iter()
.map(|i| {
let other_vector_data = i.get(vector_name).ok_or_else(|| {
OperationError::service_error(format!(
"Cannot update from other segment because it is \
missing vector name {vector_name}"
))
})?;
vector_data
.old_indices
.push(Arc::clone(&other_vector_data.vector_index));
Ok(other_vector_data.vector_storage.borrow())
})
.collect::<Result<Vec<_>, OperationError>>()?;
let mut vectors_iter: BatchedVectorReader =
BatchedVectorReader::new(&points_to_insert, &other_vector_storages);
let internal_range = vector_data
.vector_storage
.update_from(&mut vectors_iter, stopped)?;
if new_internal_range != internal_range {
debug_assert!(
new_internal_range != internal_range,
"Internal ids range mismatch between self segment vectors and other segment vectors\n\
vector_name: {vector_name}, self range: {new_internal_range:?}, other range: {internal_range:?}"
);
return Err(OperationError::service_error(format!(
"Internal ids range mismatch between self segment vectors and other segment vectors\n\
vector_name: {vector_name}, self range: {new_internal_range:?}, other range: {internal_range:?}"
)));
}
}
let hw_counter = HardwareCounterCell::disposable(); // Disposable counter for internal operations.
let internal_id_iter = new_internal_range.zip(points_to_insert.iter());
for (new_internal_id, point_data) in internal_id_iter {
check_process_stopped(stopped)?;
let old_internal_id = point_data.internal_id;
let other_payload = payloads[point_data.segment_index.get() as usize]
.get_payload_sequential(old_internal_id, &hw_counter)?; // Internal operation, no measurement needed!
match self
.id_tracker
.internal_id(ExtendedPointId::from(point_data.external_id))
{
Some(existing_internal_id) => {
debug_assert!(
false,
"This code should not be reachable, cause points were resolved with `merged_points`"
);
let existing_external_version = self
.id_tracker
.internal_version(existing_internal_id)
.unwrap();
let remove_id = if existing_external_version < point_data.version {
// Other version is the newest, remove the existing one and replace
self.id_tracker
.drop(ExtendedPointId::from(point_data.external_id))?;
self.id_tracker.set_link(
ExtendedPointId::from(point_data.external_id),
new_internal_id,
)?;
self.id_tracker
.set_internal_version(new_internal_id, point_data.version)?;
self.payload_storage
.clear(existing_internal_id, &hw_counter)?;
existing_internal_id
} else {
// Old version is still good, do not move anything else
// Mark newly added vector as removed
new_internal_id
};
for vector_data in self.vector_data.values_mut() {
vector_data.vector_storage.delete_vector(remove_id)?;
}
}
None => {
self.id_tracker.set_link(
ExtendedPointId::from(point_data.external_id),
new_internal_id,
)?;
self.id_tracker
.set_internal_version(new_internal_id, point_data.version)?;
}
}
// Propagate payload to new segment
if !other_payload.is_empty() {
self.payload_storage.set(
new_internal_id,
&other_payload,
&HardwareCounterCell::disposable(),
)?;
}
}
for payload in payloads {
for (field, payload_schema) in payload.indexed_fields() {
self.indexed_fields.insert(field, payload_schema);
}
}
Ok(true)
}
/// Finalize the builder into a fully persisted [`Segment`].
///
/// The segment is first fully assembled and flushed inside `temp_dir`
/// (payload storage, ID tracker, quantization, payload index, dense and
/// sparse vector indices), then its state and version are saved, the
/// directory is atomically renamed to `destination_path`, and the segment is
/// loaded back from disk.
///
/// # Arguments
/// * `permit` - CPU/resource permit governing parallelism of index building;
///   released (dropped) once CPU-intensive work is done.
/// * `stopped` - cooperative cancellation flag, checked between long steps.
/// * `rng` - randomness source for index construction (e.g. HNSW).
/// * `hw_counter` - hardware usage counter for payload-index building.
/// * `progress_segment` - progress tracker; subtasks are created per stage.
///
/// # Errors
/// Returns an error on I/O failure of any flush/save step, on cancellation,
/// or if the finished segment cannot be loaded back from `destination_path`.
pub fn build<R: Rng + ?Sized>(
    self,
    permit: ResourcePermit,
    stopped: &AtomicBool,
    rng: &mut R,
    hw_counter: &HardwareCounterCell,
    progress_segment: ProgressTracker,
) -> Result<Segment, OperationError> {
    let (temp_dir, destination_path) = {
        // Decompose the builder; `defragment_keys` is no longer needed here.
        let SegmentBuilder {
            version,
            id_tracker,
            payload_storage,
            mut vector_data,
            segment_config,
            hnsw_global_config,
            destination_path,
            temp_dir,
            indexed_fields,
            defragment_keys: _,
        } = self;
        // Progress subtasks, one per build stage.
        let progress_quantization = progress_segment.subtask("quantization");
        let progress_payload_index = progress_segment.subtask("payload_index");
        // Attach a progress subtask to each payload field to be indexed.
        let indexed_fields = indexed_fields
            .into_iter()
            .map(|(field, payload_schema)| {
                let progress = progress_payload_index
                    .subtask(format!("{}:{field}", payload_schema.name()));
                (field, payload_schema, progress)
            })
            .collect::<Vec<(PayloadKeyType, PayloadFieldSchema, ProgressTracker)>>();
        let progress_vector_index = progress_segment.subtask("vector_index");
        let progress_sparse_vector_index = progress_segment.subtask("sparse_vector_index");
        let appendable_flag = segment_config.is_appendable();
        // Flush payload storage to disk before sharing it.
        payload_storage.flusher()()?;
        let payload_storage_arc = Arc::new(AtomicRefCell::new(payload_storage));
        // Convert the in-memory ID tracker into its immutable on-disk form;
        // mutable/rocksdb trackers are kept as-is.
        let id_tracker = match id_tracker {
            IdTrackerEnum::InMemoryIdTracker(in_memory_id_tracker) => {
                let (versions, mappings) = in_memory_id_tracker.into_internal();
                let compressed_mapping = CompressedPointMappings::from_mappings(mappings);
                let immutable_id_tracker =
                    ImmutableIdTracker::new(temp_dir.path(), &versions, compressed_mapping)?;
                IdTrackerEnum::ImmutableIdTracker(immutable_id_tracker)
            }
            IdTrackerEnum::MutableIdTracker(_) => id_tracker,
            IdTrackerEnum::ImmutableIdTracker(_) => {
                unreachable!("ImmutableIdTracker should not be used for building segment")
            }
            #[cfg(feature = "rocksdb")]
            IdTrackerEnum::RocksDbIdTracker(_) => id_tracker,
        };
        id_tracker.mapping_flusher()()?;
        id_tracker.versions_flusher()()?;
        let id_tracker_arc = Arc::new(AtomicRefCell::new(id_tracker));
        // Build quantized vectors (where configured) before index building.
        let mut quantized_vectors = Self::update_quantization(
            &segment_config,
            &vector_data,
            temp_dir.path(),
            &permit,
            stopped,
            progress_quantization,
        )?;
        // Flush and wrap each dense vector storage; collect old indices for reuse.
        let mut vector_storages_arc = HashMap::new();
        let mut old_indices = HashMap::new();
        for vector_name in segment_config.vector_data.keys() {
            let Some(vector_info) = vector_data.remove(vector_name) else {
                return Err(OperationError::service_error(format!(
                    "Vector storage for vector name {vector_name} not found on segment build"
                )));
            };
            vector_info.vector_storage.flusher()()?;
            let vector_storage_arc = Arc::new(AtomicRefCell::new(vector_info.vector_storage));
            old_indices.insert(vector_name, vector_info.old_indices);
            vector_storages_arc.insert(vector_name.to_owned(), vector_storage_arc);
        }
        // Same for sparse vector storages (no old indices to carry over).
        for vector_name in segment_config.sparse_vector_data.keys() {
            let Some(vector_info) = vector_data.remove(vector_name) else {
                return Err(OperationError::service_error(format!(
                    "Vector storage for vector name {vector_name} not found on sparse segment build"
                )));
            };
            vector_info.vector_storage.flusher()()?;
            let vector_storage_arc = Arc::new(AtomicRefCell::new(vector_info.vector_storage));
            vector_storages_arc.insert(vector_name.to_owned(), vector_storage_arc);
        }
        // Build the payload index and index all requested fields.
        let payload_index_path = get_payload_index_path(temp_dir.path());
        progress_payload_index.start();
        let mut payload_index = StructPayloadIndex::open(
            payload_storage_arc.clone(),
            id_tracker_arc.clone(),
            vector_storages_arc.clone(),
            &payload_index_path,
            appendable_flag,
            true,
        )?;
        for (field, payload_schema, progress) in indexed_fields {
            progress.start();
            payload_index.set_indexed(&field, payload_schema, hw_counter)?;
            check_process_stopped(stopped)?;
        }
        drop(progress_payload_index);
        payload_index.flusher()()?;
        let payload_index_arc = Arc::new(AtomicRefCell::new(payload_index));
        // Try to lock GPU device.
        #[cfg(feature = "gpu")]
        let gpu_devices_manager = crate::index::hnsw_index::gpu::GPU_DEVICES_MANAGER.read();
        #[cfg(feature = "gpu")]
        let gpu_device = gpu_devices_manager
            .as_ref()
            .map(|devices_manager| devices_manager.lock_device(stopped))
            .transpose()?
            .flatten();
        #[cfg(not(feature = "gpu"))]
        let gpu_device = None;
        // Arc permit to share it with each vector store
        let permit = Arc::new(permit);
        progress_vector_index.start();
        // Build a dense vector index per configured vector name.
        for (vector_name, vector_config) in &segment_config.vector_data {
            let vector_storage = vector_storages_arc.remove(vector_name).unwrap();
            let quantized_vectors =
                Arc::new(AtomicRefCell::new(quantized_vectors.remove(vector_name)));
            let index = build_vector_index(
                vector_config,
                VectorIndexOpenArgs {
                    path: &get_vector_index_path(temp_dir.path(), vector_name),
                    id_tracker: id_tracker_arc.clone(),
                    vector_storage: vector_storage.clone(),
                    payload_index: payload_index_arc.clone(),
                    quantized_vectors: quantized_vectors.clone(),
                },
                VectorIndexBuildArgs {
                    permit: permit.clone(),
                    old_indices: &old_indices.remove(vector_name).unwrap(),
                    gpu_device: gpu_device.as_ref(),
                    stopped,
                    rng,
                    hnsw_global_config: &hnsw_global_config,
                    feature_flags: feature_flags(),
                    progress: progress_vector_index.running_subtask(vector_name),
                },
            )?;
            if vector_storage.borrow().is_on_disk() {
                // If vector storage is expected to be on-disk, we need to clear cache
                // to avoid cache pollution
                vector_storage.borrow().clear_cache()?;
            }
            if let Some(quantized_vectors) = quantized_vectors.borrow().as_ref() {
                quantized_vectors.clear_cache()?;
            }
            // Index if always loaded on-disk=true from build function
            // So we may clear unconditionally
            index.clear_cache()?;
        }
        drop(progress_vector_index);
        progress_sparse_vector_index.start();
        // Build a sparse vector index per configured sparse vector name.
        for (vector_name, sparse_vector_config) in &segment_config.sparse_vector_data {
            let vector_index_path = get_vector_index_path(temp_dir.path(), vector_name);
            let vector_storage_arc = vector_storages_arc.remove(vector_name).unwrap();
            let index = create_sparse_vector_index(SparseVectorIndexOpenArgs {
                config: sparse_vector_config.index,
                id_tracker: id_tracker_arc.clone(),
                vector_storage: vector_storage_arc.clone(),
                payload_index: payload_index_arc.clone(),
                path: &vector_index_path,
                stopped,
                tick_progress: || (),
            })?;
            if sparse_vector_config.storage_type.is_on_disk() {
                // If vector storage is expected to be on-disk, we need to clear cache
                // to avoid cache pollution
                vector_storage_arc.borrow().clear_cache()?;
            }
            if sparse_vector_config.index.index_type.is_on_disk() {
                index.clear_cache()?;
            }
        }
        drop(progress_sparse_vector_index);
        if segment_config.payload_storage_type.is_on_disk() {
            // If payload storage is expected to be on-disk, we need to clear cache
            // to avoid cache pollution
            payload_storage_arc.borrow().clear_cache()?;
        }
        // Clear cache for payload index to avoid cache pollution
        payload_index_arc.borrow().clear_cache_if_on_disk()?;
        // We're done with CPU-intensive tasks, release CPU permit
        debug_assert_eq!(
            Arc::strong_count(&permit),
            1,
            "Must release CPU permit Arc everywhere",
        );
        drop(permit);
        // Finalize the newly created segment by saving config and version
        Segment::save_state(
            &SegmentState {
                initial_version: Some(version), // TODO!?
                version: Some(version),
                config: segment_config,
            },
            temp_dir.path(),
        )?;
        // After version is saved, segment can be loaded on restart
        SegmentVersion::save(temp_dir.path())?;
        // All temp data is evicted from RAM
        (temp_dir, destination_path)
    };
    // Move fully constructed segment into collection directory and load back to RAM
    fs::rename(temp_dir.keep(), &destination_path)
        .describe("Moving segment data after optimization")?;
    let loaded_segment = load_segment(&destination_path, stopped)?.ok_or_else(|| {
        OperationError::service_error(format!(
            "Segment loading error: {}",
            destination_path.display()
        ))
    })?;
    Ok(loaded_segment)
}
/// Build quantized representations for every dense vector storage that has
/// quantization configured.
///
/// Returns a map from vector name to the freshly created quantized vectors.
/// A vector name is skipped when:
/// - it has no dense-vector config in `segment_config` (e.g. sparse vectors),
/// - its storage is appendable but the appendable-quantization feature flag
///   is disabled,
/// - its storage is appendable but the configured quantization method does
///   not support appendable storages.
///
/// # Errors
/// Propagates failures from `QuantizedVectors::create` (I/O or cancellation
/// via `stopped`).
fn update_quantization(
    segment_config: &SegmentConfig,
    vector_storages: &HashMap<VectorNameBuf, VectorData>,
    temp_path: &Path,
    permit: &ResourcePermit,
    stopped: &AtomicBool,
    progress: ProgressTracker,
) -> OperationResult<HashMap<VectorNameBuf, QuantizedVectors>> {
    progress.start();
    // Quantization may use every CPU granted by the resource permit.
    // Loop-invariant, so compute it once instead of per vector.
    let max_threads = permit.num_cpus as usize;
    let mut quantized_vectors_map = HashMap::new();
    // NOTE: read `segment_config` directly; the previous full clone of the
    // whole `SegmentConfig` was unnecessary since it is only read here.
    for (vector_name, vector_info) in vector_storages {
        // No dense config for this name (e.g. a sparse vector) — skip.
        let Some(vector_config) = segment_config.vector_data.get(vector_name) else {
            continue;
        };
        let is_appendable = vector_config.is_appendable();
        // If appendable quantization feature is not enabled, skip appendable case.
        if is_appendable && !common::flags::feature_flags().appendable_quantization {
            continue;
        }
        if let Some(quantization_config) = segment_config.quantization_config(vector_name) {
            // Don't build quantization for appendable vectors if quantization method does not support it
            if is_appendable && !quantization_config.supports_appendable() {
                continue;
            }
            let progress_vector = progress.running_subtask(vector_name);
            // Appendable storages require a mutable quantized representation.
            let quantized_storage_type = if is_appendable {
                QuantizedVectorsStorageType::Mutable
            } else {
                QuantizedVectorsStorageType::Immutable
            };
            let vector_storage_path = get_vector_storage_path(temp_path, vector_name);
            let quantized_vectors = QuantizedVectors::create(
                &vector_info.vector_storage,
                quantization_config,
                quantized_storage_type,
                &vector_storage_path,
                max_threads,
                stopped,
            )?;
            quantized_vectors_map.insert(vector_name.to_owned(), quantized_vectors);
            // Explicitly finish the per-vector progress subtask.
            drop(progress_vector);
        }
    }
    Ok(quantized_vectors_map)
}
/// Pre-warm the cache of every vector storage so that subsequent index
/// building reads from memory instead of cold disk.
pub fn populate_vector_storages(&self) -> OperationResult<()> {
    self.vector_data
        .values()
        .try_for_each(|data| data.vector_storage.populate())
}
}
fn uuid_hash<I>(hash: &mut u64, ids: I)
where
I: Iterator<Item = u128>,
{
for id in ids {
let uuid = Uuid::from_u128(id);
// Not all Uuid versions hold timestamp data. The most common version, v4 for example is completely
// random and can't be sorted. To still allow defragmentation, we assume that usually the same
// version gets used for a payload key and implement an alternative sorting criteria, that just
// takes the Uuids bytes to group equal Uuids together.
if let Some(timestamp) = uuid.get_timestamp() {
*hash = hash.wrapping_add(timestamp.to_gregorian().0);
} else {
// First part of u128
*hash = hash.wrapping_add((id >> 64) as u64);
// Second part of u128
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/segment_constructor_base.rs | lib/segment/src/segment_constructor/segment_constructor_base.rs | use std::collections::HashMap;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::flags::FeatureFlags;
use common::is_alive_lock::IsAliveLock;
use common::progress_tracker::ProgressTracker;
use fs_err as fs;
use fs_err::File;
use io::storage_version::StorageVersion;
use log::info;
use parking_lot::Mutex;
#[cfg(feature = "rocksdb")]
use parking_lot::RwLock;
use rand::Rng;
#[cfg(feature = "rocksdb")]
use rocksdb::DB;
use serde::Deserialize;
use uuid::Uuid;
#[cfg(feature = "rocksdb")]
use super::rocksdb_builder::RocksDbBuilder;
use crate::common::operation_error::{OperationError, OperationResult, check_process_stopped};
use crate::data_types::vectors::DEFAULT_VECTOR_NAME;
use crate::id_tracker::immutable_id_tracker::ImmutableIdTracker;
use crate::id_tracker::mutable_id_tracker::MutableIdTracker;
#[cfg(feature = "rocksdb")]
use crate::id_tracker::simple_id_tracker::SimpleIdTracker;
use crate::id_tracker::{IdTracker, IdTrackerEnum, IdTrackerSS};
use crate::index::VectorIndexEnum;
use crate::index::hnsw_index::gpu::gpu_devices_manager::LockedGpuDevice;
use crate::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use crate::index::plain_vector_index::PlainVectorIndex;
use crate::index::sparse_index::sparse_index_config::SparseIndexType;
use crate::index::sparse_index::sparse_vector_index::{
self, SparseVectorIndex, SparseVectorIndexOpenArgs,
};
use crate::index::struct_payload_index::StructPayloadIndex;
use crate::payload_storage::mmap_payload_storage::MmapPayloadStorage;
#[cfg(feature = "rocksdb")]
use crate::payload_storage::on_disk_payload_storage::OnDiskPayloadStorage;
use crate::payload_storage::payload_storage_enum::PayloadStorageEnum;
#[cfg(feature = "rocksdb")]
use crate::payload_storage::simple_payload_storage::SimplePayloadStorage;
use crate::segment::{SEGMENT_STATE_FILE, Segment, SegmentVersion, VectorData};
#[cfg(feature = "rocksdb")]
use crate::types::MultiVectorConfig;
use crate::types::{
Distance, HnswGlobalConfig, Indexes, PayloadStorageType, SegmentConfig, SegmentState,
SegmentType, SeqNumberType, SparseVectorStorageType, VectorDataConfig, VectorName,
VectorStorageDatatype, VectorStorageType,
};
use crate::vector_storage::dense::appendable_dense_vector_storage::{
open_appendable_in_ram_vector_storage, open_appendable_memmap_vector_storage,
open_appendable_memmap_vector_storage_byte, open_appendable_memmap_vector_storage_half,
};
use crate::vector_storage::dense::memmap_dense_vector_storage::{
open_memmap_vector_storage, open_memmap_vector_storage_byte, open_memmap_vector_storage_half,
};
#[cfg(feature = "rocksdb")]
use crate::vector_storage::dense::simple_dense_vector_storage::open_simple_dense_vector_storage;
use crate::vector_storage::multi_dense::appendable_mmap_multi_dense_vector_storage::{
open_appendable_in_ram_multi_vector_storage, open_appendable_memmap_multi_vector_storage,
};
#[cfg(feature = "rocksdb")]
use crate::vector_storage::multi_dense::simple_multi_dense_vector_storage::open_simple_multi_dense_vector_storage;
use crate::vector_storage::quantized::quantized_vectors::QuantizedVectors;
use crate::vector_storage::sparse::mmap_sparse_vector_storage::MmapSparseVectorStorage;
use crate::vector_storage::{VectorStorage, VectorStorageEnum};
/// Subdirectory (inside a segment directory) holding the payload index files.
pub const PAYLOAD_INDEX_PATH: &str = "payload_index";
/// Directory name prefix for per-vector storage data; a named vector gets
/// `vector_storage-<name>` (see `get_vector_name_with_prefix`).
pub const VECTOR_STORAGE_PATH: &str = "vector_storage";
/// Directory name prefix for per-vector index data.
pub const VECTOR_INDEX_PATH: &str = "vector_index";
/// Shorthand: wrap a value into the shared, interior-mutable
/// `Arc<AtomicRefCell<_>>` handle used throughout segment construction.
fn sp<T>(t: T) -> Arc<AtomicRefCell<T>> {
    let cell = AtomicRefCell::new(t);
    Arc::new(cell)
}
/// Combine a storage/index directory `prefix` with a vector name.
///
/// The unnamed (empty) vector maps to the bare prefix; a named vector maps
/// to `"{prefix}-{vector_name}"`.
pub fn get_vector_name_with_prefix(prefix: &str, vector_name: &VectorName) -> String {
    if vector_name.is_empty() {
        return prefix.to_owned();
    }
    format!("{prefix}-{vector_name}")
}
/// Path of the storage directory for `vector_name` inside `segment_path`.
pub fn get_vector_storage_path(segment_path: &Path, vector_name: &VectorName) -> PathBuf {
    let dir_name = get_vector_name_with_prefix(VECTOR_STORAGE_PATH, vector_name);
    segment_path.join(dir_name)
}
/// Path of the index directory for `vector_name` inside `segment_path`.
pub fn get_vector_index_path(segment_path: &Path, vector_name: &VectorName) -> PathBuf {
    let dir_name = get_vector_name_with_prefix(VECTOR_INDEX_PATH, vector_name);
    segment_path.join(dir_name)
}
/// Open (or create) the dense vector storage described by `vector_config`.
///
/// The concrete backend is selected from `vector_config.storage_type`, the
/// element datatype (`Float32`/`Uint8`/`Float16`) and whether a multi-vector
/// config is present. RocksDB-related parameters only exist when the
/// `rocksdb` feature is enabled.
///
/// # Errors
/// Returns a service error for the `Memory` storage type when RocksDB
/// support is compiled out, or propagates errors from the underlying
/// storage constructors.
pub(crate) fn open_vector_storage(
    #[cfg(feature = "rocksdb")] db_builder: &mut RocksDbBuilder,
    vector_config: &VectorDataConfig,
    #[cfg(feature = "rocksdb")] stopped: &AtomicBool,
    vector_storage_path: &Path,
    #[cfg(feature = "rocksdb")] vector_name: &VectorName,
) -> OperationResult<VectorStorageEnum> {
    let storage_element_type = vector_config.datatype.unwrap_or_default();
    match vector_config.storage_type {
        // In memory - RocksDB disabled
        #[cfg(not(feature = "rocksdb"))]
        VectorStorageType::Memory => Err(OperationError::service_error(
            "Failed to load 'Memory' storage type, RocksDB disabled in this Qdrant version",
        )),
        // In memory - RocksDB enabled
        #[cfg(feature = "rocksdb")]
        VectorStorageType::Memory => {
            use crate::common::rocksdb_wrapper::DB_VECTOR_CF;
            // Each named vector gets its own RocksDB column family.
            let db_column_name = get_vector_name_with_prefix(DB_VECTOR_CF, vector_name);
            if let Some(multi_vec_config) = &vector_config.multivector_config {
                open_simple_multi_dense_vector_storage(
                    storage_element_type,
                    db_builder.require()?,
                    &db_column_name,
                    vector_config.size,
                    vector_config.distance,
                    *multi_vec_config,
                    stopped,
                )
            } else {
                open_simple_dense_vector_storage(
                    storage_element_type,
                    db_builder.require()?,
                    &db_column_name,
                    vector_config.size,
                    vector_config.distance,
                    stopped,
                )
            }
        }
        // Mmap on disk, not appendable
        VectorStorageType::Mmap => {
            if let Some(multi_vec_config) = &vector_config.multivector_config {
                // there are no mmap multi vector storages, appendable only
                open_appendable_memmap_multi_vector_storage(
                    storage_element_type,
                    vector_storage_path,
                    vector_config.size,
                    vector_config.distance,
                    *multi_vec_config,
                )
            } else {
                // Pick the mmap storage variant matching the element datatype.
                match storage_element_type {
                    VectorStorageDatatype::Float32 => open_memmap_vector_storage(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                    VectorStorageDatatype::Uint8 => open_memmap_vector_storage_byte(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                    VectorStorageDatatype::Float16 => open_memmap_vector_storage_half(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                }
            }
        }
        // Chunked mmap on disk, appendable
        VectorStorageType::ChunkedMmap => {
            if let Some(multi_vec_config) = &vector_config.multivector_config {
                open_appendable_memmap_multi_vector_storage(
                    storage_element_type,
                    vector_storage_path,
                    vector_config.size,
                    vector_config.distance,
                    *multi_vec_config,
                )
            } else {
                match storage_element_type {
                    VectorStorageDatatype::Float32 => open_appendable_memmap_vector_storage(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                    VectorStorageDatatype::Uint8 => open_appendable_memmap_vector_storage_byte(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                    VectorStorageDatatype::Float16 => open_appendable_memmap_vector_storage_half(
                        vector_storage_path,
                        vector_config.size,
                        vector_config.distance,
                    ),
                }
            }
        }
        // Chunked mmap kept resident in RAM, appendable.
        VectorStorageType::InRamChunkedMmap => {
            if let Some(multi_vec_config) = &vector_config.multivector_config {
                open_appendable_in_ram_multi_vector_storage(
                    storage_element_type,
                    vector_storage_path,
                    vector_config.size,
                    vector_config.distance,
                    *multi_vec_config,
                )
            } else {
                open_appendable_in_ram_vector_storage(
                    storage_element_type,
                    vector_storage_path,
                    vector_config.size,
                    vector_config.distance,
                )
            }
        }
    }
}
/// Open (or create) the payload storage configured for this segment.
///
/// RocksDB-backed variants (`InMemory`, `OnDisk`) are only available when
/// the `rocksdb` feature is enabled; the mmap variants store data directly
/// under `segment_path` (`InRamMmap` additionally keeps data resident in RAM).
pub(crate) fn create_payload_storage(
    #[cfg(feature = "rocksdb")] db_builder: &mut RocksDbBuilder,
    segment_path: &Path,
    config: &SegmentConfig,
) -> OperationResult<PayloadStorageEnum> {
    let payload_storage = match config.payload_storage_type {
        #[cfg(feature = "rocksdb")]
        PayloadStorageType::InMemory => {
            PayloadStorageEnum::from(SimplePayloadStorage::open(db_builder.require()?)?)
        }
        #[cfg(feature = "rocksdb")]
        PayloadStorageType::OnDisk => {
            PayloadStorageEnum::from(OnDiskPayloadStorage::open(db_builder.require()?)?)
        }
        // Second argument controls whether the mmap data is populated in RAM.
        PayloadStorageType::Mmap => PayloadStorageEnum::from(MmapPayloadStorage::open_or_create(
            segment_path.to_path_buf(),
            false,
        )?),
        PayloadStorageType::InRamMmap => PayloadStorageEnum::from(
            MmapPayloadStorage::open_or_create(segment_path.to_path_buf(), true)?,
        ),
    };
    Ok(payload_storage)
}
/// Open the file-based mutable ID tracker stored under `segment_path`.
pub(crate) fn create_mutable_id_tracker(segment_path: &Path) -> OperationResult<MutableIdTracker> {
    let tracker = MutableIdTracker::open(segment_path)?;
    Ok(tracker)
}
/// Open the legacy RocksDB-backed ID tracker over the given database handle.
#[cfg(feature = "rocksdb")]
pub(crate) fn create_rocksdb_id_tracker(
    database: Arc<RwLock<DB>>,
) -> OperationResult<SimpleIdTracker> {
    let tracker = SimpleIdTracker::open(database)?;
    Ok(tracker)
}
/// Open the immutable (read-only, on-disk) ID tracker under `segment_path`.
pub(crate) fn create_immutable_id_tracker(
    segment_path: &Path,
) -> OperationResult<ImmutableIdTracker> {
    let tracker = ImmutableIdTracker::open(segment_path)?;
    Ok(tracker)
}
/// Path of the payload index directory inside `segment_path`.
pub(crate) fn get_payload_index_path(segment_path: &Path) -> PathBuf {
    let mut path = segment_path.to_path_buf();
    path.push(PAYLOAD_INDEX_PATH);
    path
}
/// Shared handles needed to open (or build) a vector index for one named vector.
pub(crate) struct VectorIndexOpenArgs<'a> {
    /// Directory where the index files live.
    pub path: &'a Path,
    /// Segment-wide point ID tracker.
    pub id_tracker: Arc<AtomicRefCell<IdTrackerSS>>,
    /// Storage holding the raw vectors this index is built over.
    pub vector_storage: Arc<AtomicRefCell<VectorStorageEnum>>,
    /// Payload index, used for payload-aware index structures/filtering.
    pub payload_index: Arc<AtomicRefCell<StructPayloadIndex>>,
    /// Quantized representation of the vectors; `None` when quantization is disabled.
    pub quantized_vectors: Arc<AtomicRefCell<Option<QuantizedVectors>>>,
}
/// Additional inputs required when *building* (not just opening) a vector index.
pub struct VectorIndexBuildArgs<'a, R: Rng + ?Sized> {
    /// Resource permit limiting CPU usage during the build; shared across vectors.
    pub permit: Arc<ResourcePermit>,
    /// Vector indices from other segments, used to speed up index building.
    /// May or may not contain the same vectors.
    pub old_indices: &'a [Arc<AtomicRefCell<VectorIndexEnum>>],
    /// Locked GPU device to accelerate building, when available.
    pub gpu_device: Option<&'a LockedGpuDevice<'a>>,
    /// Randomness source for index construction.
    pub rng: &'a mut R,
    /// Cooperative cancellation flag.
    pub stopped: &'a AtomicBool,
    /// Global (instance-wide) HNSW configuration.
    pub hnsw_global_config: &'a HnswGlobalConfig,
    /// Feature flags captured at build time.
    pub feature_flags: FeatureFlags,
    /// Progress tracker for this build.
    pub progress: ProgressTracker,
}
/// Open an existing vector index of the kind configured in `vector_config`.
///
/// `Plain` indices are constructed in place (they keep no files of their
/// own); HNSW indices are opened from `open_args.path`.
pub(crate) fn open_vector_index(
    vector_config: &VectorDataConfig,
    open_args: VectorIndexOpenArgs,
) -> OperationResult<VectorIndexEnum> {
    let VectorIndexOpenArgs {
        path,
        id_tracker,
        vector_storage,
        payload_index,
        quantized_vectors,
    } = open_args;
    match &vector_config.index {
        Indexes::Plain {} => {
            let index =
                PlainVectorIndex::new(id_tracker, vector_storage, quantized_vectors, payload_index);
            Ok(VectorIndexEnum::Plain(index))
        }
        Indexes::Hnsw(hnsw_config) => {
            let index = HNSWIndex::open(HnswIndexOpenArgs {
                path,
                id_tracker,
                vector_storage,
                quantized_vectors,
                payload_index,
                hnsw_config: *hnsw_config,
            })?;
            Ok(VectorIndexEnum::Hnsw(index))
        }
    }
}
/// Build a new vector index of the kind configured in `vector_config`,
/// optionally reusing data from `build_args.old_indices`.
pub(crate) fn build_vector_index<R: Rng + ?Sized>(
    vector_config: &VectorDataConfig,
    open_args: VectorIndexOpenArgs,
    build_args: VectorIndexBuildArgs<R>,
) -> OperationResult<VectorIndexEnum> {
    let VectorIndexOpenArgs {
        path,
        id_tracker,
        vector_storage,
        payload_index,
        quantized_vectors,
    } = open_args;
    match &vector_config.index {
        // Plain index has nothing to build — just wire up the handles.
        Indexes::Plain {} => {
            let index =
                PlainVectorIndex::new(id_tracker, vector_storage, quantized_vectors, payload_index);
            Ok(VectorIndexEnum::Plain(index))
        }
        Indexes::Hnsw(hnsw_config) => {
            let hnsw_open_args = HnswIndexOpenArgs {
                path,
                id_tracker,
                vector_storage,
                quantized_vectors,
                payload_index,
                hnsw_config: *hnsw_config,
            };
            let index = HNSWIndex::build(hnsw_open_args, build_args)?;
            Ok(VectorIndexEnum::Hnsw(index))
        }
    }
}
/// Test-only public wrapper around [`create_sparse_vector_index`].
#[cfg(feature = "testing")]
pub fn create_sparse_vector_index_test(
    args: SparseVectorIndexOpenArgs<impl FnMut()>,
) -> OperationResult<VectorIndexEnum> {
    let index = create_sparse_vector_index(args)?;
    Ok(index)
}
/// Instantiate the sparse vector index variant matching the configured index
/// type, element datatype, and the compile-time `USE_COMPRESSED` switch.
///
/// # Errors
/// Returns a validation error for `Float16`/`Uint8` datatypes when compressed
/// indices are disabled — only `Float32` is supported uncompressed.
pub(crate) fn create_sparse_vector_index(
    args: SparseVectorIndexOpenArgs<impl FnMut()>,
) -> OperationResult<VectorIndexEnum> {
    let vector_index = match (
        args.config.index_type,
        args.config.datatype.unwrap_or_default(),
        sparse_vector_index::USE_COMPRESSED,
    ) {
        // Non-Float32 datatypes require the compressed implementation.
        (_, a @ (VectorStorageDatatype::Float16 | VectorStorageDatatype::Uint8), false) => {
            Err(OperationError::ValidationError {
                description: format!("{a:?} datatype is not supported"),
            })?
        }
        // Mutable RAM index ignores datatype/compression distinctions.
        (SparseIndexType::MutableRam, _, _) => {
            VectorIndexEnum::SparseRam(SparseVectorIndex::open(args)?)
        }
        // Non-compressed
        (SparseIndexType::ImmutableRam, VectorStorageDatatype::Float32, false) => {
            VectorIndexEnum::SparseImmutableRam(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::Mmap, VectorStorageDatatype::Float32, false) => {
            VectorIndexEnum::SparseMmap(SparseVectorIndex::open(args)?)
        }
        // Compressed
        (SparseIndexType::ImmutableRam, VectorStorageDatatype::Float32, true) => {
            VectorIndexEnum::SparseCompressedImmutableRamF32(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::Mmap, VectorStorageDatatype::Float32, true) => {
            VectorIndexEnum::SparseCompressedMmapF32(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::ImmutableRam, VectorStorageDatatype::Float16, true) => {
            VectorIndexEnum::SparseCompressedImmutableRamF16(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::Mmap, VectorStorageDatatype::Float16, true) => {
            VectorIndexEnum::SparseCompressedMmapF16(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::ImmutableRam, VectorStorageDatatype::Uint8, true) => {
            VectorIndexEnum::SparseCompressedImmutableRamU8(SparseVectorIndex::open(args)?)
        }
        (SparseIndexType::Mmap, VectorStorageDatatype::Uint8, true) => {
            VectorIndexEnum::SparseCompressedMmapU8(SparseVectorIndex::open(args)?)
        }
    };
    Ok(vector_index)
}
/// Open (or create) the sparse vector storage for one named vector.
///
/// `OnDisk` (RocksDB-backed, per-vector column family) is only available
/// when the `rocksdb` feature is enabled; `Mmap` stores data under `path`.
pub(crate) fn create_sparse_vector_storage(
    #[cfg(feature = "rocksdb")] db_builder: &mut RocksDbBuilder,
    path: &Path,
    #[cfg(feature = "rocksdb")] vector_name: &VectorName,
    storage_type: &SparseVectorStorageType,
    #[cfg(feature = "rocksdb")] stopped: &AtomicBool,
) -> OperationResult<VectorStorageEnum> {
    match storage_type {
        #[cfg(feature = "rocksdb")]
        SparseVectorStorageType::OnDisk => {
            use crate::common::rocksdb_wrapper::DB_VECTOR_CF;
            use crate::vector_storage::sparse::simple_sparse_vector_storage::open_simple_sparse_vector_storage;
            // Each named vector gets its own RocksDB column family.
            let db_column_name = get_vector_name_with_prefix(DB_VECTOR_CF, vector_name);
            let storage =
                open_simple_sparse_vector_storage(db_builder.require()?, &db_column_name, stopped)?;
            Ok(storage)
        }
        SparseVectorStorageType::Mmap => {
            let mmap_storage = MmapSparseVectorStorage::open_or_create(path)?;
            Ok(VectorStorageEnum::SparseMmap(mmap_storage))
        }
    }
}
/// Open (or create) every component of a segment at `segment_path` and
/// assemble them into a [`Segment`] value.
///
/// `create` is forwarded to the payload index and controls whether missing
/// index files may be created (true on first creation, false when loading).
///
/// # Errors
/// Propagates failures from any component constructor, or a cancellation
/// error when `stopped` is set between vector setups.
fn create_segment(
    initial_version: Option<SeqNumberType>,
    version: Option<SeqNumberType>,
    segment_path: &Path,
    config: &SegmentConfig,
    stopped: &AtomicBool,
    create: bool,
) -> OperationResult<Segment> {
    #[cfg(feature = "rocksdb")]
    let mut db_builder = RocksDbBuilder::new(segment_path, config)?;
    let payload_storage = sp(create_payload_storage(
        #[cfg(feature = "rocksdb")]
        &mut db_builder,
        segment_path,
        config,
    )?);
    let appendable_flag = config.is_appendable();
    // Mutable ID tracker for appendable segments, and for segments that have
    // no immutable mappings file on disk yet.
    let use_mutable_id_tracker =
        appendable_flag || !ImmutableIdTracker::mappings_file_path(segment_path).is_file();
    let id_tracker = create_segment_id_tracker(
        use_mutable_id_tracker,
        segment_path,
        #[cfg(feature = "rocksdb")]
        &mut db_builder,
    )?;
    // Open dense vector storages, one per configured vector name.
    let mut vector_storages = HashMap::new();
    for (vector_name, vector_config) in &config.vector_data {
        let vector_storage_path = get_vector_storage_path(segment_path, vector_name);
        // Select suitable vector storage type based on configuration
        let vector_storage = sp(open_vector_storage(
            #[cfg(feature = "rocksdb")]
            &mut db_builder,
            vector_config,
            #[cfg(feature = "rocksdb")]
            stopped,
            &vector_storage_path,
            #[cfg(feature = "rocksdb")]
            vector_name,
        )?);
        vector_storages.insert(vector_name.to_owned(), vector_storage);
    }
    // Open sparse vector storages as well.
    for (vector_name, sparse_config) in config.sparse_vector_data.iter() {
        let vector_storage_path = get_vector_storage_path(segment_path, vector_name);
        // Select suitable sparse vector storage type based on configuration
        let vector_storage = sp(create_sparse_vector_storage(
            #[cfg(feature = "rocksdb")]
            &mut db_builder,
            &vector_storage_path,
            #[cfg(feature = "rocksdb")]
            vector_name,
            &sparse_config.storage_type,
            #[cfg(feature = "rocksdb")]
            stopped,
        )?);
        vector_storages.insert(vector_name.to_owned(), vector_storage);
    }
    let payload_index_path = get_payload_index_path(segment_path);
    let payload_index: Arc<AtomicRefCell<StructPayloadIndex>> = sp(StructPayloadIndex::open(
        payload_storage.clone(),
        id_tracker.clone(),
        vector_storages.clone(),
        &payload_index_path,
        appendable_flag,
        create,
    )?);
    // Assemble per-vector data: storage + index + optional quantized vectors.
    let mut vector_data = HashMap::new();
    for (vector_name, vector_config) in &config.vector_data {
        let vector_storage_path = get_vector_storage_path(segment_path, vector_name);
        let vector_storage = vector_storages.remove(vector_name).unwrap();
        let vector_index_path = get_vector_index_path(segment_path, vector_name);
        // Warn when number of points between ID tracker and storage differs
        let point_count = id_tracker.borrow().total_point_count();
        let vector_count = vector_storage.borrow().total_vector_count();
        if vector_count != point_count {
            log::debug!(
                "Mismatch of point and vector counts ({point_count} != {vector_count}, storage: {})",
                vector_storage_path.display(),
            );
        }
        // Load quantized vectors from disk, when quantization is configured.
        let quantized_vectors = sp(
            if let Some(quantization_config) = config.quantization_config(vector_name) {
                let quantized_data_path = vector_storage_path;
                QuantizedVectors::load(
                    quantization_config,
                    &vector_storage.borrow(),
                    &quantized_data_path,
                    stopped,
                )?
            } else {
                None
            },
        );
        let vector_index: Arc<AtomicRefCell<VectorIndexEnum>> = sp(open_vector_index(
            vector_config,
            VectorIndexOpenArgs {
                path: &vector_index_path,
                id_tracker: id_tracker.clone(),
                vector_storage: vector_storage.clone(),
                payload_index: payload_index.clone(),
                quantized_vectors: quantized_vectors.clone(),
            },
        )?);
        check_process_stopped(stopped)?;
        vector_data.insert(
            vector_name.to_owned(),
            VectorData {
                vector_index,
                vector_storage,
                quantized_vectors,
            },
        );
    }
    // Same assembly for sparse vectors; sparse storages carry no quantization.
    for (vector_name, sparse_vector_config) in &config.sparse_vector_data {
        let vector_storage_path = get_vector_storage_path(segment_path, vector_name);
        let vector_index_path = get_vector_index_path(segment_path, vector_name);
        let vector_storage = vector_storages.remove(vector_name).unwrap();
        // Warn when number of points between ID tracker and storage differs
        let point_count = id_tracker.borrow().total_point_count();
        let vector_count = vector_storage.borrow().total_vector_count();
        if vector_count != point_count {
            log::debug!(
                "Mismatch of point and vector counts ({point_count} != {vector_count}, storage: {})",
                vector_storage_path.display(),
            );
        }
        let vector_index = sp(create_sparse_vector_index(SparseVectorIndexOpenArgs {
            config: sparse_vector_config.index,
            id_tracker: id_tracker.clone(),
            vector_storage: vector_storage.clone(),
            payload_index: payload_index.clone(),
            path: &vector_index_path,
            stopped,
            tick_progress: || (),
        })?);
        check_process_stopped(stopped)?;
        vector_data.insert(
            vector_name.to_owned(),
            VectorData {
                vector_storage,
                vector_index,
                quantized_vectors: sp(None),
            },
        );
    }
    // Segment counts as `Indexed` as soon as any vector has a real index.
    let segment_type = if config.is_any_vector_indexed() {
        SegmentType::Indexed
    } else {
        SegmentType::Plain
    };
    Ok(Segment {
        initial_version,
        version,
        persisted_version: Arc::new(Mutex::new(version)),
        is_alive_flush_lock: IsAliveLock::new(),
        current_path: segment_path.to_owned(),
        version_tracker: Default::default(),
        id_tracker,
        vector_data,
        segment_type,
        appendable_flag,
        payload_index,
        payload_storage,
        segment_config: config.clone(),
        error_status: None,
        #[cfg(feature = "rocksdb")]
        database: db_builder.build(),
    })
}
/// Create the ID tracker for a segment.
///
/// When `mutable_id_tracker` is false, the immutable on-disk tracker is
/// opened. Otherwise, a mutable tracker is chosen: the file-based
/// `MutableIdTracker` by default, or the legacy RocksDB tracker if existing
/// mappings are found in RocksDB (optionally migrating them to the file-based
/// tracker when the corresponding feature flag is set).
fn create_segment_id_tracker(
    mutable_id_tracker: bool,
    segment_path: &Path,
    #[cfg(feature = "rocksdb")] db_builder: &mut RocksDbBuilder,
) -> OperationResult<Arc<AtomicRefCell<IdTrackerEnum>>> {
    if !mutable_id_tracker {
        return Ok(sp(IdTrackerEnum::ImmutableIdTracker(
            create_immutable_id_tracker(segment_path)?,
        )));
    }
    // Determine whether we use the new (file based) or old (RocksDB) mutable ID tracker
    // Decide based on the feature flag and state on disk
    #[cfg(feature = "rocksdb")]
    {
        use crate::common::rocksdb_wrapper::DB_MAPPING_CF;
        let use_rocksdb_mutable_tracker = if let Some(db) = db_builder.read() {
            // New ID tracker is enabled by default, but we still use the old tracker if we have
            // any mappings stored in RocksDB
            //
            // TODO(1.15 or later): remove this check and use new mutable ID tracker unconditionally
            if let Some(cf) = db.cf_handle(DB_MAPPING_CF) {
                // Estimated key count > 0 means legacy mappings exist.
                let count = db
                    .property_int_value_cf(cf, rocksdb::properties::ESTIMATE_NUM_KEYS)
                    .map_err(|err| {
                        OperationError::service_error(format!(
                            "Failed to get estimated number of keys from RocksDB: {err}"
                        ))
                    })?
                    .unwrap_or_default();
                count > 0
            } else {
                false
            }
        } else {
            false
        };
        if use_rocksdb_mutable_tracker {
            let id_tracker = create_rocksdb_id_tracker(db_builder.require()?)?;
            // Actively migrate RocksDB based ID tracker into mutable ID tracker
            if common::flags::feature_flags().migrate_rocksdb_id_tracker {
                let id_tracker = migrate_rocksdb_id_tracker_to_mutable(id_tracker, segment_path)?;
                return Ok(sp(IdTrackerEnum::MutableIdTracker(id_tracker)));
            }
            return Ok(sp(IdTrackerEnum::RocksDbIdTracker(id_tracker)));
        }
    }
    // Default: new file-based mutable ID tracker.
    Ok(sp(IdTrackerEnum::MutableIdTracker(
        create_mutable_id_tracker(segment_path)?,
    )))
}
/// Load a segment from `path`.
///
/// Returns `Ok(None)` when the segment must be skipped: the directory is
/// marked as deleted (".deleted" extension), or no version file exists (the
/// server may have crashed before the segment was fully saved).
///
/// Older on-disk formats are migrated in place before loading; data newer
/// than the running application version is rejected with an error.
pub fn load_segment(path: &Path, stopped: &AtomicBool) -> OperationResult<Option<Segment>> {
    if path
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext == "deleted")
        .unwrap_or(false)
    {
        log::warn!("Segment is marked as deleted, skipping: {}", path.display());
        // Skip deleted segments
        return Ok(None);
    }
    let Some(stored_version) = SegmentVersion::load(path)? else {
        // Assume segment was not properly saved.
        // Server might have crashed before saving the segment fully.
        log::warn!(
            "Segment version file not found, skipping: {}",
            path.display()
        );
        return Ok(None);
    };
    let app_version = SegmentVersion::current();
    if stored_version != app_version {
        info!("Migrating segment {stored_version} -> {app_version}");
        // Never load data written by a newer application version.
        if stored_version > app_version {
            return Err(OperationError::service_error(format!(
                "Data version {stored_version} is newer than application version {app_version}. \
                 Please upgrade the application. Compatibility is not guaranteed."
            )));
        }
        // Formats older than 0.3 are not migratable at all.
        if stored_version.major == 0 && stored_version.minor < 3 {
            return Err(OperationError::service_error(format!(
                "Segment version({stored_version}) is not compatible with current version({app_version})"
            )));
        }
        // Migrate legacy segment-state formats to the current layout.
        if stored_version.major == 0 && stored_version.minor == 3 {
            let segment_state = load_segment_state_v3(path)?;
            Segment::save_state(&segment_state, path)?;
        } else if stored_version.major == 0 && stored_version.minor <= 5 {
            let segment_state = load_segment_state_v5(path)?;
            Segment::save_state(&segment_state, path)?;
        }
        // Record that the segment is now at the current version.
        SegmentVersion::save(path)?
    }
    #[cfg_attr(not(feature = "rocksdb"), expect(unused_mut))]
    let mut segment_state = Segment::load_state(path)?;
    #[cfg_attr(not(feature = "rocksdb"), expect(unused_mut))]
    let mut segment = create_segment(
        segment_state.initial_version,
        segment_state.version,
        path,
        &segment_state.config,
        stopped,
        false,
    )?;
    // Optionally migrate RocksDB-backed storages to their file-based successors.
    #[cfg(feature = "rocksdb")]
    {
        if common::flags::feature_flags().migrate_rocksdb_vector_storage {
            migrate_all_rocksdb_dense_vector_storages(path, &mut segment, &mut segment_state)?;
            migrate_all_rocksdb_sparse_vector_storages(path, &mut segment, &mut segment_state)?;
        }
        if common::flags::feature_flags().migrate_rocksdb_payload_storage {
            migrate_rocksdb_payload_storage(path, &mut segment, &mut segment_state)?;
        }
    }
    Ok(Some(segment))
}
/// Generate a fresh, unique directory path for a new segment inside `segments_path`.
pub fn new_segment_path(segments_path: &Path) -> PathBuf {
    // A random UUID keeps concurrently created segments from colliding.
    let segment_dir_name = Uuid::new_v4().to_string();
    segments_path.join(segment_dir_name)
}
/// Build a segment instance from the given configuration.
///
/// A fresh folder for the segment is generated inside `segments_path` and all
/// segment information is stored there.
///
/// # Arguments
///
/// * `segments_path` - directory in which the segment folder will be created
/// * `config` - segment configuration
/// * `ready` - whether the segment is ready after building; if so, the segment
///   version file is written
///
/// To load a segment, a saved segment version is required. If `ready` is false
/// the version is not stored, so the segment is skipped on restart when trying
/// to load it again; in that case the version must be stored manually to make
/// the segment ready.
pub fn build_segment(
    segments_path: &Path,
    config: &SegmentConfig,
    ready: bool,
) -> OperationResult<Segment> {
    let dest_dir = new_segment_path(segments_path);
    fs::create_dir_all(&dest_dir)?;

    let never_stopped = AtomicBool::new(false);
    let segment = create_segment(None, None, &dest_dir, config, &never_stopped, true)?;
    segment.save_current_state()?;

    // The version file is written last: its presence marks the segment as
    // completely built. Without it, loading skips this segment.
    if ready {
        SegmentVersion::save(&dest_dir)?;
    }

    Ok(segment)
}
/// Load v0.3.* segment data and migrate to current version
#[allow(deprecated)]
fn load_segment_state_v3(segment_path: &Path) -> OperationResult<SegmentState> {
use crate::compat::{SegmentConfigV5, StorageTypeV5, VectorDataConfigV5};
#[derive(Deserialize)]
#[serde(rename_all = "snake_case")]
#[deprecated]
pub struct SegmentStateV3 {
pub version: SeqNumberType,
pub config: SegmentConfigV3,
}
#[derive(Deserialize)]
#[serde(rename_all = "snake_case")]
#[deprecated]
pub struct SegmentConfigV3 {
/// Size of a vectors used
pub vector_size: usize,
/// Type of distance function used for measuring distance between vectors
pub distance: Distance,
/// Type of index used for search
pub index: Indexes,
/// Type of vector storage
pub storage_type: StorageTypeV5,
/// Defines payload storage type
#[serde(default)]
pub payload_storage_type: PayloadStorageType,
}
let path = segment_path.join(SEGMENT_STATE_FILE);
let mut contents = String::new();
let mut file = File::open(&path)?;
file.read_to_string(&mut contents)?;
serde_json::from_str::<SegmentStateV3>(&contents)
.map(|state| {
// Construct V5 version, then convert into current
let vector_data = VectorDataConfigV5 {
size: state.config.vector_size,
distance: state.config.distance,
hnsw_config: None,
quantization_config: None,
on_disk: None,
};
let segment_config = SegmentConfigV5 {
vector_data: HashMap::from([(DEFAULT_VECTOR_NAME.to_owned(), vector_data)]),
index: state.config.index,
storage_type: state.config.storage_type,
payload_storage_type: state.config.payload_storage_type,
quantization_config: None,
};
SegmentState {
initial_version: None,
version: Some(state.version),
config: segment_config.into(),
}
})
.map_err(|err| {
OperationError::service_error(format!(
"Failed to read segment {}. Error: {}",
path.to_str().unwrap(),
err
))
})
}
/// Load v0.5.0 segment data and migrate to current version
#[allow(deprecated)]
fn load_segment_state_v5(segment_path: &Path) -> OperationResult<SegmentState> {
use crate::compat::SegmentStateV5;
let path = segment_path.join(SEGMENT_STATE_FILE);
let mut contents = String::new();
let mut file = File::open(&path)?;
file.read_to_string(&mut contents)?;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/mod.rs | lib/segment/src/segment_constructor/mod.rs | mod batched_reader;
#[cfg(feature = "rocksdb")]
mod rocksdb_builder;
pub mod segment_builder;
mod segment_constructor_base;
#[cfg(any(test, feature = "testing"))]
pub mod simple_segment_constructor;
pub use segment_constructor_base::*;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/segment_constructor/simple_segment_constructor.rs | lib/segment/src/segment_constructor/simple_segment_constructor.rs | use std::collections::HashMap;
use std::path::Path;
use crate::common::operation_error::OperationResult;
use crate::data_types::vectors::DEFAULT_VECTOR_NAME;
use crate::segment::Segment;
use crate::segment_constructor::build_segment;
use crate::types::{
Distance, Indexes, PayloadStorageType, SegmentConfig, VectorDataConfig, VectorName,
VectorStorageType,
};
// Vector names used by the multi-vector segment built in `build_multivec_segment`.
pub const VECTOR1_NAME: &VectorName = "vector1";
pub const VECTOR2_NAME: &VectorName = "vector2";
/// Build new segment with plain index in given directory
///
/// # Arguments
///
/// * `path` - path to collection\`s segment directory
///
pub fn build_simple_segment(
path: &Path,
dim: usize,
distance: Distance,
) -> OperationResult<Segment> {
build_segment(
path,
&SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: dim,
distance,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: None,
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
},
true,
)
}
/// Build a new plain-indexed segment with a single default dense vector and
/// the given payload storage backend.
pub fn build_simple_segment_with_payload_storage(
    path: &Path,
    dim: usize,
    distance: Distance,
    payload_storage_type: PayloadStorageType,
) -> OperationResult<Segment> {
    // One dense vector under the default name; plain (brute-force) index.
    let vector_config = VectorDataConfig {
        size: dim,
        distance,
        storage_type: VectorStorageType::default(),
        index: Indexes::Plain {},
        quantization_config: None,
        multivector_config: None,
        datatype: None,
    };
    let config = SegmentConfig {
        vector_data: HashMap::from([(DEFAULT_VECTOR_NAME.to_owned(), vector_config)]),
        sparse_vector_data: Default::default(),
        payload_storage_type,
    };
    build_segment(path, &config, true)
}
/// Build a plain-indexed segment with two named dense vectors
/// (`VECTOR1_NAME`, `VECTOR2_NAME`) of dimensions `dim1` and `dim2`,
/// sharing one distance function.
pub fn build_multivec_segment(
    path: &Path,
    dim1: usize,
    dim2: usize,
    distance: Distance,
) -> OperationResult<Segment> {
    // Both vectors share every setting except their dimensionality.
    let make_config = |size| VectorDataConfig {
        size,
        distance,
        storage_type: VectorStorageType::default(),
        index: Indexes::Plain {},
        quantization_config: None,
        multivector_config: None,
        datatype: None,
    };
    let vectors_config = HashMap::from([
        (VECTOR1_NAME.into(), make_config(dim1)),
        (VECTOR2_NAME.into(), make_config(dim2)),
    ]);
    build_segment(
        path,
        &SegmentConfig {
            vector_data: vectors_config,
            sparse_vector_data: Default::default(),
            payload_storage_type: Default::default(),
        },
        true,
    )
}
#[cfg(test)]
mod tests {
    use common::counter::hardware_counter::HardwareCounterCell;
    use tempfile::Builder;
    use super::*;
    use crate::common::operation_error::OperationError;
    use crate::data_types::vectors::only_default_vector;
    use crate::entry::entry_point::SegmentEntry;
    use crate::payload_json;
    /// Smoke test: building a simple segment in a temp dir succeeds.
    #[test]
    fn test_create_simple_segment() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let segment = build_simple_segment(dir.path(), 100, Distance::Dot).unwrap();
        eprintln!(" = {:?}", segment.version());
    }
    /// Covers dimension validation, upserts, payload updates, vector
    /// replacement, and rejection of operations with stale operation numbers.
    #[test]
    fn test_add_and_search() {
        let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
        let mut segment = build_simple_segment(dir.path(), 4, Distance::Dot).unwrap();
        // 3-dimensional vector for a 4-dimensional segment: must be rejected.
        let wrong_vec = vec![1.0, 1.0, 1.0];
        let vec1 = vec![1.0, 0.0, 1.0, 1.0];
        let vec2 = vec![1.0, 0.0, 1.0, 0.0];
        let vec3 = vec![1.0, 1.0, 1.0, 1.0];
        let vec4 = vec![1.0, 1.0, 0.0, 1.0];
        let vec5 = vec![1.0, 0.0, 0.0, 0.0];
        let hw_counter = HardwareCounterCell::new();
        match segment.upsert_point(1, 120.into(), only_default_vector(&wrong_vec), &hw_counter) {
            Err(OperationError::WrongVectorDimension { .. }) => (),
            Err(_) => panic!("Wrong error"),
            Ok(_) => panic!("Operation with wrong vector should fail"),
        };
        segment
            .upsert_point(2, 1.into(), only_default_vector(&vec1), &hw_counter)
            .unwrap();
        segment
            .upsert_point(2, 2.into(), only_default_vector(&vec2), &hw_counter)
            .unwrap();
        segment
            .upsert_point(2, 3.into(), only_default_vector(&vec3), &hw_counter)
            .unwrap();
        segment
            .upsert_point(2, 4.into(), only_default_vector(&vec4), &hw_counter)
            .unwrap();
        segment
            .upsert_point(2, 5.into(), only_default_vector(&vec5), &hw_counter)
            .unwrap();
        segment
            .set_payload(
                3,
                1.into(),
                &payload_json! {"color": vec!["red".to_owned(), "green".to_owned()]},
                &None,
                &hw_counter,
            )
            .unwrap();
        segment
            .set_payload(
                3,
                2.into(),
                &payload_json! {"color": vec!["red".to_owned(), "blue".to_owned()]},
                &None,
                &hw_counter,
            )
            .unwrap();
        segment
            .set_payload(
                3,
                3.into(),
                &payload_json! {"color": vec!["red".to_owned(), "yellow".to_owned()]},
                &None,
                &hw_counter,
            )
            .unwrap();
        segment
            .set_payload(
                3,
                4.into(),
                &payload_json! {"color": vec!["red".to_owned(), "green".to_owned()]},
                &None,
                &hw_counter,
            )
            .unwrap();
        // Replace vectors
        segment
            .upsert_point(4, 1.into(), only_default_vector(&vec1), &hw_counter)
            .unwrap();
        segment
            .upsert_point(5, 2.into(), only_default_vector(&vec2), &hw_counter)
            .unwrap();
        segment
            .upsert_point(6, 3.into(), only_default_vector(&vec3), &hw_counter)
            .unwrap();
        segment
            .upsert_point(7, 4.into(), only_default_vector(&vec4), &hw_counter)
            .unwrap();
        segment
            .upsert_point(8, 5.into(), only_default_vector(&vec5), &hw_counter)
            .unwrap();
        // Segment version follows the highest applied operation number.
        assert_eq!(segment.version(), 8);
        let declined = segment
            .upsert_point(3, 5.into(), only_default_vector(&vec5), &hw_counter)
            .unwrap();
        // Should not be processed due to operation number
        assert!(!declined);
    }
    // ToDo: More tests
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/collection_defaults.rs | lib/segment/src/data_types/collection_defaults.rs | use serde::Deserialize;
use validator::Validate;
use crate::types::{QuantizationConfig, StrictModeConfig, VectorsConfigDefaults};
/// Collection default values
#[derive(Debug, Deserialize, Validate, Clone, PartialEq, Eq)]
pub struct CollectionConfigDefaults {
    /// Default vectors configuration.
    #[serde(default)]
    pub vectors: Option<VectorsConfigDefaults>,
    /// Default quantization configuration; validated recursively.
    #[validate(nested)]
    pub quantization: Option<QuantizationConfig>,
    /// Default total number of shards (must be >= 1 when set).
    #[validate(range(min = 1))]
    pub shard_number: Option<u32>,
    /// Default number of shards per peer (must be >= 1 when set);
    /// see `get_shard_number` for how it combines with `shard_number`.
    #[validate(range(min = 1))]
    pub shard_number_per_node: Option<u32>,
    /// Default replication factor (must be >= 1 when set).
    #[validate(range(min = 1))]
    pub replication_factor: Option<u32>,
    /// Default write consistency factor (must be >= 1 when set).
    #[validate(range(min = 1))]
    pub write_consistency_factor: Option<u32>,
    /// Default strict-mode configuration; validated recursively.
    #[validate(nested)]
    pub strict_mode: Option<StrictModeConfig>,
}
impl CollectionConfigDefaults {
    /// Resolve the effective shard count for a cluster of `number_of_peers` peers.
    ///
    /// Precedence: an explicit `shard_number` wins (a warning is logged when
    /// `shard_number_per_node` is also set); otherwise `shard_number_per_node`
    /// is scaled by the peer count; with neither set, one shard per peer.
    pub fn get_shard_number(&self, number_of_peers: u32) -> u32 {
        if let Some(shard_number) = self.shard_number {
            if self.shard_number_per_node.is_some() {
                log::warn!(
                    "Both shard_number and shard_number_per_node are set. Using shard_number: {shard_number}"
                );
            }
            return shard_number;
        }
        match self.shard_number_per_node {
            Some(per_node) => per_node * number_of_peers,
            None => number_of_peers,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/tiny_map.rs | lib/segment/src/data_types/tiny_map.rs | use std::collections::HashMap;
use std::{borrow, iter, mem, slice};
use tinyvec::TinyVec;
// Number of entries stored inline (without heap allocation) by the backing TinyVec.
pub const CAPACITY: usize = 3;
/// A map backed by a flat list of `(key, value)` pairs.
///
/// Lookups are linear scans, so it is intended for very small maps; up to
/// [`CAPACITY`] entries are kept inline by `TinyVec`.
#[derive(Clone, Debug, Default)]
pub struct TinyMap<K, V>
where
    K: Default,
    V: Default,
{
    // Key uniqueness is maintained by `insert`/`remove` but NOT by
    // `insert_no_check` or `FromIterator`.
    list: TinyVec<[(K, V); CAPACITY]>,
}
impl<K, V> TinyMap<K, V>
where
    K: Default,
    V: Default,
{
    /// Create an empty map.
    pub fn new() -> Self {
        Self {
            list: TinyVec::new(),
        }
    }
    /// Append a `(key, value)` pair WITHOUT checking for an existing entry
    /// with the same key; may introduce duplicate keys (unlike `insert`).
    pub fn insert_no_check(&mut self, key: K, value: V) {
        self.list.push((key, value));
    }
    /// Number of stored entries.
    pub fn len(&self) -> usize {
        self.list.len()
    }
    /// True when the map holds no entries.
    pub fn is_empty(&self) -> bool {
        self.list.is_empty()
    }
    /// Iterate over `(key, value)` pairs in insertion order.
    pub fn iter(&self) -> slice::Iter<'_, (K, V)> {
        self.list.iter()
    }
    /// Mutable iteration over `(key, value)` pairs in insertion order.
    pub fn iter_mut(&mut self) -> slice::IterMut<'_, (K, V)> {
        self.list.iter_mut()
    }
    /// Remove all entries.
    pub fn clear(&mut self) {
        self.list.clear();
    }
    /// Iterate over keys in insertion order.
    pub fn keys(&self) -> impl Iterator<Item = &K> {
        self.list.iter().map(|(k, _)| k)
    }
    /// Iterate over values in insertion order.
    pub fn values(&self) -> impl Iterator<Item = &V> {
        self.list.iter().map(|(_, v)| v)
    }
    /// Mutable iteration over values in insertion order.
    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
        self.list.iter_mut().map(|(_, v)| v)
    }
}
impl<K, V> TinyMap<K, V>
where
    K: Default + Eq,
    V: Default,
{
    /// Insert `value` under `key`; returns the previously stored value, if any.
    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
        if let Some((_, slot)) = self.list.iter_mut().find(|(k, _)| k == &key) {
            return Some(mem::replace(slot, value));
        }
        self.list.push((key, value));
        None
    }
    /// Borrow the value stored under `key`, if present.
    pub fn get<Q>(&self, key: &Q) -> Option<&V>
    where
        K: borrow::Borrow<Q>,
        Q: Eq + ?Sized,
    {
        self.list
            .iter()
            .find_map(|(k, v)| (k.borrow() == key).then_some(v))
    }
    /// Mutably borrow the value stored under `key`, if present.
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: borrow::Borrow<Q>,
        Q: Eq + ?Sized,
    {
        self.list
            .iter_mut()
            .find_map(|(k, v)| if k.borrow() == key { Some(v) } else { None })
    }
    /// Returns the (mutable) value assigned to `key`, if such an entry exists.
    /// Otherwise the default value for `V` is inserted and returned as mutable reference.
    ///
    /// This method automatically clones `key` if required. Therefore Q must implement `ToOwned<Owned = K>`.
    pub fn get_or_insert_default<Q>(&mut self, key: &Q) -> &mut V
    where
        V: Sized,
        K: borrow::Borrow<Q>,
        Q: Eq + ToOwned<Owned = K> + ?Sized,
    {
        let existing = self.list.iter().position(|(k, _)| k.borrow() == key);
        let index = existing.unwrap_or_else(|| {
            // Not found: append a default entry; it lives at the old length.
            self.list.push((key.to_owned(), V::default()));
            self.list.len() - 1
        });
        &mut self.list[index].1
    }
    /// Remove the entry stored under `key`, returning its value if it existed.
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: borrow::Borrow<Q>,
        Q: Eq + ?Sized,
    {
        let index = self.list.iter().position(|(k, _)| k.borrow() == key)?;
        let (_, value) = self.list.remove(index);
        Some(value)
    }
    /// True when an entry with `key` exists.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: borrow::Borrow<Q>,
        Q: Eq + ?Sized,
    {
        self.get(key).is_some()
    }
}
impl<K, V> PartialEq for TinyMap<K, V>
where
    K: Default + Eq,
    V: Default + PartialEq,
{
    /// Order-insensitive equality: same size, and every key of `self` maps to
    /// an equal value in `other`.
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.iter().all(|(k, v)| other.get(k) == Some(v))
    }
}
impl<K, V> IntoIterator for TinyMap<K, V>
where
    K: Default,
    V: Default,
{
    type Item = (K, V);
    type IntoIter = tinyvec::TinyVecIterator<[(K, V); CAPACITY]>;
    /// Consume the map, yielding owned `(key, value)` pairs in insertion order.
    fn into_iter(self) -> Self::IntoIter {
        self.list.into_iter()
    }
}
impl<K, V> From<TinyMap<K, V>> for HashMap<K, V>
where
    K: Default + std::hash::Hash + Eq,
    V: Default,
{
    /// Move all entries into a standard `HashMap`.
    #[inline]
    fn from(value: TinyMap<K, V>) -> Self {
        let mut map = HashMap::with_capacity(value.len());
        map.extend(value);
        map
    }
}
impl<K, V> FromIterator<(K, V)> for TinyMap<K, V>
where
K: Default,
V: Default,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
Self {
list: iter::FromIterator::from_iter(iter),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Exercises insert / insert_no_check / overwrite / remove / get_mut / iter.
    #[test]
    fn test_tiny_map_basic_operations() {
        // Create dummy data
        let mut map: TinyMap<String, String> = TinyMap::new();
        let key = "key".to_string();
        let mut value = "value".to_string();
        let key2 = "key2".to_string();
        let value2 = "value2".to_string();
        let key3 = "key3".to_string();
        let value3 = "value3".to_string();
        // Test insert
        map.insert(key.clone(), value.clone());
        assert_eq!(map.len(), 1);
        assert_eq!(map.get(&key), Some(&value));
        // Test insert_no_check
        map.insert_no_check(key2.clone(), value2.clone());
        assert_eq!(map.len(), 2);
        assert_eq!(map.get(&key2), Some(&value2));
        // Test insert overwrite
        map.insert(key.clone(), value3.clone());
        assert_eq!(map.len(), 2);
        assert_eq!(map.get(&key), Some(&value3));
        // Test remove
        map.remove(&key);
        assert_eq!(map.len(), 1);
        assert_eq!(map.get(&key), None);
        // Test get_mut
        map.clear();
        map.insert(key.clone(), value.clone());
        assert_eq!(map.get_mut(&key), Some(&mut value));
        map.get_mut(&key).unwrap().clone_from(&value3);
        assert_eq!(map.get(&key), Some(&value3));
        // Test iter (pairs come back in insertion order)
        map.clear();
        map.insert(key2.clone(), value2.clone());
        map.insert(key3.clone(), value3.clone());
        let mut iter = map.iter();
        assert_eq!(iter.next(), Some(&(key2, value2)));
        assert_eq!(iter.next(), Some(&(key3, value3)));
    }
    /// Checks that get_or_insert_default finds existing entries and only
    /// inserts a default value for missing keys.
    #[test]
    fn test_tiny_map_get_or_insert() {
        let mut map: TinyMap<String, usize> = TinyMap::new();
        map.insert("a".to_string(), 1);
        map.insert("b".to_string(), 2);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default("a"), 1);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default("b"), 2);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default("c"), 0);
        assert_eq!(map.len(), 3);
        let mut map: TinyMap<usize, usize> = TinyMap::new();
        map.insert(1, 1);
        map.insert(2, 4);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default(&1), 1);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default(&2), 4);
        assert_eq!(map.len(), 2);
        assert_eq!(*map.get_or_insert_default(&3), 0);
        assert_eq!(map.len(), 3);
        *map.get_or_insert_default(&3) = 6;
        assert_eq!(map.len(), 3); // This call should not add an additional item.
        assert_eq!(map.get(&3), Some(&6));
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/index.rs | lib/segment/src/data_types/index.rs | use std::collections::BTreeSet;
use std::fmt;
use std::str::FromStr;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use validator::{Validate, ValidationError, ValidationErrors};
// Keyword
/// Marker for the keyword payload index type (serialized as `"keyword"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum KeywordIndexType {
    #[default]
    Keyword,
}
/// Configuration of a keyword payload index.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct KeywordIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: KeywordIndexType,
    /// If true - used for tenant optimization. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_tenant: Option<bool>,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
// Integer
/// Marker for the integer payload index type (serialized as `"integer"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum IntegerIndexType {
    #[default]
    Integer,
}
/// Configuration of an integer payload index.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct IntegerIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: IntegerIndexType,
    /// If true - support direct lookups.
    /// Default is true.
    pub lookup: Option<bool>,
    /// If true - support ranges filters.
    /// Default is true.
    pub range: Option<bool>,
    /// If true - use this key to organize storage of the collection data.
    /// This option assumes that this key will be used in majority of filtered requests.
    /// Default is false.
    pub is_principal: Option<bool>,
    /// If true, store the index on disk. Default: false.
    /// Default is false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
impl Validate for IntegerIndexParams {
    fn validate(&self) -> Result<(), ValidationErrors> {
        // Exhaustive destructuring: adding a field to the struct forces this
        // validation to be revisited at compile time.
        let IntegerIndexParams {
            r#type: _,
            lookup,
            range,
            is_principal: _,
            on_disk: _,
        } = &self;
        validate_integer_index_params(lookup, range)
    }
}
/// Reject integer-index configurations where the `lookup` and `range`
/// capabilities are both explicitly disabled.
pub fn validate_integer_index_params(
    lookup: &Option<bool>,
    range: &Option<bool>,
) -> Result<(), ValidationErrors> {
    if matches!((lookup, range), (Some(false), Some(false))) {
        let mut errors = ValidationErrors::new();
        errors.add(
            "lookup",
            ValidationError::new("the 'lookup' and 'range' capabilities can't be both disabled"),
        );
        return Err(errors);
    }
    Ok(())
}
// UUID
/// Marker for the UUID payload index type (serialized as `"uuid"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum UuidIndexType {
    #[default]
    Uuid,
}
/// Configuration of a UUID payload index.
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct UuidIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: UuidIndexType,
    /// If true - used for tenant optimization.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_tenant: Option<bool>,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
// Float
/// Marker for the float payload index type (serialized as `"float"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FloatIndexType {
    #[default]
    Float,
}
/// Configuration of a float payload index.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct FloatIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: FloatIndexType,
    /// If true - use this key to organize storage of the collection data.
    /// This option assumes that this key will be used in majority of filtered requests.
    pub is_principal: Option<bool>,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
// Geo
/// Marker for the geo payload index type (serialized as `"geo"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum GeoIndexType {
    #[default]
    Geo,
}
/// Configuration of a geo payload index.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct GeoIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: GeoIndexType,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
// Text
/// Marker for the full-text payload index type (serialized as `"text"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum TextIndexType {
    #[default]
    Text,
}
/// Strategy used to split text into tokens.
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum TokenizerType {
    Prefix,
    Whitespace,
    #[default]
    Word,
    Multilingual,
}
/// Configuration of a full-text payload index.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct TextIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: TextIndexType,
    /// Tokenizer used to split the text; defaults to `word`.
    #[serde(default)]
    pub tokenizer: TokenizerType,
    /// Minimum characters to be tokenized.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub min_token_len: Option<usize>,
    /// Maximum characters to be tokenized.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_token_len: Option<usize>,
    /// If true, lowercase all tokens. Default: true.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub lowercase: Option<bool>,
    /// If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ascii_folding: Option<bool>,
    /// If true, support phrase matching. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub phrase_matching: Option<bool>,
    /// Ignore this set of tokens. Can select from predefined languages and/or provide a custom set.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub stopwords: Option<StopwordsInterface>,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
    /// Algorithm for stemming. Default: disabled.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub stemmer: Option<StemmingAlgorithm>,
}
/// Marker for the snowball stemmer variant (serialized as `"snowball"`).
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum Snowball {
    #[default]
    Snowball,
}
/// Snowball stemmer configuration: the marker type plus the target language.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
pub struct SnowballParams {
    pub r#type: Snowball,
    pub language: SnowballLanguage,
}
/// Different stemming algorithms with their configs.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(untagged)]
pub enum StemmingAlgorithm {
    Snowball(SnowballParams),
}
/// Languages supported by snowball stemmer.
///
/// Each variant also accepts a short language-code serde alias (e.g. `en`).
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum SnowballLanguage {
    #[serde(alias = "ar")]
    Arabic,
    #[serde(alias = "hy")]
    Armenian,
    #[serde(alias = "da")]
    Danish,
    #[serde(alias = "nl")]
    Dutch,
    #[serde(alias = "en")]
    English,
    #[serde(alias = "fi")]
    Finnish,
    #[serde(alias = "fr")]
    French,
    #[serde(alias = "de")]
    German,
    #[serde(alias = "el")]
    Greek,
    #[serde(alias = "hu")]
    Hungarian,
    #[serde(alias = "it")]
    Italian,
    #[serde(alias = "no")]
    Norwegian,
    #[serde(alias = "pt")]
    Portuguese,
    #[serde(alias = "ro")]
    Romanian,
    #[serde(alias = "ru")]
    Russian,
    #[serde(alias = "es")]
    Spanish,
    #[serde(alias = "sv")]
    Swedish,
    #[serde(alias = "ta")]
    Tamil,
    #[serde(alias = "tr")]
    Turkish,
}
impl From<SnowballLanguage> for rust_stemmers::Algorithm {
    /// Map each supported language 1:1 onto the matching `rust_stemmers` algorithm.
    fn from(value: SnowballLanguage) -> Self {
        match value {
            SnowballLanguage::Arabic => rust_stemmers::Algorithm::Arabic,
            SnowballLanguage::Armenian => rust_stemmers::Algorithm::Armenian,
            SnowballLanguage::Danish => rust_stemmers::Algorithm::Danish,
            SnowballLanguage::Dutch => rust_stemmers::Algorithm::Dutch,
            SnowballLanguage::English => rust_stemmers::Algorithm::English,
            SnowballLanguage::Finnish => rust_stemmers::Algorithm::Finnish,
            SnowballLanguage::French => rust_stemmers::Algorithm::French,
            SnowballLanguage::German => rust_stemmers::Algorithm::German,
            SnowballLanguage::Greek => rust_stemmers::Algorithm::Greek,
            SnowballLanguage::Hungarian => rust_stemmers::Algorithm::Hungarian,
            SnowballLanguage::Italian => rust_stemmers::Algorithm::Italian,
            SnowballLanguage::Norwegian => rust_stemmers::Algorithm::Norwegian,
            SnowballLanguage::Portuguese => rust_stemmers::Algorithm::Portuguese,
            SnowballLanguage::Romanian => rust_stemmers::Algorithm::Romanian,
            SnowballLanguage::Russian => rust_stemmers::Algorithm::Russian,
            SnowballLanguage::Spanish => rust_stemmers::Algorithm::Spanish,
            SnowballLanguage::Swedish => rust_stemmers::Algorithm::Swedish,
            SnowballLanguage::Tamil => rust_stemmers::Algorithm::Tamil,
            SnowballLanguage::Turkish => rust_stemmers::Algorithm::Turkish,
        }
    }
}
impl fmt::Display for SnowballLanguage {
    /// Render the language as its serde snake_case name (e.g. `english`),
    /// reusing the JSON serialization and stripping the surrounding quotes.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let json_string = serde_json::to_string(self).map_err(|_| fmt::Error)?;
        f.write_str(json_string.trim_matches('"'))
    }
}
impl FromStr for SnowballLanguage {
    type Err = serde_json::Error;
    /// Parse from the serde name or alias (e.g. `english` or `en`) by
    /// deserializing the quoted string as JSON.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        serde_json::from_str(&format!("\"{s}\""))
    }
}
/// Stopwords configuration: either a single predefined language list or a
/// full set combining languages and custom words.
#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(untagged)]
pub enum StopwordsInterface {
    /// Single predefined language list (serialized as a bare string).
    Language(Language),
    /// Combination of predefined lists and/or custom words (serialized as an object).
    Set(StopwordsSet),
}
impl StopwordsInterface {
    /// Build a stopwords set from custom words only (test helper).
    #[cfg(feature = "testing")]
    pub fn new_custom(custom: &[&str]) -> Self {
        let custom = custom.iter().map(|word| word.to_string()).collect();
        StopwordsInterface::Set(StopwordsSet {
            languages: None,
            custom: Some(custom),
        })
    }
    /// Wrap a single predefined language list (test helper).
    #[cfg(feature = "testing")]
    pub fn new_language(language: Language) -> Self {
        StopwordsInterface::Language(language)
    }
    /// Combine predefined language lists with custom words (test helper).
    #[cfg(feature = "testing")]
    pub fn new_set(languages: &[Language], custom: &[&str]) -> Self {
        let languages = languages.iter().cloned().collect();
        let custom = custom.iter().map(|word| word.to_string()).collect();
        StopwordsInterface::Set(StopwordsSet {
            languages: Some(languages),
            custom: Some(custom),
        })
    }
}
/// Languages selectable for predefined stopwords lists.
///
/// Each variant also accepts a short language-code serde alias (e.g. `en`).
#[derive(
    Debug, Serialize, Deserialize, JsonSchema, Clone, PartialEq, PartialOrd, Ord, Hash, Eq,
)]
#[serde(rename_all = "snake_case")]
pub enum Language {
    #[serde(alias = "ar")]
    Arabic,
    #[serde(alias = "az")]
    Azerbaijani,
    #[serde(alias = "eu")]
    Basque,
    #[serde(alias = "bn")]
    Bengali,
    #[serde(alias = "ca")]
    Catalan,
    #[serde(alias = "zh")]
    Chinese,
    #[serde(alias = "da")]
    Danish,
    #[serde(alias = "nl")]
    Dutch,
    #[serde(alias = "en")]
    English,
    #[serde(alias = "fi")]
    Finnish,
    #[serde(alias = "fr")]
    French,
    #[serde(alias = "de")]
    German,
    #[serde(alias = "el")]
    Greek,
    #[serde(alias = "he")]
    Hebrew,
    #[serde(alias = "hi-en")]
    Hinglish,
    #[serde(alias = "hu")]
    Hungarian,
    #[serde(alias = "id")]
    Indonesian,
    #[serde(alias = "it")]
    Italian,
    #[serde(alias = "jp")]
    Japanese,
    #[serde(alias = "kk")]
    Kazakh,
    #[serde(alias = "ne")]
    Nepali,
    #[serde(alias = "no")]
    Norwegian,
    #[serde(alias = "pt")]
    Portuguese,
    #[serde(alias = "ro")]
    Romanian,
    #[serde(alias = "ru")]
    Russian,
    #[serde(alias = "sl")]
    Slovene,
    #[serde(alias = "es")]
    Spanish,
    #[serde(alias = "sv")]
    Swedish,
    #[serde(alias = "tg")]
    Tajik,
    #[serde(alias = "tr")]
    Turkish,
}
impl fmt::Display for Language {
    /// Render the language as its serde snake_case name (e.g. `english`),
    /// reusing the JSON serialization and stripping the surrounding quotes.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let json_string = serde_json::to_string(self).map_err(|_| fmt::Error)?;
        f.write_str(json_string.trim_matches('"'))
    }
}
impl FromStr for Language {
    type Err = serde_json::Error;
    /// Parse from the serde name or alias (e.g. `english` or `en`) by
    /// deserializing the quoted string as JSON.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        serde_json::from_str(&format!("\"{s}\""))
    }
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
pub struct StopwordsSet {
/// Set of languages to use for stopwords.
/// Multiple pre-defined lists of stopwords can be combined.
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
pub languages: Option<BTreeSet<Language>>,
/// Custom stopwords set. Will be merged with the languages set.
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
pub custom: Option<BTreeSet<String>>,
}
// Bool
/// Marker for the boolean index type; a named single-variant enum is used so
/// the OpenAPI schema gets a concrete `type` field instead of an anonymous tag.
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BoolIndexType {
    #[default]
    Bool,
}

/// Configuration of a payload index over boolean values.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct BoolIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: BoolIndexType,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
// Datetime
/// Marker for the datetime index type; a named single-variant enum is used so
/// the OpenAPI schema gets a concrete `type` field instead of an anonymous tag.
#[derive(Default, Debug, Deserialize, Serialize, JsonSchema, Clone, Copy, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub enum DatetimeIndexType {
    #[default]
    Datetime,
}

/// Configuration of a payload index over datetime values.
#[derive(Debug, Default, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Hash, Eq)]
#[serde(rename_all = "snake_case")]
pub struct DatetimeIndexParams {
    // Required for OpenAPI schema without anonymous types, versus #[serde(tag = "type")]
    pub r#type: DatetimeIndexType,
    /// If true - use this key to organize storage of the collection data.
    /// This option assumes that this key will be used in majority of filtered requests.
    // Consistency fix: match the serde attributes of `on_disk` (and of
    // `BoolIndexParams`) so an unset option is omitted rather than serialized
    // as `"is_principal": null`. Deserialization is unaffected: a missing
    // Option field still becomes `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_principal: Option<bool>,
    /// If true, store the index on disk. Default: false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub on_disk: Option<bool>,
}
#[cfg(test)]
mod tests {
    use super::*;

    // A bare language round-trips as a plain JSON string.
    #[test]
    fn test_stopwords_option_language_serialization() {
        let stopwords = StopwordsInterface::Language(Language::English);
        let json = serde_json::to_string(&stopwords).unwrap();
        assert_eq!(json, r#""english""#);
        let deserialized: StopwordsInterface = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized, stopwords);
    }

    // A set round-trips as an object with `languages` and `custom` keys.
    #[test]
    fn test_stopwords_option_set_serialization() {
        let stopwords = StopwordsInterface::new_set(&[Language::English], &["AAA"]);
        let json = serde_json::to_string(&stopwords).unwrap();
        let expected = r#"{"languages":["english"],"custom":["AAA"]}"#;
        assert_eq!(json, expected);
        let deserialized: StopwordsInterface = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized, stopwords);
    }

    // Deserialization accepts the various user-facing JSON shapes.
    #[test]
    fn test_deserialize_stopwords_from_json_examples() {
        let json1 = r#"{"custom": ["as", "the", "a"]}"#;
        let stopwords1: StopwordsInterface = serde_json::from_str(json1).unwrap();
        let expected = StopwordsInterface::new_custom(&["as", "the", "a"]);
        assert_eq!(stopwords1, expected);
        let json2 = r#""english""#;
        let stopwords2: StopwordsInterface = serde_json::from_str(json2).unwrap();
        if let StopwordsInterface::Language(lang) = stopwords2 {
            assert_eq!(lang, Language::English);
        } else {
            panic!("Expected Language");
        }
        // single language
        let json3 = r#"{"languages": ["english"], "custom": ["AAA"]}"#;
        let stopwords3: StopwordsInterface = serde_json::from_str(json3).unwrap();
        let expected = StopwordsInterface::new_set(&[Language::English], &["AAA"]);
        assert_eq!(stopwords3, expected);
        // languages array
        let json4 = r#"{"languages": ["english", "spanish"], "custom": ["AAA"]}"#;
        let stopwords4: StopwordsInterface = serde_json::from_str(json4).unwrap();
        let expected =
            StopwordsInterface::new_set(&[Language::English, Language::Spanish], &["AAA"]);
        assert_eq!(stopwords4, expected);
        // null languages field
        let json5 = r#"{"languages": null, "custom": ["AAA"]}"#;
        let stopwords5: StopwordsInterface = serde_json::from_str(json5).unwrap();
        let expected = StopwordsInterface::new_custom(&["AAA"]);
        assert_eq!(stopwords5, expected);
    }

    // Short ISO-style aliases (en, fr, es, ...) are accepted everywhere
    // a full language name is.
    #[test]
    fn test_language_aliases() {
        // Test that language aliases work for deserialization
        let json_en = r#""en""#;
        let lang_en: Language = serde_json::from_str(json_en).unwrap();
        assert_eq!(lang_en, Language::English);
        let json_fr = r#""fr""#;
        let lang_fr: Language = serde_json::from_str(json_fr).unwrap();
        assert_eq!(lang_fr, Language::French);
        let json_es = r#""es""#;
        let lang_es: Language = serde_json::from_str(json_es).unwrap();
        assert_eq!(lang_es, Language::Spanish);
        // Test aliases in StopwordsInterface
        let json_interface = r#""en""#;
        let stopwords: StopwordsInterface = serde_json::from_str(json_interface).unwrap();
        if let StopwordsInterface::Language(lang) = stopwords {
            assert_eq!(lang, Language::English);
        } else {
            panic!("Expected Language");
        }
        // Test aliases in StopwordsSet
        let json_set = r#"{"languages": ["en"], "custom": ["AAA"]}"#;
        let stopwords_set: StopwordsInterface = serde_json::from_str(json_set).unwrap();
        let expected = StopwordsInterface::new_set(&[Language::English], &["AAA"]);
        assert_eq!(stopwords_set, expected);
        // languages array
        let json_set_compat = r#"{"languages": ["en", "es"], "custom": ["AAA"]}"#;
        let stopwords_set_compat: StopwordsInterface =
            serde_json::from_str(json_set_compat).unwrap();
        let expected_set =
            StopwordsInterface::new_set(&[Language::English, Language::Spanish], &["AAA"]);
        assert_eq!(stopwords_set_compat, expected_set);
    }

    #[test]
    fn test_unsupported_language_error() {
        // Test that unsupported languages are rejected with a clear error message
        let json_unsupported = r#""klingon""#;
        let result = serde_json::from_str::<Language>(json_unsupported);
        assert!(result.is_err());
        let error = result.unwrap_err().to_string();
        assert!(
            error.contains("klingon"),
            "Error message should contain 'klingon', got: {error}",
        );
        let json_interface = r#""klingon""#;
        let result = serde_json::from_str::<StopwordsInterface>(json_interface);
        assert!(result.is_err());
        let json_set = r#"{"languages": "klingon", "custom": ["AAA"]}"#;
        let result = serde_json::from_str::<StopwordsInterface>(json_set);
        assert!(result.is_err());
        // languages array
        let json_set_compat = r#"{"languages": ["english", "klingon"], "custom": ["AAA"]}"#;
        let result = serde_json::from_str::<StopwordsInterface>(json_set_compat);
        assert!(result.is_err());
    }

    #[test]
    fn test_language_field_aliasing() {
        // Test that "languages" key works for deserialization
        // Multiple languages using "languages" key should work
        let json_multiple = r#"{"languages": ["english", "french"], "custom": ["AAA"]}"#;
        let stopwords_multiple: StopwordsInterface = serde_json::from_str(json_multiple).unwrap();
        let expected_set =
            StopwordsInterface::new_set(&[Language::English, Language::French], &["AAA"]);
        assert_eq!(stopwords_multiple, expected_set);
    }
}
// ===== file: lib/segment/src/data_types/vectors.rs =====
use std::collections::HashMap;
use std::hash::Hash;
use std::mem;
use std::slice::ChunksExactMut;
use half::f16;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::types::DimId;
use validator::Validate;
use super::named_vectors::NamedVectors;
use super::primitive::PrimitiveVectorElement;
use crate::common::operation_error::{OperationError, OperationResult};
use crate::common::utils::transpose_map_into_named_vector;
use crate::types::{VectorName, VectorNameBuf};
use crate::vector_storage::query::{
ContextQuery, DiscoveryQuery, NaiveFeedbackQuery, RecoQuery, TransformInto,
};
/// How many dimensions of a sparse vector are considered to be a single unit for cost estimation.
const SPARSE_DIMS_COST_UNIT: usize = 64;
/// Internal representation of a single vector value: dense, sparse, or
/// multi-dense (several dense sub-vectors of equal dimension).
#[derive(Clone, Debug, PartialEq, Serialize)]
pub enum VectorInternal {
    Dense(DenseVector),
    Sparse(SparseVector),
    MultiDense(MultiDenseVectorInternal),
}
impl Hash for VectorInternal {
    // Manual impl because f32 is not `Hash`; floats are wrapped in
    // `OrderedFloat` to get a well-defined hash.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Hash the discriminant first so equal payloads in different variants
        // do not collide.
        mem::discriminant(self).hash(state);
        match self {
            VectorInternal::Dense(v) => {
                for element in v {
                    OrderedFloat(*element).hash(state);
                }
            }
            VectorInternal::Sparse(v) => {
                // Destructure so a new field on SparseVector forces a review here.
                let SparseVector { indices, values } = v;
                indices.hash(state);
                for value in values {
                    OrderedFloat(*value).hash(state);
                }
            }
            VectorInternal::MultiDense(v) => {
                v.hash(state);
            }
        }
    }
}
impl VectorInternal {
    /// Returns the estimated cost of using this vector in terms of the number of how many similarity comparisons vector will make against one point.
    pub fn similarity_cost(&self) -> usize {
        match self {
            VectorInternal::Dense(_dense) => 1,
            // Sparse cost scales with the number of populated dimensions,
            // counted in SPARSE_DIMS_COST_UNIT-sized units (rounded up).
            VectorInternal::Sparse(sparse) => sparse.indices.len().div_ceil(SPARSE_DIMS_COST_UNIT),
            // Every sub-vector of a multi-vector is compared individually.
            VectorInternal::MultiDense(multivec) => multivec.vectors_count(),
        }
    }

    /// Preprocess the vector
    ///
    /// For a sparse vector, indices will be sorted.
    pub fn preprocess(&mut self) {
        match self {
            VectorInternal::Dense(_) => {}
            VectorInternal::Sparse(sparse) => {
                // Sort only when needed; already-sorted input is untouched.
                if !sparse.is_sorted() {
                    sparse.sort_by_indices();
                }
            }
            VectorInternal::MultiDense(_) => {}
        }
    }

    /// Builds a sparse vector when `indices` are provided, a dense one otherwise.
    pub fn from_vector_and_indices(vector: DenseVector, indices: Option<Vec<DimId>>) -> Self {
        if let Some(indices) = indices {
            VectorInternal::Sparse(SparseVector {
                indices,
                values: vector,
            })
        } else {
            VectorInternal::Dense(vector)
        }
    }
}
/// Borrowed counterpart of [`VectorInternal`]; cheap to copy and pass around.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum VectorRef<'a> {
    Dense(&'a [VectorElementType]),
    Sparse(&'a SparseVector),
    MultiDense(TypedMultiDenseVectorRef<'a, VectorElementType>),
}
// Fallible extraction of the concrete variant out of a `VectorRef`; the error
// names the variant that was found instead of the requested one.
impl<'a> TryFrom<VectorRef<'a>> for &'a [VectorElementType] {
    type Error = OperationError;

    fn try_from(value: VectorRef<'a>) -> Result<Self, Self::Error> {
        match value {
            VectorRef::Dense(v) => Ok(v),
            VectorRef::Sparse(_) => Err(OperationError::WrongSparse),
            VectorRef::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl<'a> TryFrom<VectorRef<'a>> for &'a SparseVector {
    type Error = OperationError;

    fn try_from(value: VectorRef<'a>) -> Result<Self, Self::Error> {
        match value {
            VectorRef::Dense(_) => Err(OperationError::WrongSparse),
            VectorRef::Sparse(v) => Ok(v),
            VectorRef::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl<'a> TryFrom<VectorRef<'a>> for TypedMultiDenseVectorRef<'a, f32> {
    type Error = OperationError;

    fn try_from(value: VectorRef<'a>) -> Result<Self, Self::Error> {
        match value {
            // A dense vector is viewed as a multi-vector with one sub-vector.
            VectorRef::Dense(d) => Ok(TypedMultiDenseVectorRef {
                flattened_vectors: d,
                dim: d.len(),
            }),
            VectorRef::Sparse(_v) => Err(OperationError::WrongSparse),
            VectorRef::MultiDense(v) => Ok(v),
        }
    }
}
// Drops the name and keeps only the vector payload.
impl From<NamedVectorStruct> for VectorInternal {
    fn from(value: NamedVectorStruct) -> Self {
        match value {
            NamedVectorStruct::Default(v) => VectorInternal::Dense(v),
            NamedVectorStruct::Dense(v) => VectorInternal::Dense(v.vector),
            NamedVectorStruct::Sparse(v) => VectorInternal::Sparse(v.vector),
            NamedVectorStruct::MultiDense(v) => VectorInternal::MultiDense(v.vector),
        }
    }
}

impl TryFrom<VectorInternal> for DenseVector {
    type Error = OperationError;

    fn try_from(value: VectorInternal) -> Result<Self, Self::Error> {
        match value {
            VectorInternal::Dense(v) => Ok(v),
            VectorInternal::Sparse(_) => Err(OperationError::WrongSparse),
            VectorInternal::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl TryFrom<VectorInternal> for SparseVector {
    type Error = OperationError;

    fn try_from(value: VectorInternal) -> Result<Self, Self::Error> {
        match value {
            VectorInternal::Dense(_) => Err(OperationError::WrongSparse),
            VectorInternal::Sparse(v) => Ok(v),
            VectorInternal::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl TryFrom<VectorInternal> for MultiDenseVectorInternal {
    type Error = OperationError;

    fn try_from(value: VectorInternal) -> Result<Self, Self::Error> {
        match value {
            VectorInternal::Dense(v) => {
                // expand single dense vector into multivector with a single vector
                let len = v.len();
                Ok(MultiDenseVectorInternal::new(v, len))
            }
            VectorInternal::Sparse(_) => Err(OperationError::WrongSparse),
            VectorInternal::MultiDense(v) => Ok(v),
        }
    }
}
// Infallible borrow conversions into `VectorRef`.
impl<'a> From<&'a [VectorElementType]> for VectorRef<'a> {
    fn from(val: &'a [VectorElementType]) -> Self {
        VectorRef::Dense(val)
    }
}

impl<'a> From<&'a DenseVector> for VectorRef<'a> {
    fn from(val: &'a DenseVector) -> Self {
        VectorRef::Dense(val.as_slice())
    }
}

impl<'a> From<&'a MultiDenseVectorInternal> for VectorRef<'a> {
    fn from(val: &'a MultiDenseVectorInternal) -> Self {
        VectorRef::MultiDense(TypedMultiDenseVectorRef::from(val))
    }
}

impl<'a> From<TypedMultiDenseVectorRef<'a, VectorElementType>> for VectorRef<'a> {
    fn from(val: TypedMultiDenseVectorRef<'a, VectorElementType>) -> Self {
        VectorRef::MultiDense(val)
    }
}

impl<'a> From<&'a SparseVector> for VectorRef<'a> {
    fn from(val: &'a SparseVector) -> Self {
        VectorRef::Sparse(val)
    }
}

// Infallible owned conversions into `VectorInternal`.
impl From<DenseVector> for VectorInternal {
    fn from(val: DenseVector) -> Self {
        VectorInternal::Dense(val)
    }
}

impl From<SparseVector> for VectorInternal {
    fn from(val: SparseVector) -> Self {
        VectorInternal::Sparse(val)
    }
}

impl From<MultiDenseVectorInternal> for VectorInternal {
    fn from(val: MultiDenseVectorInternal) -> Self {
        VectorInternal::MultiDense(val)
    }
}

// Borrow an owned `VectorInternal` as a `VectorRef` without cloning payloads.
impl<'a> From<&'a VectorInternal> for VectorRef<'a> {
    fn from(val: &'a VectorInternal) -> Self {
        match val {
            VectorInternal::Dense(v) => VectorRef::Dense(v.as_slice()),
            VectorInternal::Sparse(v) => VectorRef::Sparse(v),
            VectorInternal::MultiDense(v) => {
                VectorRef::MultiDense(TypedMultiDenseVectorRef::from(v))
            }
        }
    }
}
/// Type of vector element.
pub type VectorElementType = f32;

// Reduced-precision element types used by quantized/byte storages.
pub type VectorElementTypeHalf = f16;
pub type VectorElementTypeByte = u8;

// Name used when a collection has a single unnamed vector.
pub const DEFAULT_VECTOR_NAME: &VectorName = "";

pub type TypedDenseVector<T> = Vec<T>;

/// Type for dense vector
pub type DenseVector = TypedDenseVector<VectorElementType>;
/// Type for multi dense vector
///
/// All sub-vectors share the same dimension and are stored back-to-back in a
/// single flat buffer.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct TypedMultiDenseVector<T> {
    pub flattened_vectors: TypedDenseVector<T>, // vectors are flattened into a single vector
    pub dim: usize,                             // dimension of each vector
}
impl<T> TypedMultiDenseVector<T> {
    /// Builds a multi-vector from an already-flattened buffer.
    ///
    /// Errors when `dim` is zero, the buffer is empty, or its length is not a
    /// multiple of `dim`.
    pub fn try_from_flatten(vectors: Vec<T>, dim: usize) -> Result<Self, OperationError> {
        // Check dim first: `is_multiple_of(0)` below would otherwise accept
        // only empty input and give a confusing error.
        if dim == 0 {
            return Err(OperationError::ValidationError {
                description: "MultiDenseVector cannot have zero dimension".to_string(),
            });
        }
        if !vectors.len().is_multiple_of(dim) || vectors.is_empty() {
            return Err(OperationError::ValidationError {
                description: format!(
                    "Invalid multi-vector length: {}, expected multiple of {}",
                    vectors.len(),
                    dim
                ),
            });
        }
        Ok(TypedMultiDenseVector {
            flattened_vectors: vectors,
            dim,
        })
    }

    /// Builds a multi-vector from a row matrix, validating that all rows have
    /// the same non-zero dimension (taken from the first row).
    pub fn try_from_matrix(matrix: Vec<Vec<T>>) -> Result<Self, OperationError> {
        if matrix.is_empty() {
            return Err(OperationError::ValidationError {
                description: "MultiDenseVector cannot be empty".to_string(),
            });
        }
        let dim = matrix[0].len();
        if dim == 0 {
            return Err(OperationError::ValidationError {
                description: "MultiDenseVector cannot have zero dimension".to_string(),
            });
        }
        // assert all vectors have the same dimension
        if let Some(bad_vec) = matrix.iter().find(|v| v.len() != dim) {
            return Err(OperationError::WrongVectorDimension {
                expected_dim: dim,
                received_dim: bad_vec.len(),
            });
        }
        let flattened_vectors = matrix.into_iter().flatten().collect_vec();
        let multi_dense = TypedMultiDenseVector {
            flattened_vectors,
            dim,
        };
        Ok(multi_dense)
    }
}
pub type MultiDenseVectorInternal = TypedMultiDenseVector<VectorElementType>;

impl Hash for MultiDenseVectorInternal {
    // Manual impl because f32 is not `Hash`; uses `OrderedFloat` per element.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Destructure so a new field forces a review of this impl.
        let Self {
            flattened_vectors,
            dim,
        } = self;
        dim.hash(state);
        for element in flattened_vectors {
            OrderedFloat(*element).hash(state);
        }
    }
}
impl<T: PrimitiveVectorElement> TypedMultiDenseVector<T> {
    /// Number of individual sub-vectors stored in this multi-vector.
    pub fn num_vectors(&self) -> usize {
        // Deduplicated: delegate to `vectors_count` so the
        // "flattened length / dim" computation lives in one place.
        self.vectors_count()
    }

    /// Builds a multi-vector from a flattened buffer; length must be a
    /// multiple of `dim` (checked only in debug builds — use
    /// `try_from_flatten` for validated construction).
    pub fn new(flattened_vectors: TypedDenseVector<T>, dim: usize) -> Self {
        debug_assert_eq!(flattened_vectors.len() % dim, 0, "Invalid vector length");
        Self {
            flattened_vectors,
            dim,
        }
    }

    /// To be used when the input vectors are already validated to avoid double validation
    pub fn new_unchecked(vectors: Vec<Vec<T>>) -> Self {
        debug_assert!(!vectors.is_empty(), "MultiDenseVector cannot be empty");
        debug_assert!(
            vectors.iter().all(|v| !v.is_empty()),
            "Multi individual vectors cannot be empty"
        );
        let dim = vectors[0].len();
        let inner_vector = vectors.into_iter().flatten().collect();
        Self {
            flattened_vectors: inner_vector,
            dim,
        }
    }

    /// MultiDenseVector cannot be empty, so we use a placeholder vector instead
    pub fn placeholder(dim: usize) -> Self {
        Self {
            flattened_vectors: vec![Default::default(); dim],
            dim,
        }
    }

    /// Slices the multi vector into the underlying individual vectors
    pub fn multi_vectors(&self) -> impl Iterator<Item = &[T]> {
        self.flattened_vectors.chunks_exact(self.dim)
    }

    /// Mutable variant of [`Self::multi_vectors`].
    pub fn multi_vectors_mut(&mut self) -> ChunksExactMut<'_, T> {
        self.flattened_vectors.chunks_exact_mut(self.dim)
    }

    /// Consumes the multi vector and returns the underlying individual vectors
    pub fn into_multi_vectors(self) -> Vec<Vec<T>> {
        self.flattened_vectors
            .into_iter()
            .chunks(self.dim)
            .into_iter()
            .map(Iterator::collect)
            .collect()
    }

    pub fn is_empty(&self) -> bool {
        self.flattened_vectors.is_empty()
    }

    /// Number of individual sub-vectors (same as [`Self::num_vectors`]).
    pub fn vectors_count(&self) -> usize {
        self.flattened_vectors.len() / self.dim
    }

    /// Total number of scalar elements across all sub-vectors.
    pub fn flattened_len(&self) -> usize {
        self.flattened_vectors.len()
    }
}
impl<T: PrimitiveVectorElement> TryFrom<Vec<TypedDenseVector<T>>> for TypedMultiDenseVector<T> {
    type Error = OperationError;

    // Thin alias over the validated matrix constructor.
    fn try_from(value: Vec<TypedDenseVector<T>>) -> Result<Self, Self::Error> {
        Self::try_from_matrix(value)
    }
}

/// Borrowed view over a multi dense vector; `Copy`, so cheap to pass by value.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct TypedMultiDenseVectorRef<'a, T> {
    pub flattened_vectors: &'a [T],
    pub dim: usize,
}
impl<'a, T: PrimitiveVectorElement> TypedMultiDenseVectorRef<'a, T> {
    /// Slices the multi vector into the underlying individual vectors
    pub fn multi_vectors(self) -> impl Iterator<Item = &'a [T]> {
        self.flattened_vectors.chunks_exact(self.dim)
    }

    pub fn is_empty(self) -> bool {
        self.flattened_vectors.is_empty()
    }

    /// Number of individual sub-vectors in the view.
    pub fn vectors_count(self) -> usize {
        self.flattened_vectors.len() / self.dim
    }

    /// Total number of scalar elements across all sub-vectors.
    pub fn flattened_len(&self) -> usize {
        self.flattened_vectors.len()
    }

    // Cannot use `ToOwned` trait because of `Borrow` implementation for `TypedMultiDenseVector`
    pub fn to_owned(self) -> TypedMultiDenseVector<T> {
        TypedMultiDenseVector {
            flattened_vectors: self.flattened_vectors.to_owned(),
            dim: self.dim,
        }
    }
}
// Borrow an owned multi-vector as a view.
impl<'a, T: PrimitiveVectorElement> From<&'a TypedMultiDenseVector<T>>
    for TypedMultiDenseVectorRef<'a, T>
{
    fn from(val: &'a TypedMultiDenseVector<T>) -> Self {
        TypedMultiDenseVectorRef {
            flattened_vectors: &val.flattened_vectors,
            dim: val.dim,
        }
    }
}

// Validates row dimensions via the multi-vector constructor.
impl TryFrom<Vec<DenseVector>> for VectorInternal {
    type Error = OperationError;

    fn try_from(value: Vec<DenseVector>) -> Result<Self, Self::Error> {
        MultiDenseVectorInternal::try_from(value).map(VectorInternal::MultiDense)
    }
}

impl VectorRef<'_> {
    // Cannot use `ToOwned` trait because of `Borrow` implementation for `Vector`
    pub fn to_owned(self) -> VectorInternal {
        match self {
            VectorRef::Dense(v) => VectorInternal::Dense(v.to_vec()),
            VectorRef::Sparse(v) => VectorInternal::Sparse(v.clone()),
            VectorRef::MultiDense(v) => VectorInternal::MultiDense(v.to_owned()),
        }
    }
}
// NOTE(review): these implement `TryInto` directly instead of `TryFrom`
// (which would provide `TryInto` via the blanket impl). Changing the trait
// direction would be more idiomatic but needs a whole-crate check — confirm
// before touching.
impl<'a> TryInto<&'a [VectorElementType]> for &'a VectorInternal {
    type Error = OperationError;

    fn try_into(self) -> Result<&'a [VectorElementType], Self::Error> {
        match self {
            VectorInternal::Dense(v) => Ok(v),
            VectorInternal::Sparse(_) => Err(OperationError::WrongSparse),
            VectorInternal::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl<'a> TryInto<&'a SparseVector> for &'a VectorInternal {
    type Error = OperationError;

    fn try_into(self) -> Result<&'a SparseVector, Self::Error> {
        match self {
            VectorInternal::Dense(_) => Err(OperationError::WrongSparse),
            VectorInternal::Sparse(v) => Ok(v),
            VectorInternal::MultiDense(_) => Err(OperationError::WrongMulti),
        }
    }
}

impl<'a> TryInto<&'a MultiDenseVectorInternal> for &'a VectorInternal {
    type Error = OperationError;

    fn try_into(self) -> Result<&'a MultiDenseVectorInternal, Self::Error> {
        match self {
            VectorInternal::Dense(_) => Err(OperationError::WrongMulti), // &Dense vector cannot be converted to &MultiDense
            VectorInternal::Sparse(_) => Err(OperationError::WrongSparse),
            VectorInternal::MultiDense(v) => Ok(v),
        }
    }
}
/// Wraps an owned dense vector as the unnamed ("") default vector.
pub fn default_vector(vec: DenseVector) -> NamedVectors<'static> {
    NamedVectors::from_pairs([(DEFAULT_VECTOR_NAME.to_owned(), vec)])
}

/// Wraps an owned multi-dense vector as the unnamed ("") default vector.
pub fn default_multi_vector(vec: MultiDenseVectorInternal) -> NamedVectors<'static> {
    let mut named_vectors = NamedVectors::default();
    named_vectors.insert(
        DEFAULT_VECTOR_NAME.to_owned(),
        VectorInternal::MultiDense(vec),
    );
    named_vectors
}

/// Borrowing variant of [`default_vector`]: no data is copied.
pub fn only_default_vector(vec: &[VectorElementType]) -> NamedVectors<'_> {
    NamedVectors::from_ref(DEFAULT_VECTOR_NAME, VectorRef::from(vec))
}

/// Borrowing variant of [`default_multi_vector`]: no data is copied.
pub fn only_default_multi_vector(vec: &MultiDenseVectorInternal) -> NamedVectors<'_> {
    NamedVectors::from_ref(
        DEFAULT_VECTOR_NAME,
        VectorRef::MultiDense(TypedMultiDenseVectorRef::from(vec)),
    )
}
/// Full vector data per point separator with single and multiple vector modes
#[derive(Clone, Debug, PartialEq)]
pub enum VectorStructInternal {
    Single(DenseVector),
    MultiDense(MultiDenseVectorInternal),
    Named(HashMap<VectorNameBuf, VectorInternal>),
}

impl From<DenseVector> for VectorStructInternal {
    fn from(v: DenseVector) -> Self {
        VectorStructInternal::Single(v)
    }
}

impl From<&[VectorElementType]> for VectorStructInternal {
    fn from(v: &[VectorElementType]) -> Self {
        VectorStructInternal::Single(v.to_vec())
    }
}

impl From<NamedVectors<'_>> for VectorStructInternal {
    fn from(v: NamedVectors) -> Self {
        // Collapse a single default-named vector into the unnamed variants;
        // everything else keeps its name map.
        if v.len() == 1 && v.contains_key(DEFAULT_VECTOR_NAME) {
            let vector_ref = v.get(DEFAULT_VECTOR_NAME).unwrap();
            match vector_ref {
                VectorRef::Dense(v) => VectorStructInternal::Single(v.to_owned()),
                VectorRef::Sparse(v) => {
                    // Sparse vectors are expected to always be named; keep the
                    // data in the named map rather than losing it.
                    debug_assert!(false, "Sparse vector cannot be default");
                    let mut map = HashMap::new();
                    map.insert(
                        DEFAULT_VECTOR_NAME.to_owned(),
                        VectorInternal::Sparse(v.to_owned()),
                    );
                    VectorStructInternal::Named(map)
                }
                VectorRef::MultiDense(v) => VectorStructInternal::MultiDense(v.to_owned()),
            }
        } else {
            VectorStructInternal::Named(v.into_owned_map())
        }
    }
}
impl VectorStructInternal {
    /// Looks up a vector by name; the unnamed variants only match
    /// `DEFAULT_VECTOR_NAME`.
    pub fn get(&self, name: &VectorName) -> Option<VectorRef<'_>> {
        match self {
            VectorStructInternal::Single(v) => {
                (name == DEFAULT_VECTOR_NAME).then_some(VectorRef::from(v))
            }
            VectorStructInternal::MultiDense(v) => {
                (name == DEFAULT_VECTOR_NAME).then_some(VectorRef::from(v))
            }
            VectorStructInternal::Named(v) => v.get(name).map(VectorRef::from),
        }
    }

    /// Takes a vector by name. If it was the only one, leaves a None in `from`
    pub fn take_opt(from: &mut Option<Self>, name: &VectorName) -> Option<VectorInternal> {
        // `take()` empties `from`; it is re-populated below only when named
        // vectors remain after removal.
        from.take().and_then(|v| match v {
            VectorStructInternal::Single(v) => {
                (name == DEFAULT_VECTOR_NAME).then_some(VectorInternal::Dense(v))
            }
            VectorStructInternal::MultiDense(v) => {
                (name == DEFAULT_VECTOR_NAME).then_some(VectorInternal::MultiDense(v))
            }
            VectorStructInternal::Named(mut v) => {
                let out = v.remove(name);
                if !v.is_empty() {
                    from.replace(Self::Named(v));
                }
                out
            }
        })
    }
}
/// Dense vector data with name
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub struct NamedVector {
    /// Name of vector data
    pub name: VectorNameBuf,
    /// Vector data
    pub vector: DenseVector,
}

/// MultiDense vector data with name
#[derive(Debug, Clone, PartialEq)]
pub struct NamedMultiDenseVector {
    /// Name of vector data
    pub name: VectorNameBuf,
    /// Vector data
    pub vector: MultiDenseVectorInternal,
}

/// Sparse vector data with name
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Validate, PartialEq)]
#[serde(rename_all = "snake_case")]
pub struct NamedSparseVector {
    /// Name of vector data
    pub name: VectorNameBuf,
    /// Vector data
    #[validate(nested)]
    pub vector: SparseVector,
}

/// A vector together with its (possibly default/unnamed) name.
#[derive(Debug, Clone, PartialEq)]
pub enum NamedVectorStruct {
    Default(DenseVector),
    Dense(NamedVector),
    Sparse(NamedSparseVector),
    MultiDense(NamedMultiDenseVector),
}
// Lifting conversions into `NamedVectorStruct`.
impl From<DenseVector> for NamedVectorStruct {
    fn from(v: DenseVector) -> Self {
        NamedVectorStruct::Default(v)
    }
}

impl From<NamedVector> for NamedVectorStruct {
    fn from(v: NamedVector) -> Self {
        NamedVectorStruct::Dense(v)
    }
}

impl From<NamedSparseVector> for NamedVectorStruct {
    fn from(v: NamedSparseVector) -> Self {
        NamedVectorStruct::Sparse(v)
    }
}

impl From<NamedMultiDenseVector> for NamedVectorStruct {
    fn from(v: NamedMultiDenseVector) -> Self {
        NamedVectorStruct::MultiDense(v)
    }
}

/// Anything that carries a vector name.
pub trait Named {
    fn get_name(&self) -> &VectorName;
}

impl Named for NamedVectorStruct {
    fn get_name(&self) -> &VectorName {
        match self {
            // The unnamed variant maps to the empty default name.
            NamedVectorStruct::Default(_) => DEFAULT_VECTOR_NAME,
            NamedVectorStruct::Dense(v) => &v.name,
            NamedVectorStruct::Sparse(v) => &v.name,
            NamedVectorStruct::MultiDense(v) => &v.name,
        }
    }
}
impl NamedVectorStruct {
    /// Pairs a vector with a name, picking the variant matching the payload.
    pub fn new_from_vector(vector: VectorInternal, name: impl Into<VectorNameBuf>) -> Self {
        let name = name.into();
        match vector {
            VectorInternal::Dense(vector) => NamedVectorStruct::Dense(NamedVector { name, vector }),
            VectorInternal::Sparse(vector) => {
                NamedVectorStruct::Sparse(NamedSparseVector { name, vector })
            }
            VectorInternal::MultiDense(vector) => {
                NamedVectorStruct::MultiDense(NamedMultiDenseVector { name, vector })
            }
        }
    }

    /// Borrows the vector payload, dropping the name.
    pub fn get_vector(&self) -> VectorRef<'_> {
        match self {
            NamedVectorStruct::Default(v) => v.as_slice().into(),
            NamedVectorStruct::Dense(v) => v.vector.as_slice().into(),
            NamedVectorStruct::Sparse(v) => (&v.vector).into(),
            NamedVectorStruct::MultiDense(v) => (&v.vector).into(),
        }
    }

    /// Consumes self, returning the vector payload and dropping the name.
    pub fn to_vector(self) -> VectorInternal {
        match self {
            NamedVectorStruct::Default(v) => v.into(),
            NamedVectorStruct::Dense(v) => v.vector.into(),
            NamedVectorStruct::Sparse(v) => v.vector.into(),
            NamedVectorStruct::MultiDense(v) => v.vector.into(),
        }
    }
}
/// Batched counterpart of [`VectorStructInternal`]: one entry per point.
#[derive(Clone, Debug, PartialEq)]
pub enum BatchVectorStructInternal {
    Single(Vec<DenseVector>),
    MultiDense(Vec<MultiDenseVectorInternal>),
    Named(HashMap<VectorNameBuf, Vec<VectorInternal>>),
}

impl From<Vec<DenseVector>> for BatchVectorStructInternal {
    fn from(v: Vec<DenseVector>) -> Self {
        BatchVectorStructInternal::Single(v)
    }
}

impl BatchVectorStructInternal {
    /// Converts the per-name batch layout into per-record `NamedVectors`.
    pub fn into_all_vectors(self, num_records: usize) -> Vec<NamedVectors<'static>> {
        match self {
            BatchVectorStructInternal::Single(vectors) => {
                vectors.into_iter().map(default_vector).collect()
            }
            BatchVectorStructInternal::MultiDense(vectors) => {
                vectors.into_iter().map(default_multi_vector).collect()
            }
            BatchVectorStructInternal::Named(named_vectors) => {
                if named_vectors.is_empty() {
                    // No names at all: still yield one (empty) entry per record.
                    vec![NamedVectors::default(); num_records]
                } else {
                    transpose_map_into_named_vector(named_vectors)
                }
            }
        }
    }
}
/// A query together with the (optional) name of the vector field it targets.
#[derive(Debug, Clone, PartialEq, Serialize, Hash)]
pub struct NamedQuery<TQuery> {
    pub query: TQuery,
    /// Target vector name; `None` means the unnamed default vector.
    pub using: Option<VectorNameBuf>,
}

impl NamedQuery<VectorInternal> {
    /// Nearest-style query against the default (unnamed) dense vector.
    pub fn default_dense(vec: DenseVector) -> NamedQuery<VectorInternal> {
        NamedQuery {
            query: VectorInternal::Dense(vec),
            using: None,
        }
    }
}

impl<TQuery> NamedQuery<TQuery> {
    /// Query against an explicitly named vector field.
    pub fn new(query: TQuery, using: impl Into<String>) -> NamedQuery<TQuery> {
        NamedQuery {
            query,
            using: Some(using.into()),
        }
    }
}
impl From<NamedVectorStruct> for NamedQuery<VectorInternal> {
    // The default (unnamed) variant maps to `using: None`.
    fn from(named_vector: NamedVectorStruct) -> Self {
        match named_vector {
            NamedVectorStruct::Default(dense) => NamedQuery {
                query: VectorInternal::Dense(dense),
                using: None,
            },
            NamedVectorStruct::Dense(NamedVector { name, vector }) => NamedQuery {
                query: VectorInternal::Dense(vector),
                using: Some(name),
            },
            NamedVectorStruct::Sparse(NamedSparseVector { name, vector }) => NamedQuery {
                query: VectorInternal::Sparse(vector),
                using: Some(name),
            },
            NamedVectorStruct::MultiDense(NamedMultiDenseVector { name, vector }) => NamedQuery {
                query: VectorInternal::MultiDense(vector),
                using: Some(name),
            },
        }
    }
}

impl<T> Named for NamedQuery<T> {
    fn get_name(&self) -> &VectorName {
        self.using.as_deref().unwrap_or(DEFAULT_VECTOR_NAME)
    }
}

impl<T: Validate> Validate for NamedQuery<T> {
    // Only the inner query is validated; `using` needs no validation.
    fn validate(&self) -> Result<(), validator::ValidationErrors> {
        self.query.validate()
    }
}
/// Query payload variants supported by vector storage scoring.
#[derive(Debug, Clone)]
pub enum QueryVector {
    Nearest(VectorInternal),
    RecommendBestScore(RecoQuery<VectorInternal>),
    RecommendSumScores(RecoQuery<VectorInternal>),
    Discovery(DiscoveryQuery<VectorInternal>),
    Context(ContextQuery<VectorInternal>),
    FeedbackNaive(NaiveFeedbackQuery<VectorInternal>),
}
impl TransformInto<QueryVector, VectorInternal, VectorInternal> for QueryVector {
    /// Applies `f` to every vector inside the query, preserving the variant.
    /// Fails fast on the first error returned by `f`.
    fn transform<F>(self, mut f: F) -> OperationResult<QueryVector>
    where
        F: FnMut(VectorInternal) -> OperationResult<VectorInternal>,
    {
        match self {
            QueryVector::Nearest(v) => f(v).map(QueryVector::Nearest),
            QueryVector::RecommendBestScore(v) => {
                Ok(QueryVector::RecommendBestScore(v.transform(&mut f)?))
            }
            QueryVector::RecommendSumScores(v) => {
                Ok(QueryVector::RecommendSumScores(v.transform(&mut f)?))
            }
            QueryVector::Discovery(v) => Ok(QueryVector::Discovery(v.transform(&mut f)?)),
            QueryVector::Context(v) => Ok(QueryVector::Context(v.transform(&mut f)?)),
            QueryVector::FeedbackNaive(v) => Ok(QueryVector::FeedbackNaive(v.transform(&mut f)?)),
        }
    }
}
// Convenience conversions: any plain vector becomes a `Nearest` query.
impl From<DenseVector> for QueryVector {
    fn from(vec: DenseVector) -> Self {
        Self::Nearest(VectorInternal::Dense(vec))
    }
}

impl<'a> From<&'a [VectorElementType]> for QueryVector {
    fn from(vec: &'a [VectorElementType]) -> Self {
        Self::Nearest(VectorInternal::Dense(vec.to_vec()))
    }
}

impl<'a> From<&'a MultiDenseVectorInternal> for QueryVector {
    fn from(vec: &'a MultiDenseVectorInternal) -> Self {
        Self::Nearest(VectorInternal::MultiDense(vec.clone()))
    }
}

impl<const N: usize> From<[VectorElementType; N]> for QueryVector {
    fn from(vec: [VectorElementType; N]) -> Self {
        let vec: VectorRef = vec.as_slice().into();
        Self::Nearest(vec.to_owned())
    }
}

impl<'a> From<VectorRef<'a>> for QueryVector {
    fn from(vec: VectorRef<'a>) -> Self {
        Self::Nearest(vec.to_owned())
    }
}

impl From<VectorInternal> for QueryVector {
    fn from(vec: VectorInternal) -> Self {
        Self::Nearest(vec)
    }
}

impl From<SparseVector> for QueryVector {
    fn from(vec: SparseVector) -> Self {
        Self::Nearest(VectorInternal::Sparse(vec))
    }
}

impl From<MultiDenseVectorInternal> for QueryVector {
    fn from(vec: MultiDenseVectorInternal) -> Self {
        Self::Nearest(VectorInternal::MultiDense(vec))
    }
}
// ===== file: lib/segment/src/data_types/manifest.rs =====
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use crate::common::operation_error::{OperationError, OperationResult};
use crate::types::SeqNumberType;
/// Per-segment manifests of a snapshot, keyed by segment id.
/// `transparent` so it (de)serializes as the bare map.
#[derive(Clone, Debug, Default, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
#[serde(transparent)]
pub struct SnapshotManifest {
    segments: HashMap<String, SegmentManifest>,
}
impl SnapshotManifest {
    /// Checks internal consistency: no file may claim a version newer than
    /// the segment that contains it.
    pub fn validate(&self) -> OperationResult<()> {
        for (segment_id, manifest) in &self.segments {
            for (file, version) in &manifest.file_versions {
                // Unversioned files resolve to the segment version, so they
                // always pass this check.
                if version.or_segment_version(manifest.segment_version) > manifest.segment_version {
                    return Err(OperationError::validation_error(format!(
                        "invalid snapshot manifest: \
                         file {segment_id}/{} is newer than segment {segment_id} ({version:?} > {})",
                        file.display(),
                        manifest.segment_version,
                    )));
                }
            }
        }
        Ok(())
    }

    /// Version of the given segment, if it is present in the manifest.
    pub fn version(&self, segment_id: &str) -> Option<SeqNumberType> {
        self.segments
            .get(segment_id)
            .map(|manifest| manifest.segment_version)
    }

    /// Inserts or updates a segment manifest.
    ///
    /// An existing entry is only replaced by a strictly newer one.
    /// Returns `true` if the manifest was inserted or replaced.
    pub fn add(&mut self, new_manifest: SegmentManifest) -> bool {
        let Some(current_manifest) = self.segments.get_mut(&new_manifest.segment_id) else {
            self.segments
                .insert(new_manifest.segment_id.clone(), new_manifest);
            return true;
        };
        debug_assert_eq!(current_manifest.segment_id, new_manifest.segment_id);
        if current_manifest.segment_version < new_manifest.segment_version {
            *current_manifest = new_manifest;
            return true;
        }
        false
    }

    pub fn get(&self, segment_id: &str) -> Option<&SegmentManifest> {
        self.segments.get(segment_id)
    }

    pub fn iter(&self) -> impl Iterator<Item = (&String, &SegmentManifest)> {
        self.segments.iter()
    }

    pub fn len(&self) -> usize {
        self.segments.len()
    }

    pub fn is_empty(&self) -> bool {
        self.segments.is_empty()
    }
}
/// Versioning information of a single segment and the files it contains.
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct SegmentManifest {
    pub segment_id: String,
    pub segment_version: SeqNumberType,
    /// Per-file versions; `Unversioned` entries inherit `segment_version`.
    pub file_versions: HashMap<PathBuf, FileVersion>,
}
impl SegmentManifest {
    /// Manifest with version 0 and no tracked files.
    pub fn empty(segment_id: impl Into<String>) -> Self {
        Self {
            segment_id: segment_id.into(),
            segment_version: 0,
            file_versions: HashMap::new(),
        }
    }

    /// Resolved version of a tracked file; unversioned files fall back to the
    /// segment version. `None` if the file is not tracked.
    pub fn file_version(&self, file: &Path) -> Option<SeqNumberType> {
        self.file_versions
            .get(file)
            .map(|version| version.or_segment_version(self.segment_version))
    }

    /// Iterates over all tracked files with their resolved versions.
    pub fn file_versions(&self) -> impl Iterator<Item = (&Path, SeqNumberType)> {
        self.file_versions.iter().map(|(file, version)| {
            let file = file.as_path();
            let version = version.or_segment_version(self.segment_version);
            (file, version)
        })
    }
}
#[derive(
Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, serde::Serialize, serde::Deserialize,
)]
#[serde(untagged)]
pub enum FileVersion {
Version(SeqNumberType),
Unversioned,
}
impl FileVersion {
pub fn is_unversioned(self) -> bool {
self == Self::Unversioned
}
pub fn or_segment_version(self, segment_version: SeqNumberType) -> SeqNumberType {
match self {
FileVersion::Version(version) => version,
FileVersion::Unversioned => segment_version,
}
}
}
impl From<Option<SeqNumberType>> for FileVersion {
fn from(version: Option<SeqNumberType>) -> Self {
match version {
Some(version) => Self::Version(version),
None => Self::Unversioned,
}
}
}
impl From<SeqNumberType> for FileVersion {
fn from(version: SeqNumberType) -> Self {
Self::Version(version)
}
}
#[cfg(test)]
mod test {
use super::*;
/// Tests that `FileVersion` variants are uniquely represented in JSON
#[test]
fn file_version_serde() {
test_file_version_serde(FileVersion::Version(42), "42");
test_file_version_serde(FileVersion::Unversioned, "null");
}
/// Tests that `FileVersion` serializes into/deserializes from provided JSON representation
fn test_file_version_serde(version: FileVersion, json: &str) {
let serialized =
serde_json::to_string(&version).expect("failed to serialize FileVersion to JSON");
assert_eq!(serialized, json);
let deserialized: FileVersion =
serde_json::from_str(json).expect("failed to deserialize FileVersion from JSON");
assert_eq!(deserialized, version);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/modifier.rs | lib/segment/src/data_types/modifier.rs | use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::common::anonymize::Anonymize;
/// If used, include weight modification, which will be applied to sparse vectors at query time:
/// None - no modification (default)
/// Idf - inverse document frequency, based on statistics of the collection
#[derive(
Debug, Hash, Deserialize, Serialize, JsonSchema, Anonymize, Clone, Copy, PartialEq, Eq, Default,
)]
#[serde(rename_all = "snake_case")]
pub enum Modifier {
#[default]
None,
Idf,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/build_index_result.rs | lib/segment/src/data_types/build_index_result.rs | use crate::index::field_index::FieldIndex;
use crate::types::PayloadFieldSchema;
pub enum BuildFieldIndexResult {
/// Index was not built, as operation version is lower than segment version
SkippedByVersion,
/// Index was already built
AlreadyExists,
/// Incompatible schema
IncompatibleSchema,
/// Index was built
Built {
indexes: Vec<FieldIndex>,
schema: PayloadFieldSchema,
},
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/primitive.rs | lib/segment/src/data_types/primitive.rs | use std::borrow::Cow;
use half::f16;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
use super::named_vectors::CowMultiVector;
use super::vectors::TypedMultiDenseVector;
use crate::data_types::vectors::{VectorElementType, VectorElementTypeByte, VectorElementTypeHalf};
use crate::types::{Distance, QuantizationConfig, VectorStorageDatatype};
pub trait PrimitiveVectorElement
where
Self: Copy + Clone + Default + Send + Sync + 'static,
Self: Serialize + for<'a> Deserialize<'a>,
Self: FromBytes + Immutable + IntoBytes + KnownLayout,
{
fn slice_from_float_cow(vector: Cow<[VectorElementType]>) -> Cow<[Self]>;
fn slice_to_float_cow(vector: Cow<[Self]>) -> Cow<[VectorElementType]>;
fn quantization_preprocess<'a>(
quantization_config: &QuantizationConfig,
distance: Distance,
vector: &'a [Self],
) -> Cow<'a, [f32]>;
fn datatype() -> VectorStorageDatatype;
fn from_float_multivector(
multivector: CowMultiVector<VectorElementType>,
) -> CowMultiVector<Self>;
fn into_float_multivector(
multivector: CowMultiVector<Self>,
) -> CowMultiVector<VectorElementType>;
}
impl PrimitiveVectorElement for VectorElementType {
fn slice_from_float_cow(vector: Cow<[VectorElementType]>) -> Cow<[Self]> {
vector
}
fn slice_to_float_cow(vector: Cow<[Self]>) -> Cow<[VectorElementType]> {
vector
}
fn quantization_preprocess<'a>(
_quantization_config: &QuantizationConfig,
_distance: Distance,
vector: &'a [Self],
) -> Cow<'a, [f32]> {
Cow::Borrowed(vector)
}
fn datatype() -> VectorStorageDatatype {
VectorStorageDatatype::Float32
}
fn from_float_multivector(
multivector: CowMultiVector<VectorElementType>,
) -> CowMultiVector<Self> {
multivector
}
fn into_float_multivector(
multivector: CowMultiVector<Self>,
) -> CowMultiVector<VectorElementType> {
multivector
}
}
impl PrimitiveVectorElement for VectorElementTypeHalf {
fn slice_from_float_cow(vector: Cow<[VectorElementType]>) -> Cow<[Self]> {
Cow::Owned(vector.iter().map(|&x| f16::from_f32(x)).collect())
}
fn slice_to_float_cow(vector: Cow<[Self]>) -> Cow<[VectorElementType]> {
Cow::Owned(vector.iter().map(|&x| f16::to_f32(x)).collect_vec())
}
fn quantization_preprocess<'a>(
_quantization_config: &QuantizationConfig,
_distance: Distance,
vector: &'a [Self],
) -> Cow<'a, [f32]> {
Cow::Owned(vector.iter().map(|&x| f16::to_f32(x)).collect_vec())
}
fn from_float_multivector(
multivector: CowMultiVector<VectorElementType>,
) -> CowMultiVector<Self> {
CowMultiVector::Owned(TypedMultiDenseVector::new(
multivector
.as_vec_ref()
.flattened_vectors
.iter()
.map(|&x| f16::from_f32(x))
.collect_vec(),
multivector.as_vec_ref().dim,
))
}
fn into_float_multivector(
multivector: CowMultiVector<Self>,
) -> CowMultiVector<VectorElementType> {
CowMultiVector::Owned(TypedMultiDenseVector::new(
multivector
.as_vec_ref()
.flattened_vectors
.iter()
.map(|&x| f16::to_f32(x))
.collect_vec(),
multivector.as_vec_ref().dim,
))
}
fn datatype() -> VectorStorageDatatype {
VectorStorageDatatype::Float16
}
}
impl PrimitiveVectorElement for VectorElementTypeByte {
fn slice_from_float_cow(vector: Cow<[VectorElementType]>) -> Cow<[Self]> {
Cow::Owned(vector.iter().map(|&x| x as u8).collect())
}
fn slice_to_float_cow(vector: Cow<[Self]>) -> Cow<[VectorElementType]> {
Cow::Owned(
vector
.iter()
.map(|&x| VectorElementType::from(x))
.collect_vec(),
)
}
fn quantization_preprocess<'a>(
quantization_config: &QuantizationConfig,
distance: Distance,
vector: &'a [Self],
) -> Cow<'a, [f32]> {
if let QuantizationConfig::Binary(_) = quantization_config {
Cow::from(
vector
.iter()
.map(|&x| VectorElementType::from(x) - 127.0)
.collect_vec(),
)
} else {
let vector = vector
.iter()
.map(|&x| VectorElementType::from(x))
.collect_vec();
Cow::from(distance.preprocess_vector::<VectorElementType>(vector))
}
}
fn datatype() -> VectorStorageDatatype {
VectorStorageDatatype::Uint8
}
fn from_float_multivector(
multivector: CowMultiVector<VectorElementType>,
) -> CowMultiVector<Self> {
CowMultiVector::Owned(TypedMultiDenseVector::new(
multivector
.as_vec_ref()
.flattened_vectors
.iter()
.map(|&x| x as Self)
.collect_vec(),
multivector.as_vec_ref().dim,
))
}
fn into_float_multivector(
multivector: CowMultiVector<Self>,
) -> CowMultiVector<VectorElementType> {
CowMultiVector::Owned(TypedMultiDenseVector::new(
multivector
.as_vec_ref()
.flattened_vectors
.iter()
.map(|&x| VectorElementType::from(x))
.collect_vec(),
multivector.as_vec_ref().dim,
))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/mod.rs | lib/segment/src/data_types/mod.rs | pub mod build_index_result;
pub mod collection_defaults;
pub mod facets;
pub mod groups;
pub mod index;
pub mod manifest;
pub mod modifier;
pub mod named_vectors;
pub mod order_by;
pub mod primitive;
pub mod query_context;
pub mod tiny_map;
pub mod vectors;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/query_context.rs | lib/segment/src/data_types/query_context.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use bitvec::prelude::BitSlice;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use common::counter::hardware_counter::HardwareCounterCell;
use common::cow::SimpleCow;
use sparse::common::types::{DimId, DimWeight};
use crate::data_types::tiny_map;
use crate::index::query_optimization::rescore_formula::parsed_formula::ParsedFormula;
use crate::types::{ScoredPoint, VectorName, VectorNameBuf};
#[derive(Debug, Default)]
pub struct QueryIdfStats {
/// Statistics of the element frequency,
/// collected over all segments.
/// Required for processing sparse vector search with `idf-dot` similarity.
pub idf: tiny_map::TinyMap<VectorNameBuf, HashMap<DimId, usize>>,
/// Number of indexed vectors per vector name.
pub indexed_vectors: tiny_map::TinyMap<VectorNameBuf, usize>,
}
#[derive(Debug)]
pub struct QueryContext {
/// Total amount of available points in the segment.
available_point_count: usize,
/// Parameter, which defines how big a plain segment can be to be considered
/// small enough to be searched with `indexed_only` option.
search_optimized_threshold_kb: usize,
/// Defines if the search process was stopped.
/// Is changed externally if API times out or cancelled.
is_stopped: Arc<AtomicBool>,
/// Statistics of the element frequency,
/// collected over all segments.
/// Required for processing sparse vector search with `idf-dot` similarity.
idf_stats: QueryIdfStats,
/// Structure to accumulate and report hardware usage.
/// Holds reference to the shared drain, which is used to accumulate the values.
hardware_usage_accumulator: HwMeasurementAcc,
}
impl QueryContext {
pub fn new(
search_optimized_threshold_kb: usize,
hardware_usage_accumulator: HwMeasurementAcc,
) -> Self {
Self {
available_point_count: 0,
search_optimized_threshold_kb,
is_stopped: Arc::new(AtomicBool::new(false)),
idf_stats: QueryIdfStats::default(),
hardware_usage_accumulator,
}
}
pub fn is_stopped(&self) -> bool {
self.is_stopped.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn with_is_stopped(mut self, flag: Arc<AtomicBool>) -> Self {
self.is_stopped = flag;
self
}
pub fn available_point_count(&self) -> usize {
self.available_point_count
}
pub fn search_optimized_threshold_kb(&self) -> usize {
self.search_optimized_threshold_kb
}
pub fn add_available_point_count(&mut self, count: usize) {
self.available_point_count += count;
}
/// Fill indices of sparse vectors, which are required for `idf-dot` similarity
/// with zeros, so the statistics can be collected.
pub fn init_idf(&mut self, vector_name: &VectorName, indices: &[DimId]) {
self.idf_stats
.indexed_vectors
.insert(vector_name.to_owned(), 0);
// ToDo: Would be nice to have an implementation of `entry` for `TinyMap`.
let idf = if let Some(idf) = self.idf_stats.idf.get_mut(vector_name) {
idf
} else {
self.idf_stats
.idf
.insert(vector_name.to_owned(), HashMap::default());
self.idf_stats.idf.get_mut(vector_name).unwrap()
};
for index in indices {
idf.insert(*index, 0);
}
}
pub fn mut_idf_stats(&mut self) -> &mut QueryIdfStats {
&mut self.idf_stats
}
pub fn get_segment_query_context(&self) -> SegmentQueryContext<'_> {
SegmentQueryContext {
query_context: self,
deleted_points: None,
hardware_counter: self.hardware_usage_accumulator.get_counter_cell(),
}
}
pub fn hardware_usage_accumulator(&self) -> &HwMeasurementAcc {
&self.hardware_usage_accumulator
}
}
#[cfg(feature = "testing")]
impl Default for QueryContext {
fn default() -> Self {
Self::new(usize::MAX, HwMeasurementAcc::new()) // Search optimized threshold won't affect the search.
}
}
/// Defines context of the search query on the segment level
#[derive(Debug)]
pub struct SegmentQueryContext<'a> {
query_context: &'a QueryContext,
deleted_points: Option<&'a BitSlice>,
hardware_counter: HardwareCounterCell,
}
impl<'a> SegmentQueryContext<'a> {
pub fn available_point_count(&self) -> usize {
self.query_context.available_point_count()
}
pub fn get_vector_context(&self, vector_name: &VectorName) -> VectorQueryContext<'_> {
VectorQueryContext {
search_optimized_threshold_kb: self.query_context.search_optimized_threshold_kb,
is_stopped: Some(&self.query_context.is_stopped),
idf: self.query_context.idf_stats.idf.get(vector_name),
indexed_vectors: self
.query_context
.idf_stats
.indexed_vectors
.get(vector_name)
.copied(),
deleted_points: self.deleted_points,
hardware_counter: self.hardware_counter.fork(),
}
}
pub fn with_deleted_points(mut self, deleted_points: &'a BitSlice) -> Self {
self.deleted_points = Some(deleted_points);
self
}
pub fn is_stopped(&self) -> bool {
self.query_context.is_stopped()
}
pub fn fork(&self) -> Self {
Self {
query_context: self.query_context,
deleted_points: self.deleted_points,
hardware_counter: self.hardware_counter.fork(),
}
}
}
/// Query context related to a specific vector
#[derive(Debug)]
pub struct VectorQueryContext<'a> {
/// Parameter, which defines how big a plain segment can be to be considered
/// small enough to be searched with `indexed_only` option.
search_optimized_threshold_kb: usize,
is_stopped: Option<&'a AtomicBool>,
idf: Option<&'a HashMap<DimId, usize>>,
indexed_vectors: Option<usize>,
deleted_points: Option<&'a BitSlice>,
hardware_counter: HardwareCounterCell,
}
impl VectorQueryContext<'_> {
pub fn hardware_counter(&self) -> HardwareCounterCell {
self.hardware_counter.fork()
}
pub fn search_optimized_threshold_kb(&self) -> usize {
self.search_optimized_threshold_kb
}
pub fn deleted_points(&self) -> Option<&BitSlice> {
self.deleted_points
}
pub fn is_stopped(&self) -> SimpleCow<'_, AtomicBool> {
self.is_stopped
.map(SimpleCow::Borrowed)
.unwrap_or_else(|| SimpleCow::Owned(AtomicBool::new(false)))
}
/// Compute advanced formula for Inverse Document Frequency (IDF) according to wikipedia.
/// This should account for corner cases when `df` and `n` are small or zero.
#[inline]
fn fancy_idf(n: DimWeight, df: DimWeight) -> DimWeight {
((n - df + 0.5) / (df + 0.5) + 1.).ln()
}
pub fn remap_idf_weights(&self, indices: &[DimId], weights: &mut [DimWeight]) {
// Number of documents
let Some(indexed_vectors) = self.indexed_vectors else {
return;
};
let n = indexed_vectors as DimWeight;
for (weight, index) in weights.iter_mut().zip(indices) {
// Document frequency
let df = self
.idf
.and_then(|idf| idf.get(index))
.copied()
.unwrap_or(0);
*weight *= Self::fancy_idf(n, df as DimWeight);
}
}
pub fn is_require_idf(&self) -> bool {
self.idf.is_some() && self.indexed_vectors.is_some()
}
}
#[cfg(feature = "testing")]
impl Default for VectorQueryContext<'_> {
fn default() -> Self {
VectorQueryContext {
search_optimized_threshold_kb: usize::MAX,
is_stopped: None,
idf: None,
indexed_vectors: None,
deleted_points: None,
hardware_counter: HardwareCounterCell::new(),
}
}
}
pub struct FormulaContext {
pub formula: ParsedFormula,
pub prefetches_results: Vec<Vec<ScoredPoint>>,
pub limit: usize,
pub is_stopped: Arc<AtomicBool>,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/named_vectors.rs | lib/segment/src/data_types/named_vectors.rs | use std::borrow::Cow;
use std::collections::HashMap;
use sparse::common::sparse_vector::SparseVector;
use super::primitive::PrimitiveVectorElement;
use super::tiny_map;
use super::vectors::{
DenseVector, MultiDenseVectorInternal, TypedMultiDenseVector, TypedMultiDenseVectorRef,
VectorElementType, VectorElementTypeByte, VectorElementTypeHalf, VectorInternal, VectorRef,
};
use crate::common::operation_error::OperationError;
use crate::types::{VectorDataConfig, VectorName, VectorNameBuf, VectorStorageDatatype};
type CowKey<'a> = Cow<'a, VectorName>;
#[derive(Clone, PartialEq, Debug)]
pub enum CowMultiVector<'a, TElement: PrimitiveVectorElement> {
Owned(TypedMultiDenseVector<TElement>),
Borrowed(TypedMultiDenseVectorRef<'a, TElement>),
}
impl<TElement> CowMultiVector<'_, TElement>
where
TElement: PrimitiveVectorElement,
{
fn flattened_len(&self) -> usize {
match self {
CowMultiVector::Owned(typed_multi_dense_vector) => {
typed_multi_dense_vector.flattened_len()
}
CowMultiVector::Borrowed(typed_multi_dense_vector_ref) => {
typed_multi_dense_vector_ref.flattened_len()
}
}
}
}
#[derive(Clone, PartialEq, Debug)]
pub enum CowVector<'a> {
Dense(Cow<'a, [VectorElementType]>),
Sparse(Cow<'a, SparseVector>),
MultiDense(CowMultiVector<'a, VectorElementType>),
}
impl Default for CowVector<'_> {
fn default() -> Self {
CowVector::Dense(Cow::Owned(Vec::new()))
}
}
impl CowVector<'_> {
pub fn estimate_size_in_bytes(&self) -> usize {
match self {
CowVector::Dense(cow) => cow.len() * size_of::<VectorElementType>(),
CowVector::Sparse(cow) => cow.indices.len() * size_of::<VectorElementType>() * 2, // indices & values
CowVector::MultiDense(cow_multi_vector) => {
cow_multi_vector.flattened_len() * size_of::<VectorElementType>()
}
}
}
}
type TinyMap<'a> = tiny_map::TinyMap<CowKey<'a>, CowVector<'a>>;
#[derive(Clone, Default, Debug, PartialEq)]
pub struct NamedVectors<'a> {
map: TinyMap<'a>,
}
impl<'a, TElement: PrimitiveVectorElement> CowMultiVector<'a, TElement> {
pub fn to_owned(self) -> TypedMultiDenseVector<TElement> {
match self {
CowMultiVector::Owned(v) => v,
CowMultiVector::Borrowed(v) => v.to_owned(),
}
}
pub fn as_vec_ref(&'a self) -> TypedMultiDenseVectorRef<'a, TElement> {
match self {
CowMultiVector::Owned(v) => TypedMultiDenseVectorRef {
flattened_vectors: &v.flattened_vectors,
dim: v.dim,
},
CowMultiVector::Borrowed(v) => *v,
}
}
}
impl CowVector<'_> {
pub fn default_sparse() -> Self {
CowVector::Sparse(Cow::Owned(SparseVector::default()))
}
pub fn to_owned(self) -> VectorInternal {
match self {
CowVector::Dense(v) => VectorInternal::Dense(v.into_owned()),
CowVector::Sparse(v) => VectorInternal::Sparse(v.into_owned()),
CowVector::MultiDense(v) => VectorInternal::MultiDense(v.to_owned()),
}
}
pub fn as_vec_ref(&self) -> VectorRef<'_> {
match self {
CowVector::Dense(v) => VectorRef::Dense(v.as_ref()),
CowVector::Sparse(v) => VectorRef::Sparse(v.as_ref()),
CowVector::MultiDense(v) => VectorRef::MultiDense(v.as_vec_ref()),
}
}
}
impl<'a> From<Cow<'a, [VectorElementType]>> for CowVector<'a> {
fn from(v: Cow<'a, [VectorElementType]>) -> Self {
match v {
Cow::Borrowed(v) => CowVector::Dense(Cow::Borrowed(v)),
Cow::Owned(v) => CowVector::Dense(Cow::Owned(v)),
}
}
}
impl From<VectorInternal> for CowVector<'_> {
fn from(v: VectorInternal) -> Self {
match v {
VectorInternal::Dense(v) => CowVector::Dense(Cow::Owned(v)),
VectorInternal::Sparse(v) => CowVector::Sparse(Cow::Owned(v)),
VectorInternal::MultiDense(v) => CowVector::MultiDense(CowMultiVector::Owned(v)),
}
}
}
impl From<SparseVector> for CowVector<'_> {
fn from(v: SparseVector) -> Self {
CowVector::Sparse(Cow::Owned(v))
}
}
impl From<DenseVector> for CowVector<'_> {
fn from(v: DenseVector) -> Self {
CowVector::Dense(Cow::Owned(v))
}
}
impl From<MultiDenseVectorInternal> for CowVector<'_> {
fn from(v: MultiDenseVectorInternal) -> Self {
CowVector::MultiDense(CowMultiVector::Owned(v))
}
}
impl<'a> From<Cow<'a, MultiDenseVectorInternal>> for CowVector<'a> {
fn from(v: Cow<'a, MultiDenseVectorInternal>) -> Self {
match v {
Cow::Borrowed(v) => {
CowVector::MultiDense(CowMultiVector::Borrowed(TypedMultiDenseVectorRef::from(v)))
}
Cow::Owned(v) => CowVector::MultiDense(CowMultiVector::Owned(v)),
}
}
}
impl<'a> From<&'a SparseVector> for CowVector<'a> {
fn from(v: &'a SparseVector) -> Self {
CowVector::Sparse(Cow::Borrowed(v))
}
}
impl<'a> From<&'a [VectorElementType]> for CowVector<'a> {
fn from(v: &'a [VectorElementType]) -> Self {
CowVector::Dense(Cow::Borrowed(v))
}
}
impl<'a> From<&'a MultiDenseVectorInternal> for CowVector<'a> {
fn from(v: &'a MultiDenseVectorInternal) -> Self {
CowVector::MultiDense(CowMultiVector::Borrowed(TypedMultiDenseVectorRef::from(v)))
}
}
impl<'a> TryFrom<CowVector<'a>> for SparseVector {
type Error = OperationError;
fn try_from(value: CowVector<'a>) -> Result<Self, Self::Error> {
match value {
CowVector::Dense(_) => Err(OperationError::WrongSparse),
CowVector::Sparse(v) => Ok(v.into_owned()),
CowVector::MultiDense(_) => Err(OperationError::WrongSparse),
}
}
}
impl<'a> TryFrom<CowVector<'a>> for DenseVector {
type Error = OperationError;
fn try_from(value: CowVector<'a>) -> Result<Self, Self::Error> {
match value {
CowVector::Dense(v) => Ok(v.into_owned()),
CowVector::Sparse(_) => Err(OperationError::WrongSparse),
CowVector::MultiDense(_) => Err(OperationError::WrongMulti),
}
}
}
impl<'a> TryFrom<CowVector<'a>> for Cow<'a, [VectorElementType]> {
type Error = OperationError;
fn try_from(value: CowVector<'a>) -> Result<Self, Self::Error> {
match value {
CowVector::Dense(v) => Ok(v),
CowVector::Sparse(_) => Err(OperationError::WrongSparse),
CowVector::MultiDense(_) => Err(OperationError::WrongMulti),
}
}
}
impl<'a> From<VectorRef<'a>> for CowVector<'a> {
fn from(v: VectorRef<'a>) -> Self {
match v {
VectorRef::Dense(v) => CowVector::Dense(Cow::Borrowed(v)),
VectorRef::Sparse(v) => CowVector::Sparse(Cow::Borrowed(v)),
VectorRef::MultiDense(v) => CowVector::MultiDense(CowMultiVector::Borrowed(v)),
}
}
}
impl<'a> NamedVectors<'a> {
pub fn from_ref(key: &'a VectorName, value: VectorRef<'a>) -> Self {
let mut map = TinyMap::new();
map.insert(Cow::Borrowed(key), CowVector::from(value));
Self { map }
}
pub fn from_pairs<const N: usize>(arr: [(VectorNameBuf, DenseVector); N]) -> Self {
NamedVectors {
map: arr
.into_iter()
.map(|(k, v)| (CowKey::from(k), CowVector::Dense(Cow::Owned(v))))
.collect(),
}
}
pub fn from_map(map: HashMap<VectorNameBuf, VectorInternal>) -> Self {
Self {
map: map
.into_iter()
.map(|(k, v)| (CowKey::from(k), v.into()))
.collect(),
}
}
pub fn from_map_ref(map: &'a HashMap<VectorNameBuf, DenseVector>) -> Self {
Self {
map: map
.iter()
.map(|(k, v)| (CowKey::from(k), CowVector::Dense(Cow::Borrowed(v))))
.collect(),
}
}
pub fn merge(&mut self, other: NamedVectors<'a>) {
for (key, value) in other {
self.map.insert(key, value);
}
}
pub fn insert(&mut self, name: VectorNameBuf, vector: VectorInternal) {
self.map
.insert(CowKey::Owned(name), CowVector::from(vector));
}
pub fn remove_ref(&mut self, key: &VectorName) {
self.map.remove(key);
}
pub fn insert_ref(&mut self, name: &'a VectorName, vector: VectorRef<'a>) {
self.map
.insert(CowKey::Borrowed(name), CowVector::from(vector));
}
pub fn contains_key(&self, key: &VectorName) -> bool {
self.map.contains_key(key)
}
pub fn len(&self) -> usize {
self.map.len()
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
pub fn keys(&self) -> impl Iterator<Item = &VectorName> {
self.map.iter().map(|(k, _)| k.as_ref())
}
pub fn into_owned_map(self) -> HashMap<VectorNameBuf, VectorInternal> {
self.map
.into_iter()
.map(|(k, v)| (k.into_owned(), v.to_owned()))
.collect()
}
pub fn iter(&self) -> impl Iterator<Item = (&VectorName, VectorRef<'_>)> {
self.map.iter().map(|(k, v)| (k.as_ref(), v.as_vec_ref()))
}
pub fn get(&self, key: &VectorName) -> Option<VectorRef<'_>> {
self.map.get(key).map(|v| v.as_vec_ref())
}
pub fn preprocess<'b>(
&mut self,
get_vector_data: impl Fn(&VectorName) -> &'b VectorDataConfig,
) {
for (name, vector) in self.map.iter_mut() {
match vector {
CowVector::Dense(v) => {
let config = get_vector_data(name.as_ref());
let preprocessed_vector = Self::preprocess_dense_vector(v.to_vec(), config);
*vector = CowVector::Dense(Cow::Owned(preprocessed_vector))
}
CowVector::Sparse(v) => {
// sort by indices to enable faster dot product and overlap checks
if !v.is_sorted() {
v.to_mut().sort_by_indices();
}
}
CowVector::MultiDense(multi_vector) => {
// invalid temp value to swap with multi_vector and reduce reallocations
let mut tmp_multi_vector = CowMultiVector::Borrowed(TypedMultiDenseVectorRef {
flattened_vectors: &[],
dim: 1,
});
// `multi_vector` is empty invalid and `tmp_multi_vector` owns the real data
std::mem::swap(&mut tmp_multi_vector, multi_vector);
let mut owned_multi_vector = tmp_multi_vector.to_owned();
let config = get_vector_data(name.as_ref());
for dense_vector in owned_multi_vector.multi_vectors_mut() {
let preprocessed_vector =
Self::preprocess_dense_vector(dense_vector.to_vec(), config);
// replace dense vector with preprocessed vector
dense_vector.copy_from_slice(&preprocessed_vector);
}
*multi_vector = CowMultiVector::Owned(owned_multi_vector);
}
}
}
}
fn preprocess_dense_vector(
dense_vector: DenseVector,
config: &VectorDataConfig,
) -> DenseVector {
match config.datatype {
Some(VectorStorageDatatype::Float32) | None => config
.distance
.preprocess_vector::<VectorElementType>(dense_vector),
Some(VectorStorageDatatype::Uint8) => config
.distance
.preprocess_vector::<VectorElementTypeByte>(dense_vector),
Some(VectorStorageDatatype::Float16) => config
.distance
.preprocess_vector::<VectorElementTypeHalf>(dense_vector),
}
}
}
impl<'a> IntoIterator for NamedVectors<'a> {
type Item = (CowKey<'a>, CowVector<'a>);
type IntoIter =
tinyvec::TinyVecIterator<[(CowKey<'a>, CowVector<'a>); super::tiny_map::CAPACITY]>;
fn into_iter(self) -> Self::IntoIter {
self.map.into_iter()
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/order_by.rs | lib/segment/src/data_types/order_by.rs | use std::hash::Hash;
use num_cmp::NumCmp;
use ordered_float::OrderedFloat;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use validator::Validate;
use crate::json_path::JsonPath;
use crate::types::{
DateTimePayloadType, FloatPayloadType, IntPayloadType, Order, Range, RangeInterface,
};
#[derive(Deserialize, Serialize, JsonSchema, Copy, Clone, Debug, Default, PartialEq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum Direction {
#[default]
Asc,
Desc,
}
impl Direction {
pub fn as_range_from<T>(&self, from: T) -> Range<T> {
match self {
Direction::Asc => Range {
gte: Some(from),
gt: None,
lte: None,
lt: None,
},
Direction::Desc => Range {
lte: Some(from),
gt: None,
gte: None,
lt: None,
},
}
}
}
impl From<Direction> for Order {
fn from(direction: Direction) -> Self {
match direction {
Direction::Asc => Order::SmallBetter,
Direction::Desc => Order::LargeBetter,
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)]
#[serde(untagged)]
pub enum StartFrom {
Integer(IntPayloadType),
Float(FloatPayloadType),
Datetime(DateTimePayloadType),
}
impl Hash for StartFrom {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
StartFrom::Integer(i) => i.hash(state),
StartFrom::Float(f) => OrderedFloat(*f).hash(state),
StartFrom::Datetime(dt) => dt.hash(state),
}
}
}
#[derive(Deserialize, Serialize, JsonSchema, Validate, Clone, Debug, PartialEq, Hash)]
#[serde(rename_all = "snake_case")]
pub struct OrderBy {
/// Payload key to order by
pub key: JsonPath,
/// Direction of ordering: `asc` or `desc`. Default is ascending.
pub direction: Option<Direction>,
/// Which payload value to start scrolling from. Default is the lowest value for `asc` and the highest for `desc`
pub start_from: Option<StartFrom>,
}
impl OrderBy {
/// Returns a range representation of OrderBy.
pub fn as_range(&self) -> RangeInterface {
self.start_from
.as_ref()
.map(|start_from| match start_from {
// TODO: When we introduce integer ranges, we'll stop doing lossy conversion to f64 here
// Accepting an integer as start_from simplifies the client generation.
StartFrom::Integer(i) => {
RangeInterface::Float(self.direction().as_range_from(OrderedFloat(*i as f64)))
}
StartFrom::Float(f) => {
RangeInterface::Float(self.direction().as_range_from(OrderedFloat(*f)))
}
StartFrom::Datetime(dt) => {
RangeInterface::DateTime(self.direction().as_range_from(*dt))
}
})
.unwrap_or_else(|| RangeInterface::Float(Range::default()))
}
pub fn direction(&self) -> Direction {
self.direction.unwrap_or_default()
}
pub fn start_from(&self) -> OrderValue {
self.start_from
.as_ref()
.map(|start_from| match start_from {
StartFrom::Integer(i) => OrderValue::Int(*i),
StartFrom::Float(f) => OrderValue::Float(*f),
StartFrom::Datetime(dt) => OrderValue::Int(dt.timestamp()),
})
.unwrap_or_else(|| match self.direction() {
Direction::Asc => OrderValue::MIN,
Direction::Desc => OrderValue::MAX,
})
}
}
fn order_value_int_example() -> IntPayloadType {
42
}
fn order_value_float_example() -> FloatPayloadType {
42.5
}
#[derive(Debug, Clone, Copy, Serialize, JsonSchema)]
#[serde(untagged)]
pub enum OrderValue {
#[schemars(example = "order_value_int_example")]
Int(IntPayloadType),
#[schemars(example = "order_value_float_example")]
Float(FloatPayloadType),
}
#[cfg(any(test, feature = "testing"))]
impl std::hash::Hash for OrderValue {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
OrderValue::Int(i) => i.hash(state),
OrderValue::Float(f) => f.to_bits().hash(state),
}
}
}
impl OrderValue {
const MAX: Self = Self::Float(f64::NAN);
const MIN: Self = Self::Float(f64::MIN);
}
impl From<OrderValue> for serde_json::Value {
fn from(value: OrderValue) -> Self {
match value {
OrderValue::Float(value) => serde_json::Number::from_f64(value)
.map(serde_json::Value::Number)
.unwrap_or(serde_json::Value::Null),
OrderValue::Int(value) => serde_json::Value::Number(serde_json::Number::from(value)),
}
}
}
impl TryFrom<serde_json::Value> for OrderValue {
type Error = ();
fn try_from(value: serde_json::Value) -> Result<Self, Self::Error> {
value
.as_i64()
.map(Self::from)
.or_else(|| value.as_f64().map(Self::from))
.ok_or(())
}
}
impl From<FloatPayloadType> for OrderValue {
fn from(value: FloatPayloadType) -> Self {
OrderValue::Float(value)
}
}
impl From<IntPayloadType> for OrderValue {
fn from(value: IntPayloadType) -> Self {
OrderValue::Int(value)
}
}
impl Eq for OrderValue {}
impl PartialEq for OrderValue {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(OrderValue::Float(a), OrderValue::Float(b)) => OrderedFloat(*a) == OrderedFloat(*b),
(OrderValue::Int(a), OrderValue::Int(b)) => a == b,
(OrderValue::Float(a), OrderValue::Int(b)) => a.num_eq(*b),
(OrderValue::Int(a), OrderValue::Float(b)) => a.num_eq(*b),
}
}
}
impl PartialOrd for OrderValue {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for OrderValue {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
match (self, other) {
(OrderValue::Float(a), OrderValue::Float(b)) => OrderedFloat(*a).cmp(&OrderedFloat(*b)),
(OrderValue::Int(a), OrderValue::Int(b)) => a.cmp(b),
(OrderValue::Float(a), OrderValue::Int(b)) => {
// num_cmp() might return None only if the float value is NaN. We follow the
// OrderedFloat logic here: the NaN is always greater than any other value.
a.num_cmp(*b).unwrap_or(std::cmp::Ordering::Greater)
}
(OrderValue::Int(a), OrderValue::Float(b)) => {
// Ditto, but the NaN is on the right side of the comparison.
a.num_cmp(*b).unwrap_or(std::cmp::Ordering::Less)
}
}
}
}
#[cfg(test)]
mod tests {
use proptest::proptest;
use crate::data_types::order_by::OrderValue;
proptest! {
#[test]
fn test_min_ordering_value(a in i64::MIN..0, b in f64::MIN..0.0) {
assert!(OrderValue::MIN.cmp(&OrderValue::from(a)).is_le());
assert!(OrderValue::MIN.cmp(&OrderValue::from(b)).is_le());
assert!(OrderValue::MIN.cmp(&OrderValue::from(f64::NAN)).is_le());
}
#[test]
fn test_max_ordering_value(a in 0..i64::MAX, b in 0.0..f64::MAX) {
assert!(OrderValue::MAX.cmp(&OrderValue::from(a)).is_ge());
assert!(OrderValue::MAX.cmp(&OrderValue::from(b)).is_ge());
assert!(OrderValue::MAX.cmp(&OrderValue::from(f64::NAN)).is_ge());
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/groups.rs | lib/segment/src/data_types/groups.rs | use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::json;
/// Value of the group_by key, shared across all the hits in the group
#[derive(Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq, Clone, Hash)]
#[serde(untagged)]
pub enum GroupId {
String(String),
NumberU64(u64),
NumberI64(i64),
}
impl From<u64> for GroupId {
fn from(id: u64) -> Self {
GroupId::NumberU64(id)
}
}
impl From<i64> for GroupId {
fn from(id: i64) -> Self {
GroupId::NumberI64(id)
}
}
impl From<String> for GroupId {
fn from(id: String) -> Self {
GroupId::String(id)
}
}
impl From<&str> for GroupId {
fn from(id: &str) -> Self {
GroupId::String(id.to_string())
}
}
impl From<GroupId> for serde_json::Value {
fn from(key: GroupId) -> Self {
match key {
GroupId::String(s) => serde_json::Value::String(s),
GroupId::NumberU64(n) => json!(n),
GroupId::NumberI64(n) => json!(n),
}
}
}
impl TryFrom<&serde_json::Value> for GroupId {
type Error = ();
/// Only allows Strings and Numbers to be converted into GroupId
fn try_from(value: &serde_json::Value) -> Result<Self, Self::Error> {
match value {
serde_json::Value::String(s) => Ok(Self::String(s.clone())),
serde_json::Value::Number(n) => {
if let Some(n_u64) = n.as_u64() {
Ok(Self::NumberU64(n_u64))
} else if let Some(n_i64) = n.as_i64() {
Ok(Self::NumberI64(n_i64))
} else {
Err(())
}
}
_ => Err(()),
}
}
}
impl GroupId {
pub fn as_u64(&self) -> Option<u64> {
match self {
GroupId::NumberI64(id) => u64::try_from(*id).ok(),
GroupId::NumberU64(id) => Some(*id),
GroupId::String(_) => None,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/src/data_types/facets.rs | lib/segment/src/data_types/facets.rs | use std::cmp::Reverse;
use std::hash::Hash;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use validator::Validate;
use crate::json_path::JsonPath;
use crate::types::{Filter, IntPayloadType, UuidIntType, ValueVariants};
#[derive(Clone, Debug, JsonSchema, Serialize, Deserialize, Validate, Hash)]
pub struct FacetParams {
pub key: JsonPath,
#[validate(range(min = 1))]
pub limit: usize,
#[validate(nested)]
pub filter: Option<Filter>,
#[serde(default)]
pub exact: bool,
}
impl FacetParams {
pub const DEFAULT_LIMIT: usize = 10;
pub const DEFAULT_EXACT: bool = false;
}
#[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum FacetValueRef<'a> {
Keyword(&'a str),
Int(&'a IntPayloadType),
Uuid(&'a u128),
Bool(bool),
}
impl FacetValueRef<'_> {
pub fn to_owned(&self) -> FacetValue {
match self {
FacetValueRef::Keyword(s) => FacetValue::Keyword((*s).to_string()),
FacetValueRef::Int(i) => FacetValue::Int(**i),
FacetValueRef::Uuid(uuid) => FacetValue::Uuid(**uuid),
FacetValueRef::Bool(b) => FacetValue::Bool(*b),
}
}
}
impl<'a> From<&'a str> for FacetValueRef<'a> {
fn from(s: &'a str) -> Self {
FacetValueRef::Keyword(s)
}
}
impl<'a> From<&'a IntPayloadType> for FacetValueRef<'a> {
fn from(i: &'a IntPayloadType) -> Self {
FacetValueRef::Int(i)
}
}
impl<'a> From<&'a UuidIntType> for FacetValueRef<'a> {
fn from(uuid: &'a UuidIntType) -> Self {
FacetValueRef::Uuid(uuid)
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Debug)]
pub enum FacetValue {
Keyword(String),
Int(IntPayloadType),
Uuid(UuidIntType),
Bool(bool),
// other types to add?
// Bool(bool),
// FloatRange(FloatRange),
}
pub trait FacetValueTrait: Clone + PartialEq + Eq + Hash + Ord {}
impl FacetValueTrait for FacetValue {}
impl FacetValueTrait for FacetValueRef<'_> {}
pub type FacetValueHit = FacetHit<FacetValue>;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct FacetHit<T: FacetValueTrait> {
pub value: T,
pub count: usize,
}
#[derive(Clone, Debug, Default)]
pub struct FacetResponse {
pub hits: Vec<FacetValueHit>,
}
impl<T: FacetValueTrait> Ord for FacetHit<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.count
.cmp(&other.count)
// Reverse so that descending order has ascending values when having the same count
.then_with(|| Reverse(&self.value).cmp(&Reverse(&other.value)))
}
}
impl<T: FacetValueTrait> PartialOrd for FacetHit<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl From<FacetValue> for ValueVariants {
fn from(value: FacetValue) -> Self {
match value {
FacetValue::Keyword(s) => ValueVariants::String(s),
FacetValue::Int(i) => ValueVariants::Integer(i),
FacetValue::Uuid(uuid) => ValueVariants::String(Uuid::from_u128(uuid).to_string()),
FacetValue::Bool(b) => ValueVariants::Bool(b),
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/segment_on_disk_snapshot.rs | lib/segment/tests/integration/segment_on_disk_snapshot.rs | use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::progress_tracker::ProgressTracker;
use common::tar_ext;
use fs_err as fs;
use fs_err::File;
use rstest::rstest;
use segment::data_types::index::{IntegerIndexParams, KeywordIndexParams};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::entry::snapshot_entry::SnapshotEntry as _;
use segment::index::hnsw_index::num_rayon_threads;
use segment::json_path::JsonPath;
use segment::segment::Segment;
use segment::segment_constructor::load_segment;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Distance, HnswConfig, Indexes, PayloadFieldSchema, PayloadSchemaParams, PayloadStorageType,
SegmentConfig, SnapshotFormat, VectorDataConfig, VectorStorageType,
};
use tempfile::Builder;
/// This test tests snapshotting and restoring a segment with all on-disk components.
#[rstest]
#[case::regular(SnapshotFormat::Regular)]
#[case::streamable(SnapshotFormat::Streamable)]
fn test_on_disk_segment_snapshot(#[case] format: SnapshotFormat) {
use common::counter::hardware_counter::HardwareCounterCell;
use segment::types::HnswGlobalConfig;
let _ = env_logger::builder().is_test(true).try_init();
let data = r#"
{
"names": ["John Doe", "Bill Murray"],
"ages": [43, 51],
"metadata": {
"height": 50,
"width": 60
}
}"#;
let segment_builder_dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment = build_simple_segment(segment_builder_dir.path(), 2, Distance::Dot).unwrap();
let hw_counter = HardwareCounterCell::new();
segment
.upsert_point(0, 0.into(), only_default_vector(&[1.0, 1.0]), &hw_counter)
.unwrap();
segment
.upsert_point(1, 1.into(), only_default_vector(&[2.0, 2.0]), &hw_counter)
.unwrap();
segment
.set_full_payload(
2,
0.into(),
&serde_json::from_str(data).unwrap(),
&hw_counter,
)
.unwrap();
segment
.set_full_payload(
3,
0.into(),
&serde_json::from_str(data).unwrap(),
&hw_counter,
)
.unwrap();
segment
.create_field_index(
4,
&JsonPath::new("names"),
Some(&PayloadFieldSchema::FieldParams(
PayloadSchemaParams::Keyword(KeywordIndexParams {
r#type: segment::data_types::index::KeywordIndexType::Keyword,
is_tenant: None,
on_disk: Some(true),
}),
)),
&hw_counter,
)
.unwrap();
segment
.create_field_index(
5,
&JsonPath::new("ages"),
Some(&PayloadFieldSchema::FieldParams(
PayloadSchemaParams::Integer(IntegerIndexParams {
r#type: segment::data_types::index::IntegerIndexType::Integer,
lookup: Some(true),
range: Some(true),
is_principal: None,
on_disk: Some(true),
}),
)),
&hw_counter,
)
.unwrap();
let segment_config = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: 2,
distance: Distance::Dot,
storage_type: VectorStorageType::Mmap, // mmap vectors
index: Indexes::Hnsw(HnswConfig {
m: 4,
ef_construct: 16,
full_scan_threshold: 8,
max_indexing_threads: 2,
on_disk: Some(true), // mmap index
payload_m: None,
inline_storage: None,
}),
quantization_config: None,
multivector_config: None,
datatype: None,
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: PayloadStorageType::Mmap,
};
let segment_base_dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment_builder_dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment_builder = SegmentBuilder::new(
segment_base_dir.path(),
segment_builder_dir.path(),
&segment_config,
&HnswGlobalConfig::default(),
)
.unwrap();
segment_builder.update(&[&segment], &false.into()).unwrap();
let mut rng = rand::rng();
let segment = segment_builder
.build(
ResourcePermit::dummy(num_rayon_threads(0) as u32),
&false.into(),
&mut rng,
&HardwareCounterCell::new(),
ProgressTracker::new_for_test(),
)
.unwrap();
let temp_dir = Builder::new().prefix("temp_dir").tempdir().unwrap();
// The segment snapshot is a part of a parent collection/shard snapshot.
let parent_snapshot_tar = Builder::new()
.prefix("parent_snapshot")
.suffix(".tar")
.tempfile()
.unwrap();
let segment_id = segment
.current_path
.file_stem()
.and_then(|f| f.to_str())
.unwrap();
// snapshotting!
let tar =
tar_ext::BuilderExt::new_seekable_owned(File::create(parent_snapshot_tar.path()).unwrap());
segment
.take_snapshot(temp_dir.path(), &tar, format, None)
.unwrap();
tar.blocking_finish().unwrap();
let parent_snapshot_unpacked = Builder::new().prefix("parent_snapshot").tempdir().unwrap();
tar::Archive::new(File::open(parent_snapshot_tar.path()).unwrap())
.unpack(parent_snapshot_unpacked.path())
.unwrap();
// Should be exactly one entry in the snapshot.
let mut entries = fs::read_dir(parent_snapshot_unpacked.path()).unwrap();
let entry = entries.next().unwrap().unwrap();
assert!(entries.next().is_none());
match format {
SnapshotFormat::Ancient => unreachable!("The old days are gone"),
SnapshotFormat::Regular => {
assert_eq!(entry.file_name(), format!("{segment_id}.tar").as_str());
assert!(entry.path().is_file());
}
SnapshotFormat::Streamable => {
assert_eq!(entry.file_name(), segment_id);
assert!(entry.path().is_dir());
}
}
// restore snapshot
Segment::restore_snapshot_in_place(&entry.path()).unwrap();
// Should be exactly one entry in the snapshot.
let mut entries = fs::read_dir(parent_snapshot_unpacked.path()).unwrap();
let entry = entries.next().unwrap().unwrap();
assert!(entries.next().is_none());
// It should be unpacked entry, not tar archive.
assert!(entry.path().is_dir());
assert_eq!(entry.file_name(), segment_id);
let restored_segment = load_segment(&entry.path(), &AtomicBool::new(false))
.unwrap()
.unwrap();
// validate restored snapshot is the same as original segment
assert_eq!(
segment.total_point_count(),
restored_segment.total_point_count(),
);
assert_eq!(
segment.available_point_count(),
restored_segment.available_point_count(),
);
assert_eq!(
segment.deleted_point_count(),
restored_segment.deleted_point_count(),
);
let hw_counter = HardwareCounterCell::new();
for id in segment.iter_points() {
let vectors = segment.all_vectors(id, &hw_counter).unwrap();
let restored_vectors = restored_segment.all_vectors(id, &hw_counter).unwrap();
assert_eq!(vectors, restored_vectors);
let payload = segment.payload(id, &hw_counter).unwrap();
let restored_payload = restored_segment.payload(id, &hw_counter).unwrap();
assert_eq!(payload, restored_payload);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/byte_storage_hnsw_test.rs | lib/segment/tests/integration/byte_storage_hnsw_test.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::{ScoredPointOffset, TelemetryDetail};
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, QueryVector, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_dense_byte_vector, random_int_payload};
use segment::fixtures::query_fixtures::QueryVariant;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::{PayloadIndex, VectorIndex};
use segment::segment_constructor::build_segment;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, Indexes, Range, SearchParams,
SegmentConfig, SeqNumberType, VectorDataConfig, VectorStorageDatatype, VectorStorageType,
};
use segment::vector_storage::VectorStorageEnum;
use tempfile::Builder;
fn random_query<R: Rng + ?Sized>(variant: &QueryVariant, rng: &mut R, dim: usize) -> QueryVector {
segment::fixtures::query_fixtures::random_query(variant, rng, |rng| {
random_dense_byte_vector(rng, dim).into()
})
}
fn compare_search_result(result_a: &[Vec<ScoredPointOffset>], result_b: &[Vec<ScoredPointOffset>]) {
assert_eq!(result_a.len(), result_b.len());
for (a, b) in result_a.iter().zip(result_b) {
assert_eq!(a.len(), b.len());
for (a, b) in a.iter().zip(b) {
assert_eq!(a.idx, b.idx);
assert!((a.score - b.score).abs() < 1e-3);
}
}
}
#[rstest]
#[case::nearest(QueryVariant::Nearest, VectorStorageDatatype::Uint8, 32, 10)]
#[case::nearest(QueryVariant::Nearest, VectorStorageDatatype::Float16, 32, 10)]
#[case::discovery(QueryVariant::Discovery, VectorStorageDatatype::Uint8, 128, 20)]
#[case::reco_best_score(QueryVariant::RecoBestScore, VectorStorageDatatype::Float16, 64, 20)]
#[case::reco_sum_scores(QueryVariant::RecoSumScores, VectorStorageDatatype::Float16, 64, 20)]
fn test_byte_storage_hnsw(
#[case] query_variant: QueryVariant,
#[case] storage_data_type: VectorStorageDatatype,
#[case] ef: usize,
#[case] max_failures: usize, // out of 100
) {
use common::counter::hardware_counter::HardwareCounterCell;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::types::{HnswGlobalConfig, PayloadSchemaType};
let stopped = AtomicBool::new(false);
let dim = 8;
let m = 8;
let num_vectors: u64 = 5_000;
let ef_construct = 16;
let distance = Distance::Cosine;
let full_scan_threshold = 0;
let num_payload_values = 2;
let mut rng = StdRng::seed_from_u64(42);
let dir_float = Builder::new()
.prefix("segment_dir_float")
.tempdir()
.unwrap();
let dir_byte = Builder::new().prefix("segment_dir_byte").tempdir().unwrap();
let hnsw_dir_byte = Builder::new().prefix("hnsw_dir_byte").tempdir().unwrap();
let config_byte = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: dim,
distance,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: Some(storage_data_type),
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
};
let int_key = "int";
let mut segment_float = build_simple_segment(dir_float.path(), dim, distance).unwrap();
let mut segment_byte = build_segment(dir_byte.path(), &config_byte, true).unwrap();
// check that `segment_byte` uses byte or half storage
{
let borrowed_storage = segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.borrow();
let raw_storage: &VectorStorageEnum = &borrowed_storage;
#[cfg(feature = "rocksdb")]
assert!(matches!(
raw_storage,
&VectorStorageEnum::DenseSimpleByte(_) | &VectorStorageEnum::DenseSimpleHalf(_),
));
#[cfg(not(feature = "rocksdb"))]
assert!(matches!(
raw_storage,
&VectorStorageEnum::DenseAppendableInRamByte(_)
| &VectorStorageEnum::DenseAppendableInRamHalf(_),
));
}
for n in 0..num_vectors {
let idx = n.into();
let vector = random_dense_byte_vector(&mut rng, dim);
let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
let payload = payload_json! {int_key: int_payload};
let hw_counter = HardwareCounterCell::new();
segment_float
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment_float
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
segment_byte
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment_byte
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
}
let hw_counter = HardwareCounterCell::new();
segment_float
.payload_index
.borrow_mut()
.set_indexed(
&JsonPath::new(int_key),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
segment_byte
.payload_index
.borrow_mut()
.set_indexed(
&JsonPath::new(int_key),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = 1; // single-threaded for deterministic build
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let hnsw_index_byte = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir_byte.path(),
id_tracker: segment_byte.id_tracker.clone(),
vector_storage: segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.clone(),
quantized_vectors: segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.quantized_vectors
.clone(),
payload_index: segment_byte.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 3;
let mut hits = 0;
let attempts = 100;
for i in 0..attempts {
let query = random_query(&query_variant, &mut rng, dim);
let range_size = 40;
let left_range = rng.random_range(0..400);
let right_range = left_range + range_size;
let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
JsonPath::new(int_key),
Range {
lt: None,
gt: None,
gte: Some(OrderedFloat(f64::from(left_range))),
lte: Some(OrderedFloat(f64::from(right_range))),
},
)));
let filter_query = Some(&filter);
let index_result_byte = hnsw_index_byte
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
..Default::default()
}),
&Default::default(),
)
.unwrap();
// check that search was performed using HNSW index
assert_eq!(
hnsw_index_byte
.get_telemetry_data(TelemetryDetail::default())
.filtered_large_cardinality
.count,
i + 1
);
let plain_result_float = segment_float.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&query], filter_query, top, None, &Default::default())
.unwrap();
let plain_result_byte = segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&query], filter_query, top, None, &Default::default())
.unwrap();
if storage_data_type == VectorStorageDatatype::Uint8 {
compare_search_result(&plain_result_float, &plain_result_byte);
}
if plain_result_byte == index_result_byte {
hits += 1;
}
}
assert!(
attempts - hits <= max_failures,
"hits: {hits} of {attempts}"
);
eprintln!("hits = {hits:#?} out of {attempts}");
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/segment_tests.rs | lib/segment/tests/integration/segment_tests.rs | use std::iter::FromIterator;
use std::sync::atomic::AtomicBool;
use ahash::AHashSet;
use common::counter::hardware_counter::HardwareCounterCell;
use fs_err as fs;
use itertools::Itertools;
use segment::common::operation_error::OperationError;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, VectorRef, VectorStructInternal, only_default_vector,
};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::index_fixtures::random_vector;
use segment::segment_constructor::load_segment;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{Condition, Distance, Filter, SearchParams, SegmentType, WithPayload};
use tempfile::Builder;
use crate::fixtures::segment::{build_segment_1, build_segment_3};
#[test]
fn test_point_exclusion() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment = build_segment_1(dir.path());
assert!(segment.has_point(3.into()));
let query_vector = [1.0, 1.0, 1.0, 1.0].into();
let res = segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
None,
1,
None,
)
.unwrap();
let best_match = res.first().expect("Non-empty result");
assert_eq!(best_match.id, 3.into());
let ids: AHashSet<_> = AHashSet::from_iter([3.into()]);
let frt = Filter::new_must_not(Condition::HasId(ids.into()));
let res = segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
Some(&frt),
1,
None,
)
.unwrap();
let best_match = res.first().expect("Non-empty result");
assert_ne!(best_match.id, 3.into());
let point_ids1: Vec<_> = segment.iter_points().collect();
let point_ids2: Vec<_> = segment.iter_points().collect();
assert!(!point_ids1.is_empty());
assert!(!point_ids2.is_empty());
assert_eq!(&point_ids1, &point_ids2)
}
#[test]
fn test_named_vector_search() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment = build_segment_3(dir.path());
assert!(segment.has_point(3.into()));
let query_vector = [1.0, 1.0, 1.0, 1.0].into();
let res = segment
.search(
"vector1",
&query_vector,
&WithPayload::default(),
&false.into(),
None,
1,
None,
)
.unwrap();
let best_match = res.first().expect("Non-empty result");
assert_eq!(best_match.id, 3.into());
let ids: AHashSet<_> = AHashSet::from_iter([3.into()]);
let frt = Filter {
should: None,
min_should: None,
must: None,
must_not: Some(vec![Condition::HasId(ids.into())]),
};
let res = segment
.search(
"vector1",
&query_vector,
&WithPayload::default(),
&false.into(),
Some(&frt),
1,
None,
)
.unwrap();
let best_match = res.first().expect("Non-empty result");
assert_ne!(best_match.id, 3.into());
let point_ids1: Vec<_> = segment.iter_points().collect();
let point_ids2: Vec<_> = segment.iter_points().collect();
assert!(!point_ids1.is_empty());
assert!(!point_ids2.is_empty());
assert_eq!(&point_ids1, &point_ids2)
}
#[test]
fn test_missed_vector_name() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment = build_segment_3(dir.path());
let hw_counter = HardwareCounterCell::new();
let exists = segment
.upsert_point(
7,
1.into(),
NamedVectors::from_pairs([
("vector2".into(), vec![10.]),
("vector3".into(), vec![5., 6., 7., 8.]),
]),
&hw_counter,
)
.unwrap();
assert!(exists, "this partial vector should overwrite existing");
let exists = segment
.upsert_point(
8,
6.into(),
NamedVectors::from_pairs([
("vector2".into(), vec![10.]),
("vector3".into(), vec![5., 6., 7., 8.]),
]),
&hw_counter,
)
.unwrap();
assert!(!exists, "this partial vector should not existing");
}
#[test]
fn test_vector_name_not_exists() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment = build_segment_3(dir.path());
let hw_counter = HardwareCounterCell::new();
let result = segment.upsert_point(
6,
6.into(),
NamedVectors::from_pairs([
("vector1".into(), vec![5., 6., 7., 8.]),
("vector2".into(), vec![10.]),
("vector3".into(), vec![5., 6., 7., 8.]),
("vector4".into(), vec![5., 6., 7., 8.]),
]),
&hw_counter,
);
if let Err(OperationError::VectorNameNotExists { received_name }) = result {
assert_eq!(received_name, "vector4");
} else {
panic!("wrong upsert result")
}
}
#[test]
fn ordered_deletion_test() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let path = {
let mut segment = build_segment_1(dir.path());
segment.delete_point(6, 5.into(), &hw_counter).unwrap();
segment.delete_point(6, 4.into(), &hw_counter).unwrap();
segment.flush(false).unwrap();
segment.current_path.clone()
};
let segment = load_segment(&path, &AtomicBool::new(false))
.unwrap()
.unwrap();
let query_vector = [1.0, 1.0, 1.0, 1.0].into();
let res = segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
None,
1,
None,
)
.unwrap();
let best_match = res.first().expect("Non-empty result");
assert_eq!(best_match.id, 3.into());
}
#[test]
fn skip_deleted_segment() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let path = {
let mut segment = build_segment_1(dir.path());
segment.delete_point(6, 5.into(), &hw_counter).unwrap();
segment.delete_point(6, 4.into(), &hw_counter).unwrap();
segment.flush(false).unwrap();
segment.current_path.clone()
};
let new_path = path.with_extension("deleted");
fs::rename(&path, new_path).unwrap();
let segment = load_segment(&path, &AtomicBool::new(false)).unwrap();
assert!(segment.is_none());
}
#[test]
fn test_update_named_vector() {
let num_points = 25;
let dim = 4;
let mut rng = rand::rng();
let distance = Distance::Cosine;
let vectors = (0..num_points)
.map(|_| random_vector(&mut rng, dim))
.collect_vec();
let hw_counter = HardwareCounterCell::new();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
for (i, vec) in vectors.iter().enumerate() {
let i = i as u64;
segment
.upsert_point(i, i.into(), only_default_vector(vec), &hw_counter)
.unwrap();
}
let query_vector = random_vector(&mut rng, dim).into();
// do exact search
let search_params = SearchParams {
exact: true,
..Default::default()
};
let nearest_upsert = segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&false.into(),
&true.into(),
None,
1,
Some(&search_params),
)
.unwrap();
let nearest_upsert = nearest_upsert.first().unwrap();
let sqrt_distance = |v: &[f32]| -> f32 { v.iter().map(|x| x * x).sum::<f32>().sqrt() };
// check if nearest_upsert is normalized
match &nearest_upsert.vector {
Some(VectorStructInternal::Single(v)) => {
assert!((sqrt_distance(v) - 1.).abs() < 1e-5);
}
Some(VectorStructInternal::Named(v)) => {
let v: VectorRef = (&v[DEFAULT_VECTOR_NAME]).into();
let v: &[_] = v.try_into().unwrap();
assert!((sqrt_distance(v) - 1.).abs() < 1e-5);
}
_ => panic!("unexpected vector type"),
}
// update vector using the same values
for (i, vec) in vectors.iter().enumerate() {
let i = i as u64;
segment
.update_vectors(
i + num_points as u64,
i.into(),
only_default_vector(vec),
&hw_counter,
)
.unwrap();
}
// do search after update
let nearest_update = segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&false.into(),
&true.into(),
None,
1,
Some(&search_params),
)
.unwrap();
let nearest_update = nearest_update.first().unwrap();
// check that nearest_upsert is normalized
match &nearest_update.vector {
Some(VectorStructInternal::Single(v)) => {
assert!((sqrt_distance(v) - 1.).abs() < 1e-5);
}
Some(VectorStructInternal::Named(v)) => {
let v: VectorRef = (&v[DEFAULT_VECTOR_NAME]).into();
let v: &[_] = v.try_into().unwrap();
assert!((sqrt_distance(v) - 1.).abs() < 1e-5);
}
_ => panic!("unexpected vector type"),
}
// check that nearests are the same
assert_eq!(nearest_upsert.id, nearest_update.id);
}
#[test]
fn test_plain_search_top_zero() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let segment = build_segment_1(dir.path());
assert_eq!(segment.segment_type(), SegmentType::Plain);
segment
.search(
DEFAULT_VECTOR_NAME,
&[1.0, 1.0, 1.0, 1.0].into(),
&WithPayload::default(),
&false.into(),
None,
0,
None,
)
.unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/hnsw_incremental_build.rs | lib/segment/tests/integration/hnsw_incremental_build.rs | use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use itertools::Itertools as _;
use rand::rngs::StdRng;
use rand::seq::SliceRandom as _;
use rand::{Rng, SeedableRng as _};
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, QueryVector, VectorElementType, only_default_vector,
};
use segment::entry::SegmentEntry as _;
use segment::fixtures::index_fixtures::random_vector;
use segment::index::VectorIndexEnum;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::segment::Segment;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{Distance, ExtendedPointId, HnswConfig, HnswGlobalConfig, SeqNumberType};
use tap::Tap as _;
use tempfile::Builder;
use crate::hnsw_quantized_search_test::check_matches;
const NUM_POINTS: usize = 5_000;
const ITERATIONS: usize = 10;
const DIM: usize = 8;
const M: usize = 16;
const EF_CONSTRUCT: usize = 64;
const DISTANCE: Distance = Distance::Cosine;
#[test]
fn hnsw_incremental_build() {
let _ = env_logger::builder()
.is_test(true)
.filter_level(log::LevelFilter::Trace)
.try_init();
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new()
.prefix("hnsw_incremental_build")
.tempdir()
.unwrap();
let ids = std::iter::repeat_with(|| ExtendedPointId::NumId(rng.random()))
.unique()
.take(NUM_POINTS)
.collect_vec();
let vectors = std::iter::repeat_with(|| random_vector(&mut rng, DIM))
.take(NUM_POINTS)
.collect_vec();
let vector_refs = vectors.iter().map(|v| v.as_slice()).collect_vec();
let num_queries = 10;
let query_vectors: Vec<QueryVector> = (0..num_queries)
.map(|_| random_vector(&mut rng, DIM).into())
.collect();
let mut last_index = None;
for i in 1..=ITERATIONS {
log::info!(
"Building segment with {:.0}% of data",
i as f64 / ITERATIONS as f64 * 100.0
);
let num_points = i * NUM_POINTS / ITERATIONS;
let segment = make_segment(
&mut rng,
&dir.path().join(format!("segment_{i}")),
&ids[0..num_points],
&vector_refs[0..num_points],
);
let index_path = dir.path().join(format!("hnsw_{i}"));
let old_indices = last_index
.as_ref()
.map_or(vec![], |idx| vec![Arc::clone(idx)]);
let index = build_hnsw_index(&mut rng, &index_path, &segment, &old_indices);
let ef = 64;
let top = 10;
check_matches(&query_vectors, &segment, &index, None, ef, top);
last_index = Some(Arc::new(AtomicRefCell::new(VectorIndexEnum::Hnsw(index))));
}
}
fn make_segment(
rng: &mut StdRng,
path: &Path,
ids: &[ExtendedPointId],
vectors: &[&[VectorElementType]],
) -> Segment {
let mut sequence = (0..ids.len()).collect_vec();
sequence.shuffle(rng);
let hw_counter = HardwareCounterCell::new();
let mut segment = build_simple_segment(path, DIM, DISTANCE).unwrap();
for n in sequence {
let vector = only_default_vector(vectors[n]);
segment
.upsert_point(n as SeqNumberType, ids[n], vector, &hw_counter)
.unwrap();
}
segment
}
/// Builds an HNSW index over `segment`'s default dense vectors at `path`.
///
/// `old_indices` may carry the index built in a previous iteration; the
/// `incremental_hnsw_building` feature flag is enabled below so the build can
/// take those old graphs into account.
fn build_hnsw_index<R: Rng + ?Sized>(
    rng: &mut R,
    path: &Path,
    segment: &Segment,
    old_indices: &[Arc<AtomicRefCell<VectorIndexEnum>>],
) -> HNSWIndex {
    log::info!("Building HNSW index for {:?}", path.file_name().unwrap());
    let hnsw_config = HnswConfig {
        m: M,
        ef_construct: EF_CONSTRUCT,
        // Minimal threshold: searches should always go through the HNSW graph.
        full_scan_threshold: 1,
        // NOTE(review): 0 appears to mean "pick automatically" — resolved via
        // `num_rayon_threads` below; confirm against its definition.
        max_indexing_threads: 0,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };
    let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
    HNSWIndex::build(
        HnswIndexOpenArgs {
            path,
            id_tracker: segment.id_tracker.clone(),
            vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
                .vector_storage
                .clone(),
            // No quantization in this test.
            quantized_vectors: Default::default(),
            payload_index: Arc::clone(&segment.payload_index),
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices,
            gpu_device: None,
            rng,
            // Never cancelled in tests.
            stopped: &AtomicBool::new(false),
            hnsw_global_config: &HnswGlobalConfig::default(),
            // Turn on incremental building so `old_indices` are considered.
            feature_flags: FeatureFlags::default().tap_mut(|flags| {
                flags.incremental_hnsw_building = true;
            }),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/filtrable_hnsw_test.rs | lib/segment/tests/integration/filtrable_hnsw_test.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::{PointOffsetType, TelemetryDetail};
use itertools::Itertools;
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, QueryVector, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_vector};
use segment::fixtures::query_fixtures::QueryVariant;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::{PayloadIndex, VectorIndex};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, PayloadSchemaType,
Range, SearchParams, SeqNumberType,
};
use tempfile::Builder;
/// Produces a random query of the requested `variant`, sampling every inner
/// vector uniformly at random with dimensionality `dim`.
fn random_query<R: Rng + ?Sized>(variant: &QueryVariant, rng: &mut R, dim: usize) -> QueryVector {
    let sample_vector = move |rng: &mut R| random_vector(rng, dim).into();
    segment::fixtures::query_fixtures::random_query(variant, rng, sample_vector)
}
/// Parameterized entry point for the filtered-HNSW accuracy test: each case
/// fixes a query variant, a search `ef`, and the tolerated number of
/// mismatches against exact search (out of 100 attempts).
#[rstest]
#[case::nearest(QueryVariant::Nearest, 32, 5)]
#[case::discovery(QueryVariant::Discovery, 128, 10)] // tests that check better precision are in `hnsw_discover_test.rs`
#[case::reco_best_score(QueryVariant::RecoBestScore, 64, 10)]
#[case::reco_sum_scores(QueryVariant::RecoSumScores, 64, 10)]
fn test_filterable_hnsw(
    #[case] query_variant: QueryVariant,
    #[case] ef: usize,
    #[case] max_failures: usize, // out of 100
) {
    _test_filterable_hnsw(query_variant, ef, max_failures);
}
/// Core of `test_filterable_hnsw`: builds a 5k-point segment with a random
/// integer payload, indexes that payload field, builds an HNSW index, and then
/// compares filtered HNSW search results against exact (plain) search.
///
/// Since HNSW is approximate, up to `max_failures` of the 100 attempts may
/// disagree with plain search before the test fails.
fn _test_filterable_hnsw(
    query_variant: QueryVariant,
    ef: usize,
    max_failures: usize, // out of 100
) {
    let stopped = AtomicBool::new(false);
    let dim = 8;
    let m = 8;
    let num_vectors: u64 = 5_000;
    let ef_construct = 16;
    let distance = Distance::Cosine;
    let full_scan_threshold = 16; // KB
    let indexing_threshold = 500; // num vectors
    let num_payload_values = 2;
    // Fixed seed keeps the whole test deterministic.
    let mut rng = StdRng::seed_from_u64(42);
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
    let int_key = "int";
    let hw_counter = HardwareCounterCell::new();
    // Populate the segment: one random vector plus a small random int payload
    // per point.
    let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
    for n in 0..num_vectors {
        let idx = n.into();
        let vector = random_vector(&mut rng, dim);
        let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
        let payload = payload_json! {int_key: int_payload};
        segment
            .upsert_point(
                n as SeqNumberType,
                idx,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
        segment
            .set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
            .unwrap();
    }
    let payload_index_ptr = segment.payload_index.clone();
    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold,
        max_indexing_threads: 2,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };
    let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
    let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
    // Index the int payload field so payload blocks can be generated from it.
    payload_index_ptr
        .borrow_mut()
        .set_indexed(
            &JsonPath::new(int_key),
            PayloadSchemaType::Integer,
            &hw_counter,
        )
        .unwrap();
    let borrowed_payload_index = payload_index_ptr.borrow();
    let blocks = borrowed_payload_index
        .payload_blocks(&JsonPath::new(int_key), indexing_threshold)
        .collect_vec();
    for block in blocks.iter() {
        assert!(
            block.condition.range.is_some(),
            "only range conditions should be generated for this type of payload"
        );
    }
    // Sanity check: the generated payload blocks must jointly cover every point.
    let mut coverage: HashMap<PointOffsetType, usize> = Default::default();
    let px = payload_index_ptr.borrow();
    for block in &blocks {
        let filter = Filter::new_must(Condition::Field(block.condition.clone()));
        let points = px.query_points(&filter, &hw_counter, &stopped);
        for point in points {
            coverage.insert(point, coverage.get(&point).unwrap_or(&0) + 1);
        }
    }
    let expected_blocks = num_vectors as usize / indexing_threshold * 2;
    eprintln!("blocks.len() = {:#?}", blocks.len());
    assert!(
        (blocks.len() as i64 - expected_blocks as i64).abs() <= 3,
        "real number of payload blocks is too far from expected"
    );
    assert_eq!(
        coverage.len(),
        num_vectors as usize,
        "not all points are covered by payload blocks"
    );
    let permit_cpu_count = 1; // single-threaded for deterministic build
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: vector_storage.clone(),
            quantized_vectors: quantized_vectors.clone(),
            payload_index: payload_index_ptr.clone(),
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();
    // Compare filtered HNSW search against exact plain search on random
    // queries with random 40-wide payload range filters.
    let top = 3;
    let mut hits = 0;
    let attempts = 100;
    for i in 0..attempts {
        let query = random_query(&query_variant, &mut rng, dim);
        let range_size = 40;
        let left_range = rng.random_range(0..400);
        let right_range = left_range + range_size;
        let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
            JsonPath::new(int_key),
            Range {
                lt: None,
                gt: None,
                gte: Some(OrderedFloat(f64::from(left_range))),
                lte: Some(OrderedFloat(f64::from(right_range))),
            },
        )));
        let filter_query = Some(&filter);
        let index_result = hnsw_index
            .search(
                &[&query],
                filter_query,
                top,
                Some(&SearchParams {
                    hnsw_ef: Some(ef),
                    ..Default::default()
                }),
                &Default::default(),
            )
            .unwrap();
        // check that search was performed using HNSW index
        assert_eq!(
            hnsw_index
                .get_telemetry_data(TelemetryDetail::default())
                .filtered_large_cardinality
                .count,
            i + 1
        );
        let plain_result = segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_index
            .borrow()
            .search(&[&query], filter_query, top, None, &Default::default())
            .unwrap();
        if plain_result == index_result {
            hits += 1;
        }
    }
    assert!(
        attempts - hits <= max_failures,
        "hits: {hits} of {attempts}"
    ); // Not more than X% failures
    eprintln!("hits = {hits:#?} out of {attempts}");
}
/// Searching with `top = 0` must succeed (and trivially return nothing) on
/// both code paths: the plain fallback (small segment, huge full-scan
/// threshold) and the real HNSW graph (large segment, threshold of 1).
#[rstest]
#[case::plain(50, 16 * 1024)]
#[case::index(1_000, 1)]
fn test_hnsw_search_top_zero(#[case] num_vectors: u64, #[case] full_scan_threshold_kb: usize) {
    let stopped = AtomicBool::new(false);
    let dim = 8;
    let m = 8;
    let ef_construct = 16;
    let distance = Distance::Cosine;
    let indexing_threshold = 500; // num vectors
    let num_payload_values = 2;
    // Fixed seed keeps the test deterministic.
    let mut rng = StdRng::seed_from_u64(42);
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
    let int_key = "int";
    let hw_counter = HardwareCounterCell::new();
    // Populate the segment with random vectors and random int payloads.
    let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
    for n in 0..num_vectors {
        let idx = n.into();
        let vector = random_vector(&mut rng, dim);
        let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
        let payload = payload_json! {int_key: int_payload};
        segment
            .upsert_point(
                n as SeqNumberType,
                idx,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
        segment
            .set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
            .unwrap();
    }
    let payload_index_ptr = segment.payload_index.clone();
    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold: full_scan_threshold_kb,
        max_indexing_threads: 2,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };
    let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
    let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
    payload_index_ptr
        .borrow_mut()
        .set_indexed(
            &JsonPath::new(int_key),
            PayloadSchemaType::Integer,
            &hw_counter,
        )
        .unwrap();
    let borrowed_payload_index = payload_index_ptr.borrow();
    let blocks = borrowed_payload_index
        .payload_blocks(&JsonPath::new(int_key), indexing_threshold)
        .collect_vec();
    for block in blocks.iter() {
        assert!(
            block.condition.range.is_some(),
            "only range conditions should be generated for this type of payload"
        );
    }
    let permit_cpu_count = 1; // single-threaded for deterministic build
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: vector_storage.clone(),
            quantized_vectors: quantized_vectors.clone(),
            payload_index: payload_index_ptr.clone(),
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();
    // The actual check: a top-0 search must not error out.
    let top = 0;
    let query = random_query(&QueryVariant::Nearest, &mut rng, dim);
    hnsw_index
        .search(
            &[&query],
            None,
            top,
            Some(&Default::default()),
            &Default::default(),
        )
        .unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/filtering_context_check.rs | lib/segment/tests/integration/filtering_context_check.rs | use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use itertools::Itertools;
use rand::SeedableRng;
use rand::prelude::StdRng;
use segment::fixtures::payload_context_fixture::{
create_plain_payload_index, create_struct_payload_index,
};
use segment::fixtures::payload_fixtures::random_filter;
use segment::index::PayloadIndex;
use tempfile::Builder;
const NUM_POINTS: usize = 2000;
const ATTEMPTS: usize = 100;
/// Property test: for random filters, the plain (scan-based) payload index and
/// the struct (field-indexed) payload index must accept exactly the same set
/// of points.
#[test]
fn test_filtering_context_consistency() {
    let seed = 42;
    let mut rng = StdRng::seed_from_u64(seed);
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let plain_index = create_plain_payload_index(dir.path(), NUM_POINTS, seed);
    let struct_index = create_struct_payload_index(dir.path(), NUM_POINTS, seed);
    let hw_counter = HardwareCounterCell::new();
    for _attempt in 0..ATTEMPTS {
        let filter = random_filter(&mut rng, 3);
        let plain_ctx = plain_index.filter_context(&filter, &hw_counter);
        let struct_ctx = struct_index.filter_context(&filter, &hw_counter);
        // Evaluate both contexts over every point and collect the matches.
        let mut matched_plain = Vec::new();
        let mut matched_struct = Vec::new();
        for point_id in 0..NUM_POINTS {
            let offset = point_id as PointOffsetType;
            if plain_ctx.check(offset) {
                matched_plain.push(point_id);
            }
            if struct_ctx.check(offset) {
                matched_struct.push(point_id);
            }
        }
        assert_eq!(matched_plain, matched_struct, "filter: {filter:#?}");
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/exact_search_test.rs | lib/segment/tests/integration/exact_search_test.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::PointOffsetType;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use rand::Rng;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_vector};
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::index::{PayloadIndex, VectorIndex};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, PayloadSchemaType,
Range, SearchParams, SeqNumberType,
};
use tempfile::Builder;
/// End-to-end check that HNSW search with `exact: true` returns exactly the
/// same results as plain (brute-force) search, both without a filter and with
/// a range filter over an indexed integer payload field.
#[test]
fn exact_search_test() {
    // Cancellation flag shared by all long-running operations in this test;
    // never set. (Previously a second, redundant `is_stopped` flag existed.)
    let stopped = AtomicBool::new(false);
    let dim = 8;
    let m = 8;
    let num_vectors: u64 = 5_000;
    let ef = 32;
    let ef_construct = 16;
    let distance = Distance::Cosine;
    let full_scan_threshold = 16; // KB
    let indexing_threshold = 500; // num vectors
    let num_payload_values = 2;
    let mut rng = rand::rng();
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
    let int_key = "int";
    let hw_counter = HardwareCounterCell::new();
    // Populate the segment: one random vector plus a random int payload per point.
    let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
    for n in 0..num_vectors {
        let idx = n.into();
        let vector = random_vector(&mut rng, dim);
        let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
        let payload = payload_json! {int_key: int_payload};
        segment
            .upsert_point(
                n as SeqNumberType,
                idx,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
        segment
            .set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
            .unwrap();
    }
    let payload_index_ptr = segment.payload_index.clone();
    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold,
        max_indexing_threads: 2,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };
    // Index the int payload field so payload blocks can be generated from it.
    payload_index_ptr
        .borrow_mut()
        .set_indexed(
            &JsonPath::new(int_key),
            PayloadSchemaType::Integer,
            &hw_counter,
        )
        .unwrap();
    let borrowed_payload_index = payload_index_ptr.borrow();
    let blocks = borrowed_payload_index
        .payload_blocks(&JsonPath::new(int_key), indexing_threshold)
        .collect_vec();
    for block in blocks.iter() {
        assert!(
            block.condition.range.is_some(),
            "only range conditions should be generated for this type of payload"
        );
    }
    // Sanity check: the generated payload blocks must jointly cover every point.
    // A single shared borrow suffices for all block queries (hoisted out of the
    // loop, matching the sibling tests).
    let mut coverage: HashMap<PointOffsetType, usize> = Default::default();
    let px = payload_index_ptr.borrow();
    for block in &blocks {
        let filter = Filter::new_must(Condition::Field(block.condition.clone()));
        let points = px.query_points(&filter, &hw_counter, &stopped);
        for point in points {
            // Entry API: one hash lookup instead of get + insert.
            *coverage.entry(point).or_insert(0) += 1;
        }
    }
    let expected_blocks = num_vectors as usize / indexing_threshold * 2;
    eprintln!("blocks.len() = {:#?}", blocks.len());
    assert!(
        (blocks.len() as i64 - expected_blocks as i64).abs() <= 3,
        "real number of payload blocks is too far from expected"
    );
    assert_eq!(
        coverage.len(),
        num_vectors as usize,
        "not all points are covered by payload blocks"
    );
    let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
                .vector_storage
                .clone(),
            quantized_vectors: segment.vector_data[DEFAULT_VECTOR_NAME]
                .quantized_vectors
                .clone(),
            payload_index: payload_index_ptr.clone(),
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();
    // `exact: true` must reproduce plain search results bit-for-bit.
    let top = 3;
    let attempts = 50;
    for _i in 0..attempts {
        let query = random_vector(&mut rng, dim).into();
        // Unfiltered exact search vs. plain search.
        let index_result = hnsw_index
            .search(
                &[&query],
                None,
                top,
                Some(&SearchParams {
                    hnsw_ef: Some(ef),
                    exact: true,
                    ..Default::default()
                }),
                &Default::default(),
            )
            .unwrap();
        let plain_result = segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_index
            .borrow()
            .search(&[&query], None, top, None, &Default::default())
            .unwrap();
        assert_eq!(
            index_result, plain_result,
            "Exact search is not equal to plain search"
        );
        // Filtered exact search vs. filtered plain search, using a random
        // 40-wide range on the indexed int payload field.
        let range_size = 40;
        let left_range = rng.random_range(0..400);
        let right_range = left_range + range_size;
        let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
            JsonPath::new(int_key),
            Range {
                lt: None,
                gt: None,
                gte: Some(OrderedFloat(f64::from(left_range))),
                lte: Some(OrderedFloat(f64::from(right_range))),
            },
        )));
        let filter_query = Some(&filter);
        let index_result = hnsw_index
            .search(
                &[&query],
                filter_query,
                top,
                Some(&SearchParams {
                    hnsw_ef: Some(ef),
                    exact: true,
                    ..Default::default()
                }),
                &Default::default(),
            )
            .unwrap();
        let plain_result = segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_index
            .borrow()
            .search(&[&query], filter_query, top, None, &Default::default())
            .unwrap();
        assert_eq!(
            index_result, plain_result,
            "Exact search is not equal to plain search"
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/sparse_vector_index_search_tests.rs | lib/segment/tests/integration/sparse_vector_index_search_tests.rs | use std::cmp::max;
use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::{PointOffsetType, TelemetryDetail};
use fs_err as fs;
use io::storage_version::VERSION_FILE;
use itertools::Itertools;
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::common::operation_error::OperationResult;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{QueryVector, VectorInternal};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::STR_KEY;
use segment::fixtures::sparse_fixtures::{fixture_sparse_index, fixture_sparse_index_from_iter};
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::index::sparse_index::sparse_vector_index::{
SparseVectorIndex, SparseVectorIndexOpenArgs,
};
use segment::index::{PayloadIndex, VectorIndex, VectorIndexEnum};
use segment::json_path::JsonPath;
use segment::segment::Segment;
use segment::segment_constructor::{build_segment, load_segment};
use segment::types::PayloadFieldSchema::FieldType;
use segment::types::PayloadSchemaType::Keyword;
use segment::types::{
Condition, DEFAULT_SPARSE_FULL_SCAN_THRESHOLD, FieldCondition, Filter, ScoredPoint,
SegmentConfig, SeqNumberType, SparseVectorDataConfig, SparseVectorStorageType, VectorName,
VectorStorageDatatype,
};
use segment::vector_storage::{Random, VectorStorage};
use segment::{fixture_for_all_indices, payload_json};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::sparse_vector_fixture::{random_full_sparse_vector, random_sparse_vector};
use sparse::common::types::DimId;
use sparse::index::inverted_index::InvertedIndex;
use sparse::index::inverted_index::inverted_index_compressed_immutable_ram::InvertedIndexCompressedImmutableRam;
use sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use sparse::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use sparse::index::posting_list_common::PostingListIter as _;
use tempfile::Builder;
/// Max dimension of sparse vectors used in tests
const MAX_SPARSE_DIM: usize = 4096;
/// Number of vectors to index in tests
const NUM_VECTORS: usize = 2000;
/// Default full scan threshold in tests
/// very low value to force usage of index
const LOW_FULL_SCAN_THRESHOLD: usize = 1;
/// Full scan threshold to force plain search
const LARGE_FULL_SCAN_THRESHOLD: usize = 10 * NUM_VECTORS;
const SPARSE_VECTOR_NAME: &VectorName = "sparse_vector";
/// Expects the filter to match ALL points in order to compare the results with/without filter
///
/// Builds a sparse index over `NUM_VECTORS` random vectors, then for 1000
/// random queries runs the same search twice — once with an all-matching
/// filter, once without — and checks both return equivalent results.
/// `full_scan_threshold` selects the code path: a low value forces the
/// inverted index, a value above `NUM_VECTORS` forces plain search.
fn compare_sparse_vectors_search_with_without_filter(full_scan_threshold: usize) {
    let mut rnd = StdRng::seed_from_u64(43);
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    let sparse_vector_index = fixture_sparse_index::<InvertedIndexCompressedImmutableRam<f32>, _>(
        &mut rnd,
        NUM_VECTORS,
        MAX_SPARSE_DIM,
        full_scan_threshold,
        data_dir.path(),
    );
    // random query vectors
    let attempts = 1000;
    let query_vectors = (0..attempts)
        .map(|_| random_sparse_vector(&mut rnd, MAX_SPARSE_DIM))
        .collect::<Vec<_>>();
    // filter matches everything
    // (must-not of a condition no point satisfies, so every point passes)
    let filter = Filter::new_must_not(Condition::Field(FieldCondition::new_match(
        JsonPath::new(STR_KEY),
        STR_KEY.to_owned().into(),
    )));
    // compares results with and without filters
    // expects the filter to have no effect on the results because the filter matches everything
    for query in query_vectors {
        let maximum_number_of_results = sparse_vector_index.max_result_count(&query);
        // get all results minus 10 to force a bit of pruning
        let top = max(1, maximum_number_of_results.saturating_sub(10));
        let query_vector: QueryVector = query.clone().into();
        // with filter
        let index_results_filter = sparse_vector_index
            .search(
                &[&query_vector],
                Some(&filter),
                top,
                None,
                &Default::default(),
            )
            .unwrap();
        // without filter
        let index_results_no_filter = sparse_vector_index
            .search(&[&query_vector], None, top, None, &Default::default())
            .unwrap();
        assert_eq!(index_results_filter.len(), index_results_no_filter.len());
        for (filter_result, no_filter_result) in index_results_filter
            .iter()
            .zip(index_results_no_filter.iter())
        {
            assert_eq!(
                filter_result.len(),
                no_filter_result.len(),
                "query = {query:#?}, filter_result = {filter_result:#?} no_filter_result = {no_filter_result:#?}",
            );
            // skip zero scores because index skips non-overlapping points, but plain search does not
            for (filter_result, no_filter_result) in filter_result
                .iter()
                .filter(|s| s.score != 0.0)
                .zip(no_filter_result.iter().filter(|s| s.score != 0.0))
            {
                if filter_result.idx != no_filter_result.idx {
                    // we do not break ties when identical scores
                    assert_eq!(filter_result.score, no_filter_result.score);
                } else {
                    assert_eq!(filter_result, no_filter_result);
                }
            }
        }
    }
}
/// Filtered vs. unfiltered search must agree when the inverted index path is
/// taken (threshold low enough that full scan is never chosen).
#[test]
fn sparse_vector_index_ram_filter_search() {
    // very low full scan threshold to force usage of inverted index
    compare_sparse_vectors_search_with_without_filter(LOW_FULL_SCAN_THRESHOLD);
}
/// Filtered vs. unfiltered search must also agree on the plain-search fallback
/// path (threshold above the vector count, so full scan is always chosen).
#[test]
fn sparse_vector_index_fallback_plain_search() {
    // very high full scan threshold to force fallback to plain search
    compare_sparse_vectors_search_with_without_filter(NUM_VECTORS + 1);
}
/// Checks that the sparse vector index is consistent with the underlying storage
///
/// For every stored vector this verifies that (a) each of its dimensions
/// appears in the corresponding posting list with the right weight, (b) every
/// posting list is strictly sorted by record id, and (c) the vector can be
/// found by searching for itself with a large-enough `top`.
/// Assumes the storage contains no deleted points.
fn check_index_storage_consistency<T: InvertedIndex>(sparse_vector_index: &SparseVectorIndex<T>) {
    let borrowed_vector_storage = sparse_vector_index.vector_storage().borrow();
    let point_count = borrowed_vector_storage.available_vector_count();
    let hw_counter = HardwareCounterCell::disposable();
    for id in 0..point_count as PointOffsetType {
        // assuming no deleted points
        let vector = borrowed_vector_storage.get_vector::<Random>(id);
        let vector: &SparseVector = vector.as_vec_ref().try_into().unwrap();
        // Translate external dimension ids into the index's internal ids.
        let remapped_vector = sparse_vector_index
            .indices_tracker()
            .remap_vector(vector.to_owned());
        // check posting lists are consistent with storage
        for (dim_id, dim_value) in remapped_vector
            .indices
            .iter()
            .zip(remapped_vector.values.iter())
        {
            let posting_list = sparse_vector_index
                .inverted_index()
                .get(*dim_id, &hw_counter)
                .unwrap();
            // assert posting list sorted by record id
            assert!(
                posting_list
                    .clone()
                    .into_std_iter()
                    .tuple_windows()
                    .all(|(w0, w1)| w0.record_id < w1.record_id),
            );
            // assert posted list contains record id
            assert!(
                posting_list
                    .into_std_iter()
                    .any(|e| e.record_id == id && e.weight == *dim_value),
            );
        }
        // check the vector can be found via search using large top
        let top = sparse_vector_index.max_result_count(vector);
        let query_vector: QueryVector = vector.to_owned().into();
        let results = sparse_vector_index
            .search(&[&query_vector], None, top, None, &Default::default())
            .unwrap();
        assert!(results[0].iter().any(|s| s.idx == id));
    }
}
/// Builds a RAM sparse index, converts it to an mmap-backed index, and checks
/// both (and a fresh reload of the mmap index) stay consistent with the
/// underlying vector storage via `check_index_storage_consistency`.
#[test]
fn sparse_vector_index_consistent_with_storage() {
    let stopped = AtomicBool::new(false);
    let mut rnd = StdRng::seed_from_u64(42);
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    let sparse_vector_ram_index = fixture_sparse_index::<InvertedIndexCompressedImmutableRam<f32>, _>(
        &mut rnd,
        NUM_VECTORS,
        MAX_SPARSE_DIM,
        LOW_FULL_SCAN_THRESHOLD,
        data_dir.path(),
    );
    // check consistency with underlying RAM inverted index
    check_index_storage_consistency(&sparse_vector_ram_index);
    let mmap_index_dir = Builder::new().prefix("mmap_index_dir").tempdir().unwrap();
    // create mmap sparse vector index
    // (shares id tracker / storage / payload index with the RAM index)
    let mut sparse_index_config = sparse_vector_ram_index.config();
    sparse_index_config.index_type = SparseIndexType::Mmap;
    let sparse_vector_mmap_index: SparseVectorIndex<InvertedIndexCompressedMmap<f32>> =
        SparseVectorIndex::open(SparseVectorIndexOpenArgs {
            config: sparse_index_config,
            id_tracker: sparse_vector_ram_index.id_tracker().clone(),
            vector_storage: sparse_vector_ram_index.vector_storage().clone(),
            payload_index: sparse_vector_ram_index.payload_index().clone(),
            path: mmap_index_dir.path(),
            stopped: &stopped,
            tick_progress: || (),
        })
        .unwrap();
    assert_eq!(
        sparse_vector_mmap_index.indexed_vector_count(),
        sparse_vector_ram_index.indexed_vector_count()
    );
    // check consistency with underlying mmap inverted index
    check_index_storage_consistency(&sparse_vector_mmap_index);
    // drop and reload index
    // (dropping first releases the mmap files before reopening them)
    drop(sparse_vector_mmap_index);
    // load index from memmap file
    let mut sparse_index_config = sparse_vector_ram_index.config();
    sparse_index_config.index_type = SparseIndexType::Mmap;
    let sparse_vector_mmap_index: SparseVectorIndex<InvertedIndexCompressedMmap<f32>> =
        SparseVectorIndex::open(SparseVectorIndexOpenArgs {
            config: sparse_index_config,
            id_tracker: sparse_vector_ram_index.id_tracker().clone(),
            vector_storage: sparse_vector_ram_index.vector_storage().clone(),
            payload_index: sparse_vector_ram_index.payload_index().clone(),
            path: mmap_index_dir.path(),
            stopped: &stopped,
            tick_progress: || (),
        })
        .unwrap();
    assert_eq!(
        sparse_vector_mmap_index.indexed_vector_count(),
        sparse_vector_ram_index.indexed_vector_count()
    );
    // check consistency with underlying mmap inverted index
    check_index_storage_consistency(&sparse_vector_mmap_index);
}
/// Opening an mmap sparse index from a directory that contains no stored
/// configuration must not fail: the absent files are ignored and a fresh,
/// empty index is created instead.
#[test]
fn sparse_vector_index_load_missing_mmap() {
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    let open_result: OperationResult<SparseVectorIndex<InvertedIndexCompressedMmap<f32>>> =
        fixture_sparse_index_from_iter(
            data_dir.path(),
            std::iter::empty(),
            10_000,
            SparseIndexType::Mmap,
        );
    assert!(open_result.is_ok())
}
/// Deleting a point through the id tracker must immediately exclude it from
/// search results: the test searches, deletes the top hit, and verifies the
/// deleted point no longer appears anywhere in the new results.
#[test]
fn sparse_vector_index_ram_deleted_points_search() {
    let top = 10;
    let mut rnd = StdRng::seed_from_u64(42);
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    let sparse_vector_index = fixture_sparse_index_from_iter::<InvertedIndexRam>(
        data_dir.path(),
        (0..NUM_VECTORS).map(|_| random_sparse_vector(&mut rnd, MAX_SPARSE_DIM)),
        LOW_FULL_SCAN_THRESHOLD,
        SparseIndexType::MutableRam,
    )
    .unwrap();
    // sanity check (all indexed, no deleted points)
    assert_eq!(
        sparse_vector_index
            .id_tracker()
            .borrow()
            .available_point_count(),
        sparse_vector_index.indexed_vector_count()
    );
    assert_eq!(
        sparse_vector_index
            .id_tracker()
            .borrow()
            .deleted_point_count(),
        0
    );
    // query index
    let query_vector: QueryVector = random_sparse_vector(&mut rnd, MAX_SPARSE_DIM).into();
    let before_deletion_results: Vec<_> = sparse_vector_index
        .search(&[&query_vector], None, top, None, &Default::default())
        .unwrap();
    // pick a point to delete
    // (the top hit, so its disappearance is guaranteed to change the results)
    let deleted_idx = before_deletion_results[0][0].idx;
    // delete a point
    // (deletion goes through the external id, so resolve it first)
    let deleted_external = sparse_vector_index
        .id_tracker()
        .borrow_mut()
        .external_id(deleted_idx)
        .unwrap();
    sparse_vector_index
        .id_tracker()
        .borrow_mut()
        .drop(deleted_external)
        .unwrap();
    assert!(
        sparse_vector_index
            .id_tracker()
            .borrow()
            .is_deleted_point(deleted_idx),
    );
    assert_eq!(
        sparse_vector_index
            .id_tracker()
            .borrow()
            .deleted_point_count(),
        1
    );
    // assert that the deleted point is no longer in the index
    let after_deletion_results: Vec<_> = sparse_vector_index
        .search(&[&query_vector], None, top, None, &Default::default())
        .unwrap();
    assert_ne!(before_deletion_results, after_deletion_results);
    assert!(
        after_deletion_results
            .iter()
            .all(|x| x.iter().all(|y| y.idx != deleted_idx)),
    );
}
/// Filtered search must reflect payload changes made after index creation:
/// a keyword filter first matches nothing, then — after indexing the field
/// and assigning the payload to half of the points — matches exactly that half.
#[test]
fn sparse_vector_index_ram_filtered_search() {
    let mut rnd = StdRng::seed_from_u64(42);
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    // setup index
    let sparse_vector_index = fixture_sparse_index::<InvertedIndexCompressedImmutableRam<f32>, _>(
        &mut rnd,
        NUM_VECTORS,
        MAX_SPARSE_DIM,
        LOW_FULL_SCAN_THRESHOLD,
        data_dir.path(),
    );
    // query index by payload
    let field_name = "field";
    let field_value = "important value";
    let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
        JsonPath::new(field_name),
        field_value.to_owned().into(),
    )));
    // query all sparse dimension to get all points
    let query_vector: QueryVector = random_full_sparse_vector(&mut rnd, MAX_SPARSE_DIM).into();
    let before_result = sparse_vector_index
        .search(
            &[&query_vector],
            Some(&filter),
            10,
            None,
            &Default::default(),
        )
        .unwrap();
    // No payload assigned yet, so the filter matches nothing.
    assert_eq!(before_result.len(), 1);
    assert_eq!(before_result[0].len(), 0);
    let hw_counter = HardwareCounterCell::new();
    // create payload field index
    // (borrow_mut scopes are kept tight — drops below release the RefCell)
    let mut payload_index = sparse_vector_index.payload_index().borrow_mut();
    payload_index
        .set_indexed(&JsonPath::new(field_name), Keyword, &hw_counter)
        .unwrap();
    drop(payload_index);
    // assert payload field index created and empty
    let payload_index = sparse_vector_index.payload_index().borrow();
    let indexed_fields = payload_index.indexed_fields();
    assert_eq!(
        *indexed_fields.get(&JsonPath::new(field_name)).unwrap(),
        FieldType(Keyword)
    );
    let field_indexes = &payload_index.field_indexes;
    let field_index = field_indexes.get(&JsonPath::new(field_name)).unwrap();
    assert_eq!(field_index[0].count_indexed_points(), 0);
    drop(payload_index);
    // add payload on the first half of the points
    let half_indexed_count = sparse_vector_index.indexed_vector_count() / 2;
    let payload = payload_json! {field_name: field_value};
    let hw_counter = HardwareCounterCell::new();
    let mut payload_index = sparse_vector_index.payload_index().borrow_mut();
    for idx in 0..half_indexed_count {
        payload_index
            .set_payload(idx as PointOffsetType, &payload, &None, &hw_counter)
            .unwrap();
    }
    drop(payload_index);
    // assert payload index updated
    let payload_index = sparse_vector_index.payload_index().borrow();
    let field_indexes = &payload_index.field_indexes;
    let field_index = field_indexes.get(&JsonPath::new(field_name)).unwrap();
    assert_eq!(field_index[0].count_indexed_points(), half_indexed_count);
    drop(payload_index);
    // request all points with payload
    let after_result = sparse_vector_index
        .search(
            &[&query_vector],
            Some(&filter),
            half_indexed_count * 2, // original top
            None,
            &Default::default(),
        )
        .unwrap();
    assert_eq!(after_result.len(), 1);
    assert_eq!(after_result[0].len(), half_indexed_count); // expect half of the points
}
#[test]
fn sparse_vector_index_plain_search() {
let mut rnd = StdRng::seed_from_u64(42);
let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
// setup index
let sparse_vector_index = fixture_sparse_index::<InvertedIndexCompressedImmutableRam<f32>, _>(
&mut rnd,
NUM_VECTORS,
MAX_SPARSE_DIM,
LARGE_FULL_SCAN_THRESHOLD,
data_dir.path(),
);
// query index by payload
let field_name = "field";
let field_value = "important value";
let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new(field_name),
field_value.to_owned().into(),
)));
// query all sparse dimension to get all points
let query_vector: QueryVector = random_full_sparse_vector(&mut rnd, MAX_SPARSE_DIM).into();
// empty when searching payload index directly
let before_plain_results = sparse_vector_index
.search(
&[&query_vector],
Some(&filter),
10,
None,
&Default::default(),
)
.unwrap();
assert_eq!(before_plain_results.len(), 1);
assert_eq!(before_plain_results[0].len(), 0);
let payload = payload_json! {field_name: field_value};
let hw_counter = HardwareCounterCell::new();
// add payload to all points
let mut payload_index = sparse_vector_index.payload_index().borrow_mut();
for idx in 0..NUM_VECTORS {
payload_index
.set_payload(idx as PointOffsetType, &payload, &None, &hw_counter)
.unwrap();
}
drop(payload_index);
// same results when searching payload index directly
let after_plain_results = sparse_vector_index
.search(
&[&query_vector],
Some(&filter),
NUM_VECTORS,
None,
&Default::default(),
)
.unwrap();
assert_eq!(after_plain_results.len(), 1);
assert_eq!(after_plain_results[0].len(), NUM_VECTORS);
// check that plain searchers were used
assert_eq!(
sparse_vector_index
.get_telemetry_data(TelemetryDetail::default())
.filtered_small_cardinality
.count,
2
);
}
#[test]
fn handling_empty_sparse_vectors() {
let mut rnd = StdRng::seed_from_u64(42);
let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
let sparse_vector_index: SparseVectorIndex<InvertedIndexCompressedImmutableRam<f32>> =
fixture_sparse_index_from_iter(
data_dir.path(),
(0..NUM_VECTORS).map(|_| SparseVector::default()),
DEFAULT_SPARSE_FULL_SCAN_THRESHOLD,
SparseIndexType::ImmutableRam,
)
.unwrap();
let mut borrowed_storage = sparse_vector_index.vector_storage().borrow_mut();
let hw_counter = HardwareCounterCell::new();
// add empty points to storage
for idx in 0..NUM_VECTORS {
let vec = &SparseVector::new(vec![], vec![]).unwrap();
borrowed_storage
.insert_vector(idx as PointOffsetType, vec.into(), &hw_counter)
.unwrap();
}
drop(borrowed_storage);
// assert all empty points are in storage
assert_eq!(
sparse_vector_index
.vector_storage()
.borrow()
.available_vector_count(),
NUM_VECTORS,
);
// empty vectors are not indexed
assert_eq!(sparse_vector_index.indexed_vector_count(), 0);
let query_vector: QueryVector = random_sparse_vector(&mut rnd, MAX_SPARSE_DIM).into();
// empty vectors are not searchable (recommend using scroll API to retrieve those)
let results = sparse_vector_index
.search(&[&query_vector], None, 10, None, &Default::default())
.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].len(), 0);
}
#[test]
fn sparse_vector_index_persistence_test() {
let stopped = AtomicBool::new(false);
let dim = 8;
let num_vectors: u64 = 5_000;
let top = 3;
let mut rnd = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let config = SegmentConfig {
vector_data: Default::default(),
sparse_vector_data: HashMap::from([(
SPARSE_VECTOR_NAME.to_owned(),
SparseVectorDataConfig {
index: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::MutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
storage_type: SparseVectorStorageType::default(),
modifier: None,
},
)]),
payload_storage_type: Default::default(),
};
let mut segment = build_segment(dir.path(), &config, true).unwrap();
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let vector: VectorInternal = random_sparse_vector(&mut rnd, dim).into();
let mut named_vector = NamedVectors::default();
named_vector.insert(SPARSE_VECTOR_NAME.to_owned(), vector);
let idx = n.into();
segment
.upsert_point(n as SeqNumberType, idx, named_vector, &hw_counter)
.unwrap();
}
segment.flush(false).unwrap();
let search_vector = random_sparse_vector(&mut rnd, dim);
let query_vector: QueryVector = search_vector.into();
let search_result = segment
.search(
SPARSE_VECTOR_NAME,
&query_vector,
&Default::default(),
&Default::default(),
None,
top,
None,
)
.unwrap();
assert_eq!(search_result.len(), top);
let path = segment.current_path.clone();
drop(segment);
// persistence using rebuild of inverted index
// for appendable segment vector index has to be rebuilt
let segment = load_segment(&path, &stopped).unwrap().unwrap();
let search_after_reload_result = segment
.search(
SPARSE_VECTOR_NAME,
&query_vector,
&Default::default(),
&Default::default(),
None,
top,
None,
)
.unwrap();
assert_eq!(search_after_reload_result.len(), top);
assert_eq!(search_result, search_after_reload_result);
fixture_for_all_indices!(check_persistence::<_>(
&segment,
&search_result,
&query_vector,
top
));
}
fn check_persistence<TInvertedIndex: InvertedIndex>(
segment: &Segment,
search_result: &[ScoredPoint],
query_vector: &QueryVector,
top: usize,
) {
let stopped = AtomicBool::new(false);
let inverted_index_dir = Builder::new()
.prefix("inverted_index_ram")
.tempdir()
.unwrap();
let open_index = || -> SparseVectorIndex<TInvertedIndex> {
SparseVectorIndex::open(SparseVectorIndexOpenArgs {
config: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::Mmap,
datatype: Some(VectorStorageDatatype::Float32),
},
id_tracker: segment.id_tracker.clone(),
vector_storage: segment.vector_data[SPARSE_VECTOR_NAME]
.vector_storage
.clone(),
payload_index: segment.payload_index.clone(),
path: inverted_index_dir.path(),
stopped: &stopped,
tick_progress: || (),
})
.unwrap()
};
let check_search = |sparse_vector_index: &SparseVectorIndex<TInvertedIndex>| {
// check that the loaded index performs the same search
let search_after_reload_result = sparse_vector_index
.search(&[query_vector], None, top, None, &Default::default())
.unwrap();
assert_eq!(search_after_reload_result[0].len(), top);
for (search_1, search_2) in search_result
.iter()
.zip(search_after_reload_result[0].iter())
{
let id_1 = segment
.id_tracker
.borrow_mut()
.internal_id(search_1.id)
.unwrap();
assert_eq!(id_1, search_2.idx);
}
};
let sparse_vector_index = open_index();
let version_file = inverted_index_dir.path().join(VERSION_FILE);
assert!(version_file.exists());
// reload sparse index from file
drop(sparse_vector_index);
let sparse_vector_index = open_index();
check_search(&sparse_vector_index);
// drop version file and reload index
drop(sparse_vector_index);
fs::remove_file(&version_file).unwrap();
let sparse_vector_index = open_index();
assert!(version_file.exists(), "version file should be recreated");
check_search(&sparse_vector_index);
}
#[test]
fn sparse_vector_index_files() {
fixture_for_all_indices!(check_sparse_vector_index_files::<_>());
}
fn check_sparse_vector_index_files<I: InvertedIndex>() {
let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
let index = fixture_sparse_index::<I, _>(
&mut StdRng::seed_from_u64(42),
1,
MAX_SPARSE_DIM,
LOW_FULL_SCAN_THRESHOLD,
data_dir.path(),
);
let files = index.files();
// sparse index config + version + inverted index config + inverted index data + tracker
assert_eq!(files.len(), 5);
for file in files.iter() {
assert!(file.exists(), "file {file:?} does not exist");
}
}
#[test]
fn sparse_vector_test_large_index() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let config = SegmentConfig {
vector_data: Default::default(),
sparse_vector_data: HashMap::from([(
SPARSE_VECTOR_NAME.to_owned(),
SparseVectorDataConfig {
index: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::MutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
storage_type: SparseVectorStorageType::Mmap,
modifier: None,
},
)]),
payload_storage_type: Default::default(),
};
let mut segment = build_segment(dir.path(), &config, true).unwrap();
let hw_counter = HardwareCounterCell::new();
let vector: VectorInternal = SparseVector {
indices: vec![DimId::MAX],
values: vec![0.0],
}
.into();
let mut named_vector = NamedVectors::default();
named_vector.insert(SPARSE_VECTOR_NAME.to_owned(), vector);
let idx = 0.into();
segment
.upsert_point(0 as SeqNumberType, idx, named_vector, &hw_counter)
.unwrap();
let borrowed_vector_index = segment.vector_data[SPARSE_VECTOR_NAME]
.vector_index
.borrow();
match &*borrowed_vector_index {
VectorIndexEnum::SparseRam(sparse_vector_index) => {
assert!(
sparse_vector_index
.indices_tracker()
.remap_index(DimId::MAX)
.is_some(),
);
assert_eq!(sparse_vector_index.inverted_index().max_index().unwrap(), 0);
}
_ => panic!("unexpected vector index type"),
}
}
#[test]
fn test_sparse_search_top_zero() {
let mut rnd = StdRng::seed_from_u64(43);
let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
let sparse_vector_index = fixture_sparse_index::<InvertedIndexCompressedImmutableRam<f32>, _>(
&mut rnd,
NUM_VECTORS,
MAX_SPARSE_DIM,
LOW_FULL_SCAN_THRESHOLD,
data_dir.path(),
);
let query_vector = random_sparse_vector(&mut rnd, MAX_SPARSE_DIM).into();
let top = 0;
sparse_vector_index
.search(&[&query_vector], None, top, None, &Default::default())
.unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/hnsw_discover_test.rs | lib/segment/tests/integration/hnsw_discover_test.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use itertools::Itertools;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, QueryVector, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::random_vector;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::index::{PayloadIndex, VectorIndex};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, PayloadSchemaType,
SearchParams, SeqNumberType,
};
use segment::vector_storage::query::{ContextPair, DiscoveryQuery};
use tempfile::Builder;
const MAX_EXAMPLE_PAIRS: usize = 3;
fn random_discovery_query<R: Rng + ?Sized>(rng: &mut R, dim: usize) -> QueryVector {
let num_pairs: usize = rng.random_range(1..MAX_EXAMPLE_PAIRS);
let target = random_vector(rng, dim).into();
let pairs = (0..num_pairs)
.map(|_| {
let positive = random_vector(rng, dim).into();
let negative = random_vector(rng, dim).into();
ContextPair { positive, negative }
})
.collect_vec();
DiscoveryQuery::new(target, pairs).into()
}
fn get_random_keyword_of<R: Rng + ?Sized>(num_options: usize, rng: &mut R) -> String {
let random_number = rng.random_range(0..num_options);
format!("keyword_{random_number}")
}
/// Checks discovery search precision when using hnsw index, this is different from the tests in
/// `filtrable_hnsw_test.rs` because it sets higher `m` and `ef_construct` parameters to get better precision
#[test]
fn hnsw_discover_precision() {
let stopped = AtomicBool::new(false);
let max_failures = 5; // out of 100
let dim = 8;
let m = 16;
let num_vectors: u64 = 5_000;
let ef = 32;
let ef_construct = 64;
let distance = Distance::Cosine;
let full_scan_threshold = 16; // KB
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
segment
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
}
let payload_index_ptr = segment.payload_index.clone();
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = 1; // single-threaded for deterministic build
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
quantized_vectors: quantized_vectors.clone(),
payload_index: payload_index_ptr,
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 3;
let mut discovery_hits = 0;
let attempts = 100;
for _i in 0..attempts {
let query: QueryVector = random_discovery_query(&mut rng, dim);
let index_discovery_result = hnsw_index
.search(
&[&query],
None,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let plain_discovery_result = segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&query], None, top, None, &Default::default())
.unwrap();
if plain_discovery_result == index_discovery_result {
discovery_hits += 1;
}
}
eprintln!("discovery_hits = {discovery_hits:#?} out of {attempts}");
assert!(
attempts - discovery_hits <= max_failures,
"hits: {discovery_hits} of {attempts}"
); // Not more than X% failures
}
/// Same test as above but with payload index and filtering
#[test]
fn filtered_hnsw_discover_precision() {
let stopped = AtomicBool::new(false);
let max_failures = 5; // out of 100
let dim = 8;
let m = 16;
let num_vectors: u64 = 5_000;
let ef = 64;
let ef_construct = 64;
let distance = Distance::Cosine;
let full_scan_threshold = 16; // KB
let num_payload_values = 4;
let mut rng = StdRng::seed_from_u64(42);
let hw_counter = HardwareCounterCell::new();
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let keyword_key = "keyword";
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
let keyword_payload = get_random_keyword_of(num_payload_values, &mut rng);
let payload = payload_json! {keyword_key: keyword_payload};
segment
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
}
let payload_index_ptr = segment.payload_index.clone();
payload_index_ptr
.borrow_mut()
.set_indexed(
&JsonPath::new(keyword_key),
PayloadSchemaType::Keyword,
&hw_counter,
)
.unwrap();
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
quantized_vectors: quantized_vectors.clone(),
payload_index: payload_index_ptr,
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 3;
let mut discovery_hits = 0;
let attempts = 100;
for _i in 0..attempts {
let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new(keyword_key),
get_random_keyword_of(num_payload_values, &mut rng).into(),
)));
let filter_query = Some(&filter);
let query: QueryVector = random_discovery_query(&mut rng, dim);
let index_discovery_result = hnsw_index
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let plain_discovery_result = segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&query], filter_query, top, None, &Default::default())
.unwrap();
if plain_discovery_result == index_discovery_result {
discovery_hits += 1;
}
}
eprintln!("discovery_hits = {discovery_hits:#?} out of {attempts}");
assert!(
attempts - discovery_hits <= max_failures,
"hits: {discovery_hits} of {attempts}"
); // Not more than X% failures
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/disbalanced_vectors_test.rs | lib/segment/tests/integration/disbalanced_vectors_test.rs | const NUM_VECTORS_1: u64 = 300;
const NUM_VECTORS_2: u64 = 500;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::progress_tracker::ProgressTracker;
use segment::data_types::named_vectors::NamedVectors;
use segment::entry::entry_point::SegmentEntry;
use segment::index::hnsw_index::num_rayon_threads;
use segment::segment::Segment;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::segment_constructor::simple_segment_constructor::{
VECTOR1_NAME, VECTOR2_NAME, build_multivec_segment,
};
use segment::types::{Distance, HnswGlobalConfig};
use segment::vector_storage::VectorStorage;
use tempfile::Builder;
#[test]
fn test_rebuild_with_removed_vectors() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let stopped = AtomicBool::new(false);
let mut segment1 = build_multivec_segment(dir.path(), 4, 6, Distance::Dot).unwrap();
let mut segment2 = build_multivec_segment(dir.path(), 4, 6, Distance::Dot).unwrap();
let hw_counter = HardwareCounterCell::new();
for i in 0..NUM_VECTORS_1 {
segment1
.upsert_point(
1,
i.into(),
NamedVectors::from_pairs([
(VECTOR1_NAME.into(), vec![i as f32, 0., 0., 0.]),
(VECTOR2_NAME.into(), vec![0., i as f32, 0., 0., 0., 0.]),
]),
&hw_counter,
)
.unwrap();
}
for i in 0..NUM_VECTORS_2 {
let vectors = if i % 5 == 0 {
NamedVectors::from_pairs([(VECTOR1_NAME.into(), vec![0., 0., i as f32, 0.])])
} else {
NamedVectors::from_pairs([
(VECTOR1_NAME.into(), vec![0., 0., i as f32, 0.]),
(VECTOR2_NAME.into(), vec![0., 0., 0., i as f32, 0., 0.]),
])
};
segment2
.upsert_point(1, (NUM_VECTORS_1 + i).into(), vectors, &hw_counter)
.unwrap();
}
for i in 0..NUM_VECTORS_2 {
if i % 3 == 0 {
segment2
.delete_vector(2, (NUM_VECTORS_1 + i).into(), VECTOR1_NAME)
.unwrap();
segment2
.delete_vector(2, (NUM_VECTORS_1 + i).into(), VECTOR2_NAME)
.unwrap();
}
if i % 3 == 1 {
segment2
.delete_vector(2, (NUM_VECTORS_1 + i).into(), VECTOR2_NAME)
.unwrap();
}
if i % 2 == 0 {
segment2
.delete_point(2, (NUM_VECTORS_1 + i).into(), &hw_counter)
.unwrap();
}
}
let mut reference = vec![];
for i in 0..20 {
if i % 2 == 0 {
continue;
}
let idx = NUM_VECTORS_1 + i;
let vec = segment2.all_vectors(idx.into(), &hw_counter).unwrap();
reference.push(vec);
}
let mut builder = SegmentBuilder::new(
dir.path(),
temp_dir.path(),
&segment1.segment_config,
&HnswGlobalConfig::default(),
)
.unwrap();
builder.update(&[&segment1, &segment2], &stopped).unwrap();
let permit_cpu_count = num_rayon_threads(0);
let permit = ResourcePermit::dummy(permit_cpu_count as u32);
let hw_counter = HardwareCounterCell::new();
let mut rng = rand::rng();
let progress = ProgressTracker::new_for_test();
let merged_segment: Segment = builder
.build(permit, &stopped, &mut rng, &hw_counter, progress)
.unwrap();
let merged_points_count = merged_segment.available_point_count();
assert_eq!(
merged_points_count,
(NUM_VECTORS_1 + NUM_VECTORS_2 / 2) as usize
);
let vec1_count = merged_segment
.vector_data
.get(VECTOR1_NAME)
.unwrap()
.vector_storage
.borrow()
.available_vector_count();
let vec2_count = merged_segment
.vector_data
.get(VECTOR2_NAME)
.unwrap()
.vector_storage
.borrow()
.available_vector_count();
assert_ne!(vec1_count, vec2_count);
assert!(vec1_count > NUM_VECTORS_1 as usize);
assert!(vec2_count > NUM_VECTORS_1 as usize);
assert!(vec1_count < NUM_VECTORS_1 as usize + NUM_VECTORS_2 as usize);
assert!(vec2_count < NUM_VECTORS_1 as usize + NUM_VECTORS_2 as usize);
let mut merged_reference = vec![];
for i in 0..20 {
if i % 2 == 0 {
continue;
}
let idx = NUM_VECTORS_1 + i;
let vec = merged_segment.all_vectors(idx.into(), &hw_counter).unwrap();
merged_reference.push(vec);
}
for i in 0..merged_reference.len() {
assert_eq!(merged_reference[i], reference[i]);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/multivector_hnsw_test.rs | lib/segment/tests/integration/multivector_hnsw_test.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use rand::SeedableRng;
use rand::prelude::StdRng;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, MultiDenseVectorInternal, QueryVector, TypedMultiDenseVectorRef,
VectorElementType, VectorRef, only_default_vector,
};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::index_fixtures::random_vector;
use segment::fixtures::payload_fixtures::random_int_payload;
use segment::index::VectorIndex;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, MultiVectorConfig,
PayloadSchemaType, SeqNumberType,
};
use segment::vector_storage::VectorStorage;
use segment::vector_storage::multi_dense::appendable_mmap_multi_dense_vector_storage::open_appendable_in_ram_multi_vector_storage_full;
use tempfile::Builder;
#[test]
fn test_single_multi_and_dense_hnsw_equivalency() {
let num_vectors: u64 = 1_000;
let distance = Distance::Cosine;
let num_payload_values = 2;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let int_key = "int";
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
let hw_counter = HardwareCounterCell::new();
segment
.create_field_index(
0,
&JsonPath::new(int_key),
Some(&PayloadSchemaType::Integer.into()),
&hw_counter,
)
.unwrap();
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut multi_storage = open_appendable_in_ram_multi_vector_storage_full(
dir.path(),
dim,
distance,
MultiVectorConfig::default(),
)
.unwrap();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
let preprocessed_vector = distance.preprocess_vector::<VectorElementType>(vector.clone());
let vector_multi = MultiDenseVectorInternal::new(preprocessed_vector, vector.len());
let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
let payload = payload_json! {int_key: int_payload};
segment
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
let internal_id = segment.id_tracker.borrow().internal_id(idx).unwrap();
multi_storage
.insert_vector(
internal_id,
VectorRef::MultiDense(TypedMultiDenseVectorRef::from(&vector_multi)),
&hw_counter,
)
.unwrap();
}
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let stopped = AtomicBool::new(false);
let m = 8;
let ef_construct = 100;
let full_scan_threshold = 10000;
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
// single threaded mode to guarantee equivalency between single and multi hnsw
let permit = Arc::new(ResourcePermit::dummy(1));
let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
let hnsw_index_dense = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
quantized_vectors: quantized_vectors.clone(),
payload_index: segment.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit: permit.clone(),
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let multi_storage = Arc::new(AtomicRefCell::new(multi_storage));
let hnsw_index_multi = HNSWIndex::open(HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: multi_storage,
quantized_vectors: quantized_vectors.clone(),
payload_index: segment.payload_index.clone(),
hnsw_config,
})
.unwrap();
for _ in 0..10 {
let random_vector = random_vector(&mut rng, dim);
let query_vector = random_vector.clone().into();
let query_vector_multi = QueryVector::Nearest(vec![random_vector].try_into().unwrap());
let payload_value = random_int_payload(&mut rng, 1..=1).pop().unwrap();
let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new(int_key),
payload_value.into(),
)));
let search_res_dense = hnsw_index_dense
.search(
&[&query_vector],
Some(&filter),
10,
None,
&Default::default(),
)
.unwrap();
let search_res_multi = hnsw_index_multi
.search(
&[&query_vector_multi],
Some(&filter),
10,
None,
&Default::default(),
)
.unwrap();
assert_eq!(search_res_dense, search_res_multi);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/sparse_discover_test.rs | lib/segment/tests/integration/sparse_discover_test.rs | use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use ahash::AHashSet;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::TelemetryDetail;
use itertools::Itertools;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::query_context::{QueryContext, VectorQueryContext};
use segment::data_types::vectors::{QueryVector, VectorElementType, VectorInternal};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::random_vector;
use segment::index::VectorIndex;
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::index::sparse_index::sparse_vector_index::SparseVectorIndexOpenArgs;
use segment::segment_constructor::{build_segment, create_sparse_vector_index_test};
use segment::types::{
Condition, DEFAULT_SPARSE_FULL_SCAN_THRESHOLD, Distance, ExtendedPointId, Filter,
HasIdCondition, Indexes, PointIdType, SegmentConfig, SeqNumberType, SparseVectorDataConfig,
SparseVectorStorageType, VectorDataConfig, VectorStorageDatatype, VectorStorageType,
};
use segment::vector_storage::query::{ContextPair, DiscoveryQuery};
use sparse::common::sparse_vector::SparseVector;
use tempfile::Builder;
use crate::fixtures::segment::SPARSE_VECTOR_NAME;
const MAX_EXAMPLE_PAIRS: usize = 3;
fn convert_to_sparse_vector(vector: &[VectorElementType]) -> SparseVector {
let mut sparse_vector = SparseVector::default();
for (idx, value) in vector.iter().enumerate() {
sparse_vector.indices.push(idx as u32);
sparse_vector.values.push(*value);
}
sparse_vector
}
fn random_named_vector<R: Rng + ?Sized>(
rnd: &mut R,
dim: usize,
) -> (NamedVectors<'_>, NamedVectors<'_>) {
let dense_vector = random_vector(rnd, dim);
let sparse_vector = convert_to_sparse_vector(&dense_vector);
let mut sparse_result = NamedVectors::default();
sparse_result.insert(SPARSE_VECTOR_NAME.to_owned(), sparse_vector.into());
let mut dense_result = NamedVectors::default();
dense_result.insert(SPARSE_VECTOR_NAME.to_owned(), dense_vector.into());
(sparse_result, dense_result)
}
fn random_discovery_query<R: Rng + ?Sized>(rnd: &mut R, dim: usize) -> (QueryVector, QueryVector) {
let num_pairs: usize = rnd.random_range(1..MAX_EXAMPLE_PAIRS);
let dense_target = random_vector(rnd, dim);
let sparse_target = convert_to_sparse_vector(&dense_target);
let dense_pairs = (0..num_pairs)
.map(|_| {
let positive = random_vector(rnd, dim);
let negative = random_vector(rnd, dim);
(positive, negative)
})
.collect_vec();
let sparse_pairs = (0..num_pairs)
.map(|i| {
let positive = convert_to_sparse_vector(&dense_pairs[i].0);
let negative = convert_to_sparse_vector(&dense_pairs[i].1);
(positive, negative)
})
.collect_vec();
let dense_query = DiscoveryQuery::new(
dense_target.into(),
dense_pairs
.into_iter()
.map(|(positive, negative)| ContextPair {
positive: positive.into(),
negative: negative.into(),
})
.collect(),
)
.into();
let sparse_query = DiscoveryQuery::new(
sparse_target.into(),
sparse_pairs
.into_iter()
.map(|(positive, negative)| ContextPair {
positive: positive.into(),
negative: negative.into(),
})
.collect(),
)
.into();
(sparse_query, dense_query)
}
fn random_nearest_query<R: Rng + ?Sized>(rnd: &mut R, dim: usize) -> (QueryVector, QueryVector) {
let dense_target = random_vector(rnd, dim);
let sparse_target = convert_to_sparse_vector(&dense_target);
(sparse_target.into(), dense_target.into())
}
#[test]
fn sparse_index_discover_test() {
let stopped = AtomicBool::new(false);
let dim = 8;
let num_vectors: u64 = 5_000;
let distance = Distance::Dot;
let mut rnd = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let index_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let sparse_config = SegmentConfig {
vector_data: Default::default(),
sparse_vector_data: HashMap::from([(
SPARSE_VECTOR_NAME.to_owned(),
SparseVectorDataConfig {
index: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::MutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
storage_type: SparseVectorStorageType::default(),
modifier: None,
},
)]),
payload_storage_type: Default::default(),
};
let dense_config = SegmentConfig {
vector_data: HashMap::from([(
SPARSE_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: dim,
distance,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: None,
},
)]),
payload_storage_type: Default::default(),
sparse_vector_data: Default::default(),
};
let mut sparse_segment = build_segment(dir.path(), &sparse_config, true).unwrap();
let mut dense_segment = build_segment(dir.path(), &dense_config, true).unwrap();
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let (sparse_vector, dense_vector) = random_named_vector(&mut rnd, dim);
let idx = n.into();
sparse_segment
.upsert_point(n as SeqNumberType, idx, sparse_vector, &hw_counter)
.unwrap();
dense_segment
.upsert_point(n as SeqNumberType, idx, dense_vector, &hw_counter)
.unwrap();
}
let payload_index_ptr = sparse_segment.payload_index.clone();
let vector_storage = &sparse_segment.vector_data[SPARSE_VECTOR_NAME].vector_storage;
let sparse_index = create_sparse_vector_index_test(SparseVectorIndexOpenArgs {
config: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::ImmutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
id_tracker: sparse_segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
payload_index: payload_index_ptr,
path: index_dir.path(),
stopped: &stopped,
tick_progress: || (),
})
.unwrap();
let top = 3;
let attempts = 100;
for i in 0..attempts {
// do discovery search
let (sparse_query, dense_query) = random_discovery_query(&mut rnd, dim);
let vec_context = VectorQueryContext::default();
let sparse_discovery_result = sparse_index
.search(&[&sparse_query], None, top, None, &vec_context)
.unwrap();
let dense_discovery_result = dense_segment.vector_data[SPARSE_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&dense_query], None, top, None, &vec_context)
.unwrap();
// check id only because scores can be epsilon-size different
assert_eq!(
sparse_discovery_result[0]
.iter()
.map(|r| r.idx)
.collect_vec(),
dense_discovery_result[0]
.iter()
.map(|r| r.idx)
.collect_vec(),
);
// do regular nearest search
let (sparse_query, dense_query) = random_nearest_query(&mut rnd, dim);
let query_context = QueryContext::default();
let segment_query_context = query_context.get_segment_query_context();
let vector_context = segment_query_context.get_vector_context(SPARSE_VECTOR_NAME);
let sparse_search_result = sparse_index
.search(&[&sparse_query], None, top, None, &vector_context)
.unwrap();
let cpu_usage = query_context.hardware_usage_accumulator().get_cpu();
assert!(cpu_usage > 0);
let dense_search_result = dense_segment.vector_data[SPARSE_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&dense_query], None, top, None, &vector_context)
.unwrap();
// check that nearest search uses sparse index
let telemetry = sparse_index.get_telemetry_data(TelemetryDetail::default());
assert_eq!(telemetry.unfiltered_sparse.count, i + 1);
// check id only because scores can be epsilon-size different
assert_eq!(
sparse_search_result[0].iter().map(|r| r.idx).collect_vec(),
dense_search_result[0].iter().map(|r| r.idx).collect_vec(),
);
}
}
#[test]
fn sparse_index_hardware_measurement_test() {
let stopped = AtomicBool::new(false);
let dim = 8;
let num_vectors: u64 = 5_000;
let mut rnd = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let index_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let sparse_config = SegmentConfig {
vector_data: Default::default(),
sparse_vector_data: HashMap::from([(
SPARSE_VECTOR_NAME.to_owned(),
SparseVectorDataConfig {
index: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::MutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
storage_type: SparseVectorStorageType::default(),
modifier: None,
},
)]),
payload_storage_type: Default::default(),
};
let mut sparse_segment = build_segment(dir.path(), &sparse_config, true).unwrap();
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let (sparse_vector, _) = random_named_vector(&mut rnd, dim);
let idx = n.into();
sparse_segment
.upsert_point(n as SeqNumberType, idx, sparse_vector, &hw_counter)
.unwrap();
}
let payload_index_ptr = sparse_segment.payload_index.clone();
let vector_storage = &sparse_segment.vector_data[SPARSE_VECTOR_NAME].vector_storage;
let sparse_index = create_sparse_vector_index_test(SparseVectorIndexOpenArgs {
config: SparseIndexConfig {
full_scan_threshold: Some(DEFAULT_SPARSE_FULL_SCAN_THRESHOLD),
index_type: SparseIndexType::ImmutableRam,
datatype: Some(VectorStorageDatatype::Float32),
},
id_tracker: sparse_segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
payload_index: payload_index_ptr,
path: index_dir.path(),
stopped: &stopped,
tick_progress: || (),
})
.unwrap();
let query_vec = QueryVector::Nearest(VectorInternal::Sparse(
SparseVector::new(vec![0, 1, 2], vec![42.0, 42.42, 42.4242]).unwrap(),
));
let query_context = QueryContext::default();
let segment_query_context = query_context.get_segment_query_context();
let vector_context = segment_query_context.get_vector_context(SPARSE_VECTOR_NAME);
let cpu_usage = query_context.hardware_usage_accumulator().get_cpu();
assert_eq!(cpu_usage, 0);
// Some filter so we do plain sparse search
let ids: AHashSet<PointIdType> = (0..3).map(ExtendedPointId::NumId).collect();
let filter = Filter::new_must(Condition::HasId(HasIdCondition::from(ids)));
sparse_index
.search(&[&query_vec], Some(&filter), 1, None, &vector_context)
.unwrap();
let cpu_usage = query_context.hardware_usage_accumulator().get_cpu();
assert!(cpu_usage > 0);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/nested_filtering_test.rs | lib/segment/tests/integration/nested_filtering_test.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::index::PayloadIndex;
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::payload_storage::PayloadStorage;
use segment::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use segment::types::{Condition, FieldCondition, Filter, Match, Payload, PayloadSchemaType, Range};
use tempfile::Builder;
const NUM_POINTS: usize = 200;
fn nested_payloads() -> Vec<Payload> {
let mut res = Vec::new();
for i in 0..NUM_POINTS {
let payload = payload_json! {
"arr1": [
{"a": 1, "b": i % 10 + 1, "c": i % 2 + 1, "d": i % 3, "text": format!("a1 b{} c{} d{}", i, i % 10 + 1, i % 3) },
{"a": 2, "b": i % 10 + 2, "c": i % 2 + 1, "d": i % 3, "text": format!("a2 b{} c{} d{}", i, i % 10 + 2, i % 3) },
{"a": 3, "b": i % 10 + 3, "c": i % 2 + 2, "d": i % 3, "text": format!("a3 b{} c{} d{}", i, i % 10 + 3, i % 3) },
{"a": 4, "b": i % 10 + 4, "c": i % 2 + 2, "d": i % 3, "text": format!("a4 b{} c{} d{}", i, i % 10 + 4, i % 3) },
{"a": [5, 6], "b": i % 10 + 5, "c": i % 2 + 2, "d": i % 3, "text": format!("a5 b{} c{} d{}", i, i % 10 + 5, i % 3) },
],
"f": i % 10,
"arr2": [
{
"arr3": [
{ "a": 1, "b": i % 7 + 1 },
{ "a": 2, "b": i % 7 + 2 },
]
},
{
"arr3": [
{ "a": 3, "b": i % 7 + 3 },
{ "a": 4, "b": i % 7 + 4 },
]
}
]
};
res.push(payload);
}
res
}
#[test]
fn test_filtering_context_consistency() {
// let seed = 42;
// let mut rng = StdRng::seed_from_u64(seed);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let mut payload_storage = InMemoryPayloadStorage::default();
let mut points = HashMap::new();
let hw_counter = HardwareCounterCell::new();
let is_stopped = AtomicBool::new(false);
for (idx, payload) in nested_payloads().into_iter().enumerate() {
points.insert(idx, payload.clone());
payload_storage
.set(idx as PointOffsetType, &payload, &hw_counter)
.unwrap();
}
let wrapped_payload_storage = Arc::new(AtomicRefCell::new(payload_storage.into()));
let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(NUM_POINTS)));
let mut index = StructPayloadIndex::open(
wrapped_payload_storage,
id_tracker,
HashMap::new(),
dir.path(),
true,
true,
)
.unwrap();
index
.set_indexed(&JsonPath::new("f"), PayloadSchemaType::Integer, &hw_counter)
.unwrap();
index
.set_indexed(
&JsonPath::new("arr1[].a"),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
index
.set_indexed(
&JsonPath::new("arr1[].b"),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
index
.set_indexed(
&JsonPath::new("arr1[].c"),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
index
.set_indexed(
&JsonPath::new("arr1[].d"),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
index
.set_indexed(
&JsonPath::new("arr1[].text"),
PayloadSchemaType::Text,
&hw_counter,
)
.unwrap();
{
let nested_condition_0 = Condition::new_nested(
JsonPath::new("arr1"),
Filter {
must: Some(vec![
// E.g. idx = 6 => { "a" = 1, "b" = 7, "c" = 1, "d" = 0 }
Condition::Field(FieldCondition::new_match(JsonPath::new("a"), 1.into())),
Condition::Field(FieldCondition::new_match(JsonPath::new("c"), 1.into())),
]),
should: None,
min_should: None,
must_not: Some(vec![Condition::Field(FieldCondition::new_range(
JsonPath::new("d"),
Range {
lte: Some(1.into()),
..Default::default()
},
))]),
},
);
let nested_filter_0 = Filter::new_must(nested_condition_0);
let res0 = index.query_points(&nested_filter_0, &hw_counter, &is_stopped);
let filter_context = index.filter_context(&nested_filter_0, &hw_counter);
let check_res0: Vec<_> = (0..NUM_POINTS as PointOffsetType)
.filter(|point_id| filter_context.check(*point_id as PointOffsetType))
.collect();
assert_eq!(res0, check_res0);
assert!(!res0.is_empty());
// i % 2 + 1 == 1
// i % 3 == 2
// result = 2, 8, 14, ...
assert!(res0.contains(&2));
assert!(res0.contains(&8));
assert!(res0.contains(&14));
}
{
let nested_condition_1 = Condition::new_nested(
JsonPath::new("arr1"),
Filter {
must: Some(vec![
// E.g. idx = 6 => { "a" = 1, "b" = 7, "c" = 1, "d" = 0 }
Condition::Field(FieldCondition::new_match(JsonPath::new("a"), 1.into())),
Condition::Field(FieldCondition::new_match(JsonPath::new("c"), 1.into())),
Condition::Field(FieldCondition::new_match(JsonPath::new("d"), 0.into())),
]),
should: None,
min_should: None,
must_not: None,
},
);
let nested_filter_1 = Filter::new_must(nested_condition_1);
let res1 = index.query_points(&nested_filter_1, &hw_counter, &is_stopped);
let filter_context = index.filter_context(&nested_filter_1, &hw_counter);
let check_res1: Vec<_> = (0..NUM_POINTS as PointOffsetType)
.filter(|point_id| filter_context.check(*point_id as PointOffsetType))
.collect();
assert_eq!(res1, check_res1);
assert!(!res1.is_empty());
assert!(res1.contains(&6));
}
{
let nested_condition_2 = Condition::new_nested(
JsonPath::new("arr1"),
Filter {
must: Some(vec![
// E.g. idx = 6 => { "a" = 1, "b" = 7, "c" = 1, "d" = 0 }
Condition::Field(FieldCondition::new_match(JsonPath::new("a"), 1.into())),
Condition::Field(FieldCondition::new_match(
JsonPath::new("text"),
Match::Text("c1".to_string().into()),
)),
Condition::Field(FieldCondition::new_match(JsonPath::new("d"), 0.into())),
]),
should: None,
min_should: None,
must_not: None,
},
);
let nested_filter_2 = Filter::new_must(nested_condition_2);
let res2 = index.query_points(&nested_filter_2, &hw_counter, &is_stopped);
let filter_context = index.filter_context(&nested_filter_2, &hw_counter);
let check_res2: Vec<_> = (0..NUM_POINTS as PointOffsetType)
.filter(|point_id| filter_context.check(*point_id as PointOffsetType))
.collect();
assert_eq!(res2, check_res2);
assert!(!res2.is_empty());
}
{
let nested_condition_3 = Condition::new_nested(
JsonPath::new("arr1"),
Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new("b"),
1.into(),
))),
);
let nester_condition_3_1 = Condition::new_nested(
JsonPath::new("arr2"),
Filter {
must: Some(vec![Condition::new_nested(
JsonPath::new("arr3"),
Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new("b"),
10.into(),
))),
)]),
should: None,
min_should: None,
must_not: None,
},
);
let nested_filter_3 = Filter {
must: Some(vec![nested_condition_3, nester_condition_3_1]),
should: None,
min_should: None,
must_not: None,
};
let res3 = index.query_points(&nested_filter_3, &hw_counter, &is_stopped);
let filter_context = index.filter_context(&nested_filter_3, &hw_counter);
let check_res3: Vec<_> = (0..NUM_POINTS as PointOffsetType)
.filter(|point_id| filter_context.check(*point_id as PointOffsetType))
.collect();
assert_eq!(res3, check_res3);
assert!(!res3.is_empty());
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/multivector_quantization_test.rs | lib/segment/tests/integration/multivector_quantization_test.rs | use std::collections::{BTreeSet, HashMap};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::ScoredPointOffset;
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, MultiDenseVectorInternal, QueryVector, only_default_multi_vector,
};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_multi_vector};
use segment::fixtures::query_fixtures::QueryVariant;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::{PayloadIndex, VectorIndex};
use segment::json_path::JsonPath;
use segment::segment_constructor::build_segment;
use segment::types::{
BinaryQuantizationConfig, CompressionRatio, Condition, Distance, FieldCondition, Filter,
HnswConfig, Indexes, MultiVectorConfig, PayloadSchemaType, ProductQuantizationConfig,
QuantizationSearchParams, Range, ScalarQuantizationConfig, SearchParams, SegmentConfig,
SeqNumberType, VectorDataConfig, VectorStorageType,
};
use segment::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use tempfile::Builder;
const MAX_VECTORS_COUNT: usize = 3;
enum QuantizationVariant {
Scalar,
PQ,
Binary,
}
fn random_vector<R: Rng + ?Sized>(rng: &mut R, dim: usize) -> MultiDenseVectorInternal {
let count = rng.random_range(1..=MAX_VECTORS_COUNT);
let mut vector = random_multi_vector(rng, dim, count);
// for BQ change range to [-0.5; 0.5]
vector.flattened_vectors.iter_mut().for_each(|x| *x -= 0.5);
vector
}
fn random_query<R: Rng + ?Sized>(variant: &QueryVariant, rng: &mut R, dim: usize) -> QueryVector {
segment::fixtures::query_fixtures::random_query(variant, rng, |rng: &mut R| {
random_vector(rng, dim).into()
})
}
fn sames_count(a: &[Vec<ScoredPointOffset>], b: &[Vec<ScoredPointOffset>]) -> usize {
a[0].iter()
.map(|x| x.idx)
.collect::<BTreeSet<_>>()
.intersection(&b[0].iter().map(|x| x.idx).collect())
.count()
}
#[rstest]
#[case::nearest_binary_dot(
QueryVariant::Nearest,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
32, // ef
false,
25., // min_acc out of 100
)]
#[case::discovery_binary_dot(
QueryVariant::Discovery,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
128, // ef
false,
20., // min_acc out of 100
)]
#[case::recobestscore_binary_dot(
QueryVariant::RecoBestScore,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
64, // ef
false,
20., // min_acc out of 100
)]
#[case::recosumscores_binary_dot(
QueryVariant::RecoSumScores,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
64, // ef
false,
20., // min_acc out of 100
)]
#[case::nearest_binary_cosine(
QueryVariant::Nearest,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
32, // ef
false,
25., // min_acc out of 100
)]
#[case::discovery_binary_cosine(
QueryVariant::Discovery,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
128, // ef
false,
15., // min_acc out of 100
)]
#[case::recobestscore_binary_cosine(
QueryVariant::RecoBestScore,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
64, // ef
false,
15., // min_acc out of 100
)]
#[case::recosumscores_binary_cosine(
QueryVariant::RecoSumScores,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
64, // ef
false,
15., // min_acc out of 100
)]
#[case::nearest_scalar_dot(
QueryVariant::Nearest,
QuantizationVariant::Scalar,
Distance::Dot,
32, // dim
32, // ef
false,
80., // min_acc out of 100
)]
#[case::nearest_scalar_cosine(
QueryVariant::Nearest,
QuantizationVariant::Scalar,
Distance::Cosine,
32, // dim
32, // ef
false,
80., // min_acc out of 100
)]
#[case::nearest_pq_dot(
QueryVariant::Nearest,
QuantizationVariant::PQ,
Distance::Dot,
16, // dim
32, // ef
false,
70., // min_acc out of 100
)]
#[case::nearest_scalar_cosine_on_disk(
QueryVariant::Nearest,
QuantizationVariant::Scalar,
Distance::Cosine,
32, // dim
32, // ef
true,
80., // min_acc out of 100
)]
fn test_multivector_quantization_hnsw(
#[case] query_variant: QueryVariant,
#[case] quantization_variant: QuantizationVariant,
#[case] distance: Distance,
#[case] dim: usize,
#[case] ef: usize,
#[case] on_disk: bool,
#[case] min_acc: f64, // out of 100
) {
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::types::HnswGlobalConfig;
let stopped = AtomicBool::new(false);
let m = 8;
let num_vectors: u64 = 1_000;
let ef_construct = 16;
let full_scan_threshold = 16; // KB
let num_payload_values = 2;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let quantized_data_path = dir.path();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let storage_type = if on_disk {
VectorStorageType::ChunkedMmap
} else {
#[cfg(feature = "rocksdb")]
{
VectorStorageType::Memory
}
#[cfg(not(feature = "rocksdb"))]
{
VectorStorageType::InRamChunkedMmap
}
};
let config = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: dim,
distance,
storage_type,
index: Indexes::Plain {},
quantization_config: None,
multivector_config: Some(MultiVectorConfig::default()), // uses multivec config
datatype: None,
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
};
let int_key = "int";
let mut segment = build_segment(dir.path(), &config, true).unwrap();
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
let payload = payload_json! {int_key: int_payload};
segment
.upsert_point(
n as SeqNumberType,
idx,
only_default_multi_vector(&vector),
&hw_counter,
)
.unwrap();
segment
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
}
segment
.payload_index
.borrow_mut()
.set_indexed(
&JsonPath::new(int_key),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
let quantization_config = match quantization_variant {
QuantizationVariant::Scalar => ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: Some(false),
}
.into(),
QuantizationVariant::PQ => ProductQuantizationConfig {
compression: CompressionRatio::X8,
always_ram: Some(false),
}
.into(),
QuantizationVariant::Binary => BinaryQuantizationConfig {
always_ram: Some(false),
encoding: None,
query_encoding: None,
}
.into(),
};
segment.vector_data.values_mut().for_each(|vector_storage| {
{
// test persistence, encode and save quantized vectors
QuantizedVectors::create(
&vector_storage.vector_storage.borrow(),
&quantization_config,
QuantizedVectorsStorageType::Immutable,
quantized_data_path,
4,
&stopped,
)
.unwrap();
}
// test persistence, load quantized vectors
let quantized_vectors = QuantizedVectors::load(
&quantization_config,
&vector_storage.vector_storage.borrow(),
quantized_data_path,
&stopped,
)
.unwrap()
.unwrap();
vector_storage.quantized_vectors = Arc::new(AtomicRefCell::new(Some(quantized_vectors)));
});
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = 1; // single-threaded for deterministic build
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.clone(),
quantized_vectors: segment.vector_data[DEFAULT_VECTOR_NAME]
.quantized_vectors
.clone(),
payload_index: segment.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 5;
let mut sames = 0;
let attempts = 100;
for _ in 0..attempts {
let query = random_query(&query_variant, &mut rng, dim);
let range_size = 40;
let left_range = rng.random_range(0..400);
let right_range = left_range + range_size;
let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
JsonPath::new(int_key),
Range {
lt: None,
gt: None,
gte: Some(OrderedFloat(f64::from(left_range))),
lte: Some(OrderedFloat(f64::from(right_range))),
},
)));
let filter_query = Some(&filter);
let index_result = hnsw_index
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
quantization: Some(QuantizationSearchParams {
oversampling: Some(1.3),
..Default::default()
}),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let plain_result = hnsw_index
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
quantization: Some(QuantizationSearchParams {
ignore: true,
..Default::default()
}),
exact: true,
..Default::default()
}),
&Default::default(),
)
.unwrap();
sames += sames_count(&plain_result, &index_result);
}
let acc = 100.0 * sames as f64 / (attempts * top) as f64;
println!("sames = {sames}, attempts = {attempts}, top = {top}, acc = {acc}");
assert!(acc > min_acc);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/scroll_filtering_test.rs | lib/segment/tests/integration/scroll_filtering_test.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::fixtures::payload_fixtures::random_filter;
use segment::fixtures::segment_fixtures::random_segment;
use tempfile::Builder;
const NUM_POINTS: usize = 2000;
const ATTEMPTS: usize = 100;
#[test]
fn test_filtering_context_consistency() {
let is_stopped = AtomicBool::new(false);
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let segment = random_segment(dir.path(), NUM_POINTS);
let hw_counter = HardwareCounterCell::new();
for _ in 0..ATTEMPTS {
let filter = random_filter(&mut rng, 3);
let random_offset = rng.random_range(0..10);
let read_by_index_res = segment.filtered_read_by_index(
Some(random_offset.into()),
Some(10),
&filter,
&is_stopped,
&hw_counter,
);
let read_by_stream_res = segment.filtered_read_by_id_stream(
Some(random_offset.into()),
Some(10),
&filter,
&is_stopped,
&hw_counter,
);
assert_eq!(read_by_index_res, read_by_stream_res, "filter: {filter:#?}");
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/payload_index_test.rs | lib/segment/tests/integration/payload_index_test.rs | use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use anyhow::{Context, Result};
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::progress_tracker::ProgressTracker;
use common::types::PointOffsetType;
use fnv::FnvBuildHasher;
use fs_err as fs;
use indexmap::IndexSet;
use itertools::Itertools;
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::facets::{FacetParams, FacetValue};
use segment::data_types::index::{
FloatIndexParams, FloatIndexType, IntegerIndexParams, IntegerIndexType, KeywordIndexParams,
KeywordIndexType, TextIndexParams, TextIndexType,
};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::fixtures::payload_fixtures::{
FLICKING_KEY, FLT_KEY, GEO_KEY, INT_KEY, INT_KEY_2, INT_KEY_3, LAT_RANGE, LON_RANGE, STR_KEY,
STR_PROJ_KEY, STR_ROOT_PROJ_KEY, TEXT_KEY, generate_diverse_nested_payload,
generate_diverse_payload, random_filter, random_nested_filter, random_vector,
};
use segment::index::PayloadIndex;
use segment::index::field_index::{FieldIndex, PrimaryCondition};
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::payload_storage::PayloadStorage;
use segment::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use segment::segment::Segment;
use segment::segment_constructor::build_segment;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::PayloadFieldSchema::{FieldParams, FieldType};
use segment::types::PayloadSchemaType::{Integer, Keyword};
use segment::types::{
AnyVariants, Condition, Distance, FieldCondition, Filter, GeoBoundingBox, GeoLineString,
GeoPoint, GeoPolygon, GeoRadius, HnswConfig, HnswGlobalConfig, Indexes, IsEmptyCondition,
Match, Payload, PayloadField, PayloadFieldSchema, PayloadSchemaParams, PayloadSchemaType,
Range, SegmentConfig, ValueVariants, VectorDataConfig, VectorStorageType, WithPayload,
};
use segment::utils::scored_point_ties::ScoredPointTies;
use tempfile::{Builder, TempDir};
macro_rules! here {
() => {
format!("at {}:{}", file!(), line!())
};
}
/// `anyhow::ensure!` but with location, as what `assert!` would do
macro_rules! ensure {
($($arg:tt)*) => {
(|| Ok(anyhow::ensure!($($arg)*)))().map_err(|e| {
e.context(here!())
})?
};
}
const DIM: usize = 5;
const ATTEMPTS: usize = 20;
struct TestSegments {
_base_dir: TempDir,
struct_segment: Segment,
plain_segment: Segment,
mmap_segment: Segment,
}
impl TestSegments {
fn new() -> Self {
let base_dir = Builder::new().prefix("test_segments").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let mut rng = StdRng::seed_from_u64(42);
let config = Self::make_simple_config(true);
let mut plain_segment =
build_segment(&base_dir.path().join("plain"), &config, true).unwrap();
let mut struct_segment =
build_segment(&base_dir.path().join("struct"), &config, true).unwrap();
let num_points = 3000;
let points_to_delete = 500;
let points_to_clear = 500;
let mut opnum = 0;
struct_segment
.create_field_index(
opnum,
&JsonPath::new(INT_KEY_2),
Some(&Integer.into()),
&hw_counter,
)
.unwrap();
opnum += 1;
for n in 0..num_points {
let idx = n.into();
let vector = random_vector(&mut rng, DIM);
let payload: Payload = generate_diverse_payload(&mut rng);
plain_segment
.upsert_point(opnum, idx, only_default_vector(&vector), &hw_counter)
.unwrap();
struct_segment
.upsert_point(opnum, idx, only_default_vector(&vector), &hw_counter)
.unwrap();
plain_segment
.set_full_payload(opnum, idx, &payload, &hw_counter)
.unwrap();
struct_segment
.set_full_payload(opnum, idx, &payload, &hw_counter)
.unwrap();
opnum += 1;
}
struct_segment
.create_field_index(
opnum,
&JsonPath::new(STR_KEY),
Some(&Keyword.into()),
&hw_counter,
)
.unwrap();
let int_payload_schema = PayloadFieldSchema::FieldType(PayloadSchemaType::Integer);
struct_segment
.create_field_index(
opnum,
&JsonPath::new(INT_KEY),
Some(&int_payload_schema),
&hw_counter,
)
.unwrap();
struct_segment
.create_field_index(
opnum,
&JsonPath::new(INT_KEY_2),
Some(&FieldParams(PayloadSchemaParams::Integer(
IntegerIndexParams {
r#type: IntegerIndexType::Integer,
lookup: Some(true),
range: Some(false),
is_principal: None,
on_disk: None,
},
))),
&hw_counter,
)
.unwrap();
struct_segment
.create_field_index(
opnum,
&JsonPath::new(INT_KEY_3),
Some(&FieldParams(PayloadSchemaParams::Integer(
IntegerIndexParams {
r#type: IntegerIndexType::Integer,
lookup: Some(false),
range: Some(true),
is_principal: None,
on_disk: None,
},
))),
&hw_counter,
)
.unwrap();
struct_segment
.create_field_index(
opnum,
&JsonPath::new(GEO_KEY),
Some(&PayloadSchemaType::Geo.into()),
&hw_counter,
)
.unwrap();
struct_segment
.create_field_index(
opnum,
&JsonPath::new(TEXT_KEY),
Some(&PayloadSchemaType::Text.into()),
&hw_counter,
)
.unwrap();
struct_segment
.create_field_index(
opnum,
&JsonPath::new(FLICKING_KEY),
Some(&Integer.into()),
&hw_counter,
)
.unwrap();
// Make mmap segment after inserting the points, but before deleting some of them
let mut mmap_segment =
Self::make_mmap_segment(&mut rng, &base_dir.path().join("mmap"), &plain_segment);
for _ in 0..points_to_clear {
opnum += 1;
let idx_to_remove = rng.random_range(0..num_points);
plain_segment
.clear_payload(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
struct_segment
.clear_payload(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
mmap_segment
.clear_payload(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
}
for _ in 0..points_to_delete {
opnum += 1;
let idx_to_remove = rng.random_range(0..num_points);
plain_segment
.delete_point(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
struct_segment
.delete_point(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
mmap_segment
.delete_point(opnum, idx_to_remove.into(), &hw_counter)
.unwrap();
}
for (field, indexes) in struct_segment.payload_index.borrow().field_indexes.iter() {
for index in indexes {
assert!(index.count_indexed_points() <= num_points as usize);
if field.to_string() != FLICKING_KEY {
assert!(
index.count_indexed_points()
>= (num_points as usize - points_to_delete - points_to_clear)
);
}
}
}
Self {
_base_dir: base_dir,
struct_segment,
plain_segment,
mmap_segment,
}
}
fn make_simple_config(appendable: bool) -> SegmentConfig {
let conf = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: DIM,
distance: Distance::Dot,
storage_type: VectorStorageType::default(),
index: if appendable {
Indexes::Plain {}
} else {
Indexes::Hnsw(HnswConfig::default())
},
quantization_config: None,
multivector_config: None,
datatype: None,
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
};
assert_eq!(conf.is_appendable(), appendable);
conf
}
/// Build an immutable (HNSW-indexed) segment at `path` from the contents of
/// `plain_segment`, then create on-disk payload field indexes for all the
/// keys used by the test fixtures.
fn make_mmap_segment(rng: &mut StdRng, path: &Path, plain_segment: &Segment) -> Segment {
    let stopped = AtomicBool::new(false);
    fs::create_dir(path).unwrap();
    // Non-appendable config (HNSW) — see `make_simple_config(false)`.
    let mut builder = SegmentBuilder::new(
        path,
        &path.with_extension("tmp"),
        &Self::make_simple_config(false),
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    // Copy all points of the source segment into the builder.
    builder.update(&[plain_segment], &stopped).unwrap();
    let permit = ResourcePermit::dummy(1);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let mut segment = builder
        .build(permit, &stopped, rng, &hw_counter, progress)
        .unwrap();
    // All index-creation operations below share one op number past the
    // segment's current version.
    let opnum = segment.version() + 1;
    // Keyword index (on disk) for STR_KEY.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(STR_KEY),
            Some(&FieldParams(PayloadSchemaParams::Keyword(
                KeywordIndexParams {
                    r#type: KeywordIndexType::Keyword,
                    is_tenant: None,
                    on_disk: Some(true),
                },
            ))),
            &hw_counter,
        )
        .unwrap();
    // Integer index with both lookup and range support for INT_KEY.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(INT_KEY),
            Some(&FieldParams(PayloadSchemaParams::Integer(
                IntegerIndexParams {
                    r#type: IntegerIndexType::Integer,
                    lookup: Some(true),
                    range: Some(true),
                    is_principal: None,
                    on_disk: Some(true),
                },
            ))),
            &hw_counter,
        )
        .unwrap();
    // Integer index with lookup only (no range queries) for INT_KEY_2.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(INT_KEY_2),
            Some(&FieldParams(PayloadSchemaParams::Integer(
                IntegerIndexParams {
                    r#type: IntegerIndexType::Integer,
                    lookup: Some(true),
                    range: Some(false),
                    is_principal: None,
                    on_disk: Some(true),
                },
            ))),
            &hw_counter,
        )
        .unwrap();
    // Integer index with range only (no lookup) for INT_KEY_3.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(INT_KEY_3),
            Some(&FieldParams(PayloadSchemaParams::Integer(
                IntegerIndexParams {
                    r#type: IntegerIndexType::Integer,
                    lookup: Some(false),
                    range: Some(true),
                    is_principal: None,
                    on_disk: Some(true),
                },
            ))),
            &hw_counter,
        )
        .unwrap();
    // Float index for FLT_KEY.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(FLT_KEY),
            Some(&FieldParams(PayloadSchemaParams::Float(FloatIndexParams {
                r#type: FloatIndexType::Float,
                is_principal: None,
                on_disk: Some(true),
            }))),
            &hw_counter,
        )
        .unwrap();
    // Full-text index for TEXT_KEY.
    segment
        .create_field_index(
            opnum,
            &JsonPath::new(TEXT_KEY),
            Some(&FieldParams(PayloadSchemaParams::Text(TextIndexParams {
                r#type: TextIndexType::Text,
                on_disk: Some(true),
                ..Default::default()
            }))),
            &hw_counter,
        )
        .unwrap();
    segment
}
}
/// Build a (struct, plain) pair of segments filled with identical random
/// nested payloads. Only the struct segment gets field indexes for the
/// nested keys; the plain segment serves as the unindexed baseline.
/// After insertion, random subsets of points are payload-cleared and deleted
/// (targets may repeat and overlap).
fn build_test_segments_nested_payload(path_struct: &Path, path_plain: &Path) -> (Segment, Segment) {
    // Fixed seed so both segments (and reruns) receive the same data.
    let mut rng = StdRng::seed_from_u64(42);
    let mut plain_segment = build_simple_segment(path_plain, DIM, Distance::Dot).unwrap();
    let mut struct_segment = build_simple_segment(path_struct, DIM, Distance::Dot).unwrap();
    let num_points = 3000;
    let points_to_delete = 500;
    let points_to_clear = 500;
    // Nested payload keys: plain nesting, array projection at the second
    // level, and array projection at both the root and the second level.
    let nested_str_key = JsonPath::new(&format!("{}.{}.{}", STR_KEY, "nested_1", "nested_2"));
    let nested_str_proj_key =
        JsonPath::new(&format!("{}.{}[].{}", STR_PROJ_KEY, "nested_1", "nested_2"));
    let deep_nested_str_proj_key = JsonPath::new(&format!(
        "{}[].{}[].{}",
        STR_ROOT_PROJ_KEY, "nested_1", "nested_2"
    ));
    let hw_counter = HardwareCounterCell::new();
    let mut opnum = 0;
    // Keyword-index the nested keys on the struct segment only.
    struct_segment
        .create_field_index(opnum, &nested_str_key, Some(&Keyword.into()), &hw_counter)
        .unwrap();
    struct_segment
        .create_field_index(
            opnum,
            &nested_str_proj_key,
            Some(&Keyword.into()),
            &hw_counter,
        )
        .unwrap();
    struct_segment
        .create_field_index(
            opnum,
            &deep_nested_str_proj_key,
            Some(&Keyword.into()),
            &hw_counter,
        )
        .unwrap();
    eprintln!("{deep_nested_str_proj_key}");
    opnum += 1;
    // Insert identical points and payloads into both segments.
    for n in 0..num_points {
        let idx = n.into();
        let vector = random_vector(&mut rng, DIM);
        let payload: Payload = generate_diverse_nested_payload(&mut rng);
        plain_segment
            .upsert_point(opnum, idx, only_default_vector(&vector), &hw_counter)
            .unwrap();
        struct_segment
            .upsert_point(opnum, idx, only_default_vector(&vector), &hw_counter)
            .unwrap();
        plain_segment
            .set_full_payload(opnum, idx, &payload, &hw_counter)
            .unwrap();
        struct_segment
            .set_full_payload(opnum, idx, &payload, &hw_counter)
            .unwrap();
        opnum += 1;
    }
    // Clear payloads of random points (indices may repeat).
    for _ in 0..points_to_clear {
        opnum += 1;
        let idx_to_remove = rng.random_range(0..num_points);
        plain_segment
            .clear_payload(opnum, idx_to_remove.into(), &hw_counter)
            .unwrap();
        struct_segment
            .clear_payload(opnum, idx_to_remove.into(), &hw_counter)
            .unwrap();
    }
    // Delete random points (indices may repeat and overlap with cleared ones).
    for _ in 0..points_to_delete {
        opnum += 1;
        let idx_to_remove = rng.random_range(0..num_points);
        plain_segment
            .delete_point(opnum, idx_to_remove.into(), &hw_counter)
            .unwrap();
        struct_segment
            .delete_point(opnum, idx_to_remove.into(), &hw_counter)
            .unwrap();
    }
    // Every index must have dropped removed points. Because the random
    // clear/delete targets can collide, strictly more than
    // `num_points - points_to_delete - points_to_clear` points should
    // remain indexed (probabilistic, but overwhelmingly likely at n=3000).
    for (_field, indexes) in struct_segment.payload_index.borrow().field_indexes.iter() {
        for index in indexes {
            assert!(index.count_indexed_points() <= num_points as usize);
            assert!(
                index.count_indexed_points()
                    > (num_points as usize - points_to_delete - points_to_clear)
            );
        }
    }
    (struct_segment, plain_segment)
}
/// Run the same filtered search against the plain and struct segments and
/// check that both cardinality estimations are internally consistent and
/// that both segments return the same points with (nearly) equal scores.
///
/// # Errors
/// Returns an error if an estimation is inconsistent, exceeds the owning
/// segment's point count, or the two segments disagree on results.
fn validate_geo_filter(test_segments: &TestSegments, query_filter: Filter) -> Result<()> {
    let mut rng = rand::rng();
    for _i in 0..ATTEMPTS {
        let query = random_vector(&mut rng, DIM).into();
        let plain_result = test_segments
            .plain_segment
            .search(
                DEFAULT_VECTOR_NAME,
                &query,
                &WithPayload::default(),
                &false.into(),
                Some(&query_filter),
                5,
                None,
            )
            .unwrap();
        let hw_counter = HardwareCounterCell::new();
        // Sanity-check the plain segment's estimation: min <= exp <= max.
        let estimation = test_segments
            .plain_segment
            .payload_index
            .borrow()
            .estimate_cardinality(&query_filter, &hw_counter);
        ensure!(estimation.min <= estimation.exp, "{estimation:#?}");
        ensure!(estimation.exp <= estimation.max, "{estimation:#?}");
        // Fix: bound the plain estimation by the *plain* segment's own point
        // count (it was previously compared against the struct segment; both
        // segments hold the same points, but the bound belongs to the segment
        // the estimation came from).
        ensure!(
            estimation.max
                <= test_segments
                    .plain_segment
                    .id_tracker
                    .borrow()
                    .available_point_count(),
            "{estimation:#?}",
        );
        let struct_result = test_segments
            .struct_segment
            .search(
                DEFAULT_VECTOR_NAME,
                &query,
                &WithPayload::default(),
                &false.into(),
                Some(&query_filter),
                5,
                None,
            )
            .unwrap();
        // Same sanity checks for the struct segment's estimation.
        let estimation = test_segments
            .struct_segment
            .payload_index
            .borrow()
            .estimate_cardinality(&query_filter, &hw_counter);
        ensure!(estimation.min <= estimation.exp, "{estimation:#?}");
        ensure!(estimation.exp <= estimation.max, "{estimation:#?}");
        ensure!(
            estimation.max
                <= test_segments
                    .struct_segment
                    .id_tracker
                    .borrow()
                    .available_point_count(),
            "{estimation:#?}",
        );
        // Both segments must return the same points with matching scores.
        for (r1, r2) in plain_result.iter().zip(struct_result.iter()) {
            ensure!(r1.id == r2.id);
            // Fix: compare the *absolute* score difference — the original
            // `r1.score - r2.score < 0.0001` accepted arbitrarily large
            // negative deviations.
            ensure!((r1.score - r2.score).abs() < 0.0001)
        }
    }
    Ok(())
}
/// Test read operations on segments.
/// The segments fixtures are created only once to improve test speed.
#[test]
fn test_read_operations() -> Result<()> {
    let test_segments = Arc::new(TestSegments::new());
    // All read-only checks run concurrently against the shared fixtures;
    // each gets its own thread and its own `Arc` handle.
    let test_fns: [fn(&TestSegments) -> Result<()>; 12] = [
        test_is_empty_conditions,
        test_integer_index_types,
        test_cardinality_estimation,
        test_struct_payload_index,
        test_struct_payload_geo_boundingbox_index,
        test_struct_payload_geo_radius_index,
        test_struct_payload_geo_polygon_index,
        test_any_matcher_cardinality_estimation,
        test_struct_keyword_facet,
        test_mmap_keyword_facet,
        test_struct_keyword_facet_filtered,
        test_mmap_keyword_facet_filtered,
    ];
    let handles: Vec<_> = test_fns
        .into_iter()
        .map(|test_fn| {
            let segments = Arc::clone(&test_segments);
            std::thread::spawn(move || test_fn(&segments))
        })
        .collect();
    // Propagate both panics (via `unwrap`) and returned errors (via `?`).
    for handle in handles {
        handle.join().unwrap()?;
    }
    Ok(())
}
/// Check `IsEmpty` filtering on FLICKING_KEY: the struct segment must return
/// the same points as the plain baseline, and its cardinality estimation
/// must be at least as accurate as the plain one.
fn test_is_empty_conditions(test_segments: &TestSegments) -> Result<()> {
    let filter = Filter::new_must(Condition::IsEmpty(IsEmptyCondition {
        is_empty: PayloadField {
            key: JsonPath::new(FLICKING_KEY),
        },
    }));
    let hw_counter = HardwareCounterCell::new();
    let is_stopped = AtomicBool::new(false);
    let estimation_struct = test_segments
        .struct_segment
        .payload_index
        .borrow()
        .estimate_cardinality(&filter, &hw_counter);
    let estimation_plain = test_segments
        .plain_segment
        .payload_index
        .borrow()
        .estimate_cardinality(&filter, &hw_counter);
    // Ground truth: exact matches from the plain (full-scan) segment.
    let plain_result = test_segments
        .plain_segment
        .payload_index
        .borrow()
        .query_points(&filter, &hw_counter, &is_stopped);
    let real_number = plain_result.len();
    let id_tracker = test_segments.struct_segment.id_tracker.borrow();
    let struct_result = test_segments
        .struct_segment
        .payload_index
        .borrow()
        .query_points(&filter, &hw_counter, &is_stopped)
        .into_iter()
        // null index does not track deleted points, so we need to filter them out here. In callsites,
        // the deleted check is done externally anyway
        .filter(|id| !id_tracker.is_deleted_point(*id))
        .collect::<Vec<_>>();
    ensure!(plain_result == struct_result);
    eprintln!("estimation_plain = {estimation_plain:#?}");
    eprintln!("estimation_struct = {estimation_struct:#?}");
    eprintln!("real_number = {real_number:#?}");
    // Both estimations must bracket the real number of matches...
    ensure!(estimation_plain.max >= real_number);
    ensure!(estimation_plain.min <= real_number);
    ensure!(estimation_struct.max >= real_number);
    ensure!(estimation_struct.min <= real_number);
    // ...and the indexed (struct) expectation must be no further from the
    // truth than the plain one.
    ensure!(
        (estimation_struct.exp as f64 - real_number as f64).abs()
            <= (estimation_plain.exp as f64 - real_number as f64).abs()
    );
    Ok(())
}
/// Check that integer payload fields were materialized with the expected
/// concrete index types, per their `lookup`/`range` parameters:
/// - `INT_KEY`   (lookup + range): both a map index and an int (range) index
/// - `INT_KEY_2` (lookup only):    map index only
/// - `INT_KEY_3` (range only):     int (range) index only
///
/// The same expectations are verified for the struct and the mmap segment.
fn test_integer_index_types(test_segments: &TestSegments) -> Result<()> {
    for (kind, indexes) in [
        (
            "struct",
            &test_segments.struct_segment.payload_index.borrow(),
        ),
        ("mmap", &test_segments.mmap_segment.payload_index.borrow()),
    ] {
        eprintln!("Checking {kind}_segment");
        // (field key, expects a map index, expects an int/range index) —
        // replaces three copy-pasted check groups with one data-driven loop.
        for (key, expect_map_index, expect_int_index) in [
            (INT_KEY, true, true),
            (INT_KEY_2, true, false),
            (INT_KEY_3, false, true),
        ] {
            let field_indexes = indexes.field_indexes.get(&JsonPath::new(key)).unwrap();
            let has_map_index = field_indexes
                .iter()
                .any(|index| matches!(index, FieldIndex::IntMapIndex(_)));
            let has_int_index = field_indexes
                .iter()
                .any(|index| matches!(index, FieldIndex::IntIndex(_)));
            ensure!(
                has_map_index == expect_map_index,
                "{kind}/{key}: expected map index: {expect_map_index}, got: {has_map_index}",
            );
            ensure!(
                has_int_index == expect_int_index,
                "{kind}/{key}: expected int index: {expect_int_index}, got: {has_int_index}",
            );
        }
    }
    Ok(())
}
/// Verify that the struct segment's cardinality estimation for a range
/// filter brackets the exact number of matching points.
fn test_cardinality_estimation(test_segments: &TestSegments) -> Result<()> {
    // Range filter over INT_KEY: 50 <= value <= 100.
    let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
        JsonPath::new(INT_KEY),
        Range {
            lt: None,
            gt: None,
            gte: Some(OrderedFloat(50.)),
            lte: Some(OrderedFloat(100.)),
        },
    )));
    let hw_counter = HardwareCounterCell::new();
    let estimation = test_segments
        .struct_segment
        .payload_index
        .borrow()
        .estimate_cardinality(&filter, &hw_counter);
    let payload_index = test_segments.struct_segment.payload_index.borrow();
    let filter_context = payload_index.filter_context(&filter, &hw_counter);
    // Exact cardinality via a full scan; count directly instead of
    // collecting into a Vec only to take its length.
    let exact = test_segments
        .struct_segment
        .id_tracker
        .borrow()
        .iter_internal()
        .filter(|x| filter_context.check(*x))
        .count();
    eprintln!("exact = {exact:#?}");
    eprintln!("estimation = {estimation:#?}");
    ensure!(exact <= estimation.max);
    ensure!(exact >= estimation.min);
    Ok(())
}
/// A nested filter on a root-level array projection must produce a single
/// primary clause with the fully-qualified key, and its cardinality
/// estimation must bracket the exact match count.
#[test]
fn test_root_nested_array_filter_cardinality_estimation() {
    let dir1 = Builder::new().prefix("segment1_dir").tempdir().unwrap();
    let dir2 = Builder::new().prefix("segment2_dir").tempdir().unwrap();
    let (struct_segment, _) = build_test_segments_nested_payload(dir1.path(), dir2.path());
    // rely on test data from `build_test_segments_nested_payload`
    let nested_key = "nested_1[].nested_2";
    let nested_match =
        FieldCondition::new_match(JsonPath::new(nested_key), "some value".to_owned().into());
    let filter = Filter::new_must(Condition::new_nested(
        JsonPath::new(STR_ROOT_PROJ_KEY),
        Filter::new_must(Condition::Field(nested_match)),
    ));
    let hw_counter = HardwareCounterCell::new();
    let estimation = struct_segment
        .payload_index
        .borrow()
        .estimate_cardinality(&filter, &hw_counter);
    // not empty primary clauses
    assert_eq!(estimation.primary_clauses.len(), 1);
    eprintln!("primary_clauses = {:#?}", estimation.primary_clauses);
    let primary_clause = estimation.primary_clauses.first().unwrap();
    let expected_primary_clause = FieldCondition::new_match(
        JsonPath::new(&format!("{STR_ROOT_PROJ_KEY}[].{nested_key}")), // full key expected
        "some value".to_owned().into(),
    );
    match primary_clause {
        PrimaryCondition::Condition(field_condition) => {
            assert_eq!(*field_condition, Box::new(expected_primary_clause));
        }
        o => panic!("unexpected primary clause: {o:?}"),
    }
    // Exact cardinality via a full scan; count directly instead of
    // collecting into a Vec only to take its length.
    let payload_index = struct_segment.payload_index.borrow();
    let filter_context = payload_index.filter_context(&filter, &hw_counter);
    let exact = struct_segment
        .id_tracker
        .borrow()
        .iter_internal()
        .filter(|x| filter_context.check(*x))
        .count();
    eprintln!("exact = {exact:#?}");
    eprintln!("estimation = {estimation:#?}");
    assert!(exact <= estimation.max);
    assert!(exact >= estimation.min);
}
/// Like `test_root_nested_array_filter_cardinality_estimation`, but with two
/// levels of nesting: the primary clause key must be fully qualified across
/// both nested array projections.
#[test]
fn test_nesting_nested_array_filter_cardinality_estimation() {
    let dir1 = Builder::new().prefix("segment1_dir").tempdir().unwrap();
    let dir2 = Builder::new().prefix("segment2_dir").tempdir().unwrap();
    let (struct_segment, _) = build_test_segments_nested_payload(dir1.path(), dir2.path());
    // rely on test data from `build_test_segments_nested_payload`
    let nested_match_key = "nested_2";
    let nested_match = FieldCondition::new_match(
        JsonPath::new(nested_match_key),
        "some value".to_owned().into(),
    );
    let filter = Filter::new_must(Condition::new_nested(
        JsonPath::new(STR_ROOT_PROJ_KEY),
        Filter::new_must(Condition::new_nested(
            JsonPath::new("nested_1"),
            Filter::new_must(Condition::Field(nested_match)),
        )),
    ));
    let hw_counter = HardwareCounterCell::new();
    let estimation = struct_segment
        .payload_index
        .borrow()
        .estimate_cardinality(&filter, &hw_counter);
    // not empty primary clauses
    assert_eq!(estimation.primary_clauses.len(), 1);
    eprintln!("primary_clauses = {:#?}", estimation.primary_clauses);
    let primary_clause = estimation.primary_clauses.first().unwrap();
    let expected_primary_clause = FieldCondition::new_match(
        // full key expected
        JsonPath::new(&format!(
            "{STR_ROOT_PROJ_KEY}[].nested_1[].{nested_match_key}"
        )),
        "some value".to_owned().into(),
    );
    match primary_clause {
        PrimaryCondition::Condition(field_condition) => {
            assert_eq!(*field_condition, Box::new(expected_primary_clause));
        }
        o => panic!("unexpected primary clause: {o:?}"),
    }
    // Exact cardinality via a full scan; count directly instead of
    // collecting into a Vec only to take its length.
    let payload_index = struct_segment.payload_index.borrow();
    let filter_context = payload_index.filter_context(&filter, &hw_counter);
    let exact = struct_segment
        .id_tracker
        .borrow()
        .iter_internal()
        .filter(|x| filter_context.check(*x))
        .count();
    eprintln!("exact = {exact:#?}");
    eprintln!("estimation = {estimation:#?}");
    assert!(exact <= estimation.max);
    assert!(exact >= estimation.min);
}
/// Compare search with plain, struct, and mmap indices.
fn test_struct_payload_index(test_segments: &TestSegments) -> Result<()> {
let mut rng = rand::rng();
for _i in 0..ATTEMPTS {
let query_vector = random_vector(&mut rng, DIM).into();
let query_filter = random_filter(&mut rng, 3);
let plain_result = test_segments
.plain_segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
Some(&query_filter),
5,
None,
)
.unwrap();
let struct_result = test_segments
.struct_segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
Some(&query_filter),
5,
None,
)
.unwrap();
let mmap_result = test_segments
.mmap_segment
.search(
DEFAULT_VECTOR_NAME,
&query_vector,
&WithPayload::default(),
&false.into(),
Some(&query_filter),
5,
None,
)
.unwrap();
let hw_counter = HardwareCounterCell::new();
let estimation = test_segments
.struct_segment
.payload_index
.borrow()
.estimate_cardinality(&query_filter, &hw_counter);
ensure!(estimation.min <= estimation.exp, "{estimation:#?}");
ensure!(estimation.exp <= estimation.max, "{estimation:#?}");
ensure!(
estimation.max
<= test_segments
.struct_segment
.id_tracker
.borrow()
.available_point_count(),
"{estimation:#?}",
);
// Perform additional sort to break ties by score
let mut plain_result_sorted_ties: Vec<ScoredPointTies> =
plain_result.iter().map(|x| x.into()).collect_vec();
plain_result_sorted_ties.sort();
let mut struct_result_sorted_ties: Vec<ScoredPointTies> =
struct_result.iter().map(|x| x.into()).collect_vec();
struct_result_sorted_ties.sort();
let mut mmap_result_sorted_ties: Vec<ScoredPointTies> =
mmap_result.iter().map(|x| x.into()).collect_vec();
mmap_result_sorted_ties.sort();
ensure!(
plain_result_sorted_ties.len() == struct_result_sorted_ties.len(),
"query vector {query_vector:?}\n\
query filter {query_filter:?}\n\
plain result {plain_result:?}\n\
struct result{struct_result:?}",
);
ensure!(
plain_result_sorted_ties.len() == mmap_result_sorted_ties.len(),
"query vector {query_vector:?}\n\
query filter {query_filter:?}\n\
plain result {plain_result:?}\n\
mmap result {mmap_result:?}",
);
for (r1, r2, r3) in itertools::izip!(
plain_result_sorted_ties,
struct_result_sorted_ties,
mmap_result_sorted_ties,
)
.map(|(r1, r2, r3)| (r1.0, r2.0, r3.0))
{
ensure!(
r1.id == r2.id,
"got different ScoredPoint {r1:?} and {r2:?} for\n\
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/segment_builder_test.rs | lib/segment/tests/integration/segment_builder_test.rs | use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Duration, Instant};
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::progress_tracker::ProgressTracker;
use fs_err as fs;
use itertools::Itertools;
use segment::common::operation_error::OperationError;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, VectorRef, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::index::hnsw_index::num_rayon_threads;
use segment::json_path::JsonPath;
use segment::segment::Segment;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment_with_payload_storage;
use segment::types::{
Distance, HnswGlobalConfig, Indexes, PayloadContainer, PayloadFieldSchema, PayloadKeyType,
PayloadSchemaType, PayloadStorageType, SegmentConfig, VectorDataConfig, VectorStorageType,
};
use serde_json::Value;
use sparse::common::sparse_vector::SparseVector;
use tempfile::Builder;
use crate::fixtures::segment::{
PAYLOAD_KEY, SPARSE_VECTOR_NAME, build_segment_1, build_segment_2, build_segment_sparse_1,
build_segment_sparse_2, empty_segment,
};
/// Merge two segments (one of them passed twice) with `SegmentBuilder` and
/// verify point deduplication and version resolution in the merged segment.
#[test]
fn test_building_new_segment() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let mut rng = rand::rng();
    let stopped = AtomicBool::new(false);
    let segment1 = build_segment_1(dir.path());
    let mut segment2 = build_segment_2(dir.path());
    let mut builder = SegmentBuilder::new(
        dir.path(),
        temp_dir.path(),
        &segment1.segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    let hw_counter = HardwareCounterCell::new();
    // Include overlapping with segment1 to check the deduplication: point 3
    // is upserted here with version 100, which must win in the merge.
    segment2
        .upsert_point(
            100,
            3.into(),
            only_default_vector(&[0., 0., 0., 0.]),
            &hw_counter,
        )
        .unwrap();
    // segment2 is passed twice — the builder must tolerate duplicate sources.
    builder
        .update(&[&segment1, &segment2, &segment2], &stopped)
        .unwrap();
    // Check what happens if segment building fails here: the two source
    // segments remain in `dir`, the in-progress build lives only in `temp_dir`.
    let segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(segment_count, 2);
    let temp_segment_count = fs::read_dir(temp_dir.path()).unwrap().count();
    assert_eq!(temp_segment_count, 1);
    // Now we finalize building
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let merged_segment: Segment = builder
        .build(permit, &stopped, &mut rng, &hw_counter, progress)
        .unwrap();
    // The finalized segment appears in `dir` alongside the two sources.
    let new_segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(new_segment_count, 3);
    assert_eq!(
        merged_segment.iter_points().count(),
        merged_segment.available_point_count(),
    );
    // The merged segment holds exactly the deduplicated union of points.
    assert_eq!(
        merged_segment.available_point_count(),
        segment1
            .iter_points()
            .chain(segment2.iter_points())
            .unique()
            .count(),
    );
    // The overlapping point carries the latest version.
    assert_eq!(merged_segment.point_version(3.into()), Some(100));
}
/// Like `test_building_new_segment`, but with a keyword payload field
/// registered as a defragmentation key: the merged segment must additionally
/// store its points grouped by that field's value.
#[test]
fn test_building_new_defragmented_segment() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let mut rng = rand::rng();
    let stopped = AtomicBool::new(false);
    let defragment_key = JsonPath::from_str(PAYLOAD_KEY).unwrap();
    let hw_counter = HardwareCounterCell::new();
    let payload_schema = PayloadFieldSchema::FieldType(PayloadSchemaType::Keyword);
    // Both source segments index the defragmentation key.
    let mut segment1 = build_segment_1(dir.path());
    segment1
        .create_field_index(7, &defragment_key, Some(&payload_schema), &hw_counter)
        .unwrap();
    let mut segment2 = build_segment_2(dir.path());
    segment2
        .create_field_index(17, &defragment_key, Some(&payload_schema), &hw_counter)
        .unwrap();
    let mut builder = SegmentBuilder::new(
        dir.path(),
        temp_dir.path(),
        &segment1.segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    // Include overlapping with segment1 to check the deduplication: point 3
    // gets the newer version (100) here, which must win in the merge.
    segment2
        .upsert_point(
            100,
            3.into(),
            only_default_vector(&[0., 0., 0., 0.]),
            &hw_counter,
        )
        .unwrap();
    // Request grouping of the merged points by the defragmentation key.
    builder.set_defragment_keys(vec![defragment_key.clone()]);
    builder.update(&[&segment1, &segment2], &stopped).unwrap();
    // Check what happens if segment building fails here: sources stay in
    // `dir`, the in-progress build lives only in `temp_dir`.
    let segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(segment_count, 2);
    let temp_segment_count = fs::read_dir(temp_dir.path()).unwrap().count();
    assert_eq!(temp_segment_count, 1);
    // Now we finalize building
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let merged_segment: Segment = builder
        .build(permit, &stopped, &mut rng, &hw_counter, progress)
        .unwrap();
    let new_segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(new_segment_count, 3);
    assert_eq!(
        merged_segment.iter_points().count(),
        merged_segment.available_point_count(),
    );
    // Merged segment holds the deduplicated union of points.
    assert_eq!(
        merged_segment.available_point_count(),
        segment1
            .iter_points()
            .chain(segment2.iter_points())
            .unique()
            .count(),
    );
    assert_eq!(merged_segment.point_version(3.into()), Some(100));
    // Finally, verify the physical grouping by the defragmentation key.
    if let Err(err) = check_points_defragmented(&merged_segment, &defragment_key) {
        panic!("{err}");
    }
}
/// Iterates over the internal point ids of the merged segment and checks that the
/// points are grouped by the payload value.
///
/// Points without any value for `defragment_key` must all come first; after
/// that, every distinct value must appear in exactly one contiguous run.
///
/// # Errors
/// Returns a static message describing the first grouping violation found.
fn check_points_defragmented(
    segment: &Segment,
    defragment_key: &PayloadKeyType,
) -> Result<(), &'static str> {
    let id_tracker = segment.id_tracker.borrow();
    // Value of the current (most recently started) group.
    let mut previous_value: Option<Value> = None;
    // All group values encountered so far, including the current one.
    // `serde_json::Value` is not `Hash`, so a Vec is used for membership
    // checks; the number of distinct groups in the fixtures is small.
    let mut seen_values: Vec<Value> = vec![];
    let hw_counter = HardwareCounterCell::new();
    for internal_id in id_tracker.iter_internal() {
        let external_id = id_tracker.external_id(internal_id).unwrap();
        let payload = segment.payload(external_id, &hw_counter).unwrap();
        let values = payload.get_value(defragment_key);
        if values.is_empty() {
            // Payload-less points must precede every point that has a value.
            if !seen_values.is_empty() {
                return Err(
                    "In a defragmented segment, points without a payload value should come first!",
                );
            }
            continue;
        }
        let value = values[0].clone();
        // Still inside the current group?
        if previous_value.as_ref() == Some(&value) {
            continue;
        }
        // A new group starts here; it must not have been seen before.
        // Bug fix: the first group's value is now recorded too — the previous
        // version never pushed it, so an order like A, B, A went undetected,
        // and payload-less points inside the first group were accepted.
        if seen_values.contains(&value) {
            return Err("Segment not defragmented");
        }
        seen_values.push(value.clone());
        previous_value = Some(value);
    }
    Ok(())
}
/// Like `test_building_new_segment`, but for segments holding sparse
/// vectors: verifies deduplication and version resolution during the merge.
#[test]
fn test_building_new_sparse_segment() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let mut rng = rand::rng();
    let stopped = AtomicBool::new(false);
    let hw_counter = HardwareCounterCell::new();
    let segment1 = build_segment_sparse_1(dir.path());
    let mut segment2 = build_segment_sparse_2(dir.path());
    let mut builder = SegmentBuilder::new(
        dir.path(),
        temp_dir.path(),
        &segment1.segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    // Include overlapping with segment1 to check the deduplication: point 3
    // gets the newer version (100) here, which must win in the merge.
    let vec = SparseVector::new(vec![0, 1, 2, 3], vec![0.0, 0.0, 0.0, 0.0]).unwrap();
    segment2
        .upsert_point(
            100,
            3.into(),
            NamedVectors::from_ref(SPARSE_VECTOR_NAME, VectorRef::Sparse(&vec)),
            &hw_counter,
        )
        .unwrap();
    // segment2 is passed twice — the builder must tolerate duplicate sources.
    builder
        .update(&[&segment1, &segment2, &segment2], &stopped)
        .unwrap();
    // Check what happens if segment building fails here: sources stay in
    // `dir`, the in-progress build lives only in `temp_dir`.
    let segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(segment_count, 2);
    let temp_segment_count = fs::read_dir(temp_dir.path()).unwrap().count();
    assert_eq!(temp_segment_count, 1);
    // Now we finalize building
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let merged_segment: Segment = builder
        .build(permit, &stopped, &mut rng, &hw_counter, progress)
        .unwrap();
    let new_segment_count = fs::read_dir(dir.path()).unwrap().count();
    assert_eq!(new_segment_count, 3);
    assert_eq!(
        merged_segment.iter_points().count(),
        merged_segment.available_point_count(),
    );
    // Merged segment holds the deduplicated union of points.
    assert_eq!(
        merged_segment.available_point_count(),
        segment1
            .iter_points()
            .chain(segment2.iter_points())
            .unique()
            .count(),
    );
    assert_eq!(merged_segment.point_version(3.into()), Some(100));
}
/// Build an HNSW-indexed segment from `segment`, optionally flipping the
/// stop flag from a background thread after `stop_delay_millis`.
///
/// Returns `(elapsed_millis, was_cancelled)` where `was_cancelled` is true
/// iff the build failed with `OperationError::Cancelled`.
fn estimate_build_time(segment: &Segment, stop_delay_millis: Option<u64>) -> (u64, bool) {
    let mut rng = rand::rng();
    let stopped = Arc::new(AtomicBool::new(false));
    let dir = Builder::new().prefix("segment_dir1").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    // Same vector size/distance as the source segment, but with an HNSW
    // index forced so the build performs measurable (and cancellable) work.
    let segment_config = SegmentConfig {
        vector_data: HashMap::from([(
            DEFAULT_VECTOR_NAME.to_owned(),
            VectorDataConfig {
                size: segment.segment_config.vector_data[DEFAULT_VECTOR_NAME].size,
                distance: segment.segment_config.vector_data[DEFAULT_VECTOR_NAME].distance,
                storage_type: VectorStorageType::default(),
                index: Indexes::Hnsw(Default::default()),
                quantization_config: None,
                multivector_config: None,
                datatype: None,
            },
        )]),
        sparse_vector_data: Default::default(),
        payload_storage_type: Default::default(),
    };
    let mut builder = SegmentBuilder::new(
        dir.path(),
        temp_dir.path(),
        &segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    builder.update(&[segment], &stopped).unwrap();
    // Only the `build` step is timed; the optional canceller thread sets the
    // stop flag after the requested delay.
    let now = Instant::now();
    if let Some(stop_delay_millis) = stop_delay_millis {
        let stopped_t = stopped.clone();
        std::thread::Builder::new()
            .name("build_estimator_timeout".to_string())
            .spawn(move || {
                std::thread::sleep(Duration::from_millis(stop_delay_millis));
                stopped_t.store(true, Ordering::Release);
            })
            .unwrap();
    }
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let res = builder.build(permit, &stopped, &mut rng, &hw_counter, progress);
    // Only a `Cancelled` error counts as a successful cancellation; any other
    // error is logged but reported as "not cancelled".
    let is_cancelled = match res {
        Ok(_) => false,
        Err(OperationError::Cancelled { .. }) => true,
        Err(err) => {
            eprintln!("Was expecting cancellation signal but got unexpected error: {err:?}");
            false
        }
    };
    (now.elapsed().as_millis() as u64, is_cancelled)
}
/// Unit test for a specific bug we caught before.
///
/// See: <https://github.com/qdrant/qdrant/pull/5614>
#[test]
fn test_building_new_segment_bug_5614() {
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let mut rng = rand::rng();
    let stopped = AtomicBool::new(false);
    let mut segment1 = build_segment_1(dir.path());
    let mut segment2 = build_segment_2(dir.path());
    let mut builder = SegmentBuilder::new(
        dir.path(),
        temp_dir.path(),
        &segment1.segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    // "low" vectors go into segment1 (old version), "high" vectors into
    // segment2 (new version) — the merge must keep the "high" ones.
    let vector_100_low = only_default_vector(&[1., 1., 0., 0.]);
    let vector_101_low = only_default_vector(&[2., 2., 0., 0.]);
    let vector_100_high = only_default_vector(&[3., 3., 0., 0.]);
    let vector_101_high = only_default_vector(&[4., 4., 0., 0.]);
    let hw_counter = HardwareCounterCell::new();
    // Insert point 100 and 101 in both segments
    // Do this in a specific order so that:
    // - the latter segment has a higher point version
    // - the internal point IDs don't match across segments
    segment1
        .upsert_point(123, 100.into(), vector_100_low, &hw_counter)
        .unwrap();
    segment1
        .upsert_point(123, 101.into(), vector_101_low, &hw_counter)
        .unwrap();
    segment2
        .upsert_point(124, 101.into(), vector_101_high.clone(), &hw_counter)
        .unwrap();
    segment2
        .upsert_point(124, 100.into(), vector_100_high.clone(), &hw_counter)
        .unwrap();
    builder.update(&[&segment1, &segment2], &stopped).unwrap();
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let merged_segment: Segment = builder
        .build(permit, &stopped, &mut rng, &hw_counter, progress)
        .unwrap();
    // Assert correct point versions - must have latest
    assert_eq!(merged_segment.point_version(100.into()), Some(124));
    assert_eq!(merged_segment.point_version(101.into()), Some(124));
    // Assert correct vectors still belong to the point
    // This was broken before <https://github.com/qdrant/qdrant/pull/5543>
    assert_eq!(
        merged_segment.all_vectors(100.into(), &hw_counter).unwrap(),
        vector_100_high,
    );
    assert_eq!(
        merged_segment.all_vectors(101.into(), &hw_counter).unwrap(),
        vector_101_high,
    );
}
/// Cancelling a segment build must stop it promptly: a build cancelled early
/// must finish faster than one cancelled later, and both must stop within an
/// acceptable delay after the signal.
#[test]
fn test_building_cancellation() {
    let baseline_dir = Builder::new()
        .prefix("segment_dir_baseline")
        .tempdir()
        .unwrap();
    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let dir_2 = Builder::new().prefix("segment_dir_2").tempdir().unwrap();
    let mut baseline_segment = empty_segment(baseline_dir.path());
    let mut segment = empty_segment(dir.path());
    let mut segment_2 = empty_segment(dir_2.path());
    let hw_counter = HardwareCounterCell::new();
    // Fill all three segments with identical points so their build times are
    // comparable (deduplicates the three previously copy-pasted loops).
    for seg in [&mut baseline_segment, &mut segment, &mut segment_2] {
        for idx in 0..2000 {
            seg.upsert_point(
                1,
                idx.into(),
                only_default_vector(&[0., 0., 0., 0.]),
                &hw_counter,
            )
            .unwrap();
        }
    }
    // Get normal build time without any cancellation.
    let (time_baseline, was_cancelled_baseline) = estimate_build_time(&baseline_segment, None);
    assert!(!was_cancelled_baseline);
    eprintln!("baseline time: {time_baseline}");
    // Checks that optimization with longer cancellation delay will also finish fast
    let early_stop_delay = time_baseline / 20;
    let (time_fast, was_cancelled_early) = estimate_build_time(&segment, Some(early_stop_delay));
    let late_stop_delay = time_baseline / 5;
    let (time_long, was_cancelled_later) = estimate_build_time(&segment_2, Some(late_stop_delay));
    let acceptable_stopping_delay = 600; // millis
    // Both cancelled builds must report cancellation and must stop within
    // the acceptable delay after their respective signals.
    assert!(was_cancelled_early);
    assert!(
        time_fast < early_stop_delay + acceptable_stopping_delay,
        "time_early: {time_fast}, early_stop_delay: {early_stop_delay}"
    );
    assert!(was_cancelled_later);
    assert!(
        time_long < late_stop_delay + acceptable_stopping_delay,
        "time_later: {time_long}, late_stop_delay: {late_stop_delay}"
    );
    // The earlier cancellation must produce the shorter build.
    assert!(
        time_fast < time_long,
        "time_early: {time_fast}, time_later: {time_long}, was_cancelled_later: {was_cancelled_later}",
    );
}
/// Segment building must preserve the mmap payload storage type of the
/// source configuration in the resulting segment.
#[test]
fn test_building_new_segment_with_mmap_payload() {
    let segment_dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
    let mut rng = rand::rng();
    let stopped = AtomicBool::new(false);
    // Source segment explicitly configured with mmap payload storage.
    let mut segment1 = build_simple_segment_with_payload_storage(
        segment_dir.path(),
        4,
        Distance::Dot,
        PayloadStorageType::Mmap,
    )
    .unwrap();
    assert_eq!(
        segment1.segment_config.payload_storage_type,
        PayloadStorageType::Mmap
    );
    let hw_counter = HardwareCounterCell::new();
    // add one point
    segment1
        .upsert_point(
            1,
            1.into(),
            only_default_vector(&[1.0, 0.0, 1.0, 1.0]),
            &hw_counter,
        )
        .unwrap();
    let builder = SegmentBuilder::new(
        segment_dir.path(),
        temp_dir.path(),
        &segment1.segment_config,
        &HnswGlobalConfig::default(),
    )
    .unwrap();
    // The in-progress build lives in the temp dir until finalized.
    let temp_segment_count = fs::read_dir(temp_dir.path()).unwrap().count();
    assert_eq!(temp_segment_count, 1);
    // Now we finalize building
    let permit_cpu_count = num_rayon_threads(0);
    let permit = ResourcePermit::dummy(permit_cpu_count as u32);
    let hw_counter = HardwareCounterCell::new();
    let progress = ProgressTracker::new_for_test();
    let new_segment: Segment = builder
        .build(permit, &stopped, &mut rng, &hw_counter, progress)
        .unwrap();
    // The built segment must keep the mmap payload storage type.
    assert_eq!(
        new_segment.segment_config.payload_storage_type,
        PayloadStorageType::Mmap
    );
    let new_segment_count = fs::read_dir(segment_dir.path()).unwrap().count();
    assert_eq!(new_segment_count, 2);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/fail_recovery_test.rs | lib/segment/tests/integration/fail_recovery_test.rs | use common::counter::hardware_counter::HardwareCounterCell;
use segment::common::operation_error::{OperationError, SegmentFailedState};
use segment::data_types::vectors::only_default_vector;
use segment::entry::entry_point::SegmentEntry;
use segment::payload_json;
use tempfile::Builder;
use crate::fixtures::segment::empty_segment;
#[test]
fn test_insert_fail_recovery() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let vec1 = vec![1.0, 0.0, 1.0, 1.0];
let mut segment = empty_segment(dir.path());
let hw_counter = HardwareCounterCell::new();
segment
.upsert_point(1, 1.into(), only_default_vector(&vec1), &hw_counter)
.unwrap();
segment
.upsert_point(1, 2.into(), only_default_vector(&vec1), &hw_counter)
.unwrap();
segment.error_status = Some(SegmentFailedState {
version: 2,
point_id: Some(1.into()),
error: OperationError::service_error("test error"),
});
// op_num is greater than errored. Skip because not recovered yet
let fail_res = segment.set_payload(
3,
1.into(),
&payload_json! {"color": vec!["red".to_string()]},
&None,
&hw_counter,
);
assert!(fail_res.is_err());
// Also skip even with another point operation
let fail_res = segment.set_payload(
3,
2.into(),
&payload_json! {"color": vec!["red".to_string()]},
&None,
&hw_counter,
);
assert!(fail_res.is_err());
// Perform operation, but keep error status: operation is not fully recovered yet
let ok_res = segment.set_payload(
2,
2.into(),
&payload_json! {"color": vec!["red".to_string()]},
&None,
&hw_counter,
);
assert!(ok_res.is_ok());
assert!(segment.error_status.is_some());
// Perform operation and recover the error - operation is fixed now
let recover_res = segment.set_payload(
2,
1.into(),
&payload_json! {"color": vec!["red".to_string()]},
&None,
&hw_counter,
);
assert!(recover_res.is_ok());
assert!(segment.error_status.is_none());
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/main.rs | lib/segment/tests/integration/main.rs | mod batch_search_test;
mod byte_storage_hnsw_test;
mod byte_storage_quantization_test;
mod disbalanced_vectors_test;
mod exact_search_test;
mod fail_recovery_test;
mod filtering_context_check;
mod filtrable_hnsw_test;
mod fixtures;
#[cfg(feature = "gpu")]
mod gpu_hnsw_test;
mod hnsw_discover_test;
mod hnsw_incremental_build;
mod hnsw_quantized_search_test;
mod multivector_filtrable_hnsw_test;
mod multivector_hnsw_test;
mod multivector_quantization_test;
mod nested_filtering_test;
mod payload_index_test;
mod scroll_filtering_test;
mod segment_builder_test;
mod segment_on_disk_snapshot;
mod segment_tests;
mod sparse_discover_test;
mod sparse_vector_index_search_tests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/gpu_hnsw_test.rs | lib/segment/tests/integration/gpu_hnsw_test.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::TelemetryDetail;
use ordered_float::OrderedFloat;
use parking_lot::Mutex;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_vector};
use segment::index::hnsw_index::gpu::gpu_devices_manager::LockedGpuDevice;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::index::{PayloadIndex, VectorIndex};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, PayloadSchemaType,
Range, SearchParams, SeqNumberType,
};
use tempfile::Builder;
/// Captured logs from env_logger. It's used to check that indexing was performed using GPU correctly.
/// We cannot just check `Ok` because it's possible that GPU fails and index will be built on CPU without errors.
pub struct CapturedLogs {
strings: Arc<Mutex<Vec<String>>>,
}
impl std::io::Write for CapturedLogs {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
if let Ok(buf_str) = std::str::from_utf8(buf) {
let mut strings = self.strings.lock();
strings.push(buf_str.to_string());
}
Ok(buf.len())
}
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
}
#[test]
fn test_gpu_filterable_hnsw() {
let captured_logs = Arc::new(Mutex::new(Vec::new()));
let _env_logger = env_logger::builder()
.is_test(true)
.target(env_logger::Target::Pipe(Box::new(CapturedLogs {
strings: captured_logs.clone(),
})))
.filter_level(log::LevelFilter::Trace)
.try_init();
let stopped = AtomicBool::new(false);
let max_failures = 5;
let dim = 8;
let m = 8;
let num_vectors: u64 = 10_000;
let ef = 32;
let ef_construct = 16;
let distance = Distance::Cosine;
let full_scan_threshold = 32; // KB
let num_payload_values = 2;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let int_key = "int";
let hw_counter = HardwareCounterCell::new();
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
let payload = payload_json! {int_key: int_payload};
segment
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
}
let payload_index_ptr = segment.payload_index.clone();
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;
payload_index_ptr
.borrow_mut()
.set_indexed(
&JsonPath::new(int_key),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let instance = gpu::GPU_TEST_INSTANCE.clone();
let device =
Mutex::new(gpu::Device::new(instance.clone(), &instance.physical_devices()[0]).unwrap());
let locked_device = LockedGpuDevice::new(device.lock());
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: vector_storage.clone(),
quantized_vectors: quantized_vectors.clone(),
payload_index: payload_index_ptr.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: Some(&locked_device), // enable GPU
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 3;
let mut hits = 0;
let attempts = 100;
for i in 0..attempts {
let query = random_vector(&mut rng, dim).into();
let range_size = 40;
let left_range = rng.random_range(0..400);
let right_range = left_range + range_size;
let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
JsonPath::new(int_key),
Range {
lt: None,
gt: None,
gte: Some(OrderedFloat::from(f64::from(left_range))),
lte: Some(OrderedFloat::from(f64::from(right_range))),
},
)));
let filter_query = Some(&filter);
let index_result = hnsw_index
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
..Default::default()
}),
&Default::default(),
)
.unwrap();
// check that search was performed using HNSW index
assert_eq!(
hnsw_index
.get_telemetry_data(TelemetryDetail::default())
.filtered_large_cardinality
.count,
i + 1
);
let plain_result = segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[&query], filter_query, top, None, &Default::default())
.unwrap();
if plain_result == index_result {
hits += 1;
}
}
assert!(
attempts - hits <= max_failures,
"hits: {hits} of {attempts}"
); // Not more than X% failures
eprintln!("hits = {hits:#?} out of {attempts}");
// Check from logs that GPU was used correctly.
let logs = captured_logs.lock().clone();
const UPLOAD_VECTORS_PATTERN: &str = "Upload vector data";
const UPLOAD_LINKS_PATTERN: &str = "Upload links on level 0";
// Check that vectors was uploaded to GPU only one time.
assert_eq!(
logs.iter()
.filter(|s| s.contains(UPLOAD_VECTORS_PATTERN))
.count(),
1
);
// Check that indexing was called more than one time.
let gpu_indexes_count = logs
.iter()
.filter(|s| s.contains(UPLOAD_LINKS_PATTERN))
.count();
assert!(gpu_indexes_count > 1);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/hnsw_quantized_search_test.rs | lib/segment/tests/integration/hnsw_quantized_search_test.rs | use std::collections::BTreeSet;
use std::ops::Deref;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::{ScoreType, ScoredPointOffset};
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, QueryVector, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{STR_KEY, random_vector};
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::index::{VectorIndex, VectorIndexEnum};
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment::Segment;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::segment_builder::SegmentBuilder;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::PayloadSchemaType::Keyword;
use segment::types::{
CompressionRatio, Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig,
Indexes, ProductQuantizationConfig, QuantizationConfig, QuantizationSearchParams,
ScalarQuantizationConfig, SearchParams,
};
use segment::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use tempfile::Builder;
use crate::fixtures::segment::build_segment_1;
pub fn sames_count(a: &[Vec<ScoredPointOffset>], b: &[Vec<ScoredPointOffset>]) -> usize {
a[0].iter()
.map(|x| x.idx)
.collect::<BTreeSet<_>>()
.intersection(&b[0].iter().map(|x| x.idx).collect())
.count()
}
fn hnsw_quantized_search_test(
distance: Distance,
num_vectors: u64,
quantization_config: QuantizationConfig,
) {
let stopped = AtomicBool::new(false);
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let quantized_data_path = dir.path();
let payloads_count = 50;
let dim = 131;
let m = 16;
let ef = 64;
let ef_construct = 64;
let top = 10;
let attempts = 10;
let mut rng = StdRng::seed_from_u64(42);
let mut op_num = 0;
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();
let hw_counter = HardwareCounterCell::new();
let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim);
segment
.upsert_point(op_num, idx, only_default_vector(&vector), &hw_counter)
.unwrap();
op_num += 1;
}
segment
.create_field_index(
op_num,
&JsonPath::new(STR_KEY),
Some(&Keyword.into()),
&hw_counter,
)
.unwrap();
op_num += 1;
for n in 0..payloads_count {
let idx = n.into();
let payload = payload_json! {STR_KEY: STR_KEY};
segment
.set_full_payload(op_num, idx, &payload, &hw_counter)
.unwrap();
op_num += 1;
}
segment.vector_data.values_mut().for_each(|vector_storage| {
let quantized_vectors = QuantizedVectors::create(
&vector_storage.vector_storage.borrow(),
&quantization_config,
QuantizedVectorsStorageType::Immutable,
quantized_data_path,
4,
&stopped,
)
.unwrap();
vector_storage.quantized_vectors = Arc::new(AtomicRefCell::new(Some(quantized_vectors)));
});
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold: 2 * payloads_count as usize,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = 1; // single-threaded for deterministic build
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let hnsw_index = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir.path(),
id_tracker: segment.id_tracker.clone(),
vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.clone(),
quantized_vectors: segment.vector_data[DEFAULT_VECTOR_NAME]
.quantized_vectors
.clone(),
payload_index: segment.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let query_vectors = (0..attempts)
.map(|_| random_vector(&mut rng, dim).into())
.collect::<Vec<_>>();
let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
JsonPath::new(STR_KEY),
STR_KEY.to_owned().into(),
)));
// check that quantized search is working
// to check it, compare quantized search result with exact search result
check_matches(&query_vectors, &segment, &hnsw_index, None, ef, top);
check_matches(
&query_vectors,
&segment,
&hnsw_index,
Some(&filter),
ef,
top,
);
// check that oversampling is working
// to check it, search with oversampling and check that results are not worse
check_oversampling(&query_vectors, &hnsw_index, None, ef, top);
check_oversampling(&query_vectors, &hnsw_index, Some(&filter), ef, top);
// check that rescoring is working
// to check it, set all vectors to zero and expect zero scores
let zero_vector = vec![0.0; dim];
for n in 0..num_vectors {
let idx = n.into();
segment
.upsert_point(op_num, idx, only_default_vector(&zero_vector), &hw_counter)
.unwrap();
op_num += 1;
}
check_rescoring(&query_vectors, &hnsw_index, None, ef, top);
check_rescoring(&query_vectors, &hnsw_index, Some(&filter), ef, top);
}
pub fn check_matches(
query_vectors: &[QueryVector],
segment: &Segment,
hnsw_index: &HNSWIndex,
filter: Option<&Filter>,
ef: usize,
top: usize,
) {
let exact_search_results = query_vectors
.iter()
.map(|query| {
segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow()
.search(&[query], filter, top, None, &Default::default())
.unwrap()
})
.collect::<Vec<_>>();
let mut sames: usize = 0;
let attempts = query_vectors.len();
for (query, plain_result) in query_vectors.iter().zip(exact_search_results.iter()) {
let index_result = hnsw_index
.search(
&[query],
filter,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
..Default::default()
}),
&Default::default(),
)
.unwrap();
sames += sames_count(&index_result, plain_result);
}
let acc = 100.0 * sames as f64 / (attempts * top) as f64;
println!("sames = {sames}, attempts = {attempts}, top = {top}, acc = {acc}");
assert!(acc > 40.0);
}
fn check_oversampling(
query_vectors: &[QueryVector],
hnsw_index: &HNSWIndex,
filter: Option<&Filter>,
ef: usize,
top: usize,
) {
for query in query_vectors {
let ef_oversampling = ef / 8;
let oversampling_1_result = hnsw_index
.search(
&[query],
filter,
top,
Some(&SearchParams {
hnsw_ef: Some(ef_oversampling),
quantization: Some(QuantizationSearchParams {
rescore: Some(true),
..Default::default()
}),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let best_1 = oversampling_1_result[0][0];
let worst_1 = oversampling_1_result[0].last().unwrap();
let oversampling_2_result = hnsw_index
.search(
&[query],
None,
top,
Some(&SearchParams {
hnsw_ef: Some(ef_oversampling),
quantization: Some(QuantizationSearchParams {
oversampling: Some(4.0),
rescore: Some(true),
..Default::default()
}),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let best_2 = oversampling_2_result[0][0];
let worst_2 = oversampling_2_result[0].last().unwrap();
if best_2.score < best_1.score {
println!("oversampling_1_result = {oversampling_1_result:?}");
println!("oversampling_2_result = {oversampling_2_result:?}");
}
assert!(best_2.score >= best_1.score);
assert!(worst_2.score >= worst_1.score);
}
}
fn check_rescoring(
query_vectors: &[QueryVector],
hnsw_index: &HNSWIndex,
filter: Option<&Filter>,
ef: usize,
top: usize,
) {
for query in query_vectors.iter() {
let index_result = hnsw_index
.search(
&[query],
filter,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
quantization: Some(QuantizationSearchParams {
rescore: Some(true),
..Default::default()
}),
..Default::default()
}),
&Default::default(),
)
.unwrap();
for result in &index_result[0] {
assert!(result.score < ScoreType::EPSILON);
}
}
}
#[test]
fn hnsw_quantized_search_cosine_test() {
hnsw_quantized_search_test(
Distance::Cosine,
5003,
ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: None,
}
.into(),
);
}
#[test]
fn hnsw_quantized_search_euclid_test() {
hnsw_quantized_search_test(
Distance::Euclid,
5003,
ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: None,
}
.into(),
);
}
#[test]
fn hnsw_quantized_search_manhattan_test() {
hnsw_quantized_search_test(
Distance::Manhattan,
5003,
ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: None,
}
.into(),
);
}
#[test]
fn hnsw_product_quantization_cosine_test() {
hnsw_quantized_search_test(
Distance::Cosine,
1003,
ProductQuantizationConfig {
compression: CompressionRatio::X4,
always_ram: Some(true),
}
.into(),
);
}
#[test]
fn hnsw_product_quantization_euclid_test() {
hnsw_quantized_search_test(
Distance::Euclid,
1003,
ProductQuantizationConfig {
compression: CompressionRatio::X4,
always_ram: Some(true),
}
.into(),
);
}
#[test]
fn hnsw_product_quantization_manhattan_test() {
hnsw_quantized_search_test(
Distance::Manhattan,
1003,
ProductQuantizationConfig {
compression: CompressionRatio::X4,
always_ram: Some(true),
}
.into(),
);
}
#[test]
fn test_build_hnsw_using_quantization() {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let temp_dir = Builder::new().prefix("segment_temp_dir").tempdir().unwrap();
let mut rng = rand::rng();
let stopped = AtomicBool::new(false);
let segment1 = build_segment_1(dir.path());
let mut config = segment1.segment_config.clone();
let vector_data_config = config.vector_data.get_mut(DEFAULT_VECTOR_NAME).unwrap();
vector_data_config.quantization_config = Some(
ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: None,
}
.into(),
);
vector_data_config.index = Indexes::Hnsw(HnswConfig {
m: 16,
ef_construct: 64,
full_scan_threshold: 16,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
});
let permit_cpu_count = num_rayon_threads(0);
let permit = ResourcePermit::dummy(permit_cpu_count as u32);
let hw_counter = HardwareCounterCell::new();
let mut builder = SegmentBuilder::new(
dir.path(),
temp_dir.path(),
&config,
&HnswGlobalConfig::default(),
)
.unwrap();
builder.update(&[&segment1], &stopped).unwrap();
let progress = ProgressTracker::new_for_test();
let built_segment: Segment = builder
.build(permit, &stopped, &mut rng, &hw_counter, progress)
.unwrap();
// check if built segment has quantization and index
assert!(
built_segment.vector_data[DEFAULT_VECTOR_NAME]
.quantized_vectors
.borrow()
.is_some(),
);
let borrowed_index = built_segment.vector_data[DEFAULT_VECTOR_NAME]
.vector_index
.borrow();
match borrowed_index.deref() {
VectorIndexEnum::Hnsw(hnsw_index) => {
assert!(hnsw_index.get_quantized_vectors().borrow().is_some())
}
_ => panic!("unexpected vector index type"),
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/byte_storage_quantization_test.rs | lib/segment/tests/integration/byte_storage_quantization_test.rs | use std::collections::{BTreeSet, HashMap};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::budget::ResourcePermit;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::ScoredPointOffset;
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, DenseVector, QueryVector, only_default_vector,
};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_dense_byte_vector, random_int_payload};
use segment::fixtures::query_fixtures::QueryVariant;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::{PayloadIndex, VectorIndex};
use segment::segment_constructor::build_segment;
use segment::types::{
BinaryQuantizationConfig, CompressionRatio, Condition, Distance, FieldCondition, Filter,
HnswConfig, HnswGlobalConfig, Indexes, PayloadSchemaType, ProductQuantizationConfig,
QuantizationSearchParams, Range, ScalarQuantizationConfig, SearchParams, SegmentConfig,
SeqNumberType, VectorDataConfig, VectorStorageDatatype, VectorStorageType,
};
use segment::vector_storage::VectorStorageEnum;
use segment::vector_storage::quantized::quantized_vectors::{
QuantizedVectors, QuantizedVectorsStorageType,
};
use tempfile::Builder;
enum QuantizationVariant {
Scalar,
PQ,
Binary,
}
fn random_vector<R>(rnd_gen: &mut R, dim: usize, data_type: VectorStorageDatatype) -> DenseVector
where
R: Rng + ?Sized,
{
match data_type {
VectorStorageDatatype::Float32 => unreachable!(),
VectorStorageDatatype::Float16 => {
let mut vector = segment::fixtures::payload_fixtures::random_vector(rnd_gen, dim);
vector.iter_mut().for_each(|x| *x -= 0.5);
vector
}
VectorStorageDatatype::Uint8 => random_dense_byte_vector(rnd_gen, dim),
}
}
fn random_query<R: Rng + ?Sized>(
variant: &QueryVariant,
rng: &mut R,
dim: usize,
data_type: VectorStorageDatatype,
) -> QueryVector {
segment::fixtures::query_fixtures::random_query(variant, rng, |rng| {
random_vector(rng, dim, data_type).into()
})
}
fn sames_count(a: &[Vec<ScoredPointOffset>], b: &[Vec<ScoredPointOffset>]) -> usize {
a[0].iter()
.map(|x| x.idx)
.collect::<BTreeSet<_>>()
.intersection(&b[0].iter().map(|x| x.idx).collect())
.count()
}
#[rstest]
#[case::nearest_binary_dot(
QueryVariant::Nearest,
VectorStorageDatatype::Float16,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
32, // ef
10., // min_acc out of 100
)]
#[case::nearest_binary_dot(
QueryVariant::Nearest,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
32, // ef
5., // min_acc out of 100
)]
#[case::discovery_binary_dot(
QueryVariant::Discovery,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
128, // ef
1., // min_acc out of 100
)]
#[case::recobestscore_binary_dot(
QueryVariant::RecoBestScore,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
64, // ef
1., // min_acc out of 100
)]
#[case::recosumscores_binary_dot(
QueryVariant::RecoSumScores,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Dot,
128, // dim
64, // ef
1., // min_acc out of 100
)]
#[case::nearest_binary_cosine(
QueryVariant::Nearest,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
32, // ef
25., // min_acc out of 100
)]
#[case::discovery_binary_cosine(
QueryVariant::Discovery,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
128, // ef
15., // min_acc out of 100
)]
#[case::recobestscore_binary_cosine(
QueryVariant::RecoBestScore,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
64, // ef
15., // min_acc out of 100
)]
#[case::recosumscores_binary_cosine(
QueryVariant::RecoSumScores,
VectorStorageDatatype::Uint8,
QuantizationVariant::Binary,
Distance::Cosine,
128, // dim
64, // ef
15., // min_acc out of 100
)]
#[case::nearest_scalar_dot(
QueryVariant::Nearest,
VectorStorageDatatype::Float16,
QuantizationVariant::Scalar,
Distance::Dot,
32, // dim
32, // ef
80., // min_acc out of 100
)]
#[case::nearest_scalar_dot(
QueryVariant::Nearest,
VectorStorageDatatype::Uint8,
QuantizationVariant::Scalar,
Distance::Dot,
32, // dim
32, // ef
80., // min_acc out of 100
)]
#[case::nearest_scalar_cosine(
QueryVariant::Nearest,
VectorStorageDatatype::Uint8,
QuantizationVariant::Scalar,
Distance::Cosine,
32, // dim
32, // ef
80., // min_acc out of 100
)]
#[case::nearest_pq_dot(
QueryVariant::Nearest,
VectorStorageDatatype::Uint8,
QuantizationVariant::PQ,
Distance::Dot,
16, // dim
32, // ef
70., // min_acc out of 100
)]
fn test_byte_storage_binary_quantization_hnsw(
#[case] query_variant: QueryVariant,
#[case] storage_data_type: VectorStorageDatatype,
#[case] quantization_variant: QuantizationVariant,
#[case] distance: Distance,
#[case] dim: usize,
#[case] ef: usize,
#[case] min_acc: f64, // out of 100
) {
use common::counter::hardware_counter::HardwareCounterCell;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
let stopped = AtomicBool::new(false);
let m = 8;
let num_vectors: u64 = 5_000;
let ef_construct = 16;
let full_scan_threshold = 16; // KB
let num_payload_values = 2;
let mut rng = StdRng::seed_from_u64(42);
let dir_byte = Builder::new().prefix("segment_dir_byte").tempdir().unwrap();
let quantized_data_path = dir_byte.path();
let hnsw_dir_byte = Builder::new().prefix("hnsw_dir_byte").tempdir().unwrap();
let config_byte = SegmentConfig {
vector_data: HashMap::from([(
DEFAULT_VECTOR_NAME.to_owned(),
VectorDataConfig {
size: dim,
distance,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: Some(storage_data_type),
},
)]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
};
let int_key = "int";
let mut segment_byte = build_segment(dir_byte.path(), &config_byte, true).unwrap();
// check that `segment_byte` uses byte or half storage
{
let borrowed_storage = segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.borrow();
let raw_storage: &VectorStorageEnum = &borrowed_storage;
#[cfg(feature = "rocksdb")]
assert!(matches!(
raw_storage,
&VectorStorageEnum::DenseSimpleByte(_) | &VectorStorageEnum::DenseSimpleHalf(_),
));
#[cfg(not(feature = "rocksdb"))]
assert!(matches!(
raw_storage,
&VectorStorageEnum::DenseAppendableInRamByte(_)
| &VectorStorageEnum::DenseAppendableInRamHalf(_),
));
}
let hw_counter = HardwareCounterCell::new();
for n in 0..num_vectors {
let idx = n.into();
let vector = random_vector(&mut rng, dim, storage_data_type);
let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
let payload = payload_json! {int_key: int_payload};
segment_byte
.upsert_point(
n as SeqNumberType,
idx,
only_default_vector(&vector),
&hw_counter,
)
.unwrap();
segment_byte
.set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
.unwrap();
}
segment_byte
.payload_index
.borrow_mut()
.set_indexed(
&JsonPath::new(int_key),
PayloadSchemaType::Integer,
&hw_counter,
)
.unwrap();
let quantization_config = match quantization_variant {
QuantizationVariant::Scalar => ScalarQuantizationConfig {
r#type: Default::default(),
quantile: None,
always_ram: None,
}
.into(),
QuantizationVariant::PQ => ProductQuantizationConfig {
compression: CompressionRatio::X8,
always_ram: None,
}
.into(),
QuantizationVariant::Binary => BinaryQuantizationConfig {
always_ram: None,
encoding: None,
query_encoding: None,
}
.into(),
};
segment_byte
.vector_data
.values_mut()
.for_each(|vector_storage| {
let quantized_vectors = QuantizedVectors::create(
&vector_storage.vector_storage.borrow(),
&quantization_config,
QuantizedVectorsStorageType::Immutable,
quantized_data_path,
4,
&stopped,
)
.unwrap();
vector_storage.quantized_vectors =
Arc::new(AtomicRefCell::new(Some(quantized_vectors)));
});
let hnsw_config = HnswConfig {
m,
ef_construct,
full_scan_threshold,
max_indexing_threads: 2,
on_disk: Some(false),
payload_m: None,
inline_storage: None,
};
let permit_cpu_count = 1; // single-threaded for deterministic build
let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
let hnsw_index_byte = HNSWIndex::build(
HnswIndexOpenArgs {
path: hnsw_dir_byte.path(),
id_tracker: segment_byte.id_tracker.clone(),
vector_storage: segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.vector_storage
.clone(),
quantized_vectors: segment_byte.vector_data[DEFAULT_VECTOR_NAME]
.quantized_vectors
.clone(),
payload_index: segment_byte.payload_index.clone(),
hnsw_config,
},
VectorIndexBuildArgs {
permit,
old_indices: &[],
gpu_device: None,
rng: &mut rng,
stopped: &stopped,
hnsw_global_config: &HnswGlobalConfig::default(),
feature_flags: FeatureFlags::default(),
progress: ProgressTracker::new_for_test(),
},
)
.unwrap();
let top = 5;
let mut sames = 0;
let attempts = 100;
for _ in 0..attempts {
let query = random_query(&query_variant, &mut rng, dim, storage_data_type);
let range_size = 40;
let left_range = rng.random_range(0..400);
let right_range = left_range + range_size;
let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
JsonPath::new(int_key),
Range {
lt: None,
gt: None,
gte: Some(OrderedFloat(f64::from(left_range))),
lte: Some(OrderedFloat(f64::from(right_range))),
},
)));
let filter_query = Some(&filter);
let index_result_byte = hnsw_index_byte
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
quantization: Some(QuantizationSearchParams {
oversampling: Some(2.0),
..Default::default()
}),
..Default::default()
}),
&Default::default(),
)
.unwrap();
let plain_result_byte = hnsw_index_byte
.search(
&[&query],
filter_query,
top,
Some(&SearchParams {
hnsw_ef: Some(ef),
quantization: Some(QuantizationSearchParams {
ignore: true,
..Default::default()
}),
exact: true,
..Default::default()
}),
&Default::default(),
)
.unwrap();
sames += sames_count(&plain_result_byte, &index_result_byte);
}
let acc = 100.0 * sames as f64 / (attempts * top) as f64;
println!("sames = {sames}, attempts = {attempts}, top = {top}, acc = {acc}");
assert!(acc > min_acc);
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/batch_search_test.rs | lib/segment/tests/integration/batch_search_test.rs | use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use rand::SeedableRng;
use rand::prelude::StdRng;
use segment::data_types::query_context::QueryContext;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::index_fixtures::random_vector;
use segment::fixtures::payload_fixtures::random_int_payload;
use segment::index::VectorIndex;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::json_path::JsonPath;
use segment::payload_json;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, HnswGlobalConfig, PayloadSchemaType,
SeqNumberType, WithPayload,
};
use tempfile::Builder;
/// End-to-end check that `search_batch` returns exactly the same results as
/// issuing the equivalent single `search` requests — first against the plain
/// (exact) index on the segment, then against a freshly built HNSW index.
#[test]
fn test_batch_and_single_request_equivalency() {
    let num_vectors: u64 = 1_000;
    let distance = Distance::Cosine;
    let num_payload_values = 2;
    let dim = 8;

    // Fixed seed keeps data generation and index construction reproducible.
    let mut rng = StdRng::seed_from_u64(42);

    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();

    let int_key = "int";

    let mut segment = build_simple_segment(dir.path(), dim, distance).unwrap();

    let hw_counter = HardwareCounterCell::new();

    // Index the integer payload field so filtered searches can use it.
    segment
        .create_field_index(
            0,
            &JsonPath::new(int_key),
            Some(&PayloadSchemaType::Integer.into()),
            &hw_counter,
        )
        .unwrap();

    // Populate the segment with random vectors and random integer payloads.
    for n in 0..num_vectors {
        let idx = n.into();
        let vector = random_vector(&mut rng, dim);
        let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
        let payload = payload_json! {int_key: int_payload};

        segment
            .upsert_point(
                n as SeqNumberType,
                idx,
                only_default_vector(&vector),
                &hw_counter,
            )
            .unwrap();
        segment
            .set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
            .unwrap();
    }

    // Plain index: two single searches must equal one batch of two queries.
    for _ in 0..10 {
        let query_vector_1 = random_vector(&mut rng, dim).into();
        let query_vector_2 = random_vector(&mut rng, dim).into();

        // Filter on a random payload value so the comparison also exercises
        // the filtered search path.
        let payload_value = random_int_payload(&mut rng, 1..=1).pop().unwrap();

        let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
            JsonPath::new(int_key),
            payload_value.into(),
        )));

        let search_res_1 = segment
            .search(
                DEFAULT_VECTOR_NAME,
                &query_vector_1,
                &WithPayload::default(),
                &false.into(),
                Some(&filter),
                10,
                None,
            )
            .unwrap();

        let search_res_2 = segment
            .search(
                DEFAULT_VECTOR_NAME,
                &query_vector_2,
                &WithPayload::default(),
                &false.into(),
                Some(&filter),
                10,
                None,
            )
            .unwrap();

        let query_context = QueryContext::default();
        let segment_query_context = query_context.get_segment_query_context();

        let batch_res = segment
            .search_batch(
                DEFAULT_VECTOR_NAME,
                &[&query_vector_1, &query_vector_2],
                &WithPayload::default(),
                &false.into(),
                Some(&filter),
                10,
                None,
                &segment_query_context,
            )
            .unwrap();

        // Batch results must be positionally identical to single-request ones.
        assert_eq!(search_res_1, batch_res[0]);
        assert_eq!(search_res_2, batch_res[1]);
    }

    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();

    let stopped = AtomicBool::new(false);

    let payload_index_ptr = segment.payload_index.clone();

    let m = 8;
    let ef_construct = 100;
    let full_scan_threshold = 10000;

    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold,
        max_indexing_threads: 2,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };

    let permit_cpu_count = num_rayon_threads(hnsw_config.max_indexing_threads);
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));

    let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
    let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;

    // Build an HNSW index over the same data to repeat the equivalency check
    // on the approximate index.
    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: vector_storage.clone(),
            quantized_vectors: quantized_vectors.clone(),
            payload_index: payload_index_ptr,
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();

    // HNSW index: a batch of two queries must match two single searches.
    for _ in 0..10 {
        let query_vector_1 = random_vector(&mut rng, dim).into();
        let query_vector_2 = random_vector(&mut rng, dim).into();

        let payload_value = random_int_payload(&mut rng, 1..=1).pop().unwrap();

        let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
            JsonPath::new(int_key),
            payload_value.into(),
        )));

        let search_res_1 = hnsw_index
            .search(
                &[&query_vector_1],
                Some(&filter),
                10,
                None,
                &Default::default(),
            )
            .unwrap();

        let search_res_2 = hnsw_index
            .search(
                &[&query_vector_2],
                Some(&filter),
                10,
                None,
                &Default::default(),
            )
            .unwrap();

        let batch_res = hnsw_index
            .search(
                &[&query_vector_1, &query_vector_2],
                Some(&filter),
                10,
                None,
                &Default::default(),
            )
            .unwrap();

        assert_eq!(search_res_1[0], batch_res[0]);
        assert_eq!(search_res_2[0], batch_res[1]);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/multivector_filtrable_hnsw_test.rs | lib/segment/tests/integration/multivector_filtrable_hnsw_test.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use common::budget::ResourcePermit;
use common::flags::FeatureFlags;
use common::progress_tracker::ProgressTracker;
use common::types::TelemetryDetail;
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rstest::rstest;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, only_default_multi_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::fixtures::payload_fixtures::{random_int_payload, random_multi_vector};
use segment::fixtures::query_fixtures::{QueryVariant, random_multi_vec_query};
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::{PayloadIndex, VectorIndex};
use segment::segment_constructor::build_segment;
use segment::types::{
Condition, Distance, FieldCondition, Filter, HnswConfig, Indexes, MultiVectorConfig,
PayloadSchemaType, Range, SearchParams, SegmentConfig, SeqNumberType, VectorDataConfig,
VectorStorageType,
};
use segment::vector_storage::VectorStorage;
use tempfile::Builder;
/// Check all cases with single vector per multi and several vectors per multi
/// Compare filtered HNSW search accuracy on multivector storage against an
/// exact plain-index search. Parameterized over query variants and over the
/// single-vector-per-point vs. several-vectors-per-point cases.
#[rstest]
#[case::nearest_eq(QueryVariant::Nearest, 1, 32, 5)]
#[case::nearest_multi(QueryVariant::Nearest, 3, 64, 20)]
#[case::discovery_eq(QueryVariant::Discovery, 1, 128, 5)]
#[case::discovery_multi(QueryVariant::Discovery, 3, 128, 20)]
#[case::recobestscore_eq(QueryVariant::RecoBestScore, 1, 64, 5)]
#[case::recobestscore_multi(QueryVariant::RecoBestScore, 2, 64, 10)]
#[case::recosumscores_eq(QueryVariant::RecoSumScores, 1, 64, 5)]
#[case::recosumscores_multi(QueryVariant::RecoSumScores, 2, 64, 10)]
fn test_multi_filterable_hnsw(
    #[case] query_variant: QueryVariant,
    #[case] max_num_vector_per_points: usize,
    #[case] ef: usize,
    #[case] max_failures: usize, // out of 100
) {
    use common::counter::hardware_counter::HardwareCounterCell;
    use segment::json_path::JsonPath;
    use segment::payload_json;
    use segment::segment_constructor::VectorIndexBuildArgs;
    use segment::types::HnswGlobalConfig;

    let stopped = AtomicBool::new(false);

    let vector_dim = 8;
    let m = 8;
    let num_points: u64 = 5_000;
    let ef_construct = 16;
    let distance = Distance::Cosine;
    let full_scan_threshold = 8; // KB
    let num_payload_values = 2;

    // Fixed seed keeps data generation and index construction reproducible.
    let mut rng = StdRng::seed_from_u64(42);

    let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
    let hnsw_dir = Builder::new().prefix("hnsw_dir").tempdir().unwrap();

    let config = SegmentConfig {
        vector_data: HashMap::from([(
            DEFAULT_VECTOR_NAME.to_owned(),
            VectorDataConfig {
                size: vector_dim,
                distance,
                storage_type: VectorStorageType::default(),
                index: Indexes::Plain {}, // uses plain index for comparison
                quantization_config: None,
                multivector_config: Some(MultiVectorConfig::default()), // uses multivec config
                datatype: None,
            },
        )]),
        sparse_vector_data: Default::default(),
        payload_storage_type: Default::default(),
    };

    let int_key = "int";

    let hw_counter = HardwareCounterCell::new();

    // Populate the segment with random multivectors and integer payloads.
    let mut segment = build_segment(dir.path(), &config, true).unwrap();
    for n in 0..num_points {
        let idx = n.into();
        // Random number of vectors per multivec point
        let num_vector_for_point = rng.random_range(1..=max_num_vector_per_points);
        let multi_vec = random_multi_vector(&mut rng, vector_dim, num_vector_for_point);

        let int_payload = random_int_payload(&mut rng, num_payload_values..=num_payload_values);
        let payload = payload_json! {int_key: int_payload};

        let named_vectors = only_default_multi_vector(&multi_vec);
        segment
            .upsert_point(n as SeqNumberType, idx, named_vectors, &hw_counter)
            .unwrap();
        segment
            .set_full_payload(n as SeqNumberType, idx, &payload, &hw_counter)
            .unwrap();
    }

    assert_eq!(
        segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_storage
            .borrow()
            .total_vector_count(),
        num_points as usize
    );

    // Index the payload field so the range filters below hit the index path.
    let payload_index_ptr = segment.payload_index.clone();
    payload_index_ptr
        .borrow_mut()
        .set_indexed(
            &JsonPath::new(int_key),
            PayloadSchemaType::Integer,
            &hw_counter,
        )
        .unwrap();

    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        full_scan_threshold,
        max_indexing_threads: 2,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };

    let permit_cpu_count = 1; // single-threaded for deterministic build
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));

    let vector_storage = &segment.vector_data[DEFAULT_VECTOR_NAME].vector_storage;
    let quantized_vectors = &segment.vector_data[DEFAULT_VECTOR_NAME].quantized_vectors;

    let hnsw_index = HNSWIndex::build(
        HnswIndexOpenArgs {
            path: hnsw_dir.path(),
            id_tracker: segment.id_tracker.clone(),
            vector_storage: vector_storage.clone(),
            quantized_vectors: quantized_vectors.clone(),
            payload_index: payload_index_ptr,
            hnsw_config,
        },
        VectorIndexBuildArgs {
            permit,
            old_indices: &[],
            gpu_device: None,
            rng: &mut rng,
            stopped: &stopped,
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: FeatureFlags::default(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap();

    // Run randomized filtered queries and compare HNSW vs. exact results.
    let top = 3;
    let mut hits = 0;
    let attempts = 100;
    for i in 0..attempts {
        // Random number of vectors per multivec query
        let num_vector_for_query = rng.random_range(1..=max_num_vector_per_points);
        let query =
            random_multi_vec_query(&query_variant, &mut rng, vector_dim, num_vector_for_query);

        let range_size = 40;
        let left_range = rng.random_range(0..400);
        let right_range = left_range + range_size;

        let filter = Filter::new_must(Condition::Field(FieldCondition::new_range(
            JsonPath::new(int_key),
            Range {
                lt: None,
                gt: None,
                gte: Some(OrderedFloat(f64::from(left_range))),
                lte: Some(OrderedFloat(f64::from(right_range))),
            },
        )));

        let filter_query = Some(&filter);

        let index_result = hnsw_index
            .search(
                &[&query],
                filter_query,
                top,
                Some(&SearchParams {
                    hnsw_ef: Some(ef),
                    ..Default::default()
                }),
                &Default::default(),
            )
            .unwrap();

        // check that search was performed using HNSW index
        assert_eq!(
            hnsw_index
                .get_telemetry_data(TelemetryDetail::default())
                .filtered_large_cardinality
                .count,
            i + 1
        );

        // segment uses a plain index by configuration
        let plain_result = segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_index
            .borrow()
            .search(&[&query], filter_query, top, None, &Default::default())
            .unwrap();

        if plain_result == index_result {
            hits += 1;
        } else {
            eprintln!("Attempt {i}/{attempts}");
            eprintln!("Different results for query {query:?}");
            eprintln!("plain_result = {plain_result:#?}");
            eprintln!("index_result = {index_result:#?}");
        }
    }
    assert!(
        attempts - hits <= max_failures,
        "hits: {hits}/{attempts} (expected less than {max_failures} failures)"
    ); // Not more than X% failures
    eprintln!("hits = {hits:#?} out of {attempts}");
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/fixtures/mod.rs | lib/segment/tests/integration/fixtures/mod.rs | pub mod segment;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/tests/integration/fixtures/segment.rs | lib/segment/tests/integration/fixtures/segment.rs | use std::collections::HashMap;
use std::path::Path;
use common::counter::hardware_counter::HardwareCounterCell;
use segment::data_types::named_vectors::NamedVectors;
use segment::data_types::vectors::{DenseVector, VectorRef, only_default_vector};
use segment::entry::entry_point::SegmentEntry;
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::payload_json;
use segment::segment::Segment;
use segment::segment_constructor::build_segment;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Distance, Indexes, SegmentConfig, SparseVectorDataConfig, SparseVectorStorageType,
VectorDataConfig, VectorName, VectorStorageType,
};
use sparse::common::sparse_vector::SparseVector;
/// Construct an empty segment with 4-dimensional dot-product vectors —
/// the common base for the fixture builders below.
pub fn empty_segment(path: &Path) -> Segment {
    let segment = build_simple_segment(path, 4, Distance::Dot);
    segment.unwrap()
}
pub const PAYLOAD_KEY: &str = "color";
pub const SPARSE_VECTOR_NAME: &VectorName = "sparse";
pub fn build_segment_1(path: &Path) -> Segment {
let mut segment1 = empty_segment(path);
let vec1 = vec![1.0, 0.0, 1.0, 1.0];
let vec2 = vec![1.0, 0.0, 1.0, 0.0];
let vec3 = vec![1.0, 1.0, 1.0, 1.0];
let vec4 = vec![1.0, 1.0, 0.0, 1.0];
let vec5 = vec![1.0, 0.0, 0.0, 0.0];
let hw_counter = HardwareCounterCell::new();
segment1
.upsert_point(1, 1.into(), only_default_vector(&vec1), &hw_counter)
.unwrap();
segment1
.upsert_point(2, 2.into(), only_default_vector(&vec2), &hw_counter)
.unwrap();
segment1
.upsert_point(3, 3.into(), only_default_vector(&vec3), &hw_counter)
.unwrap();
segment1
.upsert_point(4, 4.into(), only_default_vector(&vec4), &hw_counter)
.unwrap();
segment1
.upsert_point(5, 5.into(), only_default_vector(&vec5), &hw_counter)
.unwrap();
let payload_key = PAYLOAD_KEY;
let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};
segment1
.set_payload(6, 1.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 2.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 3.into(), &payload_option3, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 4.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment1
.set_payload(6, 5.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment1
}
pub fn build_segment_2(path: &Path) -> Segment {
let mut segment2 = empty_segment(path);
let vec1 = vec![-1.0, 0.0, 1.0, 1.0];
let vec2 = vec![-1.0, 0.0, 1.0, 0.0];
let vec3 = vec![-1.0, 1.0, 1.0, 1.0];
let vec4 = vec![-1.0, 1.0, 0.0, 1.0];
let vec5 = vec![-1.0, 0.0, 0.0, 0.0];
let hw_counter = HardwareCounterCell::new();
segment2
.upsert_point(11, 11.into(), only_default_vector(&vec1), &hw_counter)
.unwrap();
segment2
.upsert_point(12, 12.into(), only_default_vector(&vec2), &hw_counter)
.unwrap();
segment2
.upsert_point(13, 13.into(), only_default_vector(&vec3), &hw_counter)
.unwrap();
segment2
.upsert_point(14, 14.into(), only_default_vector(&vec4), &hw_counter)
.unwrap();
segment2
.upsert_point(15, 15.into(), only_default_vector(&vec5), &hw_counter)
.unwrap();
let payload_key = PAYLOAD_KEY;
let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};
segment2
.set_payload(16, 11.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment2
.set_payload(16, 12.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment2
.set_payload(16, 13.into(), &payload_option3, &None, &hw_counter)
.unwrap();
segment2
.set_payload(16, 14.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment2
.set_payload(16, 15.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment2
}
pub fn build_segment_3(path: &Path) -> Segment {
let mut segment3 = build_segment(
path,
&SegmentConfig {
vector_data: HashMap::from([
(
"vector1".into(),
VectorDataConfig {
size: 4,
distance: Distance::Dot,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: None,
},
),
(
"vector2".into(),
VectorDataConfig {
size: 1,
distance: Distance::Dot,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: None,
},
),
(
"vector3".into(),
VectorDataConfig {
size: 4,
distance: Distance::Euclid,
storage_type: VectorStorageType::default(),
index: Indexes::Plain {},
quantization_config: None,
multivector_config: None,
datatype: None,
},
),
]),
sparse_vector_data: Default::default(),
payload_storage_type: Default::default(),
},
true,
)
.unwrap();
let collect_points_data = |vectors: &[DenseVector]| {
NamedVectors::from_pairs([
("vector1".into(), vectors[0].clone()),
("vector2".into(), vectors[1].clone()),
("vector3".into(), vectors[2].clone()),
])
};
let vec1 = [
vec![1.0, 0.0, 1.0, 1.0],
vec![0.0],
vec![-1.0, 0.0, 1.0, 1.0],
];
let vec2 = [
vec![1.0, 0.0, 1.0, 0.0],
vec![1.0],
vec![-1.0, 0.0, 1.0, 1.0],
];
let vec3 = [
vec![1.0, 1.0, 1.0, 1.0],
vec![2.0],
vec![-1.0, 0.0, 1.0, 1.0],
];
let vec4 = [
vec![1.0, 1.0, 0.0, 1.0],
vec![3.0],
vec![-1.0, 0.0, 1.0, 1.0],
];
let vec5 = [
vec![1.0, 0.0, 0.0, 0.0],
vec![4.0],
vec![-1.0, 0.0, 1.0, 1.0],
];
let hw_counter = HardwareCounterCell::new();
segment3
.upsert_point(1, 1.into(), collect_points_data(&vec1), &hw_counter)
.unwrap();
segment3
.upsert_point(2, 2.into(), collect_points_data(&vec2), &hw_counter)
.unwrap();
segment3
.upsert_point(3, 3.into(), collect_points_data(&vec3), &hw_counter)
.unwrap();
segment3
.upsert_point(4, 4.into(), collect_points_data(&vec4), &hw_counter)
.unwrap();
segment3
.upsert_point(5, 5.into(), collect_points_data(&vec5), &hw_counter)
.unwrap();
let payload_key = PAYLOAD_KEY;
let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};
segment3
.set_payload(6, 1.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment3
.set_payload(6, 2.into(), &payload_option1, &None, &hw_counter)
.unwrap();
segment3
.set_payload(6, 3.into(), &payload_option3, &None, &hw_counter)
.unwrap();
segment3
.set_payload(6, 4.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment3
.set_payload(6, 5.into(), &payload_option2, &None, &hw_counter)
.unwrap();
segment3
}
/// Build a sparse-vector fixture segment with five points (ids 1..=5).
/// All points share indices 0..=3 and differ only in their weights.
pub fn build_segment_sparse_1(path: &Path) -> Segment {
    let mut segment1 = build_segment(
        path,
        &SegmentConfig {
            vector_data: Default::default(),
            sparse_vector_data: HashMap::from([(
                SPARSE_VECTOR_NAME.to_owned(),
                SparseVectorDataConfig {
                    index: SparseIndexConfig::new(None, SparseIndexType::MutableRam, None),
                    storage_type: SparseVectorStorageType::default(),
                    modifier: None,
                },
            )]),
            payload_storage_type: Default::default(),
        },
        true,
    )
    .unwrap();

    let hw_counter = HardwareCounterCell::new();

    // Weight rows for ids 1..=5, upserted in order with matching op numbers.
    let weight_rows = [
        vec![1.0, 0.0, 1.0, 1.0],
        vec![1.0, 0.0, 1.0, 0.0],
        vec![1.0, 1.0, 1.0, 1.0],
        vec![1.0, 1.0, 0.0, 1.0],
        vec![1.0, 0.0, 0.0, 0.0],
    ];
    for (i, weights) in weight_rows.into_iter().enumerate() {
        let op_num = (i + 1) as u64;
        let sparse = SparseVector::new(vec![0, 1, 2, 3], weights).unwrap();
        segment1
            .upsert_point(
                op_num,
                op_num.into(),
                NamedVectors::from_ref(SPARSE_VECTOR_NAME, VectorRef::Sparse(&sparse)),
                &hw_counter,
            )
            .unwrap();
    }

    let payload_key = PAYLOAD_KEY;
    let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
    let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
    let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};

    // All payloads are written under the same operation number (6).
    let assignments = [
        (1u64, &payload_option1),
        (2, &payload_option1),
        (3, &payload_option3),
        (4, &payload_option2),
        (5, &payload_option2),
    ];
    for (id, payload) in assignments {
        segment1
            .set_payload(6, id.into(), payload, &None, &hw_counter)
            .unwrap();
    }

    segment1
}
/// Build a second sparse-vector fixture segment with five points
/// (ids 11..=15), mirroring `build_segment_sparse_1` with negated
/// first weights.
pub fn build_segment_sparse_2(path: &Path) -> Segment {
    let mut segment2 = build_segment(
        path,
        &SegmentConfig {
            vector_data: Default::default(),
            sparse_vector_data: HashMap::from([(
                SPARSE_VECTOR_NAME.to_owned(),
                SparseVectorDataConfig {
                    index: SparseIndexConfig::new(None, SparseIndexType::MutableRam, None),
                    storage_type: SparseVectorStorageType::default(),
                    modifier: None,
                },
            )]),
            payload_storage_type: Default::default(),
        },
        true,
    )
    .unwrap();

    let hw_counter = HardwareCounterCell::new();

    // Weight rows for ids 11..=15, upserted in order with matching op numbers.
    let weight_rows = [
        vec![-1.0, 0.0, 1.0, 1.0],
        vec![-1.0, 0.0, 1.0, 0.0],
        vec![-1.0, 1.0, 1.0, 1.0],
        vec![-1.0, 1.0, 0.0, 1.0],
        vec![-1.0, 0.0, 0.0, 0.0],
    ];
    for (i, weights) in weight_rows.into_iter().enumerate() {
        let op_num = (i + 11) as u64;
        let sparse = SparseVector::new(vec![0, 1, 2, 3], weights).unwrap();
        segment2
            .upsert_point(
                op_num,
                op_num.into(),
                NamedVectors::from_ref(SPARSE_VECTOR_NAME, VectorRef::Sparse(&sparse)),
                &hw_counter,
            )
            .unwrap();
    }

    let payload_key = PAYLOAD_KEY;
    let payload_option1 = payload_json! {payload_key: vec!["red".to_owned()]};
    let payload_option2 = payload_json! {payload_key: vec!["red".to_owned(), "blue".to_owned()]};
    let payload_option3 = payload_json! {payload_key: vec!["blue".to_owned()]};

    // All payloads are written under the same operation number (16).
    let assignments = [
        (11u64, &payload_option1),
        (12, &payload_option1),
        (13, &payload_option3),
        (14, &payload_option2),
        (15, &payload_option2),
    ];
    for (id, payload) in assignments {
        segment2
            .set_payload(16, id.into(), payload, &None, &hw_counter)
            .unwrap();
    }

    segment2
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/scorer_mmap.rs | lib/segment/benches/scorer_mmap.rs | use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use rand::Rng;
use rand::distr::StandardUniform;
use segment::data_types::named_vectors::CowVector;
use segment::data_types::vectors::{DenseVector, QueryVector};
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::id_tracker::IdTrackerSS;
use segment::index::hnsw_index::point_scorer::BatchFilteredSearcher;
use segment::types::Distance;
use segment::vector_storage::dense::memmap_dense_vector_storage::open_memmap_vector_storage;
use segment::vector_storage::{DEFAULT_STOPPED, VectorStorage, VectorStorageEnum};
use tempfile::Builder;
#[cfg(not(target_os = "windows"))]
mod prof;
const NUM_VECTORS: usize = 10_000;
const DIM: usize = 1024;
/// Generate a dense vector with `size` uniformly-sampled components.
fn random_vector(size: usize) -> DenseVector {
    rand::rng()
        .sample_iter(StandardUniform)
        .take(size)
        .collect()
}
/// Create an mmap vector storage at `path`, fill it with `num` random
/// `dim`-dimensional vectors, and reopen it so the benchmark reads from a
/// freshly mapped file. Returns the storage together with an id tracker
/// covering all points.
fn init_mmap_vector_storage(
    path: &Path,
    dim: usize,
    num: usize,
    dist: Distance,
) -> (VectorStorageEnum, Arc<AtomicRefCell<IdTrackerSS>>) {
    let id_tracker: Arc<AtomicRefCell<IdTrackerSS>> =
        Arc::new(AtomicRefCell::new(FixtureIdTracker::new(num)));

    // Fill a fresh storage inside a scope, so it is flushed and closed
    // before we reopen it below.
    {
        let mut writer = open_memmap_vector_storage(path, dim, dist).unwrap();
        let mut rows =
            std::iter::repeat_with(|| (CowVector::from(random_vector(dim)), false)).take(num);
        writer
            .update_from(&mut rows, &AtomicBool::new(false))
            .unwrap();
        assert_eq!(writer.available_vector_count(), num);
    }

    let storage = open_memmap_vector_storage(path, dim, dist).unwrap();
    assert_eq!(storage.available_vector_count(), num);

    (storage, id_tracker)
}
/// Benchmark scoring of a single query vector against every vector in an
/// mmap-backed storage.
fn benchmark_scorer_mmap(c: &mut Criterion) {
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let dist = Distance::Dot;
    let (storage, id_tracker) = init_mmap_vector_storage(dir.path(), DIM, NUM_VECTORS, dist);
    let id_tracker_ref = id_tracker.borrow();

    let mut group = c.benchmark_group("storage-score-all");
    group.bench_function("storage batched vector scoring", |b| {
        b.iter_batched(
            // Setup: a fresh random query, excluded from the measurement.
            || QueryVector::from(random_vector(DIM)),
            // Measured: score the query against the whole storage.
            |query| {
                BatchFilteredSearcher::new_for_test(
                    &[query],
                    &storage,
                    id_tracker_ref.deleted_point_bitslice(),
                    10,
                )
                .peek_top_all(&DEFAULT_STOPPED)
                .unwrap()
            },
            BatchSize::SmallInput,
        )
    });
}
// Batched search gives performance benefit only when memory is contended.
// For a single-threaded criterion run, it only shows that batching penalty is relatively small.
// We might run a thread pool explicitly, though.
fn benchmark_scorer_mmap_4(c: &mut Criterion) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let dist = Distance::Dot;
let (storage, id_tracker) = init_mmap_vector_storage(dir.path(), DIM, NUM_VECTORS, dist);
let borrowed_id_tracker = id_tracker.borrow();
let mut group = c.benchmark_group("storage-score-all");
group.bench_function("storage batched vector scoring, 4 vectors batch", |b| {
b.iter_batched(
|| {
[
QueryVector::from(random_vector(DIM)),
QueryVector::from(random_vector(DIM)),
QueryVector::from(random_vector(DIM)),
QueryVector::from(random_vector(DIM)),
]
},
|vecs| {
BatchFilteredSearcher::new_for_test(
&vecs,
&storage,
borrowed_id_tracker.deleted_point_bitslice(),
10,
)
.peek_top_all(&DEFAULT_STOPPED)
.unwrap()
},
BatchSize::SmallInput,
)
});
}
// Use the flamegraph profiler on platforms where pprof sampling is supported.
#[cfg(not(target_os = "windows"))]
criterion_group! {
    name = benches;
    config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
    targets = benchmark_scorer_mmap, benchmark_scorer_mmap_4,
}

// pprof is not available on Windows; fall back to the default configuration.
#[cfg(target_os = "windows")]
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = benchmark_scorer_mmap, benchmark_scorer_mmap_4,
}

criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/prof.rs | lib/segment/benches/prof.rs | use std::io::Write;
use std::os::raw::c_int;
use std::path::Path;
use criterion::profiler::Profiler;
use fs_err as fs;
use fs_err::File;
use pprof::ProfilerGuard;
use pprof::flamegraph::TextTruncateDirection;
use pprof::protos::Message;
/// Small custom profiler that can be used with Criterion to create a flamegraph for benchmarks.
/// Also see [the Criterion documentation on this][custom-profiler].
///
/// ## Example on how to enable the custom profiler:
///
/// ```
/// mod perf;
/// use perf::FlamegraphProfiler;
///
/// fn fibonacci_profiled(criterion: &mut Criterion) {
/// // Use the criterion struct as normal here.
/// }
///
/// fn custom() -> Criterion {
/// Criterion::default().with_profiler(FlamegraphProfiler::new())
/// }
///
/// criterion_group! {
/// name = benches;
/// config = custom();
/// targets = fibonacci_profiled
/// }
/// ```
///
/// The neat thing about this is that it will sample _only_ the benchmark, and not other stuff like
/// the setup process.
///
/// Further, it will only kick in if `--profile-time <time>` is passed to the benchmark binary.
/// A flamegraph will be created for each individual benchmark in its report directory under
/// `profile/flamegraph.svg`.
///
/// [custom-profiler]: https://bheisler.github.io/criterion.rs/book/user_guide/profiling.html#implementing-in-process-profiling-hooks
/// Criterion profiler hook that records a pprof CPU profile while a benchmark
/// runs and renders it as a flamegraph on stop.
pub struct FlamegraphProfiler<'a> {
    // Sampling frequency (Hz) forwarded to `ProfilerGuard::new`.
    frequency: c_int,
    // Guard of the profiling session currently in flight, if any.
    active_profiler: Option<ProfilerGuard<'a>>,
}

impl FlamegraphProfiler<'_> {
    /// Create a profiler that will sample at `frequency` Hz once started.
    #[allow(dead_code)]
    pub fn new(frequency: c_int) -> Self {
        FlamegraphProfiler {
            frequency,
            active_profiler: None,
        }
    }
}
impl Profiler for FlamegraphProfiler<'_> {
    /// Start a pprof sampling session for the benchmark.
    fn start_profiling(&mut self, _benchmark_id: &str, _benchmark_dir: &Path) {
        self.active_profiler = Some(ProfilerGuard::new(self.frequency).unwrap());
    }

    /// Stop sampling and write `profile.pb` (raw pprof protobuf) plus
    /// `flamegraph.svg` into the benchmark's report directory.
    fn stop_profiling(&mut self, _benchmark_id: &str, benchmark_dir: &Path) {
        // Fix: bail out before touching the filesystem when no session was
        // started; previously an empty `flamegraph.svg` was created (and its
        // path printed) even if `start_profiling` had never run.
        let Some(profiler) = self.active_profiler.take() else {
            return;
        };

        fs::create_dir_all(benchmark_dir).unwrap();
        let pprof_path = benchmark_dir.join("profile.pb");
        let flamegraph_path = benchmark_dir.join("flamegraph.svg");
        eprintln!("\nflamegraph_path = {flamegraph_path:#?}");

        let report = profiler.report().build().unwrap();

        // Raw pprof protobuf — usable with `go tool pprof` and similar tools.
        let profile = report.pprof().unwrap();
        let mut content = Vec::new();
        profile.encode(&mut content).unwrap();
        let mut file = File::create(pprof_path).unwrap();
        file.write_all(&content).unwrap();

        // SVG flamegraph with tweaked rendering options for wide call stacks.
        let mut options = pprof::flamegraph::Options::default();
        options.hash = true;
        options.image_width = Some(2500);
        options.text_truncate_direction = TextTruncateDirection::Left;
        options.font_size /= 3;

        let flamegraph_file = File::create(&flamegraph_path)
            .expect("File system error while creating flamegraph.svg");
        report
            .flamegraph_with_options(flamegraph_file, &mut options)
            .expect("Error writing flamegraph");
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/hnsw_incremental_build.rs | lib/segment/benches/hnsw_incremental_build.rs | use std::collections::BTreeSet;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use clap::Parser;
use common::budget::ResourcePermit;
use common::counter::hardware_counter::HardwareCounterCell;
use common::flags::{FeatureFlags, feature_flags, init_feature_flags};
use common::progress_tracker::ProgressTracker;
use common::types::ScoredPointOffset;
use fs_err as fs;
use fs_err::File;
use io::file_operations::{atomic_save_json, read_json};
use itertools::Itertools as _;
use ndarray::{ArrayView2, Axis};
use ndarray_npy::ViewNpyExt;
use rand::rngs::StdRng;
use rand::seq::SliceRandom as _;
use rand::{Rng, SeedableRng as _};
use rayon::iter::{
IndexedParallelIterator as _, IntoParallelIterator as _, IntoParallelRefIterator,
ParallelIterator,
};
use segment::common::operation_error::OperationResult;
use segment::data_types::vectors::{
DEFAULT_VECTOR_NAME, QueryVector, VectorElementType, VectorInternal, only_default_vector,
};
use segment::entry::SegmentEntry as _;
use segment::fixtures::index_fixtures::random_vector;
use segment::id_tracker::IdTrackerSS;
use segment::index::hnsw_index::hnsw::{HNSWIndex, HnswIndexOpenArgs};
use segment::index::hnsw_index::num_rayon_threads;
use segment::index::{VectorIndex as _, VectorIndexEnum};
use segment::segment::Segment;
use segment::segment_constructor::VectorIndexBuildArgs;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{
Distance, ExtendedPointId, HnswConfig, HnswGlobalConfig, SearchParams, SeqNumberType,
};
use sha2::Digest as _;
use tap::Pipe as _;
use tempfile::Builder;
use zerocopy::IntoBytes;
// CLI arguments for the benchmark. The doc comments below double as the
// `--help` text (clap derive with `verbatim_doc_comment` preserves their
// formatting), so they are user-facing and kept verbatim.
/// This benchmark measures time and accuracy of incremental HNSW building.
///
/// To speed up the benchmark across runs, some operations that not related
/// to incremental HNSW are cached.
///
/// # Plan
///
/// 1. Non-incrementally build initial segment of size `init-vectors`.
///    The sliding window is set to `0..init_vectors`.
///    If `--cache` is set, the index is cached across runs.
/// 2. For each iteration in a loop:
///    - Slide the window by adding `to-add` vectors and removing `to-remove`
///      vectors.
///    - Incrementally build an index using the previous index.
///    - Check the accuracy of the index.
///      - Exact search results are cached across runs.
#[derive(Parser, Debug)]
#[clap(verbatim_doc_comment)]
struct Args {
    /// Ignored. (`cargo bench` passes this argument)
    #[clap(long)]
    bench: bool,
    /// Path to a dataset in numpy format.
    /// Incompatible with `--dimensions`.
    #[clap(long)]
    dataset: Option<PathBuf>,
    /// Number of dimensions to generate random vectors.
    /// Incompatible with `--dataset`.
    #[clap(long, value_parser(parse_number::<usize>))]
    dimensions: Option<usize>,
    /// Number of iterations.
    #[clap(long, default_value = "300")]
    iterations: usize,
    /// Initial number of vectors.
    #[clap(long, default_value = "100_000", value_parser(parse_number::<usize>))]
    init_vectors: usize,
    /// Number of vectors to add in each iteration.
    #[clap(long, default_value = "1_000", value_parser(parse_number::<usize>))]
    to_add: usize,
    /// Number of vectors to remove in each iteration.
    #[clap(long, default_value = "1_000", value_parser(parse_number::<usize>))]
    to_remove: usize,
    /// Distance function to use.
    #[clap(long, default_value = "Cosine")]
    distance: Distance,
    /// Index build parameter `m`.
    #[clap(long, default_value = "16", value_parser(parse_number::<usize>))]
    m: usize,
    /// Index build parameter `ef_construct`.
    /// Also, used as a search parameter `ef`.
    #[clap(long, default_value = "100", value_parser(parse_number::<usize>))]
    ef_construct: usize,
    /// Number of queries for checking the accuracy.
    #[clap(long, default_value = "100", value_parser(parse_number::<usize>))]
    queries: usize,
    /// Index search parameter `ef`.
    #[clap(long, default_value = "64", value_parser(parse_number::<usize>))]
    ef: usize,
    /// Check the accuracy every Nth iteration.
    /// Set to 0 to disable accuracy checks.
    #[clap(long, default_value = "1", value_parser(parse_number::<usize>))]
    accuracy_check_period: usize,
    /// Random seed.
    #[clap(long, default_value = "42", value_parser(parse_number::<u64>))]
    random_seed: u64,
    /// If set, cache the initial HNSW index between runs.
    #[clap(long)]
    cache: bool,
}
/// Entry point: sets up logging, loads or generates vectors, builds the
/// initial index non-incrementally, then repeatedly slides the window and
/// rebuilds the HNSW index incrementally, checking accuracy along the way.
fn main() {
    env_logger::builder()
        .filter_level(log::LevelFilter::Debug)
        .init();
    init_feature_flags(FeatureFlags::default());
    let args = Args::parse();
    log::info!("args={args:?}");
    let mut main_rng = StdRng::seed_from_u64(args.random_seed);
    let tmp_dir = Builder::new()
        .prefix("hnsw_incremental_build")
        .tempdir()
        .unwrap();
    // Persistent cache location under the cargo target dir, shared across runs.
    let cache_path = Path::new(env!("CARGO_TARGET_TMPDIR"))
        .join(env!("CARGO_PKG_NAME"))
        .join(env!("CARGO_CRATE_NAME"));
    // Load the dataset or generate random vectors.
    // These bindings only exist to keep the backing storage alive for the
    // borrowed `vectors` slices produced below.
    let (dataset_mmap, dataset);
    let vectors_mem;
    let (query_vectors, vectors): (Vec<QueryVector>, Vec<&[VectorElementType]>) =
        match (args.dimensions, args.dataset) {
            // Load the dataset from a file.
            (None, Some(dataset_path)) => {
                let dataset_file = File::open(dataset_path).unwrap();
                // SAFETY: the mapped file must not be modified while the
                // benchmark is running — acceptable assumption here.
                dataset_mmap = unsafe { memmap2::Mmap::map(&dataset_file).unwrap() };
                dataset = ArrayView2::<f32>::view_npy(&dataset_mmap).unwrap();
                let dataset_len = dataset.len_of(Axis(0));
                let required_points = args.init_vectors + args.queries;
                let max_points = args.init_vectors + args.to_add * args.iterations + args.queries;
                if dataset_len < required_points {
                    // Fix: the message previously reported `max_points`,
                    // while the guard above checks `required_points`.
                    panic!("Dataset has {dataset_len} points, need at least {required_points}");
                }
                log::info!(
                    "Dataset length: {dataset_len}, the dataset will be traversed {:.2} times",
                    (max_points as f64) / ((dataset_len - args.init_vectors) as f64)
                );
                let mut slices_vec = dataset
                    .axis_iter(Axis(0))
                    .map(|x| x.to_slice().unwrap())
                    .collect_vec();
                // Shuffle the dataset to avoid bias.
                slices_vec.shuffle(&mut StdRng::from_rng(&mut main_rng));
                // Last `arg_queries` vectors from the dataset are used as query vectors.
                let query_vectors = slices_vec
                    .split_off(slices_vec.len() - args.queries)
                    .iter()
                    .map(|&x| QueryVector::from(x.to_vec()))
                    .collect_vec();
                (query_vectors, slices_vec)
            }
            // Generate random vectors.
            (Some(dimensions), None) => {
                let mut rng = StdRng::from_rng(&mut main_rng);
                let query_vectors = std::iter::repeat_with(|| random_vector(&mut rng, dimensions))
                    .take(args.queries)
                    .map(QueryVector::from)
                    .collect_vec();
                let mut rng = StdRng::from_rng(&mut main_rng);
                vectors_mem = std::iter::repeat_with(|| random_vector(&mut rng, dimensions))
                    .take(args.init_vectors + args.to_add * args.iterations)
                    .collect_vec();
                (
                    query_vectors,
                    vectors_mem.iter().map(|x| x.as_slice()).collect_vec(),
                )
            }
            (_, _) => panic!("Either --dimensions or --dataset must be provided, but not both."),
        };
    // Hash query vectors to use as a cache key.
    let queries_hash = dataset_hash(query_vectors.iter().map(|q| match q {
        QueryVector::Nearest(VectorInternal::Dense(v)) => v.as_slice(),
        _ => unreachable!(),
    }));
    // Build initial segment and index it non-incrementally.
    let mut sliding_window = 0..args.init_vectors;
    let mut rng = StdRng::from_rng(&mut main_rng);
    let mut last_segment = make_segment(
        &mut rng,
        tmp_dir.path(),
        &vectors,
        sliding_window.clone(),
        args.distance,
    );
    // With `--cache`, the initial index lives in the persistent cache dir,
    // keyed by dataset hash and build parameters; otherwise it is temporary.
    let initial_index_path = if args.cache {
        cache_path.join(format!(
            "initial-{dataset_hash}-{m}-{ef_construct}-{distance:?}",
            dataset_hash = dataset_hash(vectors[sliding_window.clone()].iter().copied()),
            m = args.m,
            ef_construct = args.ef_construct,
            distance = args.distance,
        ))
    } else {
        last_segment.data_path().join("hnsw_bench")
    };
    let index = build_hnsw_index(
        &mut rng,
        &initial_index_path,
        &last_segment,
        &[],
        args.m,
        args.ef_construct,
    );
    let mut last_index = Arc::new(AtomicRefCell::new(VectorIndexEnum::Hnsw(index)));
    for iteration in 0..args.iterations {
        // Build a new segment and index it incrementally from the previous one.
        let segment = make_segment(
            &mut rng,
            tmp_dir.path(),
            &vectors,
            sliding_window.clone(),
            args.distance,
        );
        let index = build_hnsw_index(
            &mut rng,
            &segment.data_path().join("hnsw_bench"),
            &segment,
            &[Arc::clone(&last_index)],
            args.m,
            args.ef_construct,
        );
        // Check the accuracy of the index (always on the last iteration).
        if args.accuracy_check_period > 0
            && (iteration % args.accuracy_check_period == 0 || iteration == args.iterations - 1)
        {
            let top = 10;
            let exact_cache_path = cache_path.join(format!(
                "exact-{queries_hash}-{}-{top}",
                dataset_hash(sliding_window.clone().map(|i| vectors[i % vectors.len()])),
            ));
            let accuracy = measure_accuracy(
                &exact_cache_path,
                &segment,
                &query_vectors,
                &index,
                args.ef,
                top,
            );
            println!("iteration={iteration}, accuracy={accuracy}");
            assert!(accuracy > 0.4);
        } else {
            println!("iteration={iteration}, accuracy=N/A");
        }
        let last_segment_path = last_segment.data_path();
        last_segment = segment;
        last_index = Arc::new(AtomicRefCell::new(VectorIndexEnum::Hnsw(index)));
        // Cleanup previous segment and index.
        fs::remove_dir_all(last_segment_path).unwrap();
        // Slide the window.
        sliding_window =
            (sliding_window.start + args.to_remove)..(sliding_window.end + args.to_add);
    }
}
/// Builds an in-memory segment containing `all_vectors[i % len]` for every
/// `i` in `sliding_window` (the modulo lets the window wrap around the
/// dataset), upserting the points in a random order.
fn make_segment(
    rng: &mut StdRng,
    path: &Path,
    all_vectors: &[&[VectorElementType]],
    sliding_window: std::ops::Range<usize>,
    distance: Distance,
) -> Segment {
    let mut sequence = sliding_window.map(|x| x % all_vectors.len()).collect_vec();
    // Randomize insertion order to avoid any ordering bias during indexing.
    sequence.shuffle(rng);
    let hw_counter = HardwareCounterCell::new();
    let mut segment = build_simple_segment(path, all_vectors[0].len(), distance).unwrap();
    for n in sequence {
        let vector = only_default_vector(all_vectors[n]);
        // The dataset index doubles as both sequence number and point id.
        segment
            .upsert_point(
                n as SeqNumberType,
                ExtendedPointId::NumId(n as u64),
                vector,
                &hw_counter,
            )
            .unwrap();
    }
    segment
}
/// Hash the dataset to use as a cache key.
///
/// Hashes the vector count, the first vector's dimensionality, and — for
/// performance — only the first and last 100 vectors. For fewer than 200
/// vectors some entries are hashed twice; harmless for a cache key.
/// NOTE(review): panics on an empty iterator (`peek().unwrap()`).
fn dataset_hash<'a>(
    vectors: impl DoubleEndedIterator<Item = &'a [VectorElementType]> + ExactSizeIterator,
) -> String {
    let mut hasher = sha2::Sha256::new();
    let mut vectors = vectors.peekable();
    hasher.update(vectors.len().to_le_bytes());
    hasher.update(vectors.peek().unwrap().len().to_le_bytes());
    // For performance reasons, hash only first and last 100 vectors.
    for vector in vectors.by_ref().take(100) {
        hasher.update(vector.as_bytes());
    }
    for vector in vectors.rev().take(100) {
        hasher.update(vector.as_bytes());
    }
    // Render the digest as lowercase hex.
    format!("{:x}", hasher.finalize())
}
/// Opens a cached HNSW index from `path` when its config file already
/// exists there; otherwise builds a fresh index over the segment's default
/// vector storage, reusing `old_indices` for incremental building.
fn build_hnsw_index<R: Rng + ?Sized>(
    rng: &mut R,
    path: &Path,
    segment: &Segment,
    old_indices: &[Arc<AtomicRefCell<VectorIndexEnum>>],
    m: usize,
    ef_construct: usize,
) -> HNSWIndex {
    let hnsw_config = HnswConfig {
        m,
        ef_construct,
        // full_scan_threshold: 1 — presumably forces the index path over a
        // full scan even for tiny segments; confirm against HnswConfig docs.
        full_scan_threshold: 1,
        max_indexing_threads: 0,
        on_disk: Some(false),
        payload_m: None,
        inline_storage: None,
    };
    let open_args = HnswIndexOpenArgs {
        path,
        id_tracker: segment.id_tracker.clone(),
        vector_storage: segment.vector_data[DEFAULT_VECTOR_NAME]
            .vector_storage
            .clone(),
        quantized_vectors: segment.vector_data[DEFAULT_VECTOR_NAME]
            .quantized_vectors
            .clone(),
        payload_index: Arc::clone(&segment.payload_index),
        hnsw_config,
    };
    // Presence of this file marks a previously built (cached) index at `path`.
    pub const HNSW_INDEX_CONFIG_FILE: &str = "hnsw_config.json";
    if path.join(HNSW_INDEX_CONFIG_FILE).exists() {
        log::info!("Loading cached HNSW index from {path:?}");
        return HNSWIndex::open(open_args).unwrap();
    }
    let permit_cpu_count = num_rayon_threads(open_args.hnsw_config.max_indexing_threads);
    let permit = Arc::new(ResourcePermit::dummy(permit_cpu_count as u32));
    HNSWIndex::build(
        open_args,
        VectorIndexBuildArgs {
            permit,
            old_indices,
            gpu_device: None,
            rng,
            stopped: &AtomicBool::new(false),
            hnsw_global_config: &HnswGlobalConfig::default(),
            feature_flags: feature_flags(),
            progress: ProgressTracker::new_for_test(),
        },
    )
    .unwrap()
}
/// Returns the fraction of exact top-`top` results that the HNSW index also
/// returns, averaged over all `query_vectors` (a value in `0.0..=1.0`).
///
/// Exact (full-scan) results are cached at `exact_cache_path` across runs.
fn measure_accuracy(
    exact_cache_path: &Path,
    segment: &Segment,
    query_vectors: &[QueryVector],
    hnsw_index: &HNSWIndex,
    ef: usize,
    top: usize,
) -> f64 {
    let id_tracker = segment.id_tracker.borrow();
    // Exact search (aka full scan) is slow, so we cache the results.
    let exact_search_results;
    if exact_cache_path.exists() {
        exact_search_results = read_json(exact_cache_path).unwrap()
    } else {
        let start = std::time::Instant::now();
        // Exact search per query, in parallel via rayon.
        exact_search_results = query_vectors
            .par_iter()
            .map(|query| {
                segment.vector_data[DEFAULT_VECTOR_NAME]
                    .vector_index
                    .borrow()
                    .search(&[query], None, top, None, &Default::default())
                    .pipe(|results| process_search_results(&*id_tracker, results))
            })
            .collect::<Vec<_>>();
        log::debug!("Exact search time = {:?}", start.elapsed());
        atomic_save_json(exact_cache_path, &exact_search_results).unwrap();
    }
    // For each query, count how many exact results the HNSW search reproduces.
    let sames: usize = query_vectors
        .par_iter()
        .zip(exact_search_results.into_par_iter())
        .map(|(query, plain_result)| {
            let index_result = hnsw_index
                .search(
                    &[query],
                    None,
                    top,
                    Some(&SearchParams {
                        hnsw_ef: Some(ef),
                        ..Default::default()
                    }),
                    &Default::default(),
                )
                .pipe(|results| process_search_results(&*id_tracker, results));
            // Get number of same results.
            index_result
                .iter()
                .collect::<BTreeSet<_>>()
                .intersection(&plain_result.iter().collect())
                .count()
        })
        .sum();
    sames as f64 / (query_vectors.len() * top) as f64
}
/// Unwraps a single-query batch of search results and translates internal
/// point offsets to external point ids via the id tracker.
///
/// Panics if the batch does not contain exactly one result list, or if an
/// offset has no external id — both indicate a bug in the benchmark.
fn process_search_results(
    id_tracker: &IdTrackerSS,
    results: OperationResult<Vec<Vec<ScoredPointOffset>>>,
) -> Vec<ExtendedPointId> {
    // The callers always search with exactly one query vector.
    let scored_points = results.unwrap().into_iter().exactly_one().unwrap();
    let mut external_ids = Vec::with_capacity(scored_points.len());
    for scored in scored_points {
        external_ids.push(id_tracker.external_id(scored.idx).unwrap());
    }
    external_ids
}
/// Parses a human-friendly number for clap, e.g. `100_000`, `1k`, `16Mi`.
///
/// Accepts any integer type convertible from `u64`; returns the error
/// string clap expects when parsing or the narrowing conversion fails.
fn parse_number<T: TryFrom<u64>>(n: &str) -> Result<T, String> {
    parse_number_impl(n)
        .and_then(|v| v.try_into().ok())
        .ok_or_else(|| format!("Invalid number: {n}"))
}
/// Parses a `u64` with optional `_` digit separators and an optional metric
/// suffix: `k`/`M`/`G`/`T` multiply by powers of 1000, and `ki`/`Mi`/`Gi`/`Ti`
/// by powers of 1024. Returns `None` on malformed input or overflow.
fn parse_number_impl(n: &str) -> Option<u64> {
    let mut chars = n.chars();
    // The first character must be a digit (no leading `_` or suffix).
    let mut result = u64::from(chars.next()?.to_digit(10)?);
    while let Some(c) = chars.next() {
        if let Some(v) = c.to_digit(10) {
            // Accumulate digits, failing on u64 overflow.
            result = result.checked_mul(10)?.checked_add(u64::from(v))?;
        } else if c != '_' {
            // A non-digit, non-separator must be a metric suffix; it may only
            // be followed by an optional `i` and then end-of-input.
            let power = "kMGT".find(c)? as u32 + 1;
            let multiplier = match (chars.next(), chars.next()) {
                (None, _) => 1000u64.pow(power),
                (Some('i'), None) => 1024u64.pow(power),
                // Fix: reject trailing characters after the binary suffix
                // (previously e.g. `1ki5` silently parsed as 1024).
                _ => return None,
            };
            return result.checked_mul(multiplier);
        }
    }
    Some(result)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/id_type_benchmark.rs | lib/segment/benches/id_type_benchmark.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::collections::{BTreeMap, HashMap};
use criterion::{Criterion, criterion_group, criterion_main};
use rand::Rng;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Point-id candidate: externally tagged enum (serde's default enum
/// representation), used as the bincode baseline in the benchmarks below.
#[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
enum EnumIdTagged {
    Num(u64),
    Uuid(Uuid),
}
/// Point-id candidate: untagged enum — serde tries each variant in order
/// when deserializing.
#[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[serde(untagged)]
enum EnumId {
    Num(u64),
    Uuid(Uuid),
}
/// Point-id candidate: struct of two optional fields instead of an enum.
#[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
struct StructId {
    // The benchmarks below set exactly one of `id`/`uuid`.
    id: Option<u64>,
    uuid: Option<Uuid>,
}
/// Benchmarks serializing the candidate point-id representations:
/// plain integers, `StructId`, and the enums — first with bincode,
/// then with CBOR.
fn id_serialization_speed(c: &mut Criterion) {
    let mut group = c.benchmark_group("serialization-group");
    let mut rng = rand::rng();
    // Baseline: plain u64 via bincode.
    group.bench_function("u64", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            bincode::serialize(&key).unwrap();
        });
    });
    group.bench_function("u128", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = u128::from(key);
            bincode::serialize(&new_key).unwrap();
        });
    });
    group.bench_function("struct-u64", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = StructId {
                id: Some(key),
                uuid: None,
            };
            bincode::serialize(&new_key).unwrap();
        });
    });
    group.bench_function("struct-uuid", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = StructId {
                id: None,
                uuid: Some(Uuid::from_u128(u128::from(key))),
            };
            bincode::serialize(&new_key).unwrap();
        });
    });
    // Tagged enum variants via bincode.
    group.bench_function("enum-u64", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = EnumIdTagged::Num(key);
            bincode::serialize(&new_key).unwrap();
        });
    });
    group.bench_function("enum-uuid", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = EnumIdTagged::Uuid(Uuid::from_u128(u128::from(key)));
            bincode::serialize(&new_key).unwrap();
        });
    });
    // Same shapes again, but serialized with CBOR. Note the untagged
    // `EnumId` is used here (bincode cannot serialize untagged enums).
    group.bench_function("struct-cbor-u128", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = u128::from(key);
            serde_cbor::to_vec(&new_key).unwrap();
        });
    });
    group.bench_function("struct-cbor-u64", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = StructId {
                id: Some(key),
                uuid: None,
            };
            serde_cbor::to_vec(&new_key).unwrap();
        });
    });
    group.bench_function("struct-cbor-uuid", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = StructId {
                id: None,
                uuid: Some(Uuid::from_u128(u128::from(key))),
            };
            serde_cbor::to_vec(&new_key).unwrap();
        });
    });
    group.bench_function("enum-cbor-u64", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = EnumId::Num(key);
            serde_cbor::to_vec(&new_key).unwrap();
        });
    });
    group.bench_function("enum-cbor-uuid", |b| {
        b.iter(|| {
            let key: u64 = rng.random_range(0..100000000);
            let new_key = EnumId::Uuid(Uuid::from_u128(u128::from(key)));
            serde_cbor::to_vec(&new_key).unwrap();
        });
    });
}
/// Measures `HashMap<u128, bool>` insert + lookup round-trips for a fixed key.
fn u128_hash_search(c: &mut Criterion) {
    let mut group = c.benchmark_group("hash-search-group");
    let key: u128 = 123;
    let val = true;
    let mut map: HashMap<u128, bool> = HashMap::default();
    group.bench_function("u128", |bencher| {
        bencher.iter(|| {
            // One insert followed by one lookup per iteration.
            map.insert(key, val);
            map.get(&key);
        });
    });
}
/// Measures map insert + lookup keyed by the untagged id enum, for both the
/// numeric and UUID variants.
/// NOTE(review): despite the name, this uses a `BTreeMap`, not a hash map.
fn enum_hash_search(c: &mut Criterion) {
    let mut group = c.benchmark_group("hash-search-group");
    let mut data: BTreeMap<EnumId, bool> = BTreeMap::default();
    let key: EnumId = EnumId::Num(123);
    let val = true;
    group.bench_function("enum-u64", |b| {
        b.iter(|| {
            data.insert(key, val);
            data.get(&key);
        });
    });
    let key: EnumId = EnumId::Uuid(Uuid::from_u128(123));
    let val = true;
    group.bench_function("enum-uuid", |b| {
        b.iter(|| {
            data.insert(key, val);
            data.get(&key);
        });
    });
}
// Register all benchmark targets with criterion.
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = id_serialization_speed, u128_hash_search, enum_hash_search
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/sparse_index_build.rs | lib/segment/benches/sparse_index_build.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::borrow::Cow;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use half::f16;
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::index::VectorIndex;
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::index::sparse_index::sparse_vector_index::{
SparseVectorIndex, SparseVectorIndexOpenArgs,
};
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use segment::types::VectorStorageDatatype;
use segment::vector_storage::VectorStorage;
use segment::vector_storage::sparse::simple_sparse_vector_storage::open_simple_sparse_vector_storage;
use sparse::common::sparse_vector_fixture::random_sparse_vector;
use sparse::index::inverted_index::InvertedIndex;
use sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use sparse::index::inverted_index::inverted_index_mmap::InvertedIndexMmap;
use sparse::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use tempfile::Builder;
// Fixture sizing: number of stored vectors and max sparse dimension index.
const NUM_VECTORS: usize = 10_000;
const MAX_SPARSE_DIM: usize = 1_000;
/// Benchmarks building a RAM sparse index from a populated vector storage,
/// and converting that RAM index to the mmap formats (plain, f32- and
/// f16-compressed).
fn sparse_vector_index_build_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("sparse-vector-build-group");
    let stopped = AtomicBool::new(false);
    let mut rnd = StdRng::seed_from_u64(42);
    let payload_dir = Builder::new().prefix("payload_dir").tempdir().unwrap();
    let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let index_dir = Builder::new().prefix("index_dir").tempdir().unwrap();
    // setup
    let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(NUM_VECTORS)));
    let payload_storage = InMemoryPayloadStorage::default();
    let wrapped_payload_storage = Arc::new(AtomicRefCell::new(payload_storage.into()));
    let payload_index = StructPayloadIndex::open(
        wrapped_payload_storage,
        id_tracker.clone(),
        std::collections::HashMap::new(),
        payload_dir.path(),
        true,
        true,
    )
    .unwrap();
    let wrapped_payload_index = Arc::new(AtomicRefCell::new(payload_index));
    let db = open_db(storage_dir.path(), &[DB_VECTOR_CF]).unwrap();
    let mut vector_storage = open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &stopped).unwrap();
    let hw_counter = HardwareCounterCell::new();
    // add points to storage only once
    for idx in 0..NUM_VECTORS {
        let vec = &random_sparse_vector(&mut rnd, MAX_SPARSE_DIM);
        vector_storage
            .insert_vector(idx as PointOffsetType, vec.into(), &hw_counter)
            .unwrap();
    }
    // save index config to disk
    let index_config = SparseIndexConfig::new(
        Some(10_000),
        SparseIndexType::MutableRam,
        Some(VectorStorageDatatype::Float32),
    );
    let vector_storage = Arc::new(AtomicRefCell::new(vector_storage));
    // intent: measure in-memory build time from storage
    group.bench_function("build-ram-index", |b| {
        b.iter(|| {
            let sparse_vector_index: SparseVectorIndex<InvertedIndexRam> =
                SparseVectorIndex::open(SparseVectorIndexOpenArgs {
                    config: index_config,
                    id_tracker: id_tracker.clone(),
                    vector_storage: vector_storage.clone(),
                    payload_index: wrapped_payload_index.clone(),
                    path: index_dir.path(),
                    stopped: &stopped,
                    tick_progress: || (),
                })
                .unwrap();
            assert_eq!(sparse_vector_index.indexed_vector_count(), NUM_VECTORS);
        })
    });
    // build once to reuse in mmap conversion benchmark
    let sparse_vector_index: SparseVectorIndex<InvertedIndexRam> =
        SparseVectorIndex::open(SparseVectorIndexOpenArgs {
            config: index_config,
            id_tracker,
            vector_storage,
            payload_index: wrapped_payload_index,
            path: index_dir.path(),
            stopped: &stopped,
            tick_progress: || (),
        })
        .unwrap();
    // intent: measure mmap conversion time
    group.bench_function("convert-mmap-index", |b| {
        b.iter(|| {
            // Fresh temp dir per iteration so each conversion starts clean.
            let mmap_index_dir = Builder::new().prefix("mmap_index_dir").tempdir().unwrap();
            let mmap_inverted_index = InvertedIndexMmap::from_ram_index(
                Cow::Borrowed(sparse_vector_index.inverted_index()),
                &mmap_index_dir,
            )
            .unwrap();
            assert_eq!(mmap_inverted_index.vector_count(), NUM_VECTORS);
        })
    });
    // Same conversion, but to the compressed mmap format with f32 weights…
    group.bench_function("convert-mmap-index-f32", |b| {
        b.iter(|| {
            let mmap_index_dir = Builder::new().prefix("mmap_index_dir").tempdir().unwrap();
            let mmap_inverted_index = InvertedIndexCompressedMmap::<f32>::from_ram_index(
                Cow::Borrowed(sparse_vector_index.inverted_index()),
                &mmap_index_dir,
            )
            .unwrap();
            assert_eq!(mmap_inverted_index.vector_count(), NUM_VECTORS);
        })
    });
    // …and with half-precision (f16) weights.
    group.bench_function("convert-mmap-index-f16", |b| {
        b.iter(|| {
            let mmap_index_dir = Builder::new().prefix("mmap_index_dir").tempdir().unwrap();
            let mmap_inverted_index = InvertedIndexCompressedMmap::<f16>::from_ram_index(
                Cow::Borrowed(sparse_vector_index.inverted_index()),
                &mmap_index_dir,
            )
            .unwrap();
            assert_eq!(mmap_inverted_index.vector_count(), NUM_VECTORS);
        })
    });
    group.finish();
}
// The pprof-based flamegraph profiler is unavailable on Windows, so the
// criterion config differs per target OS.
#[cfg(not(target_os = "windows"))]
criterion_group! {
    name = benches;
    config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
    targets = sparse_vector_index_build_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = sparse_vector_index_build_benchmark,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/sparse_vector_storage.rs | lib/segment/benches/sparse_vector_storage.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use segment::vector_storage::sparse::mmap_sparse_vector_storage::MmapSparseVectorStorage;
use segment::vector_storage::sparse::simple_sparse_vector_storage::open_simple_sparse_vector_storage;
use segment::vector_storage::{Random, VectorStorage};
use sparse::common::sparse_vector_fixture::random_sparse_vector;
use tempfile::Builder;
// Fixture sizing: number of stored vectors and max sparse dimension index.
const NUM_VECTORS: usize = 10_000;
const MAX_SPARSE_DIM: usize = 1_000;
/// Benchmarks insert and read throughput of the two sparse vector storage
/// backends: RocksDB-based and mmap-based (with compression).
fn sparse_vector_storage_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("sparse-vector-storage-group");
    let stopped = AtomicBool::new(false);
    let mut rnd = StdRng::seed_from_u64(42);
    let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let db = open_db(storage_dir.path(), &[DB_VECTOR_CF]).unwrap();
    let mut rocksdb_sparse_vector_storage =
        open_simple_sparse_vector_storage(db, DB_VECTOR_CF, &stopped).unwrap();
    let hw_counter = HardwareCounterCell::new();
    group.bench_function("insert-rocksdb", |b| {
        b.iter(|| {
            for idx in 0..NUM_VECTORS {
                let vec = &random_sparse_vector(&mut rnd, MAX_SPARSE_DIM);
                rocksdb_sparse_vector_storage
                    .insert_vector(idx as PointOffsetType, vec.into(), &hw_counter)
                    .unwrap();
            }
        })
    });
    group.bench_function("read-rocksdb", |b| {
        b.iter(|| {
            for idx in 0..NUM_VECTORS {
                let vec =
                    rocksdb_sparse_vector_storage.get_vector_opt::<Random>(idx as PointOffsetType);
                assert!(vec.is_some());
            }
        })
    });
    // Release the RocksDB storage before setting up the mmap variant.
    drop(rocksdb_sparse_vector_storage);
    let storage_dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let mut mmap_sparse_vector_storage =
        MmapSparseVectorStorage::open_or_create(storage_dir.path()).unwrap();
    group.bench_function("insert-mmap-compression", |b| {
        b.iter(|| {
            for idx in 0..NUM_VECTORS {
                let vec = &random_sparse_vector(&mut rnd, MAX_SPARSE_DIM);
                mmap_sparse_vector_storage
                    .insert_vector(idx as PointOffsetType, vec.into(), &hw_counter)
                    .unwrap();
            }
        })
    });
    group.bench_function("read-mmap-compression", |b| {
        b.iter(|| {
            for idx in 0..NUM_VECTORS {
                let vec =
                    mmap_sparse_vector_storage.get_vector_opt::<Random>(idx as PointOffsetType);
                assert!(vec.is_some());
            }
        })
    });
    drop(mmap_sparse_vector_storage);
    group.finish();
}
// The pprof-based flamegraph profiler is unavailable on Windows, so the
// criterion config differs per target OS.
#[cfg(not(target_os = "windows"))]
criterion_group! {
    name = benches;
    config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
    targets = sparse_vector_storage_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = sparse_vector_storage_benchmark,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/dynamic_mmap_flags.rs | lib/segment/benches/dynamic_mmap_flags.rs | use std::hint::black_box;
use std::iter;
use std::sync::atomic::AtomicBool;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use segment::common::flags::dynamic_mmap_flags::DynamicMmapFlags;
use segment::common::operation_error::check_process_stopped;
use tempfile::tempdir;
// Number of flags in the fixture bitset.
const FLAG_COUNT: usize = 50_000_000;
/// Compares three ways of counting set flags in `DynamicMmapFlags`:
/// a per-bit loop with a stop-check, a plain per-bit loop, and the built-in
/// `count_flags`.
fn dynamic_mmap_flag_count(c: &mut Criterion) {
    let mut rng = StdRng::seed_from_u64(42);
    let dir = tempdir().unwrap();
    let random_flags: Vec<bool> = iter::repeat_with(|| rng.random())
        .take(FLAG_COUNT)
        .collect();
    let stopped = AtomicBool::new(false);
    // Build dynamic mmap flags with random deletions
    let mut dynamic_flags = DynamicMmapFlags::open(dir.path(), false).unwrap();
    dynamic_flags.set_len(FLAG_COUNT).unwrap();
    random_flags
        .iter()
        .enumerate()
        .filter(|(_, flag)| **flag)
        // NOTE(review): asserting `set` returns false — presumably the
        // previous flag value (all start unset); confirm against the API.
        .for_each(|(i, _)| assert!(!dynamic_flags.set(i, true)));
    dynamic_flags.flusher()().unwrap();
    // Ground truth for the assertions inside the benchmark loops.
    let real_count = random_flags.iter().filter(|&&flag| flag).count();
    let mut group = c.benchmark_group("dynamic-mmap-flag-count");
    group.bench_function("manual-count-loop-stoppable", |b| {
        b.iter(|| {
            let mut count = 0;
            for i in 0..FLAG_COUNT {
                if dynamic_flags.get(i) {
                    count += 1;
                }
                check_process_stopped(&stopped).unwrap();
            }
            assert_eq!(count, real_count);
            black_box(count)
        });
    });
    group.bench_function("manual-count-loop", |b| {
        b.iter(|| {
            let mut count = 0;
            for i in 0..FLAG_COUNT {
                if dynamic_flags.get(i) {
                    count += 1;
                }
            }
            assert_eq!(count, real_count);
            black_box(count)
        });
    });
    group.bench_function("count-ones", |b| {
        b.iter(|| {
            let count = dynamic_flags.count_flags();
            assert_eq!(count, real_count);
            black_box(count)
        });
    });
}
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = dynamic_mmap_flag_count
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/in_memory_id_tracker.rs | lib/segment/benches/in_memory_id_tracker.rs | use std::time::Instant;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::Rng;
use segment::id_tracker::IdTracker;
use segment::id_tracker::in_memory_id_tracker::InMemoryIdTracker;
use segment::types::ExtendedPointId;
/// Measures `InMemoryIdTracker::set_link` throughput. Uses `iter_custom`
/// so tracker construction and id generation are excluded from the timing.
fn benchmark(c: &mut Criterion) {
    c.bench_function("idtracker", |b| {
        b.iter_custom(|i| {
            let mut id_tracker = InMemoryIdTracker::new();
            let mut rand = rand::rng();
            // Pre-generate random internal ids so RNG cost is not measured.
            let ids: Vec<i32> = (0..i).map(|_| rand.random_range(0..100_000)).collect();
            let start = Instant::now();
            for external in 0..i {
                id_tracker
                    .set_link(
                        ExtendedPointId::NumId(external),
                        ids[external as usize] as PointOffsetType,
                    )
                    .unwrap();
            }
            // Only the set_link loop is timed.
            start.elapsed()
        })
    });
}
criterion_group!(benches, benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/range_filtering.rs | lib/segment/benches/range_filtering.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use ordered_float::OrderedFloat;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::fixtures::payload_fixtures::{FLT_KEY, INT_KEY};
use segment::index::PayloadIndex;
use segment::index::struct_payload_index::StructPayloadIndex;
use segment::payload_json;
use segment::payload_storage::PayloadStorage;
use segment::payload_storage::in_memory_payload_storage::InMemoryPayloadStorage;
use segment::types::{
Condition, FieldCondition, Filter, PayloadSchemaType, Range as RangeCondition,
};
use tempfile::Builder;
// Fixture sizing: number of points and the payload value range.
const NUM_POINTS: usize = 100_000;
const MAX_RANGE: f64 = 100_000.0;
/// Builds a `must` filter on `key` with a random `[gte, lte]` range whose
/// lower bound is in the bottom half of `MAX_RANGE` and whose upper bound
/// is in the top half — so every filter spans the midpoint.
fn random_range_filter<R: Rng + ?Sized>(rng: &mut R, key: &str) -> Filter {
    Filter::new_must(Condition::Field(FieldCondition::new_range(
        key.parse().unwrap(),
        RangeCondition {
            lt: None,
            gt: None,
            gte: Some(OrderedFloat(rng.random_range(0.0..MAX_RANGE / 2.0))),
            lte: Some(OrderedFloat(rng.random_range(MAX_RANGE / 2.0..MAX_RANGE))),
        },
    )))
}
/// Benchmarks `query_points` with random range filters against float and
/// integer numeric payload indices — first on the freshly built mutable
/// index, then on the same data reloaded as an immutable index.
fn range_filtering(c: &mut Criterion) {
    let mut group = c.benchmark_group("range-filtering-group");
    let seed = 42;
    let mut rng = StdRng::seed_from_u64(seed);
    let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
    let hw_counter = HardwareCounterCell::new();
    let is_stopped = AtomicBool::new(false);
    // generate points with payload
    let mut payload_storage = InMemoryPayloadStorage::default();
    for id in 0..NUM_POINTS {
        let payload = payload_json! {
            INT_KEY: rng.random_range(0..MAX_RANGE.round() as usize),
            FLT_KEY: rng.random_range(0.0..MAX_RANGE),
        };
        payload_storage
            .set(id as PointOffsetType, &payload, &hw_counter)
            .unwrap();
    }
    let payload_storage = Arc::new(AtomicRefCell::new(payload_storage.into()));
    let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(NUM_POINTS)));
    let mut index = StructPayloadIndex::open(
        payload_storage.clone(),
        id_tracker.clone(),
        std::collections::HashMap::new(),
        dir.path(),
        true,
        true,
    )
    .unwrap();
    // add numeric float index
    index
        .set_indexed(
            &FLT_KEY.parse().unwrap(),
            PayloadSchemaType::Float,
            &hw_counter,
        )
        .unwrap();
    // add numeric integer index
    index
        .set_indexed(
            &INT_KEY.parse().unwrap(),
            PayloadSchemaType::Integer,
            &hw_counter,
        )
        .unwrap();
    // make sure all points are indexed
    assert_eq!(index.indexed_points(&FLT_KEY.parse().unwrap()), NUM_POINTS);
    assert_eq!(index.indexed_points(&INT_KEY.parse().unwrap()), NUM_POINTS);
    // Accumulators consume the query results — presumably to keep the work
    // observable so it is not optimized away; they are never read back.
    let mut result_size = 0;
    let mut query_count = 0;
    group.bench_function("float-mutable-index", |b| {
        b.iter_batched(
            || random_range_filter(&mut rng, FLT_KEY),
            |filter| {
                result_size += index.query_points(&filter, &hw_counter, &is_stopped).len();
                query_count += 1;
            },
            BatchSize::SmallInput,
        )
    });
    group.bench_function("integer-mutable-index", |b| {
        b.iter_batched(
            || random_range_filter(&mut rng, INT_KEY),
            |filter| {
                result_size += index.query_points(&filter, &hw_counter, &is_stopped).len();
                query_count += 1;
            },
            BatchSize::SmallInput,
        )
    });
    // flush data
    index.flusher()().unwrap();
    drop(index);
    // reload as IMMUTABLE index
    let index = StructPayloadIndex::open(
        payload_storage,
        id_tracker,
        std::collections::HashMap::new(),
        dir.path(),
        false,
        true,
    )
    .unwrap();
    group.bench_function("float-immutable-index", |b| {
        b.iter_batched(
            || random_range_filter(&mut rng, FLT_KEY),
            |filter| {
                result_size += index.query_points(&filter, &hw_counter, &is_stopped).len();
                query_count += 1;
            },
            BatchSize::SmallInput,
        )
    });
    group.bench_function("integer-immutable-index", |b| {
        b.iter_batched(
            || random_range_filter(&mut rng, INT_KEY),
            |filter| {
                result_size += index.query_points(&filter, &hw_counter, &is_stopped).len();
                query_count += 1;
            },
            BatchSize::SmallInput,
        )
    });
    group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = range_filtering
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/conditional_search.rs | lib/segment/benches/conditional_search.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use segment::fixtures::payload_context_fixture::{
create_plain_payload_index, create_struct_payload_index,
};
use segment::fixtures::payload_fixtures::{random_match_any_filter, random_must_filter};
use segment::index::PayloadIndex;
use tempfile::Builder;
const NUM_POINTS: usize = 100000;
const CHECK_SAMPLE_SIZE: usize = 1000;
fn conditional_plain_search_benchmark(c: &mut Criterion) {
let seed = 42;
let mut rng = StdRng::seed_from_u64(seed);
let mut group = c.benchmark_group("conditional-search-group");
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let plain_index = create_plain_payload_index(dir.path(), NUM_POINTS, seed);
let hw_counter = HardwareCounterCell::new();
let is_stopped = AtomicBool::new(false);
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("conditional-search-query-points", |b| {
b.iter(|| {
let filter = random_must_filter(&mut rng, 2);
result_size += plain_index
.query_points(&filter, &hw_counter, &is_stopped)
.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
let mut result_size = 0;
let mut query_count = 0;
// Same benchmark, but with larger expected result
group.bench_function("conditional-search-query-points-large", |b| {
b.iter(|| {
let filter = random_must_filter(&mut rng, 1);
result_size += plain_index
.query_points(&filter, &hw_counter, &is_stopped)
.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("conditional-search-context-check", |b| {
b.iter(|| {
let filter = random_must_filter(&mut rng, 2);
let sample = (0..CHECK_SAMPLE_SIZE)
.map(|_| rng.random_range(0..NUM_POINTS) as PointOffsetType)
.collect_vec();
let context = plain_index.filter_context(&filter, &hw_counter);
let filtered_sample = sample
.into_iter()
.filter(|id| context.check(*id))
.collect_vec();
result_size += filtered_sample.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("conditional-search-match-any", |b| {
let filter = random_match_any_filter(&mut rng, 2, 51.0);
b.iter(|| {
let sample = (0..CHECK_SAMPLE_SIZE)
.map(|_| rng.random_range(0..NUM_POINTS) as PointOffsetType)
.collect_vec();
let context = plain_index.filter_context(&filter, &hw_counter);
let filtered_sample = sample
.into_iter()
.filter(|id| context.check(*id))
.collect_vec();
result_size += filtered_sample.len();
query_count += 1;
});
});
group.bench_function("conditional-search-large-match-any", |b| {
let filter = random_match_any_filter(&mut rng, 1000, 15.0);
b.iter(|| {
let sample = (0..CHECK_SAMPLE_SIZE)
.map(|_| rng.random_range(0..NUM_POINTS) as PointOffsetType)
.collect_vec();
let context = plain_index.filter_context(&filter, &hw_counter);
let filtered_sample = sample
.into_iter()
.filter(|id| context.check(*id))
.collect_vec();
result_size += filtered_sample.len();
query_count += 1;
});
});
group.finish();
}
fn conditional_struct_search_benchmark(c: &mut Criterion) {
let mut rng = StdRng::seed_from_u64(42);
let mut group = c.benchmark_group("conditional-search-group");
let seed = 42;
let hw_counter = HardwareCounterCell::new();
let is_stopped = AtomicBool::new(false);
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let struct_index = create_struct_payload_index(dir.path(), NUM_POINTS, seed);
let mut result_size = 0;
let mut query_count = 0;
let filter = random_must_filter(&mut rng, 2);
let cardinality = struct_index.estimate_cardinality(&filter, &hw_counter);
let indexed_fields = struct_index.indexed_fields();
eprintln!("cardinality = {cardinality:#?}");
eprintln!("indexed_fields = {indexed_fields:#?}");
group.bench_function("struct-conditional-search-query-points", |b| {
b.iter(|| {
let filter = random_must_filter(&mut rng, 2);
result_size += struct_index
.query_points(&filter, &hw_counter, &is_stopped)
.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
let mut result_size = 0;
let mut query_count = 0;
group.bench_function("struct-conditional-search-context-check", |b| {
b.iter(|| {
let filter = random_must_filter(&mut rng, 2);
let sample = (0..CHECK_SAMPLE_SIZE)
.map(|_| rng.random_range(0..NUM_POINTS) as PointOffsetType)
.collect_vec();
let context = struct_index.filter_context(&filter, &hw_counter);
let filtered_sample = sample
.into_iter()
.filter(|id| context.check(*id))
.collect_vec();
result_size += filtered_sample.len();
query_count += 1;
})
});
if query_count != 0 {
eprintln!(
"result_size / query_count = {:#?}",
result_size / query_count
);
}
group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = conditional_struct_search_benchmark, conditional_plain_search_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = conditional_struct_search_benchmark, conditional_plain_search_benchmark
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/segment_info.rs | lib/segment/benches/segment_info.rs | use common::counter::hardware_counter::HardwareCounterCell;
use criterion::{Criterion, criterion_group, criterion_main};
use segment::data_types::vectors::only_default_vector;
use segment::entry::entry_point::SegmentEntry;
use segment::json_path::JsonPath;
use segment::segment_constructor::simple_segment_constructor::build_simple_segment;
use segment::types::{Distance, Payload, PayloadFieldSchema, PayloadSchemaType};
use serde_json::{Map, Value};
use tempfile::Builder;
pub fn criterion_benchmark(c: &mut Criterion) {
let dir = Builder::new().prefix("segment_dir").tempdir().unwrap();
let dim = 400;
let mut segment = build_simple_segment(dir.path(), dim, Distance::Dot).unwrap();
let vector = vec![0.1f32; 400];
let mut payload: Map<String, Value> = Map::default();
for i in 0..3 {
let key = format!("key{i}");
payload.insert(key.clone(), "value".to_string().into());
segment
.create_field_index(
100,
&JsonPath::new(&key),
Some(&PayloadFieldSchema::FieldType(PayloadSchemaType::Keyword)),
&HardwareCounterCell::new(),
)
.unwrap();
}
let payload = Payload::from(payload);
let hw_counter = HardwareCounterCell::new();
for id in 0..100000u64 {
segment
.upsert_point(100, id.into(), only_default_vector(&vector), &hw_counter)
.unwrap();
segment
.set_payload(100, id.into(), &payload, &None, &hw_counter)
.unwrap();
}
c.bench_function("segment-info", |b| {
b.iter(|| {
let _ = segment.info();
})
});
c.bench_function("segment-size-info", |b| {
b.iter(|| {
let _ = segment.size_info();
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/hnsw_build_asymptotic.rs | lib/segment/benches/hnsw_build_asymptotic.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::cell::LazyCell;
use common::iterator_ext::IteratorExt as _;
use criterion::{Criterion, criterion_group, criterion_main};
use itertools::Itertools;
use rand::{Rng, rng};
use segment::fixtures::index_fixtures::{TestRawScorerProducer, random_vector};
use segment::index::hnsw_index::graph_layers::SearchAlgorithm;
use segment::spaces::metric::Metric;
use segment::spaces::simple::{CosineMetric, DotProductMetric};
use segment::types::Distance;
use segment::vector_storage::DEFAULT_STOPPED;
const DIM: usize = 16;
const M: usize = 16;
const TOP: usize = 10;
const EF_CONSTRUCT: usize = 64;
const EF: usize = 64;
const USE_HEURISTIC: bool = true;
mod fixture;
fn hnsw_build_asymptotic(c: &mut Criterion) {
let mut group = c.benchmark_group("hnsw-index-build-asymptotic");
let mut rng = rng();
let setup_5k = LazyCell::new(|| {
eprintln!();
fixture::make_cached_graph::<CosineMetric>(5_000, DIM, M, EF_CONSTRUCT, USE_HEURISTIC)
});
group.bench_function("build-n-search-hnsw-5k", |b| {
let (vector_holder, graph_layers) = &*setup_5k;
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let scorer = vector_holder.scorer(query);
graph_layers
.search(
TOP,
EF,
SearchAlgorithm::Hnsw,
scorer,
None,
&DEFAULT_STOPPED,
)
.unwrap();
})
});
drop(setup_5k);
const NUM_VECTORS: usize = 1_000_000;
let setup_1m = LazyCell::new(|| {
eprintln!();
fixture::make_cached_graph::<CosineMetric>(NUM_VECTORS, DIM, M, EF_CONSTRUCT, USE_HEURISTIC)
});
group.bench_function("build-n-search-hnsw-1M", |b| {
let (vector_holder, graph_layers) = &*setup_1m;
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let scorer = vector_holder.scorer(query);
graph_layers
.search(
TOP,
EF,
SearchAlgorithm::Hnsw,
scorer,
None,
&DEFAULT_STOPPED,
)
.unwrap();
})
});
group.bench_function("build-n-search-hnsw-1M-score-point", |b| {
let (vector_holder, _graph_layers) = &*setup_1m;
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let mut scorer = vector_holder.scorer(query);
let mut points_to_score = (0..1500)
.map(|_| rng.random_range(0..NUM_VECTORS) as u32)
.collect_vec();
scorer.score_points(&mut points_to_score, 1000).black_box();
})
});
drop(setup_1m);
}
fn scoring_vectors(c: &mut Criterion) {
let mut group = c.benchmark_group("scoring-vector");
let mut rng = rng();
let points_per_cycle = 1000;
let base_num_vectors = 10_000;
let num_vectors = base_num_vectors;
let vector_holder =
TestRawScorerProducer::new(DIM, Distance::Dot, num_vectors, false, &mut rng);
group.bench_function("score-point", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let mut scorer = vector_holder.scorer(query);
let mut points_to_score = (0..points_per_cycle)
.map(|_| rng.random_range(0..num_vectors) as u32)
.collect_vec();
scorer
.score_points(&mut points_to_score, points_per_cycle)
.black_box();
})
});
let num_vectors = base_num_vectors * 10;
let vector_holder =
TestRawScorerProducer::new(DIM, Distance::Dot, num_vectors, false, &mut rng);
group.bench_function("score-point-10x", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let mut scorer = vector_holder.scorer(query);
let mut points_to_score = (0..points_per_cycle)
.map(|_| rng.random_range(0..num_vectors) as u32)
.collect_vec();
scorer
.score_points(&mut points_to_score, points_per_cycle)
.black_box();
})
});
let num_vectors = base_num_vectors * 50;
let vector_holder =
TestRawScorerProducer::new(DIM, Distance::Dot, num_vectors, false, &mut rng);
group.bench_function("score-point-50x", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let mut scorer = vector_holder.scorer(query);
let mut points_to_score = (0..points_per_cycle)
.map(|_| rng.random_range(0..num_vectors) as u32)
.collect_vec();
scorer
.score_points(&mut points_to_score, points_per_cycle)
.black_box();
})
});
}
fn basic_scoring_vectors(c: &mut Criterion) {
let mut group = c.benchmark_group("scoring-vector");
let points_per_cycle = 1000;
let base_num_vectors = 10_000_000;
let num_vectors = base_num_vectors;
let setup = LazyCell::new(|| {
let mut rng = rng();
(0..num_vectors)
.map(|_| random_vector(&mut rng, DIM))
.collect_vec()
});
group.bench_function("basic-score-point", |b| {
let vectors = &*setup;
let mut rng = rng();
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let points_to_score = (0..points_per_cycle).map(|_| rng.random_range(0..num_vectors));
let _s: f32 = points_to_score
.map(|x| DotProductMetric::similarity(&vectors[x], &query))
.sum();
})
});
drop(setup);
let num_vectors = base_num_vectors * 2;
let setup = LazyCell::new(|| {
let mut rng = rng();
(0..num_vectors)
.map(|_| random_vector(&mut rng, DIM))
.collect_vec()
});
group.bench_function("basic-score-point-10x", |b| {
let vectors = &*setup;
let mut rng = rng();
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let points_to_score = (0..points_per_cycle).map(|_| rng.random_range(0..num_vectors));
let _s: f32 = points_to_score
.map(|x| DotProductMetric::similarity(&vectors[x], &query))
.sum();
})
});
drop(setup);
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = hnsw_build_asymptotic, scoring_vectors, basic_scoring_vectors
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = hnsw_build_asymptotic, scoring_vectors, basic_scoring_vectors
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/mmap_bitslice_buffered_update_wrapper.rs | lib/segment/benches/mmap_bitslice_buffered_update_wrapper.rs | use std::hint::black_box;
use std::iter;
use criterion::{Criterion, criterion_group, criterion_main};
use fs_err::File;
use memmap2::MmapMut;
use memory::mmap_type::MmapBitSlice;
use rand::prelude::*;
use rand::rngs::StdRng;
use segment::common::mmap_bitslice_buffered_update_wrapper::MmapBitSliceBufferedUpdateWrapper;
use tempfile::tempdir;
const SIZE: usize = 4 * 1024 * 1024;
const FLAG_COUNT: usize = 1_000_000;
const LOOKUP_COUNT: usize = 1_000_000;
fn mmap_bitslice_buffered_update_wrapper(c: &mut Criterion) {
let mut rng = StdRng::seed_from_u64(42);
let dir = tempdir().unwrap();
let path = dir.path().join("bitslice.mmap");
let file = File::create_new(path).unwrap();
file.set_len(SIZE as u64).unwrap();
file.sync_all().unwrap();
let mmap_mut = unsafe { MmapMut::map_mut(&file).unwrap() };
let mmap_bitslice = MmapBitSlice::from(mmap_mut, 0);
let mmap_bitslice_buffered_update_wrapper =
MmapBitSliceBufferedUpdateWrapper::new(mmap_bitslice);
// Set random flags and persist
for _ in 0..FLAG_COUNT {
mmap_bitslice_buffered_update_wrapper
.set(rng.random::<u64>() as usize % SIZE, rng.random());
}
mmap_bitslice_buffered_update_wrapper.flusher()().unwrap();
let mut group = c.benchmark_group("mmap-bitslice-buffered-update-wrapper");
let lookups: Vec<_> = iter::repeat_with(|| rng.random::<u64>() as usize % SIZE)
.take(LOOKUP_COUNT)
.collect();
group.bench_function("lookup-without-pending-changes", |b| {
b.iter(|| {
for lookup in &lookups {
black_box(mmap_bitslice_buffered_update_wrapper.get(*lookup).unwrap());
}
});
});
// Set random flags and keep them in pending changes list
for _ in 0..FLAG_COUNT {
mmap_bitslice_buffered_update_wrapper
.set(rng.random::<u64>() as usize % SIZE, rng.random());
}
group.bench_function("lookup-with-pending-changes", |b| {
b.iter(|| {
for lookup in &lookups {
black_box(mmap_bitslice_buffered_update_wrapper.get(*lookup).unwrap());
}
});
});
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = mmap_bitslice_buffered_update_wrapper
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/hnsw_search_graph.rs | lib/segment/benches/hnsw_search_graph.rs | #[cfg(not(target_os = "windows"))]
mod prof;
use std::hint::black_box;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::fixtures::index_fixtures::random_vector;
use segment::index::hnsw_index::graph_layers::SearchAlgorithm;
use segment::spaces::simple::CosineMetric;
use segment::vector_storage::DEFAULT_STOPPED;
const NUM_VECTORS: usize = 1_000_000;
const DIM: usize = 64;
const M: usize = 16;
const TOP: usize = 10;
const EF_CONSTRUCT: usize = 100;
const EF: usize = 100;
const USE_HEURISTIC: bool = true;
mod fixture;
type Metric = CosineMetric;
fn hnsw_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("hnsw-search-graph");
let (vector_holder, mut graph_layers) =
fixture::make_cached_graph::<Metric>(NUM_VECTORS, DIM, M, EF_CONSTRUCT, USE_HEURISTIC);
let mut rng = StdRng::seed_from_u64(42);
group.bench_function("uncompressed", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let scorer = vector_holder.scorer(query);
black_box(
graph_layers
.search(
TOP,
EF,
SearchAlgorithm::Hnsw,
scorer,
None,
&DEFAULT_STOPPED,
)
.unwrap(),
);
})
});
graph_layers.compress_ram();
let mut rng = StdRng::seed_from_u64(42);
group.bench_function("compressed", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let scorer = vector_holder.scorer(query);
black_box(
graph_layers
.search(
TOP,
EF,
SearchAlgorithm::Hnsw,
scorer,
None,
&DEFAULT_STOPPED,
)
.unwrap(),
);
})
});
let mut plain_search_range: Vec<PointOffsetType> =
(0..NUM_VECTORS as PointOffsetType).collect();
let mut rng = StdRng::seed_from_u64(42);
group.bench_function("plain", |b| {
b.iter(|| {
let query = random_vector(&mut rng, DIM);
let mut scorer = vector_holder.scorer(query);
let mut top_score = 0.;
let scores = scorer.score_points(&mut plain_search_range, NUM_VECTORS);
scores.for_each(|score| {
if score.score > top_score {
top_score = score.score
}
});
})
});
group.finish();
}
#[cfg(not(target_os = "windows"))]
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
targets = hnsw_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
name = benches;
config = Criterion::default();
targets = hnsw_benchmark
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/fixture.rs | lib/segment/benches/fixture.rs | use std::path::Path;
use std::time::Duration;
use common::types::PointOffsetType;
use fs_err as fs;
use rand::SeedableRng as _;
use rand::rngs::StdRng;
use rayon::iter::{IntoParallelIterator as _, ParallelIterator as _};
use segment::fixtures::index_fixtures::TestRawScorerProducer;
use segment::index::hnsw_index::HnswM;
use segment::index::hnsw_index::graph_layers::GraphLayers;
use segment::index::hnsw_index::graph_layers_builder::GraphLayersBuilder;
use segment::index::hnsw_index::graph_links::GraphLinksFormatParam;
use segment::index::hnsw_index::hnsw::SINGLE_THREADED_HNSW_BUILD_THRESHOLD;
use segment::spaces::metric::Metric;
/// Generate vectors and HNSW graph to be used in benchmarks.
///
/// Graph layers are cached on disk to avoid wait times across repeated
/// benchmark runs.
/// Vectors values are not saved on disk, but generated deterministically using
/// the same seed.
pub fn make_cached_graph<METRIC>(
num_vectors: usize,
dim: usize,
m: usize,
ef_construct: usize,
use_heuristic: bool,
) -> (TestRawScorerProducer, GraphLayers)
where
METRIC: Metric<f32> + Sync + Send,
{
use indicatif::{ParallelProgressIterator as _, ProgressStyle};
let path = Path::new(env!("CARGO_TARGET_TMPDIR"))
.join(env!("CARGO_PKG_NAME"))
.join(env!("CARGO_CRATE_NAME"))
.join(format!(
"{num_vectors}-{dim}-{m}-{ef_construct}-{use_heuristic}-{:?}",
METRIC::distance(),
));
// Note: make sure that vector generation is deterministic.
let vector_holder = TestRawScorerProducer::new(
dim,
METRIC::distance(),
num_vectors,
false,
&mut StdRng::seed_from_u64(42),
);
let graph_layers_path = GraphLayers::get_path(&path);
let graph_layers = if graph_layers_path.exists() {
let updated_ago = updated_ago(&graph_layers_path).unwrap_or_else(|_| "???".to_string());
eprintln!("Loading cached links (built {updated_ago} ago) from {graph_layers_path:?}.");
eprintln!("Delete the directory above if code related to HNSW graph building is changed");
GraphLayers::load(&path, false, false).unwrap()
} else {
let mut graph_layers_builder =
GraphLayersBuilder::new(num_vectors, HnswM::new2(m), ef_construct, 10, use_heuristic);
let mut rng = StdRng::seed_from_u64(42);
for idx in 0..num_vectors {
let level = graph_layers_builder.get_random_layer(&mut rng);
graph_layers_builder.set_levels(idx as PointOffsetType, level);
}
let add_point = |idx| {
let scorer = vector_holder.internal_scorer(idx as PointOffsetType);
graph_layers_builder.link_new_point(idx as PointOffsetType, scorer);
};
(0..SINGLE_THREADED_HNSW_BUILD_THRESHOLD.min(num_vectors)).for_each(add_point);
(SINGLE_THREADED_HNSW_BUILD_THRESHOLD..num_vectors)
.into_par_iter()
.progress_with_style(
ProgressStyle::with_template("{percent:>3}% Buildng HNSW {wide_bar}").unwrap(),
)
.for_each(add_point);
fs::create_dir_all(&path).unwrap();
graph_layers_builder
.into_graph_layers(&path, GraphLinksFormatParam::Plain, false)
.unwrap()
};
(vector_holder, graph_layers)
}
fn updated_ago(path: &Path) -> Result<String, Box<dyn std::error::Error>> {
let elapsed = fs::metadata(path)?.modified()?.elapsed()?;
let secs_rounded = elapsed.as_secs().next_multiple_of(60);
Ok(humantime::format_duration(Duration::from_secs(secs_rounded)).to_string())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/vector_search.rs | lib/segment/benches/vector_search.rs | use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use atomic_refcell::AtomicRefCell;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{Criterion, criterion_group, criterion_main};
use rand::Rng;
use rand::distr::StandardUniform;
use segment::common::rocksdb_wrapper::{DB_VECTOR_CF, open_db};
use segment::data_types::vectors::{DenseVector, VectorInternal, VectorRef};
use segment::fixtures::payload_context_fixture::FixtureIdTracker;
use segment::id_tracker::IdTrackerSS;
use segment::index::hnsw_index::point_scorer::{BatchFilteredSearcher, FilteredScorer};
use segment::types::{Distance, VectorStorageDatatype};
use segment::vector_storage::dense::simple_dense_vector_storage::open_simple_dense_vector_storage;
use segment::vector_storage::{DEFAULT_STOPPED, VectorStorage, VectorStorageEnum};
use tempfile::Builder;
const NUM_VECTORS: usize = 100000;
const DIM: usize = 1024; // Larger dimensionality - greater the SIMD advantage
fn random_vector(size: usize) -> DenseVector {
let rng = rand::rng();
rng.sample_iter(StandardUniform).take(size).collect()
}
fn init_vector_storage(
path: &Path,
dim: usize,
num: usize,
dist: Distance,
) -> (VectorStorageEnum, Arc<AtomicRefCell<IdTrackerSS>>) {
let db = open_db(path, &[DB_VECTOR_CF]).unwrap();
let id_tracker = Arc::new(AtomicRefCell::new(FixtureIdTracker::new(num)));
let mut storage = open_simple_dense_vector_storage(
VectorStorageDatatype::Float32,
db,
DB_VECTOR_CF,
dim,
dist,
&AtomicBool::new(false),
)
.unwrap();
let hw_counter = HardwareCounterCell::new();
{
for i in 0..num {
let vector: VectorInternal = random_vector(dim).into();
storage
.insert_vector(i as PointOffsetType, VectorRef::from(&vector), &hw_counter)
.unwrap();
}
}
(storage, id_tracker)
}
fn benchmark_naive(c: &mut Criterion) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let dist = Distance::Dot;
let (storage, id_tracker) = init_vector_storage(dir.path(), DIM, NUM_VECTORS, dist);
let borrowed_id_tracker = id_tracker.borrow();
let mut group = c.benchmark_group("storage-score-all");
group.bench_function("storage vector search", |b| {
b.iter(|| {
let vector = random_vector(DIM);
let vector = vector.as_slice().into();
BatchFilteredSearcher::new_for_test(
&[vector],
&storage,
borrowed_id_tracker.deleted_point_bitslice(),
10,
)
.peek_top_all(&DEFAULT_STOPPED)
.unwrap();
})
});
}
// Batched search gives performance benefit only when memory is contended.
// For a single-threaded criterion run, it only shows that batching penalty is relatively small.
// We might run a thread pool explicitly, though.
fn benchmark_naive_4(c: &mut Criterion) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let dist = Distance::Dot;
let (storage, id_tracker) = init_vector_storage(dir.path(), DIM, NUM_VECTORS, dist);
let borrowed_id_tracker = id_tracker.borrow();
let mut group = c.benchmark_group("storage-score-all");
group.bench_function("storage vector search, 4 vectors batch", |b| {
b.iter(|| {
let vectors = [
random_vector(DIM).into(),
random_vector(DIM).into(),
random_vector(DIM).into(),
random_vector(DIM).into(),
];
BatchFilteredSearcher::new_for_test(
&vectors,
&storage,
borrowed_id_tracker.deleted_point_bitslice(),
10,
)
.peek_top_all(&DEFAULT_STOPPED)
.unwrap();
})
});
}
fn random_access_benchmark(c: &mut Criterion) {
let dir = Builder::new().prefix("storage_dir").tempdir().unwrap();
let dist = Distance::Dot;
let (storage, id_tracker) = init_vector_storage(dir.path(), DIM, NUM_VECTORS, dist);
let borrowed_id_tracker = id_tracker.borrow();
let mut group = c.benchmark_group("storage-score-random");
let vector = random_vector(DIM);
let vector = vector.as_slice().into();
let scorer = FilteredScorer::new_for_test(
vector,
&storage,
borrowed_id_tracker.deleted_point_bitslice(),
);
let mut total_score = 0.;
group.bench_function("storage vector search", |b| {
b.iter(|| {
let random_id = rand::rng().random_range(0..NUM_VECTORS) as PointOffsetType;
total_score += scorer.score_point(random_id);
})
});
eprintln!("total_score = {total_score:?}");
}
criterion_group!(
benches,
benchmark_naive,
benchmark_naive_4,
random_access_benchmark
);
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/lib/segment/benches/sparse_index_search.rs | lib/segment/benches/sparse_index_search.rs | use std::sync::atomic::AtomicBool;
use common::counter::hardware_counter::HardwareCounterCell;
use common::types::PointOffsetType;
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use dataset::Dataset;
use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use itertools::Itertools as _;
use rand::SeedableRng;
use rand::rngs::StdRng;
use segment::fixtures::sparse_fixtures::fixture_sparse_index_from_iter;
use segment::index::sparse_index::sparse_index_config::{SparseIndexConfig, SparseIndexType};
use segment::index::sparse_index::sparse_vector_index::{
SparseVectorIndex, SparseVectorIndexOpenArgs,
};
use segment::index::{PayloadIndex, VectorIndex};
use segment::payload_json;
use segment::types::PayloadSchemaType::Keyword;
use segment::types::{Condition, FieldCondition, Filter};
use sparse::common::sparse_vector::SparseVector;
use sparse::common::sparse_vector_fixture::{random_positive_sparse_vector, random_sparse_vector};
use sparse::index::inverted_index::inverted_index_compressed_mmap::InvertedIndexCompressedMmap;
use sparse::index::inverted_index::inverted_index_ram::InvertedIndexRam;
use sparse::index::loaders::Csr;
use tempfile::Builder;
#[cfg(not(target_os = "windows"))]
mod prof;
const NUM_VECTORS: usize = 50_000;
const MAX_SPARSE_DIM: usize = 30_000;
const NUM_QUERIES: usize = 2048;
const TOP: usize = 10;
const FULL_SCAN_THRESHOLD: usize = 1; // low value to trigger index usage by default
fn sparse_vector_index_search_benchmark(c: &mut Criterion) {
let mut rnd = StdRng::seed_from_u64(0);
let query_vectors = (0..NUM_QUERIES)
// Positive values to test pruning.
.map(|_| random_positive_sparse_vector(&mut rnd, MAX_SPARSE_DIM))
.collect::<Vec<_>>();
let mut rnd = StdRng::seed_from_u64(42);
let random_vectors = (0..NUM_VECTORS).map(|_| random_sparse_vector(&mut rnd, MAX_SPARSE_DIM));
sparse_vector_index_search_benchmark_impl(c, "random-50k", random_vectors, &query_vectors);
let dataset_vectors = Csr::open(Dataset::NeurIps2023_1M.download().unwrap()).unwrap();
let query_vectors = Csr::open(Dataset::NeurIps2023Queries.download().unwrap())
.unwrap()
.iter()
.map(|v| v.unwrap())
.collect_vec();
sparse_vector_index_search_benchmark_impl(
c,
"neurips2023-1M",
dataset_vectors.iter().map(|v| v.unwrap()),
&query_vectors,
);
}
/// Benchmarks sparse vector search for one dataset `group`:
/// - builds a mutable RAM inverted index over `vectors`,
/// - attaches the same payload to every point,
/// - builds an mmap-backed compressed index sharing the same id tracker,
///   vector storage and payload index,
/// - then benchmarks filtered/unfiltered, plain/indexed search variants.
///
/// Plain-search benchmarks are skipped for large datasets (>= 100k vectors)
/// because full scans there are too slow to be meaningful.
fn sparse_vector_index_search_benchmark_impl(
    c: &mut Criterion,
    group: &str,
    vectors: impl ExactSizeIterator<Item = SparseVector>,
    query_vectors: &[SparseVector],
) {
    let mut group = c.benchmark_group(format!("sparse_vector_index_search/{group}"));
    group.sample_size(10);
    let vectors_len = vectors.len();
    let stopped = AtomicBool::new(false);
    let data_dir = Builder::new().prefix("data_dir").tempdir().unwrap();
    let sparse_vector_index = fixture_sparse_index_from_iter::<InvertedIndexRam>(
        data_dir.path(),
        progress("Indexing (1/2)", vectors_len).wrap_iter(vectors),
        FULL_SCAN_THRESHOLD,
        SparseIndexType::MutableRam,
    )
    .unwrap();
    // adding payload on field
    let field_name = "field";
    let field_value = "important value";
    let payload = payload_json! {field_name: field_value};
    let hw_counter = HardwareCounterCell::new();
    // All points get the same payload so the filter below matches everything.
    // Iterate over `vectors_len` (not the NUM_VECTORS constant): datasets larger
    // than NUM_VECTORS would otherwise only be partially tagged, skewing the
    // filtered benchmarks.
    let mut payload_index = sparse_vector_index.payload_index().borrow_mut();
    for idx in 0..vectors_len {
        payload_index
            .set_payload(idx as PointOffsetType, &payload, &None, &hw_counter)
            .unwrap();
    }
    drop(payload_index);
    // Cycle through queries so every iteration gets a (possibly different) query.
    let mut query_vector_it = query_vectors.iter().cycle();
    // mmap inverted index, built from the RAM index's shared components
    let mmap_index_dir = Builder::new().prefix("mmap_index_dir").tempdir().unwrap();
    let sparse_index_config =
        SparseIndexConfig::new(Some(FULL_SCAN_THRESHOLD), SparseIndexType::Mmap, None);
    let pb = progress("Indexing (2/2)", vectors_len);
    let sparse_vector_index_mmap: SparseVectorIndex<InvertedIndexCompressedMmap<f32>> =
        SparseVectorIndex::open(SparseVectorIndexOpenArgs {
            config: sparse_index_config,
            id_tracker: sparse_vector_index.id_tracker().clone(),
            vector_storage: sparse_vector_index.vector_storage().clone(),
            payload_index: sparse_vector_index.payload_index().clone(),
            path: mmap_index_dir.path(),
            stopped: &stopped,
            tick_progress: || pb.inc(1),
        })
        .unwrap();
    pb.finish_and_clear();
    assert_eq!(sparse_vector_index_mmap.indexed_vector_count(), vectors_len);
    // intent: bench `search` without filter on mmap inverted index
    group.bench_function("mmap-inverted-index-search", |b| {
        b.iter_batched(
            || query_vector_it.next().unwrap().clone().into(),
            |vec| {
                let results = sparse_vector_index_mmap
                    .search(&[&vec], None, TOP, None, &Default::default())
                    .unwrap();
                assert_eq!(results[0].len(), TOP);
            },
            BatchSize::SmallInput,
        )
    });
    // intent: bench `search` without filter
    group.bench_function("inverted-index-search", |b| {
        b.iter_batched(
            || query_vector_it.next().unwrap().clone().into(),
            |vec| {
                let results = sparse_vector_index
                    .search(&[&vec], None, TOP, None, &Default::default())
                    .unwrap();
                assert_eq!(results[0].len(), TOP);
            },
            BatchSize::SmallInput,
        )
    });
    // filter by field — matches every point, since all points carry this payload
    let filter = Filter::new_must(Condition::Field(FieldCondition::new_match(
        field_name.parse().unwrap(),
        field_value.to_owned().into(),
    )));
    // intent: bench plain search when the filtered payload key is not indexed
    if vectors_len < 100_000 {
        group.bench_function("inverted-index-filtered-plain", |b| {
            b.iter_batched(
                || query_vector_it.next().unwrap(),
                |vec| {
                    let mut prefiltered_points = None;
                    let results = sparse_vector_index
                        .search_plain(
                            vec,
                            &filter,
                            TOP,
                            &mut prefiltered_points,
                            &Default::default(),
                        )
                        .unwrap();
                    assert_eq!(results.len(), TOP);
                },
                BatchSize::SmallInput,
            )
        });
    }
    let mut payload_index = sparse_vector_index.payload_index().borrow_mut();
    // create payload field index so the filtered searches below can use it
    payload_index
        .set_indexed(&field_name.parse().unwrap(), Keyword, &hw_counter)
        .unwrap();
    drop(payload_index);
    // intent: bench `search` when the filtered payload key is indexed
    group.bench_function("inverted-index-filtered-payload-index", |b| {
        b.iter_batched(
            || query_vector_it.next().unwrap().clone().into(),
            |vec| {
                let results = sparse_vector_index
                    .search(&[&vec], Some(&filter), TOP, None, &Default::default())
                    .unwrap();
                assert_eq!(results[0].len(), TOP);
            },
            BatchSize::SmallInput,
        )
    });
    // intent: bench plain search when the filtered payload key is indexed
    if vectors_len < 100_000 {
        group.bench_function("plain-filtered-payload-index", |b| {
            b.iter_batched(
                || query_vector_it.next().unwrap(),
                |vec| {
                    let mut prefiltered_points = None;
                    let results = sparse_vector_index
                        .search_plain(
                            vec,
                            &filter,
                            TOP,
                            &mut prefiltered_points,
                            &Default::default(),
                        )
                        .unwrap();
                    assert_eq!(results.len(), TOP);
                },
                BatchSize::SmallInput,
            )
        });
    }
    group.finish();
}
/// Builds a stderr-targeted progress bar with `length` total steps, labelled
/// `name`, and redrawn at most 12 times per second to keep overhead low.
fn progress(name: &str, length: usize) -> ProgressBar {
    let bar = ProgressBar::with_draw_target(
        Some(length as u64),
        ProgressDrawTarget::stderr_with_hz(12),
    );
    let style = ProgressStyle::default_bar()
        .template("{msg} {wide_bar} {pos}/{len} (eta:{eta})")
        .unwrap();
    bar.set_style(style);
    bar.set_message(name.to_owned());
    bar
}
// Flamegraph profiling (pprof-based) is not supported on Windows, so the
// profiler is only attached on non-Windows targets; the benchmark targets
// themselves are identical on both configurations.
#[cfg(not(target_os = "windows"))]
criterion_group! {
    name = benches;
    config = Criterion::default().with_profiler(prof::FlamegraphProfiler::new(100));
    targets = sparse_vector_index_search_benchmark
}
#[cfg(target_os = "windows")]
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = sparse_vector_index_search_benchmark,
}
criterion_main!(benches);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |