repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/state/command_status.rs | default-plugins/sequence/src/state/command_status.rs | use zellij_tile::prelude::*;
/// Lifecycle state of a single command in the sequence.
///
/// Each stateful variant optionally carries the `PaneId` of the pane the
/// command ran (or is running) in; the id is `None` until the pane is known
/// or after the pane has been closed.
#[derive(Debug, Clone)]
pub enum CommandStatus {
    /// Finished: optional exit code, plus the pane it ran in (if still known).
    Exited(Option<i32>, Option<PaneId>),
    /// Currently running; pane id is `None` until the pane has been attached.
    Running(Option<PaneId>),
    /// Not started yet (also the `Default` state).
    Pending,
    /// Interrupted before completing; pane id kept while the pane exists.
    Interrupted(Option<PaneId>),
}
impl CommandStatus {
    /// The pane associated with this status, when one is known.
    /// `Pending` commands have no pane yet.
    pub fn get_pane_id(&self) -> Option<PaneId> {
        match self {
            CommandStatus::Pending => None,
            CommandStatus::Exited(_, pane_id)
            | CommandStatus::Running(pane_id)
            | CommandStatus::Interrupted(pane_id) => *pane_id,
        }
    }
}
impl Default for CommandStatus {
fn default() -> Self {
CommandStatus::Pending
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/state/execution.rs | default-plugins/sequence/src/state/execution.rs | use crate::path_formatting::format_cwd;
use crate::state::{ChainType, CommandEntry, CommandStatus};
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::time::Duration;
use zellij_tile::prelude::*;
/// State of one command-sequence run: the edited commands, which one is
/// currently executing, and bookkeeping needed to restore the layout.
pub struct Execution {
    /// The full edited sequence; holds at least one (possibly empty) entry.
    pub all_commands: Vec<CommandEntry>,
    /// Index into `all_commands` of the command currently executing.
    pub current_running_command_index: usize,
    /// True while the sequence is actively running.
    pub is_running: bool,
    /// Monotonic id incremented per run; passed as context to pane actions
    /// so replies can be matched to the run that issued them.
    pub sequence_id: u64,
    /// Pane that was focused before the sequence started.
    // NOTE(review): presumably used to restore focus afterwards — the
    // restoring code is not in this file; confirm against callers.
    pub primary_pane_id_before_sequence: Option<PaneId>,
}
impl Execution {
    /// Create an execution with a single empty command entry and no running
    /// sequence.
    pub fn new() -> Self {
        Self {
            all_commands: vec![CommandEntry::default()],
            current_running_command_index: 0,
            is_running: false,
            sequence_id: 0,
            primary_pane_id_before_sequence: None,
        }
    }
    /// Widest formatted cwd among all commands (each falling back to
    /// `global_cwd`, then to "~"); used to size the cwd column in the UI.
    pub fn longest_cwd_display(&self, global_cwd: &Option<PathBuf>) -> String {
        self.all_commands
            .iter()
            .map(|cmd| {
                cmd.get_cwd()
                    .or_else(|| global_cwd.clone())
                    .as_ref()
                    .map(|cwd| format_cwd(cwd))
                    .unwrap_or_else(|| "~".to_string())
            })
            .max_by_key(|s| s.len())
            .unwrap_or_else(|| "~".to_string())
    }
    /// Drop commands with empty text, but never when only one entry is left
    /// so the UI always has something to edit.
    pub fn remove_empty_commands(&mut self) {
        // Was `self.all_commands.iter().len()` — `len()` says this directly.
        if self.all_commands.len() > 1 {
            self.all_commands.retain(|c| !c.get_text().is_empty());
        }
    }
    /// First command in the sequence, if any.
    pub fn get_first_command(&self) -> Option<CommandEntry> {
        // Was `iter().next().cloned()` — `first()` is the idiomatic form.
        self.all_commands.first().cloned()
    }
    /// Set the status of the command at `command_index`; no-op when the index
    /// is out of bounds.
    pub fn set_command_status(&mut self, command_index: usize, status: CommandStatus) {
        // `if let` instead of `Option::map` for a side effect (clippy:
        // option_map_unit_fn).
        if let Some(command) = self.all_commands.get_mut(command_index) {
            command.set_status(status);
        }
    }
    /// Set the status of the command currently marked as running.
    pub fn set_current_running_command_status(&mut self, status: CommandStatus) {
        let index = self.current_running_command_index;
        self.set_command_status(index, status);
    }
    /// Status of the command currently marked as running, if the index is
    /// valid.
    pub fn get_current_running_command_status(&self) -> Option<CommandStatus> {
        self.all_commands
            .get(self.current_running_command_index)
            .map(|c| c.get_status())
    }
    /// A sequence can run when at least one command has content.
    pub fn can_run_sequence(&self) -> bool {
        self.all_commands.iter().any(|command| !command.is_empty())
    }
    /// Copy all command texts (newline separated) to the system clipboard;
    /// does nothing when every command is empty.
    pub fn copy_to_clipboard(&self) {
        let text_to_copy = self
            .all_commands
            .iter()
            .map(|c| c.get_text()) // was a useless `format!("{}", ...)`
            .collect::<Vec<_>>()
            .join("\n");
        if !text_to_copy.is_empty() {
            copy_to_clipboard(text_to_copy);
        }
    }
    /// Attach `pane_id` to the first pending (or pane-less running) command
    /// whose text matches `command_text`.
    pub fn update_pane_id_for_command(&mut self, pane_id: PaneId, command_text: &str) {
        for command in self.all_commands.iter_mut() {
            if matches!(
                command.get_status(),
                CommandStatus::Pending | CommandStatus::Running(None)
            ) && command.get_text() == command_text
            {
                command.set_status(CommandStatus::Running(Some(pane_id)));
                break;
            }
        }
    }
    /// Reconcile command statuses against the pane manifest.
    ///
    /// Commands whose pane has exited become `Exited`; commands whose pane
    /// disappeared entirely have their pane id cleared — but only after a
    /// 400ms grace period, since the manifest can briefly lag behind pane
    /// creation. Returns true when any status changed.
    pub fn update_exited_command_statuses(&mut self, pane_manifest: &PaneManifest) -> bool {
        let mut updated = false;
        for command in self.all_commands.iter_mut() {
            let pane_id_opt = match command.get_status() {
                CommandStatus::Running(pid)
                | CommandStatus::Exited(_, pid)
                | CommandStatus::Interrupted(pid) => pid,
                CommandStatus::Pending => None,
            };
            let Some(pane_id) = pane_id_opt else {
                continue;
            };
            // Search every tab's pane list for this command's pane.
            let matching_pane = pane_manifest
                .panes
                .values()
                .flatten()
                .find(|pane_info| match pane_id {
                    PaneId::Terminal(id) => !pane_info.is_plugin && pane_info.id == id,
                    PaneId::Plugin(id) => pane_info.is_plugin && pane_info.id == id,
                });
            if let Some(pane_info) = matching_pane {
                if pane_info.exited {
                    command.set_status(CommandStatus::Exited(
                        pane_info.exit_status,
                        Some(pane_id),
                    ));
                    updated = true;
                }
            } else if command.start_time.elapsed() > Duration::from_millis(400) {
                // Pane is gone: keep the logical status but drop the stale id.
                match command.get_status() {
                    CommandStatus::Running(_) => {
                        eprintln!(
                            "Pane {:?} was closed while running, setting pane_id to None",
                            pane_id
                        );
                        command.set_status(CommandStatus::Running(None));
                        updated = true;
                    },
                    CommandStatus::Exited(exit_code, _) => {
                        eprintln!(
                            "Pane {:?} was closed after exiting, setting pane_id to None",
                            pane_id
                        );
                        command.set_status(CommandStatus::Exited(exit_code, None));
                        updated = true;
                    },
                    CommandStatus::Interrupted(_) => {
                        eprintln!("Pane {:?} was closed after being interrupted, setting pane_id to None", pane_id);
                        command.set_status(CommandStatus::Interrupted(None));
                        updated = true;
                    },
                    // Unreachable in practice: Pending carries no pane id.
                    CommandStatus::Pending => {},
                }
            }
        }
        updated
    }
    /// Decide whether the sequence should stop after the current command
    /// exited, based on its chain operator (&& / || / ; / none) and exit code.
    /// Returns true whenever a re-render is needed (i.e. whenever a sequence
    /// was running on entry).
    pub fn update_sequence_stopped_state(&mut self) -> bool {
        if !self.is_running {
            return false;
        }
        let current_idx = self.current_running_command_index;
        // Read-only: `get` suffices (the original used `get_mut` needlessly).
        if let Some(command) = self.all_commands.get(current_idx) {
            if let CommandStatus::Exited(exit_code, _) = command.get_status() {
                let is_last = current_idx >= self.all_commands.len().saturating_sub(1);
                let should_stop = if is_last {
                    true
                } else {
                    match command.get_chain_type() {
                        // `&&`: stop on failure.
                        ChainType::And => exit_code.unwrap_or(0) != 0,
                        // `||`: stop on success.
                        ChainType::Or => exit_code.unwrap_or(0) == 0,
                        // `;`: always continue.
                        ChainType::Then => false,
                        // No chain operator: this command stands alone.
                        ChainType::None => true,
                    }
                };
                if should_stop {
                    self.is_running = false;
                }
            }
        }
        true
    }
    /// Start running the sequence: spawn the first command in a blocking pane
    /// replacing `primary_pane_id` in place, and mark it as running.
    ///
    /// Commands run through `<shell> -ic <text>` (interactive shell, so user
    /// aliases and functions resolve); defaults to /bin/bash when no shell is
    /// configured.
    pub fn execute_command_sequence(
        &mut self,
        shell: &Option<PathBuf>,
        global_cwd: &Option<PathBuf>,
        primary_pane_id: Option<PaneId>,
    ) {
        use zellij_tile::prelude::actions::{Action, RunCommandAction};
        self.all_commands.retain(|c| !c.is_empty());
        let Some(first_active_sequence_command) = self.get_first_command() else {
            return;
        };
        let shell = shell.clone().unwrap_or_else(|| PathBuf::from("/bin/bash"));
        let first_command = first_active_sequence_command.get_text();
        let first_chain_type = first_active_sequence_command.get_chain_type();
        let command_cwd = first_active_sequence_command
            .get_cwd()
            .or_else(|| global_cwd.clone());
        let command = RunCommandAction {
            command: shell, // was a redundant `shell.clone()`; `shell` is not used again
            args: vec!["-ic".to_string(), first_command.trim().to_string()],
            cwd: command_cwd,
            hold_on_close: true,
            ..Default::default()
        };
        let placement = NewPanePlacement::InPlace {
            pane_id_to_replace: primary_pane_id,
            close_replaced_pane: false,
        };
        let action = Action::NewBlockingPane {
            placement,
            command: Some(command),
            pane_name: Some(first_command.trim().to_string()),
            unblock_condition: first_chain_type.to_unblock_condition(),
            near_current_pane: true,
        };
        // Tag the action with a fresh sequence id so replies can be matched
        // to this particular run.
        self.sequence_id += 1;
        let mut context = BTreeMap::new();
        context.insert("sequence_id".to_string(), self.sequence_id.to_string());
        run_action(action, context);
        self.set_command_status(0, CommandStatus::Running(None));
    }
}
impl Default for Execution {
fn default() -> Self {
Self::new()
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/state/positioning.rs | default-plugins/sequence/src/state/positioning.rs | use zellij_tile::prelude::*;
/// Resize and pin the plugin's floating pane so it hugs the top-right corner
/// of the viewport, sized to the current number of commands.
pub fn reposition_plugin_for_sequence(
    plugin_id: u32,
    total_cols: usize,
    total_rows: usize,
    total_commands: usize,
) {
    // Width: 30% of the viewport, but never narrower than 50 columns.
    let width = ((total_cols * 30) / 100).max(50);
    // Height: fixed UI overhead plus one row per command, capped at 25% of
    // the viewport.
    let overhead_rows = 6;
    let height = (overhead_rows + total_commands).min((total_rows * 25) / 100);
    // Anchor flush against the right edge, one row below the top.
    let x = total_cols.saturating_sub(width);
    let y = 1;
    change_floating_pane_coordinates_absolute(
        plugin_id,
        Some(x),
        Some(y),
        Some(width),
        Some(height),
        true,
    );
}
/// Move/resize this plugin's own floating pane to absolute coordinates.
/// Fields left as `None` are not changed; `should_be_pinned` controls pinning.
pub fn change_floating_pane_coordinates_absolute(
    own_plugin_id: u32,
    x: Option<usize>,
    y: Option<usize>,
    width: Option<usize>,
    height: Option<usize>,
    should_be_pinned: bool,
) {
    // FloatingPaneCoordinates takes stringly-typed dimensions; render each
    // provided value and keep absent ones as None.
    let as_string = |v: usize| v.to_string();
    if let Some(coordinates) = FloatingPaneCoordinates::new(
        x.map(as_string),
        y.map(as_string),
        width.map(as_string),
        height.map(as_string),
        Some(should_be_pinned),
    ) {
        change_floating_panes_coordinates(vec![(PaneId::Plugin(own_plugin_id), coordinates)]);
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/ui/truncation.rs | default-plugins/sequence/src/ui/truncation.rs | use unicode_width::UnicodeWidthStr;
/// Columns reserved between row cells before any optional separators.
const BASE_SEPARATOR_WIDTH: usize = 2;
/// Columns left for the command text after the folder column, chain marker,
/// status cell, separators and the overflow indicator are accounted for.
/// Never returns less than 1.
pub fn calculate_available_cmd_width(
    cols: usize,
    folder_width: usize,
    overflow_indicator: Option<&String>,
    chain_width: usize,
    status_width: usize,
) -> usize {
    // One extra separator column each for the overflow marker and the status
    // cell, when present.
    let separator_width = BASE_SEPARATOR_WIDTH
        + usize::from(overflow_indicator.is_some())
        + usize::from(status_width > 0);
    let overflow_width = overflow_indicator.map_or(0, |s| s.chars().count());
    cols.saturating_sub(folder_width)
        .saturating_sub(chain_width)
        .saturating_sub(status_width)
        .saturating_sub(separator_width)
        .saturating_sub(overflow_width)
        .saturating_sub(2)
        .max(1)
}
/// Truncate `text` to at most `max_width` display columns, preferring to cut
/// from the middle, and remap the (char-indexed) cursor into the result.
pub fn truncate_middle(
    text: &str,
    max_width: usize,
    cursor_position: Option<usize>,
) -> (String, Option<usize>) {
    // Already fits: nothing to do.
    if text.width() <= max_width {
        return (text.to_string(), cursor_position);
    }
    // Too narrow for a sensible "...": hard-cut from the left.
    if max_width < 5 {
        truncate_minimal(text, max_width, cursor_position)
    } else if let Some(cursor_char_idx) = cursor_position {
        truncate_with_cursor(text, max_width, cursor_char_idx)
    } else {
        truncate_no_cursor(text, max_width)
    }
}
/// Fallback for very narrow widths: keep as many leading characters as fit,
/// with no ellipsis. The cursor survives only if it lands inside the kept
/// prefix.
fn truncate_minimal(
    text: &str,
    max_width: usize,
    cursor_position: Option<usize>,
) -> (String, Option<usize>) {
    let mut kept = String::new();
    let mut used_width = 0;
    let mut remapped_cursor = None;
    for (idx, ch) in text.chars().enumerate() {
        let ch_width = ch.to_string().width();
        if used_width + ch_width > max_width {
            break;
        }
        kept.push(ch);
        used_width += ch_width;
        if cursor_position == Some(idx) {
            remapped_cursor = Some(idx);
        }
    }
    (kept, remapped_cursor)
}
/// Cursor-less truncation: keep a head and a tail joined by "...", splitting
/// the remaining width budget evenly between the two ends.
fn truncate_no_cursor(text: &str, max_width: usize) -> (String, Option<usize>) {
    // Reserve 3 columns for the "..." joiner.
    let budget = max_width.saturating_sub(3);
    let left_budget = budget / 2;
    let right_budget = budget - left_budget;

    let mut head = String::new();
    let mut used = 0;
    for ch in text.chars() {
        let w = ch.to_string().width();
        if used + w > left_budget {
            break;
        }
        head.push(ch);
        used += w;
    }

    let mut tail = String::new();
    let mut used = 0;
    for ch in text.chars().rev() {
        let w = ch.to_string().width();
        if used + w > right_budget {
            break;
        }
        tail.insert(0, ch);
        used += w;
    }

    (format!("{}...{}", head, tail), None)
}
/// Cursor-aware truncation: choose a visible window of `text` that contains
/// the cursor, then remap the cursor into the truncated string.
/// All indices here are char indices; widths are display columns.
fn truncate_with_cursor(
    text: &str,
    max_width: usize,
    cursor_char_idx: usize,
) -> (String, Option<usize>) {
    let chars: Vec<char> = text.chars().collect();
    let char_widths: Vec<usize> = chars.iter().map(|ch| ch.to_string().width()).collect();
    let width_before_cursor: usize = char_widths[..cursor_char_idx].iter().sum();
    let width_after_cursor: usize = char_widths[cursor_char_idx..].iter().sum();
    // Budget when only a single "..." marker (3 columns) is needed.
    let available_one_ellipsis = max_width.saturating_sub(3);
    // Pick a shape: keep the head, keep the tail, or window around the
    // cursor with "..." on both sides.
    let (start_idx, end_idx) = if width_before_cursor <= available_one_ellipsis {
        calculate_end_truncation(&chars, &char_widths, available_one_ellipsis)
    } else if width_after_cursor <= available_one_ellipsis {
        calculate_start_truncation(&chars, &char_widths, available_one_ellipsis)
    } else {
        calculate_middle_truncation(&char_widths, max_width, cursor_char_idx)
    };
    // Widen the window when an ellipsis would hide fewer columns than it
    // costs to display.
    let (start_idx, end_idx) =
        adjust_small_truncations(start_idx, end_idx, &char_widths, cursor_char_idx, &chars);
    // Shrink again if the adjusted window overshoots max_width.
    let (start_idx, end_idx) =
        trim_excess(&char_widths, start_idx, end_idx, max_width, cursor_char_idx);
    build_truncated_result(&chars, start_idx, end_idx, cursor_char_idx)
}
/// Keep a prefix of the text: greedily take characters from the front until
/// the width budget runs out. Returns the (start, end) char-index window.
fn calculate_end_truncation(
    chars: &[char],
    char_widths: &[usize],
    available: usize,
) -> (usize, usize) {
    let mut width = 0;
    let mut end = 0;
    for (idx, w) in char_widths.iter().enumerate().take(chars.len()) {
        if width + w > available {
            break;
        }
        width += w;
        end = idx + 1;
    }
    (0, end)
}
/// Keep a suffix of the text: walk backwards from the end while the width
/// budget allows. Returns the (start, end) char-index window.
fn calculate_start_truncation(
    chars: &[char],
    char_widths: &[usize],
    available: usize,
) -> (usize, usize) {
    let mut start = chars.len();
    let mut width = 0;
    loop {
        if start == 0 || width + char_widths[start - 1] > available {
            break;
        }
        start -= 1;
        width += char_widths[start];
    }
    (start, chars.len())
}
/// Window around the cursor with "..." on both sides (6 columns total).
/// Grows left and right of the cursor within half-budgets, then hands any
/// unused budget from one side to the other.
fn calculate_middle_truncation(
    char_widths: &[usize],
    max_width: usize,
    cursor_char_idx: usize,
) -> (usize, usize) {
    let budget = max_width.saturating_sub(6);
    let budget_before = budget / 2;
    let budget_after = budget - budget_before;

    // Grow the window leftwards from the cursor.
    let mut start_idx = cursor_char_idx;
    let mut width_before = 0;
    while start_idx > 0 && width_before + char_widths[start_idx - 1] <= budget_before {
        start_idx -= 1;
        width_before += char_widths[start_idx];
    }

    // Grow the window rightwards from the cursor.
    let mut end_idx = cursor_char_idx;
    let mut width_after = 0;
    while end_idx < char_widths.len() && width_after + char_widths[end_idx] <= budget_after {
        width_after += char_widths[end_idx];
        end_idx += 1;
    }

    // Redistribute leftover budget to the opposite side.
    let leftover_before = budget_before.saturating_sub(width_before);
    if leftover_before > 0 {
        let mut spare = leftover_before;
        while end_idx < char_widths.len() && char_widths[end_idx] <= spare {
            spare -= char_widths[end_idx];
            end_idx += 1;
        }
    }
    let leftover_after = budget_after.saturating_sub(width_after);
    if leftover_after > 0 {
        let mut spare = leftover_after;
        while start_idx > 0 && char_widths[start_idx - 1] <= spare {
            start_idx -= 1;
            spare -= char_widths[start_idx];
        }
    }

    (start_idx, end_idx)
}
/// If an ellipsis would hide fewer columns than the 3 it costs, drop it:
/// extend the window to that edge and reclaim the same width from the other
/// side (never shrinking past the cursor).
fn adjust_small_truncations(
    start_idx: usize,
    end_idx: usize,
    char_widths: &[usize],
    cursor_char_idx: usize,
    chars: &[char],
) -> (usize, usize) {
    let hidden_left: usize = char_widths[..start_idx].iter().copied().sum();
    let hidden_right: usize = char_widths[end_idx..].iter().copied().sum();
    let (mut start_idx, mut end_idx) = (start_idx, end_idx);
    if (1..3).contains(&hidden_left) && end_idx < chars.len() {
        let to_reclaim = hidden_left;
        start_idx = 0;
        let mut reclaimed = 0;
        while end_idx > cursor_char_idx + 1 && reclaimed < to_reclaim {
            end_idx -= 1;
            reclaimed += char_widths[end_idx];
        }
    } else if (1..3).contains(&hidden_right) && start_idx > 0 {
        let to_reclaim = hidden_right;
        end_idx = chars.len();
        let mut reclaimed = 0;
        while start_idx < cursor_char_idx && reclaimed < to_reclaim {
            reclaimed += char_widths[start_idx];
            start_idx += 1;
        }
    }
    (start_idx, end_idx)
}
/// Final safety pass: if the visible window plus its ellipses still exceeds
/// `max_width`, shave characters — tail first, then head — without ever
/// hiding the cursor.
fn trim_excess(
    char_widths: &[usize],
    start_idx: usize,
    end_idx: usize,
    max_width: usize,
    cursor_char_idx: usize,
) -> (usize, usize) {
    let has_head_ellipsis = start_idx > 0;
    let has_tail_ellipsis = end_idx < char_widths.len();
    // Each "..." marker occupies 3 columns.
    let ellipsis_width = 3 * (usize::from(has_head_ellipsis) + usize::from(has_tail_ellipsis));
    let visible: usize = char_widths[start_idx..end_idx].iter().copied().sum();
    let (mut start_idx, mut end_idx) = (start_idx, end_idx);
    if visible + ellipsis_width > max_width {
        let mut excess = visible + ellipsis_width - max_width;
        if has_tail_ellipsis {
            while excess > 0 && end_idx > cursor_char_idx + 1 {
                end_idx -= 1;
                excess = excess.saturating_sub(char_widths[end_idx]);
            }
        }
        if excess > 0 && has_head_ellipsis {
            while excess > 0 && start_idx < cursor_char_idx {
                excess = excess.saturating_sub(char_widths[start_idx]);
                start_idx += 1;
            }
        }
    }
    (start_idx, end_idx)
}
/// Render the chosen window as a string (with "..." where text was cut) and
/// remap the cursor to its position in that string. A cursor beyond the
/// window is pinned to the end of the visible text.
fn build_truncated_result(
    chars: &[char],
    start_idx: usize,
    end_idx: usize,
    cursor_char_idx: usize,
) -> (String, Option<usize>) {
    let mut result = String::new();
    let mut cursor_out = 0;
    // A leading "..." shifts every remapped position by three characters.
    let mut out_pos = if start_idx > 0 {
        result.push_str("...");
        3
    } else {
        0
    };
    for (offset, ch) in chars[start_idx..end_idx].iter().enumerate() {
        if start_idx + offset == cursor_char_idx {
            cursor_out = out_pos;
        }
        result.push(*ch);
        out_pos += 1;
    }
    if cursor_char_idx >= end_idx {
        cursor_out = out_pos;
    }
    if end_idx < chars.len() {
        result.push_str("...");
    }
    (result, Some(cursor_out))
}
// Behavioral coverage for truncate_middle: exact fit, no-cursor truncation,
// cursor at start / middle / end, and wide (double-column) characters.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_truncate_middle_no_truncation_needed() {
        // Text narrower than max_width passes through untouched.
        let text = "hello world";
        let (result, cursor_pos) = truncate_middle(text, 20, Some(6));
        assert_eq!(result, "hello world");
        assert_eq!(cursor_pos, Some(6));
    }
    #[test]
    fn test_truncate_middle_no_cursor() {
        let text = "this is a very long string that needs truncation";
        let (result, cursor_pos) = truncate_middle(text, 20, None);
        assert!(result.contains("..."));
        assert_eq!(cursor_pos, None);
    }
    #[test]
    fn test_truncate_middle_cursor_at_start() {
        // Cursor near the start keeps the head and truncates the tail.
        let text = "this is a very long string that needs truncation";
        let (result, cursor_pos) = truncate_middle(text, 20, Some(0));
        assert!(result.starts_with("this"));
        assert!(result.ends_with("..."));
        assert_eq!(cursor_pos, Some(0));
    }
    #[test]
    fn test_truncate_middle_cursor_at_end() {
        // Cursor at the end keeps the tail and truncates the head.
        let text = "this is a very long string that needs truncation";
        let cursor_char = text.chars().count();
        let (result, _cursor_pos) = truncate_middle(text, 20, Some(cursor_char));
        assert!(result.starts_with("..."));
        assert!(result.ends_with("truncation"));
    }
    #[test]
    fn test_truncate_middle_cursor_in_middle() {
        // A mid-text cursor stays visible inside the truncated window.
        let text = "this is a very long string that needs truncation";
        let cursor_char = "this is a very ".chars().count();
        let (result, cursor_pos) = truncate_middle(text, 20, Some(cursor_char));
        assert!(result.contains("very"));
        assert!(cursor_pos.is_some());
    }
    #[test]
    fn test_truncate_middle_wide_chars() {
        // Double-width CJK characters must not overshoot the column budget.
        let text = "こんにちは世界";
        let (result, _) = truncate_middle(text, 10, None);
        assert!(result.width() <= 10);
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/ui/text_input.rs | default-plugins/sequence/src/ui/text_input.rs | use zellij_tile::prelude::*;
/// Maximum number of undo snapshots kept; the oldest is dropped beyond this.
const MAX_UNDO_STACK_SIZE: usize = 100;
/// Action returned by TextInput after handling a key event
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(test, derive(Eq))]
pub enum InputAction {
    /// Continue editing
    Continue,
    /// User pressed Enter to submit
    Submit,
    /// User pressed Esc (or Ctrl-C) to cancel
    Cancel,
    /// User pressed Tab to request completion
    Complete,
    /// Key was not handled by the input
    NoAction,
}
/// A reusable text input component with cursor support and standard editing keybindings
#[derive(Debug, Clone)]
pub struct TextInput {
    buffer: String,
    cursor_position: usize, // Character position (0-based), NOT byte position
    undo_stack: Vec<(String, usize)>, // (buffer, cursor) snapshots
    redo_stack: Vec<(String, usize)>, // cleared whenever a new edit is made
    last_edit_was_insert: bool, // For coalescing consecutive inserts
}
impl TextInput {
/// Create a new TextInput with the given initial text
/// Cursor is positioned at the end of the text
pub fn new(initial_text: String) -> Self {
let cursor_position = initial_text.chars().count();
Self {
buffer: initial_text,
cursor_position,
undo_stack: Vec::new(),
redo_stack: Vec::new(),
last_edit_was_insert: false,
}
}
/// Create an empty TextInput
pub fn empty() -> Self {
Self::new(String::new())
}
/// Get the current text
pub fn get_text(&self) -> &str {
&self.buffer
}
/// Get the cursor position (in characters, not bytes)
pub fn get_cursor_position(&self) -> usize {
self.cursor_position
}
/// Check if the input is empty
pub fn is_empty(&self) -> bool {
self.buffer.is_empty()
}
/// Get a shorthand for cursor_position
pub fn cursor_position(&self) -> usize {
self.cursor_position
}
/// Get mutable access to the underlying buffer for direct manipulation
pub fn get_text_mut(&mut self) -> &mut String {
&mut self.buffer
}
/// Set the text and move cursor to the end
pub fn set_text(&mut self, text: String) {
self.break_coalescing();
self.save_undo_state();
self.cursor_position = text.chars().count();
self.buffer = text;
}
/// Set cursor position (clamped to text length)
pub fn set_cursor_position(&mut self, pos: usize) {
let text_len = self.buffer.chars().count();
self.cursor_position = pos.min(text_len);
}
/// Clear all text and reset cursor
pub fn clear(&mut self) {
self.break_coalescing();
self.save_undo_state();
self.buffer.clear();
self.cursor_position = 0;
}
/// Insert a character at the current cursor position
pub fn insert_char(&mut self, c: char) {
self.save_undo_state_unless_coalescing();
// Convert cursor position (char index) to byte index
let byte_index = self.char_index_to_byte_index(self.cursor_position);
self.buffer.insert(byte_index, c);
self.cursor_position += 1;
}
/// Delete the character before the cursor (backspace)
pub fn backspace(&mut self) {
if self.cursor_position > 0 {
self.break_coalescing();
self.save_undo_state();
self.cursor_position -= 1;
let byte_index = self.char_index_to_byte_index(self.cursor_position);
self.buffer.remove(byte_index);
}
}
/// Delete the character at the cursor position (delete key)
pub fn delete(&mut self) {
let len = self.buffer.chars().count();
if self.cursor_position < len {
self.break_coalescing();
self.save_undo_state();
let byte_index = self.char_index_to_byte_index(self.cursor_position);
self.buffer.remove(byte_index);
}
}
/// Delete the word before the cursor (Ctrl/Alt + Backspace)
pub fn delete_word_backward(&mut self) {
if self.cursor_position == 0 {
return;
}
self.break_coalescing();
self.save_undo_state();
let old_position = self.cursor_position;
self.move_word_left();
let new_position = self.cursor_position;
// Delete from new position to old position
let start_byte = self.char_index_to_byte_index(new_position);
let end_byte = self.char_index_to_byte_index(old_position);
self.buffer.drain(start_byte..end_byte);
}
/// Delete the word after the cursor (Ctrl/Alt + Delete)
pub fn delete_word_forward(&mut self) {
let chars: Vec<char> = self.buffer.chars().collect();
let len = chars.len();
if self.cursor_position >= len {
return;
}
self.break_coalescing();
self.save_undo_state();
let start_position = self.cursor_position;
let mut end_position = start_position;
// Skip the current word
while end_position < len && !chars[end_position].is_whitespace() {
end_position += 1;
}
// Skip any whitespace after the word
while end_position < len && chars[end_position].is_whitespace() {
end_position += 1;
}
// Delete from start to end position
let start_byte = self.char_index_to_byte_index(start_position);
let end_byte = self.char_index_to_byte_index(end_position);
self.buffer.drain(start_byte..end_byte);
}
/// Move cursor one position to the left
pub fn move_left(&mut self) {
if self.cursor_position > 0 {
self.break_coalescing();
self.cursor_position -= 1;
}
}
/// Move cursor one position to the right
pub fn move_right(&mut self) {
let len = self.buffer.chars().count();
if self.cursor_position < len {
self.break_coalescing();
self.cursor_position += 1;
}
}
/// Move cursor to the start of the text (Ctrl-A / Home)
pub fn move_to_start(&mut self) {
self.break_coalescing();
self.cursor_position = 0;
}
/// Move cursor to the end of the text (Ctrl-E / End)
pub fn move_to_end(&mut self) {
self.break_coalescing();
self.cursor_position = self.buffer.chars().count();
}
/// Move cursor to the start of the previous word (Ctrl/Alt + Left)
pub fn move_word_left(&mut self) {
if self.cursor_position == 0 {
return;
}
self.break_coalescing();
let chars: Vec<char> = self.buffer.chars().collect();
let mut pos = self.cursor_position;
// Skip any whitespace immediately to the left
while pos > 0 && chars[pos - 1].is_whitespace() {
pos -= 1;
}
// Skip the word characters
while pos > 0 && !chars[pos - 1].is_whitespace() {
pos -= 1;
}
self.cursor_position = pos;
}
/// Move cursor to the start of the next word (Ctrl/Alt + Right)
pub fn move_word_right(&mut self) {
let chars: Vec<char> = self.buffer.chars().collect();
let len = chars.len();
if self.cursor_position >= len {
return;
}
self.break_coalescing();
let mut pos = self.cursor_position;
// Skip the current word
while pos < len && !chars[pos].is_whitespace() {
pos += 1;
}
// Skip any whitespace
while pos < len && chars[pos].is_whitespace() {
pos += 1;
}
self.cursor_position = pos;
}
/// Handle a key event and return the appropriate action
/// This is the main entry point for key handling
pub fn handle_key(&mut self, key: KeyWithModifier) -> InputAction {
// Check for Ctrl modifiers
if key.has_modifiers(&[KeyModifier::Ctrl]) {
match key.bare_key {
BareKey::Char('a') => {
self.move_to_start();
return InputAction::Continue;
},
BareKey::Char('e') => {
self.move_to_end();
return InputAction::Continue;
},
BareKey::Char('c') => {
// Ctrl-C clears the prompt
return InputAction::Cancel;
},
BareKey::Char('z') => {
// Ctrl-Z: Undo
self.undo();
return InputAction::Continue;
},
BareKey::Char('y') => {
// Ctrl-Y: Redo
self.redo();
return InputAction::Continue;
},
BareKey::Left => {
self.move_word_left();
return InputAction::Continue;
},
BareKey::Right => {
self.move_word_right();
return InputAction::Continue;
},
BareKey::Backspace => {
self.delete_word_backward();
return InputAction::Continue;
},
BareKey::Delete => {
self.delete_word_forward();
return InputAction::Continue;
},
_ => {},
}
}
// Check for Ctrl+Shift modifiers (alternative redo: Ctrl+Shift+Z)
if key.has_modifiers(&[KeyModifier::Ctrl, KeyModifier::Shift]) {
match key.bare_key {
BareKey::Char('Z') => {
// Ctrl-Shift-Z: Redo (alternative)
self.redo();
return InputAction::Continue;
},
_ => {},
}
}
// Check for Alt modifiers
if key.has_modifiers(&[KeyModifier::Alt]) {
match key.bare_key {
BareKey::Left => {
self.move_word_left();
return InputAction::Continue;
},
BareKey::Right => {
self.move_word_right();
return InputAction::Continue;
},
BareKey::Backspace => {
self.delete_word_backward();
return InputAction::Continue;
},
BareKey::Delete => {
self.delete_word_forward();
return InputAction::Continue;
},
_ => {},
}
}
// Handle bare keys (no modifiers)
match key.bare_key {
BareKey::Enter => InputAction::Submit,
BareKey::Esc => InputAction::Cancel,
BareKey::Tab => InputAction::Complete,
BareKey::Backspace => {
self.backspace();
InputAction::Continue
},
BareKey::Delete => {
self.delete();
InputAction::Continue
},
BareKey::Left => {
self.move_left();
InputAction::Continue
},
BareKey::Right => {
self.move_right();
InputAction::Continue
},
BareKey::Home => {
self.move_to_start();
InputAction::Continue
},
BareKey::End => {
self.move_to_end();
InputAction::Continue
},
BareKey::Char(c) => {
self.insert_char(c);
InputAction::Continue
},
_ => InputAction::NoAction,
}
}
/// Helper: Convert character index to byte index
fn char_index_to_byte_index(&self, char_index: usize) -> usize {
self.buffer
.char_indices()
.nth(char_index)
.map(|(byte_idx, _)| byte_idx)
.unwrap_or(self.buffer.len())
}
/// Save current state to undo stack before making changes
fn save_undo_state(&mut self) {
if self.undo_stack.len() >= MAX_UNDO_STACK_SIZE {
self.undo_stack.remove(0);
}
self.undo_stack
.push((self.buffer.clone(), self.cursor_position));
self.redo_stack.clear();
}
/// Save state only if not coalescing with previous insert
fn save_undo_state_unless_coalescing(&mut self) {
if !self.last_edit_was_insert {
self.save_undo_state();
}
self.last_edit_was_insert = true;
}
/// Mark that a non-insert edit occurred (breaks coalescing)
fn break_coalescing(&mut self) {
self.last_edit_was_insert = false;
}
/// Undo last change
pub fn undo(&mut self) -> bool {
if let Some((buffer, cursor)) = self.undo_stack.pop() {
self.redo_stack
.push((self.buffer.clone(), self.cursor_position));
self.buffer = buffer;
self.cursor_position = cursor;
self.break_coalescing();
true
} else {
false
}
}
/// Redo last undone change
pub fn redo(&mut self) -> bool {
if let Some((buffer, cursor)) = self.redo_stack.pop() {
self.undo_stack
.push((self.buffer.clone(), self.cursor_position));
self.buffer = buffer;
self.cursor_position = cursor;
self.break_coalescing();
true
} else {
false
}
}
/// Check if undo is available
pub fn can_undo(&self) -> bool {
!self.undo_stack.is_empty()
}
/// Check if redo is available
pub fn can_redo(&self) -> bool {
!self.redo_stack.is_empty()
}
pub fn drain_text(&mut self) -> String {
self.cursor_position = 0;
self.buffer.drain(..).collect()
}
}
// run with:
// cargo test --lib --target x86_64-unknown-linux-gnu
//
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new_and_empty() {
let input = TextInput::new("hello".to_string());
assert_eq!(input.get_text(), "hello");
assert_eq!(input.get_cursor_position(), 5);
let empty = TextInput::empty();
assert_eq!(empty.get_text(), "");
assert_eq!(empty.get_cursor_position(), 0);
}
#[test]
fn test_insert_char() {
let mut input = TextInput::new("helo".to_string());
input.cursor_position = 3; // Position after "hel"
input.insert_char('l');
assert_eq!(input.get_text(), "hello");
assert_eq!(input.get_cursor_position(), 4);
}
#[test]
fn test_backspace() {
let mut input = TextInput::new("hello".to_string());
input.backspace();
assert_eq!(input.get_text(), "hell");
assert_eq!(input.get_cursor_position(), 4);
// Backspace at start does nothing
input.cursor_position = 0;
input.backspace();
assert_eq!(input.get_text(), "hell");
assert_eq!(input.get_cursor_position(), 0);
}
#[test]
fn test_delete() {
let mut input = TextInput::new("hello".to_string());
input.cursor_position = 0;
input.delete();
assert_eq!(input.get_text(), "ello");
assert_eq!(input.get_cursor_position(), 0);
// Delete at end does nothing
input.move_to_end();
input.delete();
assert_eq!(input.get_text(), "ello");
}
#[test]
fn test_cursor_movement() {
let mut input = TextInput::new("hello".to_string());
assert_eq!(input.get_cursor_position(), 5);
input.move_left();
assert_eq!(input.get_cursor_position(), 4);
input.move_right();
assert_eq!(input.get_cursor_position(), 5);
input.move_to_start();
assert_eq!(input.get_cursor_position(), 0);
input.move_to_end();
assert_eq!(input.get_cursor_position(), 5);
}
#[test]
fn test_unicode_support() {
let mut input = TextInput::new("hello 🦀 world".to_string());
assert_eq!(input.get_cursor_position(), 13); // 13 characters
input.cursor_position = 6; // After "hello "
input.insert_char('🐱');
assert_eq!(input.get_text(), "hello 🐱🦀 world");
}
#[test]
fn test_word_jump_right() {
let mut input = TextInput::new("hello world foo bar".to_string());
input.cursor_position = 0;
// Jump from start to "world"
input.move_word_right();
assert_eq!(input.get_cursor_position(), 6); // After "hello "
// Jump to "foo"
input.move_word_right();
assert_eq!(input.get_cursor_position(), 12); // After "world "
// Jump to "bar"
input.move_word_right();
assert_eq!(input.get_cursor_position(), 16); // After "foo "
// Jump to end
input.move_word_right();
assert_eq!(input.get_cursor_position(), 19); // At end
}
#[test]
// `move_word_left` lands at the start of each preceding word, ending at 0.
fn test_word_jump_left() {
    let mut input = TextInput::new("hello world foo bar".to_string());
    input.move_to_end();
    assert_eq!(input.get_cursor_position(), 19);
    // Jump back to "bar"
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 16); // Start of "bar"
    // Jump back to "foo"
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 12); // Start of "foo"
    // Jump back to "world"
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 6); // Start of "world"
    // Jump back to "hello"
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 0); // Start of "hello"
}
#[test]
// Runs of spaces are skipped as a single gap in both jump directions.
fn test_word_jump_with_multiple_spaces() {
    let mut input = TextInput::new("hello   world".to_string());
    input.cursor_position = 0;
    // Jump over multiple spaces
    input.move_word_right();
    assert_eq!(input.get_cursor_position(), 8); // After "hello   ", at start of "world"
    // Jump back should skip spaces
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 0); // Back to start of "hello"
}
#[test]
// Word jumps are no-ops at the buffer boundaries.
fn test_word_jump_boundaries() {
    let mut input = TextInput::new("test".to_string());
    // At start - word left does nothing
    input.cursor_position = 0;
    input.move_word_left();
    assert_eq!(input.get_cursor_position(), 0);
    // At end - word right does nothing
    input.move_to_end();
    let end_pos = input.get_cursor_position();
    input.move_word_right();
    assert_eq!(input.get_cursor_position(), end_pos);
}
#[test]
// On a single-line input, up maps to start-of-line and down to end-of-line.
fn test_up_down_arrows() {
    let mut input = TextInput::new("hello world".to_string());
    // Start in the middle
    input.cursor_position = 5;
    assert_eq!(input.get_cursor_position(), 5);
    // Up arrow should go to start
    input.move_to_start();
    assert_eq!(input.get_cursor_position(), 0);
    // Move back to middle
    input.cursor_position = 5;
    // Down arrow should go to end
    input.move_to_end();
    assert_eq!(input.get_cursor_position(), 11);
}
#[test]
// `delete_word_backward` removes the word before the cursor (including its
// trailing separator position) and is a no-op on an empty buffer.
fn test_delete_word_backward() {
    let mut input = TextInput::new("hello world foo".to_string());
    // Delete "foo" from end
    input.move_to_end();
    input.delete_word_backward();
    assert_eq!(input.get_text(), "hello world ");
    assert_eq!(input.get_cursor_position(), 12);
    // Delete "world "
    input.delete_word_backward();
    assert_eq!(input.get_text(), "hello ");
    assert_eq!(input.get_cursor_position(), 6);
    // Delete "hello "
    input.delete_word_backward();
    assert_eq!(input.get_text(), "");
    assert_eq!(input.get_cursor_position(), 0);
    // Delete on empty buffer does nothing
    input.delete_word_backward();
    assert_eq!(input.get_text(), "");
    assert_eq!(input.get_cursor_position(), 0);
}
#[test]
// Deleting backward from mid-word removes only the partial word behind the
// cursor, leaving the rest of the word intact.
fn test_delete_word_backward_middle() {
    let mut input = TextInput::new("hello world foo".to_string());
    // Position in middle of "world"
    input.cursor_position = 8; // After "hello wo"
    input.delete_word_backward();
    assert_eq!(input.get_text(), "hello rld foo");
    assert_eq!(input.get_cursor_position(), 6); // After "hello "
}
#[test]
// `delete_word_forward` removes the word after the cursor (plus its trailing
// space) without moving the cursor; no-op on an empty buffer.
fn test_delete_word_forward() {
    let mut input = TextInput::new("hello world foo".to_string());
    // Delete "hello " from start
    input.cursor_position = 0;
    input.delete_word_forward();
    assert_eq!(input.get_text(), "world foo");
    assert_eq!(input.get_cursor_position(), 0);
    // Delete "world "
    input.delete_word_forward();
    assert_eq!(input.get_text(), "foo");
    assert_eq!(input.get_cursor_position(), 0);
    // Delete "foo"
    input.delete_word_forward();
    assert_eq!(input.get_text(), "");
    assert_eq!(input.get_cursor_position(), 0);
    // Delete on empty buffer does nothing
    input.delete_word_forward();
    assert_eq!(input.get_text(), "");
    assert_eq!(input.get_cursor_position(), 0);
}
#[test]
// Deleting forward from mid-word removes the remainder of that word and the
// following separator, with the cursor staying put.
fn test_delete_word_forward_middle() {
    let mut input = TextInput::new("hello world foo".to_string());
    // Position in middle of "world"
    input.cursor_position = 8; // After "hello wo"
    input.delete_word_forward();
    assert_eq!(input.get_text(), "hello wofoo");
    assert_eq!(input.get_cursor_position(), 8); // Same position, text deleted forward
}
#[test]
// Forward word deletion consumes the whole run of separating spaces, not
// just one.
fn test_delete_word_with_multiple_spaces() {
    let mut input = TextInput::new("hello   world".to_string());
    // Delete forward includes trailing spaces
    input.cursor_position = 0;
    input.delete_word_forward();
    assert_eq!(input.get_text(), "world");
    assert_eq!(input.get_cursor_position(), 0);
}
#[test]
// Consecutive inserts coalesce into one undo entry; undo removes them all at
// once and redo restores both text and cursor.
fn test_undo_redo_basic() {
    let mut input = TextInput::empty();
    // Type "hello"
    input.insert_char('h');
    input.insert_char('e');
    input.insert_char('l');
    input.insert_char('l');
    input.insert_char('o');
    assert_eq!(input.get_text(), "hello");
    // Undo should remove all characters (coalesced into one undo entry)
    assert!(input.can_undo());
    assert!(input.undo());
    assert_eq!(input.get_text(), "");
    assert_eq!(input.get_cursor_position(), 0);
    // Redo should restore "hello"
    assert!(input.can_redo());
    assert!(input.redo());
    assert_eq!(input.get_text(), "hello");
    assert_eq!(input.get_cursor_position(), 5);
}
#[test]
// A cursor movement splits the insert stream into two separate undo groups.
fn test_undo_coalescing_breaks_on_cursor_move() {
    let mut input = TextInput::empty();
    // Type "he"
    input.insert_char('h');
    input.insert_char('e');
    // Move cursor to start (breaks coalescing)
    input.move_to_start();
    // Type "llo"
    input.insert_char('l');
    input.insert_char('l');
    input.insert_char('o');
    assert_eq!(input.get_text(), "llohe");
    // First undo removes "llo" (second coalesced group)
    input.undo();
    assert_eq!(input.get_text(), "he");
    // Second undo removes "he" (first coalesced group)
    input.undo();
    assert_eq!(input.get_text(), "");
}
#[test]
// A backspace is undoable: undo restores the deleted character and cursor.
fn test_undo_backspace() {
    let mut input = TextInput::new("hello".to_string());
    // Backspace once
    input.backspace();
    assert_eq!(input.get_text(), "hell");
    // Undo should restore "hello"
    input.undo();
    assert_eq!(input.get_text(), "hello");
    assert_eq!(input.get_cursor_position(), 5);
}
#[test]
// A forward delete is undoable: undo restores the character without moving
// the cursor.
fn test_undo_delete() {
    let mut input = TextInput::new("hello".to_string());
    input.cursor_position = 0;
    // Delete first character
    input.delete();
    assert_eq!(input.get_text(), "ello");
    // Undo should restore "hello"
    input.undo();
    assert_eq!(input.get_text(), "hello");
    assert_eq!(input.get_cursor_position(), 0);
}
#[test]
// A word deletion is a single undoable operation.
fn test_undo_word_delete() {
    let mut input = TextInput::new("hello world".to_string());
    // Delete "world" backward
    input.delete_word_backward();
    assert_eq!(input.get_text(), "hello ");
    // Undo should restore "hello world"
    input.undo();
    assert_eq!(input.get_text(), "hello world");
    assert_eq!(input.get_cursor_position(), 11);
}
#[test]
// Clearing the whole buffer is undoable.
fn test_undo_clear() {
    let mut input = TextInput::new("hello world".to_string());
    // Clear the buffer
    input.clear();
    assert_eq!(input.get_text(), "");
    // Undo should restore the text
    input.undo();
    assert_eq!(input.get_text(), "hello world");
}
#[test]
// Wholesale text replacement via `set_text` is undoable.
fn test_undo_set_text() {
    let mut input = TextInput::new("hello".to_string());
    // Replace with new text
    input.set_text("goodbye".to_string());
    assert_eq!(input.get_text(), "goodbye");
    // Undo should restore "hello"
    input.undo();
    assert_eq!(input.get_text(), "hello");
}
#[test]
// Any new edit after an undo invalidates the redo stack.
fn test_redo_clears_on_new_edit() {
    let mut input = TextInput::empty();
    // Type "hello"
    input.insert_char('h');
    input.insert_char('e');
    input.insert_char('l');
    input.insert_char('l');
    input.insert_char('o');
    // Undo
    input.undo();
    assert_eq!(input.get_text(), "");
    assert!(input.can_redo());
    // Make a new edit (should clear redo stack)
    input.insert_char('x');
    assert!(!input.can_redo());
}
#[test]
// Two coalesced edit groups undo and redo in reverse / original order.
fn test_multiple_undo_redo() {
    let mut input = TextInput::empty();
    // First edit: type "hello"
    for c in "hello".chars() {
        input.insert_char(c);
    }
    // Break coalescing
    input.move_left();
    // Second edit: type "world"
    for c in "world".chars() {
        input.insert_char(c);
    }
    assert_eq!(input.get_text(), "hellworldo");
    // Undo "world"
    input.undo();
    assert_eq!(input.get_text(), "hello");
    // Undo "hello"
    input.undo();
    assert_eq!(input.get_text(), "");
    // Redo "hello"
    input.redo();
    assert_eq!(input.get_text(), "hello");
    // Redo "world"
    input.redo();
    assert_eq!(input.get_text(), "hellworldo");
}
#[test]
// The undo history is bounded: after more than 100 distinct edits, at most
// 100 undo entries remain.
fn test_undo_stack_limit() {
    let mut input = TextInput::empty();
    // Perform 102 separate edits (breaking coalescing each time)
    for _i in 0..102 {
        input.backspace(); // Break coalescing
        input.insert_char('x');
    }
    // Should have at most 100 undo entries
    let mut undo_count = 0;
    while input.undo() {
        undo_count += 1;
    }
    // We should have 100 undo entries (the stack limit)
    // Plus the final state change from the last coalescing break
    assert!(
        undo_count <= 100,
        "Undo count should be at most 100, got {}",
        undo_count
    );
}
#[test]
// Undo/redo report failure (return false) when their stacks are empty.
fn test_undo_redo_empty_stack() {
    let mut input = TextInput::empty();
    // Undo on empty stack should return false
    assert!(!input.can_undo());
    assert!(!input.undo());
    // Redo on empty stack should return false
    assert!(!input.can_redo());
    assert!(!input.redo());
}
#[test]
// Undo restores the cursor to where it was before the edit, not just the text.
fn test_undo_restores_cursor_position() {
    let mut input = TextInput::new("hello world".to_string());
    // Move cursor to position 5 (before " world")
    input.cursor_position = 5;
    // Insert a space
    input.insert_char(' ');
    assert_eq!(input.get_text(), "hello  world");
    assert_eq!(input.get_cursor_position(), 6);
    // Undo should restore both text and cursor position
    input.undo();
    assert_eq!(input.get_text(), "hello world");
    assert_eq!(input.get_cursor_position(), 5);
}
#[test]
// Uninterrupted typing forms exactly one undo entry.
fn test_coalescing_consecutive_inserts() {
    let mut input = TextInput::empty();
    // Type several characters
    input.insert_char('a');
    input.insert_char('b');
    input.insert_char('c');
    assert_eq!(input.get_text(), "abc");
    // Single undo should remove all three (they were coalesced)
    input.undo();
    assert_eq!(input.get_text(), "");
    // No more undo available
    assert!(!input.can_undo());
}
#[test]
// A backspace ends the current insert group, producing three separate undo
// steps: the later insert, the backspace, and the earlier insert group.
fn test_backspace_breaks_coalescing() {
    let mut input = TextInput::empty();
    // Type "ab"
    input.insert_char('a');
    input.insert_char('b');
    // Backspace
    input.backspace();
    assert_eq!(input.get_text(), "a");
    // Type "c"
    input.insert_char('c');
    assert_eq!(input.get_text(), "ac");
    // Undo should remove just "c"
    input.undo();
    assert_eq!(input.get_text(), "a");
    // Undo should remove backspace operation
    input.undo();
    assert_eq!(input.get_text(), "ab");
    // Undo should remove "ab"
    input.undo();
    assert_eq!(input.get_text(), "");
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/ui/fuzzy_complete.rs | default-plugins/sequence/src/ui/fuzzy_complete.rs | use fuzzy_matcher::skim::SkimMatcherV2;
use fuzzy_matcher::FuzzyMatcher;
use std::collections::BTreeMap;
use std::fs;
use std::path::PathBuf;
/// Result of a fuzzy completion operation
#[derive(Debug, Clone)]
pub struct CompletionResult {
    /// Full replacement text for the token being completed.
    pub completed_text: String,
    /// Fuzzy-match score of the chosen candidate (higher is better).
    pub score: i64,
    /// True when the completed entry is a directory (paths only; always false for commands).
    pub is_directory: bool,
    /// True if this is the shortest of multiple matches with same prefix;
    /// used to suppress the auto-appended space/slash suffix.
    pub is_prefix_completion: bool,
}
/// Type of completion
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum CompletionType {
    /// An executable name from the PATH.
    Command,
    /// A file-system path (relative, absolute, or `~/`-prefixed).
    Path,
}
/// Fuzzy match executables (commands) from the PATH.
///
/// Returns the best-scoring executable name (ties broken by shorter name),
/// or `None` when the query is empty or nothing matches.
pub fn fuzzy_complete_command(
    query: &str,
    executables: &BTreeMap<String, PathBuf>,
) -> Option<CompletionResult> {
    if query.is_empty() {
        return None;
    }
    let matcher = SkimMatcherV2::default().ignore_case();
    // Score every executable name against the query, keeping only matches.
    let mut all_matches: Vec<(String, i64)> = executables
        .keys()
        .filter_map(|executable_name| {
            matcher
                .fuzzy_indices(executable_name, query)
                .map(|(score, _indices)| (executable_name.clone(), score))
        })
        .collect();
    if all_matches.is_empty() {
        return None;
    }
    // Best score first; ties broken by the shorter name.
    all_matches.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.len().cmp(&b.0.len())));
    let (best_name, best_score) = &all_matches[0];
    // Multiple candidates extending the winner -> treat as prefix completion.
    let is_prefix_completion = check_for_prefix_matches(&all_matches, best_name);
    Some(CompletionResult {
        completed_text: best_name.clone(),
        score: *best_score,
        is_directory: false, // Commands are not directories
        is_prefix_completion,
    })
}
/// Fuzzy match paths (directories and files) relative to the current working directory.
///
/// The query is split at its last `/` into a base directory and a search
/// prefix; directory entries of the base are fuzzy-matched against the
/// prefix, and the winning name is spliced back into the query's directory
/// part. Returns `None` for an empty query, an unreadable directory, a
/// relative query with no cwd, or when nothing matches.
pub fn fuzzy_complete_path(query: &str, cwd: Option<&PathBuf>) -> Option<CompletionResult> {
    if query.is_empty() {
        return None;
    }
    let matcher = SkimMatcherV2::default().ignore_case();
    // Determine the base directory to search in
    let (base_dir, search_prefix) = if query.starts_with('/') {
        // Absolute path
        let parts: Vec<&str> = query.rsplitn(2, '/').collect();
        if parts.len() == 2 {
            // Has a directory component
            let dir = parts[1];
            let prefix = parts[0];
            (PathBuf::from(dir), prefix.to_string())
        } else {
            // Just "/"
            (PathBuf::from("/"), String::new())
        }
    } else if query.starts_with("~/") {
        // Home directory
        let home_dir = std::env::var("HOME").ok()?;
        let query_without_tilde = &query[2..];
        let parts: Vec<&str> = query_without_tilde.rsplitn(2, '/').collect();
        if parts.len() == 2 {
            let dir = PathBuf::from(&home_dir).join(parts[1]);
            let prefix = parts[0];
            (dir, prefix.to_string())
        } else {
            // Just "~/" or "~/something"
            (PathBuf::from(home_dir), query_without_tilde.to_string())
        }
    } else {
        // Relative path; bail out when no cwd is known.
        let base = cwd?;
        let parts: Vec<&str> = query.rsplitn(2, '/').collect();
        if parts.len() == 2 {
            // Has a directory component
            let dir = base.join(parts[1]);
            let prefix = parts[0];
            (dir, prefix.to_string())
        } else {
            // Just a name in the current directory
            (base.clone(), query.to_string())
        }
    };
    // Convert to host path.
    // NOTE(review): assumes the host filesystem is mounted at /host inside the
    // plugin sandbox — confirm against the plugin's configuration.
    let host_base_dir =
        PathBuf::from("/host").join(base_dir.strip_prefix("/").unwrap_or(&base_dir));
    // Read directory entries
    let entries = fs::read_dir(&host_base_dir).ok()?;
    // Collect all matches with their scores and directory status
    let mut all_matches: Vec<(String, i64, bool)> = Vec::new();
    for entry in entries.flatten() {
        if let Some(name) = entry.file_name().to_str() {
            // Skip hidden files unless the query starts with a dot
            if name.starts_with('.') && !search_prefix.starts_with('.') {
                continue;
            }
            if let Some((score, _indices)) = matcher.fuzzy_indices(name, &search_prefix) {
                // Check if this entry is a directory
                let is_dir = entry.metadata().ok().map(|m| m.is_dir()).unwrap_or(false);
                all_matches.push((name.to_string(), score, is_dir));
            }
        }
    }
    if all_matches.is_empty() {
        return None;
    }
    // Sort by score (highest first), then by length (shortest first)
    all_matches.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.len().cmp(&b.0.len())));
    // Get the best match
    let (name, score, is_directory) = all_matches[0].clone();
    // Check if there are multiple matches that share the same prefix
    // (drop the directory flag so the shared helper can be reused).
    let name_matches: Vec<(String, i64)> = all_matches
        .iter()
        .map(|(n, s, _)| (n.clone(), *s))
        .collect();
    let is_prefix_completion = check_for_prefix_matches(&name_matches, &name);
    // Reconstruct the full path: splice the winning name back onto the
    // query's original directory part, preserving its /, ~/ or relative form.
    let completed_text = if query.starts_with('/') {
        // Absolute path
        let dir_part = query.rsplitn(2, '/').nth(1).unwrap_or("");
        if dir_part.is_empty() {
            format!("/{}", name)
        } else {
            format!("{}/{}", dir_part, name)
        }
    } else if query.starts_with("~/") {
        // Home directory
        let query_without_tilde = &query[2..];
        let dir_part = query_without_tilde.rsplitn(2, '/').nth(1).unwrap_or("");
        if dir_part.is_empty() {
            format!("~/{}", name)
        } else {
            format!("~/{}/{}", dir_part, name)
        }
    } else {
        // Relative path
        let dir_part = query.rsplitn(2, '/').nth(1).unwrap_or("");
        if dir_part.is_empty() {
            name.clone()
        } else {
            format!("{}/{}", dir_part, name)
        }
    };
    Some(CompletionResult {
        completed_text,
        score,
        is_directory,
        is_prefix_completion,
    })
}
/// Check if there are multiple matches that start with the best match's name.
/// Returns true if the best match is a strict prefix of at least one other match.
///
/// Used to suppress the auto-appended suffix (space/slash) when the accepted
/// completion could still be narrowed further.
fn check_for_prefix_matches(matches: &[(String, i64)], best_match: &str) -> bool {
    if matches.len() <= 1 {
        return false;
    }
    // `any` short-circuits on the first strict extension of `best_match`,
    // instead of counting every one (`filter(..).count() > 0`).
    matches
        .iter()
        .any(|(name, _)| name.starts_with(best_match) && name != best_match)
}
/// Perform fuzzy completion on the current input.
/// Returns the best completion (command or path) with the appropriate suffix
/// (space after commands, slash after directories, nothing after prefix
/// completions), spliced over the input's last token.
pub fn fuzzy_complete(
    input: &str,
    executables: &BTreeMap<String, PathBuf>,
    cwd: Option<&PathBuf>,
) -> Option<String> {
    // If input is empty, nothing to complete
    if input.is_empty() {
        return None;
    }
    // Complete the entire input if it's a single word, or the last
    // space-separated token if there are multiple words.
    let tokens: Vec<&str> = input.split_whitespace().collect();
    let to_complete = if tokens.is_empty() {
        input
    } else {
        tokens.last().unwrap()
    };
    // Get both command and path completions.
    let command_result = fuzzy_complete_command(to_complete, executables);
    let path_result = fuzzy_complete_path(to_complete, cwd);
    // Pick the winner; when both matched, the higher score wins.
    let (completed_text, completion_type, is_directory, is_prefix) =
        match (command_result, path_result) {
            (Some(cmd), Some(path)) => {
                if cmd.score >= path.score {
                    (
                        cmd.completed_text,
                        CompletionType::Command,
                        false,
                        cmd.is_prefix_completion,
                    )
                } else {
                    (
                        path.completed_text,
                        CompletionType::Path,
                        path.is_directory,
                        path.is_prefix_completion,
                    )
                }
            },
            (Some(cmd), None) => (
                cmd.completed_text,
                CompletionType::Command,
                false,
                cmd.is_prefix_completion,
            ),
            (None, Some(path)) => (
                path.completed_text,
                CompletionType::Path,
                path.is_directory,
                path.is_prefix_completion,
            ),
            (None, None) => return None,
        };
    // Single source of truth for the trailing space/slash; previously the
    // single-source arms re-implemented this logic inline.
    let suffix = get_completion_suffix(completion_type, is_directory, is_prefix);
    let completed = format!("{}{}", completed_text, suffix);
    // Replace the last token with the completion.
    Some(replace_last_token(input, &completed))
}
/// Get the appropriate suffix for a completion: nothing for prefix
/// completions, a space after commands, a slash after directories, and
/// nothing after plain files.
fn get_completion_suffix(
    completion_type: CompletionType,
    is_directory: bool,
    is_prefix_completion: bool,
) -> &'static str {
    match (is_prefix_completion, completion_type, is_directory) {
        // Prefix completions stay suffix-free so the user can keep narrowing.
        (true, _, _) => "",
        (false, CompletionType::Command, _) => " ",
        (false, CompletionType::Path, true) => "/",
        (false, CompletionType::Path, false) => "",
    }
}
/// Replace the last whitespace-separated token in the input with the
/// completion. Note that interior whitespace runs are normalized to single
/// spaces as a side effect of tokenizing.
fn replace_last_token(input: &str, completion: &str) -> String {
    let tokens: Vec<&str> = input.split_whitespace().collect();
    match tokens.split_last() {
        // Empty input or a single token: the completion replaces everything.
        None | Some((_, [])) => completion.to_string(),
        // Otherwise keep every token but the last, then append the completion.
        Some((_, leading)) => format!("{} {}", leading.join(" "), completion),
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
/// Compute the scrolling viewport over the command list.
///
/// Returns `(offset, visible_count, hidden_above, hidden_below)`. The
/// viewport is centered on the selected command (falling back to the running
/// one) and clamped to the list's boundaries.
pub fn calculate_viewport(
    total_commands: usize,
    max_visible: usize,
    selected_index: Option<usize>,
    running_index: usize,
) -> (usize, usize, usize, usize) {
    // Everything fits: show all rows, nothing hidden.
    if total_commands <= max_visible {
        return (0, total_commands, 0, 0);
    }
    let focus_index = selected_index.unwrap_or(running_index);
    let half_visible = max_visible / 2;
    let max_offset = total_commands.saturating_sub(max_visible);
    let raw_offset = if focus_index < half_visible {
        // Focus near the top: pin the viewport to the start.
        0
    } else if focus_index >= total_commands.saturating_sub(half_visible) {
        // Focus near the bottom: pin the viewport to the end.
        max_offset
    } else {
        // Otherwise keep the focused row roughly centered.
        focus_index.saturating_sub(half_visible)
    };
    let offset = raw_offset.min(max_offset);
    let visible_count = max_visible.min(total_commands.saturating_sub(offset));
    (
        offset,
        visible_count,
        offset,
        total_commands.saturating_sub(offset + visible_count),
    )
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/ui/mod.rs | default-plugins/sequence/src/ui/mod.rs | pub mod components;
pub mod fuzzy_complete;
pub mod layout_calculations;
pub mod text_input;
pub mod truncation;
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/sequence/src/ui/components.rs | default-plugins/sequence/src/ui/components.rs | use crate::path_formatting::format_cwd;
use crate::state::{CommandStatus, State};
use crate::ui::truncation::{calculate_available_cmd_width, truncate_middle};
use std::path::PathBuf;
use zellij_tile::prelude::*;
// Braille spinner frames cycled (via `get_spinner_frame`) while a command runs.
const SPINNER_FRAMES: &[&str] = &["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"];
// Context-specific keybinding help lines; `select_help_text` picks the one
// matching the current run/selection/editing state.
const HELP_RUNNING_WITH_SELECTION: &str = "<Ctrl c> - interrupt, <Enter> - run from selected";
const HELP_RUNNING_NO_SELECTION: &str = "<Ctrl c> - interrupt, <↓↑> - navigate";
const HELP_STOPPED_WITH_SELECTION: &str =
    "<Ctrl w> - close all, <Enter> - run from selected, <e> - edit";
const HELP_STOPPED_NO_SELECTION: &str =
    "<Ctrl w> - close all, <↓↑> - navigate, <Enter> - run from first";
const HELP_ONE_PENDING_COMMAND: &str = "<Enter> - run, <Ctrl Enter> - add command";
const HELP_ALL_COMMANDS_PENDING: &str =
    "<Enter> - run, <Ctrl Enter> - add command, <↓↑> - navigate";
const HELP_ALL_COMMANDS_PENDING_WITH_SELECTION: &str =
    "<Enter> - run, <Ctrl Enter> - add command, <↓↑> - navigate, <e> - edit selected";
const HELP_EDITING_FIRST_LINE: &str =
    "<Enter> - accept, <Ctrl Enter> - add command, <↓↑> - navigate";
/// Pick the static help line matching the plugin's current state
/// (all-pending / running / stopped, with or without a selection, editing or not).
fn select_help_text(sequence: &State) -> &'static str {
    // A command is actively running right now.
    let is_running = sequence
        .execution
        .all_commands
        .iter()
        .any(|command| matches!(command.get_status(), CommandStatus::Running(_)));
    // Nothing has been started yet: every command is still pending.
    let all_pending = sequence
        .execution
        .all_commands
        .iter()
        .all(|command| matches!(command.get_status(), CommandStatus::Pending));
    let has_selection = sequence.selection.current_selected_command_index.is_some();
    let is_editing = sequence.editing.editing_input.is_some();
    if all_pending && !sequence.execution.is_running {
        if sequence.execution.all_commands.len() == 1 {
            // NOTE(review): with a single pending command this branch wins
            // even while editing — confirm that is intentional.
            HELP_ONE_PENDING_COMMAND
        } else if has_selection && !is_editing {
            HELP_ALL_COMMANDS_PENDING_WITH_SELECTION
        } else if is_editing {
            HELP_EDITING_FIRST_LINE
        } else {
            HELP_ALL_COMMANDS_PENDING
        }
    } else if is_running {
        if has_selection {
            HELP_RUNNING_WITH_SELECTION
        } else {
            HELP_RUNNING_NO_SELECTION
        }
    } else {
        // Sequence has been run and is now stopped (exited or interrupted).
        if has_selection && !is_editing {
            HELP_STOPPED_WITH_SELECTION
        } else if is_editing {
            HELP_EDITING_FIRST_LINE
        } else {
            HELP_STOPPED_NO_SELECTION
        }
    }
}
/// Highlight every known keybinding token that appears in a help line.
fn style_help_text(text: &str) -> Text {
    let highlighted_keys = [
        "<Ctrl c>",
        "<Ctrl w>",
        "<↓↑>",
        "<Esc>",
        "<Enter>",
        "<Ctrl Enter>",
        "<e>",
    ];
    highlighted_keys
        .iter()
        .copied()
        .fold(Text::new(text), |styled, key| styled.color_substring(3, key))
}
/// Build the help line(s) shown at the bottom of the UI.
///
/// Returns `(first_line, first_line_width, Option<(second_line, second_line_width)>)`.
/// While editing, the second line is always the chaining/`cd` hint; otherwise
/// a second line only appears when the help text had to be split to fit
/// `max_width`.
pub fn render_help_lines(
    sequence: &State,
    max_width: Option<usize>,
) -> (Text, usize, Option<(Text, usize)>) {
    let is_editing = sequence.editing.editing_input.is_some();
    if is_editing {
        let help_text = select_help_text(sequence);
        // Truncate the keybinding line to the available width, if one was given.
        let (truncated_text, help_len) = if let Some(width) = max_width {
            truncate_help_line(help_text, width)
        } else {
            (help_text.to_string(), help_text.chars().count())
        };
        let first_line = (style_help_text(&truncated_text).unbold_all(), help_len);
        let editing_help_text = "Navigate with cd, chain with ||, &&, ; ";
        let editing_help = Text::new(editing_help_text)
            .color_substring(3, "cd")
            .color_substring(3, "||")
            .color_substring(3, "&&")
            .color_substring(3, ";")
            .unbold_all();
        // NOTE(review): width measured in bytes (`len`) while the other lines
        // use `chars().count()` — the hint is ASCII so they agree; confirm if
        // this string ever gains non-ASCII characters.
        let editing_len = editing_help_text.len();
        return (
            first_line.0,
            first_line.1,
            Some((editing_help, editing_len)),
        );
    }
    let help_text = select_help_text(sequence);
    if let Some(width) = max_width {
        // Prefer splitting the help over two lines to truncating it.
        if let Some((first_line, second_line)) = split_help_line(help_text, width) {
            let first_len = first_line.chars().count();
            let first_styled = style_help_text(&first_line).unbold_all();
            let second_line_width = second_line.chars().count();
            // The second half may still be too wide on its own; truncate it.
            let (second_styled, second_len) = if second_line_width > width {
                let (truncated, len) = truncate_help_line(&second_line, width);
                (style_help_text(&truncated).unbold_all(), len)
            } else {
                (
                    style_help_text(&second_line).unbold_all(),
                    second_line_width,
                )
            };
            return (first_styled, first_len, Some((second_styled, second_len)));
        }
        // No clean two-line split: fall back to truncating a single line.
        let (truncated_text, help_len) = truncate_help_line(help_text, width);
        (
            style_help_text(&truncated_text).unbold_all(),
            help_len,
            None,
        )
    } else {
        // No width constraint: render the full help line as-is.
        let help_len = help_text.chars().count();
        (style_help_text(help_text).unbold_all(), help_len, None)
    }
}
/// Try to break an over-wide help line into two lines, both fitting
/// `max_width`, by splitting the keybinding list at some boundary.
/// Returns `None` when the text already fits, cannot be parsed, or no split
/// point satisfies the width constraint.
fn split_help_line(help_text: &str, max_width: usize) -> Option<(String, String)> {
    if help_text.chars().count() <= max_width {
        return None;
    }
    let keybindings = parse_help_keybindings(help_text);
    if keybindings.is_empty() {
        return None;
    }
    // Re-render a slice of bindings back into "key - action, key - action" form.
    let render = |bindings: &[(&str, &str, &str)]| -> String {
        bindings
            .iter()
            .map(|(key, _, action)| format!("{} - {}", key, action))
            .collect::<Vec<_>>()
            .join(", ")
    };
    // Take the first split point where both halves fit.
    (1..keybindings.len()).find_map(|split_point| {
        let (head, tail) = keybindings.split_at(split_point);
        let first_line = render(head);
        let second_line = render(tail);
        if first_line.chars().count() <= max_width && second_line.chars().count() <= max_width {
            Some((first_line, second_line))
        } else {
            None
        }
    })
}
/// Build the folder column cell: the formatted cwd (or "~" when unknown)
/// followed by " >", returning both the styled cell and the raw string for
/// width calculations.
fn folder_cell(cwd: &Option<PathBuf>) -> (Text, String) {
    let cwd_display = match cwd {
        Some(path) => format_cwd(path),
        None => "~".to_string(),
    };
    let folder_display = format!("{} >", cwd_display);
    // Only the cwd portion is colored; the trailing " >" stays plain.
    let styled = Text::new(&folder_display).color_range(0, 0..cwd_display.len());
    (styled, folder_display)
}
/// Build the command column cell; an empty command gets a one-space
/// placeholder so the row still renders.
fn command_cell(text: &str) -> Text {
    match text.is_empty() {
        true => Text::new(" "),
        false => Text::new(text),
    }
}
/// Build the chain-operator column cell, highlighting non-empty labels.
fn chain_cell(chain_type: &crate::state::ChainType) -> Text {
    let label = chain_type_to_str(chain_type);
    if label.is_empty() {
        Text::new(label)
    } else {
        Text::new(label).color_range(1, 0..label.len())
    }
}
/// Map an ever-increasing frame counter onto a spinner glyph, wrapping around
/// so any value indexes a valid frame.
fn get_spinner_frame(frame: usize) -> &'static str {
    let frame_count = SPINNER_FRAMES.len();
    SPINNER_FRAMES[frame % frame_count]
}
/// Render a command status as the bracketed text shown in the status column.
/// Pending commands get a single-space placeholder so the column keeps width.
pub fn format_status_text(status: &CommandStatus, spinner_frame: usize) -> String {
    match status {
        CommandStatus::Pending => " ".to_string(),
        CommandStatus::Running(_) => {
            format!("[RUNNING] {}", get_spinner_frame(spinner_frame))
        },
        CommandStatus::Interrupted(_) => "[INTERRUPTED]".to_string(),
        CommandStatus::Exited(Some(code), _) => format!("[EXIT CODE: {}]", code),
        CommandStatus::Exited(None, _) => "[EXITED]".to_string(),
    }
}
/// Colorize a rendered status string: running -> highlight, interrupted ->
/// error, exit code 0 -> success, non-zero exit code -> error; anything else
/// is left unstyled. `status_str` must be the output of `format_status_text`
/// for the same `status`.
fn apply_status_color(text: Text, status: &CommandStatus, status_str: &str) -> Text {
    match status {
        CommandStatus::Running(_) => text.color_range(3, 0..status_str.len()),
        CommandStatus::Interrupted(_) => text.error_color_range(0..status_str.len()),
        CommandStatus::Exited(Some(0), _) => {
            // 12 == "[EXIT CODE: ".len(); color only the numeric code,
            // excluding the trailing ']'.
            let number_start = 12;
            let number_end = status_str.len() - 1;
            text.success_color_range(number_start..number_end)
        },
        CommandStatus::Exited(Some(_), _) => {
            // Same range as above, but in the error color for non-zero codes.
            let number_start = 12;
            let number_end = status_str.len() - 1;
            text.error_color_range(number_start..number_end)
        },
        _ => text,
    }
}
/// Build the status column cell: format the status text, then apply its color.
fn status_cell(status: &CommandStatus, spinner_frame: usize) -> Text {
    let label = format_status_text(status, spinner_frame);
    apply_status_color(Text::new(&label), status, &label)
}
/// Apply selection styling to every cell of a row when the row is selected.
fn apply_row_styles(cells: Vec<Text>, is_selected: bool) -> Vec<Text> {
    if is_selected {
        cells.into_iter().map(|cell| cell.selected()).collect()
    } else {
        cells
    }
}
/// Build the dimmed overflow-indicator cell, selected-styled when its row is.
fn overflow_cell(indicator: &str, is_selected: bool) -> Text {
    let dimmed = Text::new(indicator).dim_all();
    if is_selected {
        dimmed.selected()
    } else {
        dimmed
    }
}
/// Assemble one table row (folder, command, chain marker, status, plus an
/// optional overflow marker) for a command, together with the row's total
/// printed width in characters.
fn command_sequence_row(
    state: &State,
    cwd: &Option<PathBuf>,
    truncated_cmd_text: &str,
    chain_type: &crate::state::ChainType,
    status: &crate::state::CommandStatus,
    is_selected: bool,
    overflow_indicator: Option<String>,
) -> (Vec<Text>, usize) {
    let (folder_text, folder_display) = folder_cell(cwd);
    let mut cells = vec![
        folder_text,
        command_cell(truncated_cmd_text),
        chain_cell(chain_type),
        status_cell(status, state.layout.spinner_frame),
    ];
    cells = apply_row_styles(cells, is_selected);
    // Mirror `command_cell`'s empty-string placeholder so the width math
    // matches what is actually rendered.
    let truncated_cmd_text = if truncated_cmd_text.is_empty() {
        " "
    } else {
        truncated_cmd_text
    };
    // + 3 accounts for the padding between the four base cells.
    let mut row_length = folder_display.chars().count()
        + truncated_cmd_text.chars().count()
        + chain_type_to_str(chain_type).chars().count()
        + format_status_text(status, state.layout.spinner_frame)
            .chars()
            .count()
        + 3;
    if let Some(indicator) = overflow_indicator {
        // The extra cell adds its own width plus one more padding column.
        let indicator_len = indicator.chars().count();
        cells.push(overflow_cell(&indicator, is_selected));
        row_length += indicator_len + 1;
    }
    (cells, row_length)
}
/// Create a table with a blank header row: four columns normally, five when
/// an overflow-indicator column is needed.
pub fn build_table_header(has_overflow: bool) -> Table {
    let column_count = if has_overflow { 5 } else { 4 };
    Table::new().add_row(vec![" "; column_count])
}
/// Decide which overflow marker (if any) goes in a visible row's last column:
/// the first visible row shows "[+N]" for rows hidden above, the last shows
/// "[+N]" for rows hidden below, other rows get a blank placeholder while
/// anything is hidden, and `None` means the overflow column is absent.
fn calculate_overflow_indicator(
    visible_idx: usize,
    visible_count: usize,
    hidden_above: usize,
    hidden_below: usize,
) -> Option<String> {
    if visible_idx == 0 && hidden_above > 0 {
        Some(format!("[+{}]", hidden_above))
    } else if visible_idx == visible_count.saturating_sub(1) && hidden_below > 0 {
        Some(format!("[+{}]", hidden_below))
    } else if hidden_above > 0 || hidden_below > 0 {
        // Blank placeholder keeps column widths stable; was `format!(" ")`,
        // which clippy flags as `useless_format`.
        Some(" ".to_string())
    } else {
        None
    }
}
/// For the command at `index`, decide whether its row is inside the visible
/// viewport and, if so, compute its overflow indicator and the width left
/// over for the command text. Returns `None` when the row is scrolled out of
/// view.
pub fn calculate_row_layout_info(
    index: usize,
    offset: usize,
    visible_count: usize,
    hidden_above: usize,
    hidden_below: usize,
    cols: usize,
    folder_width: usize,
    max_chain_width: usize,
    max_status_width: usize,
) -> Option<(Option<String>, usize)> {
    // Row lies outside the [offset, offset + visible_count) viewport window.
    if index < offset || index >= offset + visible_count {
        return None;
    }
    let visible_idx = index.saturating_sub(offset);
    let overflow_indicator =
        calculate_overflow_indicator(visible_idx, visible_count, hidden_above, hidden_below);
    // Whatever horizontal space remains after the fixed-width columns goes
    // to the command text.
    let available_cmd_width = calculate_available_cmd_width(
        cols,
        folder_width,
        overflow_indicator.as_ref(),
        max_chain_width,
        max_status_width,
    );
    Some((overflow_indicator, available_cmd_width))
}
/// Append the row for the command at `index` to `table`, or return `table`
/// unchanged when the index is invalid or the row is scrolled out of view.
///
/// While this row is being edited, the live editor buffer (middle-truncated
/// around the cursor) is rendered instead of the stored command text.
pub fn add_command_row(
    table: Table,
    state: &State,
    index: usize,
    offset: usize,
    visible_count: usize,
    hidden_above: usize,
    hidden_below: usize,
) -> Table {
    let command = match state.execution.all_commands.get(index) {
        Some(cmd) => cmd,
        None => return table,
    };
    let cmd_text = &command.get_text();
    let chain_type = &command.get_chain_type();
    let status = &command.get_status();
    // Per-command cwd wins; otherwise fall back to the plugin-wide cwd.
    let command_cwd = command.get_cwd().or_else(|| state.cwd.clone());
    // Default to a classic 80-column width when the pane size is unknown.
    let cols = state.own_columns.unwrap_or(80);
    // The folder column is sized to the longest cwd so all rows line up.
    let longest_cwd_display = state.execution.longest_cwd_display(&state.cwd);
    let folder_display = format!("{} >", longest_cwd_display);
    let folder_width = folder_display.chars().count();
    let (max_chain_width, max_status_width) =
        calculate_max_widths(&state.execution.all_commands, state.layout.spinner_frame);
    let Some((overflow_indicator, available_cmd_width)) = calculate_row_layout_info(
        index,
        offset,
        visible_count,
        hidden_above,
        hidden_below,
        cols,
        folder_width,
        max_chain_width,
        max_status_width,
    ) else {
        // Row is outside the viewport: nothing to add.
        return table;
    };
    if let Some(text_input) = &state.editing.editing_input {
        if state.selection.current_selected_command_index == Some(index) {
            // Editing this row: show the editor buffer, keeping the cursor
            // inside the truncated window, with a Pending status and no
            // selection highlight.
            let (truncated_editing_text, _) = truncate_middle(
                text_input.get_text(),
                available_cmd_width,
                Some(text_input.cursor_position()),
            );
            let (row, _) = command_sequence_row(
                state,
                &command_cwd,
                &truncated_editing_text,
                chain_type,
                &CommandStatus::Pending,
                false,
                overflow_indicator,
            );
            return table.add_styled_row(row);
        }
    }
    let truncated_cmd_text = truncate_middle(cmd_text, available_cmd_width, None).0;
    let (row, _) = command_sequence_row(
        state,
        &command_cwd,
        &truncated_cmd_text,
        chain_type,
        status,
        state.selection.current_selected_command_index == Some(index),
        overflow_indicator,
    );
    table.add_styled_row(row)
}
/// Compute the widest chain label and widest status text across all commands
/// (in characters), so those columns can be sized uniformly.
pub fn calculate_max_widths(
    commands: &[crate::state::CommandEntry],
    spinner_frame: usize,
) -> (usize, usize) {
    let mut max_chain_width = 0;
    let mut max_status_width = 0;
    // Single pass over the commands, tracking both maxima together.
    for command in commands {
        let chain_width = chain_type_to_str(&command.get_chain_type()).chars().count();
        let status_width = format_status_text(&command.get_status(), spinner_frame)
            .chars()
            .count();
        max_chain_width = max_chain_width.max(chain_width);
        max_status_width = max_status_width.max(status_width);
    }
    (max_chain_width, max_status_width)
}
/// Computes the character width of the widest possible table line: the
/// folder cell ("<cwd> >") plus the longest command, the widest chain
/// label, the widest status label and the fixed inter-cell padding.
pub fn calculate_longest_line(
    longest_cwd_display: &str,
    longest_command: usize,
    max_chain_width: usize,
    max_status_width: usize,
) -> usize {
    // Three characters of padding separate the table cells.
    const CELL_PADDING: usize = 3;
    let folder_width = format!("{} >", longest_cwd_display).chars().count();
    folder_width + longest_command + max_chain_width + max_status_width + CELL_PADDING
}
pub fn chain_type_to_str(chain_type: &crate::state::ChainType) -> &'static str {
match chain_type {
crate::state::ChainType::And => "AND",
crate::state::ChainType::Or => "OR",
crate::state::ChainType::Then => "THEN",
crate::state::ChainType::None => " ",
}
}
/// Fits the help line into `max_width`, trying progressively more compact
/// renderings: full text, "key - first word" pairs, keys joined by " / ",
/// keys joined by "/", and finally a hard ellipsis truncation of the keys.
/// Returns the chosen text together with its character width.
pub fn truncate_help_line(help_text: &str, max_width: usize) -> (String, usize) {
    let full_width = help_text.chars().count();
    if full_width <= max_width {
        return (help_text.to_string(), full_width);
    }
    let keybindings = parse_help_keybindings(help_text);
    // Try candidates from most to least informative.
    if let Some(fit) = try_shortened_help(&keybindings, max_width)
        .or_else(|| try_keys_spaced(&keybindings, max_width))
        .or_else(|| try_keys_tight(&keybindings, max_width))
    {
        return fit;
    }
    // Last resort: truncate the tight key list with an ellipsis.
    let keys_only = keybindings
        .iter()
        .map(|(key, _, _)| *key)
        .collect::<Vec<_>>()
        .join("/");
    truncate_with_ellipsis(&keys_only, max_width)
}
/// Splits a help line of the form "key - action, key - action" into
/// (key, first word of action, full action) triples. Parts without a
/// " - " separator are skipped.
fn parse_help_keybindings(help_text: &str) -> Vec<(&str, &str, &str)> {
    let mut bindings = Vec::new();
    for part in help_text.split(", ") {
        if let Some((key, action)) = part.split_once(" - ") {
            let first_word = action.split_whitespace().next().unwrap_or(action);
            bindings.push((key, first_word, action));
        }
    }
    bindings
}
/// Renders each binding as "key - first_word" joined by ", "; returns the
/// text and its character width when it fits into `max_width`.
fn try_shortened_help(
    keybindings: &[(&str, &str, &str)],
    max_width: usize,
) -> Option<(String, usize)> {
    let text = keybindings
        .iter()
        .map(|(key, first_word, _)| format!("{} - {}", key, first_word))
        .collect::<Vec<_>>()
        .join(", ");
    let width = text.chars().count();
    if width > max_width {
        None
    } else {
        Some((text, width))
    }
}
/// Joins the keys with " / "; returns the text and its width when it fits.
fn try_keys_spaced(
    keybindings: &[(&str, &str, &str)],
    max_width: usize,
) -> Option<(String, usize)> {
    let keys: Vec<&str> = keybindings.iter().map(|(key, _, _)| *key).collect();
    let text = keys.join(" / ");
    let width = text.chars().count();
    if width > max_width {
        None
    } else {
        Some((text, width))
    }
}
/// Joins the keys with "/" (tightest form); returns the text and its width
/// when it fits.
fn try_keys_tight(keybindings: &[(&str, &str, &str)], max_width: usize) -> Option<(String, usize)> {
    let keys: Vec<&str> = keybindings.iter().map(|(key, _, _)| *key).collect();
    let text = keys.join("/");
    let width = text.chars().count();
    if width > max_width {
        None
    } else {
        Some((text, width))
    }
}
/// Keeps as many leading characters as fit in `max_width` minus three, then
/// appends "...". Returns the text with its character width. Note: when
/// `max_width < 3` the result is still at least three characters ("...").
fn truncate_with_ellipsis(text: &str, max_width: usize) -> (String, usize) {
    let keep = max_width.saturating_sub(3);
    let mut truncated: String = text.chars().take(keep).collect();
    truncated.push_str("...");
    let width = truncated.chars().count();
    (truncated, width)
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/strider/src/search_view.rs | default-plugins/strider/src/search_view.rs | use crate::shared::{calculate_list_bounds, render_list_tip};
use fuzzy_matcher::skim::SkimMatcherV2;
use fuzzy_matcher::FuzzyMatcher;
use pretty_bytes::converter::convert as pretty_bytes;
use unicode_width::UnicodeWidthStr;
use zellij_tile::prelude::*;
use crate::file_list_view::FsEntry;
#[derive(Default, Debug)]
/// State for the fuzzy-search view: the current match list and which entry
/// is highlighted.
pub struct SearchView {
    // Matches for the current search term, sorted by descending fuzzy score.
    pub search_results: Vec<SearchResult>,
    // Index into `search_results` of the highlighted row.
    pub selected_search_result: usize,
}
impl SearchView {
    /// Number of results for the current search term.
    pub fn search_result_count(&self) -> usize {
        self.search_results.len()
    }
    /// Re-runs the fuzzy match of `search_term` against `files`, replacing
    /// the results (sorted by descending score) and resetting the selection
    /// to the top. An empty term clears the results.
    pub fn update_search_results(&mut self, search_term: &str, files: &Vec<FsEntry>) {
        self.selected_search_result = 0;
        if search_term.is_empty() {
            self.search_results.clear();
        } else {
            let mut matches = vec![];
            let matcher = SkimMatcherV2::default().use_cache(true);
            for file in files {
                let name = file.name();
                if let Some((score, indices)) = matcher.fuzzy_indices(&name, search_term) {
                    matches.push(SearchResult::new(file.clone(), score, indices));
                }
            }
            // Higher scores (better matches) sort first.
            matches.sort_by(|a, b| b.score.cmp(&a.score));
            self.search_results = matches;
        }
    }
    /// Drops all results and moves the selection back to the top.
    pub fn clear_and_reset_selection(&mut self) {
        self.search_results.clear();
        self.selected_search_result = 0;
    }
    /// Moves the highlighted result up one row, stopping at the top.
    pub fn move_selection_up(&mut self) {
        self.selected_search_result = self.selected_search_result.saturating_sub(1);
    }
    /// Moves the highlighted result down one row, stopping at the last one.
    pub fn move_selection_down(&mut self) {
        if self.selected_search_result + 1 < self.search_results.len() {
            self.selected_search_result += 1;
        }
    }
    /// The filesystem entry of the highlighted result, if any.
    pub fn get_selected_entry(&self) -> Option<FsEntry> {
        self.search_results
            .get(self.selected_search_result)
            .map(|s| s.entry.clone())
    }
    /// Renders the visible window of search results: name (with trailing '/'
    /// for folders), right-aligned size when there is room, the selected row
    /// highlighted and fuzzy-matched characters color-indexed.
    pub fn render(&mut self, rows: usize, cols: usize) {
        let (start_index, selected_index_in_range, end_index) = calculate_list_bounds(
            self.search_results.len(),
            rows.saturating_sub(1),
            Some(self.selected_search_result),
        );
        render_list_tip(3, cols);
        for i in start_index..end_index {
            if let Some(search_result) = self.search_results.get(i) {
                let is_selected = Some(i) == selected_index_in_range;
                let mut search_result_text = search_result.name();
                let size = search_result
                    .size()
                    .map(|s| pretty_bytes(s as f64))
                    .unwrap_or("".to_owned());
                if search_result.is_folder() {
                    search_result_text.push('/');
                }
                let search_result_text_width = search_result_text.width();
                let size_width = size.width();
                let text = if search_result_text_width + size_width < cols {
                    // Pad so the size column is right-aligned.
                    let padding = " ".repeat(
                        cols.saturating_sub(search_result_text_width)
                            .saturating_sub(size_width),
                    );
                    format!("{}{}{}", search_result_text, padding, size)
                } else {
                    // drop the size, no room for it
                    let padding = " ".repeat(cols.saturating_sub(search_result_text_width));
                    format!("{}{}", search_result_text, padding)
                };
                let mut text_element = if is_selected {
                    Text::new(text).selected()
                } else {
                    Text::new(text)
                };
                if search_result.is_folder() {
                    text_element = text_element.color_range(0, ..);
                }
                // Highlight the characters matched by the fuzzy search.
                text_element = text_element.color_indices(3, search_result.indices());
                // List rows start at y == 4 (below tip/prompt lines).
                print_text_with_coordinates(
                    text_element,
                    0,
                    i.saturating_sub(start_index) + 4,
                    Some(cols),
                    None,
                );
            }
        }
    }
}
#[derive(Debug)]
/// A single fuzzy-search hit: the matched entry plus its score and the
/// character indices that matched (used for highlighting).
pub struct SearchResult {
    pub entry: FsEntry,
    pub score: i64,
    pub indices: Vec<usize>,
}
impl SearchResult {
pub fn new(entry: FsEntry, score: i64, indices: Vec<usize>) -> Self {
SearchResult {
entry,
score,
indices,
}
}
pub fn name(&self) -> String {
self.entry.name()
}
pub fn size(&self) -> Option<u64> {
self.entry.size()
}
pub fn indices(&self) -> Vec<usize> {
self.indices.clone()
}
pub fn is_folder(&self) -> bool {
self.entry.is_folder()
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/strider/src/state.rs | default-plugins/strider/src/state.rs | use crate::file_list_view::{FileListView, FsEntry};
use crate::search_view::SearchView;
use crate::shared::calculate_list_bounds;
use std::{
collections::BTreeMap,
path::{Path, PathBuf},
};
use zellij_tile::prelude::*;
#[derive(Default)]
/// Top-level plugin state for the file browser.
pub struct State {
    // Directory-listing view (the default view).
    pub file_list_view: FileListView,
    // Fuzzy-search results view, active while a search term is entered.
    pub search_view: SearchView,
    pub hide_hidden_files: bool,
    // Last known row count of the plugin pane, recorded on render.
    pub current_rows: Option<usize>,
    // Who asked us to act as a file picker (plugin or CLI pipe), if anyone.
    pub handling_filepick_request_from: Option<(PipeSource, BTreeMap<String, String>)>,
    pub initial_cwd: PathBuf,
    pub is_searching: bool,
    pub search_term: String,
    // When true, the plugin closes itself after opening a selection.
    pub close_on_selection: bool,
}
impl State {
    /// Appends a typed character to the search term. The literal terms ".."
    /// and "/" are navigation shortcuts instead of searches.
    pub fn update_search_term(&mut self, character: char) {
        self.search_term.push(character);
        if &self.search_term == ".." {
            self.descend_to_previous_path();
        } else if &self.search_term == "/" {
            self.descend_to_root_path();
        } else {
            self.is_searching = true;
            self.search_view
                .update_search_results(&self.search_term, &self.file_list_view.files);
        }
    }
    /// Backspace: with an empty term, navigates to the parent directory;
    /// otherwise removes the last character and refreshes the results.
    pub fn handle_backspace(&mut self) {
        if self.search_term.is_empty() {
            self.descend_to_previous_path();
        } else {
            self.search_term.pop();
            if self.search_term.is_empty() {
                self.is_searching = false;
            }
            self.search_view
                .update_search_results(&self.search_term, &self.file_list_view.files);
        }
    }
    /// Clears the term, empties the results and leaves search mode.
    pub fn clear_search_term(&mut self) {
        self.search_term.clear();
        self.search_view
            .update_search_results(&self.search_term, &self.file_list_view.files);
        self.is_searching = false;
    }
    /// Ctrl-c behavior: go up one directory when no term is entered,
    /// otherwise clear the active search.
    pub fn clear_search_term_or_descend(&mut self) {
        if self.search_term.is_empty() {
            self.descend_to_previous_path();
        } else {
            self.search_term.clear();
            self.search_view
                .update_search_results(&self.search_term, &self.file_list_view.files);
            self.is_searching = false;
        }
    }
    /// Moves the selection up in whichever view is active.
    pub fn move_selection_up(&mut self) {
        if self.is_searching {
            self.search_view.move_selection_up();
        } else {
            self.file_list_view.move_selection_up();
        }
    }
    /// Moves the selection down in whichever view is active.
    pub fn move_selection_down(&mut self) {
        if self.is_searching {
            self.search_view.move_selection_down();
        } else {
            self.file_list_view.move_selection_down();
        }
    }
    /// Selects the clicked row; clicking the already-selected row activates
    /// it (like pressing Enter). The list starts at screen row 4, hence the
    /// `saturating_sub(4)` when mapping a click line to a list index.
    pub fn handle_left_click(&mut self, line: isize) {
        if let Some(current_rows) = self.current_rows {
            let rows_for_list = current_rows.saturating_sub(5);
            if self.is_searching {
                let (start_index, _selected_index_in_range, _end_index) = calculate_list_bounds(
                    self.search_view.search_result_count(),
                    rows_for_list,
                    Some(self.search_view.selected_search_result),
                );
                let prev_selected = self.search_view.selected_search_result;
                self.search_view.selected_search_result =
                    (line as usize).saturating_sub(4) + start_index;
                if prev_selected == self.search_view.selected_search_result {
                    self.traverse_dir();
                }
            } else {
                let (start_index, _selected_index_in_range, _end_index) = calculate_list_bounds(
                    self.file_list_view.files.len(),
                    rows_for_list,
                    self.file_list_view.selected(),
                );
                let prev_selected = self.file_list_view.selected();
                *self.file_list_view.selected_mut() =
                    (line as usize).saturating_sub(4) + start_index;
                if prev_selected == self.file_list_view.selected() {
                    self.traverse_dir();
                }
            }
        }
    }
    /// Moves the selection to the hovered row (same row mapping as clicks,
    /// but without activating the entry).
    pub fn handle_mouse_hover(&mut self, line: isize) {
        if let Some(current_rows) = self.current_rows {
            let rows_for_list = current_rows.saturating_sub(5);
            if self.is_searching {
                let (start_index, _selected_index_in_range, _end_index) = calculate_list_bounds(
                    self.search_view.search_result_count(),
                    rows_for_list,
                    Some(self.search_view.selected_search_result),
                );
                self.search_view.selected_search_result =
                    (line as usize).saturating_sub(4) + start_index;
            } else {
                let (start_index, _selected_index_in_range, _end_index) = calculate_list_bounds(
                    self.file_list_view.files.len(),
                    rows_for_list,
                    self.file_list_view.selected(),
                );
                *self.file_list_view.selected_mut() =
                    (line as usize).saturating_sub(4) + start_index;
            }
        }
    }
    /// Navigates to the parent directory, clearing any active search.
    pub fn descend_to_previous_path(&mut self) {
        self.search_term.clear();
        self.search_view.clear_and_reset_selection();
        self.file_list_view.descend_to_previous_path();
    }
    /// Navigates back to the plugin's initial cwd, clearing any search.
    pub fn descend_to_root_path(&mut self) {
        self.search_term.clear();
        self.search_view.clear_and_reset_selection();
        self.file_list_view.descend_to_root_path(&self.initial_cwd);
        refresh_directory(&self.file_list_view.path);
    }
    /// Flips visibility of dot-files (takes effect on the next refresh).
    pub fn toggle_hidden_files(&mut self) {
        self.hide_hidden_files = !self.hide_hidden_files;
    }
    /// Activates the selected entry: enters directories, opens files (or
    /// responds to a pending filepicker request), then resets search state.
    pub fn traverse_dir(&mut self) {
        let entry = if self.is_searching {
            self.search_view.get_selected_entry()
        } else {
            self.file_list_view.get_selected_entry()
        };
        if let Some(entry) = entry {
            match &entry {
                FsEntry::Dir(_p) => {
                    self.file_list_view.enter_dir(&entry);
                    self.search_view.clear_and_reset_selection();
                    refresh_directory(&self.file_list_view.path);
                },
                FsEntry::File(_p, _) => {
                    self.file_list_view.enter_dir(&entry);
                    self.search_view.clear_and_reset_selection();
                    if self.handling_filepick_request_from.is_some() {
                        self.send_filepick_response();
                    } else {
                        self.open_selected_path();
                    }
                },
            }
        } else if self.handling_filepick_request_from.is_some() {
            self.send_filepick_response();
        } else {
            self.open_selected_path();
        }
        self.is_searching = false;
        self.search_term.clear();
        self.search_view.clear_and_reset_selection();
    }
    /// Replaces the file list with a fresh directory scan result.
    pub fn update_files(&mut self, paths: Vec<(PathBuf, Option<FileMetadata>)>) {
        self.file_list_view
            .update_files(paths, self.hide_hidden_files);
    }
    /// Opens the current selection: a terminal for directories, the editor
    /// for files (cwd set to the file's parent when it has one). Honors
    /// `close_on_selection` by replacing the plugin pane.
    pub fn open_selected_path(&mut self) {
        if self.file_list_view.path_is_dir {
            if self.close_on_selection {
                open_terminal_in_place_of_plugin(&self.file_list_view.path, true);
            } else {
                open_terminal(&self.file_list_view.path);
            }
        } else {
            if let Some(parent_folder) = self.file_list_view.path.parent() {
                if self.close_on_selection {
                    open_file_in_place_of_plugin(
                        FileToOpen::new(&self.file_list_view.path).with_cwd(parent_folder.into()),
                        true,
                        BTreeMap::new(),
                    );
                } else {
                    open_file(
                        FileToOpen::new(&self.file_list_view.path).with_cwd(parent_folder.into()),
                        BTreeMap::new(),
                    );
                }
            } else {
                if self.close_on_selection {
                    open_file_in_place_of_plugin(
                        FileToOpen::new(&self.file_list_view.path),
                        true,
                        BTreeMap::new(),
                    );
                } else {
                    open_file(FileToOpen::new(&self.file_list_view.path), BTreeMap::new());
                }
            }
        }
    }
    /// Sends the selected path back to whoever requested the filepicker
    /// (plugin pipe or CLI pipe) and closes this plugin (wasm builds only).
    pub fn send_filepick_response(&mut self) {
        let selected_path = &self.file_list_view.path;
        match &self.handling_filepick_request_from {
            Some((PipeSource::Plugin(plugin_id), args)) => {
                pipe_message_to_plugin(
                    MessageToPlugin::new("filepicker_result")
                        .with_destination_plugin_id(*plugin_id)
                        .with_args(args.clone())
                        .with_payload(selected_path.display().to_string()),
                );
                #[cfg(target_family = "wasm")]
                close_self();
            },
            Some((PipeSource::Cli(pipe_id), _args)) => {
                #[cfg(target_family = "wasm")]
                cli_pipe_output(pipe_id, &selected_path.display().to_string());
                #[cfg(target_family = "wasm")]
                unblock_cli_pipe_input(pipe_id);
                #[cfg(target_family = "wasm")]
                close_self();
            },
            _ => {},
        }
    }
}
/// Asks Zellij to re-root the plugin's host mount at `full_path`; the
/// follow-up directory scan happens when the HostFolderChanged event fires.
pub(crate) fn refresh_directory(full_path: &Path) {
    change_host_folder(full_path.to_path_buf());
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/strider/src/file_list_view.rs | default-plugins/strider/src/file_list_view.rs | use crate::shared::{calculate_list_bounds, render_list_tip};
use crate::state::refresh_directory;
use pretty_bytes::converter::convert as pretty_bytes;
use std::collections::HashMap;
use std::path::PathBuf;
use unicode_width::UnicodeWidthStr;
use zellij_tile::prelude::*;
#[derive(Debug, Clone)]
/// Directory-listing view: the current path, its entries and a per-directory
/// cursor history.
pub struct FileListView {
    // Currently viewed path (a directory being listed, or a selected file).
    pub path: PathBuf,
    pub path_is_dir: bool,
    // Entries of the current directory, sorted.
    pub files: Vec<FsEntry>,
    // Remembered cursor position for each visited directory.
    pub cursor_hist: HashMap<PathBuf, usize>,
}
impl Default for FileListView {
fn default() -> Self {
FileListView {
path_is_dir: true,
path: PathBuf::new(),
files: Default::default(),
cursor_hist: Default::default(),
}
}
}
impl FileListView {
    /// Navigates to the parent directory (or an empty path at the top),
    /// clears the listing and triggers a host-folder refresh.
    pub fn descend_to_previous_path(&mut self) {
        if let Some(parent) = self.path.parent() {
            self.path = parent.to_path_buf();
        } else {
            self.path = PathBuf::new();
        }
        self.path_is_dir = true;
        self.files.clear();
        self.clear_selected();
        refresh_directory(&self.path);
    }
    /// Jumps back to the plugin's initial cwd and clears the listing.
    /// The caller is responsible for triggering the refresh.
    pub fn descend_to_root_path(&mut self, initial_cwd: &PathBuf) {
        self.path = initial_cwd.clone();
        self.path_is_dir = true;
        self.files.clear();
        self.clear_selected();
    }
    /// Makes `entry` the current path (directory or file) and clears the
    /// previous listing and cursor position.
    pub fn enter_dir(&mut self, entry: &FsEntry) {
        let is_dir = entry.is_folder();
        let path = entry.get_full_pathbuf();
        self.path = path;
        self.path_is_dir = is_dir;
        self.files.clear();
        self.clear_selected();
    }
    /// Forgets the remembered cursor position for the current path.
    pub fn clear_selected(&mut self) {
        self.cursor_hist.remove(&self.path);
    }
    /// Rebuilds the file list from a host scan: strips the "/host" prefix,
    /// skips symlinks, optionally hides dot-files, then sorts.
    pub fn update_files(
        &mut self,
        paths: Vec<(PathBuf, Option<FileMetadata>)>,
        hide_hidden_files: bool,
    ) {
        let mut files = vec![];
        for (entry, entry_metadata) in paths {
            // Scan results arrive rooted at /host; re-root them at our path.
            let entry = self
                .path
                .join(entry.strip_prefix("/host").unwrap_or(&entry));
            if entry_metadata.map(|e| e.is_symlink).unwrap_or(false) {
                continue;
            }
            let entry = if entry_metadata.map(|e| e.is_dir).unwrap_or(false) {
                FsEntry::Dir(entry)
            } else {
                let size = entry_metadata.map(|e| e.len).unwrap_or(0);
                FsEntry::File(entry, size)
            };
            if !entry.is_hidden_file() || !hide_hidden_files {
                files.push(entry);
            }
        }
        self.files = files;
        self.files.sort_unstable();
    }
    /// The entry under the cursor, if a cursor position exists.
    pub fn get_selected_entry(&self) -> Option<FsEntry> {
        self.selected().and_then(|f| self.files.get(f).cloned())
    }
    /// Mutable cursor position for the current path, created on demand.
    pub fn selected_mut(&mut self) -> &mut usize {
        self.cursor_hist.entry(self.path.clone()).or_default()
    }
    /// Cursor position for the current path, if one was recorded.
    pub fn selected(&self) -> Option<usize> {
        self.cursor_hist.get(&self.path).copied()
    }
    /// Moves the cursor up one row, stopping at the top; no-op without a
    /// recorded cursor.
    pub fn move_selection_up(&mut self) {
        if let Some(selected) = self.selected() {
            *self.selected_mut() = selected.saturating_sub(1);
        }
    }
    /// Moves the cursor down one row (clamped to the last entry); with no
    /// recorded cursor, starts at the first entry.
    pub fn move_selection_down(&mut self) {
        if let Some(selected) = self.selected() {
            let next = selected.saturating_add(1);
            *self.selected_mut() = std::cmp::min(self.files.len().saturating_sub(1), next);
        } else {
            *self.selected_mut() = 0;
        }
    }
    /// Renders the visible window of entries: name (with trailing '/' for
    /// folders), right-aligned size when it fits, selected row highlighted.
    pub fn render(&mut self, rows: usize, cols: usize) {
        let (start_index, selected_index_in_range, end_index) =
            calculate_list_bounds(self.files.len(), rows.saturating_sub(1), self.selected());
        render_list_tip(3, cols);
        for i in start_index..end_index {
            if let Some(entry) = self.files.get(i) {
                let is_selected = Some(i) == selected_index_in_range;
                let mut file_or_folder_name = entry.name();
                let size = entry
                    .size()
                    .map(|s| pretty_bytes(s as f64))
                    .unwrap_or("".to_owned());
                if entry.is_folder() {
                    file_or_folder_name.push('/');
                }
                let file_or_folder_name_width = file_or_folder_name.width();
                let size_width = size.width();
                let text = if file_or_folder_name_width + size_width < cols {
                    // Pad so the size column is right-aligned.
                    let padding = " ".repeat(
                        cols.saturating_sub(file_or_folder_name_width)
                            .saturating_sub(size_width),
                    );
                    format!("{}{}{}", file_or_folder_name, padding, size)
                } else {
                    // No room for the size; drop it.
                    let padding = " ".repeat(cols.saturating_sub(file_or_folder_name_width));
                    format!("{}{}", file_or_folder_name, padding)
                };
                let mut text_element = if is_selected {
                    Text::new(text).selected()
                } else {
                    Text::new(text)
                };
                if entry.is_folder() {
                    text_element = text_element.color_range(0, ..);
                }
                // List rows start at y == 4 (below tip/prompt lines).
                print_text_with_coordinates(
                    text_element,
                    0,
                    4 + i.saturating_sub(start_index),
                    Some(cols),
                    None,
                );
            }
        }
    }
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
/// A filesystem entry in the listing: a directory, or a file with its size
/// in bytes. Derived `Ord` sorts directories before files at equal paths.
pub enum FsEntry {
    Dir(PathBuf),
    File(PathBuf, u64),
}
impl FsEntry {
    /// The entry's path, borrowed.
    fn path(&self) -> &PathBuf {
        match self {
            FsEntry::Dir(p) => p,
            FsEntry::File(p, _) => p,
        }
    }
    /// Final path component as a string. Returns an empty string for paths
    /// without a file name (e.g. "/" or ".."), where the previous
    /// `unwrap()` would have panicked.
    pub fn name(&self) -> String {
        self.path()
            .file_name()
            .map(|n| n.to_string_lossy().into_owned())
            .unwrap_or_default()
    }
    /// Size in bytes for files; `None` for directories.
    pub fn size(&self) -> Option<u64> {
        match self {
            FsEntry::Dir(_) => None,
            FsEntry::File(_, size) => Some(*size),
        }
    }
    /// Owned copy of the entry's full path.
    pub fn get_full_pathbuf(&self) -> PathBuf {
        self.path().clone()
    }
    /// Whether the entry's name starts with a dot.
    pub fn is_hidden_file(&self) -> bool {
        self.name().starts_with('.')
    }
    /// Whether the entry is a directory.
    pub fn is_folder(&self) -> bool {
        matches!(self, FsEntry::Dir(_))
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/strider/src/main.rs | default-plugins/strider/src/main.rs | mod file_list_view;
mod search_view;
mod shared;
mod state;
use shared::{render_current_path, render_instruction_line, render_search_term};
use state::{refresh_directory, State};
use std::collections::BTreeMap;
use std::path::PathBuf;
use zellij_tile::prelude::*;
register_plugin!(State);
impl ZellijPlugin for State {
fn load(&mut self, configuration: BTreeMap<String, String>) {
let plugin_ids = get_plugin_ids();
self.initial_cwd = plugin_ids.initial_cwd;
let show_hidden_files = configuration
.get("show_hidden_files")
.map(|v| v == "true")
.unwrap_or(false);
self.hide_hidden_files = !show_hidden_files;
self.close_on_selection = configuration
.get("close_on_selection")
.map(|v| v == "true")
.unwrap_or(false);
subscribe(&[
EventType::Key,
EventType::Mouse,
EventType::CustomMessage,
EventType::Timer,
EventType::FileSystemUpdate,
EventType::HostFolderChanged,
EventType::PermissionRequestResult,
]);
self.file_list_view.clear_selected();
match configuration.get("caller_cwd").map(|c| PathBuf::from(c)) {
Some(caller_cwd) => {
self.file_list_view.path = caller_cwd;
},
None => {
self.file_list_view.path = self.initial_cwd.clone();
},
}
if self.initial_cwd != self.file_list_view.path {
change_host_folder(self.file_list_view.path.clone());
} else {
scan_host_folder(&"/host");
}
}
fn update(&mut self, event: Event) -> bool {
let mut should_render = false;
match event {
Event::FileSystemUpdate(paths) => {
self.update_files(paths);
should_render = true;
},
Event::HostFolderChanged(_new_host_folder) => {
scan_host_folder(&"/host");
should_render = true;
},
Event::Key(key) => match key.bare_key {
BareKey::Char(character) if key.has_no_modifiers() => {
self.update_search_term(character);
should_render = true;
},
BareKey::Backspace if key.has_no_modifiers() => {
self.handle_backspace();
should_render = true;
},
BareKey::Esc if key.has_no_modifiers() => {
if self.is_searching {
self.clear_search_term();
} else {
self.file_list_view.clear_selected();
}
should_render = true;
},
BareKey::Char('c') if key.has_modifiers(&[KeyModifier::Ctrl]) => {
self.clear_search_term_or_descend();
},
BareKey::Up if key.has_no_modifiers() => {
self.move_selection_up();
should_render = true;
},
BareKey::Down if key.has_no_modifiers() => {
self.move_selection_down();
should_render = true;
},
BareKey::Right | BareKey::Tab | BareKey::Enter if key.has_no_modifiers() => {
self.traverse_dir();
should_render = true;
},
BareKey::Right if key.has_no_modifiers() => {
self.traverse_dir();
should_render = true;
},
BareKey::Left if key.has_no_modifiers() => {
self.descend_to_previous_path();
should_render = true;
},
BareKey::Char('e') if key.has_modifiers(&[KeyModifier::Ctrl]) => {
should_render = true;
self.toggle_hidden_files();
refresh_directory(&self.file_list_view.path);
},
_ => (),
},
Event::Mouse(mouse_event) => match mouse_event {
Mouse::ScrollDown(_) => {
self.move_selection_down();
should_render = true;
},
Mouse::ScrollUp(_) => {
self.move_selection_up();
should_render = true;
},
Mouse::LeftClick(line, _) => {
self.handle_left_click(line);
should_render = true;
},
Mouse::Hover(line, _) => {
if line >= 0 {
self.handle_mouse_hover(line);
should_render = true;
}
},
_ => {},
},
_ => {
dbg!("Unknown event {:?}", event);
},
};
should_render
}
fn pipe(&mut self, pipe_message: PipeMessage) -> bool {
if pipe_message.is_private && pipe_message.name == "filepicker" {
if let PipeSource::Cli(pipe_id) = &pipe_message.source {
#[cfg(target_family = "wasm")]
block_cli_pipe_input(pipe_id);
}
self.handling_filepick_request_from = Some((pipe_message.source, pipe_message.args));
true
} else {
false
}
}
fn render(&mut self, rows: usize, cols: usize) {
self.current_rows = Some(rows);
let rows_for_list = rows.saturating_sub(6);
render_search_term(&self.search_term);
render_current_path(
&self.file_list_view.path,
self.file_list_view.path_is_dir,
self.handling_filepick_request_from.is_some(),
cols,
);
if self.is_searching {
self.search_view.render(rows_for_list, cols);
} else {
self.file_list_view.render(rows_for_list, cols);
}
render_instruction_line(rows, cols);
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/strider/src/shared.rs | default-plugins/strider/src/shared.rs | use std::path::PathBuf;
use unicode_width::UnicodeWidthStr;
use zellij_tile::prelude::*;
/// Prints the context-appropriate help line for the current width, picking
/// one of three progressively shorter variants; nothing is printed when the
/// pane is 25 columns or narrower.
pub fn render_instruction_line(y: usize, max_cols: usize) {
    let styled = if max_cols > 78 {
        Some(
            Text::new("Help: go back with <Ctrl c>, go to root with /, <Ctrl e> - toggle hidden files")
                .color_range(3, 19..27)
                .color_range(3, 45..46)
                .color_range(3, 48..56),
        )
    } else if max_cols > 56 {
        Some(
            Text::new("Help: <Ctrl c> - back, / - root, <Ctrl e> - hidden files")
                .color_range(3, 6..14)
                .color_range(3, 23..24)
                .color_range(3, 33..41),
        )
    } else if max_cols > 25 {
        Some(
            Text::new("<Ctrl c> - back, / - root")
                .color_range(3, ..8)
                .color_range(3, 17..18),
        )
    } else {
        None
    };
    if let Some(text) = styled {
        print_text_with_coordinates(text, 0, y, Some(max_cols), None);
    }
}
/// Prints the navigation tip line above the list.
pub fn render_list_tip(y: usize, max_cols: usize) {
    // The tip is a fixed string, so no format! is needed (clippy::useless_format).
    let tip = Text::new("(<↓↑> - Navigate, <TAB> - Select)")
        .color_range(3, 1..5)
        .color_range(3, 18..23);
    print_text_with_coordinates(tip, 0, y, Some(max_cols), None);
}
/// Computes the window [start, end) of results to display so the selected
/// row stays visible, expanding the window alternately above and below the
/// selection until `max_result_count` rows are used or the list is
/// exhausted. Returns `(start_index, selected_index, end_index)`. With no
/// selection, shows from the top; callers bound-check with `.get()`.
pub fn calculate_list_bounds(
    result_count: usize,
    max_result_count: usize,
    selected_index_in_all_results: Option<usize>,
) -> (usize, Option<usize>, usize) {
    let Some(selected) = selected_index_in_all_results else {
        return (0, None, max_result_count + 1);
    };
    let mut remaining = max_result_count;
    let mut start = selected;
    let mut end = selected + 1;
    // Alternates between growing upward and downward around the selection.
    let mut grow_below = false;
    while remaining > 0 {
        if !grow_below && start > 0 {
            start -= 1;
            remaining -= 1;
        } else if grow_below && end < result_count {
            end += 1;
            remaining -= 1;
        } else if start > 0 {
            // Preferred direction exhausted; keep growing the other way.
            start -= 1;
            remaining -= 1;
        } else if end < result_count {
            end += 1;
            remaining -= 1;
        } else {
            break;
        }
        grow_below = !grow_below;
    }
    (start, Some(selected), end)
}
/// Draws the "FIND: <term>_" prompt line, with the prompt and the term in
/// different colors and a trailing underscore as a cursor marker.
pub fn render_search_term(search_term: &str) {
    let prompt = "FIND: ";
    let line = format!("{}{}_", prompt, search_term);
    let styled = Text::new(line)
        .color_range(2, 0..prompt.len())
        .color_range(3, prompt.len()..);
    print_text(styled);
    println!("")
}
/// Draws the "PATH: <path> (<ENTER> - <action>)" line. When the full path
/// plus tip does not fit, the tip shrinks to "<ENTER>" and the path is
/// abbreviated via `truncate_path`. Ends with two blank lines before the
/// list area.
pub fn render_current_path(
    full_path: &PathBuf,
    path_is_dir: bool,
    handling_filepick: bool,
    max_cols: usize,
) {
    let prompt = "PATH: ";
    let current_path = full_path.display().to_string();
    let prompt_len = prompt.width();
    let current_path_len = current_path.width();
    // What pressing ENTER does in the current context.
    let enter_tip = if handling_filepick {
        "Select"
    } else if path_is_dir {
        "Open terminal here"
    } else {
        "Open in editor"
    };
    // +13 accounts for the " (<ENTER> - )" decoration around the tip.
    if max_cols > prompt_len + current_path_len + enter_tip.width() + 13 {
        let path_end = prompt_len + current_path_len;
        // Highlight "<ENTER>": it starts 2 chars after the path (" (").
        let current_path = Text::new(format!(
            "{}{} (<ENTER> - {})",
            prompt, current_path, enter_tip
        ))
        .color_range(2, 0..prompt_len)
        .color_range(0, prompt_len..path_end)
        .color_range(3, path_end + 2..path_end + 9);
        print_text(current_path);
    } else {
        // Budget for the path once the prompt and " <ENTER>" are accounted for.
        let max_path_len = max_cols
            .saturating_sub(prompt_len)
            .saturating_sub(8)
            .saturating_sub(prompt_len);
        let current_path = if current_path_len <= max_path_len {
            current_path
        } else {
            truncate_path(
                full_path.clone(),
                current_path_len.saturating_sub(max_path_len),
            )
        };
        let current_path_len = current_path.width();
        let path_end = prompt_len + current_path_len;
        let current_path = Text::new(format!("{}{} <ENTER>", prompt, current_path))
            .color_range(2, 0..prompt_len)
            .color_range(0, prompt_len..path_end)
            .color_range(3, path_end + 1..path_end + 9);
        print_text(current_path);
    }
    println!();
    println!();
}
/// Abbreviates `path` by collapsing leading components to their first
/// character until roughly `char_count_to_remove` characters are saved;
/// later components are kept in full once enough width is recovered.
fn truncate_path(path: PathBuf, mut char_count_to_remove: usize) -> String {
    let component_count = path.iter().count();
    let mut truncated = String::new();
    for (i, component) in path.iter().enumerate() {
        let mut part = component.to_string_lossy().to_string();
        // No separator after the first component (may itself be "/") or the last.
        let needs_separator = i != 0 && i + 1 != component_count;
        if char_count_to_remove > 0 {
            // Keep only the first character of this component.
            truncated.push(part.remove(0));
            if needs_separator {
                truncated.push('/');
            }
            // Credit the dropped remainder (plus one for the separator).
            char_count_to_remove = char_count_to_remove.saturating_sub(part.width() + 1);
        } else {
            truncated.push_str(&part);
            if needs_separator {
                truncated.push('/');
            }
        }
    }
    truncated
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/tooltip.rs | default-plugins/compact-bar/src/tooltip.rs | use crate::keybind_utils::KeybindProcessor;
use zellij_tile::prelude::*;
/// Renders a centered keybinding tooltip for the current input mode.
pub struct TooltipRenderer<'a> {
    // Mode and keybinding information borrowed from the host plugin state.
    mode_info: &'a ModeInfo,
}
impl<'a> TooltipRenderer<'a> {
    /// Creates a renderer over the given mode info.
    pub fn new(mode_info: &'a ModeInfo) -> Self {
        Self { mode_info }
    }
    /// Draws the tooltip centered within a rows x cols area. Normal mode
    /// renders inline key+ribbon pairs; other modes render a two-column
    /// key/description table.
    pub fn render(&self, rows: usize, cols: usize) {
        let current_mode = self.mode_info.mode;
        if current_mode == InputMode::Normal {
            let (text_components, tooltip_rows, tooltip_columns) =
                self.normal_mode_tooltip(current_mode);
            let base_x = cols.saturating_sub(tooltip_columns) / 2;
            let base_y = rows.saturating_sub(tooltip_rows) / 2;
            for (text, ribbon, x, y) in text_components {
                let text_width = text.content().chars().count();
                let ribbon_content_width = ribbon.content().chars().count();
                // Ribbons render with 4 extra characters of decoration.
                let ribbon_total_width = ribbon_content_width + 4;
                let total_element_width = text_width + ribbon_total_width + 1;
                // Check if this element would exceed the available columns and render an ellipses
                // if it does
                if base_x + x + total_element_width > cols {
                    let remaining_space = cols.saturating_sub(base_x + x);
                    let ellipsis = Text::new("...").opaque();
                    print_text_with_coordinates(
                        ellipsis,
                        base_x + x,
                        base_y + y,
                        Some(remaining_space),
                        None,
                    );
                    break;
                }
                print_text_with_coordinates(text, base_x + x, base_y + y, None, None);
                print_ribbon_with_coordinates(
                    ribbon,
                    base_x + x + text_width,
                    base_y + y,
                    None,
                    None,
                );
            }
        } else {
            let (table, tooltip_rows, tooltip_columns) = self.other_mode_tooltip(current_mode);
            let base_x = cols.saturating_sub(tooltip_columns) / 2;
            let base_y = rows.saturating_sub(tooltip_rows) / 2;
            print_table_with_coordinates(table, base_x, base_y, None, None);
        }
    }
    /// Returns (rows, columns) the tooltip needs for the given mode.
    pub fn calculate_dimensions(&self, current_mode: InputMode) -> (usize, usize) {
        match current_mode {
            InputMode::Normal => {
                let (_, tooltip_rows, tooltip_cols) = self.normal_mode_tooltip(current_mode);
                (tooltip_rows, tooltip_cols)
            },
            _ => {
                let (_, tooltip_rows, tooltip_cols) = self.other_mode_tooltip(current_mode);
                (tooltip_rows + 1, tooltip_cols) // + 1 for the invisible table title
            },
        }
    }
    /// Builds the normal-mode tooltip as positioned (key text, ribbon, x, y)
    /// tuples on a single row, plus its row count and total width.
    fn normal_mode_tooltip(
        &self,
        current_mode: InputMode,
    ) -> (Vec<(Text, Text, usize, usize)>, usize, usize) {
        let actions = KeybindProcessor::get_predetermined_actions(self.mode_info, current_mode);
        let y = 0;
        let mut running_x = 0;
        let mut components = Vec::new();
        let mut max_columns = 0;
        let mut is_first = true;
        for (key, description) in actions {
            // The first key omits the leading space used as a separator.
            let text = if is_first {
                Text::new(format!("{} ", &key)).color_all(3).opaque()
            } else {
                Text::new(format!(" {} ", &key)).color_all(3).opaque()
            };
            let ribbon = Text::new(&description);
            let line_length = if is_first {
                key.chars().count() + description.chars().count()
            } else {
                key.chars().count() + 1 + description.chars().count()
            };
            components.push((text, ribbon, running_x, y));
            // +5 covers ribbon decoration and spacing between elements.
            running_x += line_length + 5;
            max_columns = max_columns.max(running_x);
            is_first = false;
        }
        let total_rows = 1;
        (components, total_rows, max_columns)
    }
    /// Builds the tooltip table for non-normal modes: one key/description
    /// row per action, or a single status line when the mode has no
    /// predetermined actions. Returns (table, row count, total width).
    fn other_mode_tooltip(&self, current_mode: InputMode) -> (Table, usize, usize) {
        let actions = KeybindProcessor::get_predetermined_actions(self.mode_info, current_mode);
        let actions_vec: Vec<_> = actions.into_iter().collect();
        let mut table = Table::new().add_row(vec![" ".to_owned(); 2]);
        let mut row_count = 1; // Start with header row
        if actions_vec.is_empty() {
            let tooltip_text = match self.mode_info.mode {
                InputMode::EnterSearch => "Entering search term...".to_owned(),
                InputMode::RenameTab => "Renaming tab...".to_owned(),
                InputMode::RenamePane => "Renaming pane...".to_owned(),
                _ => {
                    format!("{:?}", self.mode_info.mode)
                },
            };
            let total_width = tooltip_text.chars().count();
            table = table.add_styled_row(vec![Text::new(tooltip_text).color_all(0)]);
            row_count += 1;
            (table, row_count, total_width)
        } else {
            let mut key_width = 0;
            let mut action_width = 0;
            for (key, description) in actions_vec.into_iter() {
                let description_formatted = format!("- {}", description);
                key_width = key_width.max(key.chars().count());
                action_width = action_width.max(description_formatted.chars().count());
                table = table.add_styled_row(vec![
                    Text::new(&key).color_all(3),
                    Text::new(description_formatted),
                ]);
                row_count += 1;
            }
            let total_width = key_width + action_width + 1; // +1 for separator
            (table, row_count, total_width)
        }
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/action_types.rs | default-plugins/compact-bar/src/action_types.rs | use zellij_tile::prelude::actions::Action;
use zellij_tile::prelude::*;
/// Semantic grouping of [`Action`]s for display purposes.
///
/// Several concrete `Action` variants (e.g. `MoveFocus` in each direction)
/// collapse into one `ActionType`, so that all keys bound to them can be
/// shown as a single grouped hint with one shared description.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ActionType {
    MoveFocus,
    MovePaneWithDirection,
    MovePaneWithoutDirection,
    ResizeIncrease,
    ResizeDecrease,
    ResizeAny,
    Search,
    NewPaneWithDirection,
    NewPaneWithoutDirection,
    BreakPaneLeftOrRight,
    GoToAdjacentTab,
    Scroll,
    PageScroll,
    HalfPageScroll,
    // The following variants represent actions that launch built-in plugins.
    SessionManager,
    Configuration,
    PluginManager,
    About,
    // Mode switches keep the target mode so descriptions can be specialized.
    SwitchToMode(InputMode),
    TogglePaneEmbedOrFloating,
    ToggleFocusFullscreen,
    ToggleFloatingPanes,
    CloseFocus,
    CloseTab,
    ToggleActiveSyncTab,
    ToggleTab,
    BreakPane,
    EditScrollback,
    NewTab,
    Detach,
    Quit,
    NewStackedPane,
    Other(String), // Fallback for unhandled actions
}
impl ActionType {
pub fn description(&self) -> String {
match self {
ActionType::MoveFocus => "Move focus".to_string(),
ActionType::MovePaneWithDirection => "Move pane".to_string(),
ActionType::MovePaneWithoutDirection => "Move pane".to_string(),
ActionType::ResizeIncrease => "Increase size in direction".to_string(),
ActionType::ResizeDecrease => "Decrease size in direction".to_string(),
ActionType::ResizeAny => "Increase or decrease size".to_string(),
ActionType::Search => "Search".to_string(),
ActionType::NewPaneWithDirection => "Split right/down".to_string(),
ActionType::NewPaneWithoutDirection => "New pane".to_string(),
ActionType::BreakPaneLeftOrRight => "Break pane to adjacent tab".to_string(),
ActionType::GoToAdjacentTab => "Move tab focus".to_string(),
ActionType::Scroll => "Scroll".to_string(),
ActionType::PageScroll => "Scroll page".to_string(),
ActionType::HalfPageScroll => "Scroll half Page".to_string(),
ActionType::SessionManager => "Session manager".to_string(),
ActionType::PluginManager => "Plugin manager".to_string(),
ActionType::Configuration => "Configuration".to_string(),
ActionType::About => "About Zellij".to_string(),
ActionType::SwitchToMode(input_mode) if input_mode == &InputMode::RenamePane => {
"Rename pane".to_string()
},
ActionType::SwitchToMode(input_mode) if input_mode == &InputMode::RenameTab => {
"Rename tab".to_string()
},
ActionType::SwitchToMode(input_mode) if input_mode == &InputMode::EnterSearch => {
"Search".to_string()
},
ActionType::SwitchToMode(input_mode) if input_mode == &InputMode::Locked => {
"Lock".to_string()
},
ActionType::SwitchToMode(input_mode) if input_mode == &InputMode::Normal => {
"Unlock".to_string()
},
ActionType::SwitchToMode(input_mode) => format!("{:?}", input_mode),
ActionType::TogglePaneEmbedOrFloating => "Float or embed".to_string(),
ActionType::NewStackedPane => "New stacked pane".to_string(),
ActionType::ToggleFocusFullscreen => "Toggle fullscreen".to_string(),
ActionType::ToggleFloatingPanes => "Show/hide floating panes".to_string(),
ActionType::CloseFocus => "Close pane".to_string(),
ActionType::CloseTab => "Close tab".to_string(),
ActionType::ToggleActiveSyncTab => "Sync panes in tab".to_string(),
ActionType::ToggleTab => "Circle tab focus".to_string(),
ActionType::BreakPane => "Break pane to new tab".to_string(),
ActionType::EditScrollback => "Open pane scrollback in editor".to_string(),
ActionType::NewTab => "New tab".to_string(),
ActionType::Detach => "Detach".to_string(),
ActionType::Quit => "Quit".to_string(),
ActionType::Other(_) => "Other action".to_string(),
}
}
pub fn from_action(action: &Action) -> Self {
match action {
Action::MoveFocus { .. } => ActionType::MoveFocus,
Action::MovePane { direction: Some(_) } => ActionType::MovePaneWithDirection,
Action::MovePane { direction: None } => ActionType::MovePaneWithoutDirection,
Action::Resize {
resize: Resize::Increase,
direction: Some(_),
} => ActionType::ResizeIncrease,
Action::Resize {
resize: Resize::Decrease,
direction: Some(_),
} => ActionType::ResizeDecrease,
Action::Resize {
resize: _,
direction: None,
} => ActionType::ResizeAny,
Action::Search { .. } => ActionType::Search,
Action::NewPane {
direction: Some(_), ..
} => ActionType::NewPaneWithDirection,
Action::NewPane {
direction: None, ..
} => ActionType::NewPaneWithoutDirection,
Action::NewStackedPane { .. } => ActionType::NewStackedPane,
Action::BreakPaneLeft | Action::BreakPaneRight => ActionType::BreakPaneLeftOrRight,
Action::GoToPreviousTab | Action::GoToNextTab => ActionType::GoToAdjacentTab,
Action::ScrollUp | Action::ScrollDown => ActionType::Scroll,
Action::PageScrollUp | Action::PageScrollDown => ActionType::PageScroll,
Action::HalfPageScrollUp | Action::HalfPageScrollDown => ActionType::HalfPageScroll,
Action::SwitchToMode { input_mode } => ActionType::SwitchToMode(*input_mode),
Action::TogglePaneEmbedOrFloating => ActionType::TogglePaneEmbedOrFloating,
Action::ToggleFocusFullscreen => ActionType::ToggleFocusFullscreen,
Action::ToggleFloatingPanes => ActionType::ToggleFloatingPanes,
Action::CloseFocus => ActionType::CloseFocus,
Action::CloseTab => ActionType::CloseTab,
Action::ToggleActiveSyncTab => ActionType::ToggleActiveSyncTab,
Action::ToggleTab => ActionType::ToggleTab,
Action::BreakPane => ActionType::BreakPane,
Action::EditScrollback => ActionType::EditScrollback,
Action::Detach => ActionType::Detach,
Action::Quit => ActionType::Quit,
action if action.launches_plugin("session-manager") => ActionType::SessionManager,
action if action.launches_plugin("configuration") => ActionType::Configuration,
action if action.launches_plugin("plugin-manager") => ActionType::PluginManager,
action if action.launches_plugin("zellij:about") => ActionType::About,
action if matches!(action, Action::NewTab { .. }) => ActionType::NewTab,
_ => ActionType::Other(format!("{:?}", action)),
}
}
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/keybind_utils.rs | default-plugins/compact-bar/src/keybind_utils.rs | use crate::action_types::ActionType;
use std::collections::HashSet;
use zellij_tile::prelude::actions::Action;
use zellij_tile::prelude::*;
/// Stateless namespace for helpers that turn a mode's keybinds into
/// display-ready `(grouped_keys, description)` pairs.
pub struct KeybindProcessor;
impl KeybindProcessor {
/// Find predetermined actions based on predicates while maintaining order
pub fn find_predetermined_actions<F>(
mode_info: &ModeInfo,
mode: InputMode,
predicates: Vec<F>,
) -> Vec<(String, String)>
where
F: Fn(&Action) -> bool,
{
let mut result = Vec::new();
let keybinds = mode_info.get_keybinds_for_mode(mode);
let mut processed_action_types = HashSet::new();
// Iterate through predicates in order to maintain the desired sequence
for predicate in predicates {
// Find the first matching action for this predicate
let mut found_match = false;
for (_key, actions) in &keybinds {
if let Some(first_action) = actions.first() {
if predicate(first_action) {
let action_type = ActionType::from_action(first_action);
// Skip if we've already processed this action type
if processed_action_types.contains(&action_type) {
found_match = true;
break;
}
let mut matching_keys = Vec::new();
// Find all keys that match this action type (including different directions)
for (inner_key, inner_actions) in &keybinds {
if let Some(inner_first_action) = inner_actions.first() {
if ActionType::from_action(inner_first_action) == action_type {
matching_keys.push(format!("{}", inner_key));
}
}
}
if !matching_keys.is_empty() {
let description = action_type.description();
let should_add_brackets_to_keys = mode != InputMode::Normal;
// Check if this is switching to normal mode
let is_switching_to_locked = matches!(
first_action,
Action::SwitchToMode {
input_mode: InputMode::Locked
}
);
let grouped_keys = Self::group_key_sets(
&matching_keys,
should_add_brackets_to_keys,
is_switching_to_locked,
);
result.push((grouped_keys, description));
processed_action_types.insert(action_type);
}
found_match = true;
break;
}
}
}
// If we found a match for this predicate, we've processed it
if found_match {
continue;
}
}
result
}
    /// Group keys into sets and separate different key types with '/'.
    ///
    /// Related keys are collapsed into compact tokens (e.g. `hjkl`, `←↓↑→`,
    /// `PgUp|PgDn`), groups are joined with '/', and tokens are wrapped in
    /// angle brackets when `should_add_brackets_to_keys` is set.
    fn group_key_sets(
        keys: &[String],
        should_add_brackets_to_keys: bool,
        is_switching_to_locked: bool,
    ) -> String {
        if keys.is_empty() {
            return String::new();
        }
        // Filter out Esc and Enter keys when switching to locked mode, but
        // only if other keys exist (the hint must never end up empty).
        let filtered_keys: Vec<String> = if is_switching_to_locked {
            let non_esc_enter_keys: Vec<String> = keys
                .iter()
                .filter(|k| k.as_str() != "ESC" && k.as_str() != "ENTER")
                .cloned()
                .collect();
            if non_esc_enter_keys.is_empty() {
                // If no other keys exist, keep the original keys
                keys.to_vec()
            } else {
                // Use filtered keys (without Esc/Enter)
                non_esc_enter_keys
            }
        } else {
            keys.to_vec()
        };
        // A single key needs no grouping.
        if filtered_keys.len() == 1 {
            return if should_add_brackets_to_keys {
                format!("<{}>", filtered_keys[0])
            } else {
                filtered_keys[0].clone()
            };
        }
        // Group keys by type
        let mut arrow_keys = Vec::new();
        let mut hjkl_lower = Vec::new();
        let mut hjkl_upper = Vec::new();
        let mut square_bracket_keys = Vec::new();
        let mut plus_minus_keys = Vec::new();
        let mut pgup_pgdown = Vec::new();
        let mut other_keys = Vec::new();
        for key in &filtered_keys {
            match key.as_str() {
                // Named directional keys are normalized to arrow glyphs,
                // which can produce duplicates (deduped in sort_arrows).
                "Left" | "←" => arrow_keys.push("←"),
                "Down" | "↓" => arrow_keys.push("↓"),
                "Up" | "↑" => arrow_keys.push("↑"),
                "Right" | "→" => arrow_keys.push("→"),
                "h" => hjkl_lower.push("h"),
                "j" => hjkl_lower.push("j"),
                "k" => hjkl_lower.push("k"),
                "l" => hjkl_lower.push("l"),
                "H" => hjkl_upper.push("H"),
                "J" => hjkl_upper.push("J"),
                "K" => hjkl_upper.push("K"),
                "L" => hjkl_upper.push("L"),
                "[" => square_bracket_keys.push("["),
                "]" => square_bracket_keys.push("]"),
                "+" => plus_minus_keys.push("+"),
                "-" => plus_minus_keys.push("-"),
                "=" => plus_minus_keys.push("="),
                "PgUp" => pgup_pgdown.push("PgUp"),
                "PgDn" => pgup_pgdown.push("PgDn"),
                _ => {
                    // Unrecognized keys are formatted individually.
                    if should_add_brackets_to_keys {
                        other_keys.push(format!("<{}>", key));
                    } else {
                        other_keys.push(key.clone());
                    }
                },
            }
        }
        let mut groups = Vec::new();
        // Add hjkl group if present (prioritize hjkl over arrows)
        if !hjkl_lower.is_empty() {
            Self::sort_hjkl(&mut hjkl_lower);
            groups.push(Self::format_key_group(
                &hjkl_lower,
                should_add_brackets_to_keys,
                false,
            ));
        }
        // Add HJKL group if present
        if !hjkl_upper.is_empty() {
            Self::sort_hjkl_upper(&mut hjkl_upper);
            groups.push(Self::format_key_group(
                &hjkl_upper,
                should_add_brackets_to_keys,
                false,
            ));
        }
        // Add arrow keys group if present
        if !arrow_keys.is_empty() {
            Self::sort_arrows(&mut arrow_keys);
            groups.push(Self::format_key_group(
                &arrow_keys,
                should_add_brackets_to_keys,
                false,
            ));
        }
        if !square_bracket_keys.is_empty() {
            Self::sort_square_brackets(&mut square_bracket_keys);
            groups.push(Self::format_key_group(
                &square_bracket_keys,
                should_add_brackets_to_keys,
                false,
            ));
        }
        if !plus_minus_keys.is_empty() {
            Self::sort_plus_minus(&mut plus_minus_keys);
            groups.push(Self::format_key_group(
                &plus_minus_keys,
                should_add_brackets_to_keys,
                false,
            ));
        }
        // PgUp/PgDn are multi-character names, so they get a '|' separator
        // inside their group rather than direct concatenation.
        if !pgup_pgdown.is_empty() {
            Self::sort_pgup_pgdown(&mut pgup_pgdown);
            groups.push(Self::format_key_group(
                &pgup_pgdown,
                should_add_brackets_to_keys,
                true,
            ));
        }
        // Add other keys with / separator
        if !other_keys.is_empty() {
            groups.push(other_keys.join("/"));
        }
        groups.join("/")
    }
fn sort_hjkl(keys: &mut Vec<&str>) {
keys.sort_by(|a, b| {
let order = ["h", "j", "k", "l"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
}
fn sort_hjkl_upper(keys: &mut Vec<&str>) {
keys.sort_by(|a, b| {
let order = ["H", "J", "K", "L"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
}
fn sort_arrows(keys: &mut Vec<&str>) {
keys.sort();
keys.dedup();
keys.sort_by(|a, b| {
let order = ["←", "↓", "↑", "→"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
}
fn sort_square_brackets(keys: &mut Vec<&str>) {
keys.sort_by(|a, b| {
let order = ["[", "]"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
}
fn sort_plus_minus(keys: &mut Vec<&str>) {
keys.sort_by(|a, b| {
let order = ["+", "-"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
// Remove "=" if both "+" and "=" are present
if keys.contains(&"+") && keys.contains(&"=") {
keys.retain(|k| k != &"=");
}
}
fn sort_pgup_pgdown(keys: &mut Vec<&str>) {
keys.sort_by(|a, b| {
let order = ["PgUp", "PgDn"];
let pos_a = order.iter().position(|&x| &x == a).unwrap_or(usize::MAX);
let pos_b = order.iter().position(|&x| &x == b).unwrap_or(usize::MAX);
pos_a.cmp(&pos_b)
});
}
fn format_key_group(
keys: &[&str],
should_add_brackets: bool,
use_pipe_separator: bool,
) -> String {
let separator = if use_pipe_separator { "|" } else { "" };
let joined = keys.join(separator);
if should_add_brackets {
format!("<{}>", joined)
} else {
joined
}
}
    /// Get predetermined actions for a specific mode
    ///
    /// Returns `(grouped_keys, description)` pairs in a hand-curated per-mode
    /// order (the order of the predicate list). Modes that only capture free
    /// text (rename, search entry, prompt, tmux) return an empty list.
    pub fn get_predetermined_actions(
        mode_info: &ModeInfo,
        mode: InputMode,
    ) -> Vec<(String, String)> {
        match mode {
            // Locked: the only hint shown is how to unlock.
            InputMode::Locked => {
                let ordered_predicates = vec![|action: &Action| {
                    matches!(
                        action,
                        Action::SwitchToMode {
                            input_mode: InputMode::Normal
                        }
                    )
                }];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Normal: hints for entering each sub-mode, plus quit.
            InputMode::Normal => {
                let ordered_predicates = vec![
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Locked
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Pane
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Tab
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Resize
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Move
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Scroll
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::Session
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::Quit),
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Pane: creation, focus movement, close/rename and layout toggles.
            InputMode::Pane => {
                let ordered_predicates = vec![
                    |action: &Action| {
                        matches!(
                            action,
                            Action::NewPane {
                                direction: None,
                                pane_name: None,
                                start_suppressed: false
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MoveFocus {
                                direction: Direction::Left
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MoveFocus {
                                direction: Direction::Down
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MoveFocus {
                                direction: Direction::Up
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MoveFocus {
                                direction: Direction::Right
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::CloseFocus),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::RenamePane
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::ToggleFocusFullscreen),
                    |action: &Action| matches!(action, Action::ToggleFloatingPanes),
                    |action: &Action| matches!(action, Action::TogglePaneEmbedOrFloating),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::NewStackedPane {
                                command: None,
                                pane_name: None,
                                near_current_pane: false,
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::NewPane {
                                direction: Some(Direction::Right),
                                pane_name: None,
                                start_suppressed: false
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::NewPane {
                                direction: Some(Direction::Down),
                                pane_name: None,
                                start_suppressed: false
                            }
                        )
                    },
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Tab: navigation, creation, close/rename, sync and break-pane.
            InputMode::Tab => {
                let ordered_predicates = vec![
                    |action: &Action| matches!(action, Action::GoToPreviousTab),
                    |action: &Action| matches!(action, Action::GoToNextTab),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::NewTab {
                                tiled_layout: None,
                                floating_layouts: _,
                                swap_tiled_layouts: None,
                                swap_floating_layouts: None,
                                tab_name: None,
                                should_change_focus_to_new_tab: true,
                                cwd: None,
                                initial_panes: _,
                                first_pane_unblock_condition: _,
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::CloseTab),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::RenameTab
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::TabNameInput { .. }),
                    |action: &Action| matches!(action, Action::ToggleActiveSyncTab),
                    |action: &Action| matches!(action, Action::BreakPane),
                    |action: &Action| matches!(action, Action::BreakPaneLeft),
                    |action: &Action| matches!(action, Action::BreakPaneRight),
                    |action: &Action| matches!(action, Action::ToggleTab),
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Resize: directionless increase/decrease first, then each
            // direction (the directional variants collapse into grouped
            // ActionTypes, so duplicates are deduped downstream).
            InputMode::Resize => {
                let ordered_predicates = vec![
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Increase,
                                direction: None
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Decrease,
                                direction: None
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Increase,
                                direction: Some(Direction::Left)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Increase,
                                direction: Some(Direction::Down)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Increase,
                                direction: Some(Direction::Up)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Increase,
                                direction: Some(Direction::Right)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Decrease,
                                direction: Some(Direction::Left)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Decrease,
                                direction: Some(Direction::Down)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Decrease,
                                direction: Some(Direction::Up)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Resize {
                                resize: Resize::Decrease,
                                direction: Some(Direction::Right)
                            }
                        )
                    },
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Move: pane movement in each direction (grouped downstream).
            InputMode::Move => {
                let ordered_predicates = vec![
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MovePane {
                                direction: Some(Direction::Left)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MovePane {
                                direction: Some(Direction::Down)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MovePane {
                                direction: Some(Direction::Up)
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::MovePane {
                                direction: Some(Direction::Right)
                            }
                        )
                    },
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Scroll: line/half-page/page scrolling, search entry, editor.
            InputMode::Scroll => {
                let ordered_predicates = vec![
                    |action: &Action| matches!(action, Action::ScrollDown),
                    |action: &Action| matches!(action, Action::ScrollUp),
                    |action: &Action| matches!(action, Action::HalfPageScrollDown),
                    |action: &Action| matches!(action, Action::HalfPageScrollUp),
                    |action: &Action| matches!(action, Action::PageScrollDown),
                    |action: &Action| matches!(action, Action::PageScrollUp),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::EnterSearch
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::EditScrollback),
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Search: search navigation plus toggleable search options.
            InputMode::Search => {
                let ordered_predicates = vec![
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SwitchToMode {
                                input_mode: InputMode::EnterSearch
                            }
                        )
                    },
                    |action: &Action| matches!(action, Action::SearchInput { .. }),
                    |action: &Action| matches!(action, Action::ScrollDown),
                    |action: &Action| matches!(action, Action::ScrollUp),
                    |action: &Action| matches!(action, Action::PageScrollDown),
                    |action: &Action| matches!(action, Action::PageScrollUp),
                    |action: &Action| matches!(action, Action::HalfPageScrollDown),
                    |action: &Action| matches!(action, Action::HalfPageScrollUp),
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Search {
                                direction: actions::SearchDirection::Down
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::Search {
                                direction: actions::SearchDirection::Up
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SearchToggleOption {
                                option: actions::SearchOption::CaseSensitivity
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SearchToggleOption {
                                option: actions::SearchOption::Wrap
                            }
                        )
                    },
                    |action: &Action| {
                        matches!(
                            action,
                            Action::SearchToggleOption {
                                option: actions::SearchOption::WholeWord
                            }
                        )
                    },
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Session: detach and the built-in plugin launchers.
            InputMode::Session => {
                let ordered_predicates = vec![
                    |action: &Action| matches!(action, Action::Detach),
                    |action: &Action| action.launches_plugin("session-manager"),
                    |action: &Action| action.launches_plugin("plugin-manager"),
                    |action: &Action| action.launches_plugin("configuration"),
                    |action: &Action| action.launches_plugin("zellij:about"),
                ];
                Self::find_predetermined_actions(mode_info, mode, ordered_predicates)
            },
            // Free-text-entry modes have no keybind hints to show.
            InputMode::EnterSearch
            | InputMode::RenameTab
            | InputMode::RenamePane
            | InputMode::Prompt
            | InputMode::Tmux => Vec::new(),
        }
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/clipboard_utils.rs | default-plugins/compact-bar/src/clipboard_utils.rs | use crate::LinePart;
use zellij_tile::prelude::*;
/// Builds the status-line hint shown after text was copied.
///
/// The wording depends on where the text went; on macOS there is no primary
/// selection, so `Primary` reuses the clipboard wording there.
pub fn text_copied_hint(copy_destination: CopyDestination) -> LinePart {
    let hint = match copy_destination {
        CopyDestination::Command => "Text piped to external command",
        #[cfg(not(target_os = "macos"))]
        CopyDestination::Primary => "Text copied to system primary selection",
        #[cfg(target_os = "macos")] // primary selection does not exist on macos
        CopyDestination::Primary => "Text copied to system clipboard",
        CopyDestination::System => "Text copied to system clipboard",
    };
    let styled = Text::new(&hint).color_range(2, ..).opaque();
    LinePart {
        part: serialize_text(&styled),
        len: hint.len(),
        tab_index: None,
    }
}
/// Builds the status-line hint shown when using the system clipboard failed.
pub fn system_clipboard_error() -> LinePart {
    let hint = " Error using the system clipboard.";
    let styled = Text::new(&hint).color_range(2, ..).opaque();
    LinePart {
        part: serialize_text(&styled),
        len: hint.len(),
        tab_index: None,
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/main.rs | default-plugins/compact-bar/src/main.rs | mod action_types;
mod clipboard_utils;
mod keybind_utils;
mod line;
mod tab;
mod tooltip;
use std::cmp::{max, min};
use std::collections::BTreeMap;
use std::convert::TryInto;
use tab::get_tab_to_focus;
use zellij_tile::prelude::*;
use crate::clipboard_utils::{system_clipboard_error, text_copied_hint};
use crate::line::tab_line;
use crate::tab::tab_style;
use crate::tooltip::TooltipRenderer;
static ARROW_SEPARATOR: &str = "";
const CONFIG_IS_TOOLTIP: &str = "is_tooltip";
const CONFIG_TOGGLE_TOOLTIP_KEY: &str = "tooltip";
const MSG_TOGGLE_TOOLTIP: &str = "toggle_tooltip";
const MSG_TOGGLE_PERSISTED_TOOLTIP: &str = "toggle_persisted_tooltip";
const MSG_LAUNCH_TOOLTIP: &str = "launch_tooltip_if_not_launched";
/// One renderable segment of the tab line.
#[derive(Debug, Default)]
pub struct LinePart {
    part: String, // pre-serialized styled text
    len: usize,
    tab_index: Option<usize>, // index of the tab this part represents, if any
}
/// Plugin state shared by both roles of this plugin: the tab line proper and
/// the keybind tooltip (a second instance started with `is_tooltip=true`).
#[derive(Default)]
struct State {
    // Tab state
    tabs: Vec<TabInfo>,
    active_tab_idx: usize, // 1-based (see handle_tab_update); 0 = not yet seen
    // Display state
    mode_info: ModeInfo,
    tab_line: Vec<LinePart>,
    display_area_rows: usize,
    display_area_cols: usize,
    // Clipboard state
    text_copy_destination: Option<CopyDestination>,
    display_system_clipboard_failure: bool,
    // Plugin configuration
    config: BTreeMap<String, String>,
    own_plugin_id: Option<u32>,
    toggle_tooltip_key: Option<String>, // user-configured key, main instance only
    // Tooltip state
    is_tooltip: bool,        // true when this instance renders the tooltip
    tooltip_is_active: bool, // a tooltip pane was detected in the pane manifest
    persist: bool,           // keep the tooltip open across mode changes
    is_first_run: bool,      // cleared on the first event after load
    own_tab_index: Option<usize>, // 0-based tab hosting this plugin's pane
    own_client_id: u16,
}
/// Pre-computed pieces needed to render the tab line.
struct TabRenderData {
    tabs: Vec<LinePart>,
    active_tab_index: usize,
    active_swap_layout_name: Option<String>,
    is_swap_layout_dirty: bool,
}
register_plugin!(State);
impl ZellijPlugin for State {
    /// One-time setup: records identity, parses configuration, subscribes to
    /// events and (for the main instance) binds the tooltip toggle key.
    fn load(&mut self, configuration: BTreeMap<String, String>) {
        let plugin_ids = get_plugin_ids();
        self.own_plugin_id = Some(plugin_ids.plugin_id);
        self.own_client_id = plugin_ids.client_id;
        self.initialize_configuration(configuration);
        self.setup_subscriptions();
        self.configure_keybinds();
    }
    /// Dispatches events to role-specific handlers; the returned bool asks
    /// for a re-render.
    fn update(&mut self, event: Event) -> bool {
        self.is_first_run = false;
        match event {
            Event::ModeUpdate(mode_info) => self.handle_mode_update(mode_info),
            Event::TabUpdate(tabs) => self.handle_tab_update(tabs),
            Event::PaneUpdate(pane_manifest) => self.handle_pane_update(pane_manifest),
            Event::Mouse(mouse_event) => {
                self.handle_mouse_event(mouse_event);
                false
            },
            Event::CopyToClipboard(copy_destination) => {
                self.handle_clipboard_copy(copy_destination)
            },
            Event::SystemClipboardFailure => self.handle_clipboard_failure(),
            Event::InputReceived => self.handle_input_received(),
            _ => false,
        }
    }
    /// Handles tooltip control messages; the main instance only reacts to a
    /// toggle message addressed to its own client and hosted tab.
    fn pipe(&mut self, message: PipeMessage) -> bool {
        if self.is_tooltip && message.is_private {
            self.handle_tooltip_pipe(message);
        } else if message.name == MSG_TOGGLE_TOOLTIP
            && message.is_private
            && self.toggle_tooltip_key.is_some()
            // only launch once per plugin instance
            && self.own_tab_index == Some(self.active_tab_idx.saturating_sub(1))
            // only launch once per client of plugin instance
            && Some(format!("{}", self.own_client_id)) == message.payload
        {
            self.toggle_persisted_tooltip(self.mode_info.mode);
        }
        false
    }
    /// Renders either the floating tooltip or the tab line, depending on
    /// this instance's role.
    fn render(&mut self, rows: usize, cols: usize) {
        if self.is_tooltip {
            self.render_tooltip(rows, cols);
        } else {
            self.render_tab_line(cols);
        }
    }
}
impl State {
fn initialize_configuration(&mut self, configuration: BTreeMap<String, String>) {
self.config = configuration.clone();
self.is_tooltip = self.parse_bool_config(CONFIG_IS_TOOLTIP, false);
if !self.is_tooltip {
if let Some(tooltip_toggle_key) = configuration.get(CONFIG_TOGGLE_TOOLTIP_KEY) {
self.toggle_tooltip_key = Some(tooltip_toggle_key.clone());
}
}
if self.is_tooltip {
self.is_first_run = true;
}
}
    /// Subscribes to the events each role needs; a tooltip instance only
    /// tracks mode and tab changes.
    fn setup_subscriptions(&self) {
        set_selectable(false);
        let events = if self.is_tooltip {
            vec![EventType::ModeUpdate, EventType::TabUpdate]
        } else {
            vec![
                EventType::TabUpdate,
                EventType::PaneUpdate,
                EventType::ModeUpdate,
                EventType::Mouse,
                EventType::CopyToClipboard,
                EventType::InputReceived,
                EventType::SystemClipboardFailure,
            ]
        };
        subscribe(&events);
    }
fn configure_keybinds(&self) {
if !self.is_tooltip && self.toggle_tooltip_key.is_some() {
if let Some(toggle_key) = &self.toggle_tooltip_key {
reconfigure(
bind_toggle_key_config(toggle_key, self.own_client_id),
false,
);
}
}
}
fn parse_bool_config(&self, key: &str, default: bool) -> bool {
self.config
.get(key)
.and_then(|v| v.parse().ok())
.unwrap_or(default)
}
// Event handlers
    /// Stores the new mode info and forwards the transition to the
    /// role-specific handler; re-renders only when the info changed.
    fn handle_mode_update(&mut self, mode_info: ModeInfo) -> bool {
        let should_render = self.mode_info != mode_info;
        let old_mode = self.mode_info.mode;
        let new_mode = mode_info.mode;
        // base_mode is the mode the user "rests" in (usually Normal).
        let base_mode = mode_info.base_mode.unwrap_or(InputMode::Normal);
        self.mode_info = mode_info;
        if self.is_tooltip {
            self.handle_tooltip_mode_update(old_mode, new_mode, base_mode);
        } else {
            self.handle_main_mode_update(new_mode, base_mode);
        }
        should_render
    }
    /// Main-instance reaction to a mode change: launch the tooltip when
    /// entering a non-base, non-restricted mode (and tooltips are enabled).
    fn handle_main_mode_update(&self, new_mode: InputMode, base_mode: InputMode) {
        if self.toggle_tooltip_key.is_some()
            && new_mode != base_mode
            && !self.is_restricted_mode(new_mode)
        {
            self.launch_tooltip_if_not_launched(new_mode);
        }
    }
    /// Tooltip-instance reaction to a mode change: close when returning to
    /// the base mode or entering a restricted mode (unless persisted),
    /// otherwise refresh the tooltip's title and position.
    fn handle_tooltip_mode_update(
        &mut self,
        old_mode: InputMode,
        new_mode: InputMode,
        base_mode: InputMode,
    ) {
        if !self.persist && (new_mode == base_mode || self.is_restricted_mode(new_mode)) {
            close_self();
        } else if new_mode != old_mode || self.persist {
            self.update_tooltip_for_mode_change(new_mode);
        }
    }
    /// Caches tab info and tracks the active tab (stored 1-based).
    ///
    /// A tooltip instance follows focus by moving its own pane to the newly
    /// active tab.
    fn handle_tab_update(&mut self, tabs: Vec<TabInfo>) -> bool {
        self.update_display_area(&tabs);
        if let Some(active_tab_index) = tabs.iter().position(|t| t.active) {
            let active_tab_idx = active_tab_index + 1; // Convert to 1-based indexing
            let should_render = self.active_tab_idx != active_tab_idx || self.tabs != tabs;
            if self.is_tooltip && self.active_tab_idx != active_tab_idx {
                self.move_tooltip_to_new_tab(active_tab_idx);
            }
            self.active_tab_idx = active_tab_idx;
            self.tabs = tabs;
            should_render
        } else {
            // No active tab in this update; keep previous state unchanged.
            false
        }
    }
    /// Tracks whether a tooltip pane currently exists and which tab hosts
    /// this plugin's own pane; re-renders when tooltip presence flips.
    fn handle_pane_update(&mut self, pane_manifest: PaneManifest) -> bool {
        if self.toggle_tooltip_key.is_some() {
            let previous_tooltip_state = self.tooltip_is_active;
            self.tooltip_is_active = self.detect_tooltip_presence(&pane_manifest);
            self.own_tab_index = self.find_own_tab_index(&pane_manifest);
            previous_tooltip_state != self.tooltip_is_active
        } else {
            false
        }
    }
    /// Mouse support on the tab line: click focuses a tab, the wheel cycles
    /// between adjacent tabs. A tooltip instance ignores the mouse entirely.
    fn handle_mouse_event(&mut self, mouse_event: Mouse) {
        if self.is_tooltip {
            return;
        }
        match mouse_event {
            Mouse::LeftClick(_, col) => self.handle_tab_click(col),
            Mouse::ScrollUp(_) => self.scroll_tab_up(),
            Mouse::ScrollDown(_) => self.scroll_tab_down(),
            _ => {},
        }
    }
fn handle_clipboard_copy(&mut self, copy_destination: CopyDestination) -> bool {
if self.is_tooltip {
return false;
}
let should_render = match self.text_copy_destination {
Some(current) => current != copy_destination,
None => true,
};
self.text_copy_destination = Some(copy_destination);
should_render
}
fn handle_clipboard_failure(&mut self) -> bool {
if self.is_tooltip {
return false;
}
self.display_system_clipboard_failure = true;
true
}
fn handle_input_received(&mut self) -> bool {
if self.is_tooltip {
return false;
}
let should_render =
self.text_copy_destination.is_some() || self.display_system_clipboard_failure;
self.clear_clipboard_state();
should_render
}
    /// Tooltip-instance handling of the persisted-toggle message: a freshly
    /// launched tooltip becomes persistent; an already-running one closes.
    fn handle_tooltip_pipe(&mut self, message: PipeMessage) {
        if message.name == MSG_TOGGLE_PERSISTED_TOOLTIP {
            if self.is_first_run {
                self.persist = true;
            } else {
                // close_self only exists on the wasm plugin target.
                #[cfg(target_family = "wasm")]
                close_self();
            }
        }
    }
// Helper methods
fn update_display_area(&mut self, tabs: &[TabInfo]) {
for tab in tabs {
if tab.active {
self.display_area_rows = tab.display_area_rows;
self.display_area_cols = tab.display_area_columns;
break;
}
}
}
fn detect_tooltip_presence(&self, pane_manifest: &PaneManifest) -> bool {
for (_tab_index, panes) in &pane_manifest.panes {
for pane in panes {
if pane.plugin_url == Some("zellij:compact-bar".to_owned())
&& pane.pane_x != pane.pane_content_x
{
return true;
}
}
}
false
}
fn find_own_tab_index(&self, pane_manifest: &PaneManifest) -> Option<usize> {
for (tab_index, panes) in &pane_manifest.panes {
for pane in panes {
if pane.is_plugin && Some(pane.id) == self.own_plugin_id {
return Some(*tab_index);
}
}
}
None
}
    /// Focuses the tab whose rendered segment contains the clicked column.
    fn handle_tab_click(&self, col: usize) {
        if let Some(tab_idx) = get_tab_to_focus(&self.tab_line, self.active_tab_idx, col) {
            // usize -> u32; cannot realistically overflow for tab counts.
            switch_tab_to(tab_idx.try_into().unwrap());
        }
    }
    /// Wheel-up: focus the next tab, clamped to the last one
    /// (active_tab_idx is 1-based, so tabs.len() is the maximum).
    fn scroll_tab_up(&self) {
        let next_tab = min(self.active_tab_idx + 1, self.tabs.len());
        switch_tab_to(next_tab as u32);
    }
    /// Wheel-down: focus the previous tab, clamped to the first (index 1).
    fn scroll_tab_down(&self) {
        let prev_tab = max(self.active_tab_idx.saturating_sub(1), 1);
        switch_tab_to(prev_tab as u32);
    }
    /// Resets both clipboard hint flags (copied-text and failure).
    fn clear_clipboard_state(&mut self) {
        self.text_copy_destination = None;
        self.display_system_clipboard_failure = false;
    }
fn is_restricted_mode(&self, mode: InputMode) -> bool {
matches!(
mode,
InputMode::Locked
| InputMode::EnterSearch
| InputMode::RenameTab
| InputMode::RenamePane
| InputMode::Prompt
| InputMode::Tmux
)
}
// Tooltip operations
    /// Sends the persisted-toggle message; the receiving tooltip instance
    /// either becomes persistent (if new) or closes (if already running).
    fn toggle_persisted_tooltip(&self, new_mode: InputMode) {
        // `message` is unused on non-wasm targets because the pipe call
        // below is cfg-gated.
        #[allow(unused_variables)]
        let message = self
            .create_tooltip_message(MSG_TOGGLE_PERSISTED_TOOLTIP, new_mode)
            .with_args(self.create_persist_args());
        #[cfg(target_family = "wasm")]
        pipe_message_to_plugin(message);
    }
    /// Pipe a launch message for a tooltip showing `new_mode`'s keybinds;
    /// per the method name this is presumably a no-op when a tooltip instance
    /// already exists — handled on the receiving side, TODO confirm.
    fn launch_tooltip_if_not_launched(&self, new_mode: InputMode) {
        let message = self.create_tooltip_message(MSG_LAUNCH_TOOLTIP, new_mode);
        pipe_message_to_plugin(message);
    }
fn create_tooltip_message(&self, name: &str, mode: InputMode) -> MessageToPlugin {
let mut tooltip_config = self.config.clone();
tooltip_config.insert(CONFIG_IS_TOOLTIP.to_string(), "true".to_string());
MessageToPlugin::new(name)
.with_plugin_url("zellij:OWN_URL")
.with_plugin_config(tooltip_config)
.with_floating_pane_coordinates(self.calculate_tooltip_coordinates())
.new_plugin_instance_should_have_pane_title(format!("{:?}", mode))
}
fn create_persist_args(&self) -> BTreeMap<String, String> {
let mut args = BTreeMap::new();
args.insert("persist".to_string(), String::new());
args
}
    /// Resize/reposition our own floating pane and retitle it after an input
    /// mode change (the tooltip's dimensions are computed from the mode).
    fn update_tooltip_for_mode_change(&self, new_mode: InputMode) {
        if let Some(plugin_id) = self.own_plugin_id {
            let coordinates = self.calculate_tooltip_coordinates();
            change_floating_panes_coordinates(vec![(PaneId::Plugin(plugin_id), coordinates)]);
            rename_plugin_pane(plugin_id, format!("{:?}", new_mode));
        }
    }
    /// Move our own pane to the tab at `new_tab_index` (1-based, converted to
    /// the 0-based index the API expects).
    fn move_tooltip_to_new_tab(&self, new_tab_index: usize) {
        if let Some(plugin_id) = self.own_plugin_id {
            break_panes_to_tab_with_index(
                &[PaneId::Plugin(plugin_id)],
                new_tab_index.saturating_sub(1), // Convert to 0-based indexing
                false,
            );
        }
    }
    /// Compute the floating-pane geometry for the tooltip: sized to fit the
    /// rendered keybind table for the current mode, anchored near the
    /// bottom-left corner of the display.
    fn calculate_tooltip_coordinates(&self) -> FloatingPaneCoordinates {
        let tooltip_renderer = TooltipRenderer::new(&self.mode_info);
        let (tooltip_rows, tooltip_cols) =
            tooltip_renderer.calculate_dimensions(self.mode_info.mode);
        let width = tooltip_cols + 4; // 2 for borders, 2 for padding
        let height = tooltip_rows + 2; // 2 for borders
        let x_position = 2;
        // Sit 2 rows above the bottom edge (saturating on tiny displays)
        let y_position = self.display_area_rows.saturating_sub(height + 2);
        FloatingPaneCoordinates::new(
            Some(x_position.to_string()),
            Some(y_position.to_string()),
            Some(width.to_string()),
            Some(height.to_string()),
            Some(true),
        )
        .unwrap_or_default()
    }
// Rendering
    /// Draw the tooltip body (keybind help for the current mode) into our pane.
    fn render_tooltip(&self, rows: usize, cols: usize) {
        let tooltip_renderer = TooltipRenderer::new(&self.mode_info);
        tooltip_renderer.render(rows, cols);
    }
fn render_tab_line(&mut self, cols: usize) {
if let Some(copy_destination) = self.text_copy_destination {
self.render_clipboard_hint(copy_destination);
} else if self.display_system_clipboard_failure {
self.render_clipboard_error();
} else {
self.render_tabs(cols);
}
}
    /// Replace the tab line with a "text copied to <destination>" notice.
    fn render_clipboard_hint(&self, copy_destination: CopyDestination) {
        let hint = text_copied_hint(copy_destination).part;
        self.render_background_with_text(&hint);
    }
    /// Replace the tab line with the system-clipboard failure notice.
    fn render_clipboard_error(&self) {
        let hint = system_clipboard_error().part;
        self.render_background_with_text(&hint);
    }
    /// Print `text`, then paint the remainder of the line with the theme's
    /// unselected-text background: SGR 48;2 (truecolor) or 48;5 (indexed)
    /// followed by EL (`ESC[0K`) to fill to end of line.
    fn render_background_with_text(&self, text: &str) {
        let background = self.mode_info.style.colors.text_unselected.background;
        match background {
            PaletteColor::Rgb((r, g, b)) => {
                print!("{}\u{1b}[48;2;{};{};{}m\u{1b}[0K", text, r, g, b);
            },
            PaletteColor::EightBit(color) => {
                print!("{}\u{1b}[48;5;{}m\u{1b}[0K", text, color);
            },
        }
    }
fn render_tabs(&mut self, cols: usize) {
if self.tabs.is_empty() {
return;
}
let tab_data = self.prepare_tab_data();
self.tab_line = tab_line(
&self.mode_info,
tab_data,
cols,
self.toggle_tooltip_key.clone(),
self.tooltip_is_active,
);
let output = self
.tab_line
.iter()
.fold(String::new(), |acc, part| acc + &part.part);
self.render_background_with_text(&output);
}
    /// Collect the styled per-tab parts plus the metadata the tab line needs:
    /// the active tab's position and its swap-layout status. The alternate
    /// styling flag flips on every tab to visually distinguish neighbours.
    fn prepare_tab_data(&self) -> TabRenderData {
        let mut all_tabs = Vec::new();
        let mut active_tab_index = 0;
        let mut active_swap_layout_name = None;
        let mut is_swap_layout_dirty = false;
        let mut is_alternate_tab = false;
        for tab in &self.tabs {
            let tab_name = self.get_tab_display_name(tab);
            if tab.active {
                active_tab_index = tab.position;
                // Swap-layout info is suppressed while renaming a tab —
                // presumably to reduce visual noise; TODO confirm intent
                if self.mode_info.mode != InputMode::RenameTab {
                    is_swap_layout_dirty = tab.is_swap_layout_dirty;
                    active_swap_layout_name = tab.active_swap_layout_name.clone();
                }
            }
            let styled_tab = tab_style(
                tab_name,
                tab,
                is_alternate_tab,
                self.mode_info.style.colors,
                self.mode_info.capabilities,
            );
            is_alternate_tab = !is_alternate_tab;
            all_tabs.push(styled_tab);
        }
        TabRenderData {
            tabs: all_tabs,
            active_tab_index,
            active_swap_layout_name,
            is_swap_layout_dirty,
        }
    }
fn get_tab_display_name(&self, tab: &TabInfo) -> String {
let mut tab_name = tab.name.clone();
if tab.active && self.mode_info.mode == InputMode::RenameTab && tab_name.is_empty() {
tab_name = "Enter name...".to_string();
}
tab_name
}
}
fn bind_toggle_key_config(toggle_key: &str, client_id: u16) -> String {
format!(
r#"
keybinds {{
shared {{
bind "{}" {{
MessagePlugin "compact-bar" {{
name "toggle_tooltip"
tooltip "{}"
payload "{}"
}}
}}
}}
}}
"#,
toggle_key, toggle_key, client_id
)
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/line.rs | default-plugins/compact-bar/src/line.rs | use ansi_term::ANSIStrings;
use unicode_width::UnicodeWidthStr;
use crate::{LinePart, TabRenderData, ARROW_SEPARATOR};
use zellij_tile::prelude::*;
use zellij_tile_utils::style;
pub fn tab_line(
mode_info: &ModeInfo,
tab_data: TabRenderData,
cols: usize,
toggle_tooltip_key: Option<String>,
tooltip_is_active: bool,
) -> Vec<LinePart> {
let config = TabLineConfig {
session_name: mode_info.session_name.to_owned(),
hide_session_name: mode_info.style.hide_session_name,
mode: mode_info.mode,
active_swap_layout_name: tab_data.active_swap_layout_name,
is_swap_layout_dirty: tab_data.is_swap_layout_dirty,
toggle_tooltip_key,
tooltip_is_active,
};
let builder = TabLineBuilder::new(config, mode_info.style.colors, mode_info.capabilities, cols);
builder.build(tab_data.tabs, tab_data.active_tab_index)
}
/// Everything the tab-line builder needs besides the styled tabs themselves.
#[derive(Debug, Clone)]
pub struct TabLineConfig {
    pub session_name: Option<String>,
    pub hide_session_name: bool,
    pub mode: InputMode,
    // Swap-layout status of the active tab, if any
    pub active_swap_layout_name: Option<String>,
    pub is_swap_layout_dirty: bool,
    // Key hint and state for the tooltip indicator on the right side
    pub toggle_tooltip_key: Option<String>,
    pub tooltip_is_active: bool,
}
/// Sum of the display widths (`len`) of all parts.
fn calculate_total_length(parts: &[LinePart]) -> usize {
    parts.iter().fold(0, |total, part| total + part.len)
}
/// Greedily fills the available width with tabs around the active one,
/// producing "← +N" / "+N →" indicators for tabs that did not fit.
struct TabLinePopulator {
    cols: usize,
    palette: Styling,
    capabilities: PluginCapabilities,
}
impl TabLinePopulator {
    fn new(cols: usize, palette: Styling, capabilities: PluginCapabilities) -> Self {
        Self {
            cols,
            palette,
            capabilities,
        }
    }
    /// Move tabs from `tabs_before_active` / `tabs_after_active` into
    /// `tabs_to_render` (which starts with just the active tab) while they fit
    /// into `self.cols`, alternating sides to keep the active tab roughly
    /// centered; when nothing more fits, collapsed-count indicators are added
    /// at both ends.
    fn populate_tabs(
        &self,
        tabs_before_active: &mut Vec<LinePart>,
        tabs_after_active: &mut Vec<LinePart>,
        tabs_to_render: &mut Vec<LinePart>,
    ) {
        let mut middle_size = calculate_total_length(tabs_to_render);
        // Running widths taken from each side, used to balance left vs right
        let mut total_left = 0;
        let mut total_right = 0;
        loop {
            let left_count = tabs_before_active.len();
            let right_count = tabs_after_active.len();
            let collapsed_indicators =
                self.create_collapsed_indicators(left_count, right_count, tabs_to_render.len());
            let total_size =
                collapsed_indicators.left.len + middle_size + collapsed_indicators.right.len;
            // Already over budget (e.g. the active tab alone is too wide):
            // bail out without appending indicators
            if total_size > self.cols {
                break;
            }
            // Width of the next candidate on each side; MAX when the side is
            // exhausted so it can never be judged to "fit"
            let tab_sizes = TabSizes {
                left: tabs_before_active.last().map_or(usize::MAX, |tab| tab.len),
                right: tabs_after_active.get(0).map_or(usize::MAX, |tab| tab.len),
            };
            let fit_analysis = self.analyze_tab_fit(
                &tab_sizes,
                total_size,
                left_count,
                right_count,
                &collapsed_indicators,
            );
            match self.decide_next_action(&fit_analysis, total_left, total_right) {
                TabAction::AddLeft => {
                    if let Some(tab) = tabs_before_active.pop() {
                        middle_size += tab.len;
                        total_left += tab.len;
                        tabs_to_render.insert(0, tab);
                    }
                },
                TabAction::AddRight => {
                    if !tabs_after_active.is_empty() {
                        let tab = tabs_after_active.remove(0);
                        middle_size += tab.len;
                        total_right += tab.len;
                        tabs_to_render.push(tab);
                    }
                },
                TabAction::Finish => {
                    tabs_to_render.insert(0, collapsed_indicators.left);
                    tabs_to_render.push(collapsed_indicators.right);
                    break;
                },
            }
        }
    }
    /// Build the "← +N" / "+N →" indicators for the tabs still collapsed on
    /// each side; each carries the tab index it should focus when clicked.
    fn create_collapsed_indicators(
        &self,
        left_count: usize,
        right_count: usize,
        rendered_count: usize,
    ) -> CollapsedIndicators {
        let left_more_tab_index = left_count.saturating_sub(1);
        let right_more_tab_index = left_count + rendered_count;
        CollapsedIndicators {
            left: self.create_left_indicator(left_count, left_more_tab_index),
            right: self.create_right_indicator(right_count, right_more_tab_index),
        }
    }
    /// Would adding the next left/right tab still fit? Taking the last tab of
    /// a side makes that side's indicator disappear, which frees its width —
    /// hence the conditional subtraction.
    fn analyze_tab_fit(
        &self,
        tab_sizes: &TabSizes,
        total_size: usize,
        left_count: usize,
        right_count: usize,
        collapsed_indicators: &CollapsedIndicators,
    ) -> TabFitAnalysis {
        let size_by_adding_left =
            tab_sizes
                .left
                .saturating_add(total_size)
                .saturating_sub(if left_count == 1 {
                    collapsed_indicators.left.len
                } else {
                    0
                });
        let size_by_adding_right =
            tab_sizes
                .right
                .saturating_add(total_size)
                .saturating_sub(if right_count == 1 {
                    collapsed_indicators.right.len
                } else {
                    0
                });
        TabFitAnalysis {
            left_fits: size_by_adding_left <= self.cols,
            right_fits: size_by_adding_right <= self.cols,
        }
    }
    /// Prefer the side that has consumed less width so far (left on ties),
    /// falling back to the other side; finish when neither fits.
    fn decide_next_action(
        &self,
        fit_analysis: &TabFitAnalysis,
        total_left: usize,
        total_right: usize,
    ) -> TabAction {
        if (total_left <= total_right || !fit_analysis.right_fits) && fit_analysis.left_fits {
            TabAction::AddLeft
        } else if fit_analysis.right_fits {
            TabAction::AddRight
        } else {
            TabAction::Finish
        }
    }
    /// "← +N" indicator, or an empty part when no tabs are collapsed left.
    fn create_left_indicator(&self, tab_count: usize, tab_index: usize) -> LinePart {
        if tab_count == 0 {
            return LinePart::default();
        }
        let more_text = self.format_count_text(tab_count, "← +{}", " ← +many ");
        self.create_styled_indicator(more_text, tab_index)
    }
    /// "+N →" indicator, or an empty part when no tabs are collapsed right.
    fn create_right_indicator(&self, tab_count: usize, tab_index: usize) -> LinePart {
        if tab_count == 0 {
            return LinePart::default();
        }
        let more_text = self.format_count_text(tab_count, "+{} →", " +many → ");
        self.create_styled_indicator(more_text, tab_index)
    }
    /// Substitute the count into `format_str`; use `fallback` for absurdly
    /// large counts so the indicator width stays bounded.
    fn format_count_text(&self, count: usize, format_str: &str, fallback: &str) -> String {
        if count < 10000 {
            format!(" {} ", format_str.replace("{}", &count.to_string()))
        } else {
            fallback.to_string()
        }
    }
    /// Wrap indicator text in separators and theme colors; `tab_index` makes
    /// the indicator clickable.
    fn create_styled_indicator(&self, text: String, tab_index: usize) -> LinePart {
        let separator = tab_separator(self.capabilities);
        let text_len = text.width() + 2 * separator.width();
        let colors = IndicatorColors {
            text: self.palette.ribbon_unselected.base,
            separator: self.palette.text_unselected.background,
            background: self.palette.text_selected.emphasis_0,
        };
        let styled_parts = [
            style!(colors.separator, colors.background).paint(separator),
            style!(colors.text, colors.background).bold().paint(text),
            style!(colors.background, colors.separator).paint(separator),
        ];
        LinePart {
            part: ANSIStrings(&styled_parts).to_string(),
            len: text_len,
            tab_index: Some(tab_index),
        }
    }
}
/// The "← +N" / "+N →" parts shown when some tabs are collapsed.
#[derive(Debug)]
struct CollapsedIndicators {
    left: LinePart,
    right: LinePart,
}
/// Width of the next candidate tab on each side (`usize::MAX` when absent).
#[derive(Debug)]
struct TabSizes {
    left: usize,
    right: usize,
}
/// Whether adding the next left/right tab would still fit the available width.
#[derive(Debug)]
struct TabFitAnalysis {
    left_fits: bool,
    right_fits: bool,
}
/// Color triple used when styling a tab-line segment.
#[derive(Debug)]
struct IndicatorColors {
    text: PaletteColor,
    separator: PaletteColor,
    background: PaletteColor,
}
/// Next step of the tab-filling loop.
#[derive(Debug)]
enum TabAction {
    AddLeft,
    AddRight,
    Finish,
}
/// Builds the left-hand prefix of the tab line: the "Zellij" logo, an optional
/// session name, and the current input mode.
struct TabLinePrefixBuilder {
    palette: Styling,
    cols: usize,
}
impl TabLinePrefixBuilder {
    fn new(palette: Styling, cols: usize) -> Self {
        Self { palette, cols }
    }
    /// Build the prefix parts in order: logo, then session name and mode —
    /// each optional part is dropped (not truncated) when it would not fit.
    fn build(&self, session_name: Option<&str>, mode: InputMode) -> Vec<LinePart> {
        let mut parts = vec![self.create_zellij_part()];
        let mut used_len = parts.get(0).map_or(0, |p| p.len);
        if let Some(name) = session_name {
            if let Some(name_part) = self.create_session_name_part(name, used_len) {
                used_len += name_part.len;
                parts.push(name_part);
            }
        }
        if let Some(mode_part) = self.create_mode_part(mode, used_len) {
            parts.push(mode_part);
        }
        parts
    }
    /// The bold " Zellij " logo; char count equals width for this ASCII text.
    fn create_zellij_part(&self) -> LinePart {
        let prefix_text = " Zellij ";
        let colors = self.get_text_colors();
        LinePart {
            part: style!(colors.text, colors.background)
                .bold()
                .paint(prefix_text)
                .to_string(),
            len: prefix_text.chars().count(),
            tab_index: None,
        }
    }
    /// "(name)" in bold, or None when the remaining width cannot hold it.
    fn create_session_name_part(&self, name: &str, used_len: usize) -> Option<LinePart> {
        let name_part = format!("({})", name);
        let name_part_len = name_part.width();
        if self.cols.saturating_sub(used_len) >= name_part_len {
            let colors = self.get_text_colors();
            Some(LinePart {
                part: style!(colors.text, colors.background)
                    .bold()
                    .paint(name_part)
                    .to_string(),
                len: name_part_len,
                tab_index: None,
            })
        } else {
            None
        }
    }
    /// Uppercased mode name, color-coded per mode (Locked/Normal get their own
    /// emphasis colors), or None when it does not fit.
    fn create_mode_part(&self, mode: InputMode, used_len: usize) -> Option<LinePart> {
        let mode_text = format!(" {} ", format!("{:?}", mode).to_uppercase());
        let mode_len = mode_text.width();
        if self.cols.saturating_sub(used_len) >= mode_len {
            let colors = self.get_text_colors();
            let style = match mode {
                InputMode::Locked => {
                    style!(self.palette.text_unselected.emphasis_3, colors.background)
                },
                InputMode::Normal => {
                    style!(self.palette.text_unselected.emphasis_2, colors.background)
                },
                _ => style!(self.palette.text_unselected.emphasis_0, colors.background),
            };
            Some(LinePart {
                part: style.bold().paint(mode_text).to_string(),
                len: mode_len,
                tab_index: None,
            })
        } else {
            None
        }
    }
    /// Unselected-text theme colors; separator is unused here but required by
    /// the shared IndicatorColors struct.
    fn get_text_colors(&self) -> IndicatorColors {
        IndicatorColors {
            text: self.palette.text_unselected.base,
            background: self.palette.text_unselected.background,
            separator: self.palette.text_unselected.background,
        }
    }
}
/// Builds the right-aligned tab-line elements: the tooltip toggle indicator
/// and the swap-layout status.
struct RightSideElementsBuilder {
    palette: Styling,
    capabilities: PluginCapabilities,
}
impl RightSideElementsBuilder {
    fn new(palette: Styling, capabilities: PluginCapabilities) -> Self {
        Self {
            palette,
            capabilities,
        }
    }
    /// Build the right-aligned elements in display order: optional tooltip
    /// toggle indicator, then optional swap-layout status.
    fn build(&self, config: &TabLineConfig, available_space: usize) -> Vec<LinePart> {
        let mut elements = Vec::new();
        if let Some(ref tooltip_key) = config.toggle_tooltip_key {
            elements.push(self.create_tooltip_indicator(tooltip_key, config.tooltip_is_active));
        }
        if let Some(swap_status) = self.create_swap_layout_status(config, available_space) {
            elements.push(swap_status);
        }
        elements
    }
    /// Render "<key> Tooltip", highlighting the ribbon while the tooltip is open.
    fn create_tooltip_indicator(&self, toggle_key: &str, is_active: bool) -> LinePart {
        let key_text = toggle_key;
        let key = Text::new(key_text).color_all(3).opaque();
        let ribbon_text = "Tooltip";
        let mut ribbon = Text::new(ribbon_text);
        if is_active {
            ribbon = ribbon.selected();
        }
        LinePart {
            part: format!("{} {}", serialize_text(&key), serialize_ribbon(&ribbon)),
            // + 6: the space between key and ribbon plus ribbon decoration —
            // assumed to match serialize_ribbon's output width; TODO confirm
            len: key_text.chars().count() + ribbon_text.chars().count() + 6,
            tab_index: None,
        }
    }
    /// Render the active swap-layout name (uppercased, space-padded) if it fits
    /// into `max_len` columns; None when there is no swap layout or no room.
    fn create_swap_layout_status(
        &self,
        config: &TabLineConfig,
        max_len: usize,
    ) -> Option<LinePart> {
        let swap_layout_name = config.active_swap_layout_name.as_ref()?;
        let mut layout_name = format!(" {} ", swap_layout_name);
        layout_name.make_ascii_uppercase();
        // + 3: extra width for the separator glyphs around the name — assumed
        // to match the styled separators below; TODO confirm
        let layout_name_len = layout_name.len() + 3;
        let colors = SwapLayoutColors {
            bg: self.palette.text_unselected.background,
            fg: self.palette.ribbon_unselected.background,
            green: self.palette.ribbon_selected.background,
        };
        let separator = tab_separator(self.capabilities);
        let styled_parts = self.create_swap_layout_styled_parts(
            &layout_name,
            config.mode,
            config.is_swap_layout_dirty,
            &colors,
            separator,
        );
        let indicator = format!("{}{}{}", styled_parts.0, styled_parts.1, styled_parts.2);
        // NOTE(review): the previous version had a second fallback branch
        // guarded by `layout_name_len + 1 <= max_len`, which was only reached
        // after `layout_name_len <= max_len` had already failed — i.e. never.
        // The unreachable branch (and a redundant clone of `indicator`) has
        // been removed; behavior is unchanged.
        if layout_name_len <= max_len {
            Some(LinePart {
                part: indicator,
                len: layout_name_len,
                tab_index: None,
            })
        } else {
            None
        }
    }
    /// Style (left separator, name, right separator): Locked mode renders the
    /// name in italics, a dirty layout in bold on the neutral ribbon colors,
    /// and a clean layout in bold on the selected ("green") colors.
    fn create_swap_layout_styled_parts(
        &self,
        layout_name: &str,
        mode: InputMode,
        is_dirty: bool,
        colors: &SwapLayoutColors,
        separator: &str,
    ) -> (String, String, String) {
        match mode {
            InputMode::Locked => (
                style!(colors.bg, colors.fg).paint(separator).to_string(),
                style!(colors.bg, colors.fg)
                    .italic()
                    .paint(layout_name)
                    .to_string(),
                style!(colors.fg, colors.bg).paint(separator).to_string(),
            ),
            _ if is_dirty => (
                style!(colors.bg, colors.fg).paint(separator).to_string(),
                style!(colors.bg, colors.fg)
                    .bold()
                    .paint(layout_name)
                    .to_string(),
                style!(colors.fg, colors.bg).paint(separator).to_string(),
            ),
            _ => (
                style!(colors.bg, colors.green).paint(separator).to_string(),
                style!(colors.bg, colors.green)
                    .bold()
                    .paint(layout_name)
                    .to_string(),
                style!(colors.green, colors.bg).paint(separator).to_string(),
            ),
        }
    }
}
/// Colors used when rendering the swap-layout status indicator.
#[derive(Debug)]
struct SwapLayoutColors {
    bg: PaletteColor,
    fg: PaletteColor,
    green: PaletteColor,
}
/// Orchestrates assembly of the complete tab line within a fixed column budget.
pub struct TabLineBuilder {
    config: TabLineConfig,
    palette: Styling,
    capabilities: PluginCapabilities,
    cols: usize,
}
impl TabLineBuilder {
    pub fn new(
        config: TabLineConfig,
        palette: Styling,
        capabilities: PluginCapabilities,
        cols: usize,
    ) -> Self {
        Self {
            config,
            palette,
            capabilities,
            cols,
        }
    }
    /// Assemble the final line: prefix first, then as many tabs as fit around
    /// the active one, then right-side elements padded to the right edge.
    /// If even the prefix plus the active tab overflow, only the prefix is
    /// returned.
    pub fn build(self, all_tabs: Vec<LinePart>, active_tab_index: usize) -> Vec<LinePart> {
        let (tabs_before_active, active_tab, tabs_after_active) =
            self.split_tabs(all_tabs, active_tab_index);
        let prefix_builder = TabLinePrefixBuilder::new(self.palette, self.cols);
        let session_name = if self.config.hide_session_name {
            None
        } else {
            self.config.session_name.as_deref()
        };
        let mut prefix = prefix_builder.build(session_name, self.config.mode);
        let prefix_len = calculate_total_length(&prefix);
        if prefix_len + active_tab.len > self.cols {
            return prefix;
        }
        let mut tabs_to_render = vec![active_tab];
        // The populator only gets the width left over after the prefix
        let populator = TabLinePopulator::new(
            self.cols.saturating_sub(prefix_len),
            self.palette,
            self.capabilities,
        );
        let mut tabs_before = tabs_before_active;
        let mut tabs_after = tabs_after_active;
        populator.populate_tabs(&mut tabs_before, &mut tabs_after, &mut tabs_to_render);
        prefix.append(&mut tabs_to_render);
        self.add_right_side_elements(&mut prefix);
        prefix
    }
    /// Split the tabs into (before active, active, after active). If
    /// `active_tab_index` is past the end, the last tab is treated as active
    /// (or a default part when there are no tabs at all).
    fn split_tabs(
        &self,
        mut all_tabs: Vec<LinePart>,
        active_tab_index: usize,
    ) -> (Vec<LinePart>, LinePart, Vec<LinePart>) {
        let mut tabs_after_active = all_tabs.split_off(active_tab_index);
        let mut tabs_before_active = all_tabs;
        let active_tab = if !tabs_after_active.is_empty() {
            tabs_after_active.remove(0)
        } else {
            tabs_before_active.pop().unwrap_or_default()
        };
        (tabs_before_active, active_tab, tabs_after_active)
    }
    /// Append the right-side elements, preceded by a background-colored spacer
    /// so they sit flush with the right edge; skipped entirely when they would
    /// not fit.
    fn add_right_side_elements(&self, prefix: &mut Vec<LinePart>) {
        let current_len = calculate_total_length(prefix);
        if current_len < self.cols {
            let right_builder = RightSideElementsBuilder::new(self.palette, self.capabilities);
            let available_space = self.cols.saturating_sub(current_len);
            let mut right_elements = right_builder.build(&self.config, available_space);
            let right_len = calculate_total_length(&right_elements);
            if current_len + right_len <= self.cols {
                let remaining_space = self
                    .cols
                    .saturating_sub(current_len)
                    .saturating_sub(right_len);
                if remaining_space > 0 {
                    prefix.push(self.create_spacer(remaining_space));
                }
                prefix.append(&mut right_elements);
            }
        }
    }
    /// A run of `space` background-colored blank cells.
    fn create_spacer(&self, space: usize) -> LinePart {
        let bg = self.palette.text_unselected.background;
        let buffer = (0..space)
            .map(|_| style!(bg, bg).paint(" ").to_string())
            .collect::<String>();
        LinePart {
            part: buffer,
            len: space,
            tab_index: None,
        }
    }
}
/// Separator glyph placed between tab segments: the powerline arrow when the
/// `arrow_fonts` capability is unset, otherwise nothing.
pub fn tab_separator(capabilities: PluginCapabilities) -> &'static str {
    if capabilities.arrow_fonts {
        ""
    } else {
        ARROW_SEPARATOR
    }
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
zellij-org/zellij | https://github.com/zellij-org/zellij/blob/3fe48a972c55537502128779116d38d8f8aedb7e/default-plugins/compact-bar/src/tab.rs | default-plugins/compact-bar/src/tab.rs | use crate::{line::tab_separator, LinePart};
use ansi_term::{ANSIString, ANSIStrings};
use unicode_width::UnicodeWidthStr;
use zellij_tile::prelude::*;
use zellij_tile_utils::style;
/// Build one colored cell per focused client (skipping clients without an
/// assigned color) and return the cells together with their total display
/// width, which includes 2 extra cells for the surrounding "[" and "]".
fn cursors<'a>(
    focused_clients: &'a [ClientId],
    colors: MultiplayerColors,
) -> (Vec<ANSIString<'a>>, usize) {
    let mut sections = Vec::with_capacity(focused_clients.len());
    for client_id in focused_clients {
        if let Some(client_colors) = client_id_to_colors(*client_id, colors) {
            sections.push(style!(client_colors.1, client_colors.0).paint(" "));
        }
    }
    let width = sections.len() + 2; // 2 for the brackets: [ and ]
    (sections, width)
}
/// Render a single tab segment: separators, padded bold name, and — when other
/// clients focus this tab — a bracketed strip of their cursor colors. The
/// returned `len` is the printable width; `tab_index` makes the tab clickable.
pub fn render_tab(
    text: String,
    tab: &TabInfo,
    is_alternate_tab: bool,
    palette: Styling,
    separator: &str,
) -> LinePart {
    let focused_clients = tab.other_focused_clients.as_slice();
    let separator_width = separator.width();
    // Alternate tabs get an emphasis background to distinguish neighbours
    let alternate_tab_color = if is_alternate_tab {
        palette.ribbon_unselected.emphasis_1
    } else {
        palette.ribbon_unselected.background
    };
    let background_color = if tab.active {
        palette.ribbon_selected.background
    } else if is_alternate_tab {
        alternate_tab_color
    } else {
        palette.ribbon_unselected.background
    };
    let foreground_color = if tab.active {
        palette.ribbon_selected.base
    } else {
        palette.ribbon_unselected.base
    };
    let separator_fill_color = palette.text_unselected.background;
    let left_separator = style!(separator_fill_color, background_color).paint(separator);
    let mut tab_text_len = text.width() + (separator_width * 2) + 2; // + 2 for padding
    let tab_styled_text = style!(foreground_color, background_color)
        .bold()
        .paint(format!(" {} ", text));
    let right_separator = style!(background_color, separator_fill_color).paint(separator);
    let tab_styled_text = if !focused_clients.is_empty() {
        // Append "[<cursor cells>]" between the name and the right separator
        let (cursor_section, extra_length) =
            cursors(focused_clients, palette.multiplayer_user_colors);
        tab_text_len += extra_length;
        let mut s = String::new();
        let cursor_beginning = style!(foreground_color, background_color)
            .bold()
            .paint("[")
            .to_string();
        let cursor_section = ANSIStrings(&cursor_section).to_string();
        let cursor_end = style!(foreground_color, background_color)
            .bold()
            .paint("]")
            .to_string();
        s.push_str(&left_separator.to_string());
        s.push_str(&tab_styled_text.to_string());
        s.push_str(&cursor_beginning);
        s.push_str(&cursor_section);
        s.push_str(&cursor_end);
        s.push_str(&right_separator.to_string());
        s
    } else {
        ANSIStrings(&[left_separator, tab_styled_text, right_separator]).to_string()
    };
    LinePart {
        part: tab_styled_text,
        len: tab_text_len,
        tab_index: Some(tab.position),
    }
}
/// Decorate the tab name with its state suffix (fullscreen takes precedence
/// over sync) and render it. Alternate-tab coloring is only kept when the
/// `arrow_fonts` capability is set — mirrors the original logic; the exact
/// semantics of the capability flag are assumed, TODO confirm.
pub fn tab_style(
    mut tabname: String,
    tab: &TabInfo,
    mut is_alternate_tab: bool,
    palette: Styling,
    capabilities: PluginCapabilities,
) -> LinePart {
    let separator = tab_separator(capabilities);
    let suffix = if tab.is_fullscreen_active {
        " (FULLSCREEN)"
    } else if tab.is_sync_panes_active {
        " (SYNC)"
    } else {
        ""
    };
    tabname.push_str(suffix);
    is_alternate_tab = is_alternate_tab && capabilities.arrow_fonts;
    render_tab(tabname, tab, is_alternate_tab, palette, separator)
}
/// Map a mouse click at `mouse_click_col` to the (1-based) index of the tab to
/// focus; None when the click hits no tab or hits the already-active tab.
pub(crate) fn get_tab_to_focus(
    tab_line: &[LinePart],
    active_tab_idx: usize,
    mouse_click_col: usize,
) -> Option<usize> {
    // tab_index is 0-based; tabs are addressed starting from 1
    let clicked_tab_idx = get_clicked_line_part(tab_line, mouse_click_col)?.tab_index? + 1;
    (clicked_tab_idx != active_tab_idx).then_some(clicked_tab_idx)
}
/// Find the line part whose column span contains `mouse_click_col`, walking
/// parts left to right and accumulating their widths.
pub(crate) fn get_clicked_line_part(
    tab_line: &[LinePart],
    mouse_click_col: usize,
) -> Option<&LinePart> {
    let mut start = 0;
    for part in tab_line {
        let end = start + part.len;
        if (start..end).contains(&mouse_click_col) {
            return Some(part);
        }
        start = end;
    }
    None
}
| rust | MIT | 3fe48a972c55537502128779116d38d8f8aedb7e | 2026-01-04T15:35:12.838106Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/settings.rs | src/settings.rs | use std::borrow::Cow;
use std::{env, io};
use api::grpc::transport_channel_pool::{
DEFAULT_CONNECT_TIMEOUT, DEFAULT_GRPC_TIMEOUT, DEFAULT_POOL_SIZE,
};
use collection::operations::validation;
use collection::shards::shard::PeerId;
use common::flags::FeatureFlags;
use config::{Config, ConfigError, Environment, File, FileFormat, Source};
use serde::Deserialize;
use storage::types::StorageConfig;
use validator::{Validate, ValidationError};
use crate::common::debugger::DebuggerConfig;
use crate::common::inference::config::InferenceConfig;
use crate::tracing;
// 2^53 - 1: the largest integer exactly representable as an f64 — presumably
// so peer ids survive JSON/f64-based tooling without precision loss; TODO confirm
const MAX_PEER_ID: u64 = (1 << 53) - 1;
// Compile-time baseline configuration; all other config sources merge on top
const DEFAULT_CONFIG: &str = include_str!("../config/config.yaml");
/// REST/gRPC API service configuration.
#[derive(Debug, Deserialize, Validate, Clone)]
pub struct ServiceConfig {
    #[validate(length(min = 1))]
    pub host: String,
    pub http_port: u16,
    pub grpc_port: Option<u16>, // None means that gRPC is disabled
    pub max_request_size_mb: usize,
    // None -> fall back to the storage-level default; Some(0) -> derive from
    // the CPU count (see max_web_workers)
    pub max_workers: Option<usize>,
    #[serde(default = "default_cors")]
    pub enable_cors: bool,
    #[serde(default)]
    pub enable_tls: bool,
    #[serde(default)]
    pub verify_https_client_certificate: bool,
    // Optional API keys for full and read-only access
    pub api_key: Option<String>,
    pub read_only_api_key: Option<String>,
    #[serde(default)]
    pub jwt_rbac: Option<bool>,
    #[serde(default)]
    pub hide_jwt_dashboard: Option<bool>,
    /// Directory where static files are served from.
    /// For example, the Web-UI should be placed here.
    #[serde(default)]
    pub static_content_dir: Option<String>,
    /// If serving of the static content is enabled.
    /// This includes the Web-UI. True by default.
    #[serde(default)]
    pub enable_static_content: Option<bool>,
    /// How much time is considered too long for a query to execute.
    pub slow_query_secs: Option<f32>,
    /// Whether to enable reporting of measured hardware utilization in API responses.
    #[serde(default)]
    pub hardware_reporting: Option<bool>,
    /// Global prefix for metrics.
    #[serde(default)]
    #[validate(custom(function = validate_metrics_prefix))]
    pub metrics_prefix: Option<String>,
}
impl ServiceConfig {
    /// Whether measured hardware utilization should be included in API
    /// responses; an unset option is treated as disabled.
    pub fn hardware_reporting(&self) -> bool {
        self.hardware_reporting.unwrap_or(false)
    }
}
/// Distributed-deployment settings; the whole section is inert unless
/// `enabled` is set.
#[derive(Debug, Deserialize, Clone, Default, Validate)]
pub struct ClusterConfig {
    pub enabled: bool, // disabled by default
    // Explicit peer id; bounded by MAX_PEER_ID (see its comment)
    #[serde(default)]
    #[validate(range(min = 1, max = MAX_PEER_ID))]
    pub peer_id: Option<PeerId>,
    #[serde(default = "default_timeout_ms")]
    #[validate(range(min = 1))]
    pub grpc_timeout_ms: u64,
    #[serde(default = "default_connection_timeout_ms")]
    #[validate(range(min = 1))]
    pub connection_timeout_ms: u64,
    #[serde(default)]
    #[validate(nested)]
    pub p2p: P2pConfig,
    #[serde(default)]
    #[validate(nested)]
    pub consensus: ConsensusConfig,
    #[serde(default)]
    pub resharding_enabled: bool, // disabled by default
}
/// Peer-to-peer transport settings for inter-node communication.
#[derive(Debug, Deserialize, Clone, Validate)]
pub struct P2pConfig {
    // None -> port chosen elsewhere (not bound from config)
    #[serde(default)]
    pub port: Option<u16>,
    #[serde(default = "default_connection_pool_size")]
    #[validate(range(min = 1))]
    pub connection_pool_size: usize,
    #[serde(default)]
    pub enable_tls: bool,
}
impl Default for P2pConfig {
    /// Mirrors the serde field defaults so programmatic construction matches
    /// deserializing an empty config section.
    fn default() -> Self {
        P2pConfig {
            port: None,
            connection_pool_size: default_connection_pool_size(),
            enable_tls: false,
        }
    }
}
/// Raft consensus tuning parameters.
#[derive(Debug, Deserialize, Clone, Validate)]
pub struct ConsensusConfig {
    #[serde(default = "default_max_message_queue_size")]
    pub max_message_queue_size: usize, // controls the back-pressure at the Raft level
    #[serde(default = "default_tick_period_ms")]
    #[validate(range(min = 1))]
    pub tick_period_ms: u64,
    #[serde(default = "default_bootstrap_timeout_sec")]
    #[validate(range(min = 1))]
    pub bootstrap_timeout_sec: u64,
    // Message timeout expressed in ticks of `tick_period_ms`
    #[validate(range(min = 1))]
    #[serde(default = "default_message_timeout_tics")]
    pub message_timeout_ticks: u64,
    /// Compact WAL when it grows to enough applied entries
    #[serde(default = "default_compact_wal_entries")]
    pub compact_wal_entries: u64,
}
impl Default for ConsensusConfig {
    /// Mirrors the serde field defaults so programmatic construction matches
    /// deserializing an empty config section.
    fn default() -> Self {
        ConsensusConfig {
            max_message_queue_size: default_max_message_queue_size(),
            tick_period_ms: default_tick_period_ms(),
            bootstrap_timeout_sec: default_bootstrap_timeout_sec(),
            message_timeout_ticks: default_message_timeout_tics(),
            compact_wal_entries: default_compact_wal_entries(),
        }
    }
}
/// Paths and parameters for TLS on the API and/or p2p endpoints.
#[derive(Debug, Deserialize, Clone, Validate)]
pub struct TlsConfig {
    pub cert: String,
    pub key: String,
    // CA certificate used to verify client certificates, if enabled
    pub ca_cert: Option<String>,
    // Certificate reload interval — presumably in seconds per the field name;
    // TODO confirm where it is consumed
    #[serde(default = "default_tls_cert_ttl")]
    #[validate(range(min = 1))]
    pub cert_ttl: Option<u64>,
}
/// Settings for GPU-accelerated indexing (Vulkan based).
#[allow(dead_code)]
#[derive(Clone, Debug, Deserialize, Validate)]
pub struct GpuConfig {
    /// Enable GPU indexing.
    #[serde(default)]
    pub indexing: bool,
    /// Force half precision for `f32` values while indexing.
    /// `f16` conversion will take place only inside GPU memory and won't affect storage type.
    #[serde(default)]
    pub force_half_precision: bool,
    /// Used vulkan "groups" of GPU. In other words, how many parallel points can be indexed by GPU.
    /// Optimal value might depend on the GPU model.
    /// Proportional, but doesn't necessary equal to the physical number of warps.
    /// Do not change this value unless you know what you are doing.
    /// Default: 512
    #[serde(default)]
    #[validate(range(min = 1))]
    pub groups_count: Option<usize>,
    /// Filter for GPU devices by hardware name. Case insensitive.
    /// Comma-separated list of substrings to match against the gpu device name.
    /// Example: "nvidia"
    /// Default: "" - all devices are accepted.
    #[serde(default)]
    pub device_filter: String,
    /// List of explicit GPU devices to use.
    /// If host has multiple GPUs, this option allows to select specific devices
    /// by their index in the list of found devices.
    /// If `device_filter` is set, indexes are applied after filtering.
    /// By default, all devices are accepted.
    #[serde(default)]
    pub devices: Option<Vec<usize>>,
    /// How many parallel indexing processes are allowed to run.
    /// Default: 1
    #[serde(default)]
    pub parallel_indexes: Option<usize>,
    /// Allow to use integrated GPUs.
    /// Default: false
    #[serde(default)]
    pub allow_integrated: bool,
    /// Allow to use emulated GPUs like LLVMpipe. Useful for CI.
    /// Default: false
    #[serde(default)]
    pub allow_emulated: bool,
}
/// Top-level Qdrant configuration, merged from the built-in defaults, config
/// files, and `QDRANT__`-prefixed environment variables (see `Settings::new`).
#[derive(Debug, Deserialize, Clone, Validate)]
pub struct Settings {
    #[serde(default)]
    pub log_level: Option<String>,
    #[serde(default)]
    pub logger: tracing::LoggerConfig,
    #[validate(nested)]
    pub storage: StorageConfig,
    #[validate(nested)]
    pub service: ServiceConfig,
    #[serde(default)]
    #[validate(nested)]
    pub cluster: ClusterConfig,
    #[serde(default = "default_telemetry_disabled")]
    pub telemetry_disabled: bool,
    #[validate(nested)]
    pub tls: Option<TlsConfig>,
    #[serde(default)]
    pub debugger: DebuggerConfig,
    /// A list of messages for errors that happened during loading the configuration. We collect
    /// them and store them here while loading because then our logger is not configured yet.
    /// We therefore need to log these messages later, after the logger is ready.
    #[serde(default, skip)]
    pub load_errors: Vec<LogMsg>,
    #[serde(default)]
    pub inference: Option<InferenceConfig>,
    #[serde(default)]
    #[validate(nested)]
    pub gpu: Option<GpuConfig>,
    #[serde(default)]
    pub feature_flags: FeatureFlags,
}
impl Settings {
    /// Load settings by layering config sources, later sources overriding
    /// earlier ones: built-in defaults, `config/config`, `config/{RUN_MODE}`,
    /// `config/local`, (deb-only) `/etc/qdrant/config`, the `--config-path`
    /// file, and finally `QDRANT__`-prefixed environment variables.
    /// Missing-file diagnostics are collected into `load_errors` and logged
    /// later, once the logger is configured.
    pub fn new(custom_config_path: Option<String>) -> Result<Self, ConfigError> {
        let mut load_errors = vec![];
        let config_exists = |path| File::with_name(path).collect().is_ok();
        // Check if custom config file exists, report error if not
        if let Some(path) = &custom_config_path
            && !config_exists(path)
        {
            load_errors.push(LogMsg::Error(format!(
                "Config file via --config-path is not found: {path}"
            )));
        }
        let env = env::var("RUN_MODE").unwrap_or_else(|_| "development".into());
        let config_path_env = format!("config/{env}");
        // Collect a warning for each of the main/env config files that is missing
        load_errors.extend(
            ["config/config", &config_path_env]
                .into_iter()
                .filter(|path| !config_exists(path))
                .map(|path| LogMsg::Warn(format!("Config file not found: {path}"))),
        );
        // Configuration builder: define different levels of configuration files
        let mut config = Config::builder()
            // Start with compile-time base config
            .add_source(File::from_str(DEFAULT_CONFIG, FileFormat::Yaml))
            // Merge main config: config/config
            .add_source(File::with_name("config/config").required(false))
            // Merge env config: config/{env}
            // Uses RUN_MODE, defaults to 'development'
            .add_source(File::with_name(&config_path_env).required(false))
            // Merge local config, not tracked in git: config/local
            .add_source(File::with_name("config/local").required(false));
        #[cfg(feature = "deb")]
        {
            // Read config, installed with deb package
            config = config.add_source(File::with_name("/etc/qdrant/config").required(false));
        }
        // Merge user provided config with --config-path
        if let Some(path) = custom_config_path {
            config = config.add_source(File::with_name(&path).required(false));
        }
        // Merge environment settings
        // E.g.: `QDRANT_DEBUG=1 ./target/app` would set `debug=true`
        config = config.add_source(Environment::with_prefix("QDRANT").separator("__"));
        // Build and merge config and deserialize into Settings, attach any load errors we had
        let mut settings: Settings = config.build()?.try_deserialize()?;
        settings.load_errors.extend(load_errors);
        Ok(settings)
    }
    /// The TLS section, or an error when it is absent from the config.
    pub fn tls(&self) -> io::Result<&TlsConfig> {
        self.tls
            .as_ref()
            .ok_or_else(Self::tls_config_is_undefined_error)
    }
    /// The error returned whenever TLS is required but not configured.
    pub fn tls_config_is_undefined_error() -> io::Error {
        io::Error::other("TLS config is not defined in the Qdrant config file")
    }
    /// Emit warnings for suspicious-but-usable configuration (JWT RBAC without
    /// a sufficiently long API key), flush deferred load errors, and run the
    /// validator, logging — not failing on — any violations.
    pub fn validate_and_warn(&self) {
        //
        // JWT RBAC
        //
        // Using HMAC-SHA256, recommended secret size is 32 bytes
        const JWT_RECOMMENDED_SECRET_LENGTH: usize = 256 / 8;
        // Log if JWT RBAC is enabled but no API key is set
        if self.service.jwt_rbac.unwrap_or_default() {
            if self.service.api_key.clone().unwrap_or_default().is_empty() {
                log::warn!("JWT RBAC configured but no API key set, JWT RBAC is not enabled")
            // Log if JWT RBAC is enabled and an API key is set, but it is smaller than the recommended JWT secret size
            } else if self.service.api_key.clone().unwrap_or_default().len()
                < JWT_RECOMMENDED_SECRET_LENGTH
            {
                log::warn!(
                    "It is highly recommended to use an API key of {JWT_RECOMMENDED_SECRET_LENGTH} bytes when JWT RBAC is enabled",
                )
            }
        }
        // Print any load error messages we had
        self.load_errors.iter().for_each(LogMsg::log);
        if let Err(ref errs) = self.validate() {
            validation::warn_validation_errors("Settings configuration file", errs);
        }
    }
}
/// Returns the number of maximum actix workers.
///
/// `Some(0)` means "auto": use all CPUs but one, with a floor of one worker.
/// An explicit positive value is taken verbatim; an absent value falls back
/// to the configured search-thread limit.
pub fn max_web_workers(settings: &Settings) -> usize {
    if let Some(max_workers) = settings.service.max_workers {
        if max_workers == 0 {
            let num_cpu = common::cpu::get_num_cpus();
            std::cmp::max(1, num_cpu - 1)
        } else {
            max_workers
        }
    } else {
        settings.storage.performance.max_search_threads
    }
}
/// A log message captured while loading configuration, before the logger is
/// initialized; replayed later through [`LogMsg::log`].
#[derive(Clone, Debug)]
pub enum LogMsg {
    /// Emitted at `warn` level.
    Warn(String),
    /// Emitted at `error` level.
    Error(String),
}
impl LogMsg {
fn log(&self) {
match self {
Self::Warn(msg) => log::warn!("{msg}"),
Self::Error(msg) => log::error!("{msg}"),
}
}
}
/// Serde default: telemetry is enabled unless explicitly disabled.
const fn default_telemetry_disabled() -> bool {
    false
}
/// Serde default: CORS is enabled.
const fn default_cors() -> bool {
    true
}
/// Serde default: gRPC request timeout in milliseconds (from `DEFAULT_GRPC_TIMEOUT`).
const fn default_timeout_ms() -> u64 {
    DEFAULT_GRPC_TIMEOUT.as_millis() as u64
}
/// Serde default: connection timeout in milliseconds (from `DEFAULT_CONNECT_TIMEOUT`).
const fn default_connection_timeout_ms() -> u64 {
    DEFAULT_CONNECT_TIMEOUT.as_millis() as u64
}
/// Serde default: tick period in milliseconds.
const fn default_tick_period_ms() -> u64 {
    100
}
// Should not be less than `DEFAULT_META_OP_WAIT` as bootstrapping perform sync. consensus meta operations.
/// Serde default: bootstrap timeout in seconds.
const fn default_bootstrap_timeout_sec() -> u64 {
    15
}
/// Serde default: maximum message queue size.
const fn default_max_message_queue_size() -> usize {
    100
}
/// Serde default: connection pool size (from `DEFAULT_POOL_SIZE`).
const fn default_connection_pool_size() -> usize {
    DEFAULT_POOL_SIZE
}
/// Serde default: message timeout measured in ticks.
const fn default_message_timeout_tics() -> u64 {
    10
}
/// Serde default: number of WAL entries before compaction.
const fn default_compact_wal_entries() -> u64 {
    128
}
/// Serde default: TLS certificate TTL in seconds (one hour).
#[allow(clippy::unnecessary_wraps)] // Used as serde default
const fn default_tls_cert_ttl() -> Option<u64> {
    // Default one hour
    Some(3600)
}
/// Custom validation function for metrics prefixes.
///
/// An empty prefix is accepted (the prefix is optional); otherwise every
/// character must be ASCII alphanumeric or an underscore.
fn validate_metrics_prefix(prefix: &str) -> Result<(), ValidationError> {
    let is_allowed = |c: char| c.is_ascii_alphanumeric() || c == '_';
    if prefix.is_empty() || prefix.chars().all(is_allowed) {
        Ok(())
    } else {
        Err(
            ValidationError::new("invalid_metrics_prefix").with_message(Cow::Borrowed(
                "Metrics prefix must be of all alphanumeric characters, with an exception for '_'",
            )),
        )
    }
}
#[cfg(test)]
mod tests {
    use std::io::Write;
    use fs_err as fs;
    use sealed_test::prelude::*;
    use super::*;

    /// Ensure we can successfully deserialize into [`Settings`] with just the default configuration.
    #[test]
    fn test_default_config() {
        Config::builder()
            .add_source(File::from_str(DEFAULT_CONFIG, FileFormat::Yaml))
            .build()
            .expect("failed to build default config")
            .try_deserialize::<Settings>()
            .expect("failed to deserialize default config")
            .validate()
            .expect("failed to validate default config");
    }

    // Loads the shipped `config/config.yaml` + `config/development.yaml` pair
    // in an isolated directory and expects a clean load with no collected errors.
    #[expect(
        clippy::disallowed_methods,
        reason = "#[sealed_test] uses std::fs::copy"
    )]
    #[expect(clippy::disallowed_types, reason = "#[sealed_test] uses std::fs::File")]
    #[sealed_test(files = ["config/config.yaml", "config/development.yaml"])]
    fn test_runtime_development_config() {
        unsafe { env::set_var("RUN_MODE", "development") };
        // `sealed_test` copies files into the same directory as the test runs in.
        // We need them in a subdirectory.
        fs::create_dir("config").expect("failed to create `config` subdirectory.");
        fs::copy("config.yaml", "config/config.yaml").expect("failed to copy `config.yaml`.");
        fs::copy("development.yaml", "config/development.yaml")
            .expect("failed to copy `development.yaml`.");
        // Read config
        let config = Settings::new(None).expect("failed to load development config at runtime");
        // Validate
        config
            .validate()
            .expect("failed to validate development config at runtime");
        assert!(config.load_errors.is_empty(), "must not have load errors")
    }

    // Loading must still succeed when no config files exist; missing files are
    // surfaced through `load_errors` rather than a hard failure.
    #[expect(clippy::disallowed_types, reason = "#[sealed_test] uses std::fs::File")]
    #[sealed_test]
    fn test_no_config_files() {
        let non_existing_config_path = "config/non_existing_config".to_string();
        // Read config
        let config = Settings::new(Some(non_existing_config_path))
            .expect("failed to load with non-existing runtime config");
        // Validate
        config
            .validate()
            .expect("failed to validate with non-existing runtime config");
        assert!(!config.load_errors.is_empty(), "must have load errors")
    }

    // A `--config-path` file must take precedence over all other sources.
    #[expect(clippy::disallowed_types, reason = "#[sealed_test] uses std::fs::File")]
    #[sealed_test]
    fn test_custom_config() {
        let path = "config/custom.yaml";
        // Create custom config file
        {
            fs::create_dir("config").unwrap();
            let mut custom = fs::File::create(path).unwrap();
            write!(&mut custom, "service:\n http_port: 9999").unwrap();
            custom.flush().unwrap();
        }
        // Load settings with custom config
        let config = Settings::new(Some(path.into())).unwrap();
        // Ensure our custom config is the most important
        assert_eq!(config.service.http_port, 9999);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/wal_inspector.rs | src/wal_inspector.rs | use std::env;
use std::path::Path;
use collection::operations::OperationWithClockTag;
use shard::wal::SerdeWal;
use storage::content_manager::consensus::consensus_wal::ConsensusOpWal;
use storage::content_manager::consensus_ops::ConsensusOperations;
use wal::WalOptions;
/// Executable to inspect the content of a write ahead log folder (collection OR consensus WAL).
/// e.g:
/// `cargo run --bin wal_inspector storage/collections/test-collection/0/wal/ collection`
/// `cargo run --bin wal_inspector -- storage/node4/wal/ consensus` (expects `collections_meta_wal` folder as first child)
fn main() {
    let args: Vec<String> = env::args().collect();
    // Guard against missing arguments: indexing `args[1]`/`args[2]` directly
    // would panic with an unhelpful out-of-bounds message.
    if args.len() < 3 {
        let bin = args.first().map(String::as_str).unwrap_or("wal_inspector");
        eprintln!("Usage: {bin} <wal_path> <collection|consensus>");
        std::process::exit(1);
    }
    let wal_path = Path::new(&args[1]);
    let wal_type = args[2].as_str();
    match wal_type {
        "collection" => print_collection_wal(wal_path),
        "consensus" => print_consensus_wal(wal_path),
        _ => eprintln!("Unknown wal type: {wal_type}"),
    }
}
/// Dump a consensus WAL (Raft log) to stdout: first/last entry, the WAL-to-Raft
/// offset of the first entry, and every entry decoded as a
/// [`ConsensusOperations`] when possible (raw bytes otherwise).
fn print_consensus_wal(wal_path: &Path) {
    // must live within a folder named `collections_meta_wal`
    let wal = ConsensusOpWal::new(wal_path);
    println!("==========================");
    let first_index = wal.first_entry().unwrap();
    println!("First entry: {first_index:?}");
    let last_index = wal.last_entry().unwrap();
    println!("Last entry: {last_index:?}");
    println!(
        "Offset of first entry: {:?}",
        wal.index_offset().unwrap().wal_to_raft_offset
    );
    // When the WAL is empty the fallbacks produce the range (1, 0 + 1), which
    // is presumably empty for `entries` — TODO confirm against ConsensusOpWal.
    let entries = wal
        .entries(
            first_index.map(|f| f.index).unwrap_or(1),
            last_index.map(|f| f.index).unwrap_or(0) + 1,
            None,
        )
        .unwrap();
    for entry in entries {
        println!("==========================");
        // Entries that don't decode as consensus operations are printed raw.
        let command = ConsensusOperations::try_from(&entry);
        let data = match command {
            Ok(command) => format!("{command:?}"),
            Err(_) => format!("{:?}", entry.data),
        };
        println!(
            "Entry ID:{}\nterm:{}\nentry_type:{}\ndata:{:?}",
            entry.index, entry.term, entry.entry_type, data
        )
    }
}
/// Dump a collection WAL to stdout: every stored operation with its index and
/// clock tag, followed by the total number of entries found.
fn print_collection_wal(wal_path: &Path) {
    let wal: Result<SerdeWal<OperationWithClockTag>, _> =
        SerdeWal::new(wal_path, WalOptions::default());
    match wal {
        Err(error) => {
            eprintln!("Unable to open write ahead log in directory {wal_path:?}: {error}.");
        }
        Ok(wal) => {
            // print all entries
            // NOTE(review): the `true` flag to `read_all` is undocumented here —
            // confirm its meaning against `SerdeWal::read_all`.
            let mut count = 0;
            for (idx, op) in wal.read_all(true) {
                println!("==========================");
                println!(
                    "Entry: {idx} Operation: {:?} Clock: {:?}",
                    op.operation, op.clock_tag
                );
                count += 1;
            }
            println!("==========================");
            println!("End of WAL.");
            println!("Found {count} entries.");
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/issues_setup.rs | src/issues_setup.rs | use std::time::Duration;
use collection::events::{CollectionDeletedEvent, IndexCreatedEvent, SlowQueryEvent};
use collection::problems::unindexed_field;
use storage::issues_subscribers::UnindexedFieldSubscriber;
use crate::settings::Settings;
/// Register issue subscribers for collection events and, when configured,
/// override the slow-query threshold.
///
/// Uses `if let` instead of `Option::map` for the side effect (clippy:
/// `option_map_unit_fn`); the previous code discarded the mapped result.
pub fn setup_subscribers(settings: &Settings) {
    if let Some(secs) = settings.service.slow_query_secs {
        let _ = unindexed_field::SLOW_QUERY_THRESHOLD.set(Duration::from_secs_f32(secs));
    }
    // The same (zero-sized, `Copy`) subscriber handles all three event kinds.
    let unindexed_subscriber = UnindexedFieldSubscriber;
    issues::broker::add_subscriber::<SlowQueryEvent>(Box::new(unindexed_subscriber));
    issues::broker::add_subscriber::<IndexCreatedEvent>(Box::new(unindexed_subscriber));
    issues::broker::add_subscriber::<CollectionDeletedEvent>(Box::new(unindexed_subscriber));
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/greeting.rs | src/greeting.rs | use std::cmp::min;
use std::env;
use std::io::{IsTerminal, stdout};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use api::rest::models::get_git_commit_id;
use colored::{Color, ColoredString, Colorize};
use crate::settings::Settings;
/// Brand red (24-bit) when the terminal supports true color, terminal red otherwise.
fn paint_red(text: &str, true_color: bool) -> ColoredString {
    if !true_color {
        return text.bold().color(Color::Red);
    }
    text.bold().truecolor(184, 20, 56)
}

/// Muted green (24-bit) when the terminal supports true color, terminal green otherwise.
fn paint_green(text: &str, true_color: bool) -> ColoredString {
    if !true_color {
        return text.color(Color::Green);
    }
    text.truecolor(134, 186, 144)
}

/// Accent blue (24-bit) when the terminal supports true color, terminal blue otherwise.
fn paint_blue(text: &str, true_color: bool) -> ColoredString {
    if !true_color {
        return text.bold().color(Color::Blue);
    }
    text.bold().truecolor(82, 139, 183)
}
/// Check whether the given IP will be reachable from `localhost`
///
/// This is a static analysis based on (very) common defaults and doesn't probe
/// the current routing table.
fn is_localhost_ip(host: &str) -> bool {
    let Ok(ip) = host.parse::<IpAddr>() else {
        return false;
    };
    // Unspecified IPs bind to all interfaces, so `localhost` always reaches them.
    let unspecified = ip.is_unspecified();
    // On all tested OSes IPv4 loopback resolves to `localhost`.
    let v4_loopback = ip == IpAddr::V4(Ipv4Addr::LOCALHOST);
    // IPv6 loopback resolves to `localhost` on macOS only (`ip6-localhost` on Linux).
    let v6_loopback_on_macos =
        cfg!(target_os = "macos") && ip == IpAddr::V6(Ipv6Addr::LOCALHOST);
    unspecified || v4_loopback || v6_loopback_on_macos
}
/// Prints welcome message
pub fn welcome(settings: &Settings) {
    // Disable coloring entirely when stdout is not a terminal (piped output).
    if !stdout().is_terminal() {
        colored::control::set_override(false);
    }
    // 24-bit color is only used when the terminal explicitly advertises it.
    let true_color = matches!(
        env::var("COLORTERM").as_deref(),
        Ok("24bit") | Ok("truecolor")
    );
    let title = [
        r" _ _ ",
        r" __ _ __| |_ __ __ _ _ __ | |_ ",
        r" / _` |/ _` | '__/ _` | '_ \| __| ",
        r"| (_| | (_| | | | (_| | | | | |_ ",
        r" \__, |\__,_|_| \__,_|_| |_|\__| ",
        r" |_| ",
    ];
    for line in title {
        println!("{}", paint_red(line, true_color));
    }
    println!();
    // Current version plus, when available, the first 8 characters of the git
    // commit hash the binary was built from.
    let git_commit_info = match get_git_commit_id() {
        Some(git_commit) => {
            let short_hash = &git_commit[..git_commit.len().min(8)];
            format!(
                ", {} {}",
                paint_green("build:", true_color),
                paint_blue(short_hash, true_color),
            )
        }
        None => String::new(),
    };
    println!(
        "{} {}{}",
        paint_green("Version:", true_color),
        paint_blue(env!("CARGO_PKG_VERSION"), true_color),
        git_commit_info
    );
    // Print link to web UI; substitute `localhost` when the bound address is
    // reachable through it.
    let scheme = if settings.service.enable_tls { "s" } else { "" };
    let host = if is_localhost_ip(&settings.service.host) {
        "localhost"
    } else {
        &settings.service.host
    };
    let ui_link = format!(
        "http{scheme}://{host}:{}/dashboard",
        settings.service.http_port
    );
    println!(
        "{} {}",
        paint_green("Access web UI at", true_color),
        paint_blue(&ui_link, true_color).underline()
    );
    println!();
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: rendering the greeting with default settings must not panic.
    #[test]
    fn test_welcome() {
        welcome(&Settings::new(None).unwrap());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/segment_inspector.rs | src/segment_inspector.rs | use std::path::Path;
use std::sync::atomic::AtomicBool;
use clap::Parser;
use common::counter::hardware_counter::HardwareCounterCell;
use segment::entry::entry_point::SegmentEntry;
use segment::segment_constructor::load_segment;
use segment::types::PointIdType;
// NOTE: the `///` doc comments on the fields below double as the clap help
// text — edit them with `--help` output in mind.
#[derive(Parser, Debug)]
#[command(version, about)]
struct Args {
    /// Path to the segment folder. May be a list
    #[clap(short, long, num_args=1..)]
    path: Vec<String>,
    /// Print segment info
    #[clap(long)]
    info: bool,
    /// Point ID to inspect
    #[clap(long)]
    point_id_int: Option<u64>,
    /// Point ID to inspect (UUID)
    // NOTE(review): parsed but currently ignored by `main` — confirm whether
    // UUID lookup should be implemented or this flag removed.
    #[clap(long)]
    point_id_uuid: Option<String>,
}
/// Inspect one or more segment folders: prints the available point count and,
/// on request, the full segment info and the data stored for a numeric point id.
fn main() {
    let args: Args = Args::parse();
    for segment_path in args.path {
        let path = Path::new(&segment_path);
        if !path.exists() {
            eprintln!("Path does not exist: {segment_path}");
            continue;
        }
        if !path.is_dir() {
            eprintln!("Path is not a directory: {segment_path}");
            continue;
        }
        // Open segment
        // (double unwrap: load error, then "segment present" option)
        let segment = load_segment(path, &AtomicBool::new(false))
            .unwrap()
            .unwrap();
        eprintln!(
            "path = {:#?}, size-points = {}",
            path,
            segment.available_point_count()
        );
        if args.info {
            let info = segment.info();
            eprintln!("info = {info:#?}");
        }
        // NOTE(review): `args.point_id_uuid` is accepted by the CLI but never
        // used here — only numeric ids are looked up.
        if let Some(point_id_int) = args.point_id_int {
            let point_id = PointIdType::NumId(point_id_int);
            let internal_id = segment.get_internal_id(point_id);
            // Silently skip points that are not present in this segment.
            if internal_id.is_some() {
                let version = segment.point_version(point_id);
                let payload = segment
                    .payload(point_id, &HardwareCounterCell::disposable())
                    .unwrap();
                // let vectors = segment.all_vectors(point_id).unwrap();
                println!("Internal ID: {internal_id:?}");
                println!("Version: {version:?}");
                println!("Payload: {payload:?}");
                // println!("Vectors: {vectors:?}");
            }
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/snapshots.rs | src/snapshots.rs | use std::io::BufReader;
use std::path::{Path, PathBuf};
use collection::collection::Collection;
use collection::shards::shard::PeerId;
use fs_err as fs;
use fs_err::File;
use log::info;
use segment::common::validate_snapshot_archive::open_snapshot_archive_with_validation;
use storage::content_manager::alias_mapping::AliasPersistence;
use storage::content_manager::snapshots::SnapshotConfig;
use storage::content_manager::toc::{ALIASES_PATH, COLLECTIONS_DIR};
/// Recover snapshots from the given arguments
///
/// # Arguments
///
/// * `mapping` - `[ <path>:<collection_name> ]`
/// * `force` - if true, allow to overwrite collections from snapshots
///
/// # Returns
///
/// * `Vec<String>` - list of collections that were recovered
///
/// # Panics
///
/// Panics on a malformed mapping entry, when a collection already exists and
/// `force` is not set, or when snapshot restoration / directory replacement fails.
pub fn recover_snapshots(
    mapping: &[String],
    force: bool,
    temp_dir: Option<&str>,
    storage_dir: &str,
    this_peer_id: PeerId,
    is_distributed: bool,
) -> Vec<String> {
    let collection_dir_path = Path::new(storage_dir).join(COLLECTIONS_DIR);
    let mut recovered_collections: Vec<String> = vec![];
    for snapshot_params in mapping {
        // Each entry must be exactly `<path>:<collection_name>`.
        let mut split = snapshot_params.split(':');
        let path = split
            .next()
            .unwrap_or_else(|| panic!("Snapshot path is missing: {snapshot_params}"));
        let snapshot_path = Path::new(path);
        let collection_name = split
            .next()
            .unwrap_or_else(|| panic!("Collection name is missing: {snapshot_params}"));
        recovered_collections.push(collection_name.to_string());
        assert!(
            split.next().is_none(),
            "Too many parts in snapshot mapping: {snapshot_params}"
        );
        info!("Recovering snapshot {collection_name} from {path}");
        // check if collection already exists
        // if it does, we need to check if we want to overwrite it
        // if not, we need to abort
        let collection_path = collection_dir_path.join(collection_name);
        info!("Collection path: {}", collection_path.display());
        if collection_path.exists() {
            if !force {
                panic!(
                    "Collection {collection_name} already exists. Use --force-snapshot to overwrite it."
                );
            }
            info!("Overwriting collection {collection_name}");
        }
        // Restore into a temporary location first, so a failed restore never
        // leaves a half-written collection directory behind.
        let collection_temp_path = temp_dir
            .map(PathBuf::from)
            .unwrap_or_else(|| collection_path.with_extension("tmp"));
        if let Err(err) = Collection::restore_snapshot(
            snapshot_path,
            &collection_temp_path,
            this_peer_id,
            is_distributed,
        ) {
            panic!("Failed to recover snapshot {collection_name}: {err}");
        }
        // Remove collection_path directory if exists
        if collection_path.exists()
            && let Err(err) = fs::remove_dir_all(&collection_path)
        {
            panic!("Failed to remove collection {collection_name}: {err}");
        }
        fs::rename(&collection_temp_path, &collection_path).unwrap();
    }
    recovered_collections
}
/// Recover a full-storage snapshot: un-tars the archive, restores every
/// collection listed in its `config.json` via [`recover_snapshots`], and
/// re-applies the saved collection aliases.
///
/// Returns the list of recovered collection names.
///
/// # Panics
///
/// Panics on any I/O, archive or JSON error, and when an alias already exists
/// while `force` is not set.
pub fn recover_full_snapshot(
    temp_dir: Option<&str>,
    snapshot_path: &str,
    storage_dir: &str,
    force: bool,
    this_peer_id: PeerId,
    is_distributed: bool,
) -> Vec<String> {
    let snapshot_temp_path = temp_dir
        .map(PathBuf::from)
        .unwrap_or_else(|| Path::new(storage_dir).join("snapshots_recovery_tmp"));
    fs::create_dir_all(&snapshot_temp_path).unwrap();
    // Un-tar snapshot into temporary directory
    let mut ar = open_snapshot_archive_with_validation(Path::new(snapshot_path)).unwrap();
    ar.unpack(&snapshot_temp_path).unwrap();
    // Read configuration file with snapshot-to-collection mapping
    let config_path = snapshot_temp_path.join("config.json");
    let config_file = BufReader::new(File::open(config_path).unwrap());
    let config_json: SnapshotConfig = serde_json::from_reader(config_file).unwrap();
    // Create mapping from the configuration file
    // (same `<path>:<collection_name>` format `recover_snapshots` expects)
    let mapping: Vec<String> = config_json
        .collections_mapping
        .iter()
        .map(|(collection_name, snapshot_file)| {
            format!(
                "{}:{collection_name}",
                snapshot_temp_path.join(snapshot_file).to_str().unwrap(),
            )
        })
        .collect();
    // Launch regular recovery of snapshots
    let recovered_collection = recover_snapshots(
        &mapping,
        force,
        temp_dir,
        storage_dir,
        this_peer_id,
        is_distributed,
    );
    let alias_path = Path::new(storage_dir).join(ALIASES_PATH);
    let mut alias_persistence =
        AliasPersistence::open(&alias_path).expect("Can't open database by the provided config");
    for (alias, collection_name) in config_json.collections_aliases {
        if alias_persistence.get(&alias).is_some() && !force {
            panic!("Alias {alias} already exists. Use --force-snapshot to overwrite it.");
        }
        alias_persistence.insert(alias, collection_name).unwrap();
    }
    // Remove temporary directory
    fs::remove_dir_all(&snapshot_temp_path).unwrap();
    recovered_collection
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/wal_pop.rs | src/wal_pop.rs | use std::env;
use std::path::Path;
use wal::Wal;
/// Executable that truncates the last entry from a consensus WAL.
///
/// Usage: `wal_pop <wal_path>`
fn main() {
    let args: Vec<String> = env::args().collect();
    // Guard against a missing path argument: indexing `args[1]` directly
    // would panic with an unhelpful out-of-bounds message.
    if args.len() < 2 {
        let bin = args.first().map(String::as_str).unwrap_or("wal_pop");
        eprintln!("Usage: {bin} <wal_path>");
        std::process::exit(1);
    }
    let wal_path = Path::new(&args[1]);
    let mut wal = Wal::open(wal_path).expect("Can't open consensus WAL");
    let last_index = wal.last_index();
    eprintln!("last_index = {last_index}");
    wal.truncate(last_index).unwrap();
    wal.flush_open_segment().unwrap();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/startup.rs | src/startup.rs | //! Contains a collection of functions that are called at the start of the program.
use std::backtrace::Backtrace;
use std::panic;
use std::path::PathBuf;
use fs_err as fs;
use crate::common::error_reporting::ErrorReporter;
/// Default name of the "server initialized" marker file, relative to the
/// working directory.
const DEFAULT_INITIALIZED_FILE: &str = ".qdrant-initialized";

/// Resolve the path of the initialized-marker file: honors the
/// `QDRANT_INIT_FILE_PATH` environment variable and falls back to
/// [`DEFAULT_INITIALIZED_FILE`].
fn get_init_file_path() -> PathBuf {
    match std::env::var("QDRANT_INIT_FILE_PATH") {
        Ok(path) => PathBuf::from(path),
        Err(_) => PathBuf::from(DEFAULT_INITIALIZED_FILE),
    }
}
/// Install the global panic hook: logs a captured backtrace plus the panic
/// location and message and, when `reporting_enabled`, forwards the message
/// (tagged with `reporting_id`) to the error reporter.
pub fn setup_panic_hook(reporting_enabled: bool, reporting_id: String) {
    panic::set_hook(Box::new(move |panic_info| {
        // `force_capture` collects frames regardless of RUST_BACKTRACE.
        let backtrace = Backtrace::force_capture().to_string();
        let loc = if let Some(loc) = panic_info.location() {
            format!(" in file {} at line {}", loc.file(), loc.line())
        } else {
            String::new()
        };
        // Panic payloads are commonly `&str` (static message) or `String`
        // (formatted message); anything else cannot be rendered as text.
        let message = if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
            s
        } else if let Some(s) = panic_info.payload().downcast_ref::<String>() {
            s
        } else {
            "Payload not captured as it is not a string."
        };
        log::error!("Panic backtrace: \n{backtrace}");
        log::error!("Panic occurred{loc}: {message}");
        if reporting_enabled {
            ErrorReporter::report(message, &reporting_id, Some(&loc));
        }
    }));
}
/// Creates a file that indicates that the server has been started.
/// This file is used to check if the server has been successfully started before potential kill.
pub fn touch_started_file_indicator() {
    // Failure to create the marker is not fatal — just warn and continue.
    match fs::write(get_init_file_path(), "") {
        Ok(()) => {}
        Err(err) => log::warn!("Failed to create init file indicator: {err}"),
    }
}
/// Removes a file that indicates that the server has been started.
/// Use before server initialization to avoid false positives.
pub fn remove_started_file_indicator() {
    let path = get_init_file_path();
    // Nothing to do when the indicator was never created.
    if !path.exists() {
        return;
    }
    // Failure to remove the marker is not fatal — just warn and continue.
    if let Err(err) = fs::remove_file(path) {
        log::warn!("Failed to remove init file indicator: {err}");
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/main.rs | src/main.rs | mod actix;
mod common;
mod consensus;
mod greeting;
mod issues_setup;
mod migrations;
mod settings;
mod snapshots;
mod startup;
mod tonic;
mod tracing;
use std::io::Error;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use ::common::budget::{ResourceBudget, get_io_budget};
use ::common::cpu::get_cpu_budget;
use ::common::flags::{feature_flags, init_feature_flags};
use ::tonic::transport::Uri;
use api::grpc::transport_channel_pool::TransportChannelPool;
use clap::Parser;
use collection::profiling::interface::init_requests_profile_collector;
use collection::shards::channel_service::ChannelService;
use consensus::Consensus;
use fs_err as fs;
use memory::checkfs::{check_fs_info, check_mmap_functionality};
use memory::mmap_ops::MULTI_MMAP_SUPPORT_CHECK_RESULT;
use slog::Drain;
use startup::setup_panic_hook;
use storage::content_manager::consensus::operation_sender::OperationSender;
use storage::content_manager::consensus::persistent::Persistent;
use storage::content_manager::consensus_manager::{ConsensusManager, ConsensusStateRef};
use storage::content_manager::toc::TableOfContent;
use storage::content_manager::toc::dispatcher::TocDispatcher;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
#[cfg(all(
not(target_env = "msvc"),
any(target_arch = "x86_64", target_arch = "aarch64")
))]
use tikv_jemallocator::Jemalloc;
use crate::common::helpers::{
create_general_purpose_runtime, create_search_runtime, create_update_runtime,
load_tls_client_config,
};
use crate::common::inference::service::InferenceService;
use crate::common::telemetry::TelemetryCollector;
use crate::common::telemetry_reporting::TelemetryReporter;
use crate::greeting::welcome;
use crate::migrations::single_to_cluster::handle_existing_collections;
use crate::settings::Settings;
use crate::snapshots::{recover_full_snapshot, recover_snapshots};
use crate::startup::{remove_started_file_indicator, touch_started_file_indicator};
// Use jemalloc as the global allocator on supported targets (same cfg as the
// `Jemalloc` import above); MSVC toolchains and other architectures keep the
// system allocator.
#[cfg(all(
    not(target_env = "msvc"),
    any(target_arch = "x86_64", target_arch = "aarch64")
))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

/// Full access rights used for operations initiated by `main` itself.
const FULL_ACCESS: Access = Access::full("For main");
/// Qdrant (read: quadrant ) is a vector similarity search engine.
/// It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload.
///
/// This CLI starts a Qdrant peer/server.
// NOTE: the `///` doc comments on this struct and its fields double as the
// clap `--help` output — edit them with that rendering in mind. Several flags
// also have environment-variable fallbacks (see the `env = ...` attributes).
#[derive(Parser, Debug)]
#[command(version, about)]
struct Args {
    /// Uri of the peer to bootstrap from in case of multi-peer deployment.
    /// If not specified - this peer will be considered as a first in a new deployment.
    #[arg(long, value_parser, value_name = "URI", env = "QDRANT_BOOTSTRAP")]
    bootstrap: Option<Uri>,
    /// Uri of this peer.
    /// Other peers should be able to reach it by this uri.
    ///
    /// This value has to be supplied if this is the first peer in a new deployment.
    ///
    /// In case this is not the first peer and it bootstraps the value is optional.
    /// If not supplied then qdrant will take internal grpc port from config and derive the IP address of this peer on bootstrap peer (receiving side)
    #[arg(long, value_parser, value_name = "URI", env = "QDRANT_URI")]
    uri: Option<Uri>,
    /// Force snapshot re-creation
    /// If provided - existing collections will be replaced with snapshots.
    /// Default is to not recreate from snapshots.
    #[arg(short, long, action, default_value_t = false)]
    force_snapshot: bool,
    /// List of paths to snapshot files.
    /// Format: <snapshot_file_path>:<target_collection_name>
    ///
    /// WARN: Do not use this option if you are recovering collection in existing distributed cluster.
    /// Use `/collections/<collection-name>/snapshots/recover` API instead.
    #[arg(long, value_name = "PATH:NAME", alias = "collection-snapshot")]
    snapshot: Option<Vec<String>>,
    /// Path to snapshot of multiple collections.
    /// Format: <snapshot_file_path>
    ///
    /// WARN: Do not use this option if you are recovering collection in existing distributed cluster.
    /// Use `/collections/<collection-name>/snapshots/recover` API instead.
    #[arg(long, value_name = "PATH")]
    storage_snapshot: Option<String>,
    /// Path to an alternative configuration file.
    /// Format: <config_file_path>
    ///
    /// Default path: config/config.yaml
    #[arg(long, value_name = "PATH")]
    config_path: Option<String>,
    /// Disable telemetry sending to developers
    /// If provided - telemetry collection will be disabled.
    /// Read more: <https://qdrant.tech/documentation/guides/telemetry>
    #[arg(long, action, default_value_t = false)]
    disable_telemetry: bool,
    /// Run stacktrace collector. Used for debugging.
    #[arg(long, action, default_value_t = false)]
    stacktrace: bool,
    /// Reinit consensus state.
    /// When enabled, the service will assume the consensus should be reinitialized.
    /// The exact behavior depends on if this current node has bootstrap URI or not.
    /// If it has - it'll remove current consensus state and consensus WAL (while keeping peer ID)
    /// and will try to receive state from the bootstrap peer.
    /// If it doesn't have - it'll remove other peers from voters promote
    /// the current peer to the leader and the single member of the cluster.
    /// It'll also compact consensus WAL to force snapshot
    #[arg(long, action, default_value_t = false)]
    reinit: bool,
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
// Run backtrace collector, expected to used by `rstack` crate
if args.stacktrace {
#[cfg(all(target_os = "linux", feature = "stacktrace"))]
{
let _ = rstack_self::child();
}
return Ok(());
}
let settings = Settings::new(args.config_path)?;
// Set global feature flags, sourced from configuration
init_feature_flags(settings.feature_flags);
let reporting_enabled = !settings.telemetry_disabled && !args.disable_telemetry;
let reporting_id = TelemetryCollector::generate_id();
// Setup logging (no logging before this point)
let logger_handle = tracing::setup(
settings
.logger
.with_top_level_directive(settings.log_level.clone()),
)?;
remove_started_file_indicator();
setup_panic_hook(reporting_enabled, reporting_id.to_string());
memory::madvise::set_global(settings.storage.mmap_advice);
segment::vector_storage::common::set_async_scorer(
settings
.storage
.performance
.async_scorer
.unwrap_or_default(),
);
welcome(&settings);
#[cfg(feature = "gpu")]
if let Some(settings_gpu) = &settings.gpu {
use segment::index::hnsw_index::gpu::*;
// initialize GPU devices manager.
if settings_gpu.indexing {
set_gpu_force_half_precision(settings_gpu.force_half_precision);
set_gpu_groups_count(settings_gpu.groups_count);
let mut gpu_device_manager = GPU_DEVICES_MANAGER.write();
*gpu_device_manager = match gpu_devices_manager::GpuDevicesMaganer::new(
&settings_gpu.device_filter,
settings_gpu.devices.as_deref(),
settings_gpu.allow_integrated,
settings_gpu.allow_emulated,
true, // Currently we always wait for the free gpu device.
settings_gpu.parallel_indexes.unwrap_or(1),
) {
Ok(gpu_device_manager) => Some(gpu_device_manager),
Err(err) => {
log::error!("Can't initialize GPU devices manager: {err}");
None
}
}
}
}
if let Some(recovery_warning) = &settings.storage.recovery_mode {
log::warn!("Qdrant is loaded in recovery mode: {recovery_warning}");
log::warn!(
"Read more: https://qdrant.tech/documentation/guides/administration/#recovery-mode"
);
}
// Validate as soon as possible, but we must initialize logging first
settings.validate_and_warn();
fs::create_dir_all(&settings.storage.storage_path)?;
// Check if the filesystem is compatible with Qdrant
let mmaps_working;
match check_fs_info(&settings.storage.storage_path) {
memory::checkfs::FsCheckResult::Good => {
mmaps_working = true;
}
memory::checkfs::FsCheckResult::Unknown(details) => {
match check_mmap_functionality(&settings.storage.storage_path) {
Ok(true) => {
log::warn!(
"There is a potential issue with the filesystem for storage path {}. Details: {details}",
settings.storage.storage_path,
);
mmaps_working = true;
}
Ok(false) => {
log::error!(
"Filesystem check failed for storage path {}. Details: {details}",
settings.storage.storage_path,
);
mmaps_working = false;
}
Err(e) => {
log::error!(
"Unable to check mmap functionality for storage path {}. Details: {details}, error: {e}",
settings.storage.storage_path,
);
mmaps_working = false;
}
}
}
memory::checkfs::FsCheckResult::Bad(details) => {
log::error!(
"Filesystem check failed for storage path {}. Details: {details}",
settings.storage.storage_path,
);
mmaps_working = false;
}
}
let _ = MULTI_MMAP_SUPPORT_CHECK_RESULT.set(mmaps_working);
// Report feature flags that are enabled for easier debugging
let flags = feature_flags();
if !flags.is_default() {
log::debug!("Feature flags: {flags:?}");
}
let bootstrap = if args.bootstrap == args.uri {
if args.bootstrap.is_some() {
log::warn!(
"Bootstrap URI is the same as this peer URI. Consider this peer as a first in a new deployment.",
);
}
None
} else {
args.bootstrap
};
// Saved state of the consensus.
let persistent_consensus_state = Persistent::load_or_init(
&settings.storage.storage_path,
bootstrap.is_none(),
args.reinit,
settings.cluster.peer_id,
)?;
let is_distributed_deployment = settings.cluster.enabled;
let temp_path = settings.storage.temp_path.as_deref();
let restored_collections = if let Some(full_snapshot) = args.storage_snapshot {
recover_full_snapshot(
temp_path,
&full_snapshot,
&settings.storage.storage_path,
args.force_snapshot,
persistent_consensus_state.this_peer_id(),
is_distributed_deployment,
)
} else if let Some(snapshots) = args.snapshot {
// recover from snapshots
recover_snapshots(
&snapshots,
args.force_snapshot,
temp_path,
&settings.storage.storage_path,
persistent_consensus_state.this_peer_id(),
is_distributed_deployment,
)
} else {
vec![]
};
// Create and own search runtime out of the scope of async context to ensure correct
// destruction of it
let search_runtime = create_search_runtime(settings.storage.performance.max_search_threads)
.expect("Can't search create runtime.");
let update_runtime = create_update_runtime(
settings
.storage
.performance
.max_optimization_runtime_threads,
)
.expect("Can't optimizer create runtime.");
let general_runtime =
create_general_purpose_runtime().expect("Can't optimizer general purpose runtime.");
let runtime_handle = general_runtime.handle().clone();
// Use global CPU budget for optimizations based on settings
let cpu_budget = get_cpu_budget(settings.storage.performance.optimizer_cpu_budget);
let io_budget = get_io_budget(settings.storage.performance.optimizer_io_budget, cpu_budget);
let optimizer_resource_budget = ResourceBudget::new(cpu_budget, io_budget);
// Create a signal sender and receiver. It is used to communicate with the consensus thread.
let (propose_sender, propose_receiver) = std::sync::mpsc::channel();
let propose_operation_sender = if settings.cluster.enabled {
// High-level channel which could be used to send User-space consensus operations
Some(OperationSender::new(propose_sender))
} else {
// We don't need sender for the single-node mode
None
};
// Channel service is used to manage connections between peers.
// It allocates required number of channels and manages proper reconnection handling
let mut channel_service =
ChannelService::new(settings.service.http_port, settings.service.api_key.clone());
if is_distributed_deployment {
// We only need channel_service in case if cluster is enabled.
// So we initialize it with real values here
let p2p_grpc_timeout = Duration::from_millis(settings.cluster.grpc_timeout_ms);
let connection_timeout = Duration::from_millis(settings.cluster.connection_timeout_ms);
let tls_config = load_tls_client_config(&settings)?;
channel_service.channel_pool = Arc::new(TransportChannelPool::new(
p2p_grpc_timeout,
connection_timeout,
settings.cluster.p2p.connection_pool_size,
tls_config,
));
channel_service.id_to_address = persistent_consensus_state.peer_address_by_id.clone();
channel_service.id_to_metadata = persistent_consensus_state.peer_metadata_by_id.clone();
}
// Table of content manages the list of collections.
// It is a main entry point for the storage.
let toc = TableOfContent::new(
&settings.storage,
search_runtime,
update_runtime,
general_runtime,
optimizer_resource_budget,
channel_service.clone(),
persistent_consensus_state.this_peer_id(),
propose_operation_sender.clone(),
);
toc.clear_all_tmp_directories()?;
// Here we load all stored collections.
runtime_handle.block_on(async {
for collection in toc.all_collections(&FULL_ACCESS).await {
log::debug!("Loaded collection: {collection}");
}
});
let toc_arc = Arc::new(toc);
let storage_path = toc_arc.storage_path();
// Holder for all actively running threads of the service: web, gPRC, consensus, etc.
let mut handles: Vec<JoinHandle<Result<(), Error>>> = vec![];
// Router for external queries.
// It decides if query should go directly to the ToC or through the consensus.
let mut dispatcher = Dispatcher::new(toc_arc.clone());
let (telemetry_collector, dispatcher_arc, health_checker) = if is_distributed_deployment {
let consensus_state: ConsensusStateRef = ConsensusManager::new(
persistent_consensus_state,
toc_arc.clone(),
propose_operation_sender.unwrap(),
Path::new(storage_path),
)
.expect("initialize consensus manager")
.into();
let is_new_deployment = consensus_state.is_new_deployment();
dispatcher =
dispatcher.with_consensus(consensus_state.clone(), settings.cluster.resharding_enabled);
let toc_dispatcher = TocDispatcher::new(Arc::downgrade(&toc_arc), consensus_state.clone());
toc_arc.with_toc_dispatcher(toc_dispatcher);
let dispatcher_arc = Arc::new(dispatcher);
// Monitoring and telemetry.
let telemetry_collector =
TelemetryCollector::new(settings.clone(), dispatcher_arc.clone(), reporting_id);
let tonic_telemetry_collector = telemetry_collector.tonic_telemetry_collector.clone();
// `raft` crate uses `slog` crate so it is needed to use `slog_stdlog::StdLog` to forward
// logs from it to `log` crate
let slog_logger = slog::Logger::root(slog_stdlog::StdLog.fuse(), slog::o!());
// Runs raft consensus in a separate thread.
// Create a pipe `message_sender` to communicate with the consensus
let health_checker = Arc::new(common::health::HealthChecker::spawn(
toc_arc.clone(),
consensus_state.clone(),
&runtime_handle,
// NOTE: `wait_for_bootstrap` should be calculated *before* starting `Consensus` thread
consensus_state.is_new_deployment() && bootstrap.is_some(),
));
let handle = Consensus::run(
&slog_logger,
consensus_state.clone(),
bootstrap,
args.uri.map(|uri| uri.to_string()),
settings.clone(),
channel_service,
propose_receiver,
tonic_telemetry_collector,
toc_arc.clone(),
runtime_handle.clone(),
args.reinit,
)
.expect("Can't initialize consensus");
handles.push(handle);
let toc_arc_clone = toc_arc.clone();
let consensus_state_clone = consensus_state.clone();
let _cancel_transfer_handle = runtime_handle.spawn(async move {
consensus_state_clone.is_leader_established.await_ready();
match toc_arc_clone
.cancel_related_transfers("Source or target peer restarted")
.await
{
Ok(_) => {
log::debug!("All transfers if any cancelled");
}
Err(err) => {
log::error!("Can't cancel related transfers: {err}");
}
}
});
// TODO(resharding): Remove resharding driver?
//
// runtime_handle.block_on(async {
// toc_arc.resume_resharding_tasks().await;
// });
let collections_to_recover_in_consensus = if is_new_deployment {
let existing_collections =
runtime_handle.block_on(toc_arc.all_collections(&FULL_ACCESS));
existing_collections
.into_iter()
.map(|pass| pass.name().to_string())
.collect()
} else {
restored_collections
};
if !collections_to_recover_in_consensus.is_empty() {
runtime_handle.block_on(handle_existing_collections(
toc_arc.clone(),
consensus_state.clone(),
dispatcher_arc.clone(),
consensus_state.this_peer_id(),
collections_to_recover_in_consensus,
));
}
(telemetry_collector, dispatcher_arc, Some(health_checker))
} else {
log::info!("Distributed mode disabled");
let dispatcher_arc = Arc::new(dispatcher);
// Monitoring and telemetry.
let telemetry_collector =
TelemetryCollector::new(settings.clone(), dispatcher_arc.clone(), reporting_id);
(telemetry_collector, dispatcher_arc, None)
};
let tonic_telemetry_collector = telemetry_collector.tonic_telemetry_collector.clone();
//
// Telemetry reporting
//
let reporting_id = telemetry_collector.reporting_id();
let telemetry_collector = Arc::new(tokio::sync::Mutex::new(telemetry_collector));
if reporting_enabled {
log::info!("Telemetry reporting enabled, id: {reporting_id}");
runtime_handle.spawn(TelemetryReporter::run(telemetry_collector.clone()));
} else {
log::info!("Telemetry reporting disabled");
}
if settings.service.hardware_reporting == Some(true) {
log::info!("Hardware reporting enabled");
}
// Setup subscribers to listen for issue-able events
issues_setup::setup_subscribers(&settings);
init_requests_profile_collector(runtime_handle.clone());
// Helper to better log start errors
let log_err_if_any = |server_name, result| match result {
Err(err) => {
log::error!("Error while starting {server_name} server: {err}");
Err(err)
}
ok => ok,
};
//
// Inference Service
//
if let Err(err) = InferenceService::init_global(settings.inference.clone()) {
log::error!("Inference service init failed: {err}");
}
//
// REST API server
//
{
let dispatcher_arc = dispatcher_arc.clone();
let settings = settings.clone();
let handle = thread::Builder::new()
.name("web".to_string())
.spawn(move || {
log_err_if_any(
"REST",
actix::init(
dispatcher_arc.clone(),
telemetry_collector,
health_checker,
settings,
logger_handle,
),
)
})
.unwrap();
handles.push(handle);
}
//
// gRPC server
//
if let Some(grpc_port) = settings.service.grpc_port {
let settings = settings.clone();
let handle = thread::Builder::new()
.name("grpc".to_string())
.spawn(move || {
log_err_if_any(
"gRPC",
tonic::init(
dispatcher_arc,
tonic_telemetry_collector,
settings,
grpc_port,
runtime_handle,
),
)
})
.unwrap();
handles.push(handle);
} else {
log::info!("gRPC endpoint disabled");
}
#[cfg(feature = "service_debug")]
{
use std::fmt::Write;
use parking_lot::deadlock;
const DEADLOCK_CHECK_PERIOD: Duration = Duration::from_secs(10);
thread::Builder::new()
.name("deadlock_checker".to_string())
.spawn(move || {
loop {
thread::sleep(DEADLOCK_CHECK_PERIOD);
let deadlocks = deadlock::check_deadlock();
if deadlocks.is_empty() {
continue;
}
let mut error = format!("{} deadlocks detected\n", deadlocks.len());
for (i, threads) in deadlocks.iter().enumerate() {
writeln!(error, "Deadlock #{i}").expect("fail to writeln!");
for t in threads {
writeln!(
error,
"Thread Id {:#?}\n{:#?}",
t.thread_id(),
t.backtrace(),
)
.expect("fail to writeln!");
}
}
log::error!("{error}");
}
})
.unwrap();
}
touch_started_file_indicator();
for handle in handles {
log::debug!(
"Waiting for thread {} to finish",
handle.thread().name().unwrap()
);
handle.join().expect("thread is not panicking")?;
}
drop(toc_arc);
drop(settings);
Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/consensus.rs | src/consensus.rs | use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use std::sync::{Arc, mpsc};
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use std::{cmp, fmt, thread};
use anyhow::{Context as _, anyhow};
use api::grpc::dynamic_channel_pool::make_grpc_channel;
use api::grpc::qdrant::raft_client::RaftClient;
use api::grpc::qdrant::{AllPeers, PeerId as GrpcPeerId, RaftMessage as GrpcRaftMessage};
use api::grpc::transport_channel_pool::TransportChannelPool;
use collection::shards::channel_service::ChannelService;
use collection::shards::shard::PeerId;
#[cfg(target_os = "linux")]
use common::cpu::linux_high_thread_priority;
use raft::eraftpb::Message as RaftMessage;
use raft::prelude::*;
use raft::{INVALID_ID, SoftState, StateRole};
use storage::content_manager::consensus_manager::ConsensusStateRef;
use storage::content_manager::consensus_ops::{ConsensusOperations, SnapshotStatus};
use storage::content_manager::toc::TableOfContent;
use tokio::runtime::Handle;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::watch;
use tokio::time::sleep;
use tonic::transport::{ClientTlsConfig, Uri};
use crate::common::helpers;
use crate::common::telemetry_ops::requests_telemetry::TonicTelemetryCollector;
use crate::settings::{ConsensusConfig, Settings};
use crate::tonic::init_internal;
/// Raft node specialized to Qdrant's consensus state storage.
type Node = RawNode<ConsensusStateRef>;

/// Delay unit between attempts to re-announce this peer's address after a restart
/// (see `Consensus::recover`; the actual sleep grows with the attempt number).
const RECOVERY_RETRY_TIMEOUT: Duration = Duration::from_secs(1);
/// Maximum number of passes over the known peers before recovery gives up.
const RECOVERY_MAX_RETRY_COUNT: usize = 3;
/// Unit of work consumed by the consensus thread.
pub enum Message {
    /// An operation proposed on this node (API/client side); gets proposed to Raft.
    FromClient(ConsensusOperations),
    /// A raw Raft protocol message received from another peer; gets `step`ped into the node.
    FromPeer(Box<RaftMessage>),
}
/// Aka Consensus Thread
/// Manages proposed changes to consensus state, ensures that everything is ordered properly
pub struct Consensus {
    /// Raft structure which handles raft-related state
    node: Node,
    /// Receives proposals from peers and client for applying in consensus
    receiver: Receiver<Message>,
    /// Runtime for async message sending
    runtime: Handle,
    /// Consensus configuration: tick period, timeouts, WAL compaction settings
    config: ConsensusConfig,
    /// Outgoing side of consensus: holds the peer address map and transport
    /// channels used to reach other peers
    broker: RaftMessageBroker,
    /// Raft-level configuration (peer id, applied index, election ticks, ...)
    raft_config: Config,
}
impl Consensus {
/// Create and run consensus node
///
/// Spawns three threads:
/// - `consensus`: the Raft state-machine loop (`Consensus::start`)
/// - `forward-proposals`: bridges the sync `propose_receiver` into the async consensus channel
/// - `grpc_internal`: the internal p2p gRPC server — its join handle is returned
#[allow(clippy::too_many_arguments)]
pub fn run(
    logger: &slog::Logger,
    state_ref: ConsensusStateRef,
    bootstrap_peer: Option<Uri>,
    uri: Option<String>,
    settings: Settings,
    channel_service: ChannelService,
    propose_receiver: mpsc::Receiver<ConsensusOperations>,
    telemetry_collector: Arc<parking_lot::Mutex<TonicTelemetryCollector>>,
    toc: Arc<TableOfContent>,
    runtime: Handle,
    reinit: bool,
) -> anyhow::Result<JoinHandle<std::io::Result<()>>> {
    let tls_client_config = helpers::load_tls_client_config(&settings)?;
    let p2p_host = settings.service.host.clone();
    let p2p_port = settings.cluster.p2p.port.expect("P2P port is not set");
    let config = settings.cluster.consensus.clone();
    // Build the consensus state machine and the channel used to feed it messages.
    let (mut consensus, message_sender) = Self::new(
        logger,
        state_ref.clone(),
        bootstrap_peer,
        uri,
        p2p_port,
        config,
        tls_client_config,
        channel_service,
        runtime.clone(),
        reinit,
    )?;
    let state_ref_clone = state_ref.clone();
    thread::Builder::new()
        .name("consensus".to_string())
        .spawn(move || {
            // On Linux, try to use high thread priority because consensus is important
            // Likely fails as we cannot set a higher priority by default due to permissions
            #[cfg(target_os = "linux")]
            if let Err(err) = linux_high_thread_priority() {
                log::debug!(
                    "Failed to set high thread priority for consensus, ignoring: {err}"
                );
            }
            if let Err(err) = consensus.start() {
                log::error!("Consensus stopped with error: {err:#}");
                state_ref_clone.on_consensus_thread_err(err);
            } else {
                log::info!("Consensus stopped");
                state_ref_clone.on_consensus_stopped();
            }
        })?;
    let message_sender_moved = message_sender.clone();
    thread::Builder::new()
        .name("forward-proposals".to_string())
        .spawn(move || {
            // On Linux, try to use high thread priority because consensus is important
            // Likely fails as we cannot set a higher priority by default due to permissions
            #[cfg(target_os = "linux")]
            if let Err(err) = linux_high_thread_priority() {
                log::debug!(
                    "Failed to set high thread priority for consensus, ignoring: {err}"
                );
            }
            // Forward each proposed operation into the (bounded, async) consensus channel.
            while let Ok(entry) = propose_receiver.recv() {
                if message_sender_moved
                    .blocking_send(Message::FromClient(entry))
                    .is_err()
                {
                    log::error!("Can not forward new entry to consensus as it was stopped.");
                    break;
                }
            }
        })?;
    // TLS for the internal p2p server, only when enabled in settings.
    let server_tls = if settings.cluster.p2p.enable_tls {
        let tls_config = settings
            .tls
            .clone()
            .ok_or_else(Settings::tls_config_is_undefined_error)?;
        Some(helpers::load_tls_internal_server_config(&tls_config)?)
    } else {
        None
    };
    let handle = thread::Builder::new()
        .name("grpc_internal".to_string())
        .spawn(move || {
            init_internal(
                toc,
                state_ref,
                telemetry_collector,
                settings,
                p2p_host,
                p2p_port,
                server_tls,
                message_sender,
                runtime,
            )
        })
        .unwrap();
    Ok(handle)
}
/// If `bootstrap_peer` peer is supplied, then either `uri` or `p2p_port` should be also supplied
///
/// Recovers (or initializes) persisted Raft state, builds the Raft node, and returns the
/// consensus object together with the sender half of its bounded message channel.
#[allow(clippy::too_many_arguments)]
pub fn new(
    logger: &slog::Logger,
    state_ref: ConsensusStateRef,
    bootstrap_peer: Option<Uri>,
    uri: Option<String>,
    p2p_port: u16,
    config: ConsensusConfig,
    tls_config: Option<ClientTlsConfig>,
    channel_service: ChannelService,
    runtime: Handle,
    reinit: bool,
) -> anyhow::Result<(Self, Sender<Message>)> {
    // If we want to re-initialize consensus, we need to prevent other peers
    // from re-playing consensus WAL operations, as they should already have them applied.
    // Do ensure that we are forcing compacting WAL on the first re-initialized peer,
    // which should trigger snapshot transferring instead of replaying WAL.
    let force_compact_wal = reinit && bootstrap_peer.is_none();
    // On the bootstrap-ed peers during reinit of the consensus
    // we want to make sure only the bootstrap peer will hold the true state
    // Therefore we clear the WAL on the bootstrap peer to force it to request a snapshot
    let clear_wal = reinit && bootstrap_peer.is_some();
    if clear_wal {
        log::debug!("Clearing WAL on the bootstrap peer to force snapshot transfer");
        state_ref.clear_wal()?;
    }
    // raft will not return entries to the application smaller or equal to `applied`
    let last_applied = state_ref.last_applied_entry().unwrap_or_default();
    let raft_config = Config {
        id: state_ref.this_peer_id(),
        applied: last_applied,
        ..Default::default()
    };
    raft_config.validate()?;
    // bounded channel for backpressure
    let (sender, receiver) = tokio::sync::mpsc::channel(config.max_message_queue_size);
    // State might be initialized but the node might be shutdown without actually syncing or committing anything.
    if state_ref.is_new_deployment() || reinit {
        let leader_established_in_ms =
            config.tick_period_ms * raft_config.max_election_tick() as u64;
        Self::init(
            &state_ref,
            bootstrap_peer.clone(),
            uri,
            p2p_port,
            &config,
            tls_config.clone(),
            &runtime,
            leader_established_in_ms,
        )
        .map_err(|err| anyhow!("Failed to initialize Consensus for new Raft state: {err}"))?;
    } else {
        // Existing deployment: re-announce our (possibly changed) address to the cluster.
        runtime
            .block_on(Self::recover(
                &state_ref,
                uri.clone(),
                p2p_port,
                &config,
                tls_config.clone(),
            ))
            .map_err(|err| {
                anyhow!("Failed to recover Consensus from existing Raft state: {err}")
            })?;
        if bootstrap_peer.is_some() || uri.is_some() {
            log::debug!("Local raft state found - bootstrap and uri cli arguments were ignored")
        }
        log::debug!("Local raft state found - skipping initialization");
    };
    let mut node = Node::new(&raft_config, state_ref.clone(), logger)?;
    node.set_batch_append(true);
    // Before consensus has started apply any unapplied committed entries
    // They might have not been applied due to unplanned Qdrant shutdown
    let _stop_consensus = state_ref.apply_entries(&mut node)?;
    if force_compact_wal {
        // Making sure that the WAL will be compacted on start
        state_ref.compact_wal(1)?;
    } else {
        state_ref.compact_wal(config.compact_wal_entries)?;
    }
    let broker = RaftMessageBroker::new(
        runtime.clone(),
        bootstrap_peer,
        tls_config,
        config.clone(),
        node.store().clone(),
        channel_service.channel_pool,
    );
    let consensus = Self {
        node,
        receiver,
        runtime,
        config,
        broker,
        raft_config,
    };
    if !state_ref.is_new_deployment() {
        state_ref.recover_first_voter()?;
    }
    Ok((consensus, sender))
}
/// Initialize consensus for a brand-new (or re-initialized) deployment.
///
/// With a bootstrap peer, this node joins the existing cluster through it;
/// without one, this node assumes it is the first peer and records its own
/// address (which then requires `uri` to be provided).
#[allow(clippy::too_many_arguments)]
fn init(
    state_ref: &ConsensusStateRef,
    bootstrap_peer: Option<Uri>,
    uri: Option<String>,
    p2p_port: u16,
    config: &ConsensusConfig,
    tls_config: Option<ClientTlsConfig>,
    runtime: &Handle,
    leader_established_in_ms: u64,
) -> anyhow::Result<()> {
    match bootstrap_peer {
        Some(bootstrap_peer) => {
            log::debug!("Bootstrapping from peer with address: {bootstrap_peer}");
            runtime.block_on(Self::bootstrap(
                state_ref,
                bootstrap_peer,
                uri,
                p2p_port,
                config,
                tls_config,
            ))?;
        }
        None => {
            log::debug!(
                "Bootstrapping is disabled. Assuming this peer is the first in the network"
            );
            let tick_period = config.tick_period_ms;
            log::info!(
                "With current tick period of {tick_period}ms, leader will be established in approximately {leader_established_in_ms}ms. To avoid rejected operations - add peers and submit operations only after this period.",
            );
            // The first peer needs to record its own address.
            let this_peer = state_ref.this_peer_id();
            let own_uri =
                uri.ok_or_else(|| anyhow::anyhow!("First peer should specify its uri."))?;
            state_ref.add_peer(this_peer, own_uri.parse()?)?;
        }
    }
    Ok(())
}
/// Ask the peer at `cluster_uri` to register this peer as "known",
/// returning the full peer list of the cluster on success.
async fn add_peer_to_known_for(
    this_peer_id: PeerId,
    cluster_uri: Uri,
    current_uri: Option<String>,
    p2p_port: u16,
    config: &ConsensusConfig,
    tls_config: Option<ClientTlsConfig>,
) -> anyhow::Result<AllPeers> {
    // Use dedicated transport channel for bootstrapping because of specific timeout
    let bootstrap_timeout = Duration::from_secs(config.bootstrap_timeout_sec);
    let channel = match make_grpc_channel(
        bootstrap_timeout,
        bootstrap_timeout,
        cluster_uri,
        tls_config,
    )
    .await
    {
        Ok(channel) => channel,
        Err(err) => return Err(anyhow!("Failed to create timeout channel: {err}")),
    };
    let request = tonic::Request::new(api::grpc::qdrant::AddPeerToKnownMessage {
        uri: current_uri,
        port: Some(u32::from(p2p_port)),
        id: this_peer_id,
    });
    let response = RaftClient::new(channel)
        .add_peer_to_known(request)
        .await
        .map_err(|err| anyhow!("Failed to add peer to known: {err}"))?;
    Ok(response.into_inner())
}
// Re-attach peer to the consensus:
// Notifies the cluster(any node) that this node changed its address
async fn recover(
    state_ref: &ConsensusStateRef,
    uri: Option<String>,
    p2p_port: u16,
    config: &ConsensusConfig,
    tls_config: Option<ClientTlsConfig>,
) -> anyhow::Result<()> {
    let this_peer_id = state_ref.this_peer_id();
    let mut peer_to_uri = state_ref
        .persistent
        .read()
        .peer_address_by_id
        .read()
        .clone();
    let this_peer_url = peer_to_uri.remove(&this_peer_id);
    // Only re-announce when a URI was supplied *and* it differs from the
    // address currently recorded for this peer.
    let needs_recovery = match (&this_peer_url, &uri) {
        (Some(known_url), Some(new_uri)) => known_url != &Uri::from_str(new_uri)?,
        _ => false,
    };
    if !needs_recovery {
        return Ok(());
    }
    let mut tries = RECOVERY_MAX_RETRY_COUNT;
    while tries > 0 {
        // Try to inform any peer about the change of address
        for (peer_id, peer_uri) in &peer_to_uri {
            let res = Self::add_peer_to_known_for(
                this_peer_id,
                peer_uri.clone(),
                uri.clone(),
                p2p_port,
                config,
                tls_config.clone(),
            )
            .await;
            match &res {
                Ok(_) => {
                    log::debug!(
                        "Successfully recovered from peer with id {peer_id} at {peer_uri}"
                    );
                    return Ok(());
                }
                Err(_) => {
                    log::warn!(
                        "Failed to recover from peer with id {peer_id} at {peer_uri} with error {res:?}, trying others"
                    );
                }
            }
        }
        tries -= 1;
        log::warn!(
            "Retrying recovering from known peers (retry {})",
            RECOVERY_MAX_RETRY_COUNT - tries
        );
        // Backoff grows linearly with the attempt number.
        let backoff = RECOVERY_RETRY_TIMEOUT * (RECOVERY_MAX_RETRY_COUNT - tries) as u32;
        sleep(backoff).await;
    }
    Err(anyhow::anyhow!("Failed to recover from any known peers"))
}
/// Add node sequence:
///
/// 1. Add current node as a learner
/// 2. Start applying entries from consensus
/// 3. Eventually leader submits the promotion proposal
/// 4. Learners become voters once they read about the promotion from consensus log
async fn bootstrap(
    state_ref: &ConsensusStateRef,
    bootstrap_peer: Uri,
    uri: Option<String>,
    p2p_port: u16,
    config: &ConsensusConfig,
    tls_config: Option<ClientTlsConfig>,
) -> anyhow::Result<()> {
    let this_peer_id = state_ref.this_peer_id();
    // Register ourselves with the bootstrap peer and get the cluster's peer list back.
    let known_peers = Self::add_peer_to_known_for(
        this_peer_id,
        bootstrap_peer,
        uri.clone(),
        p2p_port,
        config,
        tls_config,
    )
    .await?;
    // Although peer addresses are synchronized with consensus, addresses need to be pre-fetched in the case of a new peer
    // or it will not know how to answer the Raft leader
    for peer in known_peers.all_peers {
        let parsed_uri = peer
            .uri
            .parse()
            .context(format!("Failed to parse peer URI: {}", peer.uri))?;
        state_ref
            .add_peer(peer.id, parsed_uri)
            .map_err(|err| anyhow!("Failed to add peer: {err}"))?;
    }
    // Only first peer has itself as a voter in the initial conf state.
    // This needs to be propagated manually to other peers as it is not contained in any log entry.
    // So we skip the learner phase for the first peer.
    state_ref.set_first_voter(known_peers.first_peer_id)?;
    state_ref.set_conf_state(ConfState::from((vec![known_peers.first_peer_id], vec![])))?;
    Ok(())
}
/// Main loop of the consensus thread.
///
/// Blocks until `on_ready` signals that consensus should stop (returns `Ok(())`)
/// or a fatal error occurs. Each iteration waits for incoming messages, reports
/// elapsed ticks to the Raft node, and processes its ready state.
pub fn start(&mut self) -> anyhow::Result<()> {
    // If this is the only peer in the cluster, tick Raft node a few times to instantly
    // self-elect itself as Raft leader
    if self.node.store().peer_count() == 1 {
        while !self.node.has_ready() {
            self.node.tick();
        }
    }
    // If this is the origin peer of the cluster, try to add origin peer to consensus
    if let Err(err) = self.try_add_origin() {
        log::error!("Failed to add origin peer to consensus: {err}");
    }
    let tick_period = Duration::from_millis(self.config.tick_period_ms);
    let mut previous_tick = Instant::now();
    let mut idle_cycles = 0_usize;
    loop {
        // Wait (for up to `tick_period`) for incoming client requests and Raft messages
        let raft_messages = self.advance_node(tick_period)?;
        // Calculate how many ticks passed since the last one
        let elapsed_ticks = previous_tick.elapsed().div_duration_f32(tick_period) as u32;
        // Update previous tick timestamp
        previous_tick += tick_period * elapsed_ticks;
        // Calculate how many ticks we should *report* to Raft node.
        //
        // If last iteration of the loop took too long to complete, and we report all elapsed
        // ticks to Raft node, it might trigger unnecessary leader election.
        //
        // To prevent this, we check if we received new Raft messages (i.e., we are still
        // connected to Raft leader), and cap how many ticks we report to Raft node.
        //
        // By default, election is triggered if no Raft messages were received for 20 ticks,
        // so we report at most 15 ticks.
        //
        // See https://docs.rs/raft/latest/raft/struct.Config.html#structfield.election_tick.
        let report_ticks = if raft_messages > 0 {
            // Default `election_tick` is 20, so expected value here is 15
            let max_elapsed_ticks =
                cmp::max(1, self.raft_config.election_tick.saturating_sub(5));
            cmp::min(elapsed_ticks, max_elapsed_ticks as u32)
        } else {
            elapsed_ticks
        };
        // Report elapsed ticks to Raft node
        for _ in 0..report_ticks {
            self.node.tick();
        }
        // Append new entries to the WAL, apply committed entries, etc...
        let (stop_consensus, is_idle) = self.on_ready()?;
        if stop_consensus {
            return Ok(());
        }
        // If we only sent outgoing Raft messages, but did not change any state during `on_ready`,
        // we consider Raft node to be "idle"
        if is_idle {
            // If current node is the only peer in the cluster, or if we received new Raft messages
            // (i.e., we are still connected to Raft leader/peers), and Raft node is idle,
            // count "idle cycle"
            if raft_messages > 0 || (self.is_single_peer() && self.is_leader()) {
                idle_cycles += 1;
            }
        } else {
            // If Raft state was updated, reset idle cycle counter
            idle_cycles = 0;
        }
        // If Raft node was idle for 3 cycles, try to sync local state to consensus
        if idle_cycles >= 3 {
            self.try_sync_local_state()?;
        }
    }
}
/// Drain up to one batch of pending client requests / Raft messages into the Raft node,
/// waiting at most `tick_period` in total.
///
/// Returns the number of *peer* Raft messages processed — the caller uses this to
/// decide how many ticks to report (see `start`).
fn advance_node(&mut self, tick_period: Duration) -> anyhow::Result<usize> {
    if self
        .try_promote_learner()
        .context("failed to promote learner")?
    {
        return Ok(0);
    }
    // This method propagates incoming client requests and Raft messages to Raft node
    // It's more efficient to process multiple events, so we propagate up to 128 events at a time
    const RAFT_BATCH_SIZE: usize = 128;
    // We have to tick Raft node periodically, so we can wait for new events for up to `tick_period`
    let hard_timeout_at = Instant::now() + tick_period;
    // We also want to react to new events as quickly as possible, so we only wait for `tick_period / 10`
    // for any consecutive events after the first one
    let consecutive_message_timeout = tick_period / 10;
    // Timeout to wait for the *next* event
    let mut timeout_at = hard_timeout_at;
    // Track how many *events* we received...
    let mut events = 0;
    // ...and how many of these events were *Raft messages*
    let mut raft_messages = 0;
    loop {
        // Timeout or closed channel both end the batch.
        let Ok(message) = self.recv_update(timeout_at) else {
            break;
        };
        // When we discover conf-change request, we have to break early and process it ASAP,
        // because Raft node allows to process single conf-change request at a time.
        //
        // E.g., without this condition, if two nodes try to join cluster at the same time and
        // both conf-change requests are processed in the same batch, the second request would
        // be ignored and the node would fail to join.
        let is_conf_change = matches!(
            message,
            Message::FromClient(
                ConsensusOperations::AddPeer { .. } | ConsensusOperations::RemovePeer(_)
            ),
        );
        let is_raft_message = matches!(message, Message::FromPeer(_));
        if let Err(err) = self.advance_node_impl(message) {
            log::warn!("{err}");
            continue;
        }
        timeout_at = cmp::min(
            hard_timeout_at,
            Instant::now() + consecutive_message_timeout,
        );
        events += 1;
        raft_messages += usize::from(is_raft_message);
        if events >= RAFT_BATCH_SIZE || is_conf_change {
            break;
        }
    }
    Ok(raft_messages)
}
/// Block on the async runtime waiting for the next queued [`Message`].
///
/// Fails with `Timeout` once `timeout_at` passes, or with `Closed` when the
/// sending side of the channel has been dropped.
fn recv_update(&mut self, timeout_at: Instant) -> Result<Message, TryRecvUpdateError> {
    self.runtime.block_on(async {
        tokio::select! {
            // `biased` checks the channel arm first, so an already-queued
            // message always wins over an expired deadline.
            biased;
            message = self.receiver.recv() => match message {
                Some(message) => Ok(message),
                None => Err(TryRecvUpdateError::Closed),
            },
            _ = tokio::time::sleep_until(timeout_at.into()) => Err(TryRecvUpdateError::Timeout),
        }
    })
}
/// Apply a single queued message to the Raft node: peer add/remove become
/// conf-change proposals, other client operations become regular log proposals,
/// and peer messages are `step`ped into the state machine.
fn advance_node_impl(&mut self, message: Message) -> anyhow::Result<()> {
    match message {
        Message::FromClient(ConsensusOperations::AddPeer { peer_id, uri }) => {
            let existing_uris = self
                .broker
                .consensus_state
                .peer_address_by_id()
                .into_iter()
                .map(|(peer_id, url)| (url, peer_id))
                .collect::<HashMap<_, _>>();
            // Don't allow a peer URI to join if already in consensus
            // - new URIs can always join
            // - existing URIs can re-join with the same peer ID
            // See: <https://github.com/qdrant/qdrant/pull/7375>
            if let Some(registered_peer_id) =
                existing_uris.get(&uri.parse::<Uri>().context("peer URI is not a valid URI")?)
                && registered_peer_id != &peer_id
            {
                log::warn!(
                    "Rejected peer {peer_id} to join consensus, URI is already registered by peer {registered_peer_id} ({uri})",
                );
                return Err(anyhow!(
                    "peer URI {uri} already used by peer {registered_peer_id}, remove it first or use a different URI",
                ));
            }
            // New peers always join as learners; promotion happens later (`try_promote_learner`).
            let mut change = ConfChangeV2::default();
            change.set_changes(vec![raft_proto::new_conf_change_single(
                peer_id,
                ConfChangeType::AddLearnerNode,
            )]);
            log::debug!("Proposing network configuration change: {change:?}");
            self.node
                .propose_conf_change(uri.into_bytes(), change)
                .context("failed to propose conf change")?;
        }
        Message::FromClient(ConsensusOperations::RemovePeer(peer_id)) => {
            let mut change = ConfChangeV2::default();
            change.set_changes(vec![raft_proto::new_conf_change_single(
                peer_id,
                ConfChangeType::RemoveNode,
            )]);
            log::debug!("Proposing network configuration change: {change:?}");
            self.node
                .propose_conf_change(vec![], change)
                .context("failed to propose conf change")?;
        }
        Message::FromClient(ConsensusOperations::RequestSnapshot) => {
            // Ask the Raft node to take a snapshot of the current state.
            self.node
                .request_snapshot()
                .context("failed to request snapshot")?;
        }
        Message::FromClient(ConsensusOperations::ReportSnapshot { peer_id, status }) => {
            // Report the outcome of a snapshot transfer back to the Raft node.
            self.node.report_snapshot(peer_id, status.into());
        }
        Message::FromClient(operation) => {
            // Any other client operation is serialized (CBOR) and proposed as a regular log entry.
            let data =
                serde_cbor::to_vec(&operation).context("failed to serialize operation")?;
            log::trace!("Proposing entry from client with length: {}", data.len());
            self.node
                .propose(vec![], data)
                .context("failed to propose entry")?;
        }
        Message::FromPeer(message) => {
            // Heartbeats are too chatty to trace-log.
            let is_heartbeat = matches!(
                message.get_msg_type(),
                MessageType::MsgHeartbeat | MessageType::MsgHeartbeatResponse,
            );
            if !is_heartbeat {
                log::trace!(
                    "Received a message from peer with progress: {:?}. Message: {:?}",
                    self.node.raft.prs().get(message.from),
                    message,
                );
            }
            self.node.step(*message).context("failed to step message")?;
        }
    }
    Ok(())
}
/// `true` when this node is the only known peer in the cluster.
fn is_single_peer(&self) -> bool {
    let peers = self.node.store().peer_count();
    peers == 1
}
/// `true` when this node currently believes itself to be the Raft leader.
fn is_leader(&self) -> bool {
    matches!(self.node.status().ss.raft_state, raft::StateRole::Leader)
}
/// Push any un-synchronized local state to consensus, but only when the node is
/// otherwise idle: no pending Raft updates, no unapplied entries, and an
/// established leader.
fn try_sync_local_state(&self) -> anyhow::Result<()> {
    if self.node.has_ready() {
        // There are Raft updates to process first; don't sync now.
        return Ok(());
    }
    let store = self.node.store();
    let unapplied = store.persistent.read().unapplied_entities_count();
    if unapplied != 0 {
        return Ok(());
    }
    if !store.is_leader_established.check_ready() {
        return Ok(());
    }
    // Leader is established and there is nothing else to do on this iteration,
    // so check if there is any un-synchronized local state left.
    store.sync_local_state()?;
    Ok(())
}
/// Tries to propose "origin peer" (the very first peer, that starts new cluster) to consensus
///
/// Returns `Ok(true)` when the conf-change proposal was submitted, `Ok(false)` when this
/// node is not the origin peer. Fails with `NotLeader` while self-election is still pending
/// (the caller simply logs and retries implicitly on the next start) or with `UriNotFound`
/// when our own address is missing from persisted state.
fn try_add_origin(&mut self) -> Result<bool, TryAddOriginError> {
    // We can determine origin peer from consensus state:
    // - it should be the only peer in the cluster
    // - and its commit index should be at 0 or 1
    //
    // When we add a new node to existing cluster, we have to bootstrap it from existing cluster
    // node, and during bootstrap we explicitly add all current peers to consensus state. So,
    // *all* peers added to the cluster after the origin will always have at least two peers.
    //
    // When origin peer starts new cluster, it self-elects itself as a leader and commits empty
    // operation with index 1. It is impossible to commit anything to consensus before this
    // operation is committed. And to add another (second/third/etc) peer to the cluster, we
    // have to commit a conf-change operation. Which means that only origin peer can ever be at
    // commit index 0 or 1.

    // Check that we are the only peer in the cluster
    if self.node.store().peer_count() > 1 {
        return Ok(false);
    }

    let status = self.node.status();

    // Check that we are at index 0 or 1
    if status.hs.commit > 1 {
        return Ok(false);
    }

    // If we reached this point, we are the origin peer, but it's impossible to propose anything
    // to consensus, before leader is elected (`propose_conf_change` will return an error),
    // so we have to wait for a few ticks for self-election
    if status.ss.raft_state != StateRole::Leader {
        return Err(TryAddOriginError::NotLeader);
    }

    // Propose origin peer to consensus
    let mut change = ConfChangeV2::default();
    change.set_changes(vec![raft_proto::new_conf_change_single(
        status.id,
        ConfChangeType::AddNode,
    )]);

    let peer_uri = self
        .node
        .store()
        .persistent
        .read()
        .peer_address_by_id
        .read()
        .get(&status.id)
        // Variant construction is free — no need for the lazy `ok_or_else` form here.
        .ok_or(TryAddOriginError::UriNotFound)?
        .to_string();

    self.node.propose_conf_change(peer_uri.into(), change)?;

    Ok(true)
}
/// Returns `true` if learner promotion was proposed, `false` otherwise.
/// Learner node does not vote on elections, cause it might not have a big picture yet.
/// So consensus should guarantee that learners are promoted one-by-one.
/// Promotions are done by leader and only after it has no pending entries,
/// that guarantees that learner will start voting only after it applies all the changes in the log
fn try_promote_learner(&mut self) -> anyhow::Result<bool> {
    // Only the leader is allowed to propose a promotion.
    if self.node.status().ss.raft_state != StateRole::Leader {
        return Ok(false);
    }

    // Only promote once the whole log has been committed.
    let store = self.node.store();
    let committed_index = store.hard_state().commit;
    let last_index = store.last_index()?;
    if committed_index != last_index {
        return Ok(false);
    }

    let learner = match self.find_learner_to_promote() {
        Some(learner) => learner,
        None => return Ok(false),
    };

    log::debug!("Proposing promotion for learner {learner} to voter");

    // Propose a conf-change that turns the learner into a voting member.
    let mut conf_change = ConfChangeV2::default();
    conf_change.set_changes(vec![raft_proto::new_conf_change_single(
        learner,
        ConfChangeType::AddNode,
    )]);
    self.node.propose_conf_change(vec![], conf_change)?;

    Ok(true)
}
fn find_learner_to_promote(&self) -> Option<u64> {
let commit = self.node.store().hard_state().commit;
let learners: HashSet<_> = self
.node
.store()
.conf_state()
.learners
.into_iter()
.collect();
let status = self.node.status();
status
.progress?
.iter()
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/schema_generator.rs | src/schema_generator.rs | #![allow(dead_code)]
use api::rest::models::{CollectionsResponse, ShardKeysResponse, Usage, VersionInfo};
use api::rest::schema::PointInsertOperations;
use api::rest::{
FacetRequest, FacetResponse, QueryGroupsRequest, QueryRequest, QueryRequestBatch,
QueryResponse, Record, ScoredPoint, SearchMatrixOffsetsResponse, SearchMatrixPairsResponse,
SearchMatrixRequest, UpdateVectors,
};
use collection::operations::cluster_ops::ClusterOperations;
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::payload_ops::{DeletePayload, SetPayload};
use collection::operations::point_ops::{PointsSelector, WriteOrdering};
use collection::operations::snapshot_ops::{
ShardSnapshotRecover, SnapshotDescription, SnapshotRecover,
};
use collection::operations::types::{
AliasDescription, CollectionClusterInfo, CollectionExistence, CollectionInfo,
CollectionsAliasesResponse, CountRequest, CountResult, DiscoverRequest, DiscoverRequestBatch,
GroupsResult, OptimizationsResponse, PointGroup, PointRequest, RecommendGroupsRequest,
RecommendRequest, RecommendRequestBatch, ScrollRequest, ScrollResult, SearchGroupsRequest,
SearchRequest, SearchRequestBatch, UpdateResult,
};
use collection::operations::vector_ops::DeleteVectors;
use schemars::JsonSchema;
use schemars::r#gen::SchemaSettings;
use serde::Serialize;
use storage::content_manager::collection_meta_ops::{
ChangeAliasesOperation, CreateCollection, UpdateCollection,
};
use storage::types::ClusterStatus;
use crate::common::telemetry::TelemetryData;
use crate::common::update::{CreateFieldIndex, UpdateOperations};
mod actix;
mod common;
mod settings;
mod tracing;
/// Aggregate of every public REST/JSON type that must appear in the generated
/// JSON schema. Field names are arbitrary placeholders; only the types matter,
/// since `schemars` collects definitions from the field types.
#[derive(Serialize, JsonSchema)]
struct AllDefinitions {
    a1: CollectionsResponse,
    a2: CollectionInfo,
    // a3: CollectionMetaOperations,
    a4: PointRequest,
    a5: Record,
    a6: SearchRequest,
    a7: ScoredPoint,
    a8: UpdateResult,
    // a9: CollectionUpdateOperations,
    aa: RecommendRequest,
    ab: ScrollRequest,
    ac: ScrollResult,
    ad: CreateCollection,
    ae: UpdateCollection,
    af: ChangeAliasesOperation,
    ag: CreateFieldIndex,
    ah: PointsSelector,
    ai: PointInsertOperations,
    aj: SetPayload,
    ak: DeletePayload,
    al: ClusterStatus,
    am: SnapshotDescription,
    an: CountRequest,
    ao: CountResult,
    ap: CollectionClusterInfo,
    aq: TelemetryData,
    ar: ClusterOperations,
    at: SearchRequestBatch,
    au: RecommendRequestBatch,
    aw: SnapshotRecover,
    ax: CollectionsAliasesResponse,
    ay: AliasDescription,
    az: WriteOrdering,
    b1: ReadConsistency,
    b2: UpdateVectors,
    b3: DeleteVectors,
    b4: PointGroup,
    b5: SearchGroupsRequest,
    b6: RecommendGroupsRequest,
    b7: GroupsResult,
    b8: UpdateOperations,
    b9: ShardSnapshotRecover,
    ba: DiscoverRequest,
    bb: DiscoverRequestBatch,
    bc: VersionInfo,
    bd: CollectionExistence,
    be: QueryRequest,
    bf: QueryRequestBatch,
    bg: QueryResponse,
    bh: QueryGroupsRequest,
    bi: SearchMatrixRequest,
    bj: SearchMatrixOffsetsResponse,
    bk: SearchMatrixPairsResponse,
    bl: FacetRequest,
    bm: FacetResponse,
    bn: Usage,
    bo: ShardKeysResponse,
    bp: OptimizationsResponse,
}
/// Generates the draft-07 JSON schema for `T` and prints it, pretty-formatted,
/// to stdout.
fn save_schema<T: JsonSchema>() {
    let generator = SchemaSettings::draft07().into_generator();
    let root_schema = generator.into_root_schema_for::<T>();
    // Serializing a freshly generated schema cannot fail.
    let pretty = serde_json::to_string_pretty(&root_schema).unwrap();
    println!("{pretty}")
}
/// Entry point: prints the combined JSON schema for all public API types.
fn main() {
    save_schema::<AllDefinitions>();
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/config.rs | src/tracing/config.rs | use std::collections::HashSet;
use serde::{Deserialize, Serialize};
use tracing_subscriber::fmt;
use super::*;
/// Top-level logger configuration: the default (console) logger plus an
/// optional on-disk logger.
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
#[serde(default)]
pub struct LoggerConfig {
    /// Default logger settings; flattened, so its fields live at this level in config files.
    #[serde(flatten)]
    pub default: default::Config,
    /// On-disk (log file) logger settings.
    #[serde(default)]
    pub on_disk: on_disk::Config,
}
impl LoggerConfig {
    /// Returns a copy of this config with the top-level `log_level` directive
    /// applied, unless `logger.log_level` was already set (which takes priority;
    /// a warning is printed when both are present).
    pub fn with_top_level_directive(&self, log_level: Option<String>) -> Self {
        let mut merged = self.clone();
        let logger_level_set = merged.default.log_level.is_some();

        if logger_level_set && log_level.is_some() {
            eprintln!(
                "Both top-level `log_level` and `logger.log_level` config directives are used. \
                 `logger.log_level` takes priority, so top-level `log_level` will be ignored."
            );
        }

        if !logger_level_set {
            merged.default.log_level = log_level;
        }

        merged
    }

    /// Overlays `other` onto `self`, section by section.
    pub fn merge(&mut self, other: Self) {
        let Self { default, on_disk } = other;
        self.default.merge(default);
        self.on_disk.merge(on_disk);
    }
}
/// Output format of a log line.
#[derive(Default, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum LogFormat {
    /// Human-readable text (the default).
    #[default]
    Text,
    /// Structured JSON, one object per line.
    Json,
}
/// Span lifecycle points that can be logged (mirrors `tracing`'s `FmtSpan` flags).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Hash)]
#[serde(rename_all = "snake_case")]
pub enum SpanEvent {
    New,
    Enter,
    Exit,
    Close,
}
impl SpanEvent {
    /// Converts an optional set of configured span events into `FmtSpan` flags,
    /// treating `None` as "no span events" (`FmtSpan::NONE`).
    pub fn unwrap_or_default_config(events: &Option<HashSet<Self>>) -> fmt::format::FmtSpan {
        // `Option::iter().flatten()` yields the set's elements when present and
        // nothing otherwise, avoiding the construction of a temporary empty
        // `HashSet` just to have something to iterate.
        Self::into_fmt_span(events.iter().flatten().copied())
    }

    /// Folds a collection of span events into a single `FmtSpan` bit set.
    pub fn into_fmt_span(events: impl IntoIterator<Item = Self>) -> fmt::format::FmtSpan {
        events
            .into_iter()
            .fold(fmt::format::FmtSpan::NONE, |events, event| {
                events | event.into()
            })
    }
}
impl From<SpanEvent> for fmt::format::FmtSpan {
    /// Maps each configured span-event kind onto the corresponding `FmtSpan` flag.
    fn from(event: SpanEvent) -> Self {
        use fmt::format::FmtSpan;

        match event {
            SpanEvent::New => FmtSpan::NEW,
            SpanEvent::Enter => FmtSpan::ENTER,
            SpanEvent::Exit => FmtSpan::EXIT,
            SpanEvent::Close => FmtSpan::CLOSE,
        }
    }
}
/// Whether to colorize console output.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum Color {
    /// Decide automatically (TTY/environment detection).
    #[default]
    Auto,
    /// Explicit user choice; deserialized from a bare `true`/`false`.
    #[serde(untagged)]
    Explicit(bool),
}
impl Color {
    /// Resolves this setting to a concrete on/off decision.
    pub fn to_bool(self) -> bool {
        if let Self::Explicit(enabled) = self {
            enabled
        } else {
            // `Auto`: defer to the `colored` crate's terminal/environment detection.
            colored::control::SHOULD_COLORIZE.should_colorize()
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/test.rs | src/tracing/test.rs | use std::collections::HashSet;
use serde_json::json;
use super::*;
/// A fully populated config JSON round-trips into the expected typed struct.
#[test]
fn deserialize_logger_config() {
    let json = json!({
        "log_level": "debug",
        "span_events": ["new", "close"],
        "color": true,
        "on_disk": {
            "enabled": true,
            "log_file": "/logs/qdrant",
            "log_level": "tracing",
            "span_events": ["new", "close"],
            "buffer_size_bytes": 1024,
        }
    });
    let config = deserialize_config(json);
    let expected = LoggerConfig {
        default: default::Config {
            log_level: Some("debug".into()),
            span_events: Some(HashSet::from([
                config::SpanEvent::New,
                config::SpanEvent::Close,
            ])),
            format: None,
            color: Some(config::Color::Explicit(true)),
        },
        on_disk: on_disk::Config {
            enabled: Some(true),
            log_file: Some("/logs/qdrant".into()),
            log_level: Some("tracing".into()),
            span_events: Some(HashSet::from([
                config::SpanEvent::New,
                config::SpanEvent::Close,
            ])),
            format: None,
            buffer_size_bytes: Some(1024),
        },
    };
    assert_eq!(config, expected);
}
/// Explicit `format` values ("json" for default, "text" for on-disk) deserialize correctly.
#[test]
fn deserialize_json_logger_config() {
    let json = json!({
        "log_level": "debug",
        "span_events": ["new", "close"],
        "format": "json",
        "color": true,
        "on_disk": {
            "enabled": true,
            "log_file": "/logs/qdrant",
            "log_level": "tracing",
            "span_events": ["new", "close"],
            "format": "text",
            "buffer_size_bytes": 1024,
        }
    });
    let config = deserialize_config(json);
    let expected = LoggerConfig {
        default: default::Config {
            log_level: Some("debug".into()),
            span_events: Some(HashSet::from([
                config::SpanEvent::New,
                config::SpanEvent::Close,
            ])),
            format: Some(config::LogFormat::Json),
            color: Some(config::Color::Explicit(true)),
        },
        on_disk: on_disk::Config {
            enabled: Some(true),
            log_file: Some("/logs/qdrant".into()),
            log_level: Some("tracing".into()),
            span_events: Some(HashSet::from([
                config::SpanEvent::New,
                config::SpanEvent::Close,
            ])),
            format: Some(config::LogFormat::Text),
            buffer_size_bytes: Some(1024),
        },
    };
    assert_eq!(config, expected);
}
/// An empty JSON object deserializes to the all-defaults config.
#[test]
fn deserialize_empty_config() {
    let config = deserialize_config(json!({}));
    assert_eq!(config, LoggerConfig::default());
}
/// An empty `on_disk` section is equivalent to omitting it entirely.
#[test]
fn deserialize_config_with_empty_on_disk() {
    let config = deserialize_config(json!({ "on_disk": {} }));
    assert_eq!(config, LoggerConfig::default());
}
/// Explicit JSON `null`s for every field deserialize the same as omitted fields.
// Fix: test name had a typo ("deseriailze"); test fns have no callers, so the
// rename is safe.
#[test]
fn deserialize_config_with_explicit_nulls() {
    let json = json!({
        "log_level": null,
        "span_events": null,
        "format": null,
        "color": null,
        "on_disk": {
            "enabled": null,
            "log_file": null,
            "log_level": null,
            "span_events": null,
            "format": null,
            "buffer_size_bytes": null,
        }
    });
    let config = deserialize_config(json);
    assert_eq!(config, LoggerConfig::default());
}
/// Test helper: deserializes a JSON value into a `LoggerConfig`, panicking on failure.
fn deserialize_config(json: serde_json::Value) -> LoggerConfig {
    serde_json::from_value::<LoggerConfig>(json).unwrap()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/on_disk.rs | src/tracing/on_disk.rs | use std::collections::HashSet;
use std::io;
use std::sync::Mutex;
use anyhow::Context as _;
use common::ext::OptionExt;
use fs_err as fs;
use serde::{Deserialize, Serialize};
use tracing_subscriber::{Layer, fmt, registry};
use super::*;
/// Configuration of the on-disk (log file) logger. All fields are optional so
/// that partial configs can be merged; `None` means "not specified".
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
#[serde(default)]
pub struct Config {
    // Whether the on-disk logger is active at all.
    pub enabled: Option<bool>,
    // Path of the log file to append to.
    pub log_file: Option<String>,
    // Filter directives for this logger (same syntax as the default logger).
    pub log_level: Option<String>,
    // Text or JSON output.
    pub format: Option<config::LogFormat>,
    // Which span lifecycle points to log.
    pub span_events: Option<HashSet<config::SpanEvent>>,
    // Capacity of the write buffer in front of the file.
    pub buffer_size_bytes: Option<usize>,
}
impl Config {
    /// Overlays `other` onto `self`: every field that is `Some` in `other`
    /// replaces the corresponding field of `self`.
    pub fn merge(&mut self, other: Self) {
        self.enabled.replace_if_some(other.enabled);
        self.log_file.replace_if_some(other.log_file);
        self.log_level.replace_if_some(other.log_level);
        self.span_events.replace_if_some(other.span_events);
        self.format.replace_if_some(other.format);
        self.buffer_size_bytes.replace_if_some(other.buffer_size_bytes);
    }
}
/// Builds the filtered on-disk logger. If the log file cannot be opened, the
/// failure is reported on stderr and the logger is disabled in `config`
/// (hence the `&mut`), leaving the rest of the logging stack functional.
pub fn new_logger<S>(config: &mut Config) -> Logger<S>
where
    S: tracing::Subscriber + for<'span> registry::LookupSpan<'span>,
{
    let layer = new_layer(config).unwrap_or_else(|err| {
        eprintln!(
            "failed to enable logging into {} log-file: {err}",
            config.log_file.as_deref().unwrap_or(""),
        );
        // Record the failure in the config, so it is visible to callers.
        config.enabled = Some(false);
        None
    });

    let filter = new_filter(config);
    layer.with_filter(filter)
}
/// Builds the on-disk `fmt` layer, or `Ok(None)` when the on-disk logger is
/// disabled. Fails if no log file is configured or the file cannot be opened.
pub fn new_layer<S>(config: &Config) -> anyhow::Result<Option<Box<dyn Layer<S> + Send + Sync>>>
where
    S: tracing::Subscriber + for<'span> registry::LookupSpan<'span>,
{
    // Disabled (or unspecified) -> no layer at all.
    if !config.enabled.unwrap_or_default() {
        return Ok(None);
    }
    let Some(log_file) = &config.log_file else {
        return Err(anyhow::format_err!(
            "log file is not specified (it can only be specified in the config file)"
        ));
    };
    // Append to the existing file, creating it if necessary.
    let writer = fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(log_file)
        .with_context(|| format!("failed to open log file {log_file}"))?;
    // Buffered writer behind a mutex: `fmt::Layer` requires a `MakeWriter`,
    // and `Mutex<W>` provides one for any `io::Write`.
    let layer = fmt::Layer::default()
        .with_writer(Mutex::new(io::BufWriter::with_capacity(
            config.buffer_size_bytes.unwrap_or(8192),
            writer,
        )))
        .with_span_events(config::SpanEvent::unwrap_or_default_config(
            &config.span_events,
        ))
        // No ANSI escape sequences in files.
        .with_ansi(false);
    let layer = match config.format {
        None | Some(config::LogFormat::Text) => Box::new(layer) as _,
        Some(config::LogFormat::Json) => Box::new(layer.json()) as _,
    };
    Ok(Some(layer))
}
/// Builds the env-filter for the on-disk logger from its configured directives.
pub fn new_filter(config: &Config) -> filter::EnvFilter {
    let directives = config.log_level.as_deref().unwrap_or_default();
    filter(directives)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/default.rs | src/tracing/default.rs | use std::collections::HashSet;
use common::ext::OptionExt;
use serde::{Deserialize, Serialize};
use tracing_subscriber::{Layer, fmt, registry};
use super::*;
/// Configuration of the default (console) logger. All fields are optional so
/// that partial configs can be merged; `None` means "not specified".
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
#[serde(default)]
pub struct Config {
    // Filter directives (e.g. "debug" or "qdrant=debug,hyper=warn").
    pub log_level: Option<String>,
    // Which span lifecycle points to log.
    pub span_events: Option<HashSet<config::SpanEvent>>,
    // Text or JSON output.
    pub format: Option<config::LogFormat>,
    // Colorize output: auto-detect or explicit.
    pub color: Option<config::Color>,
}
impl Config {
    /// Overlays `other` onto `self`: every field that is `Some` in `other`
    /// replaces the corresponding field of `self`.
    pub fn merge(&mut self, other: Self) {
        self.log_level.replace_if_some(other.log_level);
        self.span_events.replace_if_some(other.span_events);
        self.format.replace_if_some(other.format);
        self.color.replace_if_some(other.color);
    }
}
/// Builds the filtered default logger from `config`.
pub fn new_logger<S>(config: &Config) -> Logger<S>
where
    S: tracing::Subscriber + for<'span> registry::LookupSpan<'span>,
{
    // `Logger` wraps an *optional* layer (so it can be disabled at runtime);
    // the default logger is always present, hence `Some`.
    Some(new_layer(config)).with_filter(new_filter(config))
}
/// Builds the console `fmt` layer according to `config` (span events, ANSI
/// colors, and text vs JSON output).
pub fn new_layer<S>(config: &Config) -> Box<dyn Layer<S> + Send + Sync>
where
    S: tracing::Subscriber + for<'span> registry::LookupSpan<'span>,
{
    let span_events = config::SpanEvent::unwrap_or_default_config(&config.span_events);
    let ansi = config.color.unwrap_or_default().to_bool();

    let layer = fmt::Layer::default()
        .with_span_events(span_events)
        .with_ansi(ansi);

    if matches!(config.format, Some(config::LogFormat::Json)) {
        Box::new(layer.json())
    } else {
        // Unspecified format defaults to text.
        Box::new(layer)
    }
}
/// Builds the env-filter for the default logger from its configured directives.
pub fn new_filter(config: &Config) -> filter::EnvFilter {
    let directives = config.log_level.as_deref().unwrap_or_default();
    filter(directives)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/mod.rs | src/tracing/mod.rs | pub mod config;
pub mod default;
pub mod handle;
pub mod on_disk;
#[cfg(test)]
mod test;
use std::fmt::Write as _;
use std::str::FromStr as _;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{filter, reload};
pub use self::config::LoggerConfig;
pub use self::handle::LoggerHandle;
// Log level used when the user did not specify any level directive.
const DEFAULT_LOG_LEVEL: log::LevelFilter = log::LevelFilter::Info;
// Per-target noise reduction for chatty dependencies. Each entry is applied
// only when it is stricter than the user's chosen global level (see `filter`).
const DEFAULT_FILTERS: &[(&str, log::LevelFilter)] = &[
    ("hyper", log::LevelFilter::Info),
    ("h2", log::LevelFilter::Error),
    ("tower", log::LevelFilter::Warn),
    ("rustls", log::LevelFilter::Info),
    ("wal", log::LevelFilter::Warn),
    ("raft", log::LevelFilter::Warn),
];
/// Installs the global `tracing` subscriber stack (on-disk + default loggers,
/// plus optional `console-subscriber`/`tracy` layers), bridges the `log` crate
/// into it, and returns a handle for runtime reconfiguration of both loggers.
pub fn setup(mut config: config::LoggerConfig) -> anyhow::Result<LoggerHandle> {
    // Note that on-disk logger *have* to be initialized *before* default logger!
    //
    // If default logger is initialized before on-disk logger, then ANSI escape-sequences (that are
    // used to apply color and formatting in the terminal, but looks like corrupted text in the text
    // editor) might appear in the on-disk log-file.
    //
    // This happens because when multiple `fmt::Layer`s are initialized in the same subscriber,
    // the top-level `fmt::Layer` would cache pre-formatted fragments of the log-line
    // for the next `fmt::Layer`s to reuse.
    //
    // And default logger outputs colored log-lines, which on-disk logger reuse even if colors are
    // disabled for the on-disk logger. :/
    let on_disk_logger = on_disk::new_logger(&mut config.on_disk);
    // `reload::Layer` allows swapping the layer/filter at runtime via the handle.
    let (on_disk_logger, on_disk_logger_handle) = reload::Layer::new(on_disk_logger);
    let reg = tracing_subscriber::registry().with(on_disk_logger);
    let default_logger = default::new_logger(&config.default);
    let (default_logger, default_logger_handle) = reload::Layer::new(default_logger);
    let reg = reg.with(default_logger);
    let logger_handle = LoggerHandle::new(config, default_logger_handle, on_disk_logger_handle);
    // Use `console` or `console-subscriber` feature to enable `console-subscriber`
    //
    // Note, that `console-subscriber` requires manually enabling
    // `--cfg tokio_unstable` rust flags during compilation!
    //
    // Otherwise `console_subscriber::spawn` call panics!
    //
    // See https://docs.rs/tokio/latest/tokio/#unstable-features
    #[cfg(all(feature = "console-subscriber", tokio_unstable))]
    let reg = reg.with(console_subscriber::spawn());
    #[cfg(all(feature = "console-subscriber", not(tokio_unstable)))]
    eprintln!(
        "`console-subscriber` requires manually enabling \
         `--cfg tokio_unstable` rust flags during compilation!"
    );
    // Use `tracy` or `tracing-tracy` feature to enable `tracing-tracy`
    #[cfg(feature = "tracing-tracy")]
    let reg = reg.with(
        tracing_tracy::TracyLayer::new(tracing_tracy::DefaultConfig::default()).with_filter(
            tracing_subscriber::filter::filter_fn(|metadata| metadata.is_span()),
        ),
    );
    tracing::subscriber::set_global_default(reg)?;
    // Route `log` crate records into `tracing`.
    tracing_log::LogTracer::init()?;
    Ok(logger_handle)
}
/// Builds an `EnvFilter` from the user's comma-separated directives, prepending
/// `DEFAULT_LOG_LEVEL` when the user did not set a global level, and capping the
/// targets in `DEFAULT_FILTERS` whenever the effective global level is noisier
/// than the cap.
fn filter(user_filters: &str) -> filter::EnvFilter {
    let mut filter = String::new();
    // The last bare level directive (e.g. "debug") wins, matching env-filter semantics.
    let user_log_level = user_filters
        .rsplit(',')
        .find_map(|dir| log::LevelFilter::from_str(dir).ok());
    if user_log_level.is_none() {
        write!(&mut filter, "{DEFAULT_LOG_LEVEL}").unwrap(); // Writing into `String` never fails
    }
    for &(target, log_level) in DEFAULT_FILTERS {
        if user_log_level.unwrap_or(DEFAULT_LOG_LEVEL) > log_level {
            let comma = if filter.is_empty() { "" } else { "," };
            write!(&mut filter, "{comma}{target}={log_level}").unwrap(); // Writing into `String` never fails
        }
    }
    // Fix: only append the user's directives when there are any; the previous
    // unconditional append produced a dangling trailing comma (an empty
    // directive) when `user_filters` was empty.
    if !user_filters.is_empty() {
        let comma = if filter.is_empty() { "" } else { "," };
        write!(&mut filter, "{comma}{user_filters}").unwrap(); // Writing into `String` never fails
    }
    filter::EnvFilter::builder()
        .with_regex(false)
        .parse_lossy(filter)
}
/// A (possibly absent) boxed `tracing` layer combined with an `EnvFilter`.
/// The `Option` lets a logger be disabled without changing the subscriber's type.
#[rustfmt::skip] // `rustfmt` formats this into unreadable single line
type Logger<S> = filter::Filtered<
    Option<Box<dyn tracing_subscriber::Layer<S> + Send + Sync>>,
    filter::EnvFilter,
    S,
>;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tracing/handle.rs | src/tracing/handle.rs | use std::sync::Arc;
use tokio::sync::RwLock;
use tracing_subscriber::{Registry, layer, reload};
use super::*;
/// Cloneable handle for inspecting and hot-reloading the logger configuration
/// installed by `setup`.
#[derive(Clone)]
pub struct LoggerHandle {
    // Current effective configuration, shared across clones.
    config: Arc<RwLock<config::LoggerConfig>>,
    // Reload handle for the default (console) logger.
    default: DefaultLoggerReloadHandle,
    // Reload handle for the on-disk logger.
    on_disk: OnDiskLoggerReloadHandle,
}
// Handle to the default logger, which sits *above* the on-disk logger in the
// subscriber stack (see `setup`), hence the layered subscriber type below.
#[rustfmt::skip] // `rustfmt` formats this into unreadable single line
type DefaultLoggerReloadHandle<S = DefaultLoggerSubscriber> = reload::Handle<
    Logger<S>,
    S,
>;
#[rustfmt::skip] // `rustfmt` formats this into unreadable single line
type DefaultLoggerSubscriber<S = Registry> = layer::Layered<
    reload::Layer<Logger<S>, S>,
    S,
>;
// Handle to the on-disk logger, which sits directly on the root `Registry`.
#[rustfmt::skip] // `rustfmt` formats this into unreadable single line
type OnDiskLoggerReloadHandle<S = Registry> = reload::Handle<
    Logger<S>,
    S,
>;
impl LoggerHandle {
    /// Wraps the installed reload handles together with the active config.
    pub fn new(
        config: config::LoggerConfig,
        default: DefaultLoggerReloadHandle,
        on_disk: OnDiskLoggerReloadHandle,
    ) -> Self {
        Self {
            config: Arc::new(RwLock::new(config)),
            default,
            on_disk,
        }
    }

    /// Returns a snapshot of the currently active logger configuration.
    pub async fn get_config(&self) -> config::LoggerConfig {
        self.config.read().await.clone()
    }

    /// Merges `new_config` into the active configuration and hot-swaps the
    /// affected loggers. Only sections that actually changed are rebuilt.
    pub async fn update_config(&self, new_config: config::LoggerConfig) -> anyhow::Result<()> {
        let mut config = self.config.write().await;
        // `tracing-subscriber` does not support `reload`ing `Filtered` layers, so we *have to* use
        // `modify`. However, `modify` would *deadlock* if provided closure logs anything or produce
        // any `tracing` event.
        //
        // So, we structure `update_config` to only do an absolute minimum of changes and only use
        // the most trivial operations during `modify`, to guarantee we won't deadlock.
        //
        // See:
        // - https://docs.rs/tracing-subscriber/latest/tracing_subscriber/reload/struct.Handle.html#method.reload
        // - https://github.com/tokio-rs/tracing/issues/1629
        // - https://github.com/tokio-rs/tracing/pull/2657
        let mut merged_config = config.clone();
        merged_config.merge(new_config);
        if merged_config.on_disk != config.on_disk {
            // Build the new layer/filter *outside* `modify` (it may do I/O and fail).
            let new_layer = on_disk::new_layer(&merged_config.on_disk)?;
            let new_filter = on_disk::new_filter(&merged_config.on_disk);
            self.on_disk.modify(move |logger| {
                *logger.inner_mut() = new_layer;
                *logger.filter_mut() = new_filter;
            })?;
            config.on_disk = merged_config.on_disk;
        }
        if merged_config.default != config.default {
            let new_layer = default::new_layer(&merged_config.default);
            let new_filter = default::new_filter(&merged_config.default);
            self.default.modify(|logger| {
                *logger.inner_mut() = Some(new_layer);
                *logger.filter_mut() = new_filter;
            })?;
            config.default = merged_config.default;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/tonic_telemetry.rs | src/tonic/tonic_telemetry.rs | use std::sync::Arc;
use std::task::{Context, Poll};
use futures_util::future::BoxFuture;
use tower::Service;
use tower_layer::Layer;
use crate::common::telemetry_ops::requests_telemetry::{
TonicTelemetryCollector, TonicWorkerTelemetryCollector,
};
/// Tower service wrapper that records a response-time measurement for every
/// gRPC call passing through the wrapped `service`.
#[derive(Clone)]
pub struct TonicTelemetryService<T> {
    service: T,
    // Worker-local collector; created by (and aggregated into) `TonicTelemetryCollector`.
    telemetry_data: Arc<parking_lot::Mutex<TonicWorkerTelemetryCollector>>,
}
/// Tower layer that wraps services in `TonicTelemetryService`.
#[derive(Clone)]
pub struct TonicTelemetryLayer {
    telemetry_collector: Arc<parking_lot::Mutex<TonicTelemetryCollector>>,
}
impl<S> Service<tonic::codegen::http::Request<tonic::transport::Body>> for TonicTelemetryService<S>
where
S: Service<tonic::codegen::http::Request<tonic::transport::Body>>,
S::Future: Send + 'static,
{
type Response = S::Response;
type Error = S::Error;
type Future = BoxFuture<'static, Result<S::Response, S::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
fn call(
&mut self,
request: tonic::codegen::http::Request<tonic::transport::Body>,
) -> Self::Future {
let method_name = request.uri().path().to_string();
let future = self.service.call(request);
let telemetry_data = self.telemetry_data.clone();
Box::pin(async move {
let instant = std::time::Instant::now();
let response = future.await?;
telemetry_data.lock().add_response(method_name, instant);
Ok(response)
})
}
}
impl TonicTelemetryLayer {
pub fn new(
telemetry_collector: Arc<parking_lot::Mutex<TonicTelemetryCollector>>,
) -> TonicTelemetryLayer {
Self {
telemetry_collector,
}
}
}
impl<S> Layer<S> for TonicTelemetryLayer {
    type Service = TonicTelemetryService<S>;

    /// Wraps `service`, registering a new per-worker collector with the
    /// global telemetry collector.
    fn layer(&self, service: S) -> Self::Service {
        TonicTelemetryService {
            service,
            telemetry_data: self
                .telemetry_collector
                .lock()
                .create_grpc_telemetry_collector(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/auth.rs | src/tonic/auth.rs | use std::sync::Arc;
use std::task::{Context, Poll};
use futures::future::BoxFuture;
use storage::rbac::Access;
use tonic::Status;
use tonic::body::BoxBody;
use tower::{Layer, Service};
use crate::common::auth::{AuthError, AuthKeys};
// Shorthand for the HTTP request/response types tonic services operate on.
type Request = tonic::codegen::http::Request<tonic::transport::Body>;
type Response = tonic::codegen::http::Response<BoxBody>;
/// Tower middleware that authenticates every incoming gRPC request against the
/// configured API keys before passing it on to the wrapped `service`.
#[derive(Clone)]
pub struct AuthMiddleware<S> {
    auth_keys: Arc<AuthKeys>,
    service: S,
}
/// Validates the request's API key and, on success, attaches the resolved
/// `Access` (and inference token) to the request's extensions. Health-check
/// endpoints bypass authentication entirely.
async fn check(auth_keys: Arc<AuthKeys>, mut req: Request) -> Result<Request, Status> {
    // Allow health check endpoints to bypass authentication
    let path = req.uri().path();
    if path == "/qdrant.Qdrant/HealthCheck" || path == "/grpc.health.v1.Health/Check" {
        // Set default full access for health check endpoints
        let access = Access::full("Health check endpoints have full access without authentication");
        let inference_token = crate::common::inference::token::InferenceToken(None);
        req.extensions_mut().insert::<Access>(access);
        req.extensions_mut().insert(inference_token);
        return Ok(req);
    }
    // Look up the API key from whatever header name the key store expects and
    // map auth failures to the appropriate gRPC status codes.
    let (access, inference_token) = auth_keys
        .validate_request(|key| req.headers().get(key).and_then(|val| val.to_str().ok()))
        .await
        .map_err(|e| match e {
            AuthError::Unauthorized(e) => Status::unauthenticated(e),
            AuthError::Forbidden(e) => Status::permission_denied(e),
            AuthError::StorageError(e) => Status::from(e),
        })?;
    let previous = req.extensions_mut().insert::<Access>(access);
    debug_assert!(
        previous.is_none(),
        "Previous access object should not exist in the request"
    );
    let previous_token = req.extensions_mut().insert(inference_token);
    debug_assert!(
        previous_token.is_none(),
        "Previous inference token should not exist in the request"
    );
    Ok(req)
}
impl<S> Service<Request> for AuthMiddleware<S>
where
    S: Service<Request, Response = Response> + Clone + Send + 'static,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<Self::Response, S::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is fully delegated to the wrapped service.
        self.service.poll_ready(cx)
    }

    fn call(&mut self, request: Request) -> Self::Future {
        let auth_keys = self.auth_keys.clone();
        // Clone the inner service so the future owns its own instance
        // (standard tower pattern for `call` returning a 'static future).
        let mut service = self.service.clone();
        Box::pin(async move {
            match check(auth_keys, request).await {
                // Authenticated: forward to the inner service.
                Ok(req) => service.call(req).await,
                // Auth failure: convert the gRPC status into an HTTP response.
                Err(e) => Ok(e.to_http()),
            }
        })
    }
}
/// Tower layer that wraps services in `AuthMiddleware` sharing one key store.
#[derive(Clone)]
pub struct AuthLayer {
    auth_keys: Arc<AuthKeys>,
}
impl AuthLayer {
    /// Wraps the key store in an `Arc` so all wrapped services share it.
    pub fn new(auth_keys: AuthKeys) -> Self {
        Self {
            auth_keys: Arc::new(auth_keys),
        }
    }
}
impl<S> Layer<S> for AuthLayer {
    type Service = AuthMiddleware<S>;

    /// Wraps `service` with authentication using the shared key store.
    fn layer(&self, service: S) -> Self::Service {
        Self::Service {
            auth_keys: self.auth_keys.clone(),
            service,
        }
    }
}
/// Removes the `Access` object that `AuthMiddleware` attached to the request.
/// When no API key is configured the middleware is not installed, so absence
/// of the extension means unrestricted access.
pub fn extract_access<R>(req: &mut tonic::Request<R>) -> Access {
    match req.extensions_mut().remove::<Access>() {
        Some(access) => access,
        None => {
            Access::full("All requests have full by default access when API key is not configured")
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/mod.rs | src/tonic/mod.rs | mod api;
mod auth;
mod logging;
mod tonic_telemetry;
use std::io;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use ::api::grpc::QDRANT_DESCRIPTOR_SET;
use ::api::grpc::grpc_health_v1::health_check_response::ServingStatus;
use ::api::grpc::grpc_health_v1::health_server::{Health, HealthServer};
use ::api::grpc::grpc_health_v1::{
HealthCheckRequest as ProtocolHealthCheckRequest,
HealthCheckResponse as ProtocolHealthCheckResponse,
};
use ::api::grpc::qdrant::collections_internal_server::CollectionsInternalServer;
use ::api::grpc::qdrant::collections_server::CollectionsServer;
use ::api::grpc::qdrant::points_internal_server::PointsInternalServer;
use ::api::grpc::qdrant::points_server::PointsServer;
use ::api::grpc::qdrant::qdrant_internal_server::{QdrantInternal, QdrantInternalServer};
use ::api::grpc::qdrant::qdrant_server::{Qdrant, QdrantServer};
use ::api::grpc::qdrant::shard_snapshots_server::ShardSnapshotsServer;
use ::api::grpc::qdrant::snapshots_server::SnapshotsServer;
use ::api::grpc::qdrant::{
GetConsensusCommitRequest, GetConsensusCommitResponse, GetPeerTelemetryRequest,
GetPeerTelemetryResponse, HealthCheckReply, HealthCheckRequest, WaitOnConsensusCommitRequest,
WaitOnConsensusCommitResponse,
};
use ::api::rest::models::VersionInfo;
use collection::operations::verification::new_unchecked_verification_pass;
use storage::content_manager::consensus_manager::ConsensusStateRef;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use tokio::runtime::Handle;
use tokio::signal;
use tonic::codec::CompressionEncoding;
use tonic::transport::{Server, ServerTlsConfig};
use tonic::{Request, Response, Status};
use crate::common::auth::AuthKeys;
use crate::common::helpers;
use crate::common::http_client::HttpClient;
use crate::common::telemetry_ops::requests_telemetry::TonicTelemetryCollector;
use crate::settings::Settings;
use crate::tonic::api::collections_api::CollectionsService;
use crate::tonic::api::collections_internal_api::CollectionsInternalService;
use crate::tonic::api::points_api::PointsService;
use crate::tonic::api::points_internal_api::PointsInternalService;
use crate::tonic::api::snapshots_api::{ShardSnapshotsService, SnapshotsService};
/// Public `qdrant.Qdrant` gRPC service; currently only serves the legacy
/// health-check RPC.
#[derive(Default)]
pub struct QdrantService {}
#[tonic::async_trait]
impl Qdrant for QdrantService {
    /// Returns the build's version info; serves as a liveness probe.
    async fn health_check(
        &self,
        _request: Request<HealthCheckRequest>,
    ) -> Result<Response<HealthCheckReply>, Status> {
        Ok(Response::new(VersionInfo::default().into()))
    }
}
// Additional health check service that follows gRPC health check protocol as described in #2614
#[derive(Default)]
pub struct HealthService {}
#[tonic::async_trait]
impl Health for HealthService {
    /// Always reports `SERVING`; the process answering at all is the signal.
    async fn check(
        &self,
        _request: Request<ProtocolHealthCheckRequest>,
    ) -> Result<Response<ProtocolHealthCheckResponse>, Status> {
        let response = ProtocolHealthCheckResponse {
            status: ServingStatus::Serving as i32,
        };
        Ok(Response::new(response))
    }
}
/// Internal (cluster-to-cluster) gRPC service exposing consensus state.
pub struct QdrantInternalService {
    /// Qdrant settings
    settings: Settings,
    /// Consensus state
    consensus_state: ConsensusStateRef,
}
impl QdrantInternalService {
    /// Bundles the settings and consensus state the RPC handlers need.
    fn new(settings: Settings, consensus_state: ConsensusStateRef) -> Self {
        Self {
            settings,
            consensus_state,
        }
    }
}
#[tonic::async_trait]
impl QdrantInternal for QdrantInternalService {
    /// Reports this peer's current raft commit index and term.
    async fn get_consensus_commit(
        &self,
        _: tonic::Request<GetConsensusCommitRequest>,
    ) -> Result<Response<GetConsensusCommitResponse>, Status> {
        let persistent = self.consensus_state.persistent.read();
        // `as _` narrows/widens to the protobuf field types.
        let commit = persistent.state.hard_state.commit as _;
        let term = persistent.state.hard_state.term as _;
        Ok(Response::new(GetConsensusCommitResponse { commit, term }))
    }

    /// Blocks until this peer's consensus has reached the given commit/term,
    /// or the timeout expires; `ok` reports which happened.
    async fn wait_on_consensus_commit(
        &self,
        request: Request<WaitOnConsensusCommitRequest>,
    ) -> Result<Response<WaitOnConsensusCommitResponse>, Status> {
        let request = request.into_inner();
        let commit = request.commit as u64;
        let term = request.term as u64;
        // Request timeout is expressed in whole seconds.
        let timeout = Duration::from_secs(request.timeout as u64);
        let consensus_tick = Duration::from_millis(self.settings.cluster.consensus.tick_period_ms);
        let ok = self
            .consensus_state
            .wait_for_consensus_commit(commit, term, consensus_tick, timeout)
            .await
            .is_ok();
        Ok(Response::new(WaitOnConsensusCommitResponse { ok }))
    }

    /// Not implemented: peer telemetry is not served over this RPC yet.
    async fn get_peer_telemetry(
        &self,
        _request: Request<GetPeerTelemetryRequest>,
    ) -> Result<Response<GetPeerTelemetryResponse>, Status> {
        Err(Status::unimplemented("Not implemented"))
    }
}
/// Non-unix fallback: resolves when Ctrl-C (SIGINT equivalent) is received.
#[cfg(not(unix))]
async fn wait_stop_signal(for_what: &str) {
    signal::ctrl_c().await.unwrap();
    log::debug!("Stopping {for_what} on SIGINT");
}
/// Resolves when either SIGTERM or SIGINT is received, logging which one
/// triggered the shutdown of `for_what`.
#[cfg(unix)]
async fn wait_stop_signal(for_what: &str) {
    let mut sigterm = signal::unix::signal(signal::unix::SignalKind::terminate()).unwrap();
    let mut sigint = signal::unix::signal(signal::unix::SignalKind::interrupt()).unwrap();

    // Wait for whichever signal arrives first.
    tokio::select! {
        _ = sigterm.recv() => log::debug!("Stopping {for_what} on SIGTERM"),
        _ = sigint.recv() => log::debug!("Stopping {for_what} on SIGINT"),
    }
}
/// Starts the public gRPC API and blocks the given runtime until a stop
/// signal is received.
///
/// Binds to `settings.service.host:grpc_port` (optionally with TLS) and
/// serves the public Qdrant services — collections, points, snapshots,
/// health and gRPC reflection — behind logging, telemetry and (optional)
/// auth middleware.
///
/// # Errors
/// Returns an `io::Error` when TLS configuration fails or the server
/// exits with a transport error.
///
/// # Panics
/// Panics if `settings.service.host` is not a valid IP address, or if the
/// reflection descriptor set cannot be built (both treated as startup bugs).
pub fn init(
    dispatcher: Arc<Dispatcher>,
    telemetry_collector: Arc<parking_lot::Mutex<TonicTelemetryCollector>>,
    settings: Settings,
    grpc_port: u16,
    runtime: Handle,
) -> io::Result<()> {
    runtime.block_on(async {
        let socket =
            SocketAddr::from((settings.service.host.parse::<IpAddr>().unwrap(), grpc_port));
        let qdrant_service = QdrantService::default();
        let health_service = HealthService::default();
        let collections_service = CollectionsService::new(dispatcher.clone());
        let points_service = PointsService::new(dispatcher.clone(), settings.service.clone());
        let snapshot_service = SnapshotsService::new(dispatcher.clone());
        // Only advertise the public services. By default, all services in QDRANT_DESCRIPTOR_SET
        // will be advertised, so explicitly list the services to be included.
        let reflection_service = tonic_reflection::server::Builder::configure()
            .register_encoded_file_descriptor_set(QDRANT_DESCRIPTOR_SET)
            .with_service_name("qdrant.Collections")
            .with_service_name("qdrant.Points")
            .with_service_name("qdrant.Snapshots")
            .with_service_name("qdrant.Qdrant")
            .with_service_name("grpc.health.v1.Health")
            .build()
            .unwrap();
        log::info!("Qdrant gRPC listening on {grpc_port}");
        let mut server = Server::builder();
        if settings.service.enable_tls {
            log::info!("TLS enabled for gRPC API (TTL not supported)");
            let tls_server_config = helpers::load_tls_external_server_config(settings.tls()?)?;
            server = server
                .tls_config(tls_server_config)
                .map_err(helpers::tonic_error_to_io_error)?;
        } else {
            log::info!("TLS disabled for gRPC API");
        }
        // The stack of middleware that our service will be wrapped in
        let middleware_layer = tower::ServiceBuilder::new()
            .layer(logging::LoggingMiddlewareLayer::new())
            .layer(tonic_telemetry::TonicTelemetryLayer::new(
                telemetry_collector,
            ))
            // Auth middleware is installed only when `AuthKeys::try_create`
            // yields keys for the current configuration.
            .option_layer({
                AuthKeys::try_create(
                    &settings.service,
                    dispatcher
                        .toc(
                            &Access::full("For tonic auth middleware"),
                            &new_unchecked_verification_pass(),
                        )
                        .clone(),
                )
                .map(auth::AuthLayer::new)
            })
            .into_inner();
        // Every service accepts/sends gzip and has no decoded-size limit.
        server
            .layer(middleware_layer)
            .add_service(reflection_service)
            .add_service(
                QdrantServer::new(qdrant_service)
                    .send_compressed(CompressionEncoding::Gzip)
                    .accept_compressed(CompressionEncoding::Gzip)
                    .max_decoding_message_size(usize::MAX),
            )
            .add_service(
                CollectionsServer::new(collections_service)
                    .send_compressed(CompressionEncoding::Gzip)
                    .accept_compressed(CompressionEncoding::Gzip)
                    .max_decoding_message_size(usize::MAX),
            )
            .add_service(
                PointsServer::new(points_service)
                    .send_compressed(CompressionEncoding::Gzip)
                    .accept_compressed(CompressionEncoding::Gzip)
                    .max_decoding_message_size(usize::MAX),
            )
            .add_service(
                SnapshotsServer::new(snapshot_service)
                    .send_compressed(CompressionEncoding::Gzip)
                    .accept_compressed(CompressionEncoding::Gzip)
                    .max_decoding_message_size(usize::MAX),
            )
            .add_service(
                HealthServer::new(health_service)
                    .send_compressed(CompressionEncoding::Gzip)
                    .accept_compressed(CompressionEncoding::Gzip)
                    .max_decoding_message_size(usize::MAX),
            )
            // Serve until a stop signal arrives, then shut down gracefully.
            .serve_with_shutdown(socket, async {
                wait_stop_signal("gRPC service").await;
            })
            .await
            .map_err(helpers::tonic_error_to_io_error)
    })?;
    Ok(())
}
/// Starts the internal (peer-to-peer) gRPC API and blocks the given
/// runtime until a stop signal is received.
///
/// Serves the consensus (raft), internal points/collections and shard
/// snapshot services on `host:internal_grpc_port`, optionally with TLS.
///
/// # Errors
/// Returns an `io::Error` when the HTTP client cannot be built from the
/// settings, when TLS configuration fails, or when the server exits with
/// a transport error.
///
/// # Panics
/// Panics if `host` is not a valid IP address (treated as a startup bug).
#[allow(clippy::too_many_arguments)]
pub fn init_internal(
    toc: Arc<TableOfContent>,
    consensus_state: ConsensusStateRef,
    telemetry_collector: Arc<parking_lot::Mutex<TonicTelemetryCollector>>,
    settings: Settings,
    host: String,
    internal_grpc_port: u16,
    tls_config: Option<ServerTlsConfig>,
    to_consensus: tokio::sync::mpsc::Sender<crate::consensus::Message>,
    runtime: Handle,
) -> std::io::Result<()> {
    use ::api::grpc::qdrant::raft_server::RaftServer;
    use crate::tonic::api::raft_api::RaftService;
    let http_client = HttpClient::from_settings(&settings)?;
    runtime
        .block_on(async {
            let socket = SocketAddr::from((host.parse::<IpAddr>().unwrap(), internal_grpc_port));
            let qdrant_service = QdrantService::default();
            let points_internal_service =
                PointsInternalService::new(toc.clone(), settings.service.clone());
            let qdrant_internal_service =
                QdrantInternalService::new(settings, consensus_state.clone());
            let collections_internal_service = CollectionsInternalService::new(toc.clone());
            let shard_snapshots_service = ShardSnapshotsService::new(toc.clone(), http_client);
            let raft_service =
                RaftService::new(to_consensus, consensus_state, tls_config.is_some());
            log::debug!("Qdrant internal gRPC listening on {internal_grpc_port}");
            let mut server = Server::builder()
                // Internally use a high limit for pending accept streams.
                // We can have a huge number of reset/dropped HTTP2 streams in our internal
                // communication when there are a lot of clients dropping connections. This
                // internally causes an GOAWAY/ENHANCE_YOUR_CALM error breaking cluster consensus.
                // We prefer to keep more pending reset streams even though this may be expensive,
                // versus an internal error that is very hard to handle.
                // More info: <https://github.com/qdrant/qdrant/issues/1907>
                .http2_max_pending_accept_reset_streams(Some(1024));
            if let Some(config) = tls_config {
                log::info!("TLS enabled for internal gRPC API (TTL not supported)");
                server = server.tls_config(config)?;
            } else {
                log::info!("TLS disabled for internal gRPC API");
            };
            // The stack of middleware that our service will be wrapped in
            let middleware_layer = tower::ServiceBuilder::new()
                .layer(logging::LoggingMiddlewareLayer::new())
                .layer(tonic_telemetry::TonicTelemetryLayer::new(
                    telemetry_collector,
                ))
                .into_inner();
            // Every service accepts/sends gzip and has no decoded-size limit.
            server
                .layer(middleware_layer)
                .add_service(
                    QdrantServer::new(qdrant_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .add_service(
                    QdrantInternalServer::new(qdrant_internal_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .add_service(
                    CollectionsInternalServer::new(collections_internal_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .add_service(
                    PointsInternalServer::new(points_internal_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .add_service(
                    ShardSnapshotsServer::new(shard_snapshots_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .add_service(
                    RaftServer::new(raft_service)
                        .send_compressed(CompressionEncoding::Gzip)
                        .accept_compressed(CompressionEncoding::Gzip)
                        .max_decoding_message_size(usize::MAX),
                )
                .serve_with_shutdown(socket, async {
                    wait_stop_signal("internal gRPC").await;
                })
                .await
        })
        // Propagate server/transport errors to the caller as `io::Error`
        // instead of panicking, matching the public `init` error handling.
        .map_err(helpers::tonic_error_to_io_error)?;
    Ok(())
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/logging.rs | src/tonic/logging.rs | use std::task::{Context, Poll};
use futures_util::future::BoxFuture;
use tonic::Code;
use tonic::body::BoxBody;
use tonic::codegen::http::Response;
use tower::Service;
use tower_layer::Layer;
// Tower service wrapper that logs the outcome and latency of every gRPC
// call handled by the wrapped `inner` service.
#[derive(Clone)]
pub struct LoggingMiddleware<T> {
    inner: T,
}
// Tower layer that wraps services in `LoggingMiddleware`.
#[derive(Clone)]
pub struct LoggingMiddlewareLayer;
impl LoggingMiddlewareLayer {
    /// Creates a new logging layer.
    pub fn new() -> Self {
        LoggingMiddlewareLayer
    }
}
// Service implementation: forwards every request to the wrapped service,
// measures its wall-clock duration, and logs the outcome at a severity
// that depends on the gRPC status code (or HTTP status as a fallback).
impl<S> Service<tonic::codegen::http::Request<tonic::transport::Body>> for LoggingMiddleware<S>
where
    S: Service<tonic::codegen::http::Request<tonic::transport::Body>, Response = Response<BoxBody>>
        + Clone,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<S::Response, S::Error>>;
    // Readiness is delegated directly to the wrapped service.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }
    fn call(
        &mut self,
        request: tonic::codegen::http::Request<tonic::transport::Body>,
    ) -> Self::Future {
        // Swap a clone into `self` and drive the original instance:
        // `self.inner` is the service that `poll_ready` reported ready,
        // while a fresh clone is not guaranteed to be (standard tower
        // middleware pattern for moving a service into a future).
        let clone = self.inner.clone();
        let mut inner = std::mem::replace(&mut self.inner, clone);
        let method_name = request.uri().path().to_string();
        let instant = std::time::Instant::now();
        let future = inner.call(request);
        Box::pin(async move {
            let response = future.await;
            let elapsed_sec = instant.elapsed().as_secs_f32();
            match response {
                Err(error) => {
                    log::error!("gRPC request error {method_name}");
                    Err(error)
                }
                Ok(response_tonic) => {
                    // Extract the gRPC status from the response headers,
                    // if the service attached one.
                    let grpc_status = tonic::Status::from_header_map(response_tonic.headers());
                    if let Some(grpc_status) = grpc_status {
                        match grpc_status.code() {
                            Code::Ok => {
                                log::trace!("gRPC {method_name} Ok {elapsed_sec:.6}");
                            }
                            Code::Cancelled => {
                                // cluster mode generates a large amount of `stream error received: stream no longer needed`
                                log::trace!("gRPC cancelled {method_name} {elapsed_sec:.6}");
                            }
                            // Client-attributable failures: log at info.
                            Code::DeadlineExceeded
                            | Code::Aborted
                            | Code::OutOfRange
                            | Code::ResourceExhausted
                            | Code::NotFound
                            | Code::InvalidArgument
                            | Code::AlreadyExists
                            | Code::FailedPrecondition
                            | Code::PermissionDenied
                            | Code::Unauthenticated => {
                                log::info!(
                                    "gRPC {} failed with {} {:?} {:.6}",
                                    method_name,
                                    grpc_status.code(),
                                    grpc_status.message(),
                                    elapsed_sec,
                                );
                            }
                            // Server-side/unexpected failures: log at error.
                            Code::Internal
                            | Code::Unimplemented
                            | Code::Unavailable
                            | Code::DataLoss
                            | Code::Unknown => log::error!(
                                "gRPC {} unexpectedly failed with {} {:?} {:.6}",
                                method_name,
                                grpc_status.code(),
                                grpc_status.message(),
                                elapsed_sec,
                            ),
                        };
                    } else {
                        // Fallback to response's `status_code` if no `grpc-status` header found.
                        match response_tonic.status().as_u16() {
                            100..=199 => {
                                log::trace!(
                                    "gRPC information {} {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                            200..=299 => {
                                log::trace!(
                                    "gRPC success {} {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                            300..=399 => {
                                log::debug!(
                                    "gRPC redirection {} {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                            400..=499 => {
                                log::info!(
                                    "gRPC client error {} {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                            500..=599 => {
                                log::error!(
                                    "gRPC server error {} {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                            _ => {
                                log::warn!(
                                    "gRPC {} unknown status code {} {:.6}",
                                    method_name,
                                    response_tonic.status(),
                                    elapsed_sec,
                                );
                            }
                        };
                    }
                    Ok(response_tonic)
                }
            }
        })
    }
}
// Layer implementation: wraps the given service in `LoggingMiddleware`.
impl<S> Layer<S> for LoggingMiddlewareLayer {
    type Service = LoggingMiddleware<S>;
    fn layer(&self, inner: S) -> Self::Service {
        LoggingMiddleware { inner }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/query_common.rs | src/tonic/api/query_common.rs | use std::time::{Duration, Instant};
use api::conversions::json::json_path_from_proto;
use api::grpc::qdrant::{
BatchResult, CoreSearchPoints, CountPoints, CountResponse, DiscoverBatchResponse,
DiscoverPoints, DiscoverResponse, FacetCounts, FacetResponse, GetPoints, GetResponse,
GroupsResult, QueryBatchResponse, QueryGroupsResponse, QueryPointGroups, QueryPoints,
QueryResponse, ReadConsistency as ReadConsistencyGrpc, RecommendBatchResponse,
RecommendGroupsResponse, RecommendPointGroups, RecommendPoints, RecommendResponse,
ScrollPoints, ScrollResponse, SearchBatchResponse, SearchGroupsResponse, SearchMatrixPoints,
SearchPointGroups, SearchPoints, SearchResponse,
};
use api::grpc::{InferenceUsage, Usage};
use api::rest::OrderByInterface;
use collection::collection::distance_matrix::{
CollectionSearchMatrixRequest, CollectionSearchMatrixResponse,
};
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::conversions::try_discover_request_from_grpc;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{
CoreSearchRequest, PointRequestInternal, ScrollRequestInternal, default_exact_count,
};
use collection::shards::shard::ShardId;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::data_types::facets::FacetParams;
use segment::data_types::order_by::OrderBy;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, NamedQuery, VectorInternal};
use shard::query::query_enum::QueryEnum;
use shard::search::CoreSearchRequestBatch;
use storage::content_manager::toc::TableOfContent;
use storage::content_manager::toc::request_hw_counter::RequestHwCounter;
use storage::rbac::Access;
use tonic::{Response, Status};
use crate::common::inference::params::InferenceParams;
use crate::common::inference::query_requests_grpc::{
convert_query_point_groups_from_grpc, convert_query_points_from_grpc,
};
use crate::common::query::*;
use crate::common::strict_mode::*;
/// Builds the internal shard selector for a read request from the two
/// mutually exclusive gRPC selectors: an explicit shard id or a shard key.
pub(crate) fn convert_shard_selector_for_read(
    shard_id_selector: Option<ShardId>,
    shard_key_selector: Option<api::grpc::qdrant::ShardKeySelector>,
) -> Result<ShardSelectorInternal, Status> {
    let selector = match (shard_id_selector, shard_key_selector) {
        // Neither provided: address all shards.
        (None, None) => ShardSelectorInternal::All,
        // Shard key only; conversion may fail for an invalid key.
        (None, Some(shard_key)) => ShardSelectorInternal::try_from(shard_key)?,
        // Explicit shard id only.
        (Some(shard_id), None) => ShardSelectorInternal::ShardId(shard_id),
        // Both set is a caller bug; fall back to the explicit shard id in
        // release builds.
        (Some(shard_id), Some(_)) => {
            debug_assert!(
                false,
                "Shard selection and shard key selector are mutually exclusive"
            );
            ShardSelectorInternal::ShardId(shard_id)
        }
    };
    Ok(selector)
}
/// Handles the public gRPC `Search` request: nearest-neighbour search with
/// a single (dense or sparse) query vector.
///
/// Runs the strict-mode check before execution and reports hardware usage
/// in the response when measurement is enabled.
pub async fn search(
    toc_provider: impl CheckedTocProvider,
    search_points: SearchPoints,
    shard_selection: Option<ShardId>,
    access: Access,
    hw_measurement_acc: RequestHwCounter,
) -> Result<Response<SearchResponse>, Status> {
    let SearchPoints {
        collection_name,
        vector,
        filter,
        limit,
        offset,
        with_payload,
        params,
        score_threshold,
        vector_name,
        with_vectors,
        read_consistency,
        timeout,
        shard_key_selector,
        sparse_indices,
    } = search_points;
    // Combine the plain vector values with the optional sparse indices
    // into a single internal vector representation.
    let vector_internal =
        VectorInternal::from_vector_and_indices(vector, sparse_indices.map(|v| v.data));
    let vector_struct =
        api::grpc::conversions::into_named_vector_struct(vector_name, vector_internal)?;
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let search_request = CoreSearchRequest {
        query: QueryEnum::Nearest(NamedQuery::from(vector_struct)),
        filter: filter.map(|f| f.try_into()).transpose()?,
        params: params.map(|p| p.into()),
        limit: limit as usize,
        offset: offset.unwrap_or_default() as usize,
        with_payload: with_payload.map(|wp| wp.try_into()).transpose()?,
        with_vector: Some(
            with_vectors
                .map(|selector| selector.into())
                .unwrap_or_default(),
        ),
        score_threshold,
    };
    // Strict-mode verification may reject the request before any search
    // work is done; it also yields the toc the request must run against.
    let toc = toc_provider
        .check_strict_mode(
            &search_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    // Timing starts after the strict-mode check, so `time` covers only
    // the search itself.
    let timing = Instant::now();
    let scored_points = do_core_search_points(
        toc,
        &collection_name,
        search_request,
        read_consistency,
        shard_selector,
        access,
        timeout.map(Duration::from_secs),
        hw_measurement_acc.get_counter(),
    )
    .await?;
    let response = SearchResponse {
        result: scored_points
            .into_iter()
            .map(|point| point.into())
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(hw_measurement_acc.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Executes a batch of already-converted core search requests against one
/// collection, each with its own shard selector.
///
/// Results are returned per request, in order, as `BatchResult`s.
pub async fn core_search_batch(
    toc_provider: impl CheckedTocProvider,
    collection_name: &str,
    requests: Vec<(CoreSearchRequest, ShardSelectorInternal)>,
    read_consistency: Option<ReadConsistencyGrpc>,
    access: Access,
    timeout: Option<Duration>,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<SearchBatchResponse>, Status> {
    // Strict mode is checked for every request in the batch.
    let toc = toc_provider
        .check_strict_mode_batch(
            &requests,
            |i| &i.0,
            collection_name,
            timeout.map(|i| i.as_secs() as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let timing = Instant::now();
    let scored_points = do_search_batch_points(
        toc,
        collection_name,
        requests,
        read_consistency,
        access,
        timeout,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = SearchBatchResponse {
        result: scored_points
            .into_iter()
            .map(|points| BatchResult {
                result: points.into_iter().map(|p| p.into()).collect(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Executes a batch of internal (peer-to-peer) search requests against a
/// single, already-resolved shard.
///
/// Unlike the public batch endpoint, no strict-mode check is performed;
/// internal requests are assumed to be pre-validated by the originating
/// peer, and a shard id is expected to be present.
#[allow(clippy::too_many_arguments)]
pub async fn core_search_list(
    toc: &TableOfContent,
    collection_name: String,
    search_points: Vec<CoreSearchPoints>,
    read_consistency: Option<ReadConsistencyGrpc>,
    shard_selection: Option<ShardId>,
    access: Access,
    timeout: Option<Duration>,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<SearchBatchResponse>, Status> {
    // Convert each gRPC search into the core request type; the first
    // conversion error aborts the whole batch.
    let searches: Result<Vec<_>, Status> = search_points
        .into_iter()
        .map(CoreSearchRequest::try_from)
        .collect();
    let request = CoreSearchRequestBatch {
        searches: searches?,
    };
    let timing = Instant::now();
    // As this function is handling an internal request,
    // we can assume that shard_key is already resolved
    let shard_selection = match shard_selection {
        None => {
            debug_assert!(false, "Shard selection is expected for internal request");
            ShardSelectorInternal::All
        }
        Some(shard_id) => ShardSelectorInternal::ShardId(shard_id),
    };
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let scored_points = toc
        .core_search_batch(
            &collection_name,
            request,
            read_consistency,
            shard_selection,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let response = SearchBatchResponse {
        result: scored_points
            .into_iter()
            .map(|points| BatchResult {
                result: points.into_iter().map(|p| p.into()).collect(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `SearchGroups` request: a search whose results are
/// grouped by a payload field.
pub async fn search_groups(
    toc_provider: impl CheckedTocProvider,
    search_point_groups: SearchPointGroups,
    shard_selection: Option<ShardId>,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<SearchGroupsResponse>, Status> {
    // Convert a clone into the internal request first; the original is
    // then destructured for the transport-level fields used below.
    let search_groups_request = search_point_groups.clone().try_into()?;
    let SearchPointGroups {
        collection_name,
        read_consistency,
        timeout,
        shard_key_selector,
        ..
    } = search_point_groups;
    let toc = toc_provider
        .check_strict_mode(
            &search_groups_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let timing = Instant::now();
    let groups_result = crate::common::query::do_search_point_groups(
        toc,
        &collection_name,
        search_groups_request,
        read_consistency,
        shard_selector,
        access,
        timeout.map(Duration::from_secs),
        request_hw_counter.get_counter(),
    )
    .await?;
    let groups_result = GroupsResult::try_from(groups_result)
        .map_err(|e| Status::internal(format!("Failed to convert groups result: {e}")))?;
    let response = SearchGroupsResponse {
        result: Some(groups_result),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Recommend` request: search relative to positive and
/// negative example points/vectors.
pub async fn recommend(
    toc_provider: impl CheckedTocProvider,
    recommend_points: RecommendPoints,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<RecommendResponse>, Status> {
    // extract a few fields from the request and convert to internal request
    let collection_name = recommend_points.collection_name.clone();
    let read_consistency = recommend_points.read_consistency.clone();
    let shard_key_selector = recommend_points.shard_key_selector.clone();
    let timeout = recommend_points.timeout;
    let request =
        collection::operations::types::RecommendRequestInternal::try_from(recommend_points)?;
    let toc = toc_provider
        .check_strict_mode(
            &request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    // No explicit shard id here: only the shard key selector applies.
    let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
    let timeout = timeout.map(Duration::from_secs);
    let timing = Instant::now();
    let recommended_points = toc
        .recommend(
            &collection_name,
            request,
            read_consistency,
            shard_selector,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let response = RecommendResponse {
        result: recommended_points
            .into_iter()
            .map(|point| point.into())
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `RecommendBatch` request: several recommend requests
/// against one collection, each with its own shard key selector.
pub async fn recommend_batch(
    toc_provider: impl CheckedTocProvider,
    collection_name: &str,
    recommend_points: Vec<RecommendPoints>,
    read_consistency: Option<ReadConsistencyGrpc>,
    access: Access,
    timeout: Option<Duration>,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<RecommendBatchResponse>, Status> {
    // Pair each internal request with its resolved shard selector.
    let mut requests = Vec::with_capacity(recommend_points.len());
    for mut request in recommend_points {
        let shard_selector =
            convert_shard_selector_for_read(None, request.shard_key_selector.take())?;
        let internal_request: collection::operations::types::RecommendRequestInternal =
            request.try_into()?;
        requests.push((internal_request, shard_selector));
    }
    let toc = toc_provider
        .check_strict_mode_batch(
            &requests,
            |i| &i.0,
            collection_name,
            timeout.map(|i| i.as_secs() as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let timing = Instant::now();
    let scored_points = toc
        .recommend_batch(
            collection_name,
            requests,
            read_consistency,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let response = RecommendBatchResponse {
        result: scored_points
            .into_iter()
            .map(|points| BatchResult {
                result: points.into_iter().map(|p| p.into()).collect(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `RecommendGroups` request: recommendation whose
/// results are grouped by a payload field.
pub async fn recommend_groups(
    toc_provider: impl CheckedTocProvider,
    recommend_point_groups: RecommendPointGroups,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<RecommendGroupsResponse>, Status> {
    // Convert a clone into the internal request first; the original is
    // then destructured for the transport-level fields used below.
    let recommend_groups_request = recommend_point_groups.clone().try_into()?;
    let RecommendPointGroups {
        collection_name,
        read_consistency,
        timeout,
        shard_key_selector,
        ..
    } = recommend_point_groups;
    let toc = toc_provider
        .check_strict_mode(
            &recommend_groups_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
    let timing = Instant::now();
    let groups_result = crate::common::query::do_recommend_point_groups(
        toc,
        &collection_name,
        recommend_groups_request,
        read_consistency,
        shard_selector,
        access,
        timeout.map(Duration::from_secs),
        request_hw_counter.get_counter(),
    )
    .await?;
    let groups_result = GroupsResult::try_from(groups_result)
        .map_err(|e| Status::internal(format!("Failed to convert groups result: {e}")))?;
    let response = RecommendGroupsResponse {
        result: Some(groups_result),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Discover` request: context-based exploration search.
///
/// The gRPC-to-internal conversion also extracts the transport-level
/// fields (collection, consistency, timeout, shard key) in one step.
pub async fn discover(
    toc_provider: impl CheckedTocProvider,
    discover_points: DiscoverPoints,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<DiscoverResponse>, Status> {
    let (request, collection_name, read_consistency, timeout, shard_key_selector) =
        try_discover_request_from_grpc(discover_points)?;
    let toc = toc_provider
        .check_strict_mode(
            &request,
            &collection_name,
            timeout.map(|i| i.as_secs() as usize),
            &access,
        )
        .await?;
    let timing = Instant::now();
    let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
    let discovered_points = toc
        .discover(
            &collection_name,
            request,
            read_consistency,
            shard_selector,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let response = DiscoverResponse {
        result: discovered_points
            .into_iter()
            .map(|point| point.into())
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `DiscoverBatch` request: several discover requests
/// against one collection.
///
/// Per-request collection/consistency/timeout fields from the gRPC
/// conversion are ignored here; the batch-level arguments apply instead.
pub async fn discover_batch(
    toc_provider: impl CheckedTocProvider,
    collection_name: &str,
    discover_points: Vec<DiscoverPoints>,
    read_consistency: Option<ReadConsistencyGrpc>,
    access: Access,
    timeout: Option<Duration>,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<DiscoverBatchResponse>, Status> {
    let mut requests = Vec::with_capacity(discover_points.len());
    for discovery_request in discover_points {
        let (internal_request, _collection_name, _consistency, _timeout, shard_key_selector) =
            try_discover_request_from_grpc(discovery_request)?;
        let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
        requests.push((internal_request, shard_selector));
    }
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let toc = toc_provider
        .check_strict_mode_batch(
            &requests,
            |i| &i.0,
            collection_name,
            timeout.map(|i| i.as_secs() as usize),
            &access,
        )
        .await?;
    let timing = Instant::now();
    let scored_points = toc
        .discover_batch(
            collection_name,
            requests,
            read_consistency,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let response = DiscoverBatchResponse {
        result: scored_points
            .into_iter()
            .map(|points| BatchResult {
                result: points.into_iter().map(|p| p.into()).collect(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Scroll` request: paginated listing of points with
/// optional filter and ordering.
pub async fn scroll(
    toc_provider: impl CheckedTocProvider,
    scroll_points: ScrollPoints,
    shard_selection: Option<ShardId>,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<ScrollResponse>, Status> {
    let ScrollPoints {
        collection_name,
        filter,
        offset,
        limit,
        with_payload,
        with_vectors,
        read_consistency,
        shard_key_selector,
        order_by,
        timeout,
    } = scroll_points;
    let scroll_request = ScrollRequestInternal {
        offset: offset.map(|o| o.try_into()).transpose()?,
        limit: limit.map(|l| l as usize),
        filter: filter.map(|f| f.try_into()).transpose()?,
        with_payload: with_payload.map(|wp| wp.try_into()).transpose()?,
        with_vector: with_vectors
            .map(|selector| selector.into())
            .unwrap_or_default(),
        order_by: order_by
            .map(OrderBy::try_from)
            .transpose()?
            .map(OrderByInterface::Struct),
    };
    let toc = toc_provider
        .check_strict_mode(
            &scroll_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let timing = Instant::now();
    let scrolled_points = do_scroll_points(
        toc,
        &collection_name,
        scroll_request,
        read_consistency,
        timeout,
        shard_selector,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;
    // Convert the internal records back to gRPC points; the first
    // conversion failure is surfaced as an internal error.
    let points: Result<_, _> = scrolled_points
        .points
        .into_iter()
        .map(api::grpc::qdrant::RetrievedPoint::try_from)
        .collect();
    let points = points.map_err(|e| Status::internal(format!("Failed to convert points: {e}")))?;
    let response = ScrollResponse {
        next_page_offset: scrolled_points.next_page_offset.map(|n| n.into()),
        result: points,
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Count` request: counts points matching an optional
/// filter, either exactly or approximately.
pub async fn count(
    toc_provider: impl CheckedTocProvider,
    count_points: CountPoints,
    shard_selection: Option<ShardId>,
    access: &Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<CountResponse>, Status> {
    let CountPoints {
        collection_name,
        filter,
        exact,
        read_consistency,
        shard_key_selector,
        timeout,
    } = count_points;
    let count_request = collection::operations::types::CountRequestInternal {
        filter: filter.map(|f| f.try_into()).transpose()?,
        // When the client does not specify, fall back to the configured
        // default for exact vs. approximate counting.
        exact: exact.unwrap_or_else(default_exact_count),
    };
    let toc = toc_provider
        .check_strict_mode(
            &count_request,
            &collection_name,
            timeout.map(|i| i as usize),
            access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let timing = Instant::now();
    let count_result = do_count_points(
        toc,
        &collection_name,
        count_request,
        read_consistency,
        timeout,
        shard_selector,
        access.clone(),
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = CountResponse {
        result: Some(count_result.into()),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Get` request: retrieval of points by id, with
/// selectable payload and vector inclusion.
pub async fn get(
    toc_provider: impl CheckedTocProvider,
    get_points: GetPoints,
    shard_selection: Option<ShardId>,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<GetResponse>, Status> {
    let GetPoints {
        collection_name,
        ids,
        with_payload,
        with_vectors,
        read_consistency,
        shard_key_selector,
        timeout,
    } = get_points;
    let point_request = PointRequestInternal {
        ids: ids
            .into_iter()
            .map(|p| p.try_into())
            .collect::<Result<_, _>>()?,
        with_payload: with_payload.map(|wp| wp.try_into()).transpose()?,
        with_vector: with_vectors
            .map(|selector| selector.into())
            .unwrap_or_default(),
    };
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    // NOTE(review): timing starts before the strict-mode check here,
    // unlike most sibling handlers which start it after — confirm whether
    // including the check in the reported `time` is intentional.
    let timing = Instant::now();
    let toc = toc_provider
        .check_strict_mode(
            &point_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let records = do_get_points(
        toc,
        &collection_name,
        point_request,
        read_consistency,
        timeout,
        shard_selector,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = GetResponse {
        result: records.into_iter().map(|point| point.into()).collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `Query` request: the universal query API, with
/// optional server-side inference of document/image inputs.
///
/// The response usage combines hardware measurements with inference usage
/// reported by the conversion step.
pub async fn query(
    toc_provider: impl CheckedTocProvider,
    query_points: QueryPoints,
    shard_selection: Option<ShardId>,
    access: Access,
    request_hw_counter: RequestHwCounter,
    inference_params: InferenceParams,
) -> Result<Response<QueryResponse>, Status> {
    let shard_key_selector = query_points.shard_key_selector.clone();
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let read_consistency = query_points
        .read_consistency
        .clone()
        .map(TryFrom::try_from)
        .transpose()?;
    let collection_name = query_points.collection_name.clone();
    let timeout = query_points.timeout;
    // Conversion may perform inference and therefore also returns usage.
    let (request, inference_usage) =
        convert_query_points_from_grpc(query_points, inference_params).await?;
    let toc = toc_provider
        .check_strict_mode(
            &request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let timing = Instant::now();
    let scored_points = do_query_points(
        toc,
        &collection_name,
        request,
        read_consistency,
        shard_selector,
        access,
        timeout,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = QueryResponse {
        result: scored_points
            .into_iter()
            .map(|point| point.into())
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::new(request_hw_counter.to_grpc_api(), Some(inference_usage)).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handles the gRPC `QueryBatch` request: several universal queries
/// against one collection, with inference usage accumulated across all
/// requests in the batch.
#[allow(clippy::too_many_arguments)]
pub async fn query_batch(
    toc_provider: impl CheckedTocProvider,
    collection_name: &str,
    points: Vec<QueryPoints>,
    read_consistency: Option<ReadConsistencyGrpc>,
    access: Access,
    timeout: Option<Duration>,
    request_hw_counter: RequestHwCounter,
    inference_params: InferenceParams,
) -> Result<Response<QueryBatchResponse>, Status> {
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let mut requests = Vec::with_capacity(points.len());
    // Accumulates inference usage over every converted request.
    let mut total_inference_usage = InferenceUsage::default();
    for query_points in points {
        let shard_key_selector = query_points.shard_key_selector.clone();
        let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
        let (request, usage) =
            convert_query_points_from_grpc(query_points, inference_params.clone()).await?;
        total_inference_usage.merge(usage);
        requests.push((request, shard_selector));
    }
    let toc = toc_provider
        .check_strict_mode_batch(
            &requests,
            |i| &i.0,
            collection_name,
            timeout.map(|i| i.as_secs() as usize),
            &access,
        )
        .await?;
    let timing = Instant::now();
    let scored_points = do_query_batch_points(
        toc,
        collection_name,
        requests,
        read_consistency,
        access,
        timeout,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = QueryBatchResponse {
        result: scored_points
            .into_iter()
            .map(|points| BatchResult {
                result: points.into_iter().map(|p| p.into()).collect(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::new(
            request_hw_counter.to_grpc_api(),
            total_inference_usage.into_non_empty(),
        )
        .into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handle a gRPC `QueryGroups` request: query results grouped by a payload field.
///
/// Same shape as [`query`], but executes a group-by query and converts the
/// grouped result back to the gRPC `GroupsResult` type.
pub async fn query_groups(
    toc_provider: impl CheckedTocProvider,
    query_points: QueryPointGroups,
    shard_selection: Option<ShardId>,
    access: Access,
    request_hw_counter: RequestHwCounter,
    inference_params: InferenceParams,
) -> Result<Response<QueryGroupsResponse>, Status> {
    let shard_key_selector = query_points.shard_key_selector.clone();
    let shard_selector = convert_shard_selector_for_read(shard_selection, shard_key_selector)?;
    let read_consistency = query_points
        .read_consistency
        .clone()
        .map(TryFrom::try_from)
        .transpose()?;
    // Keep timeout/name before `query_points` is consumed by the conversion.
    let timeout = query_points.timeout;
    let collection_name = query_points.collection_name.clone();
    let (request, inference_usage) =
        convert_query_point_groups_from_grpc(query_points, inference_params).await?;
    let toc = toc_provider
        .check_strict_mode(
            &request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let timing = Instant::now();
    let groups_result = do_query_point_groups(
        toc,
        &collection_name,
        request,
        read_consistency,
        shard_selector,
        access,
        timeout,
        request_hw_counter.get_counter(),
    )
    .await?;
    // Conversion to the gRPC type can fail (e.g. unrepresentable values);
    // surface that as an internal error.
    let grpc_group_result = GroupsResult::try_from(groups_result)
        .map_err(|err| Status::internal(format!("failed to convert result: {err}")))?;
    let response = QueryGroupsResponse {
        result: Some(grpc_group_result),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::new(request_hw_counter.to_grpc_api(), Some(inference_usage)).into_non_empty(),
    };
    Ok(Response::new(response))
}
/// Handle a gRPC `Facet` request: count distinct values of a payload key.
///
/// Builds a `FacetParams` from the gRPC message (applying defaults for
/// `limit`/`exact`), runs the strict-mode check, then delegates to the TOC.
pub async fn facet(
    toc_provider: impl CheckedTocProvider,
    facet_counts: FacetCounts,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<FacetResponse>, Status> {
    let FacetCounts {
        collection_name,
        key,
        filter,
        exact,
        limit,
        read_consistency,
        shard_key_selector,
        timeout,
    } = facet_counts;
    let facet_request = FacetParams {
        key: json_path_from_proto(&key)?,
        filter: filter.map(TryInto::try_into).transpose()?,
        // gRPC carries limit as u64; reject values that don't fit usize.
        limit: limit
            .map(usize::try_from)
            .transpose()
            .map_err(|_| Status::invalid_argument("could not parse limit param into usize"))?
            .unwrap_or(FacetParams::DEFAULT_LIMIT),
        exact: exact.unwrap_or(FacetParams::DEFAULT_EXACT),
    };
    let toc = toc_provider
        .check_strict_mode(
            &facet_request,
            &collection_name,
            timeout.map(|i| i as usize),
            &access,
        )
        .await?;
    let timeout = timeout.map(Duration::from_secs);
    let read_consistency = ReadConsistency::try_from_optional(read_consistency)?;
    let shard_selector = convert_shard_selector_for_read(None, shard_key_selector)?;
    let timing = Instant::now();
    let facet_response = toc
        .facet(
            &collection_name,
            facet_request,
            shard_selector,
            read_consistency,
            access,
            timeout,
            request_hw_counter.get_counter(),
        )
        .await?;
    let segment::data_types::facets::FacetResponse { hits } = facet_response;
    let response = FacetResponse {
        hits: hits.into_iter().map(From::from).collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::from_hardware_usage(request_hw_counter.to_grpc_api()).into_non_empty(),
    };
    Ok(Response::new(response))
}
pub async fn search_points_matrix(
toc_provider: impl CheckedTocProvider,
search_matrix_points: SearchMatrixPoints,
access: Access,
hw_measurement_acc: HwMeasurementAcc,
) -> Result<CollectionSearchMatrixResponse, Status> {
let SearchMatrixPoints {
collection_name,
filter,
sample,
limit,
using,
read_consistency,
shard_key_selector,
timeout,
} = search_matrix_points;
let search_matrix_request = CollectionSearchMatrixRequest {
filter: filter.map(TryInto::try_into).transpose()?,
sample_size: sample
.map(usize::try_from)
.transpose()
.map_err(|_| Status::invalid_argument("could not parse 'sample' param into usize"))?
.unwrap_or(CollectionSearchMatrixRequest::DEFAULT_SAMPLE),
limit_per_sample: limit
.map(usize::try_from)
.transpose()
.map_err(|_| Status::invalid_argument("could not parse 'limit' param into usize"))?
.unwrap_or(CollectionSearchMatrixRequest::DEFAULT_LIMIT_PER_SAMPLE),
using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
};
let toc = toc_provider
.check_strict_mode(
&search_matrix_request,
&collection_name,
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/collections_internal_api.rs | src/tonic/api/collections_internal_api.rs | use std::sync::Arc;
use std::time::{Duration, Instant};
use api::grpc::qdrant::collections_internal_server::CollectionsInternal;
use api::grpc::qdrant::{
CollectionOperationResponse, GetCollectionInfoRequestInternal, GetCollectionInfoResponse,
GetShardRecoveryPointRequest, GetShardRecoveryPointResponse, InitiateShardTransferRequest,
UpdateShardCutoffPointRequest, WaitForShardStateRequest,
};
use storage::content_manager::toc::TableOfContent;
use storage::rbac::{Access, AccessRequirements, CollectionPass};
use tonic::{Request, Response, Status};
use super::validate_and_log;
use crate::tonic::api::collections_common::get;
/// Unrestricted access token: the internal (node-to-node) API bypasses
/// per-request RBAC checks.
const FULL_ACCESS: Access = Access::full("Internal API");
/// Obtain a collection access pass under the internal full-access token,
/// converting a rejected check into a gRPC `Status`.
fn full_access_pass(collection_name: &str) -> Result<CollectionPass<'_>, Status> {
    let requirements = AccessRequirements::new();
    let pass = FULL_ACCESS.check_collection_access(collection_name, requirements)?;
    Ok(pass)
}
/// gRPC service implementing the internal (node-to-node) collections API.
pub struct CollectionsInternalService {
    // Shared table of contents: entry point to all local collections.
    toc: Arc<TableOfContent>,
}
impl CollectionsInternalService {
    /// Create the service over the shared table of contents.
    pub fn new(toc: Arc<TableOfContent>) -> Self {
        Self { toc }
    }
}
#[tonic::async_trait]
impl CollectionsInternal for CollectionsInternalService {
    /// Internal variant of collection-info retrieval, scoped to one shard.
    ///
    /// Runs under [`FULL_ACCESS`]: peers are trusted, no RBAC applies.
    async fn get(
        &self,
        request: Request<GetCollectionInfoRequestInternal>,
    ) -> Result<Response<GetCollectionInfoResponse>, Status> {
        // Internal calls only log validation problems, they don't reject.
        validate_and_log(request.get_ref());
        let GetCollectionInfoRequestInternal {
            get_collection_info_request,
            shard_id,
        } = request.into_inner();
        let get_collection_info_request = get_collection_info_request
            .ok_or_else(|| Status::invalid_argument("GetCollectionInfoRequest is missing"))?;
        get(
            self.toc.as_ref(),
            get_collection_info_request,
            FULL_ACCESS.clone(),
            Some(shard_id),
        )
        .await
    }

    /// Prepare this node to receive a shard transfer for the given collection.
    async fn initiate(
        &self,
        request: Request<InitiateShardTransferRequest>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        // TODO: Ensure cancel safety!
        validate_and_log(request.get_ref());
        let timing = Instant::now();
        let InitiateShardTransferRequest {
            collection_name,
            shard_id,
        } = request.into_inner();
        // TODO: Ensure cancel safety!
        self.toc
            .initiate_receiving_shard(collection_name, shard_id)
            .await?;
        let response = CollectionOperationResponse {
            result: true,
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// Block until the local replica of a shard reaches the requested state,
    /// or fail with `Aborted` once `timeout` (seconds) elapses.
    async fn wait_for_shard_state(
        &self,
        request: Request<WaitForShardStateRequest>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        let request = request.into_inner();
        validate_and_log(&request);
        let timing = Instant::now();
        let WaitForShardStateRequest {
            collection_name,
            shard_id,
            state,
            timeout,
        } = request;
        let state = state.try_into()?;
        let timeout = Duration::from_secs(timeout);
        let collection_read = self
            .toc
            .get_collection(&full_access_pass(&collection_name)?)
            .await
            .map_err(|err| {
                Status::not_found(format!(
                    "Collection {collection_name} could not be found: {err}"
                ))
            })?;
        // Wait for replica state
        collection_read
            .wait_local_shard_replica_state(shard_id, state, timeout)
            .await
            .map_err(|err| {
                Status::aborted(format!(
                    "Failed to wait for shard {shard_id} to get into {state:?} state: {err}"
                ))
            })?;
        let response = CollectionOperationResponse {
            result: true,
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// Report the WAL recovery point of a local shard (used by shard-transfer
    /// and WAL-delta recovery between peers).
    async fn get_shard_recovery_point(
        &self,
        request: Request<GetShardRecoveryPointRequest>,
    ) -> Result<Response<GetShardRecoveryPointResponse>, Status> {
        validate_and_log(request.get_ref());
        let timing = Instant::now();
        let GetShardRecoveryPointRequest {
            collection_name,
            shard_id,
        } = request.into_inner();
        let collection_read = self
            .toc
            .get_collection(&full_access_pass(&collection_name)?)
            .await
            .map_err(|err| {
                Status::not_found(format!(
                    "Collection {collection_name} could not be found: {err}"
                ))
            })?;
        // Get shard recovery point
        let recovery_point = collection_read
            .shard_recovery_point(shard_id)
            .await
            .map_err(|err| {
                Status::internal(format!(
                    "Failed to get recovery point for shard {shard_id}: {err}"
                ))
            })?;
        let response = GetShardRecoveryPointResponse {
            recovery_point: Some(recovery_point.into()),
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// Set the WAL cutoff point of a local shard: entries before it may be
    /// truncated.
    async fn update_shard_cutoff_point(
        &self,
        request: Request<UpdateShardCutoffPointRequest>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        validate_and_log(request.get_ref());
        let timing = Instant::now();
        let UpdateShardCutoffPointRequest {
            collection_name,
            shard_id,
            cutoff,
        } = request.into_inner();
        let cutoff = cutoff.ok_or_else(|| Status::invalid_argument("Missing cutoff point"))?;
        let collection_read = self
            .toc
            .get_collection(&full_access_pass(&collection_name)?)
            .await
            .map_err(|err| {
                Status::not_found(format!(
                    "Collection {collection_name} could not be found: {err}"
                ))
            })?;
        // Set the shard cutoff point
        collection_read
            .update_shard_cutoff_point(shard_id, &cutoff.try_into()?)
            .await
            .map_err(|err| {
                Status::internal(format!(
                    "Failed to set shard cutoff point for shard {shard_id}: {err}"
                ))
            })?;
        let response = CollectionOperationResponse {
            result: true,
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/mod.rs | src/tonic/api/mod.rs | pub mod collections_api;
pub mod collections_internal_api;
pub mod points_api;
pub mod points_internal_api;
pub mod raft_api;
pub mod snapshots_api;
mod collections_common;
mod query_common;
mod update_common;
use collection::operations::validation;
use tonic::Status;
use validator::Validate;
/// Validate the given request and fail on error.
///
/// Returns validation error on failure.
fn validate(request: &impl Validate) -> Result<(), Status> {
request.validate().map_err(|ref err| {
Status::invalid_argument(validation::label_errors("Validation error in body", err))
})
}
/// Validate the given request, but never fail: validation problems are only
/// logged as warnings. Used on internal gRPC calls, where rejecting a peer's
/// request over a validation nit would be worse than serving it.
fn validate_and_log(request: &impl Validate) {
    if let Err(ref err) = request.validate() {
        validation::warn_validation_errors("Internal gRPC", err);
    }
}
#[cfg(test)]
mod tests {
    use validator::Validate;

    use super::*;

    // Struct with a range constraint, used to trigger a validation failure.
    #[derive(Validate, Debug)]
    struct SomeThing {
        #[validate(range(min = 1))]
        pub idx: usize,
    }

    // Wrapper exercising nested validation of a collection.
    #[derive(Validate, Debug)]
    struct OtherThing {
        #[validate(nested)]
        pub things: Vec<SomeThing>,
    }

    /// `validate` must reject an invalid payload with `InvalidArgument` and a
    /// labelled, field-qualified message.
    #[test]
    fn test_validation() {
        use tonic::Code;

        let bad_config = OtherThing {
            things: vec![SomeThing { idx: 0 }],
        };
        let validation =
            validate(&bad_config).expect_err("validation of bad request payload should fail");
        assert_eq!(validation.code(), Code::InvalidArgument);
        assert_eq!(
            validation.message(),
            "Validation error in body: [things[0].idx: value 0 invalid, must be 1 or larger]"
        )
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/collections_api.rs | src/tonic/api/collections_api.rs | use std::sync::Arc;
use std::time::{Duration, Instant};
use api::grpc::qdrant::collections_server::Collections;
use api::grpc::qdrant::{
ChangeAliases, CollectionClusterInfoRequest, CollectionClusterInfoResponse,
CollectionExistsRequest, CollectionExistsResponse, CollectionOperationResponse,
CreateCollection, CreateShardKeyRequest, CreateShardKeyResponse, DeleteCollection,
DeleteShardKeyRequest, DeleteShardKeyResponse, GetCollectionInfoRequest,
GetCollectionInfoResponse, ListAliasesRequest, ListAliasesResponse,
ListCollectionAliasesRequest, ListCollectionsRequest, ListCollectionsResponse,
ListShardKeysRequest, ListShardKeysResponse, UpdateCollection,
UpdateCollectionClusterSetupRequest, UpdateCollectionClusterSetupResponse,
};
use collection::operations::cluster_ops::{
ClusterOperations, CreateShardingKeyOperation, DropShardingKeyOperation,
};
use collection::operations::types::CollectionsAliasesResponse;
use collection::operations::verification::new_unchecked_verification_pass;
use storage::dispatcher::Dispatcher;
use tonic::{Request, Response, Status};
use super::validate;
use crate::common::collections::*;
use crate::tonic::api::collections_common::get;
use crate::tonic::auth::extract_access;
/// gRPC service implementing the public collections API.
pub struct CollectionsService {
    // Routes meta operations through consensus when clustering is enabled.
    dispatcher: Arc<Dispatcher>,
}
impl CollectionsService {
    /// Create the service over the shared dispatcher.
    pub fn new(dispatcher: Arc<Dispatcher>) -> Self {
        Self { dispatcher }
    }

    /// Shared driver for collection meta operations (create/update/delete,
    /// alias changes, cluster setup): converts the gRPC message into a
    /// `CollectionMetaOperations`, submits it via the dispatcher, and wraps
    /// the outcome with timing info.
    async fn perform_operation<O>(
        &self,
        mut request: Request<O>,
    ) -> Result<Response<CollectionOperationResponse>, Status>
    where
        O: WithTimeout
            + TryInto<
                storage::content_manager::collection_meta_ops::CollectionMetaOperations,
                Error = Status,
            >,
    {
        let timing = Instant::now();
        let access = extract_access(&mut request);
        let operation = request.into_inner();
        // How long to wait for the operation to be applied (from the message's
        // `timeout` field, via the WithTimeout impl).
        let wait_timeout = operation.wait_timeout();
        let result = self
            .dispatcher
            .submit_collection_meta_op(operation.try_into()?, access, wait_timeout)
            .await?;
        let response = CollectionOperationResponse::from((timing, result));
        Ok(Response::new(response))
    }
}
#[tonic::async_trait]
impl Collections for CollectionsService {
    /// Fetch info for a single collection (no shard scoping on the public API).
    async fn get(
        &self,
        mut request: Request<GetCollectionInfoRequest>,
    ) -> Result<Response<GetCollectionInfoResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        get(
            self.dispatcher.toc(&access, &pass),
            request.into_inner(),
            access,
            None,
        )
        .await
    }

    /// List all collections visible to the caller's access token.
    async fn list(
        &self,
        mut request: Request<ListCollectionsRequest>,
    ) -> Result<Response<ListCollectionsResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let result = do_list_collections(self.dispatcher.toc(&access, &pass), access).await?;
        let response = ListCollectionsResponse::from((timing, result));
        Ok(Response::new(response))
    }

    /// Create a collection (meta operation, goes through consensus).
    async fn create(
        &self,
        request: Request<CreateCollection>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        validate(request.get_ref())?;
        self.perform_operation(request).await
    }

    /// Update collection parameters (meta operation).
    async fn update(
        &self,
        request: Request<UpdateCollection>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        validate(request.get_ref())?;
        self.perform_operation(request).await
    }

    /// Delete a collection (meta operation).
    async fn delete(
        &self,
        request: Request<DeleteCollection>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        validate(request.get_ref())?;
        self.perform_operation(request).await
    }

    /// Apply a batch of alias create/rename/delete actions (meta operation).
    async fn update_aliases(
        &self,
        request: Request<ChangeAliases>,
    ) -> Result<Response<CollectionOperationResponse>, Status> {
        validate(request.get_ref())?;
        self.perform_operation(request).await
    }

    /// List the aliases pointing at one specific collection.
    async fn list_collection_aliases(
        &self,
        mut request: Request<ListCollectionAliasesRequest>,
    ) -> Result<Response<ListAliasesResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let ListCollectionAliasesRequest { collection_name } = request.into_inner();
        let CollectionsAliasesResponse { aliases } = do_list_collection_aliases(
            self.dispatcher.toc(&access, &pass),
            access,
            &collection_name,
        )
        .await?;
        let response = ListAliasesResponse {
            aliases: aliases.into_iter().map(|alias| alias.into()).collect(),
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// List all aliases across all collections visible to the caller.
    async fn list_aliases(
        &self,
        mut request: Request<ListAliasesRequest>,
    ) -> Result<Response<ListAliasesResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let CollectionsAliasesResponse { aliases } =
            do_list_aliases(self.dispatcher.toc(&access, &pass), access).await?;
        let response = ListAliasesResponse {
            aliases: aliases.into_iter().map(|alias| alias.into()).collect(),
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// Check whether a collection exists (lighter than fetching full info).
    async fn collection_exists(
        &self,
        mut request: Request<CollectionExistsRequest>,
    ) -> Result<Response<CollectionExistsResponse>, Status> {
        let timing = Instant::now();
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let CollectionExistsRequest { collection_name } = request.into_inner();
        let result = do_collection_exists(
            self.dispatcher.toc(&access, &pass),
            access,
            &collection_name,
        )
        .await?;
        let response = CollectionExistsResponse {
            result: Some(result),
            time: timing.elapsed().as_secs_f64(),
        };
        Ok(Response::new(response))
    }

    /// Report shard/replica placement for one collection across the cluster.
    async fn collection_cluster_info(
        &self,
        mut request: Request<CollectionClusterInfoRequest>,
    ) -> Result<Response<CollectionClusterInfoResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let response = do_get_collection_cluster(
            self.dispatcher.toc(&access, &pass),
            access,
            request.into_inner().collection_name.as_str(),
        )
        .await?
        .into();
        Ok(Response::new(response))
    }

    /// Apply a cluster operation (move/replicate shard, etc.) to a collection.
    async fn update_collection_cluster_setup(
        &self,
        mut request: Request<UpdateCollectionClusterSetupRequest>,
    ) -> Result<Response<UpdateCollectionClusterSetupResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let UpdateCollectionClusterSetupRequest {
            collection_name,
            operation,
            timeout,
            ..
        } = request.into_inner();
        let result = do_update_collection_cluster(
            self.dispatcher.as_ref(),
            collection_name,
            operation
                .ok_or_else(|| Status::new(tonic::Code::InvalidArgument, "empty operation"))?
                .try_into()?,
            access,
            timeout.map(std::time::Duration::from_secs),
        )
        .await?;
        Ok(Response::new(UpdateCollectionClusterSetupResponse {
            result,
        }))
    }

    /// List the shard keys configured for a collection (custom sharding).
    async fn list_shard_keys(
        &self,
        mut request: Request<ListShardKeysRequest>,
    ) -> Result<Response<ListShardKeysResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let result = do_get_collection_shard_keys(
            self.dispatcher.toc(&access, &pass),
            access,
            request.into_inner().collection_name.as_str(),
        )
        .await?;
        let response = ListShardKeysResponse::from((timing, result));
        Ok(Response::new(response))
    }

    /// Create a shard key: routed as a cluster operation.
    async fn create_shard_key(
        &self,
        mut request: Request<CreateShardKeyRequest>,
    ) -> Result<Response<CreateShardKeyResponse>, Status> {
        let access = extract_access(&mut request);
        let CreateShardKeyRequest {
            collection_name,
            request,
            timeout,
        } = request.into_inner();
        let Some(request) = request else {
            return Err(Status::new(tonic::Code::InvalidArgument, "empty request"));
        };
        let timeout = timeout.map(std::time::Duration::from_secs);
        let operation = ClusterOperations::CreateShardingKey(CreateShardingKeyOperation {
            create_sharding_key: request.try_into()?,
        });
        let result = do_update_collection_cluster(
            self.dispatcher.as_ref(),
            collection_name,
            operation,
            access,
            timeout,
        )
        .await?;
        Ok(Response::new(CreateShardKeyResponse { result }))
    }

    /// Drop a shard key: routed as a cluster operation.
    async fn delete_shard_key(
        &self,
        mut request: Request<DeleteShardKeyRequest>,
    ) -> Result<Response<DeleteShardKeyResponse>, Status> {
        let access = extract_access(&mut request);
        let DeleteShardKeyRequest {
            collection_name,
            request,
            timeout,
        } = request.into_inner();
        let Some(request) = request else {
            return Err(Status::new(tonic::Code::InvalidArgument, "empty request"));
        };
        let timeout = timeout.map(std::time::Duration::from_secs);
        let operation = ClusterOperations::DropShardingKey(DropShardingKeyOperation {
            drop_sharding_key: request.try_into()?,
        });
        let result = do_update_collection_cluster(
            self.dispatcher.as_ref(),
            collection_name,
            operation,
            access,
            timeout,
        )
        .await?;
        Ok(Response::new(DeleteShardKeyResponse { result }))
    }
}
/// Extracts the optional wait-timeout shared by collection meta-operation
/// request messages, so `perform_operation` can stay generic over them.
trait WithTimeout {
    fn wait_timeout(&self) -> Option<Duration>;
}

/// Implements [`WithTimeout`] for any request type carrying a
/// `timeout: Option<u64>` field measured in seconds.
macro_rules! impl_with_timeout {
    ($operation:ty) => {
        impl WithTimeout for $operation {
            fn wait_timeout(&self) -> Option<Duration> {
                self.timeout.map(Duration::from_secs)
            }
        }
    };
}

impl_with_timeout!(CreateCollection);
impl_with_timeout!(UpdateCollection);
impl_with_timeout!(DeleteCollection);
impl_with_timeout!(ChangeAliases);
impl_with_timeout!(UpdateCollectionClusterSetupRequest);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/raft_api.rs | src/tonic/api/raft_api.rs | use api::grpc::qdrant::raft_server::Raft;
use api::grpc::qdrant::{
AddPeerToKnownMessage, AllPeers, Peer, PeerId, RaftMessage as RaftMessageBytes, Uri as UriStr,
};
use itertools::Itertools;
use raft::eraftpb::Message as RaftMessage;
use storage::content_manager::consensus_manager::ConsensusStateRef;
use storage::content_manager::consensus_ops::ConsensusOperations;
use tokio::sync::mpsc::Sender;
use tonic::transport::Uri;
use tonic::{Request, Response, Status, async_trait};
use super::validate;
use crate::consensus;
/// gRPC service bridging incoming Raft traffic into the local consensus loop.
pub struct RaftService {
    // Channel into the consensus thread; received Raft messages go here.
    message_sender: Sender<consensus::Message>,
    consensus_state: ConsensusStateRef,
    // Whether generated peer URIs should use https (TLS between peers).
    use_tls: bool,
}
impl RaftService {
    /// Create the service with the consensus message channel and cluster state.
    pub fn new(
        sender: Sender<consensus::Message>,
        consensus_state: ConsensusStateRef,
        use_tls: bool,
    ) -> Self {
        Self {
            message_sender: sender,
            consensus_state,
            use_tls,
        }
    }
}
#[async_trait]
impl Raft for RaftService {
    /// Accept a raw Raft message from a peer and forward it to the consensus
    /// thread. Fails with `InvalidArgument` on undecodable bytes, `Internal`
    /// if the consensus channel is closed.
    async fn send(&self, mut request: Request<RaftMessageBytes>) -> Result<Response<()>, Status> {
        let message =
            <RaftMessage as prost_for_raft::Message>::decode(&request.get_mut().message[..])
                .map_err(|err| {
                    Status::invalid_argument(format!("Failed to parse raft message: {err}"))
                })?;
        self.message_sender
            .send(consensus::Message::FromPeer(Box::new(message)))
            .await
            .map_err(|_| Status::internal("Can't send Raft message over channel"))?;
        Ok(Response::new(()))
    }

    /// Resolve a peer id to its known URI from local consensus state.
    async fn who_is(
        &self,
        request: tonic::Request<PeerId>,
    ) -> Result<tonic::Response<UriStr>, tonic::Status> {
        let addresses = self.consensus_state.peer_address_by_id();
        let uri = addresses
            .get(&request.get_ref().id)
            .ok_or_else(|| Status::internal("Peer not found"))?;
        Ok(Response::new(UriStr {
            uri: uri.to_string(),
        }))
    }

    /// Register a joining peer in consensus and return the full peer list.
    ///
    /// The peer's URI is taken from the request, or synthesized from the
    /// caller's remote IP plus the supplied port (scheme per `use_tls`).
    async fn add_peer_to_known(
        &self,
        request: tonic::Request<AddPeerToKnownMessage>,
    ) -> Result<tonic::Response<AllPeers>, tonic::Status> {
        validate(request.get_ref())?;
        let peer = request.get_ref();
        let uri_string = if let Some(uri) = &peer.uri {
            uri.clone()
        } else {
            let ip = request
                .remote_addr()
                .ok_or_else(|| {
                    Status::failed_precondition("Remote address unavailable due to the used IO")
                })?
                .ip();
            let port = peer
                .port
                .ok_or_else(|| Status::invalid_argument("URI or port should be supplied"))?;
            if self.use_tls {
                format!("https://{ip}:{port}")
            } else {
                format!("http://{ip}:{port}")
            }
        };
        let uri: Uri = uri_string
            .parse()
            .map_err(|err| Status::internal(format!("Failed to parse uri: {err}")))?;
        let peer = request.into_inner();
        // the consensus operation can take up to DEFAULT_META_OP_WAIT
        self.consensus_state
            .propose_consensus_op_with_await(
                ConsensusOperations::AddPeer {
                    peer_id: peer.id,
                    uri: uri.to_string(),
                },
                None,
            )
            .await
            .map_err(|err| Status::internal(format!("Failed to add peer: {err}")))?;
        let mut addresses = self.consensus_state.peer_address_by_id();
        // Make sure that the new peer is now present in the known addresses
        if !addresses.values().contains(&uri) {
            return Err(Status::internal(format!(
                "Failed to add peer after consensus: {uri}"
            )));
        }
        let first_peer_id = self.consensus_state.first_voter();
        // If `first_peer_id` is not present in the list of peers, it means it was removed from
        // cluster at some point.
        //
        // Before Qdrant version 1.11.6 origin peer was not committed to consensus, so if it was
        // removed from cluster, any node added to the cluster after this would not recognize it as
        // being part of the cluster in the past and will end up with a broken consensus state.
        //
        // To prevent this, we add `first_peer_id` (with a fake URI) to the list of peers.
        //
        // `add_peer_to_known` is used to add new peers to the cluster, and so `first_peer_id` (and
        // its fake URI) would be removed from new peer's state shortly, while it will be synchronizing
        // and applying past Raft log.
        addresses.entry(first_peer_id).or_default();
        Ok(Response::new(AllPeers {
            all_peers: addresses
                .into_iter()
                .map(|(id, uri)| Peer {
                    id,
                    uri: uri.to_string(),
                })
                .collect(),
            first_peer_id,
        }))
    }

    // Left for compatibility - does nothing
    async fn add_peer_as_participant(
        &self,
        _request: tonic::Request<PeerId>,
    ) -> Result<tonic::Response<()>, tonic::Status> {
        Ok(Response::new(()))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/points_api.rs | src/tonic/api/points_api.rs | use std::sync::Arc;
use std::time::{Duration, Instant};
use api::grpc::Usage;
use api::grpc::qdrant::points_server::Points;
use api::grpc::qdrant::{
ClearPayloadPoints, CountPoints, CountResponse, CreateFieldIndexCollection,
DeleteFieldIndexCollection, DeletePayloadPoints, DeletePointVectors, DeletePoints,
DiscoverBatchPoints, DiscoverBatchResponse, DiscoverPoints, DiscoverResponse, FacetCounts,
FacetResponse, GetPoints, GetResponse, PointsOperationResponse, QueryBatchPoints,
QueryBatchResponse, QueryGroupsResponse, QueryPointGroups, QueryPoints, QueryResponse,
RecommendBatchPoints, RecommendBatchResponse, RecommendGroupsResponse, RecommendPointGroups,
RecommendPoints, RecommendResponse, ScrollPoints, ScrollResponse, SearchBatchPoints,
SearchBatchResponse, SearchGroupsResponse, SearchMatrixOffsets, SearchMatrixOffsetsResponse,
SearchMatrixPairs, SearchMatrixPairsResponse, SearchMatrixPoints, SearchPointGroups,
SearchPoints, SearchResponse, SetPayloadPoints, UpdateBatchPoints, UpdateBatchResponse,
UpdatePointVectors, UpsertPoints,
};
use collection::operations::types::CoreSearchRequest;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use storage::content_manager::toc::request_hw_counter::RequestHwCounter;
use storage::dispatcher::Dispatcher;
use tonic::{Request, Response, Status};
use super::query_common::*;
use super::update_common::*;
use super::validate;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::token::extract_token;
use crate::common::strict_mode::*;
use crate::common::update::InternalUpdateParams;
use crate::settings::ServiceConfig;
use crate::tonic::auth::extract_access;
/// gRPC service implementing the public points API.
pub struct PointsService {
    dispatcher: Arc<Dispatcher>,
    // Needed to decide whether hardware-usage reporting is enabled.
    service_config: ServiceConfig,
}
impl PointsService {
    /// Create the service over the shared dispatcher and service config.
    pub fn new(dispatcher: Arc<Dispatcher>, service_config: ServiceConfig) -> Self {
        Self {
            dispatcher,
            service_config,
        }
    }

    /// Build the per-request hardware usage counter for `collection_name`.
    ///
    /// Usage is reported to the client only if reporting is enabled in the
    /// service config AND the request waits for its result; `wait == None`
    /// counts as waiting, only an explicit `Some(false)` disables reporting.
    fn get_request_collection_hw_usage_counter(
        &self,
        collection_name: String,
        wait: Option<bool>,
    ) -> RequestHwCounter {
        let counter = HwMeasurementAcc::new_with_metrics_drain(
            self.dispatcher.get_collection_hw_metrics(collection_name),
        );
        let waiting = wait != Some(false);
        RequestHwCounter::new(counter, self.service_config.hardware_reporting() && waiting)
    }
}
#[tonic::async_trait]
impl Points for PointsService {
    /// Insert or overwrite points; may run inference to embed raw inputs.
    async fn upsert(
        &self,
        mut request: Request<UpsertPoints>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inference_token = extract_token(&request);
        let timeout = request.get_ref().timeout.map(Duration::from_secs);
        let inference_params = InferenceParams::new(inference_token, timeout);
        let collection_name = request.get_ref().collection_name.clone();
        // Hardware usage is reported only for waited requests.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        upsert(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            inference_params,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(PointsOperationResponse::from))
    }
    /// Delete points by id or filter.
    async fn delete(
        &self,
        mut request: Request<DeletePoints>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        // Hardware usage is reported only for waited requests.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        delete(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(PointsOperationResponse::from))
    }
    /// Retrieve points by id. Read requests always report hardware usage
    /// (`wait` is not applicable, hence `None`).
    async fn get(&self, mut request: Request<GetPoints>) -> Result<Response<GetResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inner_request = request.into_inner();
        let hw_metrics = self
            .get_request_collection_hw_usage_counter(inner_request.collection_name.clone(), None);
        get(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            inner_request,
            None,
            access,
            hw_metrics,
        )
        .await
    }
    /// Replace named vectors on existing points; may run inference.
    async fn update_vectors(
        &self,
        mut request: Request<UpdatePointVectors>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inference_token = extract_token(&request);
        let timeout = request.get_ref().timeout.map(Duration::from_secs);
        let inference_params = InferenceParams::new(inference_token, timeout);
        let collection_name = request.get_ref().collection_name.clone();
        // Hardware usage is reported only for waited requests.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        update_vectors(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            inference_params,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(PointsOperationResponse::from))
    }
async fn delete_vectors(
&self,
mut request: Request<DeletePointVectors>,
) -> Result<Response<PointsOperationResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let hw_metrics = self.get_request_collection_hw_usage_counter(
request.get_ref().collection_name.clone(),
None,
);
delete_vectors(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
InternalUpdateParams::default(),
access,
hw_metrics,
)
.await
.map(|resp| resp.map(Into::into))
}
    /// Merge payload values onto matching points (existing keys are kept).
    async fn set_payload(
        &self,
        mut request: Request<SetPayloadPoints>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        // Hardware usage is reported only for waited requests.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        set_payload(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(Into::into))
    }
async fn overwrite_payload(
&self,
mut request: Request<SetPayloadPoints>,
) -> Result<Response<PointsOperationResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let collection_name = request.get_ref().collection_name.clone();
let wait = Some(request.get_ref().wait.unwrap_or(false));
let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
overwrite_payload(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
InternalUpdateParams::default(),
access,
hw_metrics,
)
.await
.map(|resp| resp.map(Into::into))
}
    /// Delete selected payload keys from the addressed points.
    async fn delete_payload(
        &self,
        mut request: Request<DeletePayloadPoints>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        // `wait` defaults to false when the client leaves it unset.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        delete_payload(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(Into::into))
    }
async fn clear_payload(
&self,
mut request: Request<ClearPayloadPoints>,
) -> Result<Response<PointsOperationResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let collection_name = request.get_ref().collection_name.clone();
let wait = Some(request.get_ref().wait.unwrap_or(false));
let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
clear_payload(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
InternalUpdateParams::default(),
access,
hw_metrics,
)
.await
.map(|resp| resp.map(Into::into))
}
    /// Apply a batch of heterogeneous update operations in one call.
    ///
    /// Inference metadata is extracted up-front because the whole batch shares
    /// one token/timeout pair.
    async fn update_batch(
        &self,
        mut request: Request<UpdateBatchPoints>,
    ) -> Result<Response<UpdateBatchResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inference_token = extract_token(&request);
        let timeout = request.get_ref().timeout.map(Duration::from_secs);
        let inference_params = InferenceParams::new(inference_token, timeout);
        let collection_name = request.get_ref().collection_name.clone();
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        // NOTE: passes the dispatcher directly, not a strict-mode-checked
        // provider — presumably each sub-operation is checked downstream;
        // confirm against `update_batch`'s implementation.
        update_batch(
            &self.dispatcher,
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            inference_params,
            hw_metrics,
        )
        .await
    }
    /// Create a payload field index on a collection.
    async fn create_field_index(
        &self,
        mut request: Request<CreateFieldIndexCollection>,
    ) -> Result<Response<PointsOperationResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        // `wait` defaults to false when the client leaves it unset.
        let wait = Some(request.get_ref().wait.unwrap_or(false));
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, wait);
        create_field_index(
            self.dispatcher.clone(),
            request.into_inner(),
            InternalUpdateParams::default(),
            access,
            hw_metrics,
        )
        .await
        .map(|resp| resp.map(Into::into))
    }
async fn delete_field_index(
&self,
mut request: Request<DeleteFieldIndexCollection>,
) -> Result<Response<PointsOperationResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
delete_field_index(
self.dispatcher.clone(),
request.into_inner(),
InternalUpdateParams::default(),
access,
)
.await
.map(|resp| resp.map(Into::into))
}
async fn search(
&self,
mut request: Request<SearchPoints>,
) -> Result<Response<SearchResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let collection_name = request.get_ref().collection_name.clone();
let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
let res = search(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
None,
access,
hw_metrics,
)
.await?;
Ok(res)
}
async fn search_batch(
&self,
mut request: Request<SearchBatchPoints>,
) -> Result<Response<SearchBatchResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let SearchBatchPoints {
collection_name,
search_points,
read_consistency,
timeout,
} = request.into_inner();
let timeout = timeout.map(Duration::from_secs);
let mut requests = Vec::new();
for mut search_point in search_points {
let shard_key = search_point.shard_key_selector.take();
let shard_selector = convert_shard_selector_for_read(None, shard_key)?;
let core_search_request = CoreSearchRequest::try_from(search_point)?;
requests.push((core_search_request, shard_selector));
}
let hw_metrics =
self.get_request_collection_hw_usage_counter(collection_name.clone(), None);
let res = core_search_batch(
StrictModeCheckedTocProvider::new(&self.dispatcher),
&collection_name,
requests,
read_consistency,
access,
timeout,
hw_metrics,
)
.await?;
Ok(res)
}
    /// Run a vector search whose results are grouped by a payload field.
    async fn search_groups(
        &self,
        mut request: Request<SearchPointGroups>,
    ) -> Result<Response<SearchGroupsResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let res = search_groups(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            None,
            access,
            hw_metrics,
        )
        .await?;
        Ok(res)
    }
async fn scroll(
&self,
mut request: Request<ScrollPoints>,
) -> Result<Response<ScrollResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let inner_request = request.into_inner();
let hw_metrics = self
.get_request_collection_hw_usage_counter(inner_request.collection_name.clone(), None);
scroll(
StrictModeCheckedTocProvider::new(&self.dispatcher),
inner_request,
None,
access,
hw_metrics,
)
.await
}
    /// Run a recommendation query (positive/negative examples) on one collection.
    async fn recommend(
        &self,
        mut request: Request<RecommendPoints>,
    ) -> Result<Response<RecommendResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let res = recommend(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            access,
            hw_metrics,
        )
        .await?;
        Ok(res)
    }
async fn recommend_batch(
&self,
mut request: Request<RecommendBatchPoints>,
) -> Result<Response<RecommendBatchResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let RecommendBatchPoints {
collection_name,
recommend_points,
read_consistency,
timeout,
} = request.into_inner();
let hw_metrics =
self.get_request_collection_hw_usage_counter(collection_name.clone(), None);
let res = recommend_batch(
StrictModeCheckedTocProvider::new(&self.dispatcher),
&collection_name,
recommend_points,
read_consistency,
access,
timeout.map(Duration::from_secs),
hw_metrics,
)
.await?;
Ok(res)
}
    /// Run a recommendation query whose results are grouped by a payload field.
    async fn recommend_groups(
        &self,
        mut request: Request<RecommendPointGroups>,
    ) -> Result<Response<RecommendGroupsResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let res = recommend_groups(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            access,
            hw_metrics,
        )
        .await?;
        Ok(res)
    }
async fn discover(
&self,
mut request: Request<DiscoverPoints>,
) -> Result<Response<DiscoverResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let collection_name = request.get_ref().collection_name.clone();
let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
let res = discover(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
access,
hw_metrics,
)
.await?;
Ok(res)
}
    /// Execute several discovery requests against one collection at once.
    async fn discover_batch(
        &self,
        mut request: Request<DiscoverBatchPoints>,
    ) -> Result<Response<DiscoverBatchResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let DiscoverBatchPoints {
            collection_name,
            discover_points,
            read_consistency,
            timeout,
        } = request.into_inner();
        let hw_metrics =
            self.get_request_collection_hw_usage_counter(collection_name.clone(), None);
        let res = discover_batch(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            &collection_name,
            discover_points,
            read_consistency,
            access,
            // Client timeout is in whole seconds.
            timeout.map(Duration::from_secs),
            hw_metrics,
        )
        .await?;
        Ok(res)
    }
async fn count(
&self,
mut request: Request<CountPoints>,
) -> Result<Response<CountResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let collection_name = request.get_ref().collection_name.clone();
let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
let res = count(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
None,
&access,
hw_metrics,
)
.await?;
Ok(res)
}
    /// Run a universal query (search / recommend / discover / fusion) request.
    async fn query(
        &self,
        mut request: Request<QueryPoints>,
    ) -> Result<Response<QueryResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inference_token = extract_token(&request);
        let timeout = request.get_ref().timeout.map(Duration::from_secs);
        let inference_params = InferenceParams::new(inference_token, timeout);
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let res = query(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            None,
            access,
            hw_metrics,
            inference_params,
        )
        .await?;
        Ok(res)
    }
async fn query_batch(
&self,
mut request: Request<QueryBatchPoints>,
) -> Result<Response<QueryBatchResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let inference_token = extract_token(&request);
let timeout = request.get_ref().timeout.map(Duration::from_secs);
let inference_params = InferenceParams::new(inference_token, timeout);
let request = request.into_inner();
let QueryBatchPoints {
collection_name,
query_points,
read_consistency,
timeout,
} = request;
let timeout = timeout.map(Duration::from_secs);
let hw_metrics =
self.get_request_collection_hw_usage_counter(collection_name.clone(), None);
let res = query_batch(
StrictModeCheckedTocProvider::new(&self.dispatcher),
&collection_name,
query_points,
read_consistency,
access,
timeout,
hw_metrics,
inference_params,
)
.await?;
Ok(res)
}
    /// Run a universal query whose results are grouped by a payload field.
    async fn query_groups(
        &self,
        mut request: Request<QueryPointGroups>,
    ) -> Result<Response<QueryGroupsResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let inference_token = extract_token(&request);
        let timeout = request.get_ref().timeout.map(Duration::from_secs);
        let inference_params = InferenceParams::new(inference_token, timeout);
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let res = query_groups(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            None,
            access,
            hw_metrics,
            inference_params,
        )
        .await?;
        Ok(res)
    }
async fn facet(
&self,
mut request: Request<FacetCounts>,
) -> Result<Response<FacetResponse>, Status> {
validate(request.get_ref())?;
let access = extract_access(&mut request);
let hw_metrics = self.get_request_collection_hw_usage_counter(
request.get_ref().collection_name.clone(),
None,
);
facet(
StrictModeCheckedTocProvider::new(&self.dispatcher),
request.into_inner(),
access,
hw_metrics,
)
.await
}
    /// Compute a pairwise distance matrix over sampled points, returned as
    /// explicit id pairs.
    async fn search_matrix_pairs(
        &self,
        mut request: Request<SearchMatrixPoints>,
    ) -> Result<Response<SearchMatrixPairsResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let timing = Instant::now();
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let search_matrix_response = search_points_matrix(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            access,
            hw_metrics.get_counter(),
        )
        .await?;
        // Response is assembled here (not in a helper) so that elapsed time and
        // hardware usage reflect this request only.
        let pairs_response = SearchMatrixPairsResponse {
            result: Some(SearchMatrixPairs::from(search_matrix_response)),
            time: timing.elapsed().as_secs_f64(),
            usage: Usage::from_hardware_usage(hw_metrics.to_grpc_api()).into_non_empty(),
        };
        Ok(Response::new(pairs_response))
    }
    /// Compute a pairwise distance matrix over sampled points, returned in the
    /// compact offsets representation (same computation as `search_matrix_pairs`,
    /// different response encoding).
    async fn search_matrix_offsets(
        &self,
        mut request: Request<SearchMatrixPoints>,
    ) -> Result<Response<SearchMatrixOffsetsResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let timing = Instant::now();
        let collection_name = request.get_ref().collection_name.clone();
        let hw_metrics = self.get_request_collection_hw_usage_counter(collection_name, None);
        let search_matrix_response = search_points_matrix(
            StrictModeCheckedTocProvider::new(&self.dispatcher),
            request.into_inner(),
            access,
            hw_metrics.get_counter(),
        )
        .await?;
        let offsets_response = SearchMatrixOffsetsResponse {
            result: Some(SearchMatrixOffsets::from(search_matrix_response)),
            time: timing.elapsed().as_secs_f64(),
            usage: Usage::from_hardware_usage(hw_metrics.to_grpc_api()).into_non_empty(),
        };
        Ok(Response::new(offsets_response))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/collections_common.rs | src/tonic/api/collections_common.rs | use std::time::Instant;
use api::grpc::qdrant::{GetCollectionInfoRequest, GetCollectionInfoResponse};
use collection::shards::shard::ShardId;
use storage::content_manager::toc::TableOfContent;
use storage::rbac::Access;
use tonic::{Response, Status};
use crate::common::collections::do_get_collection;
/// Fetch collection info over gRPC, optionally scoped to a single shard.
///
/// Measures wall-clock time of the lookup and reports it in the response.
pub async fn get(
    toc: &TableOfContent,
    get_collection_info_request: GetCollectionInfoRequest,
    access: Access,
    shard_selection: Option<ShardId>,
) -> Result<Response<GetCollectionInfoResponse>, Status> {
    let timing = Instant::now();
    let collection_name = get_collection_info_request.collection_name;
    let info = do_get_collection(toc, access, &collection_name, shard_selection).await?;
    Ok(Response::new(GetCollectionInfoResponse {
        result: Some(info.into()),
        time: timing.elapsed().as_secs_f64(),
    }))
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/points_internal_api.rs | src/tonic/api/points_internal_api.rs | use std::any;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use api::grpc::HardwareUsage;
use api::grpc::qdrant::points_internal_server::PointsInternal;
use api::grpc::qdrant::{
ClearPayloadPointsInternal, CoreSearchBatchPointsInternal, CountPointsInternal, CountResponse,
CreateFieldIndexCollectionInternal, DeleteFieldIndexCollectionInternal,
DeletePayloadPointsInternal, DeletePointsInternal, DeleteVectorsInternal, FacetCountsInternal,
FacetResponseInternal, GetPointsInternal, GetResponse, IntermediateResult,
PointsOperationResponseInternal, QueryBatchPointsInternal, QueryBatchResponseInternal,
QueryResultInternal, QueryShardPoints, RecommendPointsInternal, RecommendResponse,
ScrollPointsInternal, ScrollResponse, SearchBatchResponse, SetPayloadPointsInternal,
SyncPointsInternal, UpdateBatchInternal, UpdateVectorsInternal, UpsertPointsInternal,
};
use api::grpc::update_operation::Update;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::universal_query::shard_query::ShardQueryRequest;
use collection::shards::shard::ShardId;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use itertools::Itertools;
use segment::data_types::facets::{FacetParams, FacetResponse};
use segment::json_path::JsonPath;
use segment::types::Filter;
use storage::content_manager::toc::TableOfContent;
use storage::content_manager::toc::request_hw_counter::RequestHwCounter;
use storage::rbac::Access;
use tonic::{Request, Response, Status};
use super::query_common::*;
use super::update_common::*;
use super::validate_and_log;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::token::extract_token;
use crate::common::strict_mode::*;
use crate::common::update::InternalUpdateParams;
use crate::settings::ServiceConfig;
// Full access used for internal node-to-node requests; the string is the
// audit/label argument of `Access::full`.
const FULL_ACCESS: Access = Access::full("Internal API");
/// This API is intended for P2P communication within a distributed deployment.
pub struct PointsInternalService {
    // Shared table of contents: entry point to collections and their shards.
    toc: Arc<TableOfContent>,
    // Service configuration; used e.g. to decide whether hardware usage is reported.
    service_config: ServiceConfig,
}
impl PointsInternalService {
    /// Construct the internal points service from shared state.
    pub fn new(toc: Arc<TableOfContent>, service_config: ServiceConfig) -> Self {
        Self {
            toc,
            service_config,
        }
    }

    /// Handle an internal point-sync operation for one shard.
    /// Runs with full access: internal requests are already authorized upstream.
    async fn sync_internal(
        &self,
        sync_points_internal: SyncPointsInternal,
        inference_params: InferenceParams,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let SyncPointsInternal {
            sync_points,
            shard_id,
            clock_tag,
        } = sync_points_internal;
        let sync_points = extract_internal_request(sync_points)?;
        // Exclude the `inference_usage` for internal call
        let (response, _inference_usage) = sync(
            self.toc.clone(),
            sync_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            inference_params,
        )
        .await?
        .into_inner();
        Ok(Response::new(response))
    }

    /// Handle an internal upsert for one shard (strict-mode-checked).
    async fn upsert_internal(
        &self,
        upsert_points_internal: UpsertPointsInternal,
        inference_params: InferenceParams,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let UpsertPointsInternal {
            upsert_points,
            shard_id,
            clock_tag,
        } = upsert_points_internal;
        let upsert_points = extract_internal_request(upsert_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            upsert_points.collection_name.clone(),
        );
        upsert(
            StrictModeCheckedInternalTocProvider::new(&self.toc),
            upsert_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            inference_params.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal point deletion for one shard.
    /// Uses the unchecked TOC provider (no strict-mode check on this path).
    async fn delete_internal(
        &self,
        delete_points_internal: DeletePointsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let DeletePointsInternal {
            delete_points,
            shard_id,
            clock_tag,
        } = delete_points_internal;
        let delete_points = extract_internal_request(delete_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            delete_points.collection_name.clone(),
        );
        delete(
            UncheckedTocProvider::new_unchecked(&self.toc),
            delete_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal named-vector update for one shard (strict-mode-checked).
    async fn update_vectors_internal(
        &self,
        update_vectors_internal: UpdateVectorsInternal,
        inference_params: InferenceParams,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let UpdateVectorsInternal {
            update_vectors,
            shard_id,
            clock_tag,
        } = update_vectors_internal;
        let update_point_vectors = extract_internal_request(update_vectors)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            update_point_vectors.collection_name.clone(),
        );
        // Fully-qualified path: disambiguates from this service's trait method.
        crate::tonic::api::update_common::update_vectors(
            StrictModeCheckedInternalTocProvider::new(&self.toc),
            update_point_vectors,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            inference_params.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal named-vector deletion for one shard.
    async fn delete_vectors_internal(
        &self,
        delete_vectors_internal: DeleteVectorsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let DeleteVectorsInternal {
            delete_vectors,
            shard_id,
            clock_tag,
        } = delete_vectors_internal;
        let delete_point_vectors = extract_internal_request(delete_vectors)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            delete_point_vectors.collection_name.clone(),
        );
        // Fully-qualified path: disambiguates from this service's trait method.
        crate::tonic::api::update_common::delete_vectors(
            UncheckedTocProvider::new_unchecked(&self.toc),
            delete_point_vectors,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal payload-set (merge) for one shard (strict-mode-checked).
    async fn set_payload_internal(
        &self,
        set_payload_internal: SetPayloadPointsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let SetPayloadPointsInternal {
            set_payload_points,
            shard_id,
            clock_tag,
        } = set_payload_internal;
        let set_payload_points = extract_internal_request(set_payload_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            set_payload_points.collection_name.clone(),
        );
        set_payload(
            StrictModeCheckedInternalTocProvider::new(&self.toc),
            set_payload_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal payload overwrite for one shard (strict-mode-checked).
    /// Shares the `SetPayloadPointsInternal` message with `set_payload_internal`.
    async fn overwrite_payload_internal(
        &self,
        overwrite_payload_internal: SetPayloadPointsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let SetPayloadPointsInternal {
            set_payload_points,
            shard_id,
            clock_tag,
        } = overwrite_payload_internal;
        let set_payload_points = extract_internal_request(set_payload_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            set_payload_points.collection_name.clone(),
        );
        overwrite_payload(
            StrictModeCheckedInternalTocProvider::new(&self.toc),
            set_payload_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal payload-key deletion for one shard.
    async fn delete_payload_internal(
        &self,
        delete_payload_internal: DeletePayloadPointsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let DeletePayloadPointsInternal {
            delete_payload_points,
            shard_id,
            clock_tag,
        } = delete_payload_internal;
        let delete_payload_points = extract_internal_request(delete_payload_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            delete_payload_points.collection_name.clone(),
        );
        delete_payload(
            UncheckedTocProvider::new_unchecked(&self.toc),
            delete_payload_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal full payload clear for one shard.
    async fn clear_payload_internal(
        &self,
        clear_payload_internal: ClearPayloadPointsInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let ClearPayloadPointsInternal {
            clear_payload_points,
            shard_id,
            clock_tag,
        } = clear_payload_internal;
        let clear_payload_points = extract_internal_request(clear_payload_points)?;
        let hw_metrics = self.get_request_collection_hw_usage_counter_for_internal(
            clear_payload_points.collection_name.clone(),
        );
        clear_payload(
            UncheckedTocProvider::new_unchecked(&self.toc),
            clear_payload_points,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
            FULL_ACCESS.clone(),
            hw_metrics,
        )
        .await
    }

    /// Handle an internal field-index creation for one shard.
    async fn create_field_index_internal(
        &self,
        create_field_index_collection: CreateFieldIndexCollectionInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let CreateFieldIndexCollectionInternal {
            create_field_index_collection,
            shard_id,
            clock_tag,
        } = create_field_index_collection;
        create_field_index_internal(
            self.toc.clone(),
            extract_internal_request(create_field_index_collection)?,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
        )
        .await
    }

    /// Handle an internal field-index deletion for one shard.
    async fn delete_field_index_internal(
        &self,
        delete_field_index_collection: DeleteFieldIndexCollectionInternal,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        let DeleteFieldIndexCollectionInternal {
            delete_field_index_collection,
            shard_id,
            clock_tag,
        } = delete_field_index_collection;
        delete_field_index_internal(
            self.toc.clone(),
            extract_internal_request(delete_field_index_collection)?,
            InternalUpdateParams::from_grpc(shard_id, clock_tag),
        )
        .await
    }
}
/// Execute a batch of shard-level queries on this node and convert the results
/// into the internal gRPC response shape.
///
/// Internal requests are expected to arrive with an explicit shard id; a
/// missing id is a programming error (debug-asserted) and degrades to querying
/// all shards in release builds.
pub async fn query_batch_internal(
    toc: &TableOfContent,
    collection_name: String,
    query_points: Vec<QueryShardPoints>,
    shard_selection: Option<ShardId>,
    timeout: Option<Duration>,
    request_hw_data: RequestHwCounter,
) -> Result<Response<QueryBatchResponseInternal>, Status> {
    let batch_requests: Vec<_> = query_points
        .into_iter()
        .map(ShardQueryRequest::try_from)
        .try_collect()?;

    let timing = Instant::now();

    // As this function is handling an internal request,
    // we can assume that shard_key is already resolved
    let shard_selection = match shard_selection {
        None => {
            debug_assert!(false, "Shard selection is expected for internal request");
            ShardSelectorInternal::All
        }
        Some(shard_id) => ShardSelectorInternal::ShardId(shard_id),
    };

    let batch_response = toc
        .query_batch_internal(
            &collection_name,
            batch_requests,
            shard_selection,
            timeout,
            request_hw_data.get_counter(),
        )
        .await?;

    // Re-nest the results: per request -> per intermediate stage -> scored points.
    let response = QueryBatchResponseInternal {
        results: batch_response
            .into_iter()
            .map(|response| QueryResultInternal {
                intermediate_results: response
                    .into_iter()
                    .map(|intermediate| IntermediateResult {
                        result: intermediate.into_iter().map(From::from).collect_vec(),
                    })
                    .collect_vec(),
            })
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        hardware_usage: request_hw_data.to_grpc_api(),
        inference_usage: None, // No inference in internal API
    };

    Ok(Response::new(response))
}
/// Execute a facet-count request on a single shard of this node and convert the
/// hits into the internal gRPC response shape.
async fn facet_counts_internal(
    toc: &TableOfContent,
    request: FacetCountsInternal,
    request_hw_data: RequestHwCounter,
) -> Result<Response<FacetResponseInternal>, Status> {
    let timing = Instant::now();

    let FacetCountsInternal {
        collection_name,
        key,
        filter,
        limit,
        exact,
        shard_id,
        timeout,
    } = request;

    let shard_selection = ShardSelectorInternal::ShardId(shard_id);

    let request = FacetParams {
        // The facet key travels as a plain string; parse it into a JSON path.
        key: JsonPath::from_str(&key)
            .map_err(|_| Status::invalid_argument("Failed to parse facet key"))?,
        limit: limit as usize,
        filter: filter.map(Filter::try_from).transpose()?,
        exact,
    };

    let response = toc
        .facet_internal(
            &collection_name,
            request,
            shard_selection,
            timeout.map(Duration::from_secs),
            request_hw_data.get_counter(),
        )
        .await?;

    let FacetResponse { hits } = response;

    let response = FacetResponseInternal {
        hits: hits.into_iter().map(From::from).collect_vec(),
        time: timing.elapsed().as_secs_f64(),
        usage: request_hw_data.to_grpc_api(),
    };

    Ok(Response::new(response))
}
impl PointsInternalService {
    /// Generates a new `RequestHwCounter` for the request.
    /// This counter is intended to be used for internal requests.
    fn get_request_collection_hw_usage_counter_for_internal(
        &self,
        collection_name: String,
    ) -> RequestHwCounter {
        let counter = HwMeasurementAcc::new_with_metrics_drain(
            self.toc.get_collection_hw_metrics(collection_name),
        );

        // Whether usage is reported back to the caller is a service-config toggle.
        RequestHwCounter::new(counter, self.service_config.hardware_reporting())
    }
}
#[tonic::async_trait]
impl PointsInternal for PointsInternalService {
async fn upsert(
&self,
request: Request<UpsertPointsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
let inference_token = extract_token(&request);
let inference_params = InferenceParams::new(inference_token.clone(), None);
self.upsert_internal(request.into_inner(), inference_params)
.await
}
async fn delete(
&self,
request: Request<DeletePointsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
self.delete_internal(request.into_inner()).await
}
async fn update_vectors(
&self,
request: Request<UpdateVectorsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
let inference_token = extract_token(&request);
let inference_params = InferenceParams::new(inference_token.clone(), None);
self.update_vectors_internal(request.into_inner(), inference_params)
.await
}
async fn delete_vectors(
&self,
request: Request<DeleteVectorsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
self.delete_vectors_internal(request.into_inner()).await
}
    /// Internal payload-set entry point: validate/log and delegate.
    async fn set_payload(
        &self,
        request: Request<SetPayloadPointsInternal>,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        validate_and_log(request.get_ref());
        self.set_payload_internal(request.into_inner()).await
    }
async fn overwrite_payload(
&self,
request: Request<SetPayloadPointsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
self.overwrite_payload_internal(request.into_inner()).await
}
    /// Internal payload-key deletion entry point: validate/log and delegate.
    async fn delete_payload(
        &self,
        request: Request<DeletePayloadPointsInternal>,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        validate_and_log(request.get_ref());
        self.delete_payload_internal(request.into_inner()).await
    }
async fn clear_payload(
&self,
request: Request<ClearPayloadPointsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
self.clear_payload_internal(request.into_inner()).await
}
    /// Internal field-index creation entry point: validate/log and delegate.
    async fn create_field_index(
        &self,
        request: Request<CreateFieldIndexCollectionInternal>,
    ) -> Result<Response<PointsOperationResponseInternal>, Status> {
        validate_and_log(request.get_ref());
        self.create_field_index_internal(request.into_inner()).await
    }
async fn delete_field_index(
&self,
request: Request<DeleteFieldIndexCollectionInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
self.delete_field_index_internal(request.into_inner()).await
}
async fn update_batch(
&self,
request: Request<UpdateBatchInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
let inference_token = extract_token(&request);
// Update operation doesn't specify explicit timeout yet
let inference_params = InferenceParams::new(inference_token.clone(), None);
let request_inner = request.into_inner();
let mut total_usage = HardwareUsage::default();
let mut last_result = None;
// This API:
// - Sequentially applies all operations
// - If one operation fails, it will report the error immediately
// - If no operations are present, it will return an empty response
// - If all operations are successful, it will return the last operation result
for update in request_inner.operations {
let result = match update.update {
None => {
return Err(Status::invalid_argument("Update is missing"));
}
Some(update) => match update {
Update::Sync(sync) => {
self.sync_internal(sync, inference_params.clone()).await?
}
Update::Upsert(upsert) => {
self.upsert_internal(upsert, inference_params.clone())
.await?
}
Update::Delete(delete) => self.delete_internal(delete).await?,
Update::UpdateVectors(update_vectors) => {
self.update_vectors_internal(update_vectors, inference_params.clone())
.await?
}
Update::DeleteVectors(delete_vectors) => {
self.delete_vectors_internal(delete_vectors).await?
}
Update::SetPayload(set_payload) => {
self.set_payload_internal(set_payload).await?
}
Update::OverwritePayload(overwrite_payload) => {
self.overwrite_payload_internal(overwrite_payload).await?
}
Update::DeletePayload(delete_payload) => {
self.delete_payload_internal(delete_payload).await?
}
Update::ClearPayload(clear_payload) => {
self.clear_payload_internal(clear_payload).await?
}
Update::CreateFieldIndex(create_field_index) => {
self.create_field_index_internal(create_field_index).await?
}
Update::DeleteFieldIndex(delete_field_index) => {
self.delete_field_index_internal(delete_field_index).await?
}
},
};
let mut response = result.into_inner();
if let Some(usage) = response.hardware_usage.take() {
total_usage.add(usage);
}
last_result = Some(response)
}
if let Some(mut last_result) = last_result.take() {
last_result.hardware_usage = Some(total_usage);
Ok(Response::new(last_result))
} else {
// This response is possible if there are no operations in the request
Ok(Response::new(PointsOperationResponseInternal {
result: None,
time: 0.0,
hardware_usage: None,
inference_usage: None, // No inference in internal API
}))
}
}
async fn core_search_batch(
&self,
request: Request<CoreSearchBatchPointsInternal>,
) -> Result<Response<SearchBatchResponse>, Status> {
validate_and_log(request.get_ref());
let CoreSearchBatchPointsInternal {
collection_name,
search_points,
shard_id,
timeout,
} = request.into_inner();
let timeout = timeout.map(Duration::from_secs);
// Individual `read_consistency` values are ignored by `core_search_batch`...
//
// search_points
// .iter_mut()
// .for_each(|search_points| search_points.read_consistency = None);
let hw_data =
self.get_request_collection_hw_usage_counter_for_internal(collection_name.clone());
let res = core_search_list(
self.toc.as_ref(),
collection_name,
search_points,
None, // *Has* to be `None`!
shard_id,
FULL_ACCESS.clone(),
timeout,
hw_data,
)
.await?;
Ok(res)
}
async fn recommend(
&self,
request: Request<RecommendPointsInternal>,
) -> Result<Response<RecommendResponse>, Status> {
validate_and_log(request.get_ref());
let RecommendPointsInternal {
recommend_points,
.. // shard_id - is not used in internal API,
// because it is transformed into regular search requests on the first node
} = request.into_inner();
let mut recommend_points = recommend_points
.ok_or_else(|| Status::invalid_argument("RecommendPoints is missing"))?;
recommend_points.read_consistency = None; // *Have* to be `None`!
let collection_name = recommend_points.collection_name.clone();
let hw_data = self.get_request_collection_hw_usage_counter_for_internal(collection_name);
let res = recommend(
UncheckedTocProvider::new_unchecked(&self.toc),
recommend_points,
FULL_ACCESS.clone(),
hw_data,
)
.await?;
Ok(res)
}
async fn scroll(
&self,
request: Request<ScrollPointsInternal>,
) -> Result<Response<ScrollResponse>, Status> {
validate_and_log(request.get_ref());
let ScrollPointsInternal {
scroll_points,
shard_id,
} = request.into_inner();
let mut scroll_points =
scroll_points.ok_or_else(|| Status::invalid_argument("ScrollPoints is missing"))?;
scroll_points.read_consistency = None; // *Have* to be `None`!
let hw_data = self.get_request_collection_hw_usage_counter_for_internal(
scroll_points.collection_name.clone(),
);
scroll(
UncheckedTocProvider::new_unchecked(&self.toc),
scroll_points,
shard_id,
FULL_ACCESS.clone(),
hw_data,
)
.await
}
async fn get(
&self,
request: Request<GetPointsInternal>,
) -> Result<Response<GetResponse>, Status> {
validate_and_log(request.get_ref());
let GetPointsInternal {
get_points,
shard_id,
} = request.into_inner();
let mut get_points =
get_points.ok_or_else(|| Status::invalid_argument("GetPoints is missing"))?;
get_points.read_consistency = None; // *Have* to be `None`!
let hw_data = self.get_request_collection_hw_usage_counter_for_internal(
get_points.collection_name.clone(),
);
get(
UncheckedTocProvider::new_unchecked(&self.toc),
get_points,
shard_id,
FULL_ACCESS.clone(),
hw_data,
)
.await
}
async fn count(
&self,
request: Request<CountPointsInternal>,
) -> Result<Response<CountResponse>, Status> {
validate_and_log(request.get_ref());
let CountPointsInternal {
count_points,
shard_id,
} = request.into_inner();
let count_points =
count_points.ok_or_else(|| Status::invalid_argument("CountPoints is missing"))?;
let hw_data = self.get_request_collection_hw_usage_counter_for_internal(
count_points.collection_name.clone(),
);
let res = count(
UncheckedTocProvider::new_unchecked(&self.toc),
count_points,
shard_id,
&FULL_ACCESS,
hw_data,
)
.await?;
Ok(res)
}
async fn sync(
&self,
request: Request<SyncPointsInternal>,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
validate_and_log(request.get_ref());
let inference_token = extract_token(&request);
// Internal operation, we don't expect timeout here
let inference_params = InferenceParams::new(inference_token, None);
self.sync_internal(request.into_inner(), inference_params)
.await
}
async fn query_batch(
&self,
request: Request<QueryBatchPointsInternal>,
) -> Result<Response<QueryBatchResponseInternal>, Status> {
validate_and_log(request.get_ref());
let QueryBatchPointsInternal {
collection_name,
shard_id,
query_points,
timeout,
} = request.into_inner();
let timeout = timeout.map(Duration::from_secs);
let hw_data =
self.get_request_collection_hw_usage_counter_for_internal(collection_name.clone());
query_batch_internal(
self.toc.as_ref(),
collection_name,
query_points,
shard_id,
timeout,
hw_data,
)
.await
}
async fn facet(
&self,
request: Request<FacetCountsInternal>,
) -> Result<Response<FacetResponseInternal>, Status> {
validate_and_log(request.get_ref());
let request_inner = request.into_inner();
let hw_data = self.get_request_collection_hw_usage_counter_for_internal(
request_inner.collection_name.clone(),
);
facet_counts_internal(self.toc.as_ref(), request_inner, hw_data).await
}
}
/// Unwraps an optional inner request, turning `None` into an
/// `invalid_argument` status that names the missing request type.
fn extract_internal_request<T>(request: Option<T>) -> Result<T, tonic::Status> {
    match request {
        Some(inner) => Ok(inner),
        None => Err(tonic::Status::invalid_argument(format!(
            "{} is missing",
            any::type_name::<T>()
        ))),
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/update_common.rs | src/tonic/api/update_common.rs | use std::sync::Arc;
use std::time::Instant;
use api::conversions::json::{json_path_from_proto, proto_to_payloads};
use api::grpc;
use api::grpc::qdrant::payload_index_params::IndexParams;
use api::grpc::qdrant::points_update_operation::{ClearPayload, Operation, PointStructList};
use api::grpc::qdrant::{
ClearPayloadPoints, CreateFieldIndexCollection, DeleteFieldIndexCollection,
DeletePayloadPoints, DeletePointVectors, DeletePoints, FieldType, PayloadIndexParams,
PointsOperationResponseInternal, PointsSelector, SetPayloadPoints, SyncPoints,
UpdateBatchPoints, UpdateBatchResponse, UpdatePointVectors, UpsertPoints,
points_update_operation,
};
use api::grpc::{HardwareUsage, InferenceUsage, Usage};
use api::rest::schema::{PointInsertOperations, PointsList};
use api::rest::{PointStruct, PointVectors, ShardKeySelector, UpdateVectors, VectorStruct};
use collection::operations::CollectionUpdateOperations;
use collection::operations::conversions::try_points_selector_from_grpc;
use collection::operations::payload_ops::DeletePayload;
use collection::operations::point_ops::{self, PointOperations, PointSyncOperation};
use collection::operations::vector_ops::DeleteVectors;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use itertools::Itertools;
use segment::types::{
ExtendedPointId, Filter, PayloadFieldSchema, PayloadSchemaParams, PayloadSchemaType,
};
use storage::content_manager::toc::TableOfContent;
use storage::content_manager::toc::request_hw_counter::RequestHwCounter;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use tonic::{Response, Status};
use crate::common::inference::params::InferenceParams;
use crate::common::inference::service::InferenceType;
use crate::common::inference::update_requests::convert_point_struct;
use crate::common::strict_mode::*;
use crate::common::update::*;
/// Handles a gRPC upsert request: converts the protobuf points into REST
/// point structs, runs the (possibly inference-backed) upsert, and wraps
/// the result together with hardware and inference usage into a response.
pub async fn upsert(
    toc_provider: impl CheckedTocProvider,
    upsert_points: UpsertPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    inference_params: InferenceParams,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let UpsertPoints {
        collection_name,
        wait,
        points,
        ordering,
        shard_key_selector,
        update_filter,
        timeout,
    } = upsert_points;
    // Convert each gRPC point; the first conversion failure aborts the request.
    let points: Result<_, _> = points.into_iter().map(PointStruct::try_from).collect();
    let operation = PointInsertOperations::PointsList(PointsList {
        points: points?,
        shard_key: shard_key_selector
            .map(ShardKeySelector::try_from)
            .transpose()?,
        update_filter: update_filter
            .map(segment::types::Filter::try_from)
            .transpose()?,
    });
    let timing = Instant::now();
    let (result, inference_usage) = do_upsert_points(
        toc_provider,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        inference_params,
        request_hw_counter.get_counter(),
    )
    .await?;
    // Report elapsed time plus both hardware and inference usage.
    let response = points_operation_response_internal_with_inference_usage(
        timing,
        result,
        request_hw_counter.to_grpc_api(),
        inference_usage.map(grpc::InferenceUsage::from),
    );
    Ok(Response::new(response))
}
/// Handles a gRPC delete-points request and reports hardware usage.
pub async fn delete(
    toc_provider: impl CheckedTocProvider,
    delete_points: DeletePoints,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let DeletePoints {
        collection_name,
        wait,
        points,
        ordering,
        shard_key_selector,
        timeout,
    } = delete_points;

    // A point selector (ids or filter) is mandatory.
    let Some(points) = points else {
        return Err(Status::invalid_argument("PointSelector is missing"));
    };
    let points_selector = try_points_selector_from_grpc(points, shard_key_selector)?;

    let timing = Instant::now();
    let result = do_delete_points(
        toc_provider,
        collection_name,
        points_selector,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        result,
        request_hw_counter.to_grpc_api(),
    )))
}
pub async fn update_vectors(
toc_provider: impl CheckedTocProvider,
update_point_vectors: UpdatePointVectors,
internal_params: InternalUpdateParams,
access: Access,
inference_params: InferenceParams,
request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
let UpdatePointVectors {
collection_name,
wait,
points,
ordering,
shard_key_selector,
update_filter,
timeout,
} = update_point_vectors;
// Build list of operation points
let mut op_points = Vec::with_capacity(points.len());
for point in points {
let id = match point.id {
Some(id) => id.try_into()?,
None => return Err(Status::invalid_argument("id is expected")),
};
let vector = match point.vectors {
Some(vectors) => VectorStruct::try_from(vectors)?,
None => return Err(Status::invalid_argument("vectors is expected")),
};
op_points.push(PointVectors { id, vector });
}
let operation = UpdateVectors {
points: op_points,
shard_key: shard_key_selector
.map(ShardKeySelector::try_from)
.transpose()?,
update_filter: update_filter
.map(segment::types::Filter::try_from)
.transpose()?,
};
let timing = Instant::now();
let (result, usage) = do_update_vectors(
toc_provider,
collection_name,
operation,
internal_params,
UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
access,
inference_params,
request_hw_counter.get_counter(),
)
.await?;
let response = points_operation_response_internal_with_inference_usage(
timing,
result,
request_hw_counter.to_grpc_api(),
usage.map(grpc::InferenceUsage::from),
);
Ok(Response::new(response))
}
/// Handles a gRPC delete-vectors request: removes the named vectors from
/// the selected points.
pub async fn delete_vectors(
    toc_provider: impl CheckedTocProvider,
    delete_point_vectors: DeletePointVectors,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let DeletePointVectors {
        collection_name,
        wait,
        points_selector,
        vectors,
        ordering,
        shard_key_selector,
        timeout,
    } = delete_point_vectors;

    let (points, filter) = extract_points_selector(points_selector)?;

    // The set of vector names to drop is mandatory.
    let Some(vectors) = vectors else {
        return Err(Status::invalid_argument("vectors is expected"));
    };

    let operation = DeleteVectors {
        points,
        filter,
        vector: vectors.names.into_iter().collect(),
        shard_key: shard_key_selector
            .map(ShardKeySelector::try_from)
            .transpose()?,
    };

    let timing = Instant::now();
    let result = do_delete_vectors(
        toc_provider,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        result,
        request_hw_counter.to_grpc_api(),
    )))
}
/// Handles a gRPC set-payload request: merges the given payload into the
/// selected points, optionally under a specific JSON path (`key`).
pub async fn set_payload(
    toc_provider: impl CheckedTocProvider,
    set_payload_points: SetPayloadPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let SetPayloadPoints {
        collection_name,
        wait,
        payload,
        points_selector,
        ordering,
        shard_key_selector,
        key,
        timeout,
    } = set_payload_points;
    // Optional JSON path restricting which payload subtree is written.
    let key = key.map(|k| json_path_from_proto(&k)).transpose()?;
    let (points, filter) = extract_points_selector(points_selector)?;
    let operation = collection::operations::payload_ops::SetPayload {
        payload: proto_to_payloads(payload)?,
        points,
        filter,
        shard_key: shard_key_selector
            .map(ShardKeySelector::try_from)
            .transpose()?,
        key,
    };
    let timing = Instant::now();
    let result = do_set_payload(
        toc_provider,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response =
        points_operation_response_internal(timing, result, request_hw_counter.to_grpc_api());
    Ok(Response::new(response))
}
/// Handles a gRPC overwrite-payload request: replaces the whole payload of
/// the selected points with the given one (ignoring any `key` in the
/// request).
pub async fn overwrite_payload(
    toc_provider: impl CheckedTocProvider,
    set_payload_points: SetPayloadPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let SetPayloadPoints {
        collection_name,
        wait,
        payload,
        points_selector,
        ordering,
        shard_key_selector,
        timeout,
        ..
    } = set_payload_points;
    let (points, filter) = extract_points_selector(points_selector)?;
    let operation = collection::operations::payload_ops::SetPayload {
        payload: proto_to_payloads(payload)?,
        points,
        filter,
        shard_key: shard_key_selector
            .map(ShardKeySelector::try_from)
            .transpose()?,
        // The overwrite operation does not support targeting a payload
        // sub-path, so `key` is always `None` here.
        key: None,
    };
    let timing = Instant::now();
    let result = do_overwrite_payload(
        toc_provider,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response =
        points_operation_response_internal(timing, result, request_hw_counter.to_grpc_api());
    Ok(Response::new(response))
}
/// Handles a gRPC delete-payload request: removes the given payload keys
/// from the selected points.
pub async fn delete_payload(
    toc_provider: impl CheckedTocProvider,
    delete_payload_points: DeletePayloadPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let DeletePayloadPoints {
        collection_name,
        wait,
        keys,
        points_selector,
        ordering,
        shard_key_selector,
        timeout,
    } = delete_payload_points;

    // Parse every key as a JSON path; the first invalid one aborts.
    let parsed_keys = keys.iter().map(|k| json_path_from_proto(k)).try_collect()?;
    let (points, filter) = extract_points_selector(points_selector)?;

    let operation = DeletePayload {
        keys: parsed_keys,
        points,
        filter,
        shard_key: shard_key_selector
            .map(ShardKeySelector::try_from)
            .transpose()?,
    };

    let timing = Instant::now();
    let result = do_delete_payload(
        toc_provider,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        result,
        request_hw_counter.to_grpc_api(),
    )))
}
/// Handles a gRPC clear-payload request: wipes the entire payload of the
/// selected points.
pub async fn clear_payload(
    toc_provider: impl CheckedTocProvider,
    clear_payload_points: ClearPayloadPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let ClearPayloadPoints {
        collection_name,
        wait,
        points,
        ordering,
        shard_key_selector,
        timeout,
    } = clear_payload_points;

    // A point selector (ids or filter) is mandatory.
    let Some(points) = points else {
        return Err(Status::invalid_argument("PointSelector is missing"));
    };
    let points_selector = try_points_selector_from_grpc(points, shard_key_selector)?;

    let timing = Instant::now();
    let result = do_clear_payload(
        toc_provider,
        collection_name,
        points_selector,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        result,
        request_hw_counter.to_grpc_api(),
    )))
}
/// Handles a gRPC batch-update request by applying the contained
/// operations sequentially, in request order.
///
/// Each sub-operation is dispatched to its single-operation handler with
/// the shared `collection_name`, `wait`, `ordering` and `timeout`; the
/// first failing operation aborts the whole batch with its error.
/// Inference usage is drained from every per-operation response and
/// reported once, aggregated, in the batch response.
pub async fn update_batch(
    dispatcher: &Dispatcher,
    update_batch_points: UpdateBatchPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    inference_params: InferenceParams,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<UpdateBatchResponse>, Status> {
    let UpdateBatchPoints {
        collection_name,
        wait,
        operations,
        ordering,
        timeout,
    } = update_batch_points;
    let timing = Instant::now();
    let mut results = Vec::with_capacity(operations.len());
    let mut total_inference_usage = InferenceUsage::default();
    for op in operations {
        let operation = op
            .operation
            .ok_or_else(|| Status::invalid_argument("Operation is missing"))?;
        // Fresh clones per iteration: each handler consumes its request.
        let collection_name = collection_name.clone();
        let ordering = ordering.clone();
        let mut result = match operation {
            points_update_operation::Operation::Upsert(PointStructList {
                points,
                shard_key_selector,
                update_filter,
            }) => {
                upsert(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    UpsertPoints {
                        collection_name,
                        wait,
                        points,
                        ordering,
                        shard_key_selector,
                        update_filter,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    inference_params.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            // Deprecated delete variant without a shard key selector.
            points_update_operation::Operation::DeleteDeprecated(points) => {
                delete(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    DeletePoints {
                        collection_name,
                        wait,
                        points: Some(points),
                        ordering,
                        shard_key_selector: None,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::SetPayload(
                points_update_operation::SetPayload {
                    payload,
                    points_selector,
                    shard_key_selector,
                    key,
                },
            ) => {
                set_payload(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    SetPayloadPoints {
                        collection_name,
                        wait,
                        payload,
                        points_selector,
                        ordering,
                        shard_key_selector,
                        key,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::OverwritePayload(
                points_update_operation::OverwritePayload {
                    payload,
                    points_selector,
                    shard_key_selector,
                    ..
                },
            ) => {
                overwrite_payload(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    SetPayloadPoints {
                        collection_name,
                        wait,
                        payload,
                        points_selector,
                        ordering,
                        shard_key_selector,
                        // overwrite operation doesn't support it
                        key: None,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::DeletePayload(
                points_update_operation::DeletePayload {
                    keys,
                    points_selector,
                    shard_key_selector,
                },
            ) => {
                delete_payload(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    DeletePayloadPoints {
                        collection_name,
                        wait,
                        keys,
                        points_selector,
                        ordering,
                        shard_key_selector,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::ClearPayload(ClearPayload {
                points,
                shard_key_selector,
            }) => {
                clear_payload(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    ClearPayloadPoints {
                        collection_name,
                        wait,
                        points,
                        ordering,
                        shard_key_selector,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::UpdateVectors(
                points_update_operation::UpdateVectors {
                    points,
                    shard_key_selector,
                    update_filter,
                },
            ) => {
                update_vectors(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    UpdatePointVectors {
                        collection_name,
                        wait,
                        points,
                        ordering,
                        shard_key_selector,
                        update_filter,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    inference_params.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            points_update_operation::Operation::DeleteVectors(
                points_update_operation::DeleteVectors {
                    points_selector,
                    vectors,
                    shard_key_selector,
                },
            ) => {
                delete_vectors(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    DeletePointVectors {
                        collection_name,
                        wait,
                        points_selector,
                        vectors,
                        ordering,
                        shard_key_selector,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            // Deprecated clear-payload variant without a shard key selector.
            Operation::ClearPayloadDeprecated(selector) => {
                clear_payload(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    ClearPayloadPoints {
                        collection_name,
                        wait,
                        points: Some(selector),
                        ordering,
                        shard_key_selector: None,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
            Operation::DeletePoints(points_update_operation::DeletePoints {
                points,
                shard_key_selector,
            }) => {
                delete(
                    StrictModeCheckedTocProvider::new(dispatcher),
                    DeletePoints {
                        collection_name,
                        wait,
                        points,
                        ordering,
                        shard_key_selector,
                        timeout,
                    },
                    internal_params,
                    access.clone(),
                    request_hw_counter.clone(),
                )
                .await
            }
        }?;
        // Drain per-operation inference usage into the batch-wide total.
        total_inference_usage.merge_opt(result.get_mut().inference_usage.take());
        results.push(result);
    }
    Ok(Response::new(UpdateBatchResponse {
        result: results
            .into_iter()
            // NOTE(review): assumes every handler above always sets
            // `result` to `Some`; `unwrap` would panic otherwise — confirm.
            .map(|response| grpc::UpdateResult::from(response.into_inner().result.unwrap()))
            .collect(),
        time: timing.elapsed().as_secs_f64(),
        usage: Usage::new(
            request_hw_counter.to_grpc_api(),
            total_inference_usage.into_non_empty(),
        )
        .into_non_empty(),
    }))
}
/// Handles a gRPC request to create a payload field index on a collection.
///
/// The response carries no hardware-usage figure, since the operation goes
/// through consensus and any measurement would be inaccurate.
pub async fn create_field_index(
    dispatcher: Arc<Dispatcher>,
    create_field_index_collection: CreateFieldIndexCollection,
    internal_params: InternalUpdateParams,
    access: Access,
    request_hw_counter: RequestHwCounter,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let CreateFieldIndexCollection {
        collection_name,
        wait,
        field_name,
        field_type,
        field_index_params,
        ordering,
        timeout,
    } = create_field_index_collection;
    let field_name = json_path_from_proto(&field_name)?;
    // Combine the declared field type and optional index parameters into a
    // payload schema (may be `None`).
    let field_schema = convert_field_type(field_type, field_index_params)?;
    let operation = CreateFieldIndex {
        field_name,
        field_schema,
    };
    let timing = Instant::now();
    let result = do_create_index(
        dispatcher,
        collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        access,
        request_hw_counter.get_counter(),
    )
    .await?;
    let response = points_operation_response_internal(
        timing, result,
        None, // Do not measure API usage for this operation, as it might be inaccurate due to consensus involvement
    );
    Ok(Response::new(response))
}
/// Internal (node-to-node) variant of payload-field-index creation.
///
/// Unlike [`create_field_index`], it bypasses the dispatcher and operates
/// on the table of contents directly; hardware usage is not measured.
pub async fn create_field_index_internal(
    toc: Arc<TableOfContent>,
    create_field_index_collection: CreateFieldIndexCollection,
    internal_params: InternalUpdateParams,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let CreateFieldIndexCollection {
        collection_name,
        wait,
        field_name,
        field_type,
        field_index_params,
        ordering,
        timeout,
    } = create_field_index_collection;
    let field_name = json_path_from_proto(&field_name)?;
    // Combine the declared field type and optional index parameters into a
    // payload schema (may be `None`).
    let field_schema = convert_field_type(field_type, field_index_params)?;
    let timing = Instant::now();
    let result = do_create_index_internal(
        toc,
        collection_name,
        field_name,
        field_schema,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        HwMeasurementAcc::disposable(), // API unmeasured
    )
    .await?;
    let response = points_operation_response_internal(timing, result, None);
    Ok(Response::new(response))
}
/// Handles a gRPC request to drop a payload field index.
///
/// Hardware usage is intentionally not measured for this operation.
pub async fn delete_field_index(
    dispatcher: Arc<Dispatcher>,
    delete_field_index_collection: DeleteFieldIndexCollection,
    internal_params: InternalUpdateParams,
    access: Access,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let DeleteFieldIndexCollection {
        collection_name,
        wait,
        field_name,
        ordering,
        timeout,
    } = delete_field_index_collection;

    let parsed_field = json_path_from_proto(&field_name)?;
    let update_params =
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?;

    let timing = Instant::now();
    let update_result = do_delete_index(
        dispatcher,
        collection_name,
        parsed_field,
        internal_params,
        update_params,
        access,
        HwMeasurementAcc::disposable(), // API unmeasured
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        update_result,
        None,
    )))
}
/// Internal (node-to-node) variant of payload-field-index deletion,
/// operating directly on the table of contents.
pub async fn delete_field_index_internal(
    toc: Arc<TableOfContent>,
    delete_field_index_collection: DeleteFieldIndexCollection,
    internal_params: InternalUpdateParams,
) -> Result<Response<PointsOperationResponseInternal>, Status> {
    let DeleteFieldIndexCollection {
        collection_name,
        wait,
        field_name,
        ordering,
        timeout,
    } = delete_field_index_collection;

    let parsed_field = json_path_from_proto(&field_name)?;
    let update_params =
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?;

    let timing = Instant::now();
    let update_result = do_delete_index_internal(
        toc,
        collection_name,
        parsed_field,
        internal_params,
        update_params,
        HwMeasurementAcc::disposable(), // API unmeasured
    )
    .await?;

    Ok(Response::new(points_operation_response_internal(
        timing,
        update_result,
        None,
    )))
}
/// Handles a points-sync request on the internal API.
///
/// Returns the operation response together with the (possibly empty)
/// inference usage so the caller can aggregate it.
pub async fn sync(
    toc: Arc<TableOfContent>,
    sync_points: SyncPoints,
    internal_params: InternalUpdateParams,
    access: Access,
    inference_params: InferenceParams,
) -> Result<Response<(PointsOperationResponseInternal, InferenceUsage)>, Status> {
    let SyncPoints {
        collection_name,
        wait,
        points,
        from_id,
        to_id,
        ordering,
        timeout,
    } = sync_points;
    let timing = Instant::now();
    // Convert each gRPC point; the first conversion failure aborts.
    let point_structs: Result<_, _> = points.into_iter().map(PointStruct::try_from).collect();
    // No actual inference should happen here, as we are just syncing existing points
    // So, this function is used for consistency only
    let (points, usage) =
        convert_point_struct(point_structs?, InferenceType::Update, inference_params).await?;
    let operation = PointSyncOperation {
        points,
        // Optional sync-range boundaries; id conversion failures abort.
        from_id: from_id.map(|x| x.try_into()).transpose()?,
        to_id: to_id.map(|x| x.try_into()).transpose()?,
    };
    let operation =
        CollectionUpdateOperations::PointOperation(PointOperations::SyncPoints(operation));
    let result = update(
        &toc,
        &collection_name,
        operation,
        internal_params,
        UpdateParams::from_grpc(wait, ordering, timeout.map(std::time::Duration::from_secs))?,
        None,
        access,
        HwMeasurementAcc::disposable(), // API unmeasured
    )
    .await?;
    let response = points_operation_response_internal(timing, result, None);
    Ok(Response::new((response, usage.unwrap_or_default().into())))
}
/// Builds an internal points-operation response from an update result,
/// recording the elapsed time plus optional hardware and inference usage.
pub fn points_operation_response_internal_with_inference_usage(
    timing: Instant,
    update_result: collection::operations::types::UpdateResult,
    hardware_usage: Option<HardwareUsage>,
    inference_usage: Option<InferenceUsage>,
) -> PointsOperationResponseInternal {
    let elapsed = timing.elapsed().as_secs_f64();
    PointsOperationResponseInternal {
        result: Some(update_result.into()),
        time: elapsed,
        hardware_usage,
        inference_usage,
    }
}
/// Convenience wrapper for building an internal points-operation response
/// when the operation involved no inference.
pub fn points_operation_response_internal(
    timing: Instant,
    update_result: collection::operations::types::UpdateResult,
    hardware_usage: Option<HardwareUsage>,
) -> PointsOperationResponseInternal {
    points_operation_response_internal_with_inference_usage(
        timing,
        update_result,
        hardware_usage,
        None, // no inference for this operation
    )
}
/// Splits a gRPC `PointsSelector` into either an explicit id list or a
/// filter — exactly one of the returned options is `Some`.
///
/// A missing selector is an `invalid_argument` error.
fn extract_points_selector(
    points_selector: Option<PointsSelector>,
) -> Result<(Option<Vec<ExtendedPointId>>, Option<Filter>), Status> {
    let Some(selector) = points_selector else {
        return Err(Status::invalid_argument("points_selector is expected"));
    };
    match try_points_selector_from_grpc(selector, None)? {
        point_ops::PointsSelector::PointIdsSelector(ids) => Ok((Some(ids.points), None)),
        point_ops::PointsSelector::FilterSelector(selector) => {
            Ok((None, Some(selector.filter)))
        }
    }
}
fn convert_field_type(
field_type: Option<i32>,
field_index_params: Option<PayloadIndexParams>,
) -> Result<Option<PayloadFieldSchema>, Status> {
let field_type_parsed = field_type
.map(|x| FieldType::try_from(x).ok())
.ok_or_else(|| Status::invalid_argument("cannot convert field_type"))?;
let field_schema = match (field_type_parsed, field_index_params) {
(
Some(field_type),
Some(PayloadIndexParams {
index_params: Some(index_params),
}),
) => {
let schema_params = match index_params {
// Parameterized keyword type
IndexParams::KeywordIndexParams(keyword_index_params) => {
matches!(field_type, FieldType::Keyword).then(|| {
TryFrom::try_from(keyword_index_params).map(PayloadSchemaParams::Keyword)
})
}
IndexParams::IntegerIndexParams(integer_index_params) => {
matches!(field_type, FieldType::Integer).then(|| {
TryFrom::try_from(integer_index_params).map(PayloadSchemaParams::Integer)
})
}
// Parameterized float type
IndexParams::FloatIndexParams(float_index_params) => {
matches!(field_type, FieldType::Float).then(|| {
TryFrom::try_from(float_index_params).map(PayloadSchemaParams::Float)
})
}
IndexParams::GeoIndexParams(geo_index_params) => {
matches!(field_type, FieldType::Geo)
.then(|| TryFrom::try_from(geo_index_params).map(PayloadSchemaParams::Geo))
}
// Parameterized text type
IndexParams::TextIndexParams(text_index_params) => {
matches!(field_type, FieldType::Text).then(|| {
TryFrom::try_from(text_index_params).map(PayloadSchemaParams::Text)
})
}
// Parameterized bool type
IndexParams::BoolIndexParams(bool_index_params) => {
matches!(field_type, FieldType::Bool).then(|| {
TryFrom::try_from(bool_index_params).map(PayloadSchemaParams::Bool)
})
}
// Parameterized Datetime type
IndexParams::DatetimeIndexParams(datetime_index_params) => {
matches!(field_type, FieldType::Datetime).then(|| {
TryFrom::try_from(datetime_index_params).map(PayloadSchemaParams::Datetime)
})
}
// Parameterized Uuid type
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/tonic/api/snapshots_api.rs | src/tonic/api/snapshots_api.rs | use std::sync::Arc;
use std::time::Instant;
use api::grpc::qdrant::shard_snapshots_server::ShardSnapshots;
use api::grpc::qdrant::snapshots_server::Snapshots;
use api::grpc::qdrant::{
CreateFullSnapshotRequest, CreateShardSnapshotRequest, CreateSnapshotRequest,
CreateSnapshotResponse, DeleteFullSnapshotRequest, DeleteShardSnapshotRequest,
DeleteSnapshotRequest, DeleteSnapshotResponse, ListFullSnapshotsRequest,
ListShardSnapshotsRequest, ListSnapshotsRequest, ListSnapshotsResponse,
RecoverShardSnapshotRequest, RecoverSnapshotResponse,
};
use collection::operations::verification::new_unchecked_verification_pass;
use storage::content_manager::snapshots::{
do_create_full_snapshot, do_delete_collection_snapshot, do_delete_full_snapshot,
do_list_full_snapshots,
};
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use tonic::{Request, Response, Status, async_trait};
use super::{validate, validate_and_log};
use crate::common;
use crate::common::collections::{do_create_snapshot, do_list_snapshots};
use crate::common::http_client::HttpClient;
use crate::tonic::auth::extract_access;
/// gRPC service implementing the collection-level and full-storage
/// snapshot endpoints.
pub struct SnapshotsService {
    // Entry point to storage; also used for access-checked TOC lookups.
    dispatcher: Arc<Dispatcher>,
}
impl SnapshotsService {
    /// Creates the service around the shared storage dispatcher.
    pub fn new(dispatcher: Arc<Dispatcher>) -> Self {
        Self { dispatcher }
    }
}
#[async_trait]
impl Snapshots for SnapshotsService {
    /// Creates a snapshot of a single collection and returns its
    /// description plus the elapsed time.
    async fn create(
        &self,
        mut request: Request<CreateSnapshotRequest>,
    ) -> Result<Response<CreateSnapshotResponse>, Status> {
        validate(request.get_ref())?;
        let access = extract_access(&mut request);
        let collection_name = request.into_inner().collection_name;
        let timing = Instant::now();
        let dispatcher = self.dispatcher.clone();
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let response = do_create_snapshot(
            Arc::clone(dispatcher.toc(&access, &pass)),
            access,
            &collection_name,
        )
        .await?;
        Ok(Response::new(CreateSnapshotResponse {
            snapshot_description: Some(response.into()),
            time: timing.elapsed().as_secs_f64(),
        }))
    }

    /// Lists all snapshots of a single collection.
    async fn list(
        &self,
        mut request: Request<ListSnapshotsRequest>,
    ) -> Result<Response<ListSnapshotsResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        let ListSnapshotsRequest { collection_name } = request.into_inner();
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let snapshots = do_list_snapshots(
            self.dispatcher.toc(&access, &pass),
            access,
            &collection_name,
        )
        .await?;
        Ok(Response::new(ListSnapshotsResponse {
            snapshot_descriptions: snapshots.into_iter().map(|s| s.into()).collect(),
            time: timing.elapsed().as_secs_f64(),
        }))
    }

    /// Deletes a named snapshot of a single collection.
    async fn delete(
        &self,
        mut request: Request<DeleteSnapshotRequest>,
    ) -> Result<Response<DeleteSnapshotResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        let DeleteSnapshotRequest {
            collection_name,
            snapshot_name,
        } = request.into_inner();
        // Only the elapsed time is reported; the deletion result itself
        // carries no payload.
        let _response = do_delete_collection_snapshot(
            &self.dispatcher,
            access,
            &collection_name,
            &snapshot_name,
        )
        .await?;
        Ok(Response::new(DeleteSnapshotResponse {
            time: timing.elapsed().as_secs_f64(),
        }))
    }

    /// Creates a snapshot of the whole storage.
    async fn create_full(
        &self,
        mut request: Request<CreateFullSnapshotRequest>,
    ) -> Result<Response<CreateSnapshotResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        let response = do_create_full_snapshot(&self.dispatcher, access).await?;
        Ok(Response::new(CreateSnapshotResponse {
            snapshot_description: Some(response.into()),
            time: timing.elapsed().as_secs_f64(),
        }))
    }

    /// Lists all full-storage snapshots.
    async fn list_full(
        &self,
        mut request: Request<ListFullSnapshotsRequest>,
    ) -> Result<Response<ListSnapshotsResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        // Nothing to verify here.
        let pass = new_unchecked_verification_pass();
        let snapshots = do_list_full_snapshots(self.dispatcher.toc(&access, &pass), access).await?;
        Ok(Response::new(ListSnapshotsResponse {
            snapshot_descriptions: snapshots.into_iter().map(|s| s.into()).collect(),
            time: timing.elapsed().as_secs_f64(),
        }))
    }

    /// Deletes a named full-storage snapshot.
    async fn delete_full(
        &self,
        mut request: Request<DeleteFullSnapshotRequest>,
    ) -> Result<Response<DeleteSnapshotResponse>, Status> {
        validate(request.get_ref())?;
        let timing = Instant::now();
        let access = extract_access(&mut request);
        let snapshot_name = request.into_inner().snapshot_name;
        let _response = do_delete_full_snapshot(&self.dispatcher, access, &snapshot_name).await?;
        Ok(Response::new(DeleteSnapshotResponse {
            time: timing.elapsed().as_secs_f64(),
        }))
    }
}
/// gRPC service implementing per-shard snapshot RPCs (create/list/delete/recover).
pub struct ShardSnapshotsService {
    // Direct table-of-content handle; shard snapshot ops bypass the dispatcher.
    toc: Arc<TableOfContent>,
    // HTTP client passed to snapshot recovery (used when the snapshot is remote).
    http_client: HttpClient,
}
impl ShardSnapshotsService {
pub fn new(toc: Arc<TableOfContent>, http_client: HttpClient) -> Self {
Self { toc, http_client }
}
}
#[async_trait]
impl ShardSnapshots for ShardSnapshotsService {
    /// Create a snapshot of a single shard of a collection.
    async fn create(
        &self,
        mut request: Request<CreateShardSnapshotRequest>,
    ) -> Result<Response<CreateSnapshotResponse>, Status> {
        let access = extract_access(&mut request);
        let request = request.into_inner();
        // Not followed by `?` — presumably logs validation issues without
        // rejecting the request; confirm against `validate_and_log`.
        validate_and_log(&request);
        let timing = Instant::now();
        let snapshot_description = common::snapshots::create_shard_snapshot(
            self.toc.clone(),
            access,
            request.collection_name,
            request.shard_id,
        )
        .await?;
        Ok(Response::new(CreateSnapshotResponse {
            snapshot_description: Some(snapshot_description.into()),
            time: timing.elapsed().as_secs_f64(),
        }))
    }
    /// List snapshots of a single shard.
    async fn list(
        &self,
        mut request: Request<ListShardSnapshotsRequest>,
    ) -> Result<Response<ListSnapshotsResponse>, Status> {
        let access = extract_access(&mut request);
        let request = request.into_inner();
        validate_and_log(&request);
        let timing = Instant::now();
        let snapshot_descriptions = common::snapshots::list_shard_snapshots(
            self.toc.clone(),
            access,
            request.collection_name,
            request.shard_id,
        )
        .await?;
        Ok(Response::new(ListSnapshotsResponse {
            snapshot_descriptions: snapshot_descriptions.into_iter().map(Into::into).collect(),
            time: timing.elapsed().as_secs_f64(),
        }))
    }
    /// Delete a named snapshot of a single shard.
    async fn delete(
        &self,
        mut request: Request<DeleteShardSnapshotRequest>,
    ) -> Result<Response<DeleteSnapshotResponse>, Status> {
        let access = extract_access(&mut request);
        let request = request.into_inner();
        validate_and_log(&request);
        let timing = Instant::now();
        common::snapshots::delete_shard_snapshot(
            self.toc.clone(),
            access,
            request.collection_name,
            request.shard_id,
            request.snapshot_name,
        )
        .await?;
        Ok(Response::new(DeleteSnapshotResponse {
            time: timing.elapsed().as_secs_f64(),
        }))
    }
    /// Recover a shard from a snapshot; `http_client` is used for the download
    /// when the snapshot location is remote.
    async fn recover(
        &self,
        mut request: Request<RecoverShardSnapshotRequest>,
    ) -> Result<Response<RecoverSnapshotResponse>, Status> {
        let access = extract_access(&mut request);
        let request = request.into_inner();
        validate_and_log(&request);
        let timing = Instant::now();
        common::snapshots::recover_shard_snapshot(
            self.toc.clone(),
            access,
            request.collection_name,
            request.shard_id,
            // Location and priority arrive as raw protobuf values; conversion
            // may fail and rejects the request via `?`.
            request.snapshot_location.try_into()?,
            request.snapshot_priority.try_into()?,
            request.checksum,
            self.http_client.clone(),
            request.api_key,
        )
        .await?;
        Ok(Response::new(RecoverSnapshotResponse {
            time: timing.elapsed().as_secs_f64(),
        }))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_reporting.rs | src/common/telemetry_reporting.rs | use std::sync::Arc;
use std::time::Duration;
use common::defaults::APP_USER_AGENT;
use common::types::{DetailsLevel, TelemetryDetail};
use reqwest::Client;
use segment::common::anonymize::Anonymize;
use storage::content_manager::errors::StorageResult;
use storage::rbac::Access;
use tokio::sync::Mutex;
use super::telemetry::TelemetryCollector;
/// Detail level used for periodic telemetry reports: level-2 details, no histograms.
const DETAIL: TelemetryDetail = TelemetryDetail {
    level: DetailsLevel::Level2,
    histograms: false,
};
/// How often a telemetry report is submitted.
const REPORTING_INTERVAL: Duration = Duration::from_secs(60 * 60); // One hour
/// Periodically collects anonymized telemetry and POSTs it to the Qdrant endpoint.
pub struct TelemetryReporter {
    telemetry_url: String,
    telemetry: Arc<Mutex<TelemetryCollector>>,
}
/// Full access used internally so the reporter can gather data from all components.
const FULL_ACCESS: Access = Access::full("Telemetry reporter");
impl TelemetryReporter {
    /// Create a reporter; debug builds target the staging endpoint so that
    /// development traffic does not pollute production telemetry.
    fn new(telemetry: Arc<Mutex<TelemetryCollector>>) -> Self {
        let telemetry_url = if cfg!(debug_assertions) {
            "https://staging-telemetry.qdrant.io".to_string()
        } else {
            "https://telemetry.qdrant.io".to_string()
        };
        Self {
            telemetry_url,
            telemetry,
        }
    }
    /// Collect, anonymize and POST one telemetry snapshot.
    ///
    /// A non-success HTTP status is logged but not returned as an error.
    async fn report(&self, client: &Client) -> StorageResult<()> {
        let data = self
            .telemetry
            .lock()
            .await
            .prepare_data(&FULL_ACCESS, DETAIL, None)
            .await?
            // Strip identifying information before leaving the process.
            .anonymize();
        let data = serde_json::to_string(&data)?;
        let resp = client
            .post(&self.telemetry_url)
            .body(data)
            .header("Content-Type", "application/json")
            .send()
            .await?;
        if !resp.status().is_success() {
            log::error!(
                "Failed to report telemetry: resp status:{:?} resp body:{:?}",
                resp.status(),
                resp.text().await?
            );
        }
        Ok(())
    }
    /// Endless loop: send a report, then sleep for `REPORTING_INTERVAL`.
    ///
    /// Panics if the HTTP client cannot be constructed at startup.
    pub async fn run(telemetry: Arc<Mutex<TelemetryCollector>>) {
        let reporter = Self::new(telemetry);
        let client = Client::builder()
            .user_agent(APP_USER_AGENT.as_str())
            .build()
            .unwrap();
        loop {
            // Report failures are logged and retried on the next tick.
            if let Err(err) = reporter.report(&client).await {
                log::error!("Failed to report telemetry {err}")
            }
            tokio::time::sleep(REPORTING_INTERVAL).await;
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/pyroscope_state.rs | src/common/pyroscope_state.rs | #[cfg(target_os = "linux")]
pub mod pyro {
    use pyroscope::pyroscope::PyroscopeAgentRunning;
    use pyroscope::{PyroscopeAgent, PyroscopeError};
    use pyroscope_pprofrs::{PprofConfig, pprof_backend};
    use crate::common::debugger::PyroscopeConfig;
    /// Holds a running pyroscope profiling agent together with the config it
    /// was started from. Dropping this stops and shuts down the agent.
    pub struct PyroscopeState {
        pub config: PyroscopeConfig,
        // `None` once the agent has been stopped via `stop_agent`.
        pub agent: Option<PyroscopeAgent<PyroscopeAgentRunning>>,
    }
    impl PyroscopeState {
        /// Build and start a pyroscope agent for the given config.
        fn build_agent(
            config: &PyroscopeConfig,
        ) -> Result<PyroscopeAgent<PyroscopeAgentRunning>, PyroscopeError> {
            // Sampling rate defaults to 100 when not configured.
            let pprof_config = PprofConfig::new().sample_rate(config.sampling_rate.unwrap_or(100));
            let backend_impl = pprof_backend(pprof_config);
            log::info!(
                "Starting pyroscope agent with identifier {}",
                &config.identifier
            );
            // TODO: Add more tags like peerId and peerUrl
            let agent = PyroscopeAgent::builder(config.url.clone(), "qdrant".to_string())
                .backend(backend_impl)
                .tags(vec![("app", "Qdrant"), ("identifier", &config.identifier)])
                .build()?;
            let running_agent = agent.start()?;
            Ok(running_agent)
        }
        /// Start profiling if a config is provided.
        ///
        /// Returns `None` when profiling is disabled or the agent fails to
        /// start (the failure is logged as a warning, not propagated).
        pub fn from_config(config: Option<PyroscopeConfig>) -> Option<Self> {
            match config {
                Some(pyro_config) => {
                    let agent = PyroscopeState::build_agent(&pyro_config);
                    match agent {
                        Ok(agent) => Some(PyroscopeState {
                            config: pyro_config,
                            agent: Some(agent),
                        }),
                        Err(err) => {
                            log::warn!("Pyroscope agent failed to start {err}");
                            None
                        }
                    }
                }
                None => None,
            }
        }
        /// Stop and shut down the agent, if one is still running.
        ///
        /// Returns `true` on success or when there was no agent to stop.
        pub fn stop_agent(&mut self) -> bool {
            log::info!("Stopping pyroscope agent");
            if let Some(agent) = self.agent.take() {
                return match agent.stop() {
                    Ok(stopped_agent) => {
                        log::info!("Stopped pyroscope agent. Shutting it down");
                        stopped_agent.shutdown();
                        log::info!("Pyroscope agent shut down completed.");
                        true
                    }
                    Err(err) => {
                        log::warn!("Pyroscope agent failed to stop {err}");
                        false
                    }
                };
            }
            true
        }
    }
    impl Drop for PyroscopeState {
        fn drop(&mut self) {
            // Make sure the agent does not outlive the state object.
            self.stop_agent();
        }
    }
}
#[cfg(not(target_os = "linux"))]
pub mod pyro {
    use crate::common::debugger::PyroscopeConfig;
    /// Stub profiler state for platforms where pyroscope is unsupported.
    pub struct PyroscopeState {}
    impl PyroscopeState {
        /// Profiling is only supported on Linux; always returns `None` here.
        pub fn from_config(_config: Option<PyroscopeConfig>) -> Option<Self> {
            None
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/http_client.rs | src/common/http_client.rs | use std::path::Path;
use std::{io, result};
use common::defaults::APP_USER_AGENT;
use fs_err as fs;
use reqwest::header::{HeaderMap, HeaderValue, InvalidHeaderValue};
use storage::content_manager::errors::StorageError;
use super::auth::HTTP_HEADER_API_KEY;
use crate::settings::{Settings, TlsConfig};
/// Factory for outgoing HTTP(S) clients, carrying the node's TLS settings.
#[derive(Clone)]
pub struct HttpClient {
    // `None` when TLS is disabled in the service settings.
    tls_config: Option<TlsConfig>,
    // Whether to attach a client identity (cert + key) for mutual TLS.
    verify_https_client_certificate: bool,
}
impl HttpClient {
pub fn from_settings(settings: &Settings) -> Result<Self> {
let tls_config = if settings.service.enable_tls {
let Some(tls_config) = settings.tls.clone() else {
return Err(Error::TlsConfigUndefined);
};
Some(tls_config)
} else {
None
};
let verify_https_client_certificate = settings.service.verify_https_client_certificate;
let http_client = Self {
tls_config,
verify_https_client_certificate,
};
Ok(http_client)
}
/// Create a new HTTP(S) client
///
/// An API key can be optionally provided to be used in this HTTP client. It'll send the API
/// key as `Api-key` header in every request.
///
/// # Warning
///
/// Setting an API key may leak when the client is used to send a request to a malicious
/// server. This is potentially dangerous if a user has control over what URL is accessed.
///
/// For this reason the API key is not set by default as provided in the configuration. It must
/// be explicitly provided when creating the HTTP client.
pub fn client(&self, api_key: Option<&str>) -> Result<reqwest::Client> {
https_client(
api_key,
self.tls_config.as_ref(),
self.verify_https_client_certificate,
)
}
}
/// Build a `reqwest` HTTP(S) client.
///
/// Applies, in order: an optional custom CA root certificate, an optional
/// client identity for mutual TLS, and an optional sensitive `Api-key`
/// default header.
fn https_client(
    api_key: Option<&str>,
    tls_config: Option<&TlsConfig>,
    verify_https_client_certificate: bool,
) -> Result<reqwest::Client> {
    let mut builder = reqwest::Client::builder().user_agent(APP_USER_AGENT.as_str());
    // Configure TLS root certificate and validation
    if let Some(tls_config) = tls_config {
        if let Some(ca_cert) = &tls_config.ca_cert {
            match https_client_ca_cert(ca_cert) {
                Ok(ca_cert) => builder = builder.add_root_certificate(ca_cert),
                Err(err) => {
                    // I think it might be Ok to not fail here, if root certificate is not found
                    // There are 2 possible scenarios:
                    //
                    // 1. Server TLS is either not used or it uses some other CA certificate (like global one)
                    // 2. Server TLS is using self-signed certificate, and we should have it.
                    //
                    // In first case, we don't need to load the CA certificate, everything will work in either case.
                    // In second case, we should have the CA certificate, request will fail because of invalid certificate.
                    //
                    // So both scenarios work exactly the same way if we fail early or not.
                    // Warning message is needed for easier debugging in case of second scenario.
                    log::warn!(
                        "Failed to load CA certificate, skipping HTTPS client CA certificate configuration: {err}",
                    );
                }
            }
        }
        // Unlike the CA certificate above, a missing identity is a hard error.
        if verify_https_client_certificate {
            builder = builder.identity(https_client_identity(
                tls_config.cert.as_ref(),
                tls_config.key.as_ref(),
            )?);
        }
    }
    // Attach API key as sensitive header
    if let Some(api_key) = api_key {
        let mut headers = HeaderMap::new();
        let mut api_key_value = HeaderValue::from_str(api_key).map_err(Error::MalformedApiKey)?;
        // Mark the header value sensitive so it is not exposed in debug output.
        api_key_value.set_sensitive(true);
        headers.insert(HTTP_HEADER_API_KEY, api_key_value);
        builder = builder.default_headers(headers);
    }
    let client = builder.build()?;
    Ok(client)
}
fn https_client_ca_cert(ca_cert: impl AsRef<Path>) -> Result<reqwest::tls::Certificate> {
let ca_cert_pem = fs::read(ca_cert.as_ref())
.map_err(|err| Error::failed_to_read(err, "CA certificate", ca_cert.as_ref()))?;
let ca_cert = reqwest::Certificate::from_pem(&ca_cert_pem)?;
Ok(ca_cert)
}
fn https_client_identity(cert: &Path, key: &Path) -> Result<reqwest::tls::Identity> {
let mut identity_pem =
fs::read(cert).map_err(|err| Error::failed_to_read(err, "certificate", cert))?;
let mut key_file = fs::File::open(key).map_err(|err| Error::failed_to_read(err, "key", key))?;
// Concatenate certificate and key into a single PEM bytes
io::copy(&mut key_file, &mut identity_pem)
.map_err(|err| Error::failed_to_read(err, "key", key))?;
let identity = reqwest::Identity::from_pem(&identity_pem)?;
Ok(identity)
}
/// Result alias for HTTP(S) client construction.
pub type Result<T, E = Error> = result::Result<T, E>;
/// Errors that can occur while building an HTTP(S) client.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("TLS config is not defined in the Qdrant config file")]
    TlsConfigUndefined,
    // I/O failure with a human-readable context message (see `Error::io`).
    #[error("{1}: {0}")]
    Io(#[source] io::Error, String),
    #[error("failed to setup HTTPS client: {0}")]
    Reqwest(#[from] reqwest::Error),
    #[error("malformed API key")]
    MalformedApiKey(#[source] InvalidHeaderValue),
}
impl Error {
pub fn io(source: io::Error, context: impl Into<String>) -> Self {
Self::Io(source, context.into())
}
pub fn failed_to_read(source: io::Error, file: &str, path: &Path) -> Self {
Self::io(
source,
format!("failed to read HTTPS client {file} file {}", path.display()),
)
}
}
impl From<Error> for StorageError {
    /// Surface client-setup failures as storage service errors.
    fn from(err: Error) -> Self {
        let message = format!("failed to initialize HTTP(S) client: {err}");
        StorageError::service_error(message)
    }
}
impl From<Error> for io::Error {
    fn from(err: Error) -> Self {
        // Wrap as a generic I/O error, preserving the source error chain.
        io::Error::other(err)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/error_reporting.rs | src/common/error_reporting.rs | use std::time::Duration;
use common::defaults::APP_USER_AGENT;
/// Best-effort reporter that submits error reports to the Qdrant telemetry endpoint.
pub struct ErrorReporter;

impl ErrorReporter {
    /// Telemetry endpoint; debug builds use staging so development errors do
    /// not pollute production statistics.
    fn get_url() -> String {
        if cfg!(debug_assertions) {
            "https://staging-telemetry.qdrant.io".to_string()
        } else {
            "https://telemetry.qdrant.io".to_string()
        }
    }
    /// Send an error report identified by `reporting_id`, with an optional backtrace.
    ///
    /// Strictly best-effort: any failure (client construction, network,
    /// timeout) is silently ignored — reporting an error must never panic or
    /// block the process that is already in trouble.
    pub fn report(error: &str, reporting_id: &str, backtrace: Option<&str>) {
        // Building the client may fail (e.g. TLS backend initialization);
        // bail out quietly instead of panicking inside an error path.
        let Ok(client) = reqwest::blocking::Client::builder()
            .user_agent(APP_USER_AGENT.as_str())
            .build()
        else {
            return;
        };
        let report = serde_json::json!({
            "id": reporting_id,
            "error": error,
            "backtrace": backtrace.unwrap_or(""),
        });
        // `Value::to_string` is infallible, unlike `serde_json::to_string(...).unwrap()`.
        let data = report.to_string();
        // Short timeout so reporting can never stall the caller for long.
        let _resp = client
            .post(Self::get_url())
            .body(data)
            .header("Content-Type", "application/json")
            .timeout(Duration::from_secs(1))
            .send();
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/update.rs | src/common/update.rs | use std::sync::Arc;
use std::time::Duration;
use api::rest::models::InferenceUsage;
use api::rest::*;
use collection::collection::Collection;
use collection::operations::conversions::write_ordering_from_proto;
use collection::operations::point_ops::*;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::{CollectionError, CollectionResult, UpdateResult};
use collection::operations::vector_ops::*;
use collection::operations::verification::*;
use collection::shards::shard::ShardId;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use schemars::JsonSchema;
use segment::json_path::JsonPath;
use segment::types::{Filter, PayloadFieldSchema, PayloadKeyType, StrictModeConfig};
use serde::{Deserialize, Serialize};
use shard::operations::payload_ops::*;
use shard::operations::*;
use storage::content_manager::collection_meta_ops::*;
use storage::content_manager::collection_verification::check_strict_mode;
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use validator::Validate;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::service::InferenceType;
use crate::common::inference::update_requests::*;
use crate::common::strict_mode::*;
/// Common query parameters accepted by all point/payload update endpoints.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, Validate)]
pub struct UpdateParams {
    // Wait until the operation is fully applied before responding.
    #[serde(default)]
    pub wait: bool,
    // Write-ordering guarantee to use for this operation.
    #[serde(default)]
    pub ordering: WriteOrdering,
    // Optional overall timeout for the operation.
    #[serde(default)]
    pub timeout: Option<Duration>,
}
impl UpdateParams {
    /// Build REST-style update parameters from optional gRPC request fields.
    ///
    /// Fails when the provided write ordering cannot be converted.
    pub fn from_grpc(
        wait: Option<bool>,
        ordering: Option<api::grpc::qdrant::WriteOrdering>,
        timeout: Option<Duration>,
    ) -> tonic::Result<Self> {
        Ok(Self {
            wait: wait.unwrap_or_default(),
            ordering: write_ordering_from_proto(ordering)?,
            timeout,
        })
    }
    /// Timeout in whole seconds, in the form the strict-mode checks expect.
    pub(crate) fn timeout_as_secs(&self) -> Option<usize> {
        self.timeout.map(|t| t.as_secs() as usize)
    }
}
/// Parameters present only on internal (node-to-node) update requests.
#[derive(Copy, Clone, Debug, Default)]
pub struct InternalUpdateParams {
    // Specific shard the operation is addressed to, if already resolved.
    pub shard_id: Option<ShardId>,
    // Clock tag forwarded with internal operations — presumably for operation
    // ordering/recovery; confirm against `ClockTag` docs.
    pub clock_tag: Option<ClockTag>,
}
impl InternalUpdateParams {
    /// Assemble internal routing parameters from raw gRPC fields.
    pub fn from_grpc(
        shard_id: Option<ShardId>,
        clock_tag: Option<api::grpc::qdrant::ClockTag>,
    ) -> Self {
        let clock_tag = clock_tag.map(ClockTag::from);
        Self {
            shard_id,
            clock_tag,
        }
    }
}
/// Request body for the batch update endpoint: a sequence of update operations.
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct UpdateOperations {
    #[validate(nested)]
    pub operations: Vec<UpdateOperation>,
}
/// One operation within a batch update request.
///
/// Deserialized `untagged`: the variant is detected from the single wrapper
/// field name (`upsert`, `delete`, `set_payload`, ...), tried in declaration
/// order — do not reorder variants casually.
#[derive(Deserialize, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum UpdateOperation {
    Upsert(UpsertOperation),
    Delete(DeleteOperation),
    SetPayload(SetPayloadOperation),
    OverwritePayload(OverwritePayloadOperation),
    DeletePayload(DeletePayloadOperation),
    ClearPayload(ClearPayloadOperation),
    UpdateVectors(UpdateVectorsOperation),
    DeleteVectors(DeleteVectorsOperation),
}
impl Validate for UpdateOperation {
fn validate(&self) -> Result<(), validator::ValidationErrors> {
match self {
UpdateOperation::Upsert(op) => op.validate(),
UpdateOperation::Delete(op) => op.validate(),
UpdateOperation::SetPayload(op) => op.validate(),
UpdateOperation::OverwritePayload(op) => op.validate(),
UpdateOperation::DeletePayload(op) => op.validate(),
UpdateOperation::ClearPayload(op) => op.validate(),
UpdateOperation::UpdateVectors(op) => op.validate(),
UpdateOperation::DeleteVectors(op) => op.validate(),
}
}
}
impl StrictModeVerification for UpdateOperation {
    // Update operations carry no read-side query parameters, so the generic
    // accessors below all return `None`; the real verification is delegated
    // per-variant in `check_strict_mode`.
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&segment::types::Filter> {
        None
    }
    fn indexed_filter_write(&self) -> Option<&segment::types::Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
    /// Delegate strict-mode verification to the wrapped operation payload.
    async fn check_strict_mode(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        match self {
            UpdateOperation::Upsert(op) => {
                op.upsert
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::Delete(op) => {
                op.delete
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::SetPayload(op) => {
                op.set_payload
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::OverwritePayload(op) => {
                op.overwrite_payload
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::DeletePayload(op) => {
                op.delete_payload
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::ClearPayload(op) => {
                op.clear_payload
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::UpdateVectors(op) => {
                op.update_vectors
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
            UpdateOperation::DeleteVectors(op) => {
                op.delete_vectors
                    .check_strict_mode(collection, strict_mode_config)
                    .await
            }
        }
    }
}
impl StrictModeVerification for CreateFieldIndex {
    /// Reject index creation when the collection already has
    /// `max_payload_index_count` payload indices (if that limit is set).
    async fn check_custom(
        &self,
        collection: &Collection,
        strict_mode_config: &StrictModeConfig,
    ) -> CollectionResult<()> {
        if let Some(max_payload_index_count) = strict_mode_config.max_payload_index_count {
            let collection_info = collection.info(&ShardSelectorInternal::All).await?;
            if collection_info.payload_schema.len() >= max_payload_index_count {
                return Err(CollectionError::strict_mode(
                    format!(
                        "Collection already has the maximum number of payload indices ({max_payload_index_count})"
                    ),
                    "Please delete an existing index before creating a new one.",
                ));
            }
        }
        Ok(())
    }
    // Index creation has no query-side parameters, so the remaining accessors
    // return `None`.
    fn indexed_filter_write(&self) -> Option<&Filter> {
        None
    }
    fn query_limit(&self) -> Option<usize> {
        None
    }
    fn indexed_filter_read(&self) -> Option<&Filter> {
        None
    }
    fn request_exact(&self) -> Option<bool> {
        None
    }
    fn request_search_params(&self) -> Option<&segment::types::SearchParams> {
        None
    }
}
// Wrapper structs for batch updates: each `UpdateOperation` variant is an
// untagged enum case, distinguished during deserialization by the single
// field name of its wrapper (`upsert`, `delete`, `set_payload`, ...).
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct UpsertOperation {
    #[validate(nested)]
    upsert: PointInsertOperations,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct DeleteOperation {
    #[validate(nested)]
    delete: PointsSelector,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct SetPayloadOperation {
    #[validate(nested)]
    set_payload: SetPayload,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct OverwritePayloadOperation {
    #[validate(nested)]
    overwrite_payload: SetPayload,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct DeletePayloadOperation {
    #[validate(nested)]
    delete_payload: DeletePayload,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct ClearPayloadOperation {
    #[validate(nested)]
    clear_payload: PointsSelector,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct UpdateVectorsOperation {
    #[validate(nested)]
    update_vectors: UpdateVectors,
}
#[derive(Deserialize, Serialize, JsonSchema, Validate)]
pub struct DeleteVectorsOperation {
    #[validate(nested)]
    delete_vectors: DeleteVectors,
}
/// Request body for creating a payload field index.
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate)]
pub struct CreateFieldIndex {
    pub field_name: PayloadKeyType,
    // Also accepted under the name `field_type` (serde alias).
    #[serde(alias = "field_type")]
    #[validate(nested)]
    pub field_schema: Option<PayloadFieldSchema>,
}
/// Insert new points or update existing ones in a collection.
///
/// Strict-mode limits are checked first; then any inference inputs in the
/// request are resolved into vectors. When an `update_filter` is present the
/// upsert becomes conditional. Returns the update result plus inference
/// usage, if inference was performed.
#[expect(clippy::too_many_arguments)]
pub async fn do_upsert_points(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: PointInsertOperations,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    inference_params: InferenceParams,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<(UpdateResult, Option<models::InferenceUsage>), StorageError> {
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    // Convert the REST representation into the internal one, resolving
    // inference inputs along the way.
    let (operation, shard_key, usage, update_filter) = match operation {
        PointInsertOperations::PointsBatch(batch) => {
            let PointsBatch {
                batch,
                shard_key,
                update_filter,
            } = batch;
            let (batch, usage) = convert_batch(batch, inference_params).await?;
            let operation = PointInsertOperationsInternal::PointsBatch(batch);
            (operation, shard_key, usage, update_filter)
        }
        PointInsertOperations::PointsList(list) => {
            let PointsList {
                points,
                shard_key,
                update_filter,
            } = list;
            let (list, usage) =
                convert_point_struct(points, InferenceType::Update, inference_params).await?;
            let operation = PointInsertOperationsInternal::PointsList(list);
            (operation, shard_key, usage, update_filter)
        }
    };
    // An update filter turns the plain upsert into a conditional one.
    let operation = if let Some(condition) = update_filter {
        CollectionUpdateOperations::PointOperation(PointOperations::UpsertPointsConditional(
            ConditionalInsertOperationInternal {
                points_op: operation,
                condition,
            },
        ))
    } else {
        CollectionUpdateOperations::PointOperation(PointOperations::UpsertPoints(operation))
    };
    let result = update(
        toc,
        &collection_name,
        operation,
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await?;
    Ok((result, usage))
}
/// Delete points from a collection, selected either by explicit ids or by filter.
///
/// Strict-mode limits are checked before the operation is dispatched.
pub async fn do_delete_points(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    points: PointsSelector,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let toc = toc_provider
        .check_strict_mode(&points, &collection_name, params.timeout_as_secs(), &access)
        .await?;
    // Translate the REST selector into the internal point operation.
    let (point_op, shard_key) = match points {
        PointsSelector::PointIdsSelector(PointIdsList { points, shard_key }) => {
            (PointOperations::DeletePoints { ids: points }, shard_key)
        }
        PointsSelector::FilterSelector(FilterSelector { filter, shard_key }) => {
            (PointOperations::DeletePointsByFilter(filter), shard_key)
        }
    };
    update(
        toc,
        &collection_name,
        CollectionUpdateOperations::PointOperation(point_op),
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Update named vectors on existing points.
///
/// Strict-mode limits are checked first; inference inputs are resolved into
/// vectors before dispatch. Returns the update result plus inference usage,
/// if inference was performed.
#[expect(clippy::too_many_arguments)]
pub async fn do_update_vectors(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: UpdateVectors,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    inference_params: InferenceParams,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<(UpdateResult, Option<models::InferenceUsage>), StorageError> {
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    let UpdateVectors {
        points,
        shard_key,
        update_filter,
    } = operation;
    let (points, usage) =
        convert_point_vectors(points, InferenceType::Update, inference_params).await?;
    let operation = CollectionUpdateOperations::VectorOperation(VectorOperations::UpdateVectors(
        UpdateVectorsOp {
            points,
            update_filter,
        },
    ));
    let result = update(
        toc,
        &collection_name,
        operation,
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await?;
    Ok((result, usage))
}
/// Delete named vectors from points selected by filter and/or explicit ids.
///
/// If both `filter` and `points` are given, two update operations are issued
/// sequentially (filter first, then ids) and the result of the last one is
/// returned. Errors if neither selector is provided.
pub async fn do_delete_vectors(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: DeleteVectors,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    // TODO: Is this cancel safe!?
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    let DeleteVectors {
        vector,
        filter,
        points,
        shard_key,
    } = operation;
    let vector_names: Vec<_> = vector.into_iter().collect();
    let mut result = None;
    if let Some(filter) = filter {
        let vectors_operation =
            VectorOperations::DeleteVectorsByFilter(filter, vector_names.clone());
        let operation = CollectionUpdateOperations::VectorOperation(vectors_operation);
        result = Some(
            update(
                toc,
                &collection_name,
                operation,
                internal_params,
                params,
                shard_key.clone(),
                access.clone(),
                hw_measurement_acc.clone(),
            )
            .await?,
        );
    }
    if let Some(points) = points {
        let vectors_operation = VectorOperations::DeleteVectors(points.into(), vector_names);
        let operation = CollectionUpdateOperations::VectorOperation(vectors_operation);
        result = Some(
            update(
                toc,
                &collection_name,
                operation,
                internal_params,
                params,
                shard_key,
                access,
                hw_measurement_acc,
            )
            .await?,
        );
    }
    result.ok_or_else(|| StorageError::bad_request("No filter or points provided"))
}
/// Set (merge) payload values on points selected by ids and/or filter.
///
/// Strict-mode limits are checked before the operation is dispatched.
pub async fn do_set_payload(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: SetPayload,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    let SetPayload {
        points,
        payload,
        filter,
        shard_key,
        key,
    } = operation;
    let payload_op = PayloadOps::SetPayload(SetPayloadOp {
        payload,
        points,
        filter,
        key,
    });
    update(
        toc,
        &collection_name,
        CollectionUpdateOperations::PayloadOperation(payload_op),
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Replace the whole payload of the selected points.
///
/// Unlike `do_set_payload`, the existing payload is overwritten rather than
/// merged, and a payload-key selector is not supported — any provided `key`
/// is ignored.
pub async fn do_overwrite_payload(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: SetPayload,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    let SetPayload {
        points,
        payload,
        filter,
        shard_key,
        key: _,
    } = operation;
    let payload_op = PayloadOps::OverwritePayload(SetPayloadOp {
        payload,
        points,
        filter,
        // overwrite operation doesn't support payload selector
        key: None,
    });
    update(
        toc,
        &collection_name,
        CollectionUpdateOperations::PayloadOperation(payload_op),
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Delete specific payload keys from points selected by ids and/or filter.
///
/// Strict-mode limits are checked before the operation is dispatched.
pub async fn do_delete_payload(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    operation: DeletePayload,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let toc = toc_provider
        .check_strict_mode(
            &operation,
            &collection_name,
            params.timeout_as_secs(),
            &access,
        )
        .await?;
    let DeletePayload {
        keys,
        points,
        filter,
        shard_key,
    } = operation;
    let payload_op = PayloadOps::DeletePayload(DeletePayloadOp {
        keys,
        points,
        filter,
    });
    update(
        toc,
        &collection_name,
        CollectionUpdateOperations::PayloadOperation(payload_op),
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Remove the entire payload from points selected by ids or by filter.
///
/// Strict-mode limits are checked before the operation is dispatched.
pub async fn do_clear_payload(
    toc_provider: impl CheckedTocProvider,
    collection_name: String,
    points: PointsSelector,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let toc = toc_provider
        .check_strict_mode(&points, &collection_name, params.timeout_as_secs(), &access)
        .await?;
    // Translate the REST selector into the internal payload operation.
    let (payload_op, shard_key) = match points {
        PointsSelector::PointIdsSelector(PointIdsList { points, shard_key }) => {
            (PayloadOps::ClearPayload { points }, shard_key)
        }
        PointsSelector::FilterSelector(FilterSelector { filter, shard_key }) => {
            (PayloadOps::ClearPayloadByFilter(filter), shard_key)
        }
    };
    update(
        toc,
        &collection_name,
        CollectionUpdateOperations::PayloadOperation(payload_op),
        internal_params,
        params,
        shard_key,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Apply a sequence of update operations to a collection, in order.
///
/// Strict-mode checks run for *all* operations up front, before any of them
/// is applied, so a violating batch is rejected without side effects. Each
/// operation is then dispatched through the matching `do_*` helper; a runtime
/// failure stops the batch, leaving earlier operations applied. Inference
/// usage from upserts/vector updates is accumulated and returned alongside
/// the per-operation results.
#[expect(clippy::too_many_arguments)]
pub async fn do_batch_update_points(
    toc_provider: impl CheckedTocProvider + Clone,
    collection_name: String,
    operations: Vec<UpdateOperation>,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    inference_params: InferenceParams,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<(Vec<UpdateResult>, Option<InferenceUsage>), StorageError> {
    // Check strict mode for all batch operations, *before applying* them
    let mut toc = None;
    for operation in &operations {
        toc = toc_provider
            .check_strict_mode(
                operation,
                &collection_name,
                params.timeout_as_secs(),
                &access,
            )
            .await?
            .into();
    }
    let Some(toc) = toc else {
        // Batch is empty, return empty result vector
        return Ok((Vec::new(), None));
    };
    // Pass unchecked ToC provider into `do_*` functions, because we already checked strict mode
    let toc_provider = UncheckedTocProvider::new_unchecked(toc);
    let mut results = Vec::with_capacity(operations.len());
    let mut inference_usage = InferenceUsage::default();
    for operation in operations {
        let current_update_result = match operation {
            UpdateOperation::Upsert(operation) => {
                let (result, usage) = do_upsert_points(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.upsert,
                    internal_params,
                    params,
                    access.clone(),
                    inference_params.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?;
                inference_usage.merge_opt(usage);
                result
            }
            UpdateOperation::Delete(operation) => {
                do_delete_points(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.delete,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
            UpdateOperation::SetPayload(operation) => {
                do_set_payload(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.set_payload,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
            UpdateOperation::OverwritePayload(operation) => {
                do_overwrite_payload(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.overwrite_payload,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
            UpdateOperation::DeletePayload(operation) => {
                do_delete_payload(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.delete_payload,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
            UpdateOperation::ClearPayload(operation) => {
                do_clear_payload(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.clear_payload,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
            UpdateOperation::UpdateVectors(operation) => {
                let (result, usage) = do_update_vectors(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.update_vectors,
                    internal_params,
                    params,
                    access.clone(),
                    inference_params.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?;
                inference_usage.merge_opt(usage);
                result
            }
            UpdateOperation::DeleteVectors(operation) => {
                do_delete_vectors(
                    toc_provider.clone(),
                    collection_name.clone(),
                    operation.delete_vectors,
                    internal_params,
                    params,
                    access.clone(),
                    hw_measurement_acc.clone(),
                )
                .await?
            }
        };
        results.push(current_update_result);
    }
    Ok((results, inference_usage.into_non_empty()))
}
/// Create a payload field index on a collection.
///
/// After a strict-mode check, the index creation is submitted to consensus
/// and then also applied through the point-like update interface, so that
/// the `wait` parameter and `UpdateResult` return type remain compatible.
///
/// # Errors
/// Fails with a bad-request error when `field_schema` is missing: field
/// type auto-detection is not supported on this path.
pub async fn do_create_index(
    dispatcher: Arc<Dispatcher>,
    collection_name: String,
    operation: CreateFieldIndex,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    // TODO: Is this cancel safe!?
    // Use per-request timeout from params if provided
    let wait_timeout = params.timeout;
    // Check strict mode before submitting consensus operation
    let pass = check_strict_mode(
        &operation,
        wait_timeout.map(|d| d.as_secs() as usize),
        &collection_name,
        &dispatcher,
        &access,
    )
    .await?;
    let Some(field_schema) = operation.field_schema else {
        return Err(StorageError::bad_request(
            "Can't auto-detect field type, please specify `field_schema` in the request",
        ));
    };
    let consensus_op = CollectionMetaOperations::CreatePayloadIndex(CreatePayloadIndex {
        collection_name: collection_name.clone(),
        field_name: operation.field_name.clone(),
        field_schema: field_schema.clone(),
    });
    // Resolve the ToC before `access` is moved into the consensus submission.
    let toc = dispatcher.toc(&access, &pass).clone();
    // TODO: Is `submit_collection_meta_op` cancel-safe!? Should be, I think?.. 🤔
    dispatcher
        .submit_collection_meta_op(consensus_op, access, wait_timeout)
        .await?;
    // This function is required as long as we want to maintain interface compatibility
    // for `wait` parameter and return type.
    // The idea is to migrate from the point-like interface to consensus-like interface in the next few versions
    do_create_index_internal(
        toc,
        collection_name,
        operation.field_name,
        Some(field_schema),
        internal_params,
        params,
        hw_measurement_acc,
    )
    .await
}
/// Build and dispatch a `CreateIndex` field-index update locally, bypassing
/// access checks (runs with full internal access).
pub async fn do_create_index_internal(
    toc: Arc<TableOfContent>,
    collection_name: String,
    field_name: PayloadKeyType,
    field_schema: Option<PayloadFieldSchema>,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let create_index = CreateIndex {
        field_name,
        field_schema,
    };
    let operation = CollectionUpdateOperations::FieldIndexOperation(
        FieldIndexOperations::CreateIndex(create_index),
    );
    update(
        &toc,
        &collection_name,
        operation,
        internal_params,
        params,
        None,
        // Internal call sites are fully trusted.
        Access::full("Internal API"),
        hw_measurement_acc,
    )
    .await
}
/// Drop a payload field index: first through consensus, then locally via
/// the point-like update interface (keeps `wait` semantics and the
/// `UpdateResult` return type compatible).
pub async fn do_delete_index(
    dispatcher: Arc<Dispatcher>,
    collection_name: String,
    index_name: JsonPath,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    // TODO: Is this cancel safe!?
    // Per-request timeout, if the caller provided one.
    let wait_timeout = params.timeout;
    // Nothing to verify here.
    let verification_pass = new_unchecked_verification_pass();
    // Resolve the ToC before `access` is moved into the consensus submission.
    let toc = dispatcher.toc(&access, &verification_pass).clone();
    let meta_op = CollectionMetaOperations::DropPayloadIndex(DropPayloadIndex {
        collection_name: collection_name.clone(),
        field_name: index_name.clone(),
    });
    // TODO: Is `submit_collection_meta_op` cancel-safe!? Should be, I think?.. 🤔
    dispatcher
        .submit_collection_meta_op(meta_op, access, wait_timeout)
        .await?;
    do_delete_index_internal(
        toc,
        collection_name,
        index_name,
        internal_params,
        params,
        hw_measurement_acc,
    )
    .await
}
/// Build and dispatch a `DeleteIndex` field-index update locally, bypassing
/// access checks (runs with full internal access).
pub async fn do_delete_index_internal(
    toc: Arc<TableOfContent>,
    collection_name: String,
    index_name: JsonPath,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let delete_index = FieldIndexOperations::DeleteIndex(index_name);
    update(
        &toc,
        &collection_name,
        CollectionUpdateOperations::FieldIndexOperation(delete_index),
        internal_params,
        params,
        None,
        // Internal call sites are fully trusted.
        Access::full("Internal API"),
        hw_measurement_acc,
    )
    .await
}
/// Resolve the proper shard selector for `operation` and forward it, tagged
/// with the clock, to the table of contents.
///
/// Sync-points and field-index operations may not carry a shard key; sync
/// operations are additionally expected to target a concrete shard id.
#[expect(clippy::too_many_arguments)]
pub async fn update(
    toc: &TableOfContent,
    collection_name: &str,
    operation: CollectionUpdateOperations,
    internal_params: InternalUpdateParams,
    params: UpdateParams,
    shard_key: Option<ShardKeySelector>,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<UpdateResult, StorageError> {
    let InternalUpdateParams {
        shard_id,
        clock_tag,
    } = internal_params;
    let UpdateParams {
        wait,
        ordering,
        timeout: _,
    } = params;
    let selector = match operation {
        CollectionUpdateOperations::PointOperation(point_ops::PointOperations::SyncPoints(_)) => {
            debug_assert_eq!(
                shard_key, None,
                "Sync points operations can't specify shard key"
            );
            if let Some(shard_id) = shard_id {
                ShardSelectorInternal::ShardId(shard_id)
            } else {
                debug_assert!(false, "Sync operation is supposed to select shard directly");
                ShardSelectorInternal::Empty
            }
        }
        CollectionUpdateOperations::FieldIndexOperation(_) => {
            debug_assert_eq!(
                shard_key, None,
                "Field index operations can't specify shard key"
            );
            // Without an explicit shard id, index changes apply to all shards.
            shard_id.map_or(ShardSelectorInternal::All, ShardSelectorInternal::ShardId)
        }
        _ => get_shard_selector_for_update(shard_id, shard_key),
    };
    toc.update(
        collection_name,
        OperationWithClockTag::new(operation, clock_tag),
        wait,
        ordering,
        selector,
        access,
        hw_measurement_acc,
    )
    .await
}
/// Converts a pair of parameters into a shard selector
/// suitable for update operations.
///
/// The key difference from the selector used for search operations is that
/// an empty shard selector for an *update* means the default shard, while
/// an empty shard selector for a *search* means all shards.
///
/// Parameters:
/// - shard_selection: exact shard ID; always takes priority over shard_key
/// - shard_key: shard key selection, a single key or a list of keys
///
/// Returns:
/// - ShardSelectorInternal - resolved shard selector
fn get_shard_selector_for_update(
    shard_selection: Option<ShardId>,
    shard_key: Option<ShardKeySelector>,
) -> ShardSelectorInternal {
    match (shard_selection, shard_key) {
        // An explicit shard ID always wins; supplying both is a caller bug.
        (Some(shard_id), shard_key) => {
            debug_assert!(
                shard_key.is_none(),
                "Shard selection and shard key are mutually exclusive"
            );
            ShardSelectorInternal::ShardId(shard_id)
        }
        (None, Some(shard_key)) => ShardSelectorInternal::from(shard_key),
        (None, None) => ShardSelectorInternal::Empty,
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry.rs | src/common/telemetry.rs | use std::sync::Arc;
use std::time::Duration;
use collection::operations::verification::new_unchecked_verification_pass;
use common::types::{DetailsLevel, TelemetryDetail};
use parking_lot::Mutex;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::Serialize;
use shard::common::stopping_guard::StoppingGuard;
use storage::content_manager::errors::{StorageError, StorageResult};
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use tokio::time::error::Elapsed;
use tokio_util::task::AbortOnDropHandle;
use uuid::Uuid;
use crate::common::telemetry_ops::app_telemetry::{AppBuildTelemetry, AppBuildTelemetryCollector};
use crate::common::telemetry_ops::cluster_telemetry::ClusterTelemetry;
use crate::common::telemetry_ops::collections_telemetry::CollectionsTelemetry;
use crate::common::telemetry_ops::hardware::HardwareTelemetry;
use crate::common::telemetry_ops::memory_telemetry::MemoryTelemetry;
use crate::common::telemetry_ops::requests_telemetry::{
ActixTelemetryCollector, RequestsTelemetry, TonicTelemetryCollector,
};
use crate::settings::Settings;
// Keep in sync with openapi/openapi-service.ytt.yaml
const DEFAULT_TELEMETRY_TIMEOUT: Duration = Duration::from_secs(60);
/// Owns all telemetry sub-collectors and the handles needed to assemble a
/// full [`TelemetryData`] snapshot.
pub struct TelemetryCollector {
    // Identifier reported as the telemetry id (see `generate_id` / `reporting_id`).
    process_id: Uuid,
    settings: Settings,
    dispatcher: Arc<Dispatcher>,
    pub app_telemetry_collector: AppBuildTelemetryCollector,
    // Request statistics gathered by the REST (actix) API workers.
    pub actix_telemetry_collector: Arc<Mutex<ActixTelemetryCollector>>,
    // Request statistics gathered by the gRPC (tonic) API workers.
    pub tonic_telemetry_collector: Arc<Mutex<TonicTelemetryCollector>>,
}
// Whole telemetry data: everything reported in a single telemetry snapshot.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct TelemetryData {
    // Reporting id; explicitly excluded from anonymization.
    #[anonymize(false)]
    id: String,
    pub(crate) app: AppBuildTelemetry,
    pub(crate) collections: CollectionsTelemetry,
    // The sections below are optional and omitted from JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) cluster: Option<ClusterTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) requests: Option<RequestsTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) memory: Option<MemoryTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) hardware: Option<HardwareTelemetry>,
}
impl TelemetryCollector {
    /// Identifier of this process as reported in telemetry.
    pub fn reporting_id(&self) -> String {
        self.process_id.to_string()
    }
    /// Generate a fresh random (v4) process id.
    pub fn generate_id() -> Uuid {
        Uuid::new_v4()
    }
    /// Create a collector with empty request-statistics worker lists.
    pub fn new(settings: Settings, dispatcher: Arc<Dispatcher>, id: Uuid) -> Self {
        Self {
            process_id: id,
            settings,
            dispatcher,
            app_telemetry_collector: AppBuildTelemetryCollector::new(),
            actix_telemetry_collector: Arc::new(Mutex::new(ActixTelemetryCollector {
                workers: Vec::new(),
            })),
            tonic_telemetry_collector: Arc::new(Mutex::new(TonicTelemetryCollector {
                workers: Vec::new(),
            })),
        }
    }
    /// Assemble a full `TelemetryData` snapshot.
    ///
    /// Collections telemetry is gathered on the blocking thread pool (it
    /// acquires several synchronous locks), bounded by `timeout` (default
    /// `DEFAULT_TELEMETRY_TIMEOUT`), and aborted if this future is dropped.
    pub async fn prepare_data(
        &self,
        access: &Access,
        detail: TelemetryDetail,
        timeout: Option<Duration>,
    ) -> StorageResult<TelemetryData> {
        let timeout = timeout.unwrap_or(DEFAULT_TELEMETRY_TIMEOUT);
        // Use blocking pool because the collection telemetry acquires several sync. locks.
        // The guard signals cancellation to the worker when this future is dropped.
        let is_stopped_guard = StoppingGuard::new();
        let is_stopped = is_stopped_guard.get_is_stopped();
        let collections_telemetry_handle = {
            let toc = self
                .dispatcher
                .toc(access, &new_unchecked_verification_pass())
                .clone();
            let runtime_handle = toc.general_runtime_handle().clone();
            let access_collection = access.clone();
            let handle = runtime_handle.spawn_blocking(move || {
                // Re-enter the async runtime in this blocking thread
                tokio::runtime::Handle::current().block_on(async move {
                    CollectionsTelemetry::collect(
                        detail,
                        &access_collection,
                        &toc,
                        timeout,
                        &is_stopped,
                    )
                    .await
                })
            });
            // Ensure the spawned task is aborted if this future is dropped.
            AbortOnDropHandle::new(handle)
        };
        // Triple `?`: tokio timeout error (mapped), task join error, inner result.
        let collections_telemetry = tokio::time::timeout(timeout, collections_telemetry_handle)
            .await
            .map_err(|_: Elapsed| StorageError::timeout(timeout, "collections telemetry"))???;
        Ok(TelemetryData {
            id: self.process_id.to_string(),
            collections: collections_telemetry,
            app: AppBuildTelemetry::collect(detail, &self.app_telemetry_collector, &self.settings),
            cluster: ClusterTelemetry::collect(access, detail, &self.dispatcher, &self.settings),
            requests: RequestsTelemetry::collect(
                access,
                &self.actix_telemetry_collector.lock(),
                &self.tonic_telemetry_collector.lock(),
                detail,
            ),
            // Memory and hardware sections are only included above detail level 0.
            memory: (detail.level > DetailsLevel::Level0)
                .then(|| MemoryTelemetry::collect(access))
                .flatten(),
            hardware: (detail.level > DetailsLevel::Level0)
                .then(|| HardwareTelemetry::new(&self.dispatcher, access)),
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/helpers.rs | src/common/helpers.rs | use std::cmp::max;
use std::io;
use std::sync::atomic::{AtomicUsize, Ordering};
use fs_err as fs;
use tokio::runtime;
use tokio::runtime::Runtime;
use tonic::transport::{Certificate, ClientTlsConfig, Identity, ServerTlsConfig};
use crate::settings::{Settings, TlsConfig};
/// Build the multi-threaded tokio runtime used for search requests.
///
/// Worker and blocking-thread counts are both derived from
/// `max_search_threads` via `search_thread_count`; threads are named
/// `search-<n>` with a monotonically increasing id.
pub fn create_search_runtime(max_search_threads: usize) -> io::Result<Runtime> {
    let thread_count = common::defaults::search_thread_count(max_search_threads);
    let mut builder = runtime::Builder::new_multi_thread();
    builder
        .worker_threads(thread_count)
        .max_blocking_threads(thread_count)
        .enable_all()
        .thread_name_fn(|| {
            static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
            format!("search-{}", ATOMIC_ID.fetch_add(1, Ordering::SeqCst))
        });
    builder.build()
}
/// Build the multi-threaded tokio runtime used for update/optimization work.
///
/// Workers match the CPU count and are named `update-<n>`. The blocking
/// pool is capped at `max_optimization_threads` when positive.
pub fn create_update_runtime(max_optimization_threads: usize) -> io::Result<Runtime> {
    let mut builder = runtime::Builder::new_multi_thread();
    builder
        .enable_time()
        .enable_io()
        .worker_threads(common::cpu::get_num_cpus())
        .thread_name_fn(move || {
            static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
            let update_id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
            format!("update-{update_id}")
        });
    // `max_blocking_threads` panics on 0, so only apply a positive cap.
    if max_optimization_threads > 0 {
        builder.max_blocking_threads(max_optimization_threads);
    }
    builder.build()
}
/// Build the general-purpose multi-threaded tokio runtime.
///
/// Uses at least two workers even on single-core machines; threads are
/// named `general-<n>`.
pub fn create_general_purpose_runtime() -> io::Result<Runtime> {
    let workers = max(common::cpu::get_num_cpus(), 2);
    runtime::Builder::new_multi_thread()
        .enable_time()
        .enable_io()
        .worker_threads(workers)
        .thread_name_fn(|| {
            static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
            format!("general-{}", ATOMIC_ID.fetch_add(1, Ordering::SeqCst))
        })
        .build()
}
/// Load client TLS configuration.
///
/// Returns `None` when p2p TLS is disabled in the cluster settings.
pub fn load_tls_client_config(settings: &Settings) -> io::Result<Option<ClientTlsConfig>> {
    if !settings.cluster.p2p.enable_tls {
        return Ok(None);
    }
    let tls_config = settings.tls()?;
    let client_config = ClientTlsConfig::new()
        .identity(load_identity(&tls_config)?)
        .ca_certificate(load_ca_certificate(&tls_config)?);
    Ok(Some(client_config))
}
/// Load server TLS configuration for external gRPC (no client certificate check)
pub fn load_tls_external_server_config(tls_config: &TlsConfig) -> io::Result<ServerTlsConfig> {
    let identity = load_identity(tls_config)?;
    Ok(ServerTlsConfig::new().identity(identity))
}
/// Load server TLS configuration for internal gRPC, check client certificate against CA
pub fn load_tls_internal_server_config(tls_config: &TlsConfig) -> io::Result<ServerTlsConfig> {
    let identity = load_identity(tls_config)?;
    let client_ca = load_ca_certificate(tls_config)?;
    Ok(ServerTlsConfig::new()
        .identity(identity)
        .client_ca_root(client_ca))
}
/// Read the certificate/key PEM pair from disk and build a TLS identity.
fn load_identity(tls_config: &TlsConfig) -> io::Result<Identity> {
    let cert_pem = fs::read_to_string(&tls_config.cert)?;
    let key_pem = fs::read_to_string(&tls_config.key)?;
    Ok(Identity::from_pem(cert_pem, key_pem))
}
/// Read the configured CA certificate PEM from disk.
///
/// Fails with `InvalidInput` when no CA certificate path is configured.
fn load_ca_certificate(tls_config: &TlsConfig) -> io::Result<Certificate> {
    match &tls_config.ca_cert {
        Some(ca_cert_path) => {
            let pem = fs::read_to_string(ca_cert_path)?;
            Ok(Certificate::from_pem(pem))
        }
        None => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "CA certificate is required for TLS configuration",
        )),
    }
}
/// Wrap a tonic transport error into a `std::io::Error`, keeping the
/// original error as the source.
pub fn tonic_error_to_io_error(err: tonic::transport::Error) -> io::Error {
    io::Error::other(err)
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::thread;
    use std::thread::sleep;
    use std::time::Duration;
    use collection::common::is_ready::IsReady;
    // `IsReady::await_ready` must block until another thread flips the flag
    // with `make_ready`, then return.
    #[test]
    fn test_is_ready() {
        let is_ready = Arc::new(IsReady::default());
        let is_ready_clone = is_ready.clone();
        // This thread parks inside `await_ready` until readiness is signalled.
        let join = thread::spawn(move || {
            is_ready_clone.await_ready();
            eprintln!(
                "is_ready_clone.check_ready() = {:#?}",
                is_ready_clone.check_ready()
            );
        });
        // Give the spawned thread time to enter `await_ready` first.
        sleep(Duration::from_millis(500));
        eprintln!("Making ready");
        is_ready.make_ready();
        // Allow the woken thread to run before joining.
        sleep(Duration::from_millis(500));
        join.join().unwrap()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/collections.rs | src/common/collections.rs | use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use api::grpc::qdrant::CollectionExists;
use api::rest::models::{
CollectionDescription, CollectionsResponse, ShardKeyDescription, ShardKeysResponse,
};
use collection::config::ShardingMethod;
#[cfg(feature = "staging")]
use collection::operations::cluster_ops::TestSlowDownOperation;
use collection::operations::cluster_ops::{
AbortTransferOperation, ClusterOperations, DropReplicaOperation, MoveShardOperation,
ReplicatePoints, ReplicatePointsOperation, ReplicateShardOperation, ReshardingDirection,
RestartTransfer, RestartTransferOperation, StartResharding,
};
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::snapshot_ops::SnapshotDescription;
use collection::operations::types::{
AliasDescription, CollectionClusterInfo, CollectionInfo, CollectionsAliasesResponse,
};
use collection::operations::verification::new_unchecked_verification_pass;
use collection::shards::replica_set;
use collection::shards::replica_set::replica_set_state;
use collection::shards::resharding::ReshardKey;
use collection::shards::shard::{PeerId, ShardId, ShardsPlacement};
use collection::shards::transfer::{
ShardTransfer, ShardTransferKey, ShardTransferMethod, ShardTransferRestart,
};
use itertools::Itertools;
use rand::prelude::SliceRandom;
use rand::seq::IteratorRandom;
use storage::content_manager::collection_meta_ops::ShardTransferOperations::{Abort, Start};
#[cfg(feature = "staging")]
use storage::content_manager::collection_meta_ops::TestSlowDown;
use storage::content_manager::collection_meta_ops::{
CollectionMetaOperations, CreateShardKey, DropShardKey, ReshardingOperation,
SetShardReplicaState, ShardTransferOperations, UpdateCollectionOperation,
};
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use uuid::Uuid;
/// Check whether collection `name` exists (subject to `access` rights).
///
/// A successful lookup means the collection exists; a `NotFound` error
/// means it does not; any other error is propagated as-is.
pub async fn do_collection_exists(
    toc: &TableOfContent,
    access: Access,
    name: &str,
) -> Result<CollectionExists, StorageError> {
    let collection_pass = access.check_collection_access(name, AccessRequirements::new())?;
    match toc.get_collection(&collection_pass).await {
        Ok(_) => Ok(CollectionExists { exists: true }),
        Err(StorageError::NotFound { .. }) => Ok(CollectionExists { exists: false }),
        Err(other) => Err(other),
    }
}
/// Fetch collection info, optionally restricted to a single shard.
pub async fn do_get_collection(
    toc: &TableOfContent,
    access: Access,
    name: &str,
    shard_selection: Option<ShardId>,
) -> Result<CollectionInfo, StorageError> {
    let collection_pass = access.check_collection_access(name, AccessRequirements::new())?;
    let collection = toc.get_collection(&collection_pass).await?;
    // No shard id means info across all shards.
    let selector =
        shard_selection.map_or(ShardSelectorInternal::All, ShardSelectorInternal::ShardId);
    Ok(collection.info(&selector).await?)
}
/// List the names of all collections visible under `access`.
pub async fn do_list_collections(
    toc: &TableOfContent,
    access: Access,
) -> Result<CollectionsResponse, StorageError> {
    let mut collections = Vec::new();
    for pass in toc.all_collections(&access).await {
        collections.push(CollectionDescription {
            name: pass.name().to_string(),
        });
    }
    Ok(CollectionsResponse { collections })
}
/// List the shard keys of a collection.
///
/// Shard keys only exist for custom sharding; auto sharding reports `None`.
pub async fn do_get_collection_shard_keys(
    toc: &TableOfContent,
    access: Access,
    name: &str,
) -> Result<ShardKeysResponse, StorageError> {
    let collection_pass = access.check_collection_access(name, AccessRequirements::new())?;
    let collection = toc.get_collection(&collection_pass).await?;
    let state = collection.state().await;
    let shard_keys = match state.config.params.sharding_method.unwrap_or_default() {
        ShardingMethod::Auto => None,
        ShardingMethod::Custom => {
            let keys = state
                .shards_key_mapping
                .iter_shard_keys()
                .map(|key| ShardKeyDescription { key: key.clone() })
                .collect();
            Some(keys)
        }
    };
    Ok(ShardKeysResponse { shard_keys })
}
/// Construct shards-replicas layout for the shard from the given scope of peers
/// Example:
/// Shards: 3
/// Replicas: 2
/// Peers: [A, B, C]
///
/// Placement:
/// [
///    [A, B]
///    [B, C]
///    [A, C]
/// ]
fn generate_even_placement(
    mut pool: Vec<PeerId>,
    shard_number: usize,
    replication_factor: usize,
) -> ShardsPlacement {
    // Shuffle once, then walk the pool cyclically so that replicas are
    // spread evenly across all peers.
    pool.shuffle(&mut rand::rng());
    // A shard cannot have more replicas than there are peers in the pool.
    let replicas_per_shard = std::cmp::min(replication_factor, pool.len());
    let mut peers = pool.iter().copied().cycle();
    (0..shard_number)
        .map(|_| peers.by_ref().take(replicas_per_shard).collect())
        .collect()
}
/// List the aliases pointing at a single collection.
pub async fn do_list_collection_aliases(
    toc: &TableOfContent,
    access: Access,
    collection_name: &str,
) -> Result<CollectionsAliasesResponse, StorageError> {
    let collection_pass =
        access.check_collection_access(collection_name, AccessRequirements::new())?;
    let alias_names = toc.collection_aliases(&collection_pass, &access).await?;
    let aliases = alias_names
        .into_iter()
        .map(|alias_name| AliasDescription {
            alias_name,
            collection_name: collection_name.to_string(),
        })
        .collect();
    Ok(CollectionsAliasesResponse { aliases })
}
/// List all collection aliases visible under `access`.
pub async fn do_list_aliases(
    toc: &TableOfContent,
    access: Access,
) -> Result<CollectionsAliasesResponse, StorageError> {
    let aliases = toc.list_aliases(&access).await?;
    Ok(CollectionsAliasesResponse { aliases })
}
/// List the snapshots of a collection (requires the `extras` access flag).
pub async fn do_list_snapshots(
    toc: &TableOfContent,
    access: Access,
    collection_name: &str,
) -> Result<Vec<SnapshotDescription>, StorageError> {
    let collection_pass =
        access.check_collection_access(collection_name, AccessRequirements::new().extras())?;
    let collection = toc.get_collection(&collection_pass).await?;
    let snapshots = collection.list_snapshots().await?;
    Ok(snapshots)
}
/// Create a snapshot of a collection (requires write + `extras` access).
pub async fn do_create_snapshot(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: &str,
) -> Result<SnapshotDescription, StorageError> {
    let collection_pass = access
        .check_collection_access(collection_name, AccessRequirements::new().write().extras())?
        .into_static();
    // Snapshot creation runs in its own spawned task.
    let handle = tokio::spawn(async move { toc.create_snapshot(&collection_pass).await });
    // Two `?`: task join error, then the snapshot result itself.
    let snapshot = handle.await??;
    Ok(snapshot)
}
/// Fetch the cluster info of a collection as seen from this peer
/// (requires the `extras` access flag).
pub async fn do_get_collection_cluster(
    toc: &TableOfContent,
    access: Access,
    name: &str,
) -> Result<CollectionClusterInfo, StorageError> {
    let collection_pass =
        access.check_collection_access(name, AccessRequirements::new().extras())?;
    let collection = toc.get_collection(&collection_pass).await?;
    let cluster_info = collection.cluster_info(toc.this_peer_id).await?;
    Ok(cluster_info)
}
pub async fn do_update_collection_cluster(
dispatcher: &Dispatcher,
collection_name: String,
operation: ClusterOperations,
access: Access,
wait_timeout: Option<Duration>,
) -> Result<bool, StorageError> {
let collection_pass = access.check_collection_access(
&collection_name,
AccessRequirements::new().write().manage().extras(),
)?;
if dispatcher.consensus_state().is_none() {
return Err(StorageError::BadRequest {
description: "Distributed mode disabled".to_string(),
});
}
let consensus_state = dispatcher.consensus_state().unwrap();
let get_all_peer_ids = || {
consensus_state
.persistent
.read()
.peer_address_by_id
.read()
.keys()
.cloned()
.collect_vec()
};
let validate_peer_exists = |peer_id| {
let target_peer_exist = consensus_state
.persistent
.read()
.peer_address_by_id
.read()
.contains_key(&peer_id);
if !target_peer_exist {
return Err(StorageError::BadRequest {
description: format!("Peer {peer_id} does not exist"),
});
}
Ok(())
};
// All checks should've been done at this point.
let pass = new_unchecked_verification_pass();
let collection = dispatcher
.toc(&access, &pass)
.get_collection(&collection_pass)
.await?;
match operation {
ClusterOperations::MoveShard(MoveShardOperation { move_shard }) => {
// validate shard to move
if !collection.contains_shard(move_shard.shard_id).await {
return Err(StorageError::BadRequest {
description: format!(
"Shard {} of {} does not exist",
move_shard.shard_id, collection_name
),
});
};
// validate target and source peer exists
validate_peer_exists(move_shard.to_peer_id)?;
validate_peer_exists(move_shard.from_peer_id)?;
// submit operation to consensus
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::TransferShard(
collection_name,
Start(ShardTransfer {
shard_id: move_shard.shard_id,
to_shard_id: move_shard.to_shard_id,
to: move_shard.to_peer_id,
from: move_shard.from_peer_id,
sync: false,
method: move_shard.method,
filter: None,
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::ReplicateShard(ReplicateShardOperation { replicate_shard }) => {
// validate shard to move
if !collection.contains_shard(replicate_shard.shard_id).await {
return Err(StorageError::BadRequest {
description: format!(
"Shard {} of {} does not exist",
replicate_shard.shard_id, collection_name
),
});
};
// validate target peer exists
validate_peer_exists(replicate_shard.to_peer_id)?;
// validate source peer exists
validate_peer_exists(replicate_shard.from_peer_id)?;
// submit operation to consensus
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::TransferShard(
collection_name,
Start(ShardTransfer {
shard_id: replicate_shard.shard_id,
to_shard_id: replicate_shard.to_shard_id,
to: replicate_shard.to_peer_id,
from: replicate_shard.from_peer_id,
sync: true,
method: replicate_shard.method,
filter: None,
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::ReplicatePoints(ReplicatePointsOperation { replicate_points }) => {
let ReplicatePoints {
filter,
from_shard_key,
to_shard_key,
} = replicate_points;
let from_shard_ids = collection.get_shard_ids(&from_shard_key).await?;
// Temporary, before we support multi-source transfers
if from_shard_ids.len() != 1 {
return Err(StorageError::BadRequest {
description: format!(
"Only replicating from shard keys with exactly one shard is supported. Shard key {from_shard_key} has {} shards",
from_shard_ids.len()
),
});
}
// validate shard key exists
let from_replicas = collection.get_replicas(&from_shard_key).await?;
let to_replicas = collection.get_replicas(&to_shard_key).await?;
debug_assert!(!from_replicas.is_empty());
if to_replicas.len() != 1 {
return Err(StorageError::BadRequest {
description: format!(
"Only replicating to shard keys with exactly one replica is supported. Shard key {to_shard_key} has {} replicas",
to_replicas.len()
),
});
}
let (from_shard_id, from_peer_id) = from_replicas[0];
let (to_shard_id, to_peer_id) = to_replicas[0];
// validate source & target peers exist
validate_peer_exists(to_peer_id)?;
validate_peer_exists(from_peer_id)?;
// submit operation to consensus
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::TransferShard(
collection_name,
Start(ShardTransfer {
shard_id: from_shard_id,
to_shard_id: Some(to_shard_id),
from: from_peer_id,
to: to_peer_id,
sync: true,
method: Some(ShardTransferMethod::StreamRecords),
filter,
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::AbortTransfer(AbortTransferOperation { abort_transfer }) => {
let transfer = ShardTransferKey {
shard_id: abort_transfer.shard_id,
to_shard_id: abort_transfer.to_shard_id,
to: abort_transfer.to_peer_id,
from: abort_transfer.from_peer_id,
};
if !collection.check_transfer_exists(&transfer).await {
return Err(StorageError::NotFound {
description: format!(
"Shard transfer {} -> {} for collection {}:{} does not exist",
transfer.from, transfer.to, collection_name, transfer.shard_id
),
});
}
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::TransferShard(
collection_name,
Abort {
transfer,
reason: "user request".to_string(),
},
),
access,
wait_timeout,
)
.await
}
ClusterOperations::DropReplica(DropReplicaOperation { drop_replica }) => {
if !collection.contains_shard(drop_replica.shard_id).await {
return Err(StorageError::BadRequest {
description: format!(
"Shard {} of {} does not exist",
drop_replica.shard_id, collection_name
),
});
};
validate_peer_exists(drop_replica.peer_id)?;
let mut update_operation = UpdateCollectionOperation::new_empty(collection_name);
update_operation.set_shard_replica_changes(vec![replica_set::Change::Remove(
drop_replica.shard_id,
drop_replica.peer_id,
)]);
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::UpdateCollection(update_operation),
access,
wait_timeout,
)
.await
}
ClusterOperations::CreateShardingKey(create_sharding_key_op) => {
let create_sharding_key = create_sharding_key_op.create_sharding_key;
// Validate that:
// - proper sharding method is used
// - key does not exist yet
//
// If placement suggested:
// - Peers exist
let state = collection.state().await;
match state.config.params.sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
return Err(StorageError::bad_request(
"Shard Key cannot be created with Auto sharding method",
));
}
ShardingMethod::Custom => {}
}
let shard_number = create_sharding_key
.shards_number
.unwrap_or(state.config.params.shard_number)
.get() as usize;
let replication_factor = create_sharding_key
.replication_factor
.unwrap_or(state.config.params.replication_factor)
.get() as usize;
if let Some(initial_state) = create_sharding_key.initial_state {
match initial_state {
replica_set_state::ReplicaState::Active
| replica_set_state::ReplicaState::Partial => {}
_ => {
return Err(StorageError::bad_request(format!(
"Initial state cannot be {initial_state:?}, only Active or Partial are allowed",
)));
}
}
}
let shard_keys_mapping = state.shards_key_mapping;
if shard_keys_mapping.contains_key(&create_sharding_key.shard_key) {
return Err(StorageError::BadRequest {
description: format!(
"Sharding key {} already exists for collection {}",
create_sharding_key.shard_key, collection_name
),
});
}
let peers_pool: Vec<_> = if let Some(placement) = create_sharding_key.placement {
if placement.is_empty() {
return Err(StorageError::BadRequest {
description: format!(
"Sharding key {} placement cannot be empty. If you want to use random placement, do not specify placement",
create_sharding_key.shard_key
),
});
}
for peer_id in placement.iter().copied() {
validate_peer_exists(peer_id)?;
}
placement
} else {
get_all_peer_ids()
};
let exact_placement =
generate_even_placement(peers_pool, shard_number, replication_factor);
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::CreateShardKey(CreateShardKey {
collection_name,
shard_key: create_sharding_key.shard_key,
placement: exact_placement,
initial_state: create_sharding_key.initial_state,
}),
access,
wait_timeout,
)
.await
}
ClusterOperations::DropShardingKey(drop_sharding_key_op) => {
let drop_sharding_key = drop_sharding_key_op.drop_sharding_key;
// Validate that:
// - proper sharding method is used
// - key does exist
let state = collection.state().await;
match state.config.params.sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
return Err(StorageError::bad_request(
"Shard Key cannot be created with Auto sharding method",
));
}
ShardingMethod::Custom => {}
}
let shard_keys_mapping = state.shards_key_mapping;
if !shard_keys_mapping.contains_key(&drop_sharding_key.shard_key) {
return Err(StorageError::BadRequest {
description: format!(
"Sharding key {} does not exist for collection {collection_name}",
drop_sharding_key.shard_key,
),
});
}
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::DropShardKey(DropShardKey {
collection_name,
shard_key: drop_sharding_key.shard_key,
}),
access,
wait_timeout,
)
.await
}
ClusterOperations::RestartTransfer(RestartTransferOperation { restart_transfer }) => {
// TODO(reshading): Deduplicate resharding operations handling?
let RestartTransfer {
shard_id,
to_shard_id,
from_peer_id,
to_peer_id,
method,
} = restart_transfer;
let transfer_key = ShardTransferKey {
shard_id,
to_shard_id,
to: to_peer_id,
from: from_peer_id,
};
if !collection.check_transfer_exists(&transfer_key).await {
return Err(StorageError::NotFound {
description: format!(
"Shard transfer {} -> {} for collection {}:{} does not exist",
transfer_key.from, transfer_key.to, collection_name, transfer_key.shard_id
),
});
}
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::TransferShard(
collection_name,
ShardTransferOperations::Restart(ShardTransferRestart {
shard_id,
to_shard_id,
to: to_peer_id,
from: from_peer_id,
method,
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::StartResharding(op) => {
let StartResharding {
uuid,
direction,
peer_id,
shard_key,
} = op.start_resharding;
if !dispatcher.is_resharding_enabled() {
return Err(StorageError::bad_request(
"resharding is only supported in Qdrant Cloud",
));
}
// Assign random UUID if not specified by user before processing operation on all peers
let uuid = uuid.unwrap_or_else(Uuid::new_v4);
let collection_state = collection.state().await;
if let Some(shard_key) = &shard_key
&& !collection_state.shards_key_mapping.contains_key(shard_key)
{
return Err(StorageError::bad_request(format!(
"sharding key {shard_key} does not exist for collection {collection_name}",
)));
}
let shard_id = match (direction, shard_key.as_ref()) {
// When scaling up, just pick the next shard ID
(ReshardingDirection::Up, _) => {
collection_state
.shards
.keys()
.copied()
.max()
.expect("collection must contain shards")
+ 1
}
// When scaling down without shard keys, pick the last shard ID
(ReshardingDirection::Down, None) => collection_state
.shards
.keys()
.copied()
.max()
.expect("collection must contain shards"),
// When scaling down with shard keys, pick the last shard ID of that key
(ReshardingDirection::Down, Some(shard_key)) => collection_state
.shards_key_mapping
.get(shard_key)
.expect("specified shard key must exist")
.iter()
.copied()
.max()
.expect("collection must contain shards"),
};
let peer_id = match (peer_id, direction) {
// Select user specified peer, but make sure it exists
(Some(peer_id), _) => {
validate_peer_exists(peer_id)?;
peer_id
}
// When scaling up, select peer with least number of shards for this collection
(None, ReshardingDirection::Up) => {
let mut shards_on_peers = collection_state
.shards
.values()
.flat_map(|shard_info| shard_info.replicas.keys())
.fold(HashMap::new(), |mut counts, peer_id| {
*counts.entry(*peer_id).or_insert(0) += 1;
counts
});
for peer_id in get_all_peer_ids() {
// Add registered peers not holding any shard yet
shards_on_peers.entry(peer_id).or_insert(0);
}
shards_on_peers
.into_iter()
.min_by_key(|(_, count)| *count)
.map(|(peer_id, _)| peer_id)
.expect("expected at least one peer")
}
// When scaling down, select random peer that contains the shard we're dropping
// Other peers work, but are less efficient due to remote operations
(None, ReshardingDirection::Down) => collection_state
.shards
.get(&shard_id)
.expect("select shard ID must always exist in collection state")
.replicas
.keys()
.choose(&mut rand::rng())
.copied()
.unwrap(),
};
if let Some(resharding) = &collection_state.resharding {
return Err(StorageError::bad_request(format!(
"resharding {resharding:?} is already in progress \
for collection {collection_name}"
)));
}
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::Resharding(
collection_name.clone(),
ReshardingOperation::Start(ReshardKey {
uuid,
direction,
peer_id,
shard_id,
shard_key,
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::AbortResharding(_) => {
// TODO(reshading): Deduplicate resharding operations handling?
let Some(state) = collection.resharding_state().await else {
return Err(StorageError::bad_request(format!(
"resharding is not in progress for collection {collection_name}"
)));
};
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::Resharding(
collection_name.clone(),
ReshardingOperation::Abort(ReshardKey {
uuid: state.uuid,
direction: state.direction,
peer_id: state.peer_id,
shard_id: state.shard_id,
shard_key: state.shard_key.clone(),
}),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::FinishResharding(_) => {
// TODO(resharding): Deduplicate resharding operations handling?
let Some(state) = collection.resharding_state().await else {
return Err(StorageError::bad_request(format!(
"resharding is not in progress for collection {collection_name}"
)));
};
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::Resharding(
collection_name.clone(),
ReshardingOperation::Finish(state.key()),
),
access,
wait_timeout,
)
.await
}
ClusterOperations::FinishMigratingPoints(op) => {
// TODO(resharding): Deduplicate resharding operations handling?
let Some(state) = collection.resharding_state().await else {
return Err(StorageError::bad_request(format!(
"resharding is not in progress for collection {collection_name}"
)));
};
let op = op.finish_migrating_points;
let shard_id = match (op.shard_id, state.direction) {
(Some(shard_id), _) => shard_id,
(None, ReshardingDirection::Up) => state.shard_id,
(None, ReshardingDirection::Down) => {
return Err(StorageError::bad_request(
"shard ID must be specified when resharding down",
));
}
};
let peer_id = match (op.peer_id, state.direction) {
(Some(peer_id), _) => peer_id,
(None, ReshardingDirection::Up) => state.peer_id,
(None, ReshardingDirection::Down) => {
return Err(StorageError::bad_request(
"peer ID must be specified when resharding down",
));
}
};
let from_state = match state.direction {
ReshardingDirection::Up => replica_set_state::ReplicaState::Resharding,
ReshardingDirection::Down => replica_set_state::ReplicaState::ReshardingScaleDown,
};
dispatcher
.submit_collection_meta_op(
CollectionMetaOperations::SetShardReplicaState(SetShardReplicaState {
collection_name: collection_name.clone(),
shard_id,
peer_id,
state: replica_set_state::ReplicaState::Active,
from_state: Some(from_state),
}),
access,
wait_timeout,
)
.await
}
ClusterOperations::CommitReadHashRing(_) => {
// TODO(reshading): Deduplicate resharding operations handling?
let Some(state) = collection.resharding_state().await else {
return Err(StorageError::bad_request(format!(
"resharding is not in progress for collection {collection_name}"
)));
};
// TODO(resharding): Add precondition checks?
dispatcher
.submit_collection_meta_op(
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/mod.rs | src/common/mod.rs | pub mod auth;
pub mod collections;
pub mod debugger;
pub mod error_reporting;
pub mod health;
pub mod helpers;
pub mod http_client;
pub mod inference;
pub mod metrics;
pub mod pyroscope_state;
pub mod query;
pub mod snapshots;
pub mod stacktrace;
pub mod strict_mode;
pub mod strings;
pub mod telemetry;
pub mod telemetry_ops;
pub mod telemetry_reporting;
pub mod update;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/snapshots.rs | src/common/snapshots.rs | use std::sync::Arc;
use collection::collection::Collection;
use collection::common::sha_256;
use collection::common::snapshot_stream::SnapshotStream;
use collection::operations::snapshot_ops::{
ShardSnapshotLocation, SnapshotDescription, SnapshotPriority,
};
use collection::operations::verification::VerificationPass;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::replica_set::snapshots::RecoveryType;
use collection::shards::shard::ShardId;
use common::tempfile_ext::MaybeTempPath;
use segment::data_types::manifest::SnapshotManifest;
use storage::content_manager::errors::StorageError;
use storage::content_manager::snapshots;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use tokio::sync::OwnedRwLockWriteGuard;
use super::http_client::HttpClient;
/// Create a snapshot of a single shard of a collection.
///
/// Requires collection-level write access (with extras). The number of running snapshot
/// operations is tracked for telemetry for the duration of the call via the scope guard.
///
/// # Cancel safety
///
/// This function is cancel safe.
pub async fn create_shard_snapshot(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: String,
    shard_id: ShardId,
) -> Result<SnapshotDescription, StorageError> {
    let requirements = AccessRequirements::new().write().extras();
    let collection_pass = access.check_collection_access(&collection_name, requirements)?;
    let collection = toc.get_collection(&collection_pass).await?;

    // Keep the telemetry guard alive while the snapshot is being created
    let telemetry = toc.snapshot_telemetry_collector(&collection_name);
    let _running_guard = telemetry.running_snapshots.measure_scope();

    let temp_path = toc.optional_temp_or_snapshot_temp_path()?;
    let description = collection.create_shard_snapshot(shard_id, &temp_path).await?;

    Ok(description)
}
/// Stream a (possibly partial) snapshot of a single shard of a collection.
///
/// When `manifest` is given, a partial snapshot is produced relative to that manifest. If the
/// provided manifest is exactly equal to the shard's current manifest, a specialized
/// `EmptyPartialSnapshot` error is returned instead of producing an empty snapshot.
///
/// # Cancel safety
///
/// This function is cancel safe.
pub async fn stream_shard_snapshot(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: String,
    shard_id: ShardId,
    manifest: Option<SnapshotManifest>,
) -> Result<SnapshotStream, StorageError> {
    let collection_pass = access
        .check_collection_access(&collection_name, AccessRequirements::new().write().extras())?;
    let collection = toc.get_collection(&collection_pass).await?;

    let _telemetry_scope_guard = toc
        .snapshot_telemetry_collector(&collection_name)
        .running_snapshots
        .measure_scope();

    if let Some(old_manifest) = &manifest {
        let current_manifest = collection.get_partial_snapshot_manifest(shard_id).await?;

        // If `old_manifest` is *exactly* the same, as `current_manifest`, return specialized error
        // instead of creating partial snapshot.
        //
        // Snapshot manifest format is flexible, so it *might* be possible that manifests are *not*
        // exactly the same, but resulting partial snapshot will still be "empty", but:
        // - it should *not* happen in practice currently
        // - we intentionally use exact equality as the most "conservative" comparison, just in case
        //
        // BUGFIX: this comparison was garbled by an HTML-entity corruption (`¤t_manifest`);
        // restored to compare against `&current_manifest`.
        if old_manifest == &current_manifest {
            return Err(StorageError::EmptyPartialSnapshot { shard_id });
        }
    }

    // Reuse the collection handle fetched above instead of looking it up a second time
    let snapshot_stream = collection
        .stream_shard_snapshot(
            shard_id,
            manifest,
            &toc.optional_temp_or_snapshot_temp_path()?,
        )
        .await?;

    Ok(snapshot_stream)
}
/// List all snapshots available for a single shard of a collection.
///
/// Only requires read access (with extras), unlike the mutating snapshot APIs.
///
/// # Cancel safety
///
/// This function is cancel safe.
pub async fn list_shard_snapshots(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: String,
    shard_id: ShardId,
) -> Result<Vec<SnapshotDescription>, StorageError> {
    let requirements = AccessRequirements::new().extras();
    let collection_pass = access.check_collection_access(&collection_name, requirements)?;

    let collection = toc.get_collection(&collection_pass).await?;
    let descriptions = collection.list_shard_snapshots(shard_id).await?;

    Ok(descriptions)
}
/// Delete a named snapshot of a single shard of a collection.
///
/// Requires collection-level write access (with extras).
///
/// # Cancel safety
///
/// This function is cancel safe.
pub async fn delete_shard_snapshot(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: String,
    shard_id: ShardId,
    snapshot_name: String,
) -> Result<(), StorageError> {
    let requirements = AccessRequirements::new().write().extras();
    let collection_pass = access.check_collection_access(&collection_name, requirements)?;
    let collection = toc.get_collection(&collection_pass).await?;

    let snapshot_manager = collection.get_snapshots_storage_manager()?;

    // Resolve the storage path of the requested snapshot for this shard
    let snapshot_path = {
        let shards_holder = collection.shards_holder().read().await;
        shards_holder
            .get_shard_snapshot_path(collection.snapshots_path(), shard_id, &snapshot_name)
            .await?
    };

    // Spawn the deletion so it runs to completion even if this request future is dropped
    let delete_task =
        tokio::spawn(async move { snapshot_manager.delete_snapshot(&snapshot_path).await });
    delete_task.await??;

    Ok(())
}
/// Recover a single shard of a collection from a snapshot.
///
/// The snapshot is either downloaded from an HTTP(S) URL or taken from a local snapshot file,
/// optionally verified against a SHA-256 checksum, and then restored via
/// [`recover_shard_snapshot_impl`]. Requires global manage access. The non-cancel-safe
/// recovery step is spawned as a separate task, so it is not aborted midway if the incoming
/// request is cancelled.
///
/// # Cancel safety
///
/// This function is cancel safe.
#[allow(clippy::too_many_arguments)]
pub async fn recover_shard_snapshot(
    toc: Arc<TableOfContent>,
    access: Access,
    collection_name: String,
    shard_id: ShardId,
    snapshot_location: ShardSnapshotLocation,
    snapshot_priority: SnapshotPriority,
    checksum: Option<String>,
    client: HttpClient,
    api_key: Option<String>,
) -> Result<(), StorageError> {
    let collection_pass = access
        .check_global_access(AccessRequirements::new().manage())?
        .issue_pass(&collection_name)
        .into_static();

    // - `recover_shard_snapshot_impl` is *not* cancel safe
    // - but the task is *spawned* on the runtime and won't be cancelled, if request is cancelled
    cancel::future::spawn_cancel_on_drop(async move |cancel| {
        // Everything inside `cancel_safe` (lookup, download, checksum) may be safely aborted
        let cancel_safe = async {
            let collection = toc.get_collection(&collection_pass).await?;
            collection.assert_shard_exists(shard_id).await?;

            let download_dir = toc.optional_temp_or_snapshot_temp_path()?;

            let snapshot_path = match snapshot_location {
                ShardSnapshotLocation::Url(url) => {
                    // Only plain HTTP(S) downloads are supported for remote snapshots
                    if !matches!(url.scheme(), "http" | "https") {
                        let description = format!(
                            "Invalid snapshot URL {url}: URLs with {} scheme are not supported",
                            url.scheme(),
                        );

                        return Err(StorageError::bad_input(description));
                    }

                    let client = client.client(api_key.as_deref())?;

                    snapshots::download::download_snapshot(&client, url, &download_dir).await?
                }

                ShardSnapshotLocation::Path(snapshot_file_name) => {
                    let snapshot_path = collection
                        .shards_holder()
                        .read()
                        .await
                        .get_shard_snapshot_path(
                            collection.snapshots_path(),
                            shard_id,
                            &snapshot_file_name,
                        )
                        .await?;

                    collection
                        .get_snapshots_storage_manager()?
                        .get_snapshot_file(&snapshot_path, &download_dir)
                        .await?
                }
            };

            // Verify the snapshot archive against the user-provided SHA-256 checksum, if any
            if let Some(checksum) = checksum {
                let snapshot_checksum = sha_256::hash_file(&snapshot_path).await?;
                if !sha_256::hashes_equal(&snapshot_checksum, &checksum) {
                    return Err(StorageError::bad_input(format!(
                        "Snapshot checksum mismatch: expected {checksum}, got {snapshot_checksum}"
                    )));
                }
            }

            Ok((collection, snapshot_path))
        };

        let (collection, snapshot_path) =
            cancel::future::cancel_on_token(cancel.clone(), cancel_safe).await??;

        // `recover_shard_snapshot_impl` is *not* cancel safe
        recover_shard_snapshot_impl(
            &toc,
            &collection,
            shard_id,
            snapshot_path,
            snapshot_priority,
            RecoveryType::Full,
            cancel,
        )
        .await
    })
    .await??;

    Ok(())
}
/// Restore a shard snapshot into the local replica and reconcile replica states cluster-wide
/// according to the given `priority`.
///
/// # Cancel safety
///
/// This function is *not* cancel safe.
pub async fn recover_shard_snapshot_impl(
    toc: &TableOfContent,
    collection: &Collection,
    shard: ShardId,
    snapshot_path: MaybeTempPath,
    priority: SnapshotPriority,
    recovery_type: RecoveryType,
    cancel: cancel::CancellationToken,
) -> Result<(), StorageError> {
    // Telemetry: count this call as a running snapshot recovery for its whole duration
    let _recover_tracker_guard = toc
        .snapshot_telemetry_collector(collection.name())
        .running_snapshot_recovery
        .measure_scope();

    // `Collection::restore_shard_snapshot` and `activate_shard` calls *have to* be executed as a
    // single transaction
    //
    // It is *possible* to make this function to be cancel safe, but it is *extremely tedious* to do so
    // TODO: `Collection::restore_shard_snapshot` *is* cancel-safe, but `recover_shard_snapshot_impl` is *not* cancel-safe (yet)
    collection
        .restore_shard_snapshot(
            shard,
            snapshot_path,
            recovery_type,
            toc.this_peer_id,
            toc.is_distributed(),
            &toc.optional_temp_or_snapshot_temp_path()?,
            cancel,
        )
        .await?
        .await?;

    let state = collection.state().await;
    let shard_info = state.shards.get(&shard).unwrap(); // TODO: Handle `unwrap`?..

    // TODO: Unify (and de-duplicate) "recovered shard state notification" logic in `_do_recover_from_snapshot` with this one!

    let other_active_replicas: Vec<_> = shard_info
        .replicas
        .iter()
        .map(|(&peer, &state)| (peer, state))
        .filter(|&(peer, state)| {
            // Check if there are *other* active replicas, after recovering shard snapshot.
            // This should include `ReshardingScaleDown` replicas.
            let is_active = matches!(
                state,
                ReplicaState::Active | ReplicaState::ReshardingScaleDown
            );

            peer != toc.this_peer_id && is_active
        })
        .collect();

    if other_active_replicas.is_empty() || recovery_type.is_partial() {
        // No other active replica to defer to (or partial recovery): activate the local one
        snapshots::recover::activate_shard(toc, collection, toc.this_peer_id, &shard).await?;
    } else {
        match priority {
            SnapshotPriority::NoSync => {
                snapshots::recover::activate_shard(toc, collection, toc.this_peer_id, &shard)
                    .await?;
            }

            SnapshotPriority::Snapshot => {
                // Snapshot wins: activate the recovered replica and mark all other active
                // replicas dead (presumably so they later re-sync from this one — see
                // replica-set recovery logic)
                snapshots::recover::activate_shard(toc, collection, toc.this_peer_id, &shard)
                    .await?;

                for &(peer, _) in other_active_replicas.iter() {
                    toc.send_set_replica_state_proposal(
                        collection.name().to_string(),
                        peer,
                        shard,
                        ReplicaState::Dead,
                        None,
                    )?;
                }
            }

            SnapshotPriority::Replica => {
                // Existing replicas win: mark the locally recovered replica dead instead
                toc.send_set_replica_state_proposal(
                    collection.name().to_string(),
                    toc.this_peer_id,
                    shard,
                    ReplicaState::Dead,
                    None,
                )?;
            }

            // `ShardTransfer` is only used during snapshot *shard transfer*.
            // State transitions are performed as part of shard transfer *later*, so this simply does *nothing*.
            SnapshotPriority::ShardTransfer => (),
        }
    }

    Ok(())
}
/// Try to acquire the partial-snapshot recovery lock for the given shard.
///
/// Requires global manage access; the returned `Option` comes straight from
/// `Collection::try_take_partial_snapshot_recovery_lock`.
pub async fn try_take_partial_snapshot_recovery_lock(
    dispatcher: &Dispatcher,
    collection_name: &str,
    shard_id: ShardId,
    access: &Access,
    pass: &VerificationPass,
) -> Result<Option<OwnedRwLockWriteGuard<()>>, StorageError> {
    let global_pass = access.check_global_access(AccessRequirements::new().manage())?;
    let collection_pass = global_pass.issue_pass(collection_name);

    Ok(dispatcher
        .toc(access, pass)
        .get_collection(&collection_pass)
        .await?
        .try_take_partial_snapshot_recovery_lock(shard_id, RecoveryType::Partial)
        .await?)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/health.rs | src/common/health.rs | use std::collections::HashSet;
use std::future::{self, Future};
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool};
use std::time::Duration;
use std::{panic, thread};
use api::grpc::qdrant::qdrant_internal_client::QdrantInternalClient;
use api::grpc::qdrant::{GetConsensusCommitRequest, GetConsensusCommitResponse};
use api::grpc::transport_channel_pool::{self, TransportChannelPool};
use collection::shards::CollectionId;
use collection::shards::shard::ShardId;
use common::defaults;
use futures::stream::FuturesUnordered;
use futures::{FutureExt as _, StreamExt as _, TryStreamExt as _};
use itertools::Itertools;
use storage::content_manager::consensus_manager::ConsensusStateRef;
use storage::content_manager::toc::TableOfContent;
use storage::rbac::Access;
use tokio::{runtime, sync, time};
const READY_CHECK_TIMEOUT: Duration = Duration::from_millis(500);
const GET_CONSENSUS_COMMITS_RETRIES: usize = 2;
/// Structure used to process health checks like `/readyz` endpoints.
pub struct HealthChecker {
    // The state of the health checker.
    // Once set to `true`, it should not change back to `false`.
    // Initially set to `false`.
    is_ready: Arc<AtomicBool>,

    // The signal that notifies that state has changed.
    // Comes from the health checker task.
    is_ready_signal: Arc<sync::Notify>,

    // Signal to the health checker task, that the API was called.
    // Used to drive the health checker task and avoid constant polling.
    check_ready_signal: Arc<sync::Notify>,

    // Cancels the background task when this `HealthChecker` is dropped.
    _cancel: cancel::DropGuard,
}
impl HealthChecker {
    /// Spawn the background readiness task on `runtime` and return the handle used by
    /// `/readyz` to query readiness. The task and the checker share `is_ready`, the two
    /// `Notify` signals, and a cancellation token (held here as a drop guard).
    pub fn spawn(
        toc: Arc<TableOfContent>,
        consensus_state: ConsensusStateRef,
        runtime: &runtime::Handle,
        wait_for_bootstrap: bool,
    ) -> Self {
        let task = Task {
            toc,
            consensus_state,
            is_ready: Default::default(),
            is_ready_signal: Default::default(),
            check_ready_signal: Default::default(),
            cancel: Default::default(),
            wait_for_bootstrap,
        };

        let health_checker = Self {
            is_ready: task.is_ready.clone(),
            is_ready_signal: task.is_ready_signal.clone(),
            check_ready_signal: task.check_ready_signal.clone(),
            _cancel: task.cancel.clone().drop_guard(),
        };

        let task = runtime.spawn(task.exec());
        drop(task); // drop `JoinFuture` explicitly to make clippy happy

        health_checker
    }

    /// Check readiness: fast path on the flag, otherwise nudge the background task and
    /// wait (bounded) for it to report ready.
    pub async fn check_ready(&self) -> bool {
        if self.is_ready() {
            return true;
        }

        self.notify_task();
        self.wait_ready().await
    }

    pub fn is_ready(&self) -> bool {
        self.is_ready.load(atomic::Ordering::Relaxed)
    }

    /// Signal the background task that a readiness check was requested.
    pub fn notify_task(&self) {
        self.check_ready_signal.notify_one();
    }

    async fn wait_ready(&self) -> bool {
        // Register for the notification *before* re-checking the flag, so a state change
        // between the check below and the `await` cannot be missed.
        let is_ready_signal = self.is_ready_signal.notified();

        if self.is_ready() {
            return true;
        }

        // Bounded wait so `/readyz` responds promptly even when not ready yet
        time::timeout(READY_CHECK_TIMEOUT, is_ready_signal)
            .await
            .is_ok()
    }
}
/// Background task that computes readiness for [`HealthChecker`].
pub struct Task {
    toc: Arc<TableOfContent>,
    consensus_state: ConsensusStateRef,

    // Shared state with the health checker
    // Once set to `true`, it should not change back to `false`.
    is_ready: Arc<AtomicBool>,

    // Used to notify the health checker service that the state has changed.
    is_ready_signal: Arc<sync::Notify>,

    // Driver signal for the health checker task
    // Once received, the task should proceed with an attempt to check the state.
    // Usually comes from the API call, but can be triggered by the task itself.
    check_ready_signal: Arc<sync::Notify>,

    // Cancelling this token stops the task (driven by `HealthChecker`'s drop guard).
    cancel: cancel::CancellationToken,

    // Whether to wait for other peers to join first (set for new `--bootstrap` deployments).
    wait_for_bootstrap: bool,
}
impl Task {
    /// Run the health-check task forever, restarting it if it ever panics.
    pub async fn exec(self) {
        while let Err(err) = self.exec_catch_unwind().await {
            let message = common::panic::downcast_str(&err).unwrap_or("");
            let separator = if !message.is_empty() { ": " } else { "" };

            log::error!("HealthChecker task panicked, retrying{separator}{message}",);
        }
    }

    /// Run one iteration of the task, converting panics into `Err`.
    async fn exec_catch_unwind(&self) -> thread::Result<()> {
        panic::AssertUnwindSafe(self.exec_cancel())
            .catch_unwind()
            .await
    }

    /// Run the task body until completion or until the cancellation token fires.
    async fn exec_cancel(&self) {
        let _ = cancel::future::cancel_on_token(self.cancel.clone(), self.exec_impl()).await;
    }

    /// The readiness state machine: wait for bootstrap (optional), catch up to the cluster
    /// commit index, wait for local shards to become healthy, then flip `is_ready`.
    async fn exec_impl(&self) {
        // Wait until node joins cluster for the first time
        //
        // If this is a new deployment and `--bootstrap` CLI parameter was specified...
        if self.wait_for_bootstrap {
            // Check if this is the only node in the cluster
            while self.consensus_state.peer_count() <= 1 {
                // If cluster is empty, make another attempt to check
                // after we receive another call to `/readyz`
                //
                // Wait for `/readyz` signal
                self.check_ready_signal.notified().await;
            }
        }

        // Artificial simulate signal from `/readyz` endpoint
        // as if it was already called by the user.
        // This allows to check the happy path without waiting for the first call.
        self.check_ready_signal.notify_one();

        // Get estimate of current cluster commit so we can wait for it
        let Some(mut cluster_commit_index) = self.cluster_commit_index(true).await else {
            // Single-node cluster: nothing to catch up with
            self.set_ready();
            return;
        };

        // Wait until local peer has reached cluster commit
        loop {
            while self.commit_index() < cluster_commit_index {
                // Wait for `/readyz` signal
                self.check_ready_signal.notified().await;

                // Ensure we're not the only peer left
                if self.consensus_state.peer_count() <= 1 {
                    self.set_ready();
                    return;
                }
            }

            match self.cluster_commit_index(false).await {
                // If cluster commit is still the same, we caught up and we're done
                Some(new_index) if cluster_commit_index == new_index => break,
                // Cluster commit is newer, update it and wait again
                Some(new_index) => cluster_commit_index = new_index,
                // Failed to get cluster commit, assume we're done
                None => break,
            }
        }

        // Collect "unhealthy" shards list
        let mut unhealthy_shards = self.unhealthy_shards().await;

        // Check if all shards are "healthy"...
        while !unhealthy_shards.is_empty() {
            // If not:
            //
            // - Wait for `/readyz` signal
            self.check_ready_signal.notified().await;

            // - Refresh "unhealthy" shards list
            let current_unhealthy_shards = self.unhealthy_shards().await;

            // - Check if any shards "healed" since last check
            unhealthy_shards.retain(|shard| current_unhealthy_shards.contains(shard));
        }

        self.set_ready();
    }

    /// Get the highest consensus commit across cluster peers
    ///
    /// If `one_peer` is true the first fetched commit is returned. It may not necessarily be the
    /// latest commit.
    ///
    /// Returns `None` when this is the only peer in the cluster.
    async fn cluster_commit_index(&self, one_peer: bool) -> Option<u64> {
        // Wait for `/readyz` signal
        self.check_ready_signal.notified().await;

        // Check if there is only 1 node in the cluster
        if self.consensus_state.peer_count() <= 1 {
            return None;
        }

        // Get *cluster* commit index
        let peer_address_by_id = self.consensus_state.peer_address_by_id();
        let transport_channel_pool = &self.toc.get_channel_service().channel_pool;
        let this_peer_id = self.toc.this_peer_id;
        let this_peer_uri = peer_address_by_id.get(&this_peer_id);

        // Lazily fan out `GetConsensusCommit` requests; failed requests are logged and dropped
        let mut requests = peer_address_by_id
            .values()
            // Do not get the current commit from ourselves
            .filter(|&uri| Some(uri) != this_peer_uri)
            // Historic peers might use the same URLs as our current peers, request each URI once
            .unique()
            .map(|uri| get_consensus_commit(transport_channel_pool, uri))
            .collect::<FuturesUnordered<_>>()
            .inspect_err(|err| log::error!("GetConsensusCommit request failed: {err}"))
            .filter_map(|res| future::ready(res.ok()));

        // Raft commits consensus operation, after majority of nodes persisted it.
        //
        // This means, if we check the majority of nodes (e.g., `total nodes / 2 + 1`), at least one
        // of these nodes will *always* have an up-to-date commit index. And so, the highest commit
        // index among majority of nodes *is* the cluster commit index.
        //
        // Our current node *is* one of the cluster nodes, so it's enough to query `total nodes / 2`
        // *additional* nodes, to get cluster commit index.
        //
        // The check goes like this:
        // - Either at least one of the "additional" nodes return a *higher* commit index, which
        //   means our node is *not* up-to-date, and we have to wait to reach this commit index
        // - Or *all* of them return *lower* commit index, which means current node is *already*
        //   up-to-date, and `/readyz` check will pass to the next step
        //
        // Example:
        //
        // Total nodes: 2
        // Required: 2 / 2 = 1
        //
        // Total nodes: 3
        // Required: 3 / 2 = 1
        //
        // Total nodes: 4
        // Required: 4 / 2 = 2
        //
        // Total nodes: 5
        // Required: 5 / 2 = 2
        let sufficient_commit_indices_count = if !one_peer {
            peer_address_by_id.len() / 2
        } else {
            1
        };

        // *Wait* for `total nodes / 2` successful responses...
        let mut commit_indices: Vec<_> = (&mut requests)
            .take(sufficient_commit_indices_count)
            .collect()
            .await;

        // ...and also collect any additional responses, that we might have *already* received
        while let Ok(Some(resp)) = time::timeout(Duration::ZERO, requests.next()).await {
            commit_indices.push(resp);
        }

        // Find the maximum commit index among all responses.
        //
        // Note, that we progress even if most (or even *all*) requests failed (e.g., because all
        // other nodes are unavailable or they don't support `GetConsensusCommit` gRPC API).
        //
        // So this check is not 100% reliable and can give a false-positive result!
        let cluster_commit_index = commit_indices
            .into_iter()
            .map(|resp| resp.into_inner().commit)
            .max()
            .unwrap_or(0);

        Some(cluster_commit_index as _)
    }

    /// Last applied consensus entry on *this* peer (0 if none).
    fn commit_index(&self) -> u64 {
        // TODO: Blocking call in async context!?
        self.consensus_state
            .persistent
            .read()
            .last_applied_entry()
            .unwrap_or(0)
    }

    /// List shards that are unhealthy, which may undergo automatic recovery.
    ///
    /// Shards in resharding state are not considered unhealthy and are excluded here.
    /// They require an external driver to make them active or to drop them.
    async fn unhealthy_shards(&self) -> HashSet<Shard> {
        let this_peer_id = self.toc.this_peer_id;

        let collections = self
            .toc
            .all_collections(&Access::full("For health check"))
            .await;

        let mut unhealthy_shards = HashSet::new();

        for collection_pass in &collections {
            let state = match self.toc.get_collection(collection_pass).await {
                Ok(collection) => collection.state().await,
                // Collection may have been deleted concurrently; skip it
                Err(_) => continue,
            };

            for (&shard, info) in state.shards.iter() {
                // Only consider replicas hosted on this peer
                let Some(state) = info.replicas.get(&this_peer_id) else {
                    continue;
                };

                if state.is_healthy() {
                    continue;
                }

                unhealthy_shards.insert(Shard::new(collection_pass.name(), shard));
            }
        }

        unhealthy_shards
    }

    /// Flip the shared readiness flag and wake everyone waiting on it.
    fn set_ready(&self) {
        self.is_ready.store(true, atomic::Ordering::Relaxed);
        self.is_ready_signal.notify_waiters();
    }
}
/// Build a future that fetches the consensus commit index from the peer at `uri`.
///
/// The returned future is lazy — nothing is sent until it is polled. The request uses the
/// consensus meta-op timeout (both per-request and at the channel level) and a fixed number
/// of retries.
fn get_consensus_commit<'a>(
    transport_channel_pool: &'a TransportChannelPool,
    uri: &'a tonic::transport::Uri,
) -> impl Future<Output = GetConsensusCommitResult> + 'a {
    let channel_timeout = Some(defaults::CONSENSUS_META_OP_WAIT);

    let issue_request = |channel| async {
        let mut grpc_request = tonic::Request::new(GetConsensusCommitRequest {});
        // Per-request deadline mirrors the channel-level timeout
        grpc_request.set_timeout(defaults::CONSENSUS_META_OP_WAIT);
        QdrantInternalClient::new(channel)
            .get_consensus_commit(grpc_request)
            .await
    };

    transport_channel_pool.with_channel_timeout(
        uri,
        issue_request,
        channel_timeout,
        GET_CONSENSUS_COMMITS_RETRIES,
    )
}
/// Outcome of a `GetConsensusCommit` gRPC request to a remote peer: either the peer's
/// response or a transport/request error from the channel pool.
type GetConsensusCommitResult = Result<
    tonic::Response<GetConsensusCommitResponse>,
    transport_channel_pool::RequestError<tonic::Status>,
>;
/// Identifies a shard by collection name and shard ID.
/// Used as a set element when tracking unhealthy shards in the health checker.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
struct Shard {
    collection: CollectionId,
    shard: ShardId,
}

impl Shard {
    pub fn new(collection: impl Into<CollectionId>, shard: ShardId) -> Self {
        Self {
            collection: collection.into(),
            shard,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/stacktrace.rs | src/common/stacktrace.rs | use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// A single resolved symbol of a stack frame; every field may be absent when
/// debug information is unavailable.
#[derive(Deserialize, Serialize, JsonSchema, Debug)]
struct StackTraceSymbol {
    // Symbol (function) name, if resolved
    name: Option<String>,
    // Source file path, if resolved
    file: Option<String>,
    // Line number within `file`, if resolved
    line: Option<u32>,
}
/// One captured stack frame, together with the symbols it resolved to.
#[derive(Deserialize, Serialize, JsonSchema, Debug)]
struct StackTraceFrame {
    symbols: Vec<StackTraceSymbol>,
}
impl StackTraceFrame {
    /// Render the frame as one line: a "<file>:<line> - <name> " segment per symbol,
    /// concatenated in order. Missing fields render as empty strings / `0`.
    #[allow(dead_code)]
    pub fn render(&self) -> String {
        self.symbols
            .iter()
            .map(|symbol| {
                format!(
                    "{}:{} - {} ",
                    symbol.file.as_deref().unwrap_or_default(),
                    symbol.line.unwrap_or_default(),
                    symbol.name.as_deref().unwrap_or_default(),
                )
            })
            .collect()
    }
}
/// Captured stack trace of a single thread, with frames pre-rendered to strings.
#[derive(Deserialize, Serialize, JsonSchema, Debug)]
pub struct ThreadStackTrace {
    // Thread id
    id: u32,
    // Thread name
    name: String,
    // One rendered line per frame (see `StackTraceFrame::render`)
    frames: Vec<String>,
}
/// Stack traces for all threads of the current process.
#[derive(Deserialize, Serialize, JsonSchema, Debug)]
pub struct StackTrace {
    threads: Vec<ThreadStackTrace>,
}
/// Capture stack traces for all threads of the current process.
///
/// Only implemented on Linux with the `stacktrace` feature enabled; on any other
/// configuration an empty trace is returned. The Linux implementation re-executes the
/// current binary with `--stacktrace` via `rstack_self`, which traces this process from
/// the child.
pub fn get_stack_trace() -> StackTrace {
    #[cfg(not(all(target_os = "linux", feature = "stacktrace")))]
    {
        StackTrace { threads: vec![] }
    }

    #[cfg(all(target_os = "linux", feature = "stacktrace"))]
    {
        // NOTE(review): panics if the executable path cannot be resolved or tracing fails —
        // presumably acceptable for a debug-only endpoint, but worth confirming.
        let exe = std::env::current_exe().unwrap();
        let trace =
            rstack_self::trace(std::process::Command::new(exe).arg("--stacktrace")).unwrap();

        StackTrace {
            threads: trace
                .threads()
                .iter()
                .map(|thread| ThreadStackTrace {
                    id: thread.id(),
                    name: thread.name().to_string(),
                    frames: thread
                        .frames()
                        .iter()
                        .map(|frame| {
                            // Convert the raw symbols, then render the frame to one string
                            let frame = StackTraceFrame {
                                symbols: frame
                                    .symbols()
                                    .iter()
                                    .map(|symbol| StackTraceSymbol {
                                        name: symbol.name().map(|name| name.to_string()),
                                        file: symbol.file().map(|file| {
                                            file.to_str().unwrap_or_default().to_string()
                                        }),
                                        line: symbol.line(),
                                    })
                                    .collect(),
                            };
                            frame.render()
                        })
                        .collect(),
                })
                .collect(),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/strings.rs | src/common/strings.rs | /// Constant-time equality for String types
/// Constant-time equality check for string-like values.
///
/// Delegates to the `constant_time_eq` crate so the comparison time does not depend on the
/// position of the first differing byte — avoids timing side channels when comparing secrets
/// such as API keys. (NOTE(review): behavior for unequal lengths is defined by the crate —
/// confirm there if length leakage matters.)
#[inline]
pub fn ct_eq(lhs: impl AsRef<str>, rhs: impl AsRef<str>) -> bool {
    let (left, right) = (lhs.as_ref().as_bytes(), rhs.as_ref().as_bytes());
    constant_time_eq::constant_time_eq(left, right)
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/query.rs | src/common/query.rs | use std::time::Duration;
use api::rest::SearchGroupsRequestInternal;
use collection::collection::distance_matrix::*;
use collection::common::batching::batch_requests;
use collection::grouping::group_by::GroupRequest;
use collection::operations::consistency_params::ReadConsistency;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::*;
use collection::operations::universal_query::collection_query::*;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::ScoredPoint;
use shard::retrieve::record_internal::RecordInternal;
use shard::search::CoreSearchRequestBatch;
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::rbac::Access;
#[allow(clippy::too_many_arguments)]
/// Run a single core search request against `collection_name`.
///
/// Implemented as a batch of one: the request is wrapped into a
/// [`CoreSearchRequestBatch`] and the single result list is unwrapped.
/// An empty batch response is reported as a service error.
pub async fn do_core_search_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: CoreSearchRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<ScoredPoint>, StorageError> {
    let batch = CoreSearchRequestBatch {
        searches: vec![request],
    };
    let mut results = do_core_search_batch_points(
        toc,
        collection_name,
        batch,
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await?
    .into_iter();
    match results.next() {
        Some(points) => Ok(points),
        None => Err(StorageError::service_error("Empty search result")),
    }
}
/// Run many core search requests, grouping consecutive requests that target
/// the same shard selector into a single batched call.
///
/// One future is issued per group via [`TableOfContent::core_search_batch`];
/// all groups are awaited together and the per-request results are returned
/// flattened, in the original request order.
pub async fn do_search_batch_points(
    toc: &TableOfContent,
    collection_name: &str,
    requests: Vec<(CoreSearchRequest, ShardSelectorInternal)>,
    read_consistency: Option<ReadConsistency>,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<Vec<ScoredPoint>>, StorageError> {
    let batch_futures = batch_requests::<
        (CoreSearchRequest, ShardSelectorInternal),
        ShardSelectorInternal,
        Vec<CoreSearchRequest>,
        Vec<_>,
    >(
        requests,
        // Group key: the shard selector of each request.
        |(_, selector)| selector,
        // Accumulate the searches of one group.
        |(search, _), group| {
            group.push(search);
            Ok(())
        },
        // Flush a finished group into one batched search future.
        |selector, group, futures| {
            if !group.is_empty() {
                let batch = CoreSearchRequestBatch { searches: group };
                futures.push(toc.core_search_batch(
                    collection_name,
                    batch,
                    read_consistency,
                    selector,
                    access.clone(),
                    timeout,
                    hw_measurement_acc.clone(),
                ));
            }
            Ok(())
        },
    )?;
    let grouped_results = futures::future::try_join_all(batch_futures).await?;
    Ok(grouped_results.into_iter().flatten().collect())
}
#[allow(clippy::too_many_arguments)]
/// Run a batch of core search requests against a single collection.
///
/// Thin wrapper that forwards all arguments unchanged to
/// [`TableOfContent::core_search_batch`]; returns one result list per
/// search in the batch, or the storage error produced by the collection.
pub async fn do_core_search_batch_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: CoreSearchRequestBatch,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<Vec<ScoredPoint>>, StorageError> {
    toc.core_search_batch(
        collection_name,
        request,
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Search points and aggregate the hits into groups.
///
/// Converts the REST-level request into an internal [`GroupRequest`] and
/// delegates to [`TableOfContent::group`].
pub async fn do_search_point_groups(
    toc: &TableOfContent,
    collection_name: &str,
    request: SearchGroupsRequestInternal,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<GroupsResult, StorageError> {
    toc.group(
        collection_name,
        GroupRequest::from(request),
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Run a recommendation query and aggregate the hits into groups.
///
/// Converts the request into an internal [`GroupRequest`] and delegates to
/// [`TableOfContent::group`] (same code path as search-with-groups).
pub async fn do_recommend_point_groups(
    toc: &TableOfContent,
    collection_name: &str,
    request: RecommendGroupsRequestInternal,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<GroupsResult, StorageError> {
    toc.group(
        collection_name,
        GroupRequest::from(request),
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
/// Resolve the shard selector of every discovery search in the batch, then
/// forward the whole batch to [`TableOfContent::discover_batch`].
pub async fn do_discover_batch_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: DiscoverRequestBatch,
    read_consistency: Option<ReadConsistency>,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<Vec<ScoredPoint>>, StorageError> {
    let mut requests = Vec::with_capacity(request.searches.len());
    for search in request.searches {
        // No explicit shard key means the discovery targets every shard.
        let shard_selector = match search.shard_key {
            Some(shard_key) => ShardSelectorInternal::from(shard_key),
            None => ShardSelectorInternal::All,
        };
        requests.push((search.discover_request, shard_selector));
    }
    toc.discover_batch(
        collection_name,
        requests,
        read_consistency,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Count points matching the request's conditions.
///
/// Thin wrapper forwarding all arguments unchanged to
/// [`TableOfContent::count`].
pub async fn do_count_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: CountRequestInternal,
    read_consistency: Option<ReadConsistency>,
    timeout: Option<Duration>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<CountResult, StorageError> {
    toc.count(
        collection_name,
        request,
        read_consistency,
        timeout,
        shard_selection,
        access,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Retrieve points by id, with optional payload/vector selection carried in
/// the request.
///
/// Thin wrapper forwarding all arguments unchanged to
/// [`TableOfContent::retrieve`].
pub async fn do_get_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: PointRequestInternal,
    read_consistency: Option<ReadConsistency>,
    timeout: Option<Duration>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<RecordInternal>, StorageError> {
    toc.retrieve(
        collection_name,
        request,
        read_consistency,
        timeout,
        shard_selection,
        access,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Scroll (paginate) through points of a collection.
///
/// Thin wrapper forwarding all arguments unchanged to
/// [`TableOfContent::scroll`].
pub async fn do_scroll_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: ScrollRequestInternal,
    read_consistency: Option<ReadConsistency>,
    timeout: Option<Duration>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<ScrollResult, StorageError> {
    toc.scroll(
        collection_name,
        request,
        read_consistency,
        timeout,
        shard_selection,
        access,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Execute a single universal query as a one-element batch and unwrap its
/// only result. An empty batch response is reported as a service error.
pub async fn do_query_points(
    toc: &TableOfContent,
    collection_name: &str,
    request: CollectionQueryRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<ScoredPoint>, StorageError> {
    let mut batch_results = toc
        .query_batch(
            collection_name,
            vec![(request, shard_selection)],
            read_consistency,
            access,
            timeout,
            hw_measurement_acc,
        )
        .await?
        .into_iter();
    match batch_results.next() {
        Some(points) => Ok(points),
        None => Err(StorageError::service_error("Empty query result")),
    }
}
#[allow(clippy::too_many_arguments)]
/// Execute a batch of universal queries, each with its own shard selector.
///
/// Thin wrapper forwarding all arguments unchanged to
/// [`TableOfContent::query_batch`]; returns one result list per query.
pub async fn do_query_batch_points(
    toc: &TableOfContent,
    collection_name: &str,
    requests: Vec<(CollectionQueryRequest, ShardSelectorInternal)>,
    read_consistency: Option<ReadConsistency>,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<Vec<Vec<ScoredPoint>>, StorageError> {
    toc.query_batch(
        collection_name,
        requests,
        read_consistency,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Execute a universal query and aggregate the hits into groups.
///
/// Converts the request into an internal [`GroupRequest`] and delegates to
/// [`TableOfContent::group`].
pub async fn do_query_point_groups(
    toc: &TableOfContent,
    collection_name: &str,
    request: CollectionQueryGroupsRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<GroupsResult, StorageError> {
    toc.group(
        collection_name,
        GroupRequest::from(request),
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
#[allow(clippy::too_many_arguments)]
/// Compute the pairwise distance matrix between sampled points.
///
/// Thin wrapper forwarding all arguments unchanged to
/// [`TableOfContent::search_points_matrix`].
pub async fn do_search_points_matrix(
    toc: &TableOfContent,
    collection_name: &str,
    request: CollectionSearchMatrixRequest,
    read_consistency: Option<ReadConsistency>,
    shard_selection: ShardSelectorInternal,
    access: Access,
    timeout: Option<Duration>,
    hw_measurement_acc: HwMeasurementAcc,
) -> Result<CollectionSearchMatrixResponse, StorageError> {
    toc.search_points_matrix(
        collection_name,
        request,
        read_consistency,
        shard_selection,
        access,
        timeout,
        hw_measurement_acc,
    )
    .await
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/metrics.rs | src/common/metrics.rs | use std::collections::HashMap;
use api::rest::models::HardwareUsage;
use collection::shards::replica_set::replica_set_state::ReplicaState;
use itertools::Itertools;
use prometheus::TextEncoder;
use prometheus::proto::{Counter, Gauge, LabelPair, Metric, MetricFamily, MetricType};
use segment::common::operation_time_statistics::OperationDurationStatistics;
use shard::PeerId;
use storage::types::ConsensusThreadStatus;
use super::telemetry_ops::hardware::HardwareTelemetry;
use crate::common::telemetry::TelemetryData;
use crate::common::telemetry_ops::app_telemetry::{AppBuildTelemetry, AppFeaturesTelemetry};
use crate::common::telemetry_ops::cluster_telemetry::{ClusterStatusTelemetry, ClusterTelemetry};
use crate::common::telemetry_ops::collections_telemetry::{
CollectionTelemetryEnum, CollectionsTelemetry,
};
use crate::common::telemetry_ops::memory_telemetry::MemoryTelemetry;
use crate::common::telemetry_ops::requests_telemetry::{
GrpcTelemetry, RequestsTelemetry, WebApiTelemetry,
};
/// Whitelist for REST endpoints in metrics output.
///
/// Contains selection of search, recommend, scroll and upsert endpoints.
///
/// This array *must* be sorted.
/// Lookups below use `slice::binary_search`, which silently returns wrong
/// results on an unsorted slice.
const REST_ENDPOINT_WHITELIST: &[&str] = &[
    "/collections/{name}/index",
    "/collections/{name}/points",
    "/collections/{name}/points/batch",
    "/collections/{name}/points/count",
    "/collections/{name}/points/delete",
    "/collections/{name}/points/discover",
    "/collections/{name}/points/discover/batch",
    "/collections/{name}/points/facet",
    "/collections/{name}/points/payload",
    "/collections/{name}/points/payload/clear",
    "/collections/{name}/points/payload/delete",
    "/collections/{name}/points/query",
    "/collections/{name}/points/query/batch",
    "/collections/{name}/points/query/groups",
    "/collections/{name}/points/recommend",
    "/collections/{name}/points/recommend/batch",
    "/collections/{name}/points/recommend/groups",
    "/collections/{name}/points/scroll",
    "/collections/{name}/points/search",
    "/collections/{name}/points/search/batch",
    "/collections/{name}/points/search/groups",
    "/collections/{name}/points/search/matrix/offsets",
    "/collections/{name}/points/search/matrix/pairs",
    "/collections/{name}/points/vectors",
    "/collections/{name}/points/vectors/delete",
];
/// Whitelist for GRPC endpoints in metrics output.
///
/// Contains selection of search, recommend, scroll and upsert endpoints.
///
/// This array *must* be sorted.
/// Lookups below use `slice::binary_search`, which silently returns wrong
/// results on an unsorted slice.
const GRPC_ENDPOINT_WHITELIST: &[&str] = &[
    "/qdrant.Points/ClearPayload",
    "/qdrant.Points/Count",
    "/qdrant.Points/Delete",
    "/qdrant.Points/DeletePayload",
    "/qdrant.Points/Discover",
    "/qdrant.Points/DiscoverBatch",
    "/qdrant.Points/Facet",
    "/qdrant.Points/Get",
    "/qdrant.Points/OverwritePayload",
    "/qdrant.Points/Query",
    "/qdrant.Points/QueryBatch",
    "/qdrant.Points/QueryGroups",
    "/qdrant.Points/Recommend",
    "/qdrant.Points/RecommendBatch",
    "/qdrant.Points/RecommendGroups",
    "/qdrant.Points/Scroll",
    "/qdrant.Points/Search",
    "/qdrant.Points/SearchBatch",
    "/qdrant.Points/SearchGroups",
    "/qdrant.Points/SetPayload",
    "/qdrant.Points/UpdateBatch",
    "/qdrant.Points/UpdateVectors",
    "/qdrant.Points/Upsert",
];
/// For REST requests, only report timings when having this HTTP response status.
const REST_TIMINGS_FOR_STATUS: u16 = 200;
/// Encapsulates metrics data in Prometheus format.
pub struct MetricsData {
    // Collected metric families, rendered to text by `format_metrics`.
    metrics: Vec<MetricFamily>,
}
impl MetricsData {
    /// Renders the collected metric families into the Prometheus text
    /// exposition format.
    pub fn format_metrics(&self) -> String {
        // Encoding in-memory metric families into a `String` performs no I/O;
        // a failure here indicates malformed metric data, i.e. a bug. Use
        // `expect` so the panic message says what went wrong.
        TextEncoder::new()
            .encode_to_string(&self.metrics)
            .expect("failed to encode metrics into Prometheus text format")
    }

    /// Creates a new `MetricsData` from telemetry data and an optional prefix for metrics names.
    pub fn new_from_telemetry(telemetry_data: TelemetryData, prefix: Option<&str>) -> Self {
        let mut metrics = MetricsData::empty();
        telemetry_data.add_metrics(&mut metrics, prefix);
        metrics
    }

    /// Adds the given `metric_family` to the `MetricsData` collection, if `Some`.
    fn push_metric(&mut self, metric_family: Option<MetricFamily>) {
        if let Some(metric_family) = metric_family {
            self.metrics.push(metric_family);
        }
    }

    /// Creates an empty collection of Prometheus metric families `MetricsData`.
    /// This should only be used when you explicitly need an empty collection to gather metrics.
    ///
    /// In most cases, you should use [`MetricsData::new_from_telemetry`] to initialize new metrics data.
    fn empty() -> Self {
        Self { metrics: vec![] }
    }
}
/// Telemetry sections that can contribute Prometheus metric families to a
/// [`MetricsData`] collection, optionally namespacing metric names with a
/// caller-supplied prefix.
trait MetricsProvider {
    /// Add metrics definitions for this.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>);
}
impl MetricsProvider for TelemetryData {
    /// Fans out metric collection to every telemetry sub-section that is
    /// present, threading the optional metric-name `prefix` through.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        self.app.add_metrics(metrics, prefix);
        // The local peer id lets the collection metrics classify shard
        // transfers as incoming vs outgoing.
        let this_peer_id = self.cluster.as_ref().and_then(|i| i.this_peer_id());
        self.collections.add_metrics(metrics, prefix, this_peer_id);
        if let Some(cluster) = &self.cluster {
            cluster.add_metrics(metrics, prefix);
        }
        if let Some(requests) = &self.requests {
            requests.add_metrics(metrics, prefix);
        }
        if let Some(hardware) = &self.hardware {
            hardware.add_metrics(metrics, prefix);
        }
        if let Some(mem) = &self.memory {
            mem.add_metrics(metrics, prefix);
        }
        // Linux-only: best-effort process statistics from /proc; a read
        // failure is only logged so metric collection still succeeds.
        #[cfg(target_os = "linux")]
        match procfs_metrics::ProcFsMetrics::collect() {
            Ok(procfs_provider) => procfs_provider.add_metrics(metrics, prefix),
            Err(err) => log::warn!("Error reading procfs infos: {err:?}"),
        };
    }
}
impl MetricsProvider for AppBuildTelemetry {
    /// Emits the constant `app_info` gauge (name/version labels) and then
    /// lets every feature flag report its own metric.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        let info_gauge = gauge(1.0, &[("name", &self.name), ("version", &self.version)]);
        metrics.push_metric(metric_family(
            "app_info",
            "information about qdrant server",
            MetricType::GAUGE,
            vec![info_gauge],
            prefix,
        ));
        for feature in self.features.iter() {
            feature.add_metrics(metrics, prefix);
        }
    }
}
impl MetricsProvider for AppFeaturesTelemetry {
    /// Exposes whether the server runs in recovery mode as a 0/1 gauge.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        // bool -> u8 -> f64 yields exactly 1.0 / 0.0.
        let recovery_flag = f64::from(u8::from(self.recovery_mode));
        metrics.push_metric(metric_family(
            "app_status_recovery_mode",
            "features enabled in qdrant server",
            MetricType::GAUGE,
            vec![gauge(recovery_flag, &[])],
            prefix,
        ))
    }
}
impl CollectionsTelemetry {
    /// Emits all per-collection metric families: optimizer activity, point
    /// and vector counts, replica health, snapshot activity and shard
    /// transfers. `peer_id` (this node's id, when clustered) is used to
    /// classify shard transfers as incoming vs outgoing.
    fn add_metrics(
        &self,
        metrics: &mut MetricsData,
        prefix: Option<&str>,
        peer_id: Option<PeerId>,
    ) {
        metrics.push_metric(metric_family(
            "collections_total",
            "number of collections",
            MetricType::GAUGE,
            vec![gauge(self.number_of_collections as f64, &[])],
            prefix,
        ));
        let num_collections = self.collections.as_ref().map_or(0, |c| c.len());
        // Optimizers
        let mut total_optimizations_running = Vec::with_capacity(num_collections);
        // Min/Max/Expected/Active replicas over all shards.
        let mut total_min_active_replicas = usize::MAX;
        let mut total_max_active_replicas = 0;
        // Points per collection
        let mut points_per_collection = Vec::with_capacity(num_collections);
        // Vectors excluded from index-only requests.
        let mut indexed_only_excluded = Vec::with_capacity(num_collections);
        let mut total_dead_replicas = 0;
        // Snapshot metrics
        let mut snapshots_creation_running = Vec::with_capacity(num_collections);
        let mut snapshots_recovery_running = Vec::with_capacity(num_collections);
        let mut snapshots_created_total = Vec::with_capacity(num_collections);
        let mut vector_count_by_name = Vec::with_capacity(num_collections);
        // Shard transfers
        let mut shard_transfers_in = Vec::with_capacity(num_collections);
        let mut shard_transfers_out = Vec::with_capacity(num_collections);
        for collection in self.collections.iter().flatten() {
            // Only full telemetry carries per-shard details; aggregated
            // entries are skipped for all per-collection metrics below.
            let collection = match collection {
                CollectionTelemetryEnum::Full(collection_telemetry) => collection_telemetry,
                CollectionTelemetryEnum::Aggregated(_) => {
                    continue;
                }
            };
            total_optimizations_running.push(gauge(
                collection.count_optimizers_running() as f64,
                &[("id", &collection.id)],
            ));
            let min_max_active_replicas = collection
                .shards
                .iter()
                .flatten()
                // While resharding up, some (shard) replica sets may still be incomplete during
                // the resharding process. In that case we don't want to consider these replica
                // sets at all in the active replica calculation. This is fine because searches nor
                // updates don't depend on them being available yet.
                //
                // More specifically:
                // - in stage 2 (migrate points) of resharding up we don't rely on the replica
                //   to be available yet. In this stage, these replicas will have the `Resharding`
                //   state.
                // - in stage 3 (replicate) of resharding up we activate the the replica and
                //   replicate to match the configuration replication factor. From this point on we
                //   do rely on the replica to be available. Now one replica will be `Active`, and
                //   the other replicas will be in a transfer state. No replica will have `Resharding`
                //   state.
                //
                // So, during stage 2 of resharding up we don't want to adjust the minimum number
                // of active replicas downwards. During stage 3 we do want it to affect the minimum
                // available replica number. It will be 1 for some time until replication transfers
                // complete.
                //
                // To ignore a (shard) replica set that is in stage 2 of resharding up, we simply
                // check if any of it's replicas is in `Resharding` state.
                .filter(|shard| {
                    !shard
                        .replicate_states
                        .values()
                        .any(|i| matches!(i, ReplicaState::Resharding))
                })
                .map(|shard| {
                    shard
                        .replicate_states
                        .values()
                        // While resharding down, all the replicas that we keep will get the
                        // `ReshardingScaleDown` state for a period of time. We simply consider
                        // these replicas to be active. The `is_active` function already accounts
                        // this.
                        .filter(|state| state.is_active())
                        .count()
                })
                .minmax();
            // Normalize itertools' three-way MinMaxResult into Option<(min, max)>.
            let min_max_active_replicas = match min_max_active_replicas {
                itertools::MinMaxResult::NoElements => None,
                itertools::MinMaxResult::OneElement(one) => Some((one, one)),
                itertools::MinMaxResult::MinMax(min, max) => Some((min, max)),
            };
            if let Some((min, max)) = min_max_active_replicas {
                total_min_active_replicas = total_min_active_replicas.min(min);
                total_max_active_replicas = total_max_active_replicas.max(max);
            }
            points_per_collection.push(gauge(
                collection.count_points() as f64,
                &[("id", &collection.id)],
            ));
            for (vec_name, count) in collection.count_points_per_vector() {
                vector_count_by_name.push(gauge(
                    count as f64,
                    &[("collection", &collection.id), ("vector", &vec_name)],
                ))
            }
            // Sum the excluded-vector counts of all local shards per vector name.
            let points_excluded_from_index_only = collection
                .shards
                .iter()
                .flatten()
                .filter_map(|shard| shard.local.as_ref())
                .filter_map(|local| local.indexed_only_excluded_vectors.as_ref())
                .flatten()
                .fold(
                    HashMap::<&str, usize>::default(),
                    |mut acc, (name, vector_size)| {
                        *acc.entry(name).or_insert(0) += vector_size;
                        acc
                    },
                );
            for (name, vector_size) in points_excluded_from_index_only {
                indexed_only_excluded.push(gauge(
                    vector_size as f64,
                    &[("id", &collection.id), ("vector", name)],
                ))
            }
            // A shard counts as "dead" if any of its replicas is non-active.
            total_dead_replicas += collection
                .shards
                .iter()
                .flatten()
                .filter(|i| i.replicate_states.values().any(|state| !state.is_active()))
                .count();
            // Shard Transfers
            let mut incoming_transfers = 0;
            let mut outgoing_transfers = 0;
            if let Some(this_peer_id) = peer_id {
                for transfer in collection.transfers.iter().flatten() {
                    if transfer.to == this_peer_id {
                        incoming_transfers += 1;
                    }
                    if transfer.from == this_peer_id {
                        outgoing_transfers += 1;
                    }
                }
            }
            shard_transfers_in.push(gauge(
                f64::from(incoming_transfers),
                &[("id", &collection.id)],
            ));
            shard_transfers_out.push(gauge(
                f64::from(outgoing_transfers),
                &[("id", &collection.id)],
            ));
        }
        for snapshot_telemetry in self.snapshots.iter().flatten() {
            let id = &snapshot_telemetry.id;
            snapshots_recovery_running.push(gauge(
                snapshot_telemetry
                    .running_snapshot_recovery
                    .unwrap_or_default() as f64,
                &[("id", id)],
            ));
            snapshots_creation_running.push(gauge(
                snapshot_telemetry.running_snapshots.unwrap_or_default() as f64,
                &[("id", id)],
            ));
            snapshots_created_total.push(counter(
                snapshot_telemetry
                    .total_snapshot_creations
                    .unwrap_or_default() as f64,
                &[("id", id)],
            ));
        }
        let vector_count = vector_count_by_name
            .iter()
            .map(|m| m.get_gauge().get_value())
            .sum::<f64>()
            // The sum of an empty f64 iterator returns `-0`. Since a negative
            // number of vectors is impossible, taking the absolute value is always safe.
            .abs();
        metrics.push_metric(metric_family(
            "collections_vector_total",
            "total number of vectors in all collections",
            MetricType::GAUGE,
            vec![gauge(vector_count, &[])],
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_vectors",
            "amount of vectors grouped by vector name",
            MetricType::GAUGE,
            vector_count_by_name,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_indexed_only_excluded_points",
            "amount of points excluded in indexed_only requests",
            MetricType::GAUGE,
            indexed_only_excluded,
            prefix,
        ));
        // usize::MAX means no shard contributed, so report 0 instead.
        let total_min_active_replicas = if total_min_active_replicas == usize::MAX {
            0
        } else {
            total_min_active_replicas
        };
        metrics.push_metric(metric_family(
            "collection_active_replicas_min",
            "minimum number of active replicas across all shards",
            MetricType::GAUGE,
            vec![gauge(total_min_active_replicas as f64, &[])],
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_active_replicas_max",
            "maximum number of active replicas across all shards",
            MetricType::GAUGE,
            vec![gauge(total_max_active_replicas as f64, &[])],
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_running_optimizations",
            "number of currently running optimization tasks per collection",
            MetricType::GAUGE,
            total_optimizations_running,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_points",
            "approximate amount of points per collection",
            MetricType::GAUGE,
            points_per_collection,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_dead_replicas",
            "total amount of shard replicas in non-active state",
            MetricType::GAUGE,
            vec![gauge(total_dead_replicas as f64, &[])],
            prefix,
        ));
        metrics.push_metric(metric_family(
            "snapshot_creation_running",
            "amount of snapshot creations that are currently running",
            MetricType::GAUGE,
            snapshots_creation_running,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "snapshot_recovery_running",
            "amount of snapshot recovery operations currently running",
            MetricType::GAUGE,
            snapshots_recovery_running,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "snapshot_created_total",
            "total amount of snapshots created",
            MetricType::COUNTER,
            snapshots_created_total,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_shard_transfer_incoming",
            "incoming shard transfers currently running",
            MetricType::GAUGE,
            shard_transfers_in,
            prefix,
        ));
        metrics.push_metric(metric_family(
            "collection_shard_transfer_outgoing",
            "outgoing shard transfers currently running",
            MetricType::GAUGE,
            shard_transfers_out,
            prefix,
        ));
    }
}
impl MetricsProvider for ClusterTelemetry {
    /// Emits whether distributed mode is enabled, then delegates to the
    /// detailed cluster status telemetry when it is available.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        // Exhaustive destructuring: adding a field to ClusterTelemetry forces
        // this site to be revisited.
        let ClusterTelemetry {
            enabled,
            status,
            config: _,
            peers: _,
            peer_metadata: _,
            metadata: _,
        } = self;
        // bool -> u8 -> f64 yields exactly 1.0 / 0.0.
        let enabled_flag = f64::from(u8::from(*enabled));
        metrics.push_metric(metric_family(
            "cluster_enabled",
            "is cluster support enabled",
            MetricType::GAUGE,
            vec![gauge(enabled_flag, &[])],
            prefix,
        ));
        if let Some(status_telemetry) = status {
            status_telemetry.add_metrics(metrics, prefix);
        }
    }
}
impl MetricsProvider for ClusterStatusTelemetry {
    /// Emits cluster-level gauges/counters: peer count, raft term, commit
    /// index, pending operations, voter flag and consensus thread state.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        metrics.push_metric(metric_family(
            "cluster_peers_total",
            "total number of cluster peers",
            MetricType::GAUGE,
            vec![gauge(self.number_of_peers as f64, &[])],
            prefix,
        ));
        metrics.push_metric(metric_family(
            "cluster_term",
            "current cluster term",
            MetricType::COUNTER,
            vec![counter(self.term as f64, &[])],
            prefix,
        ));
        // NOTE(review): the pending-operations and voter gauges below are only
        // emitted when a peer id is known, even though they do not use the
        // `peer_id` label themselves — confirm this nesting is intentional.
        if let Some(ref peer_id) = self.peer_id.map(|p| p.to_string()) {
            metrics.push_metric(metric_family(
                "cluster_commit",
                "index of last committed (finalized) operation cluster peer is aware of",
                MetricType::COUNTER,
                vec![counter(self.commit as f64, &[("peer_id", peer_id)])],
                prefix,
            ));
            metrics.push_metric(metric_family(
                "cluster_pending_operations_total",
                "total number of pending operations for cluster peer",
                MetricType::GAUGE,
                vec![gauge(self.pending_operations as f64, &[])],
                prefix,
            ));
            metrics.push_metric(metric_family(
                "cluster_voter",
                "is cluster peer a voter or learner",
                MetricType::GAUGE,
                vec![gauge(if self.is_voter { 1.0 } else { 0.0 }, &[])],
                prefix,
            ));
        }
        // Initialize all states so that every state has a zeroed metric by default.
        let mut state_working = 0.0;
        let mut state_stopped = 0.0;
        match &self.consensus_thread_status {
            ConsensusThreadStatus::Working { last_update } => {
                let timestamp = last_update.timestamp();
                // The last-update timestamp is only available while the
                // consensus thread is working.
                metrics.push_metric(metric_family(
                    "cluster_last_update_timestamp_seconds",
                    "unix timestamp of last update",
                    MetricType::COUNTER,
                    vec![counter(timestamp as f64, &[])],
                    prefix,
                ));
                state_working = 1.0;
            }
            ConsensusThreadStatus::Stopped | ConsensusThreadStatus::StoppedWithErr { err: _ } => {
                state_stopped = 1.0;
            }
        }
        let working_states = vec![
            gauge(state_working, &[("state", "working")]),
            gauge(state_stopped, &[("state", "stopped")]),
        ];
        metrics.push_metric(metric_family(
            "cluster_working_state",
            "working state of the cluster",
            MetricType::GAUGE,
            working_states,
            prefix,
        ));
    }
}
impl MetricsProvider for RequestsTelemetry {
    /// Emits request metrics for both the REST and gRPC APIs.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        self.rest.add_metrics(metrics, prefix);
        self.grpc.add_metrics(metrics, prefix);
    }
}
impl MetricsProvider for WebApiTelemetry {
    /// Emits per-endpoint REST response counters, and response timings for
    /// whitelisted endpoints with the configured success status.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        let mut builder = OperationDurationMetricsBuilder::default();
        for (key, responses) in &self.responses {
            // Keys have the shape "METHOD /path"; anything else is skipped.
            let Some((method, path)) = key.split_once(' ') else {
                continue;
            };
            // Endpoint must be whitelisted (whitelist is sorted).
            if REST_ENDPOINT_WHITELIST.binary_search(&path).is_ok() {
                for (status, stats) in responses {
                    let status_label = status.to_string();
                    let labels = [
                        ("method", method),
                        ("endpoint", path),
                        ("status", status_label.as_str()),
                    ];
                    // Timings are only recorded for successful responses.
                    builder.add(stats, &labels, *status == REST_TIMINGS_FOR_STATUS);
                }
            }
        }
        builder.build(prefix, "rest", metrics);
    }
}
impl MetricsProvider for GrpcTelemetry {
    /// Emits per-endpoint gRPC response counters and timings, restricted to
    /// whitelisted endpoints.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        let mut builder = OperationDurationMetricsBuilder::default();
        // Only whitelisted endpoints are reported (whitelist is sorted).
        let whitelisted = self.responses.iter().filter(|(endpoint, _)| {
            GRPC_ENDPOINT_WHITELIST
                .binary_search(&endpoint.as_str())
                .is_ok()
        });
        for (endpoint, stats) in whitelisted {
            builder.add(stats, &[("endpoint", endpoint.as_str())], true);
        }
        builder.build(prefix, "grpc", metrics);
    }
}
impl MetricsProvider for MemoryTelemetry {
    /// Emits one unlabelled gauge per allocator statistic.
    fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
        // (metric name, help text, value); pushed in this order.
        let memory_gauges = [
            (
                "memory_active_bytes",
                "Total number of bytes in active pages allocated by the application",
                self.active_bytes as f64,
            ),
            (
                "memory_allocated_bytes",
                "Total number of bytes allocated by the application",
                self.allocated_bytes as f64,
            ),
            (
                "memory_metadata_bytes",
                "Total number of bytes dedicated to metadata",
                self.metadata_bytes as f64,
            ),
            (
                "memory_resident_bytes",
                "Maximum number of bytes in physically resident data pages mapped",
                self.resident_bytes as f64,
            ),
            (
                "memory_retained_bytes",
                "Total number of bytes in virtual memory mappings",
                self.retained_bytes as f64,
            ),
        ];
        for (name, help, value) in memory_gauges {
            metrics.push_metric(metric_family(
                name,
                help,
                MetricType::GAUGE,
                vec![gauge(value, &[])],
                prefix,
            ));
        }
    }
}
impl HardwareTelemetry {
    /// Builds one counter per collection from a single hardware-usage field
    /// selected by `f`, labelled with the collection id.
    fn make_metric_counters<F: Fn(&HardwareUsage) -> usize>(&self, f: F) -> Vec<Metric> {
        let mut counters = Vec::with_capacity(self.collection_data.len());
        for (collection_id, hw_usage) in self.collection_data.iter() {
            counters.push(counter(f(hw_usage) as f64, &[("id", collection_id)]));
        }
        counters
    }
}
impl MetricsProvider for HardwareTelemetry {
fn add_metrics(&self, metrics: &mut MetricsData, prefix: Option<&str>) {
// MetricType::COUNTER requires non-empty collection data.
if self.collection_data.is_empty() {
return;
}
// Keep a dummy type decomposition of HwUsage here to enforce coverage of new fields in metrics.
// This gets optimized away by the compiler: https://godbolt.org/z/9cMTzcYr4
let HardwareUsage {
cpu: _,
payload_io_read: _,
payload_io_write: _,
payload_index_io_read: _,
payload_index_io_write: _,
vector_io_read: _,
vector_io_write: _,
} = HardwareUsage::default();
metrics.push_metric(metric_family(
"collection_hardware_metric_cpu",
"CPU measurements of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.cpu),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_payload_io_read",
"Total IO payload read metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.payload_io_read),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_payload_index_io_read",
"Total IO payload index read metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.payload_index_io_read),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_payload_index_io_write",
"Total IO payload index write metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.payload_index_io_write),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_payload_io_write",
"Total IO payload write metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.payload_io_write),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_vector_io_read",
"Total IO vector read metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.vector_io_read),
prefix,
));
metrics.push_metric(metric_family(
"collection_hardware_metric_vector_io_write",
"Total IO vector write metrics of a collection",
MetricType::COUNTER,
self.make_metric_counters(|hw| hw.vector_io_write),
prefix,
));
}
}
/// A helper struct to build a vector of [`MetricFamily`] out of a collection of
/// [`OperationDurationStatistics`].
#[derive(Default)]
struct OperationDurationMetricsBuilder {
    // Per-label-set response counters.
    total: Vec<Metric>,
    // Per-label-set failed-response counters.
    fail_total: Vec<Metric>,
    // Timing gauges in seconds; only filled when timings are requested.
    avg_secs: Vec<Metric>,
    min_secs: Vec<Metric>,
    max_secs: Vec<Metric>,
    // Response-duration histograms in seconds.
    duration_histogram_secs: Vec<Metric>,
}
impl OperationDurationMetricsBuilder {
    /// Add metrics for the provided statistics.
    /// If `add_timings` is `false`, only the total and fail_total counters will be added.
    pub fn add(
        &mut self,
        stat: &OperationDurationStatistics,
        labels: &[(&str, &str)],
        add_timings: bool,
    ) {
        self.total.push(counter(stat.count as f64, labels));
        let failures = stat.fail_count.unwrap_or_default() as f64;
        self.fail_total.push(counter(failures, labels));
        if add_timings {
            // Statistics store microseconds; metrics are exposed in seconds.
            let secs = |micros| f64::from(micros) / 1_000_000.0;
            self.avg_secs
                .push(gauge(secs(stat.avg_duration_micros.unwrap_or(0.0)), labels));
            self.min_secs
                .push(gauge(secs(stat.min_duration_micros.unwrap_or(0.0)), labels));
            self.max_secs
                .push(gauge(secs(stat.max_duration_micros.unwrap_or(0.0)), labels));
            let buckets = stat
                .duration_micros_histogram
                .iter()
                .map(|&(bound, count)| (f64::from(bound) / 1_000_000.0, count as u64))
                .collect::<Vec<_>>();
            self.duration_histogram_secs.push(histogram(
                stat.count as u64,
                stat.total_duration_micros.unwrap_or(0) as f64 / 1_000_000.0,
                &buckets,
                labels,
            ));
        }
    }
    /// Build metrics and add them to the provided vector.
    pub fn build(self, global_prefix: Option<&str>, prefix: &str, metrics: &mut MetricsData) {
        let prefix = format!("{}{prefix}_", global_prefix.unwrap_or(""));
        // (name, help, type, collected metrics); pushed in this order.
        let families = [
            (
                "responses_total",
                "total number of responses",
                MetricType::COUNTER,
                self.total,
            ),
            (
                "responses_fail_total",
                "total number of failed responses",
                MetricType::COUNTER,
                self.fail_total,
            ),
            (
                "responses_avg_duration_seconds",
                "average response duration",
                MetricType::GAUGE,
                self.avg_secs,
            ),
            (
                "responses_min_duration_seconds",
                "minimum response duration",
                MetricType::GAUGE,
                self.min_secs,
            ),
            (
                "responses_max_duration_seconds",
                "maximum response duration",
                MetricType::GAUGE,
                self.max_secs,
            ),
            (
                "responses_duration_seconds",
                "response duration histogram",
                MetricType::HISTOGRAM,
                self.duration_histogram_secs,
            ),
        ];
        for (name, help, metric_type, collected) in families {
            metrics.push_metric(metric_family(
                name,
                help,
                metric_type,
                collected,
                Some(&prefix),
            ));
        }
    }
}
fn metric_family(
name: &str,
help: &str,
r#type: MetricType,
metrics: Vec<Metric>,
prefix: Option<&str>,
) -> Option<MetricFamily> {
// We can't create a new `MetricsFamily` without metrics.
if metrics.is_empty() {
return None;
}
let mut metric_family = MetricFamily::default();
let name_with_prefix = prefix
.map(|prefix| format!("{prefix}{name}"))
.unwrap_or_else(|| name.to_string());
metric_family.set_name(name_with_prefix);
metric_family.set_help(help.into());
metric_family.set_field_type(r#type);
metric_family.set_metric(metrics);
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | true |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/debugger.rs | src/common/debugger.rs | use std::sync::Arc;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::common::pyroscope_state::pyro::PyroscopeState;
use crate::settings::Settings;
/// Connection and identification settings for the Pyroscope profiling agent.
#[derive(Serialize, JsonSchema, Debug, Deserialize, Clone)]
pub struct PyroscopeConfig {
    /// URL of the Pyroscope server profiles are shipped to.
    pub url: String,
    /// Name under which this instance reports its profiles.
    pub identifier: String,
    /// Optional user name passed to the agent — usage defined in `PyroscopeState`.
    pub user: Option<String>,
    /// Optional password passed to the agent — usage defined in `PyroscopeState`.
    pub password: Option<String>,
    /// Profiler sampling rate; the agent's default applies when `None`.
    pub sampling_rate: Option<u32>,
}
/// Debugger-related configuration; currently only the optional Pyroscope profiler.
#[derive(Default, Debug, Serialize, JsonSchema, Deserialize, Clone)]
pub struct DebuggerConfig {
    /// Profiling is disabled when `None`.
    pub pyroscope: Option<PyroscopeConfig>,
}
/// Partial update applied to the debugger configuration at runtime.
#[derive(Debug, Serialize, JsonSchema, Deserialize, Clone)]
#[serde(rename_all = "snake_case")]
pub enum DebugConfigPatch {
    /// Replace the Pyroscope config; `None` stops profiling without starting a new agent.
    Pyroscope(Option<PyroscopeConfig>),
}
/// Live debugger state shared across request handlers.
pub struct DebuggerState {
    // Pyroscope profiling is only functional on Linux; on other targets the
    // field is kept (but unused) so the API surface stays identical.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))]
    pub pyroscope: Arc<Mutex<Option<PyroscopeState>>>,
}
impl DebuggerState {
    /// Initialize debugger state from the service settings; the Pyroscope
    /// config, if any, is handed to `PyroscopeState::from_config`.
    pub fn from_settings(settings: &Settings) -> Self {
        let pyroscope_config = settings.debugger.pyroscope.clone();
        Self {
            pyroscope: Arc::new(Mutex::new(PyroscopeState::from_config(pyroscope_config))),
        }
    }
    /// Snapshot of the currently active debugger configuration.
    /// On non-Linux targets profiling is unavailable, so `pyroscope` is always `None`.
    #[cfg_attr(not(target_os = "linux"), allow(clippy::unused_self))]
    pub fn get_config(&self) -> DebuggerConfig {
        let pyroscope_config = {
            #[cfg(target_os = "linux")]
            {
                let pyroscope_state_guard = self.pyroscope.lock();
                pyroscope_state_guard.as_ref().map(|s| s.config.clone())
            }
            #[cfg(not(target_os = "linux"))]
            {
                None
            }
        };
        DebuggerConfig {
            pyroscope: pyroscope_config,
        }
    }
    /// Apply a runtime patch to the debugger config; returns `true` on success.
    ///
    /// Any running Pyroscope agent is stopped first; if stopping fails the
    /// patch is not applied. On non-Linux targets this is a no-op returning `false`.
    #[cfg_attr(not(target_os = "linux"), allow(clippy::unused_self))]
    pub fn apply_config_patch(&self, patch: DebugConfigPatch) -> bool {
        #[cfg(target_os = "linux")]
        {
            match patch {
                DebugConfigPatch::Pyroscope(new_config) => {
                    let mut pyroscope_guard = self.pyroscope.lock();
                    if let Some(pyroscope_state) = pyroscope_guard.as_mut() {
                        let stopped = pyroscope_state.stop_agent();
                        if !stopped {
                            return false;
                        }
                    }
                    // `None` leaves the profiler stopped without replacing it.
                    if let Some(new_config) = new_config {
                        *pyroscope_guard = PyroscopeState::from_config(Some(new_config));
                    }
                    true
                }
            }
        }
        #[cfg(not(target_os = "linux"))]
        {
            let _ = patch; // Ignore new_config on non-linux OS
            false
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/strict_mode.rs | src/common/strict_mode.rs | use std::sync::Arc;
use collection::operations::verification::StrictModeVerification;
use storage::content_manager::collection_verification::{
check_strict_mode, check_strict_mode_batch, check_strict_mode_toc, check_strict_mode_toc_batch,
};
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
/// Trait for different ways of providing something with `toc` that may do additional checks eg. for Strict mode.
pub trait CheckedTocProvider {
    /// Verify a single request (implementation-dependent, e.g. strict mode)
    /// and return the `TableOfContent` on success.
    async fn check_strict_mode(
        &self,
        request: &impl StrictModeVerification,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError>;
    /// Batch variant of [`Self::check_strict_mode`]; `conv` projects each item
    /// of `requests` to the value that actually gets verified.
    async fn check_strict_mode_batch<I, R>(
        &self,
        requests: &[I],
        conv: impl Fn(&I) -> &R,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError>
    where
        R: StrictModeVerification;
}
/// Simple provider for TableOfContent that doesn't do any checks.
#[derive(Clone)]
pub struct UncheckedTocProvider<'a> {
    toc: &'a Arc<TableOfContent>,
}
impl<'a> UncheckedTocProvider<'a> {
    /// Wrap a `TableOfContent` without any verification; the `_unchecked`
    /// suffix signals that callers opt out of checks deliberately.
    pub fn new_unchecked(toc: &'a Arc<TableOfContent>) -> Self {
        Self { toc }
    }
}
impl CheckedTocProvider for UncheckedTocProvider<'_> {
    /// Always succeeds: this provider performs no strict-mode verification.
    async fn check_strict_mode(
        &self,
        _request: &impl StrictModeVerification,
        _collection_name: &str,
        _timeout: Option<usize>,
        _access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError> {
        // No checks here
        Ok(self.toc)
    }
    /// Always succeeds: this provider performs no strict-mode verification.
    async fn check_strict_mode_batch<I, R>(
        &self,
        _requests: &[I],
        _conv: impl Fn(&I) -> &R,
        _collection_name: &str,
        _timeout: Option<usize>,
        _access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError>
    where
        R: StrictModeVerification,
    {
        // No checks here
        Ok(self.toc)
    }
}
/// Provider for TableOfContent that requires Strict mode to be checked.
#[derive(Clone)]
pub struct StrictModeCheckedTocProvider<'a> {
    dispatcher: &'a Dispatcher,
}
impl<'a> StrictModeCheckedTocProvider<'a> {
    /// Create a provider that verifies strict mode through the dispatcher.
    pub fn new(dispatcher: &'a Dispatcher) -> Self {
        Self { dispatcher }
    }
}
impl CheckedTocProvider for StrictModeCheckedTocProvider<'_> {
    /// Run strict-mode verification for `request`; on success the returned
    /// pass is exchanged for the TOC via the dispatcher.
    async fn check_strict_mode(
        &self,
        request: &impl StrictModeVerification,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError> {
        let pass =
            check_strict_mode(request, timeout, collection_name, self.dispatcher, access).await?;
        Ok(self.dispatcher.toc(access, &pass))
    }
    /// Batch variant of [`Self::check_strict_mode`].
    async fn check_strict_mode_batch<I, R>(
        &self,
        requests: &[I],
        conv: impl Fn(&I) -> &R,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError>
    where
        R: StrictModeVerification,
    {
        let pass = check_strict_mode_batch(
            requests.iter().map(conv),
            timeout,
            collection_name,
            self.dispatcher,
            access,
        )
        .await?;
        Ok(self.dispatcher.toc(access, &pass))
    }
}
/// TableOfContent "Provider" for internal API. The `CheckedTocProvider` usually is designed to provide TOC by checking strictmode.
/// However this is not possible in the internal API as it requires `Dispatcher` which we don't have in this case.
///
/// Note: Only use this if you only have access to `TableOfContent` and not `Dispatcher`!
pub(crate) struct StrictModeCheckedInternalTocProvider<'a> {
    toc: &'a Arc<TableOfContent>,
}
impl<'a> StrictModeCheckedInternalTocProvider<'a> {
    /// Wrap a `TableOfContent` for strict-mode checking without a `Dispatcher`.
    pub fn new(toc: &'a Arc<TableOfContent>) -> Self {
        Self { toc }
    }
}
impl CheckedTocProvider for StrictModeCheckedInternalTocProvider<'_> {
    /// Verify strict mode directly against the TOC (no dispatcher/pass involved).
    async fn check_strict_mode(
        &self,
        request: &impl StrictModeVerification,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError> {
        check_strict_mode_toc(request, timeout, collection_name, self.toc, access).await?;
        Ok(self.toc)
    }
    /// Batch variant of [`Self::check_strict_mode`].
    async fn check_strict_mode_batch<I, R>(
        &self,
        requests: &[I],
        conv: impl Fn(&I) -> &R,
        collection_name: &str,
        timeout: Option<usize>,
        access: &Access,
    ) -> Result<&Arc<TableOfContent>, StorageError>
    where
        R: StrictModeVerification,
    {
        check_strict_mode_toc_batch(
            requests.iter().map(conv),
            timeout,
            collection_name,
            self.toc,
            access,
        )
        .await?;
        Ok(self.toc)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/auth/jwt_parser.rs | src/common/auth/jwt_parser.rs | use jsonwebtoken::errors::ErrorKind;
use jsonwebtoken::{Algorithm, DecodingKey, Validation, decode};
use validator::Validate;
use super::AuthError;
use super::claims::Claims;
/// Decodes and validates JWTs signed with the service API key.
#[derive(Clone)]
pub struct JwtParser {
    // Symmetric decoding key derived from the configured secret.
    key: DecodingKey,
    // Validation parameters (algorithm, leeway, required claims).
    validation: Validation,
}
impl JwtParser {
    const ALGORITHM: Algorithm = Algorithm::HS256;
    /// Build a parser whose HMAC key is derived from `secret`
    /// (the read-write API key — see `AuthKeys::get_jwt_parser`).
    pub fn new(secret: &str) -> Self {
        let key = DecodingKey::from_secret(secret.as_bytes());
        let mut validation = Validation::new(Self::ALGORITHM);
        // Audience validation is disabled — the Qdrant server is the only
        // intended audience, so tokens are not expected to carry `aud`.
        validation.validate_aud = false;
        // Expiration time leeway to account for clock skew
        validation.leeway = 30;
        // All claims are optional
        validation.required_spec_claims = Default::default();
        JwtParser { key, validation }
    }
    /// Decode the token and return the claims, this already validates the `exp` claim with some leeway.
    /// Returns None when the token doesn't look like a JWT.
    ///
    /// Outcomes:
    /// - `None`: malformed token — caller may treat it as a plain API key.
    /// - `Some(Err(Forbidden))`: expired or bad signature.
    /// - `Some(Err(Unauthorized))`: signature OK but claims failed validation.
    /// - `Some(Ok(claims))`: fully validated claims.
    pub fn decode(&self, token: &str) -> Option<Result<Claims, AuthError>> {
        let claims = match decode::<Claims>(token, &self.key, &self.validation) {
            Ok(token_data) => token_data.claims,
            Err(e) => {
                return match e.kind() {
                    ErrorKind::ExpiredSignature | ErrorKind::InvalidSignature => {
                        Some(Err(AuthError::Forbidden(e.to_string())))
                    }
                    _ => None,
                };
            }
        };
        if let Err(e) = claims.validate() {
            return Some(Err(AuthError::Unauthorized(e.to_string())));
        }
        Some(Ok(claims))
    }
}
#[cfg(test)]
mod tests {
    use serde_json::json;
    use storage::rbac::{
        Access, CollectionAccess, CollectionAccessList, CollectionAccessMode, GlobalAccessMode,
    };
    use super::*;
    /// Encode `claims` with the well-known test secret `"secret"`.
    pub fn create_token(claims: &Claims) -> String {
        use jsonwebtoken::{EncodingKey, Header, encode};
        let key = EncodingKey::from_secret("secret".as_ref());
        let header = Header::new(JwtParser::ALGORITHM);
        encode(&header, claims, &key).unwrap()
    }
    /// Round-trip: a token signed with the matching secret decodes to equal claims.
    #[test]
    fn test_jwt_parser() {
        let exp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        let claims = Claims {
            sub: None,
            exp: Some(exp),
            access: Access::Collection(CollectionAccessList(vec![CollectionAccess {
                collection: "collection".to_string(),
                access: CollectionAccessMode::ReadWrite,
                #[expect(deprecated)]
                payload: None,
            }])),
            value_exists: None,
        };
        let token = create_token(&claims);
        let secret = "secret";
        let parser = JwtParser::new(secret);
        let decoded_claims = parser.decode(&token).unwrap().unwrap();
        assert_eq!(claims, decoded_claims);
    }
    /// Tokens using the deprecated per-collection `payload` constraint must be rejected.
    #[test]
    fn test_jwt_parser_with_deprecated_payloads() {
        let exp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        let claims = Claims {
            sub: None,
            exp: Some(exp),
            access: Access::Collection(CollectionAccessList(vec![CollectionAccess {
                collection: "collection".to_string(),
                access: CollectionAccessMode::ReadWrite,
                #[expect(deprecated)]
                payload: Some(json!({
                    "field1": "value",
                    "field2": 42,
                    "field3": true,
                })),
            }])),
            value_exists: None,
        };
        let token = create_token(&claims);
        let secret = "secret";
        let parser = JwtParser::new(secret);
        assert!(parser.decode(&token).unwrap().is_err()); // Validation should fail due to PayloadConstraint
    }
    /// Expiration outside the 30-second leeway is rejected; no `exp` claim is accepted.
    #[test]
    fn test_exp_validation() {
        let exp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs()
            - 31; // 31 seconds in the past, bigger than the 30 seconds leeway
        let mut claims = Claims {
            sub: None,
            exp: Some(exp),
            access: Access::Global(GlobalAccessMode::Read),
            value_exists: None,
        };
        let token = create_token(&claims);
        let secret = "secret";
        let parser = JwtParser::new(secret);
        assert!(matches!(
            parser.decode(&token),
            Some(Err(AuthError::Forbidden(_)))
        ));
        // Remove the exp claim and it should work
        claims.exp = None;
        let token = create_token(&claims);
        let decoded_claims = parser.decode(&token).unwrap().unwrap();
        assert_eq!(claims, decoded_claims);
    }
    /// Wrong secret yields Forbidden; a non-JWT string yields `None`.
    #[test]
    fn test_invalid_token() {
        let claims = Claims {
            sub: None,
            exp: None,
            access: Access::Global(GlobalAccessMode::Read),
            value_exists: None,
        };
        let token = create_token(&claims);
        assert!(matches!(
            JwtParser::new("wrong-secret").decode(&token),
            Some(Err(AuthError::Forbidden(_)))
        ));
        assert!(JwtParser::new("secret").decode("foo.bar.baz").is_none());
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/auth/mod.rs | src/common/auth/mod.rs | use std::sync::Arc;
use collection::operations::shard_selector_internal::ShardSelectorInternal;
use collection::operations::types::ScrollRequestInternal;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use segment::types::{WithPayloadInterface, WithVector};
use storage::content_manager::errors::StorageError;
use storage::content_manager::toc::TableOfContent;
use storage::rbac::Access;
use self::claims::{Claims, ValueExists};
use self::jwt_parser::JwtParser;
use super::strings::ct_eq;
use crate::common::inference::token::InferenceToken;
use crate::settings::ServiceConfig;
pub mod claims;
pub mod jwt_parser;
pub const HTTP_HEADER_API_KEY: &str = "api-key";
/// The API keys used for auth
///
/// Holds the static read-write / read-only keys and, when JWT RBAC is enabled,
/// a parser for tokens signed with the read-write key.
#[derive(Clone)]
pub struct AuthKeys {
    /// A key allowing Read or Write operations
    read_write: Option<String>,
    /// A key allowing Read operations
    read_only: Option<String>,
    /// A JWT parser, based on the read_write key
    jwt_parser: Option<JwtParser>,
    /// Table of content, needed to do stateful validation of JWT
    toc: Arc<TableOfContent>,
}
/// Failures produced while authenticating a request.
#[derive(Debug)]
pub enum AuthError {
    /// No valid credentials were provided.
    Unauthorized(String),
    /// Credentials were recognized but do not grant the required access.
    Forbidden(String),
    /// Underlying storage failed while validating credentials.
    StorageError(StorageError),
}
impl AuthKeys {
    /// Build a JWT parser from the read-write API key, but only when JWT RBAC
    /// is enabled in the service config.
    fn get_jwt_parser(service_config: &ServiceConfig) -> Option<JwtParser> {
        if service_config.jwt_rbac.unwrap_or_default() {
            service_config
                .api_key
                .as_ref()
                .map(|secret| JwtParser::new(secret))
        } else {
            None
        }
    }
    /// Defines the auth scheme given the service config
    ///
    /// Returns None if no scheme is specified.
    pub fn try_create(service_config: &ServiceConfig, toc: Arc<TableOfContent>) -> Option<Self> {
        match (
            service_config.api_key.clone(),
            service_config.read_only_api_key.clone(),
        ) {
            (None, None) => None,
            (read_write, read_only) => Some(Self {
                read_write,
                read_only,
                jwt_parser: Self::get_jwt_parser(service_config),
                toc,
            }),
        }
    }
    /// Validate that the specified request is allowed for given keys.
    ///
    /// The key is read from the `api-key` header or an `Authorization: Bearer`
    /// header, then matched in order: read-write key, read-only key, JWT.
    pub async fn validate_request<'a>(
        &self,
        get_header: impl Fn(&'a str) -> Option<&'a str>,
    ) -> Result<(Access, InferenceToken), AuthError> {
        let Some(key) = get_header(HTTP_HEADER_API_KEY)
            .or_else(|| get_header("authorization").and_then(|v| v.strip_prefix("Bearer ")))
        else {
            return Err(AuthError::Unauthorized(
                "Must provide an API key or an Authorization bearer token".to_string(),
            ));
        };
        if self.can_write(key) {
            return Ok((
                Access::full("Read-write access by key"),
                InferenceToken(None),
            ));
        }
        if self.can_read(key) {
            return Ok((
                Access::full_ro("Read-only access by key"),
                InferenceToken(None),
            ));
        }
        if let Some(claims) = self.jwt_parser.as_ref().and_then(|p| p.decode(key)) {
            let Claims {
                sub,
                exp: _, // already validated on decoding
                access,
                value_exists,
            } = claims?;
            // Optional stateful validation: the token is only valid while a
            // matching point exists in the referenced collection.
            if let Some(value_exists) = value_exists {
                self.validate_value_exists(&value_exists).await?;
            }
            return Ok((access, InferenceToken(sub)));
        }
        Err(AuthError::Unauthorized(
            "Invalid API key or JWT".to_string(),
        ))
    }
    /// Check the `value_exists` claim by scrolling for a single matching point
    /// in the referenced collection; a missing collection or no match rejects the token.
    async fn validate_value_exists(&self, value_exists: &ValueExists) -> Result<(), AuthError> {
        let scroll_req = ScrollRequestInternal {
            offset: None,
            limit: Some(1),
            filter: Some(value_exists.to_filter()),
            with_payload: Some(WithPayloadInterface::Bool(false)),
            with_vector: WithVector::Bool(false),
            order_by: None,
        };
        let res = self
            .toc
            .scroll(
                value_exists.get_collection(),
                scroll_req,
                None,
                None, // no timeout
                ShardSelectorInternal::All,
                Access::full("JWT stateful validation"),
                HwMeasurementAcc::disposable(),
            )
            .await
            .map_err(|e| match e {
                StorageError::NotFound { .. } => {
                    AuthError::Forbidden("Invalid JWT, stateful validation failed".to_string())
                }
                _ => AuthError::StorageError(e),
            })?;
        if res.points.is_empty() {
            return Err(AuthError::Unauthorized(
                "Invalid JWT, stateful validation failed".to_string(),
            ));
        };
        Ok(())
    }
    /// Check if a key is allowed to read
    ///
    /// Compared via `ct_eq` (presumably constant-time — see `common::strings`).
    #[inline]
    fn can_read(&self, key: &str) -> bool {
        self.read_only
            .as_ref()
            .is_some_and(|ro_key| ct_eq(ro_key, key))
    }
    /// Check if a key is allowed to write
    ///
    /// Compared via `ct_eq` (presumably constant-time — see `common::strings`).
    #[inline]
    fn can_write(&self, key: &str) -> bool {
        self.read_write
            .as_ref()
            .is_some_and(|rw_key| ct_eq(rw_key, key))
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/auth/claims.rs | src/common/auth/claims.rs | use segment::json_path::JsonPath;
use segment::types::{Condition, FieldCondition, Filter, Match, ValueVariants};
use serde::{Deserialize, Serialize};
use storage::rbac::Access;
use validator::{Validate, ValidationErrors};
/// Claims carried by a Qdrant JWT.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct Claims {
    /// The subject ID; can be a subscription ID, cluster ID, or user ID
    pub sub: Option<String>,
    /// Expiration time (seconds since UNIX epoch)
    pub exp: Option<u64>,
    /// Access rights granted by the token; defaults to full access when the
    /// claim is absent (see `default_access`).
    #[serde(default = "default_access")]
    pub access: Access,
    /// Validate this token by looking for a value inside a collection.
    pub value_exists: Option<ValueExists>,
}
/// A payload key together with the value it must match (used by `ValueExists`).
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct KeyValuePair {
    // JSON path of the payload field to check.
    key: JsonPath,
    // Exact value the field must hold.
    value: ValueVariants,
}
impl KeyValuePair {
    /// Turn this pair into an exact-match field condition.
    pub fn to_condition(&self) -> Condition {
        let value_match = Match::new_value(self.value.clone());
        let field_condition = FieldCondition::new_match(self.key.clone(), value_match);
        Condition::Field(field_condition)
    }
}
/// Stateful token check: the token remains valid only while at least one point
/// in `collection` matches all of `matches`.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct ValueExists {
    collection: String,
    matches: Vec<KeyValuePair>,
}
/// Serde default for `Claims::access`: tokens without an `access` claim get full access.
fn default_access() -> Access {
    Access::full("Give full access when the access field is not present")
}
impl ValueExists {
    /// Collection this stateful check runs against.
    pub fn get_collection(&self) -> &str {
        self.collection.as_str()
    }

    /// Build a filter requiring every configured key/value pair to match.
    pub fn to_filter(&self) -> Filter {
        let mut must_conditions = Vec::with_capacity(self.matches.len());
        for pair in &self.matches {
            must_conditions.push(pair.to_condition());
        }
        Filter {
            should: None,
            min_should: None,
            must: Some(must_conditions),
            must_not: None,
        }
    }
}
impl Validate for Claims {
    /// Claims are valid iff the embedded access rights validate.
    fn validate(&self) -> Result<(), ValidationErrors> {
        ValidationErrors::merge_all(Ok(()), "access", self.access.validate())
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/query_requests_grpc.rs | src/common/inference/query_requests_grpc.rs | use api::conversions::json::json_path_from_proto;
use api::grpc::qdrant::RecommendInput;
use api::grpc::qdrant::query::Variant;
use api::grpc::{InferenceUsage, qdrant as grpc};
use api::rest::{self, LookupLocation, RecommendStrategy};
use collection::operations::universal_query::collection_query::{
CollectionPrefetch, CollectionQueryGroupsRequest, CollectionQueryRequest, Mmr, NearestWithMmr,
Query, VectorInputInternal, VectorQuery,
};
use collection::operations::universal_query::formula::FormulaInternal;
use collection::operations::universal_query::shard_query::{FusionInternal, SampleInternal};
use ordered_float::OrderedFloat;
use segment::data_types::order_by::OrderBy;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, MultiDenseVectorInternal, VectorInternal};
use segment::types::{Filter, PointIdType, SearchParams};
use segment::vector_storage::query::{ContextPair, ContextQuery, DiscoveryQuery, RecoQuery};
use tonic::Status;
use crate::common::inference::batch_processing_grpc::{
BatchAccumGrpc, collect_prefetch, collect_query,
};
use crate::common::inference::infer_processing::BatchAccumInferred;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::service::{InferenceData, InferenceType};
/// ToDo: this function is supposed to call an inference endpoint internally
///
/// Converts a gRPC `QueryPointGroups` into the internal groups request,
/// resolving any document/image/object inputs through batch inference.
/// Returns the converted request together with the inference usage report.
pub async fn convert_query_point_groups_from_grpc(
    query: grpc::QueryPointGroups,
    inference_params: InferenceParams,
) -> Result<(CollectionQueryGroupsRequest, InferenceUsage), Status> {
    let grpc::QueryPointGroups {
        collection_name: _,
        prefetch,
        query,
        using,
        filter,
        params,
        score_threshold,
        with_payload,
        with_vectors,
        lookup_from,
        limit,
        group_size,
        group_by,
        with_lookup,
        read_consistency: _,
        timeout: _,
        shard_key_selector: _,
    } = query;
    // First pass: gather every inference-requiring object from the query tree.
    let mut batch = BatchAccumGrpc::new();
    if let Some(q) = &query {
        collect_query(q, &mut batch)?;
    }
    for p in &prefetch {
        collect_prefetch(p, &mut batch)?;
    }
    let BatchAccumGrpc { objects } = batch;
    // Run inference once for the whole batch.
    let (inferred, usage) =
        BatchAccumInferred::from_objects(objects, InferenceType::Search, inference_params)
            .await
            .map_err(|e| Status::internal(format!("Inference error: {e}")))?;
    // Second pass: rebuild the query and prefetches, substituting inferred vectors.
    let query = if let Some(q) = query {
        Some(convert_query_with_inferred(q, &inferred)?)
    } else {
        None
    };
    let prefetch = prefetch
        .into_iter()
        .map(|p| convert_prefetch_with_inferred(p, &inferred))
        .collect::<Result<Vec<_>, _>>()?;
    let request = CollectionQueryGroupsRequest {
        prefetch,
        query,
        using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
        filter: filter.map(TryFrom::try_from).transpose()?,
        score_threshold,
        with_vector: with_vectors
            .map(From::from)
            .unwrap_or(CollectionQueryRequest::DEFAULT_WITH_VECTOR),
        with_payload: with_payload
            .map(TryFrom::try_from)
            .transpose()?
            .unwrap_or(CollectionQueryRequest::DEFAULT_WITH_PAYLOAD),
        lookup_from: lookup_from.map(LookupLocation::try_from).transpose()?,
        group_by: json_path_from_proto(&group_by)?,
        group_size: group_size
            .map(|s| s as usize)
            .unwrap_or(CollectionQueryRequest::DEFAULT_GROUP_SIZE),
        limit: limit
            .map(|l| l as usize)
            .unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
        params: params.map(From::from),
        with_lookup: with_lookup.map(TryFrom::try_from).transpose()?,
    };
    Ok((request, usage.unwrap_or_default().into()))
}
/// ToDo: this function is supposed to call an inference endpoint internally
///
/// Converts a gRPC `QueryPoints` into the internal query request, resolving
/// any document/image/object inputs through batch inference. Returns the
/// converted request together with the inference usage report.
pub async fn convert_query_points_from_grpc(
    query: grpc::QueryPoints,
    inference_params: InferenceParams,
) -> Result<(CollectionQueryRequest, InferenceUsage), Status> {
    let grpc::QueryPoints {
        collection_name: _,
        prefetch,
        query,
        using,
        filter,
        params,
        score_threshold,
        limit,
        offset,
        with_payload,
        with_vectors,
        read_consistency: _,
        shard_key_selector: _,
        lookup_from,
        timeout: _,
    } = query;
    // First pass: gather every inference-requiring object from the query tree.
    let mut batch = BatchAccumGrpc::new();
    if let Some(q) = &query {
        collect_query(q, &mut batch)?;
    }
    for p in &prefetch {
        collect_prefetch(p, &mut batch)?;
    }
    let BatchAccumGrpc { objects } = batch;
    // Run inference once for the whole batch.
    let (inferred, usage) =
        BatchAccumInferred::from_objects(objects, InferenceType::Search, inference_params)
            .await
            .map_err(|e| Status::internal(format!("Inference error: {e}")))?;
    // Second pass: rebuild the prefetches and query with inferred vectors.
    let prefetch = prefetch
        .into_iter()
        .map(|p| convert_prefetch_with_inferred(p, &inferred))
        .collect::<Result<Vec<_>, _>>()?;
    let query = query
        .map(|q| convert_query_with_inferred(q, &inferred))
        .transpose()?;
    Ok((
        CollectionQueryRequest {
            prefetch,
            query,
            using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
            filter: filter.map(TryFrom::try_from).transpose()?,
            score_threshold,
            limit: limit
                .map(|l| l as usize)
                .unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
            offset: offset
                .map(|o| o as usize)
                .unwrap_or(CollectionQueryRequest::DEFAULT_OFFSET),
            params: params.map(From::from),
            with_vector: with_vectors
                .map(From::from)
                .unwrap_or(CollectionQueryRequest::DEFAULT_WITH_VECTOR),
            with_payload: with_payload
                .map(TryFrom::try_from)
                .transpose()?
                .unwrap_or(CollectionQueryRequest::DEFAULT_WITH_PAYLOAD),
            lookup_from: lookup_from.map(LookupLocation::try_from).transpose()?,
        },
        usage.unwrap_or_default().into(),
    ))
}
/// Recursively convert a gRPC `PrefetchQuery` (including nested prefetches)
/// into the internal form, substituting pre-computed inference results.
fn convert_prefetch_with_inferred(
    prefetch: grpc::PrefetchQuery,
    inferred: &BatchAccumInferred,
) -> Result<CollectionPrefetch, Status> {
    let grpc::PrefetchQuery {
        prefetch,
        query,
        using,
        filter,
        params,
        score_threshold,
        limit,
        lookup_from,
    } = prefetch;
    let nested_prefetches = prefetch
        .into_iter()
        .map(|p| convert_prefetch_with_inferred(p, inferred))
        .collect::<Result<Vec<_>, _>>()?;
    let query = query
        .map(|q| convert_query_with_inferred(q, inferred))
        .transpose()?;
    Ok(CollectionPrefetch {
        prefetch: nested_prefetches,
        query,
        using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
        filter: filter.map(Filter::try_from).transpose()?,
        score_threshold: score_threshold.map(OrderedFloat),
        limit: limit
            .map(|l| l as usize)
            .unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
        params: params.map(SearchParams::from),
        lookup_from: lookup_from.map(LookupLocation::try_from).transpose()?,
    })
}
/// Convert a gRPC `Query` into the internal `Query`, substituting inferred
/// vectors for any document/image/object inputs found in the variant.
fn convert_query_with_inferred(
    query: grpc::Query,
    inferred: &BatchAccumInferred,
) -> Result<Query, Status> {
    let variant = query
        .variant
        .ok_or_else(|| Status::invalid_argument("Query variant is missing"))?;
    let query = match variant {
        // Plain nearest-neighbor search.
        Variant::Nearest(nearest) => {
            let vector = convert_vector_input_with_inferred(nearest, inferred)?;
            Query::Vector(VectorQuery::Nearest(vector))
        }
        // Recommendation by positive/negative examples; the strategy falls
        // back to its default when absent or unrecognized.
        Variant::Recommend(recommend) => {
            let RecommendInput {
                positive,
                negative,
                strategy,
            } = recommend;
            let positives = positive
                .into_iter()
                .map(|v| convert_vector_input_with_inferred(v, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            let negatives = negative
                .into_iter()
                .map(|v| convert_vector_input_with_inferred(v, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            let reco_query = RecoQuery::new(positives, negatives);
            let strategy = strategy
                .and_then(|x| grpc::RecommendStrategy::try_from(x).ok())
                .map(RecommendStrategy::from)
                .unwrap_or_default();
            match strategy {
                RecommendStrategy::AverageVector => {
                    Query::Vector(VectorQuery::RecommendAverageVector(reco_query))
                }
                RecommendStrategy::BestScore => {
                    Query::Vector(VectorQuery::RecommendBestScore(reco_query))
                }
                RecommendStrategy::SumScores => {
                    Query::Vector(VectorQuery::RecommendSumScores(reco_query))
                }
            }
        }
        // Discovery: both target and context are mandatory.
        Variant::Discover(discover) => {
            let grpc::DiscoverInput { target, context } = discover;
            let target = target
                .map(|t| convert_vector_input_with_inferred(t, inferred))
                .transpose()?
                .ok_or_else(|| Status::invalid_argument("DiscoverInput target is missing"))?;
            let grpc::ContextInput { pairs } = context
                .ok_or_else(|| Status::invalid_argument("DiscoverInput context is missing"))?;
            let context = pairs
                .into_iter()
                .map(|pair| context_pair_from_grpc_with_inferred(pair, inferred))
                .collect::<Result<_, _>>()?;
            Query::Vector(VectorQuery::Discover(DiscoveryQuery::new(target, context)))
        }
        Variant::Context(context) => {
            let context_query = context_query_from_grpc_with_inferred(context, inferred)?;
            Query::Vector(VectorQuery::Context(context_query))
        }
        // The remaining variants carry no vector inputs and convert directly.
        Variant::OrderBy(order_by) => Query::OrderBy(OrderBy::try_from(order_by)?),
        Variant::Fusion(fusion) => Query::Fusion(FusionInternal::try_from(fusion)?),
        Variant::Rrf(rrf) => Query::Fusion(FusionInternal::try_from(rrf)?),
        Variant::Formula(formula) => Query::Formula(FormulaInternal::try_from(formula)?),
        Variant::Sample(sample) => Query::Sample(SampleInternal::try_from(sample)?),
        // Nearest search combined with MMR re-ranking parameters.
        Variant::NearestWithMmr(grpc::NearestInputWithMmr { nearest, mmr }) => {
            let nearest =
                nearest.ok_or_else(|| Status::invalid_argument("nearest vector is missing"))?;
            let nearest = convert_vector_input_with_inferred(nearest, inferred)?;
            let mmr = mmr.ok_or_else(|| Status::invalid_argument("mmr is missing"))?;
            let grpc::Mmr {
                diversity,
                candidates_limit,
            } = mmr;
            let mmr = Mmr {
                diversity,
                candidates_limit: candidates_limit.map(|x| x as usize),
            };
            Query::Vector(VectorQuery::NearestWithMmr(NearestWithMmr { nearest, mmr }))
        }
    };
    Ok(query)
}
/// Resolve a gRPC `VectorInput` into an internal point id or vector.
/// Document/image/object variants must already have an entry in `inferred`;
/// a missing entry is reported as an internal error.
fn convert_vector_input_with_inferred(
    vector: grpc::VectorInput,
    inferred: &BatchAccumInferred,
) -> Result<VectorInputInternal, Status> {
    use api::grpc::qdrant::vector_input::Variant;
    let variant = vector
        .variant
        .ok_or_else(|| Status::invalid_argument("VectorInput variant is missing"))?;
    match variant {
        Variant::Id(id) => Ok(VectorInputInternal::Id(PointIdType::try_from(id)?)),
        Variant::Dense(dense) => Ok(VectorInputInternal::Vector(VectorInternal::Dense(
            From::from(dense),
        ))),
        Variant::Sparse(sparse) => Ok(VectorInputInternal::Vector(VectorInternal::Sparse(
            From::from(sparse),
        ))),
        Variant::MultiDense(multi_dense) => Ok(VectorInputInternal::Vector(
            VectorInternal::MultiDense(MultiDenseVectorInternal::from(multi_dense)),
        )),
        // The remaining variants are inference inputs: look up the vector
        // computed earlier for the equivalent REST object.
        Variant::Document(doc) => {
            let doc: rest::Document = doc
                .try_into()
                .map_err(|e| Status::internal(format!("Document conversion error: {e}")))?;
            let data = InferenceData::Document(doc);
            let vector = inferred
                .get_vector(&data)
                .ok_or_else(|| Status::internal("Missing inferred vector for document"))?;
            Ok(VectorInputInternal::Vector(VectorInternal::from(
                vector.clone(),
            )))
        }
        Variant::Image(img) => {
            let img: rest::Image = img
                .try_into()
                .map_err(|e| Status::internal(format!("Image conversion error: {e}",)))?;
            let data = InferenceData::Image(img);
            let vector = inferred
                .get_vector(&data)
                .ok_or_else(|| Status::internal("Missing inferred vector for image"))?;
            Ok(VectorInputInternal::Vector(VectorInternal::from(
                vector.clone(),
            )))
        }
        Variant::Object(obj) => {
            let obj: rest::InferenceObject = obj
                .try_into()
                .map_err(|e| Status::internal(format!("Object conversion error: {e}")))?;
            let data = InferenceData::Object(obj);
            let vector = inferred
                .get_vector(&data)
                .ok_or_else(|| Status::internal("Missing inferred vector for object"))?;
            Ok(VectorInputInternal::Vector(VectorInternal::from(
                vector.clone(),
            )))
        }
    }
}
/// Convert a gRPC `ContextInput` into an internal context query, converting
/// every positive/negative pair via `context_pair_from_grpc_with_inferred`.
fn context_query_from_grpc_with_inferred(
    value: grpc::ContextInput,
    inferred: &BatchAccumInferred,
) -> Result<ContextQuery<VectorInputInternal>, Status> {
    let grpc::ContextInput { pairs } = value;
    let mut converted_pairs = Vec::with_capacity(pairs.len());
    for pair in pairs {
        converted_pairs.push(context_pair_from_grpc_with_inferred(pair, inferred)?);
    }
    Ok(ContextQuery {
        pairs: converted_pairs,
    })
}
/// Convert a gRPC positive/negative pair into the internal representation.
/// Both sides are mandatory; a missing one is an invalid-argument error.
fn context_pair_from_grpc_with_inferred(
    value: grpc::ContextInputPair,
    inferred: &BatchAccumInferred,
) -> Result<ContextPair<VectorInputInternal>, Status> {
    let grpc::ContextInputPair { positive, negative } = value;
    let Some(positive) = positive else {
        return Err(Status::invalid_argument("ContextPair positive is missing"));
    };
    let Some(negative) = negative else {
        return Err(Status::invalid_argument("ContextPair negative is missing"));
    };
    let positive = convert_vector_input_with_inferred(positive, inferred)?;
    let negative = convert_vector_input_with_inferred(negative, inferred)?;
    Ok(ContextPair { positive, negative })
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use api::grpc::qdrant::Value;
    use api::grpc::qdrant::value::Kind;
    use api::grpc::qdrant::vector_input::Variant;
    use collection::operations::point_ops::VectorPersisted;
    use super::*;
    /// Minimal gRPC document fixture shared by the tests below.
    fn create_test_document() -> api::grpc::qdrant::Document {
        api::grpc::qdrant::Document {
            text: "test".to_string(),
            model: "test-model".to_string(),
            options: HashMap::new(),
        }
    }
    /// Minimal gRPC image fixture shared by the tests below.
    fn create_test_image() -> api::grpc::qdrant::Image {
        api::grpc::qdrant::Image {
            image: Some(Value {
                kind: Some(Kind::StringValue("test.jpg".to_string())),
            }),
            model: "test-model".to_string(),
            options: HashMap::new(),
        }
    }
    /// Minimal gRPC inference-object fixture shared by the tests below.
    fn create_test_object() -> api::grpc::qdrant::InferenceObject {
        api::grpc::qdrant::InferenceObject {
            object: Some(Value {
                kind: Some(Kind::StringValue("test".to_string())),
            }),
            model: "test-model".to_string(),
            options: HashMap::new(),
        }
    }
    /// Pre-populated inference results: every fixture maps to [1.0, 2.0, 3.0].
    fn create_test_inferred_batch() -> BatchAccumInferred {
        let mut objects = HashMap::new();
        let grpc_doc = create_test_document();
        let grpc_img = create_test_image();
        let grpc_obj = create_test_object();
        let doc: rest::Document = grpc_doc.try_into().unwrap();
        let img: rest::Image = grpc_img.try_into().unwrap();
        let obj: rest::InferenceObject = grpc_obj.try_into().unwrap();
        let doc_data = InferenceData::Document(doc);
        let img_data = InferenceData::Image(img);
        let obj_data = InferenceData::Object(obj);
        let dense_vector = vec![1.0, 2.0, 3.0];
        let vector_persisted = VectorPersisted::Dense(dense_vector);
        objects.insert(doc_data, vector_persisted.clone());
        objects.insert(img_data, vector_persisted.clone());
        objects.insert(obj_data, vector_persisted);
        BatchAccumInferred { objects }
    }
    /// Dense inputs pass through unchanged (no inference lookup needed).
    #[test]
    fn test_convert_vector_input_with_inferred_dense() {
        let inferred = create_test_inferred_batch();
        let vector = grpc::VectorInput {
            variant: Some(Variant::Dense(grpc::DenseVector {
                data: vec![1.0, 2.0, 3.0],
            })),
        };
        let result = convert_vector_input_with_inferred(vector, &inferred).unwrap();
        match result {
            VectorInputInternal::Vector(VectorInternal::Dense(values)) => {
                assert_eq!(values, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vector"),
        }
    }
    /// Document inputs resolve to the pre-computed inference vector.
    #[test]
    fn test_convert_vector_input_with_inferred_document() {
        let inferred = create_test_inferred_batch();
        let doc = create_test_document();
        let vector = grpc::VectorInput {
            variant: Some(Variant::Document(doc)),
        };
        let result = convert_vector_input_with_inferred(vector, &inferred).unwrap();
        match result {
            VectorInputInternal::Vector(VectorInternal::Dense(values)) => {
                assert_eq!(values, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vector from inference"),
        }
    }
    /// A `VectorInput` without a variant is rejected as invalid.
    #[test]
    fn test_convert_vector_input_missing_variant() {
        let inferred = create_test_inferred_batch();
        let vector = grpc::VectorInput { variant: None };
        let result = convert_vector_input_with_inferred(vector, &inferred);
        assert!(result.is_err());
        assert!(result.unwrap_err().message().contains("variant is missing"));
    }
    /// Mixed dense/document context pairs convert; both sides end up dense.
    #[test]
    fn test_context_pair_from_grpc_with_inferred() {
        let inferred = create_test_inferred_batch();
        let pair = grpc::ContextInputPair {
            positive: Some(grpc::VectorInput {
                variant: Some(Variant::Dense(grpc::DenseVector {
                    data: vec![1.0, 2.0, 3.0],
                })),
            }),
            negative: Some(grpc::VectorInput {
                variant: Some(Variant::Document(create_test_document())),
            }),
        };
        let result = context_pair_from_grpc_with_inferred(pair, &inferred).unwrap();
        match (result.positive, result.negative) {
            (
                VectorInputInternal::Vector(VectorInternal::Dense(pos)),
                VectorInputInternal::Vector(VectorInternal::Dense(neg)),
            ) => {
                assert_eq!(pos, vec![1.0, 2.0, 3.0]);
                assert_eq!(neg, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vectors"),
        }
    }
    /// A context pair missing its positive side is rejected as invalid.
    #[test]
    fn test_context_pair_missing_vectors() {
        let inferred = create_test_inferred_batch();
        let pair = grpc::ContextInputPair {
            positive: None,
            negative: Some(grpc::VectorInput {
                variant: Some(Variant::Document(create_test_document())),
            }),
        };
        let result = context_pair_from_grpc_with_inferred(pair, &inferred);
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .message()
                .contains("positive is missing"),
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/config.rs | src/common/inference/config.rs | use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct InferenceConfig {
pub address: Option<String>,
pub timeout: Option<u64>,
pub token: Option<String>,
}
impl InferenceConfig {
pub fn new(address: Option<String>) -> Self {
Self {
address,
timeout: None,
token: None,
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/update_requests.rs | src/common/inference/update_requests.rs | use std::collections::HashMap;
use api::rest::models::InferenceUsage;
use api::rest::{Batch, BatchVectorStruct, PointStruct, PointVectors, Vector, VectorStruct};
use collection::operations::point_ops::{
BatchPersisted, BatchVectorStructPersisted, PointStructPersisted, VectorPersisted,
VectorStructPersisted,
};
use collection::operations::vector_ops::PointVectorsPersisted;
use storage::content_manager::errors::StorageError;
use crate::common::inference::batch_processing::BatchAccum;
use crate::common::inference::infer_processing::BatchAccumInferred;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::service::{InferenceData, InferenceType};
pub async fn convert_point_struct(
point_structs: Vec<PointStruct>,
inference_type: InferenceType,
inference_params: InferenceParams,
) -> Result<(Vec<PointStructPersisted>, Option<InferenceUsage>), StorageError> {
let mut batch_accum = BatchAccum::new();
for point_struct in &point_structs {
match &point_struct.vector {
VectorStruct::Named(named) => {
for vector in named.values() {
match vector {
Vector::Document(doc) => {
batch_accum.add(InferenceData::Document(doc.clone()))
}
Vector::Image(img) => batch_accum.add(InferenceData::Image(img.clone())),
Vector::Object(obj) => batch_accum.add(InferenceData::Object(obj.clone())),
Vector::Dense(_) | Vector::Sparse(_) | Vector::MultiDense(_) => {}
}
}
}
VectorStruct::Document(doc) => batch_accum.add(InferenceData::Document(doc.clone())),
VectorStruct::Image(img) => batch_accum.add(InferenceData::Image(img.clone())),
VectorStruct::Object(obj) => batch_accum.add(InferenceData::Object(obj.clone())),
VectorStruct::MultiDense(_) | VectorStruct::Single(_) => {}
}
}
let (inferred, usage) = if !batch_accum.objects.is_empty() {
let (inferred_data, usage) =
BatchAccumInferred::from_batch_accum(batch_accum, inference_type, &inference_params)
.await?;
(Some(inferred_data), usage)
} else {
(None, None)
};
let mut converted_points: Vec<PointStructPersisted> = Vec::new();
for point_struct in point_structs {
let PointStruct {
id,
vector,
payload,
} = point_struct;
let converted_vector_struct = match vector {
VectorStruct::Single(single) => VectorStructPersisted::Single(single),
VectorStruct::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
VectorStruct::Named(named) => {
let mut named_vectors = HashMap::new();
for (name, vector_data) in named {
let converted_vector = match &inferred {
Some(inferred) => convert_vector_with_inferred(vector_data, inferred)?,
None => match vector_data {
Vector::Dense(dense) => VectorPersisted::Dense(dense),
Vector::Sparse(sparse) => VectorPersisted::Sparse(sparse),
Vector::MultiDense(multi) => VectorPersisted::MultiDense(multi),
Vector::Document(_) | Vector::Image(_) | Vector::Object(_) => {
return Err(StorageError::inference_error(
"Inference required but service returned no results for named vector",
));
}
},
};
named_vectors.insert(name, converted_vector);
}
VectorStructPersisted::Named(named_vectors)
}
VectorStruct::Document(doc) => {
let vector_data = match &inferred {
Some(inferred) => {
convert_vector_with_inferred(Vector::Document(doc), inferred)?
}
None => {
return Err(StorageError::inference_error(
"Inference required for document but service returned no results",
));
}
};
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from document inference should be named",
));
}
VectorPersisted::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
}
}
VectorStruct::Image(img) => {
let vector_data = match &inferred {
Some(inferred) => convert_vector_with_inferred(Vector::Image(img), inferred)?,
None => {
return Err(StorageError::inference_error(
"Inference required for image but service returned no results",
));
}
};
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from image inference should be named",
));
}
VectorPersisted::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
}
}
VectorStruct::Object(obj) => {
let vector_data = match &inferred {
Some(inferred) => convert_vector_with_inferred(Vector::Object(obj), inferred)?,
None => {
return Err(StorageError::inference_error(
"Inference required for object but service returned no results",
));
}
};
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from object inference should be named",
));
}
VectorPersisted::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
}
}
};
let converted = PointStructPersisted {
id,
vector: converted_vector_struct,
payload,
};
converted_points.push(converted);
}
Ok((converted_points, usage))
}
pub async fn convert_batch(
batch: Batch,
inference_params: InferenceParams,
) -> Result<(BatchPersisted, Option<InferenceUsage>), StorageError> {
let Batch {
ids,
vectors,
payloads,
} = batch;
let mut inference_usage = InferenceUsage::default();
let batch_persisted = match vectors {
BatchVectorStruct::Single(single) => BatchVectorStructPersisted::Single(single),
BatchVectorStruct::MultiDense(multi) => BatchVectorStructPersisted::MultiDense(multi),
BatchVectorStruct::Named(named) => {
let mut named_vectors = HashMap::new();
for (name, vecs) in named {
let (converted_vectors, batch_usage) =
convert_vectors(vecs, InferenceType::Update, inference_params.clone()).await?;
inference_usage.merge_opt(batch_usage);
named_vectors.insert(name, converted_vectors);
}
BatchVectorStructPersisted::Named(named_vectors)
}
BatchVectorStruct::Document(_) => {
return Err(StorageError::inference_error(
"Direct Document processing is not supported in top-level batch vectors. Use named vectors.",
));
}
BatchVectorStruct::Image(_) => {
return Err(StorageError::inference_error(
"Direct Image processing is not supported in top-level batch vectors. Use named vectors.",
));
}
BatchVectorStruct::Object(_) => {
return Err(StorageError::inference_error(
"Direct Object processing is not supported in top-level batch vectors. Use named vectors.",
));
}
};
let batch_persisted = BatchPersisted {
ids,
vectors: batch_persisted,
payloads,
};
Ok((batch_persisted, inference_usage.into_non_empty()))
}
pub async fn convert_point_vectors(
point_vectors_list: Vec<PointVectors>,
inference_type: InferenceType,
inference_params: InferenceParams,
) -> Result<(Vec<PointVectorsPersisted>, Option<InferenceUsage>), StorageError> {
let mut batch_accum = BatchAccum::new();
for point_vectors in &point_vectors_list {
if let VectorStruct::Named(named) = &point_vectors.vector {
for vector in named.values() {
match vector {
Vector::Document(doc) => batch_accum.add(InferenceData::Document(doc.clone())),
Vector::Image(img) => batch_accum.add(InferenceData::Image(img.clone())),
Vector::Object(obj) => batch_accum.add(InferenceData::Object(obj.clone())),
Vector::Dense(_) | Vector::Sparse(_) | Vector::MultiDense(_) => {}
}
}
}
}
let mut inference_usage = InferenceUsage::default();
let inferred = if !batch_accum.objects.is_empty() {
let (inferred_data, usage) =
BatchAccumInferred::from_batch_accum(batch_accum, inference_type, &inference_params)
.await?;
inference_usage.merge_opt(usage);
Some(inferred_data)
} else {
None
};
let mut converted_point_vectors: Vec<PointVectorsPersisted> = Vec::new();
for point_vectors in point_vectors_list {
let PointVectors { id, vector } = point_vectors;
let converted_vector = match vector {
VectorStruct::Single(dense) => VectorStructPersisted::Single(dense),
VectorStruct::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
VectorStruct::Named(named) => {
let mut converted = HashMap::new();
for (name, vec) in named {
let converted_vec = match &inferred {
Some(inferred) => convert_vector_with_inferred(vec, inferred)?,
None => match vec {
Vector::Dense(dense) => VectorPersisted::Dense(dense),
Vector::Sparse(sparse) => VectorPersisted::Sparse(sparse),
Vector::MultiDense(multi) => VectorPersisted::MultiDense(multi),
Vector::Document(_) | Vector::Image(_) | Vector::Object(_) => {
return Err(StorageError::inference_error(
"Inference required for named vector in PointVectors but no results",
));
}
},
};
converted.insert(name, converted_vec);
}
VectorStructPersisted::Named(converted)
}
VectorStruct::Document(_) => {
return Err(StorageError::inference_error(
"Direct Document processing not supported for PointVectors. Use named vectors.",
));
}
VectorStruct::Image(_) => {
return Err(StorageError::inference_error(
"Direct Image processing not supported for PointVectors. Use named vectors.",
));
}
VectorStruct::Object(_) => {
return Err(StorageError::inference_error(
"Direct Object processing not supported for PointVectors. Use named vectors.",
));
}
};
converted_point_vectors.push(PointVectorsPersisted {
id,
vector: converted_vector,
});
}
Ok((converted_point_vectors, inference_usage.into_non_empty()))
}
fn convert_point_struct_with_inferred(
point_structs: Vec<PointStruct>,
inferred: &BatchAccumInferred,
) -> Result<Vec<PointStructPersisted>, StorageError> {
point_structs
.into_iter()
.map(|point_struct| {
let PointStruct {
id,
vector,
payload,
} = point_struct;
let converted_vector_struct = match vector {
VectorStruct::Single(single) => VectorStructPersisted::Single(single),
VectorStruct::MultiDense(multi) => VectorStructPersisted::MultiDense(multi),
VectorStruct::Named(named) => {
let mut named_vectors = HashMap::new();
for (name, vector_data) in named {
let converted_vector = convert_vector_with_inferred(vector_data, inferred)?;
named_vectors.insert(name, converted_vector);
}
VectorStructPersisted::Named(named_vectors)
}
VectorStruct::Document(doc) => {
let vector_data =
convert_vector_with_inferred(Vector::Document(doc), inferred)?;
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from document inference must be named",
));
}
VectorPersisted::MultiDense(multi) => {
VectorStructPersisted::MultiDense(multi)
}
}
}
VectorStruct::Image(img) => {
let vector_data = convert_vector_with_inferred(Vector::Image(img), inferred)?;
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from image inference must be named",
));
}
VectorPersisted::MultiDense(multi) => {
VectorStructPersisted::MultiDense(multi)
}
}
}
VectorStruct::Object(obj) => {
let vector_data = convert_vector_with_inferred(Vector::Object(obj), inferred)?;
match vector_data {
VectorPersisted::Dense(dense) => VectorStructPersisted::Single(dense),
VectorPersisted::Sparse(_) => {
return Err(StorageError::bad_request(
"Sparse vector from object inference must be named",
));
}
VectorPersisted::MultiDense(multi) => {
VectorStructPersisted::MultiDense(multi)
}
}
}
};
Ok(PointStructPersisted {
id,
vector: converted_vector_struct,
payload,
})
})
.collect()
}
pub async fn convert_vectors(
vectors: Vec<Vector>,
inference_type: InferenceType,
inference_params: InferenceParams,
) -> Result<(Vec<VectorPersisted>, Option<InferenceUsage>), StorageError> {
let mut batch_accum = BatchAccum::new();
for vector in &vectors {
match vector {
Vector::Document(doc) => batch_accum.add(InferenceData::Document(doc.clone())),
Vector::Image(img) => batch_accum.add(InferenceData::Image(img.clone())),
Vector::Object(obj) => batch_accum.add(InferenceData::Object(obj.clone())),
Vector::Dense(_) | Vector::Sparse(_) | Vector::MultiDense(_) => {}
}
}
let mut inference_usage = InferenceUsage::default();
let inferred = if !batch_accum.objects.is_empty() {
let (inferred_data, usage) =
BatchAccumInferred::from_batch_accum(batch_accum, inference_type, &inference_params)
.await?;
inference_usage.merge_opt(usage);
Some(inferred_data)
} else {
None
};
let converted_vectors: Result<Vec<VectorPersisted>, StorageError> = vectors
.into_iter()
.map(|vector_data| match &inferred {
Some(inferred) => convert_vector_with_inferred(vector_data, inferred),
None => match vector_data {
Vector::Dense(dense) => Ok(VectorPersisted::Dense(dense)),
Vector::Sparse(sparse) => Ok(VectorPersisted::Sparse(sparse)),
Vector::MultiDense(multi) => Ok(VectorPersisted::MultiDense(multi)),
Vector::Document(_) | Vector::Image(_) | Vector::Object(_) => {
Err(StorageError::inference_error(
"Inference required but no inference service results available",
))
}
},
})
.collect();
converted_vectors.map(|vecs| (vecs, inference_usage.into_non_empty()))
}
fn convert_vector_with_inferred(
vector: Vector,
inferred: &BatchAccumInferred,
) -> Result<VectorPersisted, StorageError> {
match vector {
Vector::Dense(dense) => Ok(VectorPersisted::Dense(dense)),
Vector::Sparse(sparse) => Ok(VectorPersisted::Sparse(sparse)),
Vector::MultiDense(multi) => Ok(VectorPersisted::MultiDense(multi)),
Vector::Document(doc) => {
let data = InferenceData::Document(doc);
inferred.get_vector(&data).cloned().ok_or_else(|| {
StorageError::inference_error("Missing inferred vector for document")
})
}
Vector::Image(img) => {
let data = InferenceData::Image(img);
inferred
.get_vector(&data)
.cloned()
.ok_or_else(|| StorageError::inference_error("Missing inferred vector for image"))
}
Vector::Object(obj) => {
let data = InferenceData::Object(obj);
inferred
.get_vector(&data)
.cloned()
.ok_or_else(|| StorageError::inference_error("Missing inferred vector for object"))
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/batch_processing.rs | src/common/inference/batch_processing.rs | use std::collections::HashSet;
use api::rest::{
ContextInput, ContextPair, DiscoverInput, Prefetch, Query, QueryGroupsRequestInternal,
QueryInterface, QueryRequestInternal, RecommendInput, VectorInput,
};
use super::service::{InferenceData, InferenceInput, InferenceRequest};
pub struct BatchAccum {
pub(crate) objects: HashSet<InferenceData>,
}
impl BatchAccum {
pub fn new() -> Self {
Self {
objects: HashSet::new(),
}
}
pub fn add(&mut self, data: InferenceData) {
self.objects.insert(data);
}
pub fn extend(&mut self, other: BatchAccum) {
self.objects.extend(other.objects);
}
pub fn is_empty(&self) -> bool {
self.objects.is_empty()
}
}
impl From<&BatchAccum> for InferenceRequest {
fn from(batch: &BatchAccum) -> Self {
Self {
inputs: batch
.objects
.iter()
.cloned()
.map(InferenceInput::from)
.collect(),
inference: None,
token: None,
}
}
}
fn collect_vector_input(vector: &VectorInput, batch: &mut BatchAccum) {
match vector {
VectorInput::Document(doc) => batch.add(InferenceData::Document(doc.clone())),
VectorInput::Image(img) => batch.add(InferenceData::Image(img.clone())),
VectorInput::Object(obj) => batch.add(InferenceData::Object(obj.clone())),
// types that are not supported in the Inference Service
VectorInput::DenseVector(_) => {}
VectorInput::SparseVector(_) => {}
VectorInput::MultiDenseVector(_) => {}
VectorInput::Id(_) => {}
}
}
fn collect_context_pair(pair: &ContextPair, batch: &mut BatchAccum) {
collect_vector_input(&pair.positive, batch);
collect_vector_input(&pair.negative, batch);
}
fn collect_discover_input(discover: &DiscoverInput, batch: &mut BatchAccum) {
collect_vector_input(&discover.target, batch);
if let Some(context) = &discover.context {
for pair in context {
collect_context_pair(pair, batch);
}
}
}
fn collect_recommend_input(recommend: &RecommendInput, batch: &mut BatchAccum) {
if let Some(positive) = &recommend.positive {
for vector in positive {
collect_vector_input(vector, batch);
}
}
if let Some(negative) = &recommend.negative {
for vector in negative {
collect_vector_input(vector, batch);
}
}
}
fn collect_query(query: &Query, batch: &mut BatchAccum) {
match query {
Query::Nearest(nearest) => collect_vector_input(&nearest.nearest, batch),
Query::Recommend(recommend) => collect_recommend_input(&recommend.recommend, batch),
Query::Discover(discover) => collect_discover_input(&discover.discover, batch),
Query::Context(context) => {
if let ContextInput(Some(pairs)) = &context.context {
for pair in pairs {
collect_context_pair(pair, batch);
}
}
}
Query::OrderBy(_)
| Query::Fusion(_)
| Query::Rrf(_)
| Query::Formula(_)
| Query::Sample(_) => {}
}
}
fn collect_query_interface(query: &QueryInterface, batch: &mut BatchAccum) {
match query {
QueryInterface::Nearest(vector) => collect_vector_input(vector, batch),
QueryInterface::Query(query) => collect_query(query, batch),
}
}
fn collect_prefetch(prefetch: &Prefetch, batch: &mut BatchAccum) {
let Prefetch {
prefetch,
query,
using: _,
filter: _,
params: _,
score_threshold: _,
limit: _,
lookup_from: _,
} = prefetch;
if let Some(query) = query {
collect_query_interface(query, batch);
}
if let Some(prefetches) = prefetch {
for p in prefetches {
collect_prefetch(p, batch);
}
}
}
pub fn collect_query_groups_request(request: &QueryGroupsRequestInternal) -> BatchAccum {
let mut batch = BatchAccum::new();
let QueryGroupsRequestInternal {
query,
prefetch,
using: _,
filter: _,
params: _,
score_threshold: _,
with_vector: _,
with_payload: _,
lookup_from: _,
group_request: _,
} = request;
if let Some(query) = query {
collect_query_interface(query, &mut batch);
}
if let Some(prefetches) = prefetch {
for prefetch in prefetches {
collect_prefetch(prefetch, &mut batch);
}
}
batch
}
pub fn collect_query_request(request: &QueryRequestInternal) -> BatchAccum {
let mut batch = BatchAccum::new();
let QueryRequestInternal {
prefetch,
query,
using: _,
filter: _,
score_threshold: _,
params: _,
limit: _,
offset: _,
with_vector: _,
with_payload: _,
lookup_from: _,
} = request;
if let Some(query) = query {
collect_query_interface(query, &mut batch);
}
if let Some(prefetches) = prefetch {
for prefetch in prefetches {
collect_prefetch(prefetch, &mut batch);
}
}
batch
}
#[cfg(test)]
mod tests {
use api::rest::QueryBaseGroupRequest;
use api::rest::schema::{DiscoverQuery, Document, Image, InferenceObject, NearestQuery};
use serde_json::json;
use super::*;
fn create_test_document(text: &str) -> Document {
Document {
text: text.to_string(),
model: "test-model".to_string(),
options: Default::default(),
}
}
fn create_test_image(url: &str) -> Image {
Image {
image: json!({"data": url.to_string()}),
model: "test-model".to_string(),
options: Default::default(),
}
}
fn create_test_object(data: &str) -> InferenceObject {
InferenceObject {
object: json!({"data": data}),
model: "test-model".to_string(),
options: Default::default(),
}
}
#[test]
fn test_batch_accum_basic() {
let mut batch = BatchAccum::new();
assert!(batch.objects.is_empty());
let doc = InferenceData::Document(create_test_document("test"));
batch.add(doc.clone());
assert_eq!(batch.objects.len(), 1);
batch.add(doc);
assert_eq!(batch.objects.len(), 1);
}
#[test]
fn test_batch_accum_extend() {
let mut batch1 = BatchAccum::new();
let mut batch2 = BatchAccum::new();
let doc1 = InferenceData::Document(create_test_document("test1"));
let doc2 = InferenceData::Document(create_test_document("test2"));
batch1.add(doc1);
batch2.add(doc2);
batch1.extend(batch2);
assert_eq!(batch1.objects.len(), 2);
}
#[test]
fn test_deduplication() {
let mut batch = BatchAccum::new();
let doc1 = InferenceData::Document(create_test_document("same"));
let doc2 = InferenceData::Document(create_test_document("same"));
batch.add(doc1);
batch.add(doc2);
assert_eq!(batch.objects.len(), 1);
}
#[test]
fn test_collect_vector_input() {
let mut batch = BatchAccum::new();
let doc_input = VectorInput::Document(create_test_document("test"));
let img_input = VectorInput::Image(create_test_image("test.jpg"));
let obj_input = VectorInput::Object(create_test_object("test"));
collect_vector_input(&doc_input, &mut batch);
collect_vector_input(&img_input, &mut batch);
collect_vector_input(&obj_input, &mut batch);
assert_eq!(batch.objects.len(), 3);
}
#[test]
fn test_collect_prefetch() {
let prefetch = Prefetch {
query: Some(QueryInterface::Nearest(VectorInput::Document(
create_test_document("test"),
))),
prefetch: Some(vec![Prefetch {
query: Some(QueryInterface::Nearest(VectorInput::Image(
create_test_image("nested.jpg"),
))),
prefetch: None,
using: None,
filter: None,
params: None,
score_threshold: None,
limit: None,
lookup_from: None,
}]),
using: None,
filter: None,
params: None,
score_threshold: None,
limit: None,
lookup_from: None,
};
let mut batch = BatchAccum::new();
collect_prefetch(&prefetch, &mut batch);
assert_eq!(batch.objects.len(), 2);
}
#[test]
fn test_collect_query_groups_request() {
let request = QueryGroupsRequestInternal {
query: Some(QueryInterface::Query(Query::Nearest(NearestQuery {
nearest: VectorInput::Document(create_test_document("test")),
mmr: None,
}))),
prefetch: Some(vec![Prefetch {
query: Some(QueryInterface::Query(Query::Discover(DiscoverQuery {
discover: DiscoverInput {
target: VectorInput::Image(create_test_image("test.jpg")),
context: Some(vec![ContextPair {
positive: VectorInput::Document(create_test_document("pos")),
negative: VectorInput::Image(create_test_image("neg.jpg")),
}]),
},
}))),
prefetch: None,
using: None,
filter: None,
params: None,
score_threshold: None,
limit: None,
lookup_from: None,
}]),
using: None,
filter: None,
params: None,
score_threshold: None,
with_vector: None,
with_payload: None,
lookup_from: None,
group_request: QueryBaseGroupRequest {
group_by: "test".parse().unwrap(),
group_size: None,
limit: None,
with_lookup: None,
},
};
let batch = collect_query_groups_request(&request);
assert_eq!(batch.objects.len(), 4);
}
#[test]
fn test_different_model_same_content() {
let mut batch = BatchAccum::new();
let mut doc1 = create_test_document("same");
let mut doc2 = create_test_document("same");
doc1.model = "model1".to_string();
doc2.model = "model2".to_string();
batch.add(InferenceData::Document(doc1));
batch.add(InferenceData::Document(doc2));
assert_eq!(batch.objects.len(), 2);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/local_model.rs | src/common/inference/local_model.rs | use collection::operations::point_ops::VectorPersisted;
use storage::content_manager::errors::StorageError;
use super::bm25::Bm25;
use super::service::{InferenceInput, InferenceType};
use crate::common::inference::inference_input::InferenceDataType;
enum LocalModelName {
Bm25,
}
impl LocalModelName {
fn from_str(model_name: &str) -> Option<Self> {
match model_name.to_lowercase().as_str() {
"qdrant/bm25" => Some(LocalModelName::Bm25),
"bm25" => Some(LocalModelName::Bm25),
_ => None,
}
}
}
/// Run inference with only local models.
///
/// # Panics
/// Panics if one inference input did not target a local model.
pub fn infer_local(
inference_inputs: Vec<InferenceInput>,
inference_type: InferenceType,
) -> Result<Vec<VectorPersisted>, StorageError> {
let mut out = Vec::with_capacity(inference_inputs.len());
for input in inference_inputs {
let InferenceInput {
data,
data_type,
model,
options,
} = input;
let Some(model_name) = LocalModelName::from_str(&model) else {
unreachable!(
"Non local model has been passed to infer_local(). This can happen if a newly added model wasn't added to infer_local()"
)
};
// Validate it is text
match data_type {
InferenceDataType::Text => {}
InferenceDataType::Image | InferenceDataType::Object => {
return Err(StorageError::bad_input(format!(
"Only text input is supported for {model}."
)));
}
};
let input_str = data.as_str().ok_or_else(|| {
StorageError::bad_input(format!("Only text input is supported for {model}."))
})?;
let embedding = match model_name {
LocalModelName::Bm25 => {
let bm25_config = InferenceInput::parse_bm25_config(options)?;
let bm25 = Bm25::new(bm25_config);
match inference_type {
InferenceType::Update => bm25.doc_embed(input_str),
InferenceType::Search => bm25.search_embed(input_str),
}
}
};
out.push(embedding);
}
Ok(out)
}
/// Returns `true` if the provided `model_name` targets a local model. Local models
/// are models that are handled by Qdrant and are not forwarded to a remote inference service.
pub fn is_local_model(model_name: &str) -> bool {
let model_name = LocalModelName::from_str(model_name);
model_name.is_some()
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/inference_input.rs | src/common/inference/inference_input.rs | use std::collections::HashMap;
use api::rest::{Bm25Config, Document, DocumentOptions, Image, InferenceObject};
use serde::de::IntoDeserializer;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use storage::content_manager::errors::StorageError;
use super::service::InferenceData;
#[derive(Debug, Serialize, Clone)]
pub struct InferenceInput {
pub data: Value,
pub data_type: InferenceDataType,
pub model: String,
pub options: Option<HashMap<String, Value>>,
}
impl InferenceInput {
/// Attempts to parse the input's options into a local model config.
pub fn parse_bm25_config(
options: Option<HashMap<String, Value>>,
) -> Result<Bm25Config, StorageError> {
let options = options.unwrap_or_default();
Bm25Config::deserialize(options.into_deserializer())
.map_err(|err| StorageError::bad_input(format!("Invalid BM25 config: {err:#?}")))
}
}
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "snake_case")]
pub enum InferenceDataType {
Text,
Image,
Object,
}
impl From<InferenceData> for InferenceInput {
fn from(value: InferenceData) -> Self {
match value {
InferenceData::Document(doc) => {
let Document {
text,
model,
options,
} = doc;
InferenceInput {
data: Value::String(text),
data_type: InferenceDataType::Text,
model,
options: options.map(DocumentOptions::into_options),
}
}
InferenceData::Image(img) => {
let Image {
image,
model,
options,
} = img;
InferenceInput {
data: image,
data_type: InferenceDataType::Image,
model,
options: options.options,
}
}
InferenceData::Object(obj) => {
let InferenceObject {
object,
model,
options,
} = obj;
InferenceInput {
data: object,
data_type: InferenceDataType::Object,
model,
options: options.options,
}
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/query_requests_rest.rs | src/common/inference/query_requests_rest.rs | use api::rest::models::InferenceUsage;
use api::rest::schema as rest;
use collection::lookup::WithLookup;
use collection::operations::universal_query::collection_query::{
CollectionPrefetch, CollectionQueryGroupsRequest, CollectionQueryRequest, Mmr, NearestWithMmr,
Query, VectorInputInternal, VectorQuery,
};
use collection::operations::universal_query::formula::FormulaInternal;
use collection::operations::universal_query::shard_query::{FusionInternal, SampleInternal};
use ordered_float::OrderedFloat;
use segment::data_types::order_by::OrderBy;
use segment::data_types::vectors::{DEFAULT_VECTOR_NAME, MultiDenseVectorInternal, VectorInternal};
use segment::vector_storage::query::{ContextPair, ContextQuery, DiscoveryQuery, RecoQuery};
use storage::content_manager::errors::StorageError;
use crate::common::inference::batch_processing::{
collect_query_groups_request, collect_query_request,
};
use crate::common::inference::infer_processing::BatchAccumInferred;
use crate::common::inference::params::InferenceParams;
use crate::common::inference::service::{InferenceData, InferenceType};
pub struct CollectionQueryRequestWithUsage {
pub request: CollectionQueryRequest,
pub usage: Option<InferenceUsage>,
}
pub struct CollectionQueryGroupsRequestWithUsage {
pub request: CollectionQueryGroupsRequest,
pub usage: Option<InferenceUsage>,
}
pub async fn convert_query_groups_request_from_rest(
request: rest::QueryGroupsRequestInternal,
inference_params: InferenceParams,
) -> Result<CollectionQueryGroupsRequestWithUsage, StorageError> {
let batch = collect_query_groups_request(&request);
let rest::QueryGroupsRequestInternal {
prefetch,
query,
using,
filter,
score_threshold,
params,
with_vector,
with_payload,
lookup_from,
group_request,
} = request;
let (inferred, usage) =
BatchAccumInferred::from_batch_accum(batch, InferenceType::Search, &inference_params)
.await?;
let query = query
.map(|q| convert_query_with_inferred(q, &inferred))
.transpose()?;
let prefetch = prefetch
.map(|prefetches| {
prefetches
.into_iter()
.map(|p| convert_prefetch_with_inferred(p, &inferred))
.collect::<Result<Vec<_>, _>>()
})
.transpose()?
.unwrap_or_default();
let collection_query_groups_request = CollectionQueryGroupsRequest {
prefetch,
query,
using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
filter,
score_threshold,
params,
with_vector: with_vector.unwrap_or(CollectionQueryRequest::DEFAULT_WITH_VECTOR),
with_payload: with_payload.unwrap_or(CollectionQueryRequest::DEFAULT_WITH_PAYLOAD),
lookup_from,
limit: group_request
.limit
.unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
group_by: group_request.group_by,
group_size: group_request
.group_size
.unwrap_or(CollectionQueryRequest::DEFAULT_GROUP_SIZE),
with_lookup: group_request.with_lookup.map(WithLookup::from),
};
Ok(CollectionQueryGroupsRequestWithUsage {
request: collection_query_groups_request,
usage,
})
}
/// Converts a REST query request into the internal collection representation,
/// resolving document/image/object inputs through inference.
///
/// Returns the converted request together with the reported inference usage.
///
/// # Errors
/// Fails if inference fails or if any sub-query/prefetch conversion fails.
pub async fn convert_query_request_from_rest(
    request: rest::QueryRequestInternal,
    inference_params: &InferenceParams,
) -> Result<CollectionQueryRequestWithUsage, StorageError> {
    // Gather every input that may require inference before consuming `request`.
    let batch = collect_query_request(&request);
    // Run inference once for the whole batch; conversions below look their
    // vectors up in `inferred`.
    let (inferred, usage) =
        BatchAccumInferred::from_batch_accum(batch, InferenceType::Search, inference_params)
            .await?;
    let rest::QueryRequestInternal {
        prefetch,
        query,
        using,
        filter,
        score_threshold,
        params,
        limit,
        offset,
        with_vector,
        with_payload,
        lookup_from,
    } = request;
    // Absent prefetch list becomes an empty one.
    let prefetch = prefetch
        .map(|prefetches| {
            prefetches
                .into_iter()
                .map(|p| convert_prefetch_with_inferred(p, &inferred))
                .collect::<Result<Vec<_>, _>>()
        })
        .transpose()?
        .unwrap_or_default();
    let query = query
        .map(|q| convert_query_with_inferred(q, &inferred))
        .transpose()?;
    let collection_query_request = CollectionQueryRequest {
        prefetch,
        query,
        using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
        filter,
        score_threshold,
        limit: limit.unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
        offset: offset.unwrap_or(CollectionQueryRequest::DEFAULT_OFFSET),
        params,
        with_vector: with_vector.unwrap_or(CollectionQueryRequest::DEFAULT_WITH_VECTOR),
        with_payload: with_payload.unwrap_or(CollectionQueryRequest::DEFAULT_WITH_PAYLOAD),
        lookup_from,
    };
    Ok(CollectionQueryRequestWithUsage {
        request: collection_query_request,
        usage,
    })
}
fn convert_vector_input_with_inferred(
vector: rest::VectorInput,
inferred: &BatchAccumInferred,
) -> Result<VectorInputInternal, StorageError> {
match vector {
rest::VectorInput::Id(id) => Ok(VectorInputInternal::Id(id)),
rest::VectorInput::DenseVector(dense) => {
Ok(VectorInputInternal::Vector(VectorInternal::Dense(dense)))
}
rest::VectorInput::SparseVector(sparse) => {
Ok(VectorInputInternal::Vector(VectorInternal::Sparse(sparse)))
}
rest::VectorInput::MultiDenseVector(multi_dense) => Ok(VectorInputInternal::Vector(
VectorInternal::MultiDense(MultiDenseVectorInternal::new_unchecked(multi_dense)),
)),
rest::VectorInput::Document(doc) => {
let data = InferenceData::Document(doc);
let vector = inferred.get_vector(&data).ok_or_else(|| {
StorageError::inference_error("Missing inferred vector for document")
})?;
Ok(VectorInputInternal::Vector(VectorInternal::from(
vector.clone(),
)))
}
rest::VectorInput::Image(img) => {
let data = InferenceData::Image(img);
let vector = inferred.get_vector(&data).ok_or_else(|| {
StorageError::inference_error("Missing inferred vector for image")
})?;
Ok(VectorInputInternal::Vector(VectorInternal::from(
vector.clone(),
)))
}
rest::VectorInput::Object(obj) => {
let data = InferenceData::Object(obj);
let vector = inferred.get_vector(&data).ok_or_else(|| {
StorageError::inference_error("Missing inferred vector for object")
})?;
Ok(VectorInputInternal::Vector(VectorInternal::from(
vector.clone(),
)))
}
}
}
/// Converts a REST query into the internal [`Query`] representation,
/// resolving any vector inputs against the pre-computed `inferred` batch.
///
/// # Errors
/// Fails when a required inferred vector is missing from the batch.
fn convert_query_with_inferred(
    query: rest::QueryInterface,
    inferred: &BatchAccumInferred,
) -> Result<Query, StorageError> {
    let query = rest::Query::from(query);
    match query {
        rest::Query::Nearest(rest::NearestQuery { nearest, mmr }) => {
            let vector = convert_vector_input_with_inferred(nearest, inferred)?;
            // With MMR options present, the nearest search is wrapped into an
            // MMR variant; otherwise a plain nearest query is produced.
            if let Some(mmr) = mmr {
                let mmr = Mmr {
                    diversity: mmr.diversity,
                    candidates_limit: mmr.candidates_limit,
                };
                Ok(Query::Vector(VectorQuery::NearestWithMmr(NearestWithMmr {
                    nearest: vector,
                    mmr,
                })))
            } else {
                Ok(Query::Vector(VectorQuery::Nearest(vector)))
            }
        }
        rest::Query::Recommend(recommend) => {
            let rest::RecommendInput {
                positive,
                negative,
                strategy,
            } = recommend.recommend;
            // `positive`/`negative` are optional lists; `flatten` turns an
            // absent list into no examples.
            let positives = positive
                .into_iter()
                .flatten()
                .map(|v| convert_vector_input_with_inferred(v, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            let negatives = negative
                .into_iter()
                .flatten()
                .map(|v| convert_vector_input_with_inferred(v, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            let reco_query = RecoQuery::new(positives, negatives);
            match strategy.unwrap_or_default() {
                rest::RecommendStrategy::AverageVector => Ok(Query::Vector(
                    VectorQuery::RecommendAverageVector(reco_query),
                )),
                rest::RecommendStrategy::BestScore => {
                    Ok(Query::Vector(VectorQuery::RecommendBestScore(reco_query)))
                }
                rest::RecommendStrategy::SumScores => {
                    Ok(Query::Vector(VectorQuery::RecommendSumScores(reco_query)))
                }
            }
        }
        rest::Query::Discover(discover) => {
            let rest::DiscoverInput { target, context } = discover.discover;
            let target = convert_vector_input_with_inferred(target, inferred)?;
            let context = context
                .into_iter()
                .flatten()
                .map(|pair| context_pair_from_rest_with_inferred(pair, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            Ok(Query::Vector(VectorQuery::Discover(DiscoveryQuery::new(
                target, context,
            ))))
        }
        rest::Query::Context(context) => {
            let rest::ContextInput(context) = context.context;
            let context = context
                .into_iter()
                .flatten()
                .map(|pair| context_pair_from_rest_with_inferred(pair, inferred))
                .collect::<Result<Vec<_>, _>>()?;
            Ok(Query::Vector(VectorQuery::Context(ContextQuery::new(
                context,
            ))))
        }
        // Remaining variants carry no vector inputs; straight conversions.
        rest::Query::OrderBy(order_by) => Ok(Query::OrderBy(OrderBy::from(order_by.order_by))),
        rest::Query::Fusion(fusion) => Ok(Query::Fusion(FusionInternal::from(fusion.fusion))),
        rest::Query::Rrf(rrf) => Ok(Query::Fusion(FusionInternal::from(rrf.rrf))),
        rest::Query::Formula(formula) => Ok(Query::Formula(FormulaInternal::from(formula))),
        rest::Query::Sample(sample) => Ok(Query::Sample(SampleInternal::from(sample.sample))),
    }
}
/// Converts a REST prefetch (including its nested prefetches, recursively)
/// into a [`CollectionPrefetch`], resolving vector inputs via `inferred`.
fn convert_prefetch_with_inferred(
    prefetch: rest::Prefetch,
    inferred: &BatchAccumInferred,
) -> Result<CollectionPrefetch, StorageError> {
    let rest::Prefetch {
        prefetch: nested,
        query,
        using,
        filter,
        score_threshold,
        params,
        limit,
        lookup_from,
    } = prefetch;
    // Convert the optional query first, then recurse into nested prefetches.
    let query = match query {
        Some(query) => Some(convert_query_with_inferred(query, inferred)?),
        None => None,
    };
    let mut sub_prefetches = Vec::new();
    if let Some(nested) = nested {
        sub_prefetches.reserve(nested.len());
        for sub in nested {
            sub_prefetches.push(convert_prefetch_with_inferred(sub, inferred)?);
        }
    }
    Ok(CollectionPrefetch {
        prefetch: sub_prefetches,
        query,
        using: using.unwrap_or_else(|| DEFAULT_VECTOR_NAME.to_owned()),
        filter,
        score_threshold: score_threshold.map(OrderedFloat),
        limit: limit.unwrap_or(CollectionQueryRequest::DEFAULT_LIMIT),
        params,
        lookup_from,
    })
}
/// Converts one REST context pair, resolving both its positive and negative
/// vector inputs via the pre-computed `inferred` batch.
fn context_pair_from_rest_with_inferred(
    value: rest::ContextPair,
    inferred: &BatchAccumInferred,
) -> Result<ContextPair<VectorInputInternal>, StorageError> {
    // Positive side is converted (and may fail) before the negative side.
    let positive = convert_vector_input_with_inferred(value.positive, inferred)?;
    let negative = convert_vector_input_with_inferred(value.negative, inferred)?;
    Ok(ContextPair { positive, negative })
}
/// Unit tests for the inference-aware REST → internal query conversions.
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use api::rest::schema::{Document, Image, InferenceObject, NearestQuery};
    use collection::operations::point_ops::VectorPersisted;
    use serde_json::json;
    use super::*;
    // Builds a minimal document input for inference tests.
    fn create_test_document(text: &str) -> Document {
        Document {
            text: text.to_string(),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Builds a minimal image input for inference tests.
    fn create_test_image(url: &str) -> Image {
        Image {
            image: json!({"data": url.to_string()}),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Builds a minimal arbitrary-object input for inference tests.
    fn create_test_object(data: &str) -> InferenceObject {
        InferenceObject {
            object: json!({"data": data}),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Pre-populates an inferred batch where a known document, image and
    // object all map to the dense vector [1.0, 2.0, 3.0].
    fn create_test_inferred_batch() -> BatchAccumInferred {
        let mut objects = HashMap::new();
        let doc = InferenceData::Document(create_test_document("test"));
        let img = InferenceData::Image(create_test_image("test.jpg"));
        let obj = InferenceData::Object(create_test_object("test"));
        let dense_vector = vec![1.0, 2.0, 3.0];
        let vector_persisted = VectorPersisted::Dense(dense_vector);
        objects.insert(doc, vector_persisted.clone());
        objects.insert(img, vector_persisted.clone());
        objects.insert(obj, vector_persisted);
        BatchAccumInferred { objects }
    }
    // Literal dense vectors pass through unchanged (no inference lookup).
    #[test]
    fn test_convert_vector_input_with_inferred_dense() {
        let inferred = create_test_inferred_batch();
        let vector = rest::VectorInput::DenseVector(vec![1.0, 2.0, 3.0]);
        let result = convert_vector_input_with_inferred(vector, &inferred).unwrap();
        match result {
            VectorInputInternal::Vector(VectorInternal::Dense(values)) => {
                assert_eq!(values, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vector"),
        }
    }
    // Documents resolve to their inferred vector from the batch.
    #[test]
    fn test_convert_vector_input_with_inferred_document() {
        let inferred = create_test_inferred_batch();
        let doc = create_test_document("test");
        let vector = rest::VectorInput::Document(doc);
        let result = convert_vector_input_with_inferred(vector, &inferred).unwrap();
        match result {
            VectorInputInternal::Vector(VectorInternal::Dense(values)) => {
                assert_eq!(values, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vector from inference"),
        }
    }
    // An input absent from the inferred batch yields an inference error.
    #[test]
    fn test_convert_vector_input_with_inferred_missing() {
        let inferred = create_test_inferred_batch();
        let doc = create_test_document("missing");
        let vector = rest::VectorInput::Document(doc);
        let result = convert_vector_input_with_inferred(vector, &inferred);
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("Missing inferred vector"),
        );
    }
    // Context pairs resolve both literal and inferred sides.
    #[test]
    fn test_context_pair_from_rest_with_inferred() {
        let inferred = create_test_inferred_batch();
        let pair = rest::ContextPair {
            positive: rest::VectorInput::DenseVector(vec![1.0, 2.0, 3.0]),
            negative: rest::VectorInput::Document(create_test_document("test")),
        };
        let result = context_pair_from_rest_with_inferred(pair, &inferred).unwrap();
        match (result.positive, result.negative) {
            (
                VectorInputInternal::Vector(VectorInternal::Dense(pos)),
                VectorInputInternal::Vector(VectorInternal::Dense(neg)),
            ) => {
                assert_eq!(pos, vec![1.0, 2.0, 3.0]);
                assert_eq!(neg, vec![1.0, 2.0, 3.0]);
            }
            _ => panic!("Expected dense vectors"),
        }
    }
    // A nearest query without MMR resolves its inferred input.
    #[test]
    fn test_convert_query_with_inferred_nearest() {
        let inferred = create_test_inferred_batch();
        let nearest = NearestQuery {
            nearest: rest::VectorInput::Document(create_test_document("test")),
            mmr: None,
        };
        let query = rest::QueryInterface::Query(rest::Query::Nearest(nearest));
        let result = convert_query_with_inferred(query, &inferred).unwrap();
        match result {
            Query::Vector(VectorQuery::Nearest(vector)) => match vector {
                VectorInputInternal::Vector(VectorInternal::Dense(values)) => {
                    assert_eq!(values, vec![1.0, 2.0, 3.0]);
                }
                _ => panic!("Expected dense vector"),
            },
            _ => panic!("Expected nearest query"),
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/params.rs | src/common/inference/params.rs | use crate::common::inference::token::InferenceToken;
/// Per-request parameters forwarded to the inference service.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct InferenceParams {
    // Token passed to the inference endpoint for this request.
    pub token: InferenceToken,
    // Optional per-request timeout overriding the client default.
    pub timeout: Option<std::time::Duration>,
}
impl InferenceParams {
    /// Creates parameters from anything convertible into an
    /// [`InferenceToken`] and an optional request timeout.
    pub fn new(token: impl Into<InferenceToken>, timeout: Option<std::time::Duration>) -> Self {
        Self {
            token: token.into(),
            timeout,
        }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/bm25.rs | src/common/inference/bm25.rs | use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap};
use std::str::FromStr;
use std::sync::Arc;
use api::rest::{Bm25Config, TextPreprocessingConfig};
use collection::operations::point_ops::VectorPersisted;
use itertools::Itertools;
use murmur3::murmur3_32_of_slice;
use segment::data_types::index::{Language, StopwordsInterface};
use segment::index::field_index::full_text_index::stop_words::StopwordsFilter;
use segment::index::field_index::full_text_index::tokenizers::{
Stemmer, Tokenizer, TokensProcessor,
};
/// Fallback language used for stemming/stopwords when none is configured.
const DEFAULT_LANGUAGE: &str = "english";
/// Bm25 implementation
#[derive(Debug)]
pub struct Bm25 {
    // Hyperparameters (k, b, avg_len) used by the scoring formula.
    config: Bm25Config,
    // Tokenizer built from the config's text-preprocessing options.
    tokenizer: Tokenizer,
}
impl Bm25 {
    /// Builds a Bm25 embedder, moving the text-preprocessing options out of
    /// `config` into a dedicated tokenizer.
    pub fn new(mut config: Bm25Config) -> Self {
        let tokenizer_conf = std::mem::take(&mut config.text_preprocessing_config);
        let tokens_processor = new_token_processor_from_config(tokenizer_conf);
        let tokenizer = Tokenizer::new(config.tokenizer, tokens_processor);
        Self { config, tokenizer }
    }
    /// Tokenizes the `input` with the configured tokenizer options.
    fn tokenize<'b>(&'b self, input: &'b str) -> Vec<Cow<'b, str>> {
        let mut out = vec![];
        self.tokenizer.tokenize_query(input, |i| out.push(i));
        out
    }
    /// Embeds the given input using the Bm25 algorithm and configured options/hyperparameters.
    // Query embedding: each unique token gets weight 1.0.
    pub fn search_embed(&self, input: &str) -> VectorPersisted {
        let tokens = self.tokenize(input);
        if tokens.is_empty() {
            return VectorPersisted::empty_sparse();
        }
        let indices: Vec<u32> = tokens
            .into_iter()
            .map(|token| Self::compute_token_id(&token))
            .unique()
            .collect();
        let values: Vec<f32> = vec![1.0; indices.len()];
        VectorPersisted::new_sparse(indices, values)
    }
    /// Embeds the given input using the Bm25 algorithm and configured options/hyperparameters.
    // Document embedding: each token gets its BM25 term-frequency weight.
    pub fn doc_embed(&self, input: &str) -> VectorPersisted {
        let tokens = self.tokenize(input);
        if tokens.is_empty() {
            return VectorPersisted::empty_sparse();
        }
        let tf_map = self.term_frequency(&tokens);
        let (indices, values): (Vec<u32>, Vec<f32>) = tf_map.into_iter().unzip();
        VectorPersisted::new_sparse(indices, values)
    }
    // Computes, per token id, the BM25 term-frequency component:
    //   tf = count * (k + 1) / (k * (1 - b + b * doc_len / avg_len) + count)
    fn term_frequency(&self, tokens: &[Cow<str>]) -> BTreeMap<u32, f32> {
        let mut tf_map = BTreeMap::new();
        let doc_len = tokens.len() as f64;
        let mut counter: HashMap<&str, u32> = HashMap::new();
        tokens
            .iter()
            .for_each(|token| *counter.entry(token.as_ref()).or_insert(0) += 1);
        let k = self.config.k.into_inner();
        let b = self.config.b.into_inner();
        let avg_len = self.config.avg_len.into_inner();
        for (token, count) in &counter {
            let token_id = Self::compute_token_id(token);
            let num_occurrences = f64::from(*count);
            let mut tf = num_occurrences * (k + 1.0);
            tf /= k.mul_add(1.0 - b + b * doc_len / avg_len, num_occurrences);
            tf_map.insert(token_id, tf as f32);
        }
        tf_map
    }
    // Maps a token to a sparse-vector index: the murmur3 hash reinterpreted
    // as signed, with its absolute value taken.
    fn compute_token_id(token: &str) -> u32 {
        (murmur3_32_of_slice(token.as_bytes(), 0) as i32).unsigned_abs()
    }
}
/// Builds a [`TokensProcessor`] from the text-preprocessing config, applying
/// defaults: lowercasing on, ASCII folding off, language `"english"`, and a
/// language-derived stemmer/stopwords set when none is configured explicitly.
fn new_token_processor_from_config(value: TextPreprocessingConfig) -> TokensProcessor {
    let TextPreprocessingConfig {
        language,
        lowercase,
        ascii_folding,
        stopwords,
        stemmer,
        min_token_len,
        max_token_len,
    } = value;
    let lowercase = lowercase.unwrap_or(true);
    let ascii_folding = ascii_folding.unwrap_or(false);
    let language = language.unwrap_or_else(|| DEFAULT_LANGUAGE.to_string());
    let stemmer = match stemmer {
        None => Stemmer::try_default_from_language(&language),
        Some(stemmer_algorithm) => Some(Stemmer::from_algorithm(&stemmer_algorithm)),
    };
    let stopwords_config = match stopwords {
        None => {
            // Try to create from the language
            Language::from_str(&language)
                .ok()
                .map(StopwordsInterface::Language)
        }
        Some(stopwords_interface) => Some(stopwords_interface),
    };
    TokensProcessor::new(
        lowercase,
        ascii_folding,
        Arc::new(StopwordsFilter::new(&stopwords_config, lowercase)),
        stemmer,
        min_token_len,
        max_token_len,
    )
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/service.rs | src/common/inference/service.rs | use std::fmt::Display;
use std::hash::Hash;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use actix_web::http::header::HttpDate;
use api::rest::models::InferenceUsage;
use api::rest::{Document, Image, InferenceObject};
use collection::operations::point_ops::VectorPersisted;
use common::defaults::APP_USER_AGENT;
use itertools::{Either, Itertools};
use parking_lot::RwLock;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use storage::content_manager::errors::StorageError;
pub use super::inference_input::InferenceInput;
use super::local_model;
use crate::common::inference::config::InferenceConfig;
use crate::common::inference::params::InferenceParams;
/// Kind of operation an inference request is performed for; serialized in
/// lowercase (`"update"` / `"search"`).
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub enum InferenceType {
    #[default]
    Update,
    Search,
}
impl Display for InferenceType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", format!("{self:?}").to_lowercase())
}
}
/// Request body POSTed to the remote inference endpoint.
#[derive(Debug, Serialize)]
pub struct InferenceRequest {
    // Inputs to run inference on, in request order.
    pub(crate) inputs: Vec<InferenceInput>,
    // Operation kind this inference serves, if known.
    pub(crate) inference: Option<InferenceType>,
    // NOTE(review): `#[serde(default)]` only affects deserialization, but this
    // struct only derives `Serialize` — confirm whether the attribute is needed.
    #[serde(default)]
    pub(crate) token: Option<String>,
}
/// Response body returned by the inference endpoint.
#[derive(Debug, Deserialize)]
#[cfg_attr(test, derive(Serialize))]
pub struct InferenceResponse {
    // Inferred vectors, one per input.
    pub embeddings: Vec<VectorPersisted>,
    // Usage accounting reported by the service, if any.
    pub usage: Option<InferenceUsage>,
}
/// A single raw input requiring inference; hashable so batches can
/// deduplicate identical inputs.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
pub enum InferenceData {
    Document(Document),
    Image(Image),
    Object(InferenceObject),
}
/// Error body the inference endpoint may return for a bad request.
#[derive(Debug, Deserialize)]
struct InferenceError {
    pub error: String,
}
impl InferenceData {
    /// Returns a lowercase name of the variant, for messages/telemetry.
    pub(crate) fn type_name(&self) -> &'static str {
        match self {
            InferenceData::Document(_) => "document",
            InferenceData::Image(_) => "image",
            InferenceData::Object(_) => "object",
        }
    }
}
/// HTTP client wrapper for the configured remote inference endpoint.
pub struct InferenceService {
    pub(crate) config: InferenceConfig,
    pub(crate) client: Client,
}
/// Global singleton holding the configured inference service, if initialized.
static INFERENCE_SERVICE: RwLock<Option<Arc<InferenceService>>> = RwLock::new(None);
/// We assume that the inference provider will handle timeouts itself, if
/// not provided by the user or configured. But we need assurance that we don't
/// wait forever for a response.
// `const` rather than `static`: this is a plain compile-time constant.
const DEFAULT_INFERENCE_TIMEOUT_SECS: u64 = 10 * 60; // 10 minutes
impl InferenceService {
    /// Builds the service and its HTTP client, applying the default timeout
    /// when none is configured.
    pub fn new(config: Option<InferenceConfig>) -> Self {
        let config = config.unwrap_or_default();
        let InferenceConfig {
            address: _,
            timeout,
            token: _,
        } = &config;
        let timeout = timeout.unwrap_or(DEFAULT_INFERENCE_TIMEOUT_SECS);
        let client_builder = Client::builder()
            .user_agent(APP_USER_AGENT.as_str())
            .timeout(Duration::from_secs(timeout));
        Self {
            config,
            client: client_builder
                .build()
                .expect("Invalid timeout value for HTTP client"),
        }
    }
    /// Initializes the global singleton; fails when the configured address
    /// is present but empty.
    pub fn init_global(config: Option<InferenceConfig>) -> Result<(), StorageError> {
        let mut inference_service = INFERENCE_SERVICE.write();
        let service = Self::new(config);
        if !service.is_address_valid() {
            return Err(StorageError::service_error(
                "Cannot initialize InferenceService: address is required but not provided or empty in config",
            ));
        }
        *inference_service = Some(Arc::new(service));
        Ok(())
    }
    /// Returns the global service, if [`Self::init_global`] succeeded.
    pub fn get_global() -> Option<Arc<InferenceService>> {
        INFERENCE_SERVICE.read().as_ref().cloned()
    }
    /// Validates the configured address (see [`Self::is_address_valid`]).
    pub(crate) fn validate(&self) -> Result<(), StorageError> {
        if !self.is_address_valid() {
            return Err(StorageError::service_error(
                "InferenceService configuration error: address is missing or empty",
            ));
        }
        Ok(())
    }
    /// Runs inference for all inputs, dispatching each one either to a local
    /// model or to the remote service, and merges results back into the
    /// original input order.
    pub async fn infer(
        &self,
        inference_inputs: Vec<InferenceInput>,
        inference_type: InferenceType,
        inference_params: InferenceParams,
    ) -> Result<InferenceResponse, StorageError> {
        let (
            (local_inference_inputs, local_inference_positions),
            (remote_inference_inputs, remote_inference_positions),
        ): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = inference_inputs
            .into_iter()
            // Keep track of the input's positions so we can properly merge them together later.
            .enumerate()
            .partition_map(|(pos, input)| {
                // Check if input is targeting a local model or the configured remote server.
                if local_model::is_local_model(&input.model) {
                    Either::Left((input, pos))
                } else {
                    Either::Right((input, pos))
                }
            });
        // Run inference on local models
        let local_model_results = local_model::infer_local(local_inference_inputs, inference_type)?;
        // Early return with the local model's results if no other inference_inputs were passed.
        // If local models is also empty, we automatically return an empty response here.
        if remote_inference_inputs.is_empty() {
            return Ok(InferenceResponse {
                embeddings: local_model_results,
                usage: None, // No usage since everything was processed locally.
            });
        }
        let remote_result = self
            .infer_remote(remote_inference_inputs, inference_type, inference_params)
            .await?;
        Ok(Self::merge_local_and_remote_result(
            local_model_results,
            local_inference_positions,
            remote_result,
            remote_inference_positions,
        ))
    }
    /// Sends one inference request to the configured remote endpoint and
    /// maps the HTTP outcome into an [`InferenceResponse`] or a
    /// [`StorageError`].
    async fn infer_remote(
        &self,
        inference_inputs: Vec<InferenceInput>,
        inference_type: InferenceType,
        inference_params: InferenceParams,
    ) -> Result<InferenceResponse, StorageError> {
        // Assume that either:
        // - User doesn't have access to generating random JWT tokens (like in serverless)
        // - Inference server checks validity of the tokens.
        let InferenceParams {
            token: inference_token,
            timeout,
        } = inference_params;
        // Per-request token takes precedence over the configured one.
        let token = inference_token.0.or_else(|| self.config.token.clone());
        let Some(url) = self.config.address.as_ref() else {
            return Err(StorageError::service_error(
                "InferenceService URL not configured - please provide valid address in config",
            ));
        };
        let request_body = InferenceRequest {
            inputs: inference_inputs,
            inference: Some(inference_type),
            token,
        };
        let request = self.client.post(url);
        // Per-request timeout overrides the client-level default.
        let request = if let Some(timeout) = timeout {
            request.timeout(timeout)
        } else {
            request
        };
        let response = request.json(&request_body).send().await;
        let (response_body, status, retry_after) = match response {
            Ok(response) => {
                let status = response.status();
                let retry_after = Self::parse_retry_after(response.headers());
                match response.text().await {
                    Ok(body) => (body, status, retry_after),
                    Err(err) => {
                        return Err(StorageError::service_error(format!(
                            "Failed to read inference response body: {err}"
                        )));
                    }
                }
            }
            Err(error) => {
                if let Some(status) = error.status() {
                    (error.to_string(), status, None)
                } else {
                    return Err(StorageError::service_error(format!(
                        "Failed to send inference request: {error}"
                    )));
                }
            }
        };
        Self::handle_inference_response(status, &response_body, retry_after)
    }
    /// Interleaves local and remote results back into original input order.
    fn merge_local_and_remote_result(
        local_results: Vec<VectorPersisted>,
        local_pos: Vec<usize>,
        remote_res: InferenceResponse,
        remote_pos: Vec<usize>,
    ) -> InferenceResponse {
        // Skip merging with local results if we only have inference results from remote.
        if local_results.is_empty() {
            return remote_res;
        }
        // Merge remote results and local results together in the exact same order they have been passed.
        let merged = merge_position_items(
            local_results,
            local_pos,
            remote_res.embeddings,
            remote_pos,
        )
        .expect(
            "Expected local results and remote items being contiguous. This is an internal bug!",
        );
        InferenceResponse {
            embeddings: merged,
            usage: remote_res.usage, // Only account for usage of remote.
        }
    }
    /// Parses a `Retry-After` header given either as seconds or an HTTP date.
    fn parse_retry_after(headers: &reqwest::header::HeaderMap) -> Option<Duration> {
        headers
            .get(reqwest::header::RETRY_AFTER)
            .and_then(|value| value.to_str().ok())
            .and_then(|value| {
                // Check if the value is a valid duration in seconds
                if let Ok(seconds) = value.parse::<u64>() {
                    return Some(Duration::from_secs(seconds));
                }
                // Check if the value is a valid Date
                if let Ok(http_date) = value.parse::<HttpDate>() {
                    let ts = SystemTime::from(http_date);
                    return ts
                        .duration_since(SystemTime::now())
                        .ok()
                        .map(|d| d.max(Duration::ZERO));
                }
                None
            })
    }
    /// Maps an inference HTTP response (status + body) to a parsed
    /// [`InferenceResponse`] or the appropriate [`StorageError`] category.
    pub(crate) fn handle_inference_response(
        status: reqwest::StatusCode,
        response_body: &str,
        retry_after: Option<Duration>,
    ) -> Result<InferenceResponse, StorageError> {
        match status {
            reqwest::StatusCode::OK => {
                serde_json::from_str(response_body)
                    .map_err(|e| {
                        StorageError::service_error(format!(
                            "Failed to parse successful inference response: {e}. Response body: {response_body}",
                        ))
                    })
            }
            reqwest::StatusCode::BAD_REQUEST => {
                // Try to extract error description from the response body, if it is a valid JSON
                let parsed_body: Result<InferenceError, _> = serde_json::from_str(response_body);
                match parsed_body {
                    Ok(InferenceError { error }) => {
                        Err(StorageError::bad_request(format!(
                            "Inference request validation failed: {error}",
                        )))
                    }
                    Err(_) => {
                        Err(StorageError::bad_request(format!(
                            "Invalid inference request: {response_body}",
                        )))
                    }
                }
            }
            status @ (reqwest::StatusCode::UNAUTHORIZED | reqwest::StatusCode::FORBIDDEN) => {
                Err(StorageError::service_error(format!(
                    "Authentication failed for inference service ({status}): {response_body}",
                )))
            }
            status @ reqwest::StatusCode::TOO_MANY_REQUESTS => {
                Err(StorageError::rate_limit_exceeded(
                    format!("Too many requests for inference service ({status}): {response_body}"),
                    retry_after,
                ))
            }
            status @ (reqwest::StatusCode::INTERNAL_SERVER_ERROR
            | reqwest::StatusCode::SERVICE_UNAVAILABLE
            | reqwest::StatusCode::GATEWAY_TIMEOUT) => Err(StorageError::service_error(format!(
                "Inference service error ({status}): {response_body}",
            ))),
            _ => {
                if status.is_server_error() {
                    Err(StorageError::service_error(format!(
                        "Inference service error ({status}): {response_body}",
                    )))
                } else if status.is_client_error() {
                    Err(StorageError::bad_request(format!(
                        "Inference can't process request ({status}): {response_body}",
                    )))
                } else {
                    Err(StorageError::service_error(format!(
                        "Unexpected inference error ({status}): {response_body}",
                    )))
                }
            }
        }
    }
    fn is_address_valid(&self) -> bool {
        self.config.address.is_none() // In BM25 we don't need an address so we allow InferenceService to have an empty address.
            || self.config.address.as_ref().is_some_and(|i| !i.is_empty())
    }
}
/// Merges two `(item, position)` streams (each already position-sorted) into
/// a single list ordered by position. Returns `None` unless the merged
/// positions form exactly the contiguous range `0..total_len` — i.e. no item
/// was skipped or duplicated.
fn merge_position_items<I>(
    left: impl IntoIterator<Item = I>,
    left_pos: Vec<usize>,
    right: impl IntoIterator<Item = I>,
    right_pos: Vec<usize>,
) -> Option<Vec<I>> {
    let mut left_iter = left.into_iter().zip(left_pos).peekable();
    let mut right_iter = right.into_iter().zip(right_pos).peekable();
    let mut merged = Vec::new();
    let mut expected_pos = 0;
    loop {
        // Classic two-pointer merge; on position ties the right side wins.
        let take_left = match (left_iter.peek(), right_iter.peek()) {
            (Some(l), Some(r)) => l.1 < r.1,
            (Some(_), None) => true,
            (None, Some(_)) => false,
            (None, None) => break,
        };
        let (item, pos) = if take_left {
            left_iter.next()?
        } else {
            right_iter.next()?
        };
        // Every merged position must be contiguous, otherwise an item was skipped.
        if pos != expected_pos {
            return None;
        }
        merged.push(item);
        expected_pos += 1;
    }
    Some(merged)
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use api::rest::Bm25Config;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use serde_json::{Value, json};
use super::*;
use crate::common::inference::bm25::Bm25;
use crate::common::inference::inference_input::InferenceDataType;
const BM25_LOCAL_MODEL_NAME: &str = "bm25";
#[test]
fn test_merge_position_items() {
let (left, right): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) =
(0..1000).map(|i| (i, i)).partition(|i| i.0 % 7 == 0);
let merged = merge_position_items(left.0, left.1, right.0, right.1);
assert_eq!(merged, Some((0..1000).collect::<Vec<_>>()));
}
#[test]
fn test_merge_position_items_fail() {
let (left, mut right): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) =
(0..1000).map(|i| (i, i)).partition(|i| i.0 % 7 == 0);
right.0.remove(5);
right.1.remove(5);
let merged = merge_position_items(left.0, left.1, right.0, right.1);
// We were missing an item and therefore expect `None`.
assert_eq!(merged, None);
}
#[tokio::test]
async fn test_bm25_end_to_end() {
let mut rng = StdRng::seed_from_u64(42);
// Test without any BM25
let only_inference_inputs: Vec<_> = (0..rng.random_range(30..100))
.map(|_| make_normal_inference_input("this is some input", &mut rng))
.collect();
let res = run_inference_with_mocked_remote(only_inference_inputs.clone()).await;
check_inference_response(only_inference_inputs, res);
// Test with only BM25
let only_bm25_inputs: Vec<_> = (0..rng.random_range(30..100))
.map(|_| make_bm25_inference_input("this is some input"))
.collect();
let res = run_inference_with_mocked_remote(only_bm25_inputs.clone()).await;
check_inference_response(only_bm25_inputs, res);
// Test BM25 and inference mixed.
let mut inputs: Vec<InferenceInput> = vec![];
inputs.extend(
(0..rng.random_range(30..100)).map(|_| make_bm25_inference_input("this is some input")),
);
inputs.extend(
(0..rng.random_range(30..100))
.map(|_| make_normal_inference_input("this is some input", &mut rng)),
);
inputs.shuffle(&mut rng);
let res = run_inference_with_mocked_remote(inputs.clone()).await;
check_inference_response(inputs, res);
}
fn make_normal_inference_input(input: &str, rand: &mut StdRng) -> InferenceInput {
let options = if rand.random_bool(0.3) {
let mut opts = HashMap::default();
let value = rand.random_iter::<char>().take(10).collect::<String>(); // Test utf8
opts.insert("some-key".to_string(), Value::String(value));
Some(opts)
} else {
None
};
InferenceInput {
data: Value::String(input.to_string()),
data_type: InferenceDataType::Text,
model: "anyModel".to_string(),
options,
}
}
fn make_bm25_inference_input(input: &str) -> InferenceInput {
let bm25_config = Bm25Config::default();
let options: HashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&bm25_config).unwrap()).unwrap();
InferenceInput {
data: Value::String(input.to_string()),
data_type: InferenceDataType::Text,
model: BM25_LOCAL_MODEL_NAME.to_string(),
options: Some(options),
}
}
fn check_inference_response(inputs: Vec<InferenceInput>, response: InferenceResponse) {
assert_eq!(inputs.len(), response.embeddings.len());
for (idx, (input, response)) in inputs.into_iter().zip(response.embeddings).enumerate() {
if input.model == BM25_LOCAL_MODEL_NAME {
// In our test-setup, only BM25 returns sparse vectors. Normal inference is mocked
// and always returns dense vectors.
assert!(matches!(response, VectorPersisted::Sparse(..)));
let bm25_config = InferenceInput::parse_bm25_config(input.options).unwrap();
// Re-run bm25 and check that response is correct.
let bm25 = Bm25::new(bm25_config).doc_embed(input.data.as_str().unwrap());
assert_eq!(response, bm25);
} else {
let expected_vector = VectorPersisted::Dense(vec![0.0; idx]);
assert_eq!(response, expected_vector);
}
}
}
async fn run_inference_with_mocked_remote(
inference_inputs: Vec<InferenceInput>,
) -> InferenceResponse {
// Request a new server from the pool
let mut server = mockito::Server::new_async().await;
// Create dummy dense vectors for non-bm25 inputs with the length of the index.
// The dummy dense vector have the dimension of the position they appeared in `inference_inputs`,
// so we can easily check for correct ordering later, although it is a bit hacky.
let expected_embeddings: Vec<_> = inference_inputs
.iter()
.enumerate()
.filter(|(_, item)| item.model != BM25_LOCAL_MODEL_NAME)
.map(|(index, _)| {
let values = vec![0.0; index];
VectorPersisted::Dense(values)
})
.collect();
// Create an HTTP mock
let mock = server
.mock("POST", "/")
.with_status(200)
.with_header("content-type", "text/json")
.with_body(
json!(InferenceResponse {
embeddings: expected_embeddings,
usage: None,
})
.to_string(),
)
.create_async()
.await;
let config = InferenceConfig {
address: Some(server.url()), // Use mock's URL as address when doing inference.
timeout: None,
token: Some(String::default()),
};
let service = InferenceService::new(Some(config));
let has_remote_inference_items = inference_inputs
.iter()
.any(|i| i.model != BM25_LOCAL_MODEL_NAME);
let res = service
.infer(
inference_inputs,
InferenceType::Update,
InferenceParams::new("key", None),
)
.await
.expect("Failed to do inference");
// We expect exactly 1 request if there is any inference (non-bm25) request
// and 0 if all inputs are bm25.
if has_remote_inference_items {
mock.expect(1).assert_async().await;
} else {
mock.expect(0).assert_async().await;
}
res
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/batch_processing_grpc.rs | src/common/inference/batch_processing_grpc.rs | use std::collections::HashSet;
use api::grpc::qdrant::vector_input::Variant;
use api::grpc::qdrant::{
ContextInput, ContextInputPair, DiscoverInput, PrefetchQuery, Query, RecommendInput,
VectorInput, query,
};
use api::rest::schema as rest;
use tonic::Status;
use super::service::{InferenceData, InferenceInput, InferenceRequest};
/// Accumulates unique inference inputs (documents/images/objects) gathered
/// from a gRPC query, deduplicated before being turned into an
/// [`InferenceRequest`].
pub struct BatchAccumGrpc {
    // Deduplicated set of raw inputs still requiring inference.
    pub(crate) objects: HashSet<InferenceData>,
}
impl BatchAccumGrpc {
    /// Creates an empty accumulator.
    pub fn new() -> Self {
        Self {
            objects: HashSet::new(),
        }
    }
    /// Adds one input; duplicates are silently deduplicated by the set.
    pub fn add(&mut self, data: InferenceData) {
        self.objects.insert(data);
    }
    /// Merges another accumulator into this one.
    pub fn extend(&mut self, other: BatchAccumGrpc) {
        self.objects.extend(other.objects);
    }
    /// Returns `true` when no inputs have been collected.
    pub fn is_empty(&self) -> bool {
        self.objects.is_empty()
    }
}
impl From<&BatchAccumGrpc> for InferenceRequest {
    /// Converts the accumulated inputs into a request body; `inference` and
    /// `token` are left unset here and filled in by the caller if needed.
    fn from(batch: &BatchAccumGrpc) -> Self {
        Self {
            inputs: batch
                .objects
                .iter()
                .cloned()
                .map(InferenceInput::from)
                .collect(),
            inference: None,
            token: None,
        }
    }
}
/// Collects a single gRPC vector input into `batch` when it requires
/// inference (document/image/object); literal vectors and ids are skipped.
fn collect_vector_input(vector: &VectorInput, batch: &mut BatchAccumGrpc) -> Result<(), Status> {
    // An unset variant contributes nothing.
    let Some(variant) = &vector.variant else {
        return Ok(());
    };
    match variant {
        // Ids and literal vectors need no inference.
        Variant::Id(_) | Variant::Dense(_) | Variant::Sparse(_) | Variant::MultiDense(_) => {}
        Variant::Document(document) => {
            let doc = rest::Document::try_from(document.clone())
                .map_err(|e| Status::internal(format!("Document conversion error: {e:?}")))?;
            batch.add(InferenceData::Document(doc));
        }
        Variant::Image(image) => {
            let img = rest::Image::try_from(image.clone())
                .map_err(|e| Status::internal(format!("Image conversion error: {e:?}")))?;
            batch.add(InferenceData::Image(img));
        }
        Variant::Object(object) => {
            let obj = rest::InferenceObject::try_from(object.clone())
                .map_err(|e| Status::internal(format!("Object conversion error: {e:?}")))?;
            batch.add(InferenceData::Object(obj));
        }
    }
    Ok(())
}
/// Collects inference inputs from every positive/negative pair of a context
/// query; stops at the first conversion error.
pub(crate) fn collect_context_input(
    context: &ContextInput,
    batch: &mut BatchAccumGrpc,
) -> Result<(), Status> {
    context
        .pairs
        .iter()
        .try_for_each(|pair| collect_context_input_pair(pair, batch))
}
/// Collects inference inputs from both sides of one context pair.
fn collect_context_input_pair(
    pair: &ContextInputPair,
    batch: &mut BatchAccumGrpc,
) -> Result<(), Status> {
    let ContextInputPair { positive, negative } = pair;
    // Visit whichever of the two optional sides is present.
    for vector in [positive.as_ref(), negative.as_ref()].into_iter().flatten() {
        collect_vector_input(vector, batch)?;
    }
    Ok(())
}
pub(crate) fn collect_discover_input(
discover: &DiscoverInput,
batch: &mut BatchAccumGrpc,
) -> Result<(), Status> {
let DiscoverInput { target, context } = discover;
if let Some(vector) = target {
collect_vector_input(vector, batch)?;
}
if let Some(context) = context {
for pair in &context.pairs {
collect_context_input_pair(pair, batch)?;
}
}
Ok(())
}
/// Collects inference inputs from all positive and negative examples of a
/// recommendation query (the strategy itself carries no inference payload).
pub(crate) fn collect_recommend_input(
    recommend: &RecommendInput,
    batch: &mut BatchAccumGrpc,
) -> Result<(), Status> {
    let RecommendInput {
        positive,
        negative,
        strategy: _,
    } = recommend;
    for vector in positive.iter().chain(negative) {
        collect_vector_input(vector, batch)?;
    }
    Ok(())
}
/// Walks one query variant and gathers every inference input reachable
/// from it; variants with no vector payload are skipped.
pub(crate) fn collect_query(query: &Query, batch: &mut BatchAccumGrpc) -> Result<(), Status> {
    let Some(variant) = &query.variant else {
        return Ok(());
    };
    match variant {
        query::Variant::Nearest(nearest) => collect_vector_input(nearest, batch)?,
        query::Variant::Recommend(recommend) => collect_recommend_input(recommend, batch)?,
        query::Variant::Discover(discover) => collect_discover_input(discover, batch)?,
        query::Variant::Context(context) => collect_context_input(context, batch)?,
        query::Variant::NearestWithMmr(with_mmr) => {
            if let Some(nearest) = &with_mmr.nearest {
                collect_vector_input(nearest, batch)?;
            }
        }
        // These variants carry no embeddable payload.
        query::Variant::OrderBy(_)
        | query::Variant::Fusion(_)
        | query::Variant::Rrf(_)
        | query::Variant::Sample(_)
        | query::Variant::Formula(_) => {}
    }
    Ok(())
}
/// Recursively gathers inference inputs from a prefetch clause: its own
/// query first, then every nested prefetch.
pub(crate) fn collect_prefetch(
    prefetch: &PrefetchQuery,
    batch: &mut BatchAccumGrpc,
) -> Result<(), Status> {
    let PrefetchQuery {
        prefetch: nested,
        query,
        using: _,
        filter: _,
        params: _,
        score_threshold: _,
        limit: _,
        lookup_from: _,
    } = prefetch;
    if let Some(query) = query {
        collect_query(query, batch)?;
    }
    for sub_prefetch in nested {
        collect_prefetch(sub_prefetch, batch)?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use api::rest::schema::{Document, Image, InferenceObject};
    use serde_json::json;
    use super::*;
    // Fixture: a document with fixed model name and default options.
    fn create_test_document(text: &str) -> Document {
        Document {
            text: text.to_string(),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Fixture: an image payload wrapped in a minimal JSON object.
    fn create_test_image(url: &str) -> Image {
        Image {
            image: json!({"data": url.to_string()}),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Fixture: an arbitrary inference object wrapped in a minimal JSON object.
    fn create_test_object(data: &str) -> InferenceObject {
        InferenceObject {
            object: json!({"data": data}),
            model: "test-model".to_string(),
            options: Default::default(),
        }
    }
    // Adding the same item twice must not grow the set.
    #[test]
    fn test_batch_accum_basic() {
        let mut batch = BatchAccumGrpc::new();
        assert!(batch.objects.is_empty());
        let doc = InferenceData::Document(create_test_document("test"));
        batch.add(doc.clone());
        assert_eq!(batch.objects.len(), 1);
        batch.add(doc);
        assert_eq!(batch.objects.len(), 1);
    }
    // `extend` merges distinct items from another accumulator.
    #[test]
    fn test_batch_accum_extend() {
        let mut batch1 = BatchAccumGrpc::new();
        let mut batch2 = BatchAccumGrpc::new();
        let doc1 = InferenceData::Document(create_test_document("test1"));
        let doc2 = InferenceData::Document(create_test_document("test2"));
        batch1.add(doc1);
        batch2.add(doc2);
        batch1.extend(batch2);
        assert_eq!(batch1.objects.len(), 2);
    }
    // Structurally equal documents are deduplicated.
    #[test]
    fn test_deduplication() {
        let mut batch = BatchAccumGrpc::new();
        let doc1 = InferenceData::Document(create_test_document("same"));
        let doc2 = InferenceData::Document(create_test_document("same"));
        batch.add(doc1);
        batch.add(doc2);
        assert_eq!(batch.objects.len(), 1);
    }
    // Same text but different model must be kept as two distinct inputs.
    #[test]
    fn test_different_model_same_content() {
        let mut batch = BatchAccumGrpc::new();
        let mut doc1 = create_test_document("same");
        let mut doc2 = create_test_document("same");
        doc1.model = "model1".to_string();
        doc2.model = "model2".to_string();
        batch.add(InferenceData::Document(doc1));
        batch.add(InferenceData::Document(doc2));
        assert_eq!(batch.objects.len(), 2);
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/mod.rs | src/common/inference/mod.rs | #![allow(dead_code)]
mod batch_processing;
mod batch_processing_grpc;
pub mod bm25;
pub(crate) mod config;
mod infer_processing;
pub mod inference_input;
mod local_model;
pub mod params;
pub mod query_requests_grpc;
pub mod query_requests_rest;
pub mod service;
pub mod token;
pub mod update_requests;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/infer_processing.rs | src/common/inference/infer_processing.rs | use std::collections::{HashMap, HashSet};
use api::rest::models::InferenceUsage;
use collection::operations::point_ops::VectorPersisted;
use storage::content_manager::errors::StorageError;
use super::batch_processing::BatchAccum;
use super::service::{
InferenceData, InferenceInput, InferenceResponse, InferenceService, InferenceType,
};
use crate::common::inference::params::InferenceParams;
/// Result of running inference over a batch: maps each original input back
/// to the vector the inference service produced for it.
pub struct BatchAccumInferred {
    pub(crate) objects: HashMap<InferenceData, VectorPersisted>,
}
impl BatchAccumInferred {
    /// Creates an empty result map.
    pub fn new() -> Self {
        Self {
            objects: HashMap::new(),
        }
    }
    /// Runs inference over `objects` via the globally registered
    /// `InferenceService` and pairs each input with its produced vector.
    ///
    /// Returns an empty result (and no usage info) for an empty input set.
    /// Errors if the service is not initialized, fails validation, fails
    /// the inference call itself, or returns no embeddings.
    pub async fn from_objects(
        objects: HashSet<InferenceData>,
        inference_type: InferenceType,
        inference_params: InferenceParams,
    ) -> Result<(Self, Option<InferenceUsage>), StorageError> {
        if objects.is_empty() {
            return Ok((Self::new(), None));
        }
        let Some(service) = InferenceService::get_global() else {
            return Err(StorageError::service_error(
                "InferenceService is not initialized. Please check if it was properly configured and initialized during startup.",
            ));
        };
        service.validate()?;
        // Fix an iteration order so inputs and returned embeddings can be
        // zipped back together below.
        let objects_serialized: Vec<_> = objects.into_iter().collect();
        let inference_inputs: Vec<_> = objects_serialized
            .iter()
            .cloned()
            .map(InferenceInput::from)
            .collect();
        let InferenceResponse { embeddings, usage } = service
            .infer(inference_inputs, inference_type, inference_params)
            .await?;
        if embeddings.is_empty() {
            return Err(StorageError::service_error(
                "Inference service returned no vectors. Check if models are properly loaded.",
            ));
        }
        // NOTE(review): the zip assumes the service returns exactly one
        // embedding per input, in input order — relies on the service
        // contract; a short response would silently drop trailing inputs.
        let objects = objects_serialized.into_iter().zip(embeddings).collect();
        Ok((Self { objects }, usage))
    }
    /// Convenience wrapper: consumes a REST-side `BatchAccum` and infers
    /// its accumulated objects.
    pub async fn from_batch_accum(
        batch: BatchAccum,
        inference_type: InferenceType,
        inference_params: &InferenceParams,
    ) -> Result<(Self, Option<InferenceUsage>), StorageError> {
        let BatchAccum { objects } = batch;
        Self::from_objects(objects, inference_type, inference_params.clone()).await
    }
    /// Looks up the inferred vector for one original input, if present.
    pub fn get_vector(&self, data: &InferenceData) -> Option<&VectorPersisted> {
        self.objects.get(data)
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/inference/token.rs | src/common/inference/token.rs | use std::convert::Infallible;
use std::future::{Ready, ready};
use actix_web::{FromRequest, HttpMessage};
/// Optional API token forwarded to the inference service; `None` means the
/// request carried no token.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct InferenceToken(pub Option<String>);

impl InferenceToken {
    /// Wraps an owned key into a present token.
    pub fn new(key: impl Into<String>) -> Self {
        Self(Some(key.into()))
    }

    /// Borrows the token value, if any.
    pub fn as_str(&self) -> Option<&str> {
        self.0.as_deref()
    }
}

impl From<&str> for InferenceToken {
    fn from(s: &str) -> Self {
        Self(Some(s.to_string()))
    }
}
impl FromRequest for InferenceToken {
    type Error = Infallible;
    type Future = Ready<Result<Self, Self::Error>>;

    /// Reads a token previously stored in the request extensions
    /// (presumably by upstream middleware — confirm against the auth
    /// layer); yields an empty token when none was attached. Never fails.
    fn from_request(
        req: &actix_web::HttpRequest,
        _payload: &mut actix_web::dev::Payload,
    ) -> Self::Future {
        let token = req
            .extensions()
            .get::<InferenceToken>()
            .cloned()
            .unwrap_or_default();
        ready(Ok(token))
    }
}
/// gRPC counterpart of the actix extractor: pulls the token from tonic
/// request extensions, or an empty token when none was attached.
pub fn extract_token<R>(req: &tonic::Request<R>) -> InferenceToken {
    match req.extensions().get::<InferenceToken>() {
        Some(token) => token.clone(),
        None => InferenceToken(None),
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/requests_telemetry.rs | src/common/telemetry_ops/requests_telemetry.rs | use std::collections::HashMap;
use std::sync::Arc;
use common::types::TelemetryDetail;
use parking_lot::Mutex;
use schemars::JsonSchema;
use segment::common::anonymize::{Anonymize, anonymize_collection_values};
use segment::common::operation_time_statistics::{
OperationDurationStatistics, OperationDurationsAggregator, ScopeDurationMeasurer,
};
use serde::Serialize;
use storage::rbac::{Access, AccessRequirements};
/// HTTP status code as reported in telemetry (e.g. 200, 404).
pub type HttpStatusCode = u16;
/// Aggregated REST telemetry: per endpoint, per status code duration stats.
#[derive(Serialize, Clone, Default, Debug, JsonSchema)]
pub struct WebApiTelemetry {
    pub responses: HashMap<String, HashMap<HttpStatusCode, OperationDurationStatistics>>,
}
/// Aggregated gRPC telemetry: per method duration stats.
#[derive(Serialize, Clone, Default, Debug, JsonSchema, Anonymize)]
pub struct GrpcTelemetry {
    #[anonymize(with = anonymize_collection_values)]
    pub responses: HashMap<String, OperationDurationStatistics>,
}
/// Holds one shared per-worker collector for each actix worker thread.
pub struct ActixTelemetryCollector {
    pub workers: Vec<Arc<Mutex<ActixWorkerTelemetryCollector>>>,
}
/// Per-worker REST aggregator keyed by endpoint, then status code.
#[derive(Default)]
pub struct ActixWorkerTelemetryCollector {
    methods: HashMap<String, HashMap<HttpStatusCode, Arc<Mutex<OperationDurationsAggregator>>>>,
}
/// Holds one shared per-worker collector for each tonic worker.
pub struct TonicTelemetryCollector {
    pub workers: Vec<Arc<Mutex<TonicWorkerTelemetryCollector>>>,
}
/// Per-worker gRPC aggregator keyed by method name.
#[derive(Default)]
pub struct TonicWorkerTelemetryCollector {
    methods: HashMap<String, Arc<Mutex<OperationDurationsAggregator>>>,
}
impl ActixTelemetryCollector {
pub fn create_web_worker_telemetry(&mut self) -> Arc<Mutex<ActixWorkerTelemetryCollector>> {
let worker: Arc<Mutex<_>> = Default::default();
self.workers.push(worker.clone());
worker
}
pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> WebApiTelemetry {
let mut result = WebApiTelemetry::default();
for web_data in &self.workers {
let lock = web_data.lock().get_telemetry_data(detail);
result.merge(&lock);
}
result
}
}
impl TonicTelemetryCollector {
pub fn create_grpc_telemetry_collector(&mut self) -> Arc<Mutex<TonicWorkerTelemetryCollector>> {
let worker: Arc<Mutex<_>> = Default::default();
self.workers.push(worker.clone());
worker
}
pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> GrpcTelemetry {
let mut result = GrpcTelemetry::default();
for grpc_data in &self.workers {
let lock = grpc_data.lock().get_telemetry_data(detail);
result.merge(&lock);
}
result
}
}
impl TonicWorkerTelemetryCollector {
    /// Records one finished gRPC call: the duration from `instant` until
    /// now is folded into the method's aggregator when the measurer drops.
    pub fn add_response(&mut self, method: String, instant: std::time::Instant) {
        let aggregator = self
            .methods
            .entry(method)
            .or_insert_with(OperationDurationsAggregator::new);
        ScopeDurationMeasurer::new_with_instant(aggregator, instant);
    }

    /// Snapshots per-method statistics at the requested detail level.
    pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> GrpcTelemetry {
        let responses = self
            .methods
            .iter()
            .map(|(method, aggregator)| (method.clone(), aggregator.lock().get_statistics(detail)))
            .collect();
        GrpcTelemetry { responses }
    }
}
impl ActixWorkerTelemetryCollector {
    /// Records one finished HTTP call under its endpoint and status code;
    /// the duration from `instant` is folded in when the measurer drops.
    pub fn add_response(
        &mut self,
        method: String,
        status_code: HttpStatusCode,
        instant: std::time::Instant,
    ) {
        let aggregator = self
            .methods
            .entry(method)
            .or_default()
            .entry(status_code)
            .or_insert_with(OperationDurationsAggregator::new);
        ScopeDurationMeasurer::new_with_instant(aggregator, instant);
    }

    /// Snapshots per-endpoint, per-status statistics at the requested
    /// detail level.
    pub fn get_telemetry_data(&self, detail: TelemetryDetail) -> WebApiTelemetry {
        let responses = self
            .methods
            .iter()
            .map(|(method, by_status)| {
                let stats = by_status
                    .iter()
                    .map(|(code, aggregator)| (*code, aggregator.lock().get_statistics(detail)))
                    .collect();
                (method.clone(), stats)
            })
            .collect();
        WebApiTelemetry { responses }
    }
}
impl GrpcTelemetry {
    /// Folds per-method statistics from `other` into `self`, summing stats
    /// for methods present on both sides.
    pub fn merge(&mut self, other: &GrpcTelemetry) {
        for (method, stats) in &other.responses {
            let slot = self.responses.entry(method.clone()).or_default();
            *slot = slot.clone() + stats.clone();
        }
    }
}
impl WebApiTelemetry {
    /// Folds `other` into `self`, summing statistics per endpoint and
    /// status code.
    pub fn merge(&mut self, other: &WebApiTelemetry) {
        for (method, by_status) in &other.responses {
            let slot = self.responses.entry(method.clone()).or_default();
            for (code, stats) in by_status {
                let entry = slot.entry(*code).or_default();
                *entry = entry.clone() + stats.clone();
            }
        }
    }
}
/// Combined request telemetry report covering both API surfaces.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct RequestsTelemetry {
    pub rest: WebApiTelemetry,
    pub grpc: GrpcTelemetry,
}
impl RequestsTelemetry {
    /// Collects REST and gRPC request telemetry. Requires global access;
    /// returns `None` when the caller is not allowed to see it.
    pub fn collect(
        access: &Access,
        actix_collector: &ActixTelemetryCollector,
        tonic_collector: &TonicTelemetryCollector,
        detail: TelemetryDetail,
    ) -> Option<Self> {
        access.check_global_access(AccessRequirements::new()).ok()?;
        Some(Self {
            rest: actix_collector.get_telemetry_data(detail),
            grpc: tonic_collector.get_telemetry_data(detail),
        })
    }
}
impl Anonymize for WebApiTelemetry {
fn anonymize(&self) -> Self {
let responses = self
.responses
.iter()
.map(|(key, value)| (key.clone(), anonymize_collection_values(value)))
.collect();
WebApiTelemetry { responses }
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/hardware.rs | src/common/telemetry_ops/hardware.rs | use std::collections::HashMap;
use api::rest::models::HardwareUsage;
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::Serialize;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
/// Per-collection hardware usage report, keyed by collection name.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct HardwareTelemetry {
    pub(crate) collection_data: HashMap<String, HardwareUsage>,
}
impl HardwareTelemetry {
    /// Gathers per-collection hardware usage, restricted to the collections
    /// the given `access` is allowed to see.
    pub(crate) fn new(dispatcher: &Dispatcher, access: &Access) -> Self {
        let mut all_hw_metrics = dispatcher.all_hw_metrics();
        let collection_data = match access {
            // Global access: expose every collection's metrics unchanged.
            Access::Global(_) => all_hw_metrics,
            // Collection-scoped access: keep only metrics for collections
            // that satisfy the (empty, i.e. read-level) requirements.
            Access::Collection(collection_access_list) => {
                let allowed =
                    collection_access_list.meeting_requirements(AccessRequirements::new());
                let mut filtered = HashMap::new();
                for name in allowed {
                    if let Some(usage) = all_hw_metrics.remove(name) {
                        filtered.insert(name.clone(), usage);
                    }
                }
                filtered
            }
        };
        Self { collection_data }
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/collections_telemetry.rs | src/common/telemetry_ops/collections_telemetry.rs | use std::sync::atomic::AtomicBool;
use std::time::Duration;
use collection::operations::types::CollectionResult;
use collection::telemetry::{
CollectionSnapshotTelemetry, CollectionTelemetry, CollectionsAggregatedTelemetry,
};
use common::types::{DetailsLevel, TelemetryDetail};
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::Serialize;
use storage::content_manager::toc::TableOfContent;
use storage::rbac::Access;
/// One collection's telemetry: full details (detail level >= 2) or an
/// aggregated summary (level 1). Serialized untagged, so the JSON shape
/// alone distinguishes the two.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
#[serde(untagged)]
pub enum CollectionTelemetryEnum {
    Full(Box<CollectionTelemetry>),
    Aggregated(CollectionsAggregatedTelemetry),
}
/// Telemetry for all collections visible to the requesting access token.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize, Default)]
pub struct CollectionsTelemetry {
    #[anonymize(false)]
    pub number_of_collections: usize,
    #[anonymize(false)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_collections: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub collections: Option<Vec<CollectionTelemetryEnum>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub snapshots: Option<Vec<CollectionSnapshotTelemetry>>,
}
impl CollectionsTelemetry {
    /// Collects collection telemetry at the requested detail level:
    /// level 0 reports only the collection count, level 1 adds aggregated
    /// per-collection stats, level 2+ adds full details and snapshot info.
    pub async fn collect(
        detail: TelemetryDetail,
        access: &Access,
        toc: &TableOfContent,
        timeout: Duration,
        is_stopped: &AtomicBool,
    ) -> CollectionResult<Self> {
        let number_of_collections = toc.all_collections(access).await.len();
        let (collections, snapshots) = if detail.level < DetailsLevel::Level1 {
            (None, None)
        } else if detail.level >= DetailsLevel::Level2 {
            let toc_telemetry = toc
                .get_telemetry_data(detail, access, timeout, is_stopped)
                .await?;
            let collections: Vec<_> = toc_telemetry
                .collection_telemetry
                .into_iter()
                .map(|telemetry| CollectionTelemetryEnum::Full(Box::new(telemetry)))
                .collect();
            (Some(collections), Some(toc_telemetry.snapshot_telemetry))
        } else {
            let collections = toc
                .get_aggregated_telemetry_data(access, timeout, is_stopped)
                .await?
                .into_iter()
                .map(CollectionTelemetryEnum::Aggregated)
                .collect();
            // Snapshot telemetry is only gathered at level 2 and above.
            (Some(collections), Some(vec![]))
        };
        Ok(CollectionsTelemetry {
            number_of_collections,
            max_collections: toc.max_collections(),
            collections,
            snapshots,
        })
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/memory_telemetry.rs | src/common/telemetry_ops/memory_telemetry.rs | use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use serde::Serialize;
use storage::rbac::Access;
#[cfg(all(
not(target_env = "msvc"),
any(target_arch = "x86_64", target_arch = "aarch64")
))]
use storage::rbac::AccessRequirements;
#[cfg(all(
not(target_env = "msvc"),
any(target_arch = "x86_64", target_arch = "aarch64")
))]
use tikv_jemalloc_ctl::{epoch, stats};
/// Allocator (jemalloc) memory statistics, in bytes.
#[derive(Debug, Clone, Default, JsonSchema, Serialize, Anonymize)]
#[anonymize(false)]
pub struct MemoryTelemetry {
    /// Total number of bytes in active pages allocated by the application
    pub active_bytes: usize,
    /// Total number of bytes allocated by the application
    pub allocated_bytes: usize,
    /// Total number of bytes dedicated to metadata
    pub metadata_bytes: usize,
    /// Maximum number of bytes in physically resident data pages mapped
    pub resident_bytes: usize,
    /// Total number of bytes in virtual memory mappings
    pub retained_bytes: usize,
}
impl MemoryTelemetry {
    /// Reads jemalloc statistics for telemetry. Returns `None` when the
    /// caller lacks global access or the stats epoch cannot be advanced.
    #[cfg(all(
        not(target_env = "msvc"),
        any(target_arch = "x86_64", target_arch = "aarch64")
    ))]
    pub fn collect(access: &Access) -> Option<MemoryTelemetry> {
        let required_access = AccessRequirements::new();
        // Memory telemetry is only exposed to callers with global access;
        // deny silently (no log) — this is authorization, not a failure.
        if access.check_global_access(required_access).is_err() {
            return None;
        }
        // Jemalloc caches its stats; the epoch must be advanced to refresh
        // them before reading. Fix: previously the failure message below was
        // also logged when the *access check* failed, which was misleading.
        if epoch::advance().is_err() {
            log::info!("Failed to advance Jemalloc stats epoch");
            return None;
        }
        Some(MemoryTelemetry {
            active_bytes: stats::active::read().unwrap_or_default(),
            allocated_bytes: stats::allocated::read().unwrap_or_default(),
            metadata_bytes: stats::metadata::read().unwrap_or_default(),
            resident_bytes: stats::resident::read().unwrap_or_default(),
            retained_bytes: stats::retained::read().unwrap_or_default(),
        })
    }
    /// Jemalloc is not used under MSVC, so no memory telemetry is available.
    #[cfg(target_env = "msvc")]
    pub fn collect(_access: &Access) -> Option<MemoryTelemetry> {
        None
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/app_telemetry.rs | src/common/telemetry_ops/app_telemetry.rs | use std::path::Path;
use chrono::{DateTime, SubsecRound, Utc};
use common::flags::FeatureFlags;
use common::types::{DetailsLevel, TelemetryDetail};
use schemars::JsonSchema;
use segment::common::anonymize::Anonymize;
use segment::types::HnswGlobalConfig;
use serde::Serialize;
use crate::settings::Settings;
/// Remembers when the service started, for uptime reporting.
pub struct AppBuildTelemetryCollector {
    pub startup: DateTime<Utc>,
}
impl AppBuildTelemetryCollector {
    /// Captures the current time as the service start time, rounded to two
    /// fractional-second digits.
    pub fn new() -> Self {
        let startup = Utc::now().round_subsecs(2);
        AppBuildTelemetryCollector { startup }
    }
}
/// Compile-time and startup-time feature switches of this build.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct AppFeaturesTelemetry {
    pub debug: bool,
    pub service_debug_feature: bool,
    pub recovery_mode: bool,
    pub gpu: bool,
    pub rocksdb: bool,
}
/// Static facts about the host: OS distribution, hardware sizes, CPU
/// features, detected GPUs.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct RunningEnvironmentTelemetry {
    #[anonymize(false)]
    distribution: Option<String>,
    #[anonymize(false)]
    distribution_version: Option<String>,
    is_docker: bool,
    #[anonymize(false)]
    cores: Option<usize>,
    ram_size: Option<usize>,
    disk_size: Option<usize>,
    #[anonymize(false)]
    cpu_flags: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    cpu_endian: Option<CpuEndian>,
    #[serde(skip_serializing_if = "Option::is_none")]
    gpu_devices: Option<Vec<GpuDeviceTelemetry>>,
}
/// Top-level build/app telemetry: package identity, features, host
/// environment and startup time. Optional sections are filled in only at
/// higher telemetry detail levels.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct AppBuildTelemetry {
    #[anonymize(false)]
    pub name: String,
    #[anonymize(false)]
    pub version: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub features: Option<AppFeaturesTelemetry>,
    #[anonymize(value = None)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub runtime_features: Option<FeatureFlags>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hnsw_global_config: Option<HnswGlobalConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system: Option<RunningEnvironmentTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub jwt_rbac: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hide_jwt_dashboard: Option<bool>,
    pub startup: DateTime<Utc>,
}
impl AppBuildTelemetry {
    /// Builds the app telemetry report. Sections guarded by
    /// `detail.level >= Level1` are omitted at the lowest detail level.
    pub fn collect(
        detail: TelemetryDetail,
        collector: &AppBuildTelemetryCollector,
        settings: &Settings,
    ) -> Self {
        AppBuildTelemetry {
            // Package identity baked in at compile time.
            name: env!("CARGO_PKG_NAME").to_string(),
            version: env!("CARGO_PKG_VERSION").to_string(),
            features: (detail.level >= DetailsLevel::Level1).then(|| AppFeaturesTelemetry {
                debug: cfg!(debug_assertions),
                service_debug_feature: cfg!(feature = "service_debug"),
                recovery_mode: settings.storage.recovery_mode.is_some(),
                gpu: cfg!(feature = "gpu"),
                rocksdb: cfg!(feature = "rocksdb"),
            }),
            runtime_features: (detail.level >= DetailsLevel::Level1)
                .then(common::flags::feature_flags),
            hnsw_global_config: (detail.level >= DetailsLevel::Level1)
                .then(|| settings.storage.hnsw_global_config.clone()),
            system: (detail.level >= DetailsLevel::Level1).then(get_system_data),
            jwt_rbac: settings.service.jwt_rbac,
            hide_jwt_dashboard: settings.service.hide_jwt_dashboard,
            startup: collector.startup,
        }
    }
}
/// Collects static information about the host environment (OS release,
/// CPU feature flags, memory/disk sizes, GPUs) for telemetry reporting.
fn get_system_data() -> RunningEnvironmentTelemetry {
    // Query the OS release once and reuse it for both fields — the
    // previous version called `sys_info::linux_os_release()` twice,
    // reading /etc/os-release twice for the same data.
    let (distribution, distribution_version) = match sys_info::linux_os_release() {
        Ok(release) => (release.id, release.version_id),
        Err(_) => (sys_info::os_type().ok(), sys_info::os_release().ok()),
    };
    // Runtime-detected SIMD capabilities, reported as a comma-joined list.
    let mut cpu_flags = vec![];
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        if std::arch::is_x86_feature_detected!("sse") {
            cpu_flags.push("sse");
        }
        if std::arch::is_x86_feature_detected!("sse2") {
            cpu_flags.push("sse2");
        }
        if std::arch::is_x86_feature_detected!("avx") {
            cpu_flags.push("avx");
        }
        if std::arch::is_x86_feature_detected!("avx2") {
            cpu_flags.push("avx2");
        }
        if std::arch::is_x86_feature_detected!("fma") {
            cpu_flags.push("fma");
        }
        if std::arch::is_x86_feature_detected!("f16c") {
            cpu_flags.push("f16c");
        }
        if std::arch::is_x86_feature_detected!("avx512f") {
            cpu_flags.push("avx512f");
        }
        if std::arch::is_x86_feature_detected!("avx512vl") {
            cpu_flags.push("avx512vl");
        }
        if std::arch::is_x86_feature_detected!("avx512vpopcntdq") {
            cpu_flags.push("avx512vpopcntdq");
        }
    }
    #[cfg(all(target_arch = "aarch64", target_feature = "neon"))]
    {
        if std::arch::is_aarch64_feature_detected!("neon") {
            cpu_flags.push("neon");
        }
        if std::arch::is_aarch64_feature_detected!("fp16") {
            cpu_flags.push("fp16");
        }
    }
    #[cfg(feature = "gpu")]
    let gpu_devices = segment::index::hnsw_index::gpu::GPU_DEVICES_MANAGER
        .read()
        .as_ref()
        .map(|gpu_devices_manager| {
            gpu_devices_manager
                .all_found_device_names()
                .iter()
                .map(|name| GpuDeviceTelemetry { name: name.clone() })
                .collect::<Vec<_>>()
        });
    #[cfg(not(feature = "gpu"))]
    let gpu_devices = None;
    RunningEnvironmentTelemetry {
        distribution,
        distribution_version,
        // Docker creates `/.dockerenv` inside containers.
        is_docker: cfg!(unix) && Path::new("/.dockerenv").exists(),
        cores: sys_info::cpu_num().ok().map(|x| x as usize),
        ram_size: sys_info::mem_info().ok().map(|x| x.total as usize),
        disk_size: sys_info::disk_info().ok().map(|x| x.total as usize),
        cpu_flags: cpu_flags.join(","),
        cpu_endian: Some(CpuEndian::current()),
        gpu_devices,
    }
}
/// Byte order of the CPU this binary targets.
#[derive(Serialize, Clone, Copy, Debug, JsonSchema, Anonymize)]
#[serde(rename_all = "snake_case")]
pub enum CpuEndian {
    Little,
    Big,
    Other,
}
impl CpuEndian {
    /// Byte order the binary was compiled for. `Other` is effectively
    /// unreachable on targets Rust supports today (always little or big).
    pub const fn current() -> Self {
        if cfg!(target_endian = "big") {
            CpuEndian::Big
        } else if cfg!(target_endian = "little") {
            CpuEndian::Little
        } else {
            CpuEndian::Other
        }
    }
}
/// One detected GPU device, identified by its reported name.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct GpuDeviceTelemetry {
    #[anonymize(false)]
    pub name: String,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/mod.rs | src/common/telemetry_ops/mod.rs | pub mod app_telemetry;
pub mod cluster_telemetry;
pub mod collections_telemetry;
pub mod hardware;
pub mod memory_telemetry;
pub mod requests_telemetry;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/common/telemetry_ops/cluster_telemetry.rs | src/common/telemetry_ops/cluster_telemetry.rs | use std::collections::HashMap;
use collection::operations::types::PeerMetadata;
use collection::shards::shard::PeerId;
use common::types::{DetailsLevel, TelemetryDetail};
use schemars::JsonSchema;
use segment::common::anonymize::{Anonymize, anonymize_collection_values_opt};
use serde::Serialize;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
use storage::types::{ClusterStatus, ConsensusThreadStatus, PeerInfo, StateRole};
use crate::settings::Settings;
/// Peer-to-peer networking settings reported in telemetry.
#[derive(Serialize, Clone, Debug, JsonSchema)]
pub struct P2pConfigTelemetry {
    connection_pool_size: usize,
}
/// Raft consensus tuning parameters reported in telemetry.
#[derive(Serialize, Clone, Debug, JsonSchema)]
pub struct ConsensusConfigTelemetry {
    max_message_queue_size: usize,
    tick_period_ms: u64,
    bootstrap_timeout_sec: u64,
}
/// Cluster configuration section of the telemetry report; contains no
/// user data, hence `anonymize(false)`.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
#[anonymize(false)]
pub struct ClusterConfigTelemetry {
    grpc_timeout_ms: u64,
    p2p: P2pConfigTelemetry,
    consensus: ConsensusConfigTelemetry,
}
impl From<&Settings> for ClusterConfigTelemetry {
    /// Copies the telemetry-relevant subset of the cluster settings.
    fn from(settings: &Settings) -> Self {
        ClusterConfigTelemetry {
            grpc_timeout_ms: settings.cluster.grpc_timeout_ms,
            p2p: P2pConfigTelemetry {
                connection_pool_size: settings.cluster.p2p.connection_pool_size,
            },
            consensus: ConsensusConfigTelemetry {
                max_message_queue_size: settings.cluster.consensus.max_message_queue_size,
                tick_period_ms: settings.cluster.consensus.tick_period_ms,
                bootstrap_timeout_sec: settings.cluster.consensus.bootstrap_timeout_sec,
            },
        }
    }
}
/// Snapshot of the local node's view of Raft consensus state.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct ClusterStatusTelemetry {
    #[anonymize(false)]
    pub number_of_peers: usize,
    #[anonymize(false)]
    pub term: u64,
    #[anonymize(false)]
    pub commit: u64,
    #[anonymize(false)]
    pub pending_operations: usize,
    pub role: Option<StateRole>,
    pub is_voter: bool,
    #[anonymize(false)]
    pub peer_id: Option<PeerId>,
    pub consensus_thread_status: ConsensusThreadStatus,
}
/// Cluster section of the telemetry report; optional sections are filled
/// in progressively with higher telemetry detail levels.
#[derive(Serialize, Clone, Debug, JsonSchema, Anonymize)]
pub struct ClusterTelemetry {
    pub enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ClusterStatusTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ClusterConfigTelemetry>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[anonymize(with = anonymize_collection_values_opt)]
    pub peers: Option<HashMap<PeerId, PeerInfo>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[anonymize(false)]
    pub peer_metadata: Option<HashMap<PeerId, PeerMetadata>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}
impl ClusterTelemetry {
    /// Builds the cluster telemetry section. Requires global access
    /// (otherwise `None`). Each field is populated only from its minimum
    /// detail level: status/metadata at Level1, config/peers at Level2,
    /// peer metadata at Level3.
    pub fn collect(
        access: &Access,
        detail: TelemetryDetail,
        dispatcher: &Dispatcher,
        settings: &Settings,
    ) -> Option<ClusterTelemetry> {
        let global_access = AccessRequirements::new();
        if access.check_global_access(global_access).is_err() {
            return None;
        }
        Some(ClusterTelemetry {
            enabled: settings.cluster.enabled,
            status: (detail.level >= DetailsLevel::Level1)
                .then(|| match dispatcher.cluster_status() {
                    ClusterStatus::Disabled => None,
                    ClusterStatus::Enabled(cluster_info) => Some(ClusterStatusTelemetry {
                        number_of_peers: cluster_info.peers.len(),
                        term: cluster_info.raft_info.term,
                        commit: cluster_info.raft_info.commit,
                        pending_operations: cluster_info.raft_info.pending_operations,
                        role: cluster_info.raft_info.role,
                        is_voter: cluster_info.raft_info.is_voter,
                        peer_id: Some(cluster_info.peer_id),
                        consensus_thread_status: cluster_info.consensus_thread_status,
                    }),
                })
                .flatten(),
            config: (detail.level >= DetailsLevel::Level2)
                .then(|| ClusterConfigTelemetry::from(settings)),
            // NOTE(review): `cluster_status()` is queried a second time here,
            // so `status` and `peers` may reflect slightly different moments.
            peers: (detail.level >= DetailsLevel::Level2)
                .then(|| match dispatcher.cluster_status() {
                    ClusterStatus::Disabled => None,
                    ClusterStatus::Enabled(cluster_info) => Some(cluster_info.peers),
                })
                .flatten(),
            peer_metadata: (detail.level >= DetailsLevel::Level3)
                .then(|| {
                    dispatcher
                        .consensus_state()
                        .map(|state| state.persistent.read().peer_metadata_by_id())
                })
                .flatten(),
            // Cluster metadata is omitted entirely when empty.
            metadata: (detail.level >= DetailsLevel::Level1)
                .then(|| {
                    dispatcher
                        .consensus_state()
                        .map(|state| state.persistent.read().cluster_metadata.clone())
                        .filter(|metadata| !metadata.is_empty())
                })
                .flatten(),
        })
    }
    /// Returns this node's peer id when clustering is enabled and the
    /// status section was collected.
    pub fn this_peer_id(&self) -> Option<PeerId> {
        self.enabled
            .then(|| self.status.as_ref().and_then(|j| j.peer_id))
            .flatten()
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/migrations/single_to_cluster.rs | src/migrations/single_to_cluster.rs | use std::sync::Arc;
use collection::collection_state::State;
use collection::config::{CollectionConfigInternal, ShardingMethod};
use collection::shards::replica_set::replica_set_state::ReplicaState;
use collection::shards::shard::PeerId;
use storage::content_manager::collection_meta_ops::{
CollectionMetaOperations, CreateCollection, CreateCollectionOperation, CreateShardKey,
SetShardReplicaState,
};
use storage::content_manager::consensus_manager::ConsensusStateRef;
use storage::content_manager::shard_distribution::ShardDistributionProposal;
use storage::content_manager::toc::TableOfContent;
use storage::dispatcher::Dispatcher;
use storage::rbac::{Access, AccessRequirements};
/// Processes the existing collections, which were created outside the consensus:
/// - during the migration from single to cluster
/// - during restoring from a backup
pub async fn handle_existing_collections(
toc_arc: Arc<TableOfContent>,
consensus_state: ConsensusStateRef,
dispatcher_arc: Arc<Dispatcher>,
this_peer_id: PeerId,
collections: Vec<String>,
) {
let full_access = Access::full("Migration from single to cluster");
let multipass = full_access
.check_global_access(AccessRequirements::new().manage())
.expect("Full access should have manage rights");
consensus_state.is_leader_established.await_ready();
for collection_name in collections {
let Ok(collection_obj) = toc_arc
.get_collection(&multipass.issue_pass(&collection_name))
.await
else {
break;
};
let State {
config,
shards,
resharding: _, // resharding can't exist outside of consensus
transfers: _, // transfers can't exist outside of consensus
shards_key_mapping,
payload_index_schema: _, // payload index schema doesn't require special handling in this case
} = collection_obj.state().await;
let CollectionConfigInternal {
params,
hnsw_config,
optimizer_config,
wal_config,
quantization_config,
strict_mode_config,
uuid,
metadata,
} = config;
let shards_number = params.shard_number.get();
let sharding_method = params.sharding_method;
let mut collection_create_operation = CreateCollectionOperation::new(
collection_name.clone(),
CreateCollection {
vectors: params.vectors,
sparse_vectors: params.sparse_vectors,
shard_number: Some(shards_number),
sharding_method,
replication_factor: Some(params.replication_factor.get()),
write_consistency_factor: Some(params.write_consistency_factor.get()),
on_disk_payload: Some(params.on_disk_payload),
hnsw_config: Some(hnsw_config.into()),
wal_config: Some(wal_config.into()),
optimizers_config: Some(optimizer_config.into()),
quantization_config,
strict_mode_config,
uuid,
metadata,
},
)
.expect("Failed to create collection operation");
let mut consensus_operations = Vec::new();
match sharding_method.unwrap_or_default() {
ShardingMethod::Auto => {
collection_create_operation.set_distribution(ShardDistributionProposal {
distribution: shards
.iter()
.filter_map(|(shard_id, shard_info)| {
if shard_info.replicas.contains_key(&this_peer_id) {
Some((*shard_id, vec![this_peer_id]))
} else {
None
}
})
.collect(),
});
consensus_operations.push(CollectionMetaOperations::CreateCollection(
collection_create_operation,
));
}
ShardingMethod::Custom => {
// We should create additional consensus operations here to set the shard distribution
collection_create_operation.set_distribution(ShardDistributionProposal::empty());
consensus_operations.push(CollectionMetaOperations::CreateCollection(
collection_create_operation,
));
for (shard_key, shard_ids) in shards_key_mapping.iter() {
let mut placement = Vec::new();
for shard_id in shard_ids {
let shard_info = shards.get(shard_id).unwrap();
placement.push(shard_info.replicas.keys().copied().collect());
}
consensus_operations.push(CollectionMetaOperations::CreateShardKey(
CreateShardKey {
collection_name: collection_name.clone(),
shard_key: shard_key.clone(),
placement,
initial_state: None, // Initial state can't be set during migration
},
))
}
}
}
for operation in consensus_operations {
let _res = dispatcher_arc
.submit_collection_meta_op(operation, full_access.clone(), None)
.await;
}
for (shard_id, shard_info) in shards {
if shard_info.replicas.contains_key(&this_peer_id) {
let _res = dispatcher_arc
.submit_collection_meta_op(
CollectionMetaOperations::SetShardReplicaState(SetShardReplicaState {
collection_name: collection_name.clone(),
shard_id,
peer_id: this_peer_id,
state: ReplicaState::Active,
from_state: None,
}),
full_access.clone(),
None,
)
.await;
}
}
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/migrations/mod.rs | src/migrations/mod.rs | pub mod single_to_cluster;
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/certificate_helpers.rs | src/actix/certificate_helpers.rs | use std::fmt::Debug;
use std::io::{self, BufRead, BufReader};
use std::sync::Arc;
use std::time::{Duration, Instant};
use fs_err::File;
use parking_lot::RwLock;
use rustls::client::VerifierBuilderError;
use rustls::pki_types::CertificateDer;
use rustls::server::{ClientHello, ResolvesServerCert, WebPkiClientVerifier};
use rustls::sign::CertifiedKey;
use rustls::{RootCertStore, ServerConfig, crypto};
use rustls_pemfile::Item;
use crate::settings::{Settings, TlsConfig};
type Result<T> = std::result::Result<T, Error>;
/// A TTL based rotating server certificate resolver
#[derive(Debug)]
struct RotatingCertificateResolver {
/// TLS configuration used for loading/refreshing certified key
tls_config: TlsConfig,
/// TTL for each rotation
ttl: Option<Duration>,
/// Current certified key
key: RwLock<CertifiedKeyWithAge>,
}
impl RotatingCertificateResolver {
pub fn new(tls_config: TlsConfig, ttl: Option<Duration>) -> Result<Self> {
let certified_key = load_certified_key(&tls_config)?;
Ok(Self {
tls_config,
ttl,
key: RwLock::new(CertifiedKeyWithAge::from(certified_key)),
})
}
/// Get certificate key or refresh
///
/// The key is automatically refreshed when the TTL is reached.
/// If refreshing fails, an error is logged and the old key is persisted.
fn get_key_or_refresh(&self) -> Arc<CertifiedKey> {
// Get read-only lock to the key. If TTL is not configured or is not expired, return key.
let key = self.key.read();
let ttl = match self.ttl {
Some(ttl) if key.is_expired(ttl) => ttl,
_ => return key.key.clone(),
};
drop(key);
// If TTL is expired:
// - get read-write lock to the key
// - *re-check that TTL is expired* (to avoid refreshing the key multiple times from concurrent threads)
// - refresh and return the key
let mut key = self.key.write();
if key.is_expired(ttl)
&& let Err(err) = key.refresh(&self.tls_config)
{
log::error!("Failed to refresh server TLS certificate, keeping current: {err}");
}
key.key.clone()
}
}
impl ResolvesServerCert for RotatingCertificateResolver {
fn resolve(&self, _client_hello: ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
Some(self.get_key_or_refresh())
}
}
#[derive(Debug)]
struct CertifiedKeyWithAge {
/// Last time the certificate was updated/replaced
last_update: Instant,
/// Current certified key
key: Arc<CertifiedKey>,
}
impl CertifiedKeyWithAge {
pub fn from(key: Arc<CertifiedKey>) -> Self {
Self {
last_update: Instant::now(),
key,
}
}
pub fn refresh(&mut self, tls_config: &TlsConfig) -> Result<()> {
*self = Self::from(load_certified_key(tls_config)?);
Ok(())
}
pub fn age(&self) -> Duration {
self.last_update.elapsed()
}
pub fn is_expired(&self, ttl: Duration) -> bool {
self.age() >= ttl
}
}
/// Load TLS configuration and construct certified key.
fn load_certified_key(tls_config: &TlsConfig) -> Result<Arc<CertifiedKey>> {
// Load certificates
let certs: Vec<CertificateDer> = with_buf_read(&tls_config.cert, |rd| {
rustls_pemfile::read_all(rd).collect::<io::Result<Vec<_>>>()
})?
.into_iter()
.filter_map(|item| match item {
Item::X509Certificate(data) => Some(data),
_ => None,
})
.collect();
if certs.is_empty() {
return Err(Error::NoServerCert);
}
// Load private key
let private_key_item =
with_buf_read(&tls_config.key, rustls_pemfile::read_one)?.ok_or(Error::NoPrivateKey)?;
let private_key = match private_key_item {
Item::Pkcs1Key(pkey) => rustls_pki_types::PrivateKeyDer::from(pkey),
Item::Pkcs8Key(pkey) => rustls_pki_types::PrivateKeyDer::from(pkey),
Item::Sec1Key(pkey) => rustls_pki_types::PrivateKeyDer::from(pkey),
_ => return Err(Error::InvalidPrivateKey),
};
let signing_key = crypto::ring::sign::any_supported_type(&private_key).map_err(Error::Sign)?;
// Construct certified key
let certified_key = CertifiedKey::new(certs, signing_key);
Ok(Arc::new(certified_key))
}
/// Generate an actix server configuration with TLS
///
/// Uses TLS settings as configured in configuration by user.
pub fn actix_tls_server_config(settings: &Settings) -> Result<ServerConfig> {
let config = ServerConfig::builder();
let tls_config = settings
.tls
.clone()
.ok_or_else(Settings::tls_config_is_undefined_error)
.map_err(Error::Io)?;
// Verify client CA or not
let config = if settings.service.verify_https_client_certificate {
let mut root_cert_store = RootCertStore::empty();
let ca_cert_path = tls_config.ca_cert.as_ref().ok_or(Error::NoCaCert)?;
let ca_certs: Vec<CertificateDer> =
with_buf_read(ca_cert_path, |rd| rustls_pemfile::certs(rd).collect())?;
root_cert_store.add_parsable_certificates(ca_certs);
let client_cert_verifier = WebPkiClientVerifier::builder(root_cert_store.into())
.build()
.map_err(Error::ClientCertVerifier)?;
config.with_client_cert_verifier(client_cert_verifier)
} else {
config.with_no_client_auth()
};
// Configure rotating certificate resolver
let ttl = match tls_config.cert_ttl {
None | Some(0) => None,
Some(seconds) => Some(Duration::from_secs(seconds)),
};
let cert_resolver = RotatingCertificateResolver::new(tls_config, ttl)?;
let config = config.with_cert_resolver(Arc::new(cert_resolver));
Ok(config)
}
fn with_buf_read<T>(path: &str, f: impl FnOnce(&mut dyn BufRead) -> io::Result<T>) -> Result<T> {
let file = File::open(path).map_err(|err| Error::OpenFile(err, path.into()))?;
let mut reader = BufReader::new(file);
let dyn_reader: &mut dyn BufRead = &mut reader;
f(dyn_reader).map_err(|err| Error::ReadFile(err, path.into()))
}
/// Actix TLS errors.
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("TLS file could not be opened: {1}")]
OpenFile(#[source] io::Error, String),
#[error("TLS file could not be read: {1}")]
ReadFile(#[source] io::Error, String),
#[error("general TLS IO error")]
Io(#[source] io::Error),
#[error("no server certificate found")]
NoServerCert,
#[error("no private key found")]
NoPrivateKey,
#[error("invalid private key")]
InvalidPrivateKey,
#[error("TLS signing error")]
Sign(#[source] rustls::Error),
#[error("client certificate verification")]
ClientCertVerifier(#[source] VerifierBuilderError),
#[error("No ca_cert provided")]
NoCaCert,
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/helpers.rs | src/actix/helpers.rs | use std::fmt::Debug;
use std::future::Future;
use actix_web::http::header;
use actix_web::http::header::HeaderMap;
use actix_web::rt::time::Instant;
use actix_web::{HttpResponse, ResponseError, http};
use api::rest::models::{ApiResponse, ApiStatus, HardwareUsage, InferenceUsage, Usage};
use collection::operations::types::CollectionError;
use common::counter::hardware_accumulator::HwMeasurementAcc;
use serde::Serialize;
use storage::content_manager::errors::{StorageError, StorageResult};
use storage::content_manager::toc::request_hw_counter::RequestHwCounter;
use storage::dispatcher::Dispatcher;
pub fn get_request_hardware_counter(
dispatcher: &Dispatcher,
collection_name: String,
report_to_api: bool,
wait: Option<bool>,
) -> RequestHwCounter {
let report_to_api = report_to_api && wait != Some(false);
RequestHwCounter::new(
HwMeasurementAcc::new_with_metrics_drain(
dispatcher.get_collection_hw_metrics(collection_name),
),
report_to_api,
)
}
pub fn accepted_response(
timing: Instant,
hardware_usage: Option<HardwareUsage>,
inference_usage: Option<InferenceUsage>,
) -> HttpResponse {
let usage = {
let u = Usage {
hardware: hardware_usage,
inference: inference_usage,
};
if u.is_empty() { None } else { Some(u) }
};
HttpResponse::Accepted().json(ApiResponse::<()> {
result: None,
status: ApiStatus::Accepted,
time: timing.elapsed().as_secs_f64(),
usage,
})
}
pub fn process_response_with_inference_usage<T>(
response: Result<T, StorageError>,
timing: Instant,
hardware_usage: Option<HardwareUsage>,
inference_usage: Option<InferenceUsage>,
) -> HttpResponse
where
T: Serialize,
{
match response {
Ok(res) => HttpResponse::Ok().json(ApiResponse {
result: Some(res),
status: ApiStatus::Ok,
time: timing.elapsed().as_secs_f64(),
usage: Some(Usage {
hardware: hardware_usage,
inference: inference_usage,
}),
}),
Err(err) => process_response_error_with_inference_usage(
err,
timing,
hardware_usage,
inference_usage,
),
}
}
pub fn process_response<T>(
response: Result<T, StorageError>,
timing: Instant,
hardware_usage: Option<HardwareUsage>,
) -> HttpResponse
where
T: Serialize,
{
process_response_with_inference_usage(response, timing, hardware_usage, None)
}
pub fn process_response_error_with_inference_usage(
err: StorageError,
timing: Instant,
hardware_usage: Option<HardwareUsage>,
inference_usage: Option<InferenceUsage>,
) -> HttpResponse {
log_service_error(&err);
let error = HttpError::from(err);
let http_code = error.status_code();
let headers = error.headers();
let json_body = ApiResponse::<()> {
result: None,
status: ApiStatus::Error(error.to_string()),
time: timing.elapsed().as_secs_f64(),
usage: Some(Usage {
hardware: hardware_usage,
inference: inference_usage,
}),
};
let mut response_builder = HttpResponse::build(http_code);
for header_pair in headers {
response_builder.insert_header(header_pair);
}
response_builder.json(json_body)
}
pub fn process_response_error(
err: StorageError,
timing: Instant,
hardware_usage: Option<HardwareUsage>,
) -> HttpResponse {
process_response_error_with_inference_usage(err, timing, hardware_usage, None)
}
pub fn already_in_progress_response() -> HttpResponse {
HttpResponse::build(http::StatusCode::SERVICE_UNAVAILABLE).json(ApiResponse::<()> {
result: None,
status: ApiStatus::AlreadyInProgress,
time: 0.0,
usage: None,
})
}
/// Response wrapper for a `Future` returning `Result`.
///
/// # Cancel safety
///
/// Future must be cancel safe.
pub async fn time<T, Fut>(future: Fut) -> HttpResponse
where
Fut: Future<Output = StorageResult<T>>,
T: serde::Serialize,
{
time_impl(async { future.await.map(Some) }).await
}
/// Response wrapper for a `Future` returning `Result`.
/// If `wait` is false, returns `202 Accepted` immediately.
pub async fn time_or_accept<T, Fut>(future: Fut, wait: bool) -> HttpResponse
where
Fut: Future<Output = StorageResult<T>> + Send + 'static,
T: serde::Serialize + Send + 'static,
{
let future = async move {
let handle = tokio::task::spawn(async move {
let result = future.await;
if !wait && let Err(err) = &result {
log_service_error(err);
}
result
});
if wait {
handle.await?.map(Some)
} else {
Ok(None)
}
};
time_impl(future).await
}
/// # Cancel safety
///
/// Future must be cancel safe.
async fn time_impl<T, Fut>(future: Fut) -> HttpResponse
where
Fut: Future<Output = Result<Option<T>, StorageError>>,
T: serde::Serialize,
{
let instant = Instant::now();
match future.await.transpose() {
Some(res) => process_response(res, instant, None),
None => accepted_response(instant, None, None),
}
}
fn log_service_error(err: &StorageError) {
if let StorageError::ServiceError { backtrace, .. } = err {
log::error!("Error processing request: {err}");
if let Some(backtrace) = backtrace {
log::trace!("Backtrace: {backtrace}");
}
}
}
#[derive(Clone, Debug, thiserror::Error)]
#[error("{0}")]
pub struct HttpError(StorageError);
impl HttpError {
fn headers(&self) -> HeaderMap {
let mut headers = HeaderMap::new();
match &self.0 {
StorageError::RateLimitExceeded {
description: _,
retry_after,
} => {
if let Some(retry_after) = retry_after {
// Retry-After is expressed in seconds `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After`
// Ceil the value to the nearest second so clients don't retry too early
let retry_after_sec = retry_after.as_secs_f32().ceil() as u32;
headers.insert(
header::RETRY_AFTER,
header::HeaderValue::from(retry_after_sec),
);
}
}
StorageError::BadInput { .. } => {}
StorageError::AlreadyExists { .. } => {}
StorageError::NotFound { .. } => {}
StorageError::ServiceError { .. } => {}
StorageError::BadRequest { .. } => {}
StorageError::Locked { .. } => {}
StorageError::Timeout { .. } => {}
StorageError::ChecksumMismatch { .. } => {}
StorageError::Forbidden { .. } => {}
StorageError::PreconditionFailed { .. } => {}
StorageError::InferenceError { .. } => {}
StorageError::ShardUnavailable { .. } => {}
StorageError::EmptyPartialSnapshot { .. } => {}
}
headers
}
}
impl ResponseError for HttpError {
fn status_code(&self) -> http::StatusCode {
match &self.0 {
StorageError::BadInput { .. } => http::StatusCode::BAD_REQUEST,
StorageError::NotFound { .. } => http::StatusCode::NOT_FOUND,
StorageError::ServiceError { .. } => http::StatusCode::INTERNAL_SERVER_ERROR,
StorageError::BadRequest { .. } => http::StatusCode::BAD_REQUEST,
StorageError::Locked { .. } => http::StatusCode::FORBIDDEN,
StorageError::Timeout { .. } => http::StatusCode::REQUEST_TIMEOUT,
StorageError::AlreadyExists { .. } => http::StatusCode::CONFLICT,
StorageError::ChecksumMismatch { .. } => http::StatusCode::BAD_REQUEST,
StorageError::Forbidden { .. } => http::StatusCode::FORBIDDEN,
StorageError::PreconditionFailed { .. } => http::StatusCode::INTERNAL_SERVER_ERROR,
StorageError::InferenceError { .. } => http::StatusCode::BAD_REQUEST,
StorageError::RateLimitExceeded { .. } => http::StatusCode::TOO_MANY_REQUESTS,
StorageError::ShardUnavailable { .. } => http::StatusCode::SERVICE_UNAVAILABLE,
StorageError::EmptyPartialSnapshot { .. } => http::StatusCode::NOT_MODIFIED,
}
}
}
impl From<StorageError> for HttpError {
fn from(err: StorageError) -> Self {
HttpError(err)
}
}
impl From<CollectionError> for HttpError {
fn from(err: CollectionError) -> Self {
HttpError(err.into())
}
}
impl From<std::io::Error> for HttpError {
fn from(err: std::io::Error) -> Self {
HttpError(err.into()) // TODO: Is this good enough?.. 🤔
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/web_ui.rs | src/actix/web_ui.rs | use std::path::Path;
use actix_web::dev::HttpServiceFactory;
use actix_web::http::header::HeaderValue;
use actix_web::middleware::DefaultHeaders;
use actix_web::web;
use crate::settings::Settings;
const DEFAULT_STATIC_DIR: &str = "./static";
pub const WEB_UI_PATH: &str = "/dashboard";
pub fn web_ui_folder(settings: &Settings) -> Option<String> {
let web_ui_enabled = settings.service.enable_static_content.unwrap_or(true);
if web_ui_enabled {
let static_folder = settings
.service
.static_content_dir
.clone()
.unwrap_or_else(|| DEFAULT_STATIC_DIR.to_string());
let static_folder_path = Path::new(&static_folder);
if !static_folder_path.exists() || !static_folder_path.is_dir() {
// enabled BUT folder does not exist
log::warn!(
"Static content folder for Web UI '{}' does not exist",
static_folder_path.display(),
);
None
} else {
// enabled AND folder exists
Some(static_folder)
}
} else {
// not enabled
None
}
}
pub fn web_ui_factory(static_folder: &str) -> impl HttpServiceFactory + use<> {
web::scope(WEB_UI_PATH)
.wrap(DefaultHeaders::new().add(("X-Frame-Options", HeaderValue::from_static("DENY"))))
.service(actix_files::Files::new("/", static_folder).index_file("index.html"))
}
#[cfg(test)]
mod tests {
use actix_web::App;
use actix_web::http::StatusCode;
use actix_web::http::header::{self, HeaderMap};
use actix_web::test::{self, TestRequest};
use super::*;
fn assert_html_custom_headers(headers: &HeaderMap) {
let content_type = header::HeaderValue::from_static("text/html; charset=utf-8");
assert_eq!(headers.get(header::CONTENT_TYPE), Some(&content_type));
let x_frame_options = header::HeaderValue::from_static("DENY");
assert_eq!(headers.get(header::X_FRAME_OPTIONS), Some(&x_frame_options),);
}
#[actix_web::test]
async fn test_web_ui() {
let static_dir = String::from("static");
let mut settings = Settings::new(None).unwrap();
settings.service.static_content_dir = Some(static_dir.clone());
let maybe_static_folder = web_ui_folder(&settings);
if maybe_static_folder.is_none() {
println!("Skipping test because the static folder was not found.");
return;
}
let static_folder = maybe_static_folder.unwrap();
let srv = test::init_service(App::new().service(web_ui_factory(&static_folder))).await;
// Index path (no trailing slash)
let req = TestRequest::with_uri(WEB_UI_PATH).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_html_custom_headers(headers);
// Index path (trailing slash)
let req = TestRequest::with_uri(format!("{WEB_UI_PATH}/").as_str()).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_html_custom_headers(headers);
// Index path (index.html file)
let req = TestRequest::with_uri(format!("{WEB_UI_PATH}/index.html").as_str()).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_html_custom_headers(headers);
// Static asset (favicon.ico)
let req = TestRequest::with_uri(format!("{WEB_UI_PATH}/favicon.ico").as_str()).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
let headers = res.headers();
assert_eq!(
headers.get(header::CONTENT_TYPE),
Some(&header::HeaderValue::from_static("image/x-icon")),
);
// Non-existing path (404 Not Found)
let fake_path = uuid::Uuid::new_v4().to_string();
let srv = test::init_service(App::new().service(web_ui_factory(&fake_path))).await;
let req = TestRequest::with_uri(WEB_UI_PATH).to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let headers = res.headers();
assert_eq!(headers.get(header::CONTENT_TYPE), None);
assert_eq!(headers.get(header::CONTENT_LENGTH), None);
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/auth.rs | src/actix/auth.rs | use std::convert::Infallible;
use std::future::{Ready, ready};
use std::sync::Arc;
use actix_web::body::{BoxBody, EitherBody};
use actix_web::dev::{Service, ServiceRequest, ServiceResponse, Transform, forward_ready};
use actix_web::{Error, FromRequest, HttpMessage, HttpResponse, ResponseError};
use futures_util::future::LocalBoxFuture;
use storage::rbac::Access;
use super::helpers::HttpError;
use crate::common::auth::{AuthError, AuthKeys};
pub struct Auth {
auth_keys: AuthKeys,
whitelist: Vec<WhitelistItem>,
}
impl Auth {
pub fn new(auth_keys: AuthKeys, whitelist: Vec<WhitelistItem>) -> Self {
Self {
auth_keys,
whitelist,
}
}
}
impl<S, B> Transform<S, ServiceRequest> for Auth
where
S: Service<ServiceRequest, Response = ServiceResponse<EitherBody<B, BoxBody>>, Error = Error>
+ 'static,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<EitherBody<B, BoxBody>>;
type Error = Error;
type InitError = ();
type Transform = AuthMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(AuthMiddleware {
auth_keys: Arc::new(self.auth_keys.clone()),
whitelist: self.whitelist.clone(),
service: Arc::new(service),
}))
}
}
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct WhitelistItem(pub String, pub PathMode);
impl WhitelistItem {
pub fn exact<S: Into<String>>(path: S) -> Self {
Self(path.into(), PathMode::Exact)
}
pub fn prefix<S: Into<String>>(path: S) -> Self {
Self(path.into(), PathMode::Prefix)
}
pub fn matches(&self, other: &str) -> bool {
self.1.check(&self.0, other)
}
}
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum PathMode {
/// Path must match exactly
Exact,
/// Path must have given prefix
Prefix,
}
impl PathMode {
fn check(&self, key: &str, other: &str) -> bool {
match self {
Self::Exact => key == other,
Self::Prefix => other.starts_with(key),
}
}
}
pub struct AuthMiddleware<S> {
auth_keys: Arc<AuthKeys>,
/// List of items whitelisted from authentication.
whitelist: Vec<WhitelistItem>,
service: Arc<S>,
}
impl<S> AuthMiddleware<S> {
pub fn is_path_whitelisted(&self, path: &str) -> bool {
self.whitelist.iter().any(|item| item.matches(path))
}
}
impl<S, B> Service<ServiceRequest> for AuthMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<EitherBody<B, BoxBody>>, Error = Error>
+ 'static,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<EitherBody<B, BoxBody>>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
forward_ready!(service);
fn call(&self, req: ServiceRequest) -> Self::Future {
let path = req.path();
if self.is_path_whitelisted(path) {
return Box::pin(self.service.call(req));
}
let auth_keys = self.auth_keys.clone();
let service = self.service.clone();
Box::pin(async move {
match auth_keys
.validate_request(|key| req.headers().get(key).and_then(|val| val.to_str().ok()))
.await
{
Ok((access, inference_token)) => {
let previous = req.extensions_mut().insert::<Access>(access);
req.extensions_mut().insert(inference_token);
debug_assert!(
previous.is_none(),
"Previous access object should not exist in the request"
);
service.call(req).await
}
Err(e) => {
let resp = match e {
AuthError::Unauthorized(e) => HttpResponse::Unauthorized().body(e),
AuthError::Forbidden(e) => HttpResponse::Forbidden().body(e),
AuthError::StorageError(e) => HttpError::from(e).error_response(),
};
Ok(req.into_response(resp).map_into_right_body())
}
}
})
}
}
pub struct ActixAccess(pub Access);
impl FromRequest for ActixAccess {
type Error = Infallible;
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(
req: &actix_web::HttpRequest,
_payload: &mut actix_web::dev::Payload,
) -> Self::Future {
let access = req.extensions_mut().remove::<Access>().unwrap_or_else(|| {
Access::full("All requests have full by default access when API key is not configured")
});
ready(Ok(ActixAccess(access)))
}
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
qdrant/qdrant | https://github.com/qdrant/qdrant/blob/f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd/src/actix/mod.rs | src/actix/mod.rs | pub mod actix_telemetry;
pub mod api;
mod auth;
mod certificate_helpers;
pub mod helpers;
pub mod web_ui;
use std::io;
use std::sync::Arc;
use ::api::rest::models::{ApiResponse, ApiStatus, VersionInfo};
use actix_cors::Cors;
use actix_multipart::form::MultipartFormConfig;
use actix_multipart::form::tempfile::TempFileConfig;
use actix_web::middleware::{Compress, Condition, Logger, NormalizePath};
use actix_web::{App, HttpRequest, HttpResponse, HttpServer, Responder, error, get, web};
use actix_web_extras::middleware::Condition as ConditionEx;
use api::facet_api::config_facet_api;
use collection::operations::validation;
use collection::operations::verification::new_unchecked_verification_pass;
use storage::dispatcher::Dispatcher;
use storage::rbac::Access;
use crate::actix::api::cluster_api::config_cluster_api;
use crate::actix::api::collections_api::config_collections_api;
use crate::actix::api::count_api::count_points;
use crate::actix::api::debug_api::config_debugger_api;
use crate::actix::api::discovery_api::config_discovery_api;
use crate::actix::api::issues_api::config_issues_api;
use crate::actix::api::local_shard_api::config_local_shard_api;
use crate::actix::api::profiler_api::config_profiler_api;
use crate::actix::api::query_api::config_query_api;
use crate::actix::api::recommend_api::config_recommend_api;
use crate::actix::api::retrieve_api::{get_point, get_points, scroll_points};
use crate::actix::api::search_api::config_search_api;
use crate::actix::api::service_api::config_service_api;
use crate::actix::api::shards_api::config_shards_api;
use crate::actix::api::snapshot_api::config_snapshots_api;
use crate::actix::api::update_api::config_update_api;
use crate::actix::auth::{Auth, WhitelistItem};
use crate::actix::web_ui::{WEB_UI_PATH, web_ui_factory, web_ui_folder};
use crate::common::auth::AuthKeys;
use crate::common::debugger::DebuggerState;
use crate::common::health;
use crate::common::http_client::HttpClient;
use crate::common::telemetry::TelemetryCollector;
use crate::settings::{Settings, max_web_workers};
use crate::tracing::LoggerHandle;
#[get("/")]
pub async fn index() -> impl Responder {
HttpResponse::Ok().json(VersionInfo::default())
}
pub fn init(
dispatcher: Arc<Dispatcher>,
telemetry_collector: Arc<tokio::sync::Mutex<TelemetryCollector>>,
health_checker: Option<Arc<health::HealthChecker>>,
settings: Settings,
logger_handle: LoggerHandle,
) -> io::Result<()> {
actix_web::rt::System::new().block_on(async {
// Nothing to verify here.
let pass = new_unchecked_verification_pass();
let auth_keys = AuthKeys::try_create(
&settings.service,
dispatcher
.toc(&Access::full("For JWT validation"), &pass)
.clone(),
);
let upload_dir = dispatcher
.toc(&Access::full("For upload dir"), &pass)
.upload_dir()
.unwrap();
let dispatcher_data = web::Data::from(dispatcher);
let actix_telemetry_collector = telemetry_collector
.lock()
.await
.actix_telemetry_collector
.clone();
let debugger_state = web::Data::new(DebuggerState::from_settings(&settings));
let telemetry_collector_data = web::Data::from(telemetry_collector);
let logger_handle_data = web::Data::new(logger_handle);
let http_client = web::Data::new(HttpClient::from_settings(&settings)?);
let health_checker = web::Data::new(health_checker);
let web_ui_available = web_ui_folder(&settings);
let service_config = web::Data::new(settings.service.clone());
let mut api_key_whitelist = vec![
WhitelistItem::exact("/"),
WhitelistItem::exact("/healthz"),
WhitelistItem::prefix("/readyz"),
WhitelistItem::prefix("/livez"),
];
if web_ui_available.is_some() {
api_key_whitelist.push(WhitelistItem::prefix(WEB_UI_PATH));
}
let mut server = HttpServer::new(move || {
let cors = Cors::default()
.allow_any_origin()
.allow_any_method()
.allow_any_header();
let validate_path_config = actix_web_validator::PathConfig::default()
.error_handler(|err, rec| validation_error_handler("path parameters", err, rec));
let validate_query_config = actix_web_validator::QueryConfig::default()
.error_handler(|err, rec| validation_error_handler("query parameters", err, rec));
let validate_json_config = actix_web_validator::JsonConfig::default()
.limit(settings.service.max_request_size_mb * 1024 * 1024)
.error_handler(|err, rec| validation_error_handler("JSON body", err, rec));
let mut app = App::new()
.wrap(Compress::default()) // Reads the `Accept-Encoding` header to negotiate which compression codec to use.
// api_key middleware
// note: the last call to `wrap()` or `wrap_fn()` is executed first
.wrap(ConditionEx::from_option(auth_keys.as_ref().map(
|auth_keys| Auth::new(auth_keys.clone(), api_key_whitelist.clone()),
)))
// Normalize path
.wrap(NormalizePath::trim())
.wrap(Condition::new(settings.service.enable_cors, cors))
.wrap(
// Set up logger, but avoid logging hot status endpoints
Logger::default()
.exclude("/")
.exclude("/metrics")
.exclude("/telemetry")
.exclude("/healthz")
.exclude("/readyz")
.exclude("/livez"),
)
.wrap(actix_telemetry::ActixTelemetryTransform::new(
actix_telemetry_collector.clone(),
))
.app_data(dispatcher_data.clone())
.app_data(telemetry_collector_data.clone())
.app_data(logger_handle_data.clone())
.app_data(http_client.clone())
.app_data(debugger_state.clone())
.app_data(health_checker.clone())
.app_data(validate_path_config)
.app_data(validate_query_config)
.app_data(validate_json_config)
.app_data(TempFileConfig::default().directory(&upload_dir))
.app_data(MultipartFormConfig::default().total_limit(usize::MAX))
.app_data(service_config.clone())
.service(index)
.configure(config_collections_api)
.configure(config_snapshots_api)
.configure(config_update_api)
.configure(config_cluster_api)
.configure(config_service_api)
.configure(config_search_api)
.configure(config_recommend_api)
.configure(config_discovery_api)
.configure(config_query_api)
.configure(config_facet_api)
.configure(config_shards_api)
.configure(config_issues_api)
.configure(config_debugger_api)
.configure(config_profiler_api)
.configure(config_local_shard_api)
// Ordering of services is important for correct path pattern matching
// See: <https://github.com/qdrant/qdrant/issues/3543>
.service(scroll_points)
.service(count_points)
.service(get_point)
.service(get_points);
if let Some(static_folder) = web_ui_available.as_deref() {
app = app.service(web_ui_factory(static_folder));
}
app
})
.workers(max_web_workers(&settings));
let port = settings.service.http_port;
let bind_addr = format!("{}:{}", settings.service.host, port);
// With TLS enabled, bind with certificate helper and Rustls, or bind regularly
server = if settings.service.enable_tls {
log::info!(
"TLS enabled for REST API (TTL: {})",
settings
.tls
.as_ref()
.and_then(|tls| tls.cert_ttl)
.map(|ttl| ttl.to_string())
.unwrap_or_else(|| "none".into()),
);
let config = certificate_helpers::actix_tls_server_config(&settings)
.map_err(io::Error::other)?;
server.bind_rustls_0_23(bind_addr, config)?
} else {
log::info!("TLS disabled for REST API");
server.bind(bind_addr)?
};
log::info!("Qdrant HTTP listening on {port}");
server.run().await
})
}
fn validation_error_handler(
name: &str,
err: actix_web_validator::Error,
_req: &HttpRequest,
) -> error::Error {
use actix_web_validator::error::DeserializeErrors;
// Nicely describe deserialization and validation errors
let msg = match &err {
actix_web_validator::Error::Validate(errs) => {
validation::label_errors(format!("Validation error in {name}"), errs)
}
actix_web_validator::Error::Deserialize(err) => {
format!(
"Deserialize error in {name}: {}",
match err {
DeserializeErrors::DeserializeQuery(err) => err.to_string(),
DeserializeErrors::DeserializeJson(err) => err.to_string(),
DeserializeErrors::DeserializePath(err) => err.to_string(),
}
)
}
actix_web_validator::Error::JsonPayloadError(
actix_web::error::JsonPayloadError::Deserialize(err),
) => {
format!("Format error in {name}: {err}",)
}
err => err.to_string(),
};
// Build fitting response
let response = match &err {
actix_web_validator::Error::Validate(_) => HttpResponse::UnprocessableEntity(),
_ => HttpResponse::BadRequest(),
}
.json(ApiResponse::<()> {
result: None,
status: ApiStatus::Error(msg),
time: 0.0,
usage: None,
});
error::InternalError::from_response(err, response).into()
}
#[cfg(test)]
mod tests {
    use ::api::grpc::api_crate_version;

    /// The binary and the `lib/api` crate must agree on the version they
    /// report; a mismatch would break client/server compatibility checks.
    #[test]
    fn test_version() {
        assert_eq!(
            api_crate_version(),
            env!("CARGO_PKG_VERSION"),
            // Fixed grammar of the failure message ("not same" -> "not the same").
            "Qdrant and lib/api crate versions are not the same"
        );
    }
}
| rust | Apache-2.0 | f937d0260ffd7705c6f34d9c25da1e27ebf5ddfd | 2026-01-04T15:34:51.524868Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.