repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/global.rs | crates/obs/src/global.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{AppConfig, GlobalError, OtelGuard, SystemObserver, telemetry::init_telemetry};
use std::sync::{Arc, Mutex};
use tokio::sync::OnceCell;
use tracing::{error, info};
/// Global guard for OpenTelemetry tracing
/// (set once via `set_global_guard`, read via `get_global_guard`).
static GLOBAL_GUARD: OnceCell<Arc<Mutex<OtelGuard>>> = OnceCell::const_new();
/// Flag indicating if observability metric is enabled
/// (written once during telemetry initialization; unset until then).
pub(crate) static OBSERVABILITY_METRIC_ENABLED: OnceCell<bool> = OnceCell::const_new();
/// Check whether Observability metric is enabled.
///
/// Returns `false` when the flag has not been initialized yet.
pub fn observability_metric_enabled() -> bool {
    matches!(OBSERVABILITY_METRIC_ENABLED.get(), Some(true))
}
/// Initialize the observability module
///
/// # Parameters
/// - `endpoint`: Optional OTLP endpoint override used when building the configuration
///
/// # Returns
/// The telemetry guard; keep it alive for the lifetime of the program so
/// telemetry is flushed on shutdown.
///
/// # Example
/// ```no_run
/// # use rustfs_obs::init_obs;
///
/// # #[tokio::main]
/// # async fn main() {
/// #     match init_obs(None).await {
/// #         Ok(guard) => {}
/// #         Err(e) => { eprintln!("Failed to initialize observability: {}", e); }
/// #     }
/// # }
/// ```
pub async fn init_obs(endpoint: Option<String>) -> Result<OtelGuard, GlobalError> {
    // Load the configuration, optionally overriding the endpoint.
    let config = AppConfig::new_with_endpoint(endpoint);
    let guard = init_telemetry(&config.observability)?;
    // Record PID-level process metrics in the background; a failure here is
    // logged but never aborts observability initialization.
    tokio::spawn(async move {
        if let Err(e) = SystemObserver::init_process_observer().await {
            error!(target: "rustfs::obs::system::metrics","Failed to initialize process observer: {}", e);
        } else {
            info!(target: "rustfs::obs::system::metrics","Process observer initialized successfully");
        }
    });
    Ok(guard)
}
/// Set the global guard for OtelGuard
///
/// # Arguments
/// * `guard` - The OtelGuard instance to set globally
///
/// # Returns
/// * `Ok(())` if successful
/// * `Err(GlobalError::SetError)` when a guard was already installed
///
/// # Example
/// ```no_run
/// # use rustfs_obs::{ init_obs, set_global_guard};
///
/// # async fn init() -> Result<(), Box<dyn std::error::Error>> {
/// #     let guard = match init_obs(None).await {
/// #         Ok(g) => g,
/// #         Err(e) => { return Err(Box::new(e)); }
/// #     };
/// #     set_global_guard(guard)?;
/// #     Ok(())
/// # }
/// ```
pub fn set_global_guard(guard: OtelGuard) -> Result<(), GlobalError> {
    info!("Initializing global guard");
    // Wrap the guard for shared, thread-safe access before storing it.
    let shared = Arc::new(Mutex::new(guard));
    GLOBAL_GUARD.set(shared).map_err(GlobalError::SetError)
}
/// Get the global guard for OtelGuard
///
/// # Returns
/// * `Ok(Arc<Mutex<OtelGuard>>)` if a guard was previously installed
/// * `Err(GlobalError::NotInitialized)` otherwise
///
/// # Example
/// ```no_run
/// # use rustfs_obs::get_global_guard;
///
/// # async fn trace_operation() -> Result<(), Box<dyn std::error::Error>> {
/// #     let guard = get_global_guard()?;
/// #     let _lock = guard.lock().unwrap();
/// #     // Perform traced operation
/// #     Ok(())
/// # }
/// ```
pub fn get_global_guard() -> Result<Arc<Mutex<OtelGuard>>, GlobalError> {
    match GLOBAL_GUARD.get() {
        Some(guard) => Ok(Arc::clone(guard)),
        None => Err(GlobalError::NotInitialized),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Before `set_global_guard` runs, lookups must fail with `NotInitialized`.
    #[tokio::test]
    async fn test_get_uninitialized_guard() {
        assert!(matches!(get_global_guard(), Err(GlobalError::NotInitialized)));
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/lib.rs | crates/obs/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # RustFS Observability
//!
//! provides tools for system and service monitoring
//!
//! ## feature mark
//! - `default`: default monitoring function
//! - `gpu`: gpu monitoring function
//! - `full`: includes all functions
//!
//! to enable gpu monitoring add in cargo toml
//!
//! ```toml
//! # using gpu monitoring
//! rustfs-obs = { version = "0.1.0", features = ["gpu"] }
//!
//! # use all functions
//! rustfs-obs = { version = "0.1.0", features = ["full"] }
//! ```
//!
//! ## Usage
//!
//! ```no_run
//! use rustfs_obs::init_obs;
//!
//! # #[tokio::main]
//! # async fn main() {
//! # let _guard = match init_obs(None).await {
//! # Ok(g) => g,
//! # Err(e) => {
//! # panic!("Failed to initialize observability: {:?}", e);
//! # }
//! # };
//! # // Application logic here
//! # {
//! # // Simulate some work
//! # tokio::time::sleep(std::time::Duration::from_secs(2)).await;
//! # println!("Application is running...");
//! # }
//! # // Guard will be dropped here, flushing telemetry data
//! # }
//! ```
mod config;
mod error;
mod global;
mod metrics;
mod recorder;
mod system;
mod telemetry;
pub use config::*;
pub use error::*;
pub use global::*;
pub use metrics::*;
pub use recorder::*;
pub use system::SystemObserver;
pub use telemetry::OtelGuard;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/telemetry.rs | crates/obs/src/telemetry.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::OtelConfig;
use crate::global::OBSERVABILITY_METRIC_ENABLED;
use crate::{Recorder, TelemetryError};
use flexi_logger::{DeferredNow, Record, WriteMode, WriteMode::AsyncWith, style};
use metrics::counter;
use nu_ansi_term::Color;
use opentelemetry::{KeyValue, global, trace::TracerProvider};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_otlp::{Compression, Protocol, WithExportConfig, WithHttpConfig};
use opentelemetry_sdk::{
Resource,
logs::SdkLoggerProvider,
metrics::{PeriodicReader, SdkMeterProvider},
trace::{RandomIdGenerator, Sampler, SdkTracerProvider},
};
use opentelemetry_semantic_conventions::{
SCHEMA_URL,
attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
};
use rustfs_config::{
APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_OBS_LOG_STDOUT_ENABLED, ENVIRONMENT, METER_INTERVAL,
SAMPLE_RATIO, SERVICE_VERSION,
observability::{
DEFAULT_OBS_ENVIRONMENT_PRODUCTION, DEFAULT_OBS_LOG_FLUSH_MS, DEFAULT_OBS_LOG_MESSAGE_CAPA, DEFAULT_OBS_LOG_POOL_CAPA,
ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FLUSH_MS, ENV_OBS_LOG_MESSAGE_CAPA, ENV_OBS_LOG_POOL_CAPA,
},
};
use rustfs_utils::{get_env_opt_str, get_env_u64, get_env_usize, get_local_ip_with_default};
use smallvec::SmallVec;
use std::{borrow::Cow, fs, io::IsTerminal, time::Duration};
use tracing::info;
use tracing_error::ErrorLayer;
use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
use tracing_subscriber::{
EnvFilter, Layer,
fmt::{format::FmtSpan, time::LocalTime},
layer::SubscriberExt,
util::SubscriberInitExt,
};
/// A guard object that manages the lifecycle of OpenTelemetry components.
///
/// This struct holds references to the created OpenTelemetry providers and ensures
/// they are properly shut down when the guard is dropped. It implements the RAII
/// (Resource Acquisition Is Initialization) pattern for managing telemetry resources.
///
/// When this guard goes out of scope, it will automatically shut down:
/// - The tracer provider (for distributed tracing)
/// - The meter provider (for metrics collection)
/// - The logger provider (for structured logging)
///
/// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug
pub struct OtelGuard {
    /// OTLP tracer provider; shut down on drop when present.
    tracer_provider: Option<SdkTracerProvider>,
    /// OTLP meter provider; shut down on drop when present.
    meter_provider: Option<SdkMeterProvider>,
    /// OTLP logger provider; shut down on drop when present.
    logger_provider: Option<SdkLoggerProvider>,
    /// Handle to the flexi_logger file backend; shut down on drop when present.
    flexi_logger_handles: Option<flexi_logger::LoggerHandle>,
    /// Worker guard for the non-blocking stdout writer; dropping it flushes buffered logs.
    tracing_guard: Option<tracing_appender::non_blocking::WorkerGuard>,
}
impl std::fmt::Debug for OtelGuard {
    /// Renders only the *presence* of each component, since the wrapped
    /// providers themselves do not all implement `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("OtelGuard");
        dbg.field("tracer_provider", &self.tracer_provider.is_some());
        dbg.field("meter_provider", &self.meter_provider.is_some());
        dbg.field("logger_provider", &self.logger_provider.is_some());
        dbg.field("flexi_logger_handles", &self.flexi_logger_handles.is_some());
        dbg.field("tracing_guard", &self.tracing_guard.is_some());
        dbg.finish()
    }
}
impl Drop for OtelGuard {
    // Shut down every still-held telemetry component in order: tracer, meter,
    // logger, the flexi_logger handle, then the stdout worker guard. Errors are
    // reported via eprintln!/println! because the logging pipeline itself may
    // already be tearing down at this point.
    fn drop(&mut self) {
        if let Some(provider) = self.tracer_provider.take()
            && let Err(err) = provider.shutdown()
        {
            eprintln!("Tracer shutdown error: {err:?}");
        }
        if let Some(provider) = self.meter_provider.take()
            && let Err(err) = provider.shutdown()
        {
            eprintln!("Meter shutdown error: {err:?}");
        }
        if let Some(provider) = self.logger_provider.take()
            && let Err(err) = provider.shutdown()
        {
            eprintln!("Logger shutdown error: {err:?}");
        }
        // flexi_logger flushes and stops its async writer on shutdown.
        if let Some(handle) = self.flexi_logger_handles.take() {
            handle.shutdown();
            println!("flexi_logger shutdown completed");
        }
        // Dropping the WorkerGuard flushes any logs still buffered by the
        // non-blocking stdout writer.
        if let Some(guard) = self.tracing_guard.take() {
            drop(guard);
            println!("Tracing guard dropped, flushing logs.");
        }
    }
}
/// Create the OpenTelemetry [`Resource`] describing this service.
///
/// Falls back to the crate-level defaults (`APP_NAME`, `SERVICE_VERSION`,
/// `ENVIRONMENT`) for any field not supplied in `config`, and always attaches
/// the local network address.
fn resource(config: &OtelConfig) -> Resource {
    // Note: the previous `Cow::Borrowed(..).to_string()` wrapping was a no-op;
    // `&str::to_string()` produces the same owned String directly.
    Resource::builder()
        .with_service_name(config.service_name.as_deref().unwrap_or(APP_NAME).to_string())
        .with_schema_url(
            [
                KeyValue::new(
                    OTEL_SERVICE_VERSION,
                    config.service_version.as_deref().unwrap_or(SERVICE_VERSION).to_string(),
                ),
                KeyValue::new(
                    DEPLOYMENT_ENVIRONMENT_NAME,
                    config.environment.as_deref().unwrap_or(ENVIRONMENT).to_string(),
                ),
                KeyValue::new(NETWORK_LOCAL_ADDRESS, get_local_ip_with_default()),
            ],
            SCHEMA_URL,
        )
        .build()
}
/// Creates a periodic reader that exports metrics to stdout at the given
/// interval (seconds).
fn create_periodic_reader(interval: u64) -> PeriodicReader<opentelemetry_stdout::MetricExporter> {
    let exporter = opentelemetry_stdout::MetricExporter::default();
    let period = Duration::from_secs(interval);
    PeriodicReader::builder(exporter).with_interval(period).build()
}
// Build the flexi_logger async write mode from environment variables,
// falling back to the compiled-in defaults for pool size, message capacity
// and flush interval.
fn get_env_async_with() -> WriteMode {
    AsyncWith {
        pool_capa: get_env_usize(ENV_OBS_LOG_POOL_CAPA, DEFAULT_OBS_LOG_POOL_CAPA),
        message_capa: get_env_usize(ENV_OBS_LOG_MESSAGE_CAPA, DEFAULT_OBS_LOG_MESSAGE_CAPA),
        flush_interval: Duration::from_millis(get_env_u64(ENV_OBS_LOG_FLUSH_MS, DEFAULT_OBS_LOG_FLUSH_MS)),
    }
}
/// Build the [`EnvFilter`] for the tracing subscriber.
///
/// The `RUST_LOG` environment variable wins when set; otherwise
/// `default_level` (or, failing that, `logger_level`) is used. Unless the
/// requested level is `trace`/`debug`, noisy transport crates are silenced.
fn build_env_filter(logger_level: &str, default_level: Option<&str>) -> EnvFilter {
    let level = default_level.unwrap_or(logger_level);
    let mut filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(level));
    if !matches!(logger_level, "trace" | "debug") {
        // A fixed literal list needs no SmallVec; iterate the array directly.
        for directive in ["hyper", "tonic", "h2", "reqwest", "tower"] {
            filter = filter.add_directive(
                format!("{directive}=off")
                    .parse()
                    .expect("statically valid tracing directive"),
            );
        }
    }
    filter
}
/// Custom Log Formatter Function - Terminal Output (with Color)
///
/// Writes `[timestamp] LEVEL [target] [file:line] [thread_name:thread_id] message`
/// with per-field ANSI colors.
#[inline(never)]
fn format_with_color(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &Record) -> Result<(), std::io::Error> {
    let level = record.level();
    let level_style = style(level);
    // Query the current thread once and reuse it for both name and id
    // (previously `std::thread::current()` was called twice).
    let thread = std::thread::current();
    let thread_name = thread.name().unwrap_or("unnamed");
    let thread_id = format!("{:?}", thread.id());
    writeln!(
        w,
        "[{}] {} [{}] [{}:{}] [{}:{}] {}",
        now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
        level_style.paint(level.to_string()),
        Color::Magenta.paint(record.target()),
        Color::Blue.paint(record.file().unwrap_or("unknown")),
        Color::Blue.paint(record.line().unwrap_or(0).to_string()),
        Color::Green.paint(thread_name),
        Color::Green.paint(thread_id),
        record.args()
    )
}
/// Custom Log Formatter - File Output (No Color)
///
/// Same layout as the terminal formatter, without ANSI escapes so log files
/// stay machine-readable.
#[inline(never)]
fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &Record) -> Result<(), std::io::Error> {
    let level = record.level();
    // Query the current thread once and reuse it for both name and id
    // (previously `std::thread::current()` was called twice).
    let thread = std::thread::current();
    let thread_name = thread.name().unwrap_or("unnamed");
    let thread_id = format!("{:?}", thread.id());
    writeln!(
        w,
        "[{}] {} [{}] [{}:{}] [{}:{}] {}",
        now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
        level,
        record.target(),
        record.file().unwrap_or("unknown"),
        record.line().unwrap_or(0),
        thread_name,
        thread_id,
        record.args()
    )
}
/// stdout + span information (fix: retain WorkerGuard to avoid releasing after initialization)
///
/// Installs the global tracing subscriber with a JSON formatter writing to a
/// non-blocking stdout writer, and returns an `OtelGuard` that owns only the
/// writer's `WorkerGuard` (no OTLP providers in this mode).
fn init_stdout_logging(_config: &OtelConfig, logger_level: &str, is_production: bool) -> OtelGuard {
    // RUST_LOG wins over `logger_level` when set.
    let env_filter = build_env_filter(logger_level, None);
    // `guard` must be kept alive or buffered log lines are dropped.
    let (nb, guard) = tracing_appender::non_blocking(std::io::stdout());
    // Only use ANSI colors when stdout is an interactive terminal.
    let enable_color = std::io::stdout().is_terminal();
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_timer(LocalTime::rfc_3339())
        .with_target(true)
        .with_ansi(enable_color)
        .with_thread_names(true)
        .with_thread_ids(true)
        .with_file(true)
        .with_line_number(true)
        .with_writer(nb)
        .json()
        .with_current_span(true)
        .with_span_list(true)
        // Production logs only span-close events to limit volume; otherwise log all span events.
        .with_span_events(if is_production { FmtSpan::CLOSE } else { FmtSpan::FULL });
    tracing_subscriber::registry()
        .with(env_filter)
        .with(ErrorLayer::default())
        .with(fmt_layer)
        .init();
    // No metrics export pipeline in stdout-only mode.
    OBSERVABILITY_METRIC_ENABLED.set(false).ok();
    counter!("rustfs.start.total").increment(1);
    info!("Init stdout logging (level: {})", logger_level);
    OtelGuard {
        tracer_provider: None,
        meter_provider: None,
        logger_provider: None,
        flexi_logger_handles: None,
        tracing_guard: Some(guard),
    }
}
/// File rolling log (size/time rotation + bounded number of retained files).
///
/// Creates the log directory (tightening permissions to `0755` on unix when
/// they are looser than that), derives the rotation criterion from the
/// configuration, and starts a `flexi_logger` backend whose async write mode
/// is taken from the environment.
///
/// # Errors
/// - [`TelemetryError::Io`] when the directory cannot be created or inspected
/// - [`TelemetryError::SetPermissions`] when permissions cannot be tightened (unix only)
fn init_file_logging(config: &OtelConfig, logger_level: &str, is_production: bool) -> Result<OtelGuard, TelemetryError> {
    use flexi_logger::{Age, Cleanup, Criterion, FileSpec, LogSpecification, Naming};
    // Map a rotation-time string to an Age; unknown values default to daily.
    // (Extracted: this match was duplicated verbatim in two arms below.)
    fn parse_age(time: &str) -> Age {
        match time.to_lowercase().as_str() {
            "hour" => Age::Hour,
            "day" => Age::Day,
            "minute" => Age::Minute,
            "second" => Age::Second,
            _ => Age::Day, // The default is by day
        }
    }
    let service_name = config.service_name.as_deref().unwrap_or(APP_NAME);
    let default_log_directory = rustfs_utils::dirs::get_log_directory_to_string(ENV_OBS_LOG_DIRECTORY);
    let log_directory = config.log_directory.as_deref().unwrap_or(default_log_directory.as_str());
    let log_filename = config.log_filename.as_deref().unwrap_or(service_name);
    let keep_files = config.log_keep_files.unwrap_or(DEFAULT_LOG_KEEP_FILES);
    if let Err(e) = fs::create_dir_all(log_directory) {
        return Err(TelemetryError::Io(e.to_string()));
    }
    #[cfg(unix)]
    {
        use std::fs::Permissions;
        use std::os::unix::fs::PermissionsExt;
        let desired: u32 = 0o755;
        match fs::metadata(log_directory) {
            Ok(meta) => {
                let current = meta.permissions().mode() & 0o777;
                // Only tighten to 0755 if existing permissions are looser than target, avoid loosening
                if (current & !desired) != 0 {
                    if let Err(e) = fs::set_permissions(log_directory, Permissions::from_mode(desired)) {
                        return Err(TelemetryError::SetPermissions(format!(
                            "dir='{log_directory}', want={desired:#o}, have={current:#o}, err={e}"
                        )));
                    }
                    // Second verification
                    if let Ok(meta2) = fs::metadata(log_directory) {
                        let after = meta2.permissions().mode() & 0o777;
                        if after != desired {
                            return Err(TelemetryError::SetPermissions(format!(
                                "dir='{log_directory}', want={desired:#o}, after={after:#o}"
                            )));
                        }
                    }
                }
            }
            Err(e) => {
                return Err(TelemetryError::Io(format!("stat '{log_directory}' failed: {e}")));
            }
        }
    }
    // Parse the requested level, falling back to the default and finally to `error`.
    let log_spec = LogSpecification::parse(logger_level)
        .unwrap_or_else(|_| LogSpecification::parse(DEFAULT_LOG_LEVEL).unwrap_or(LogSpecification::error()));
    // Build the rotation criterion from the optional time and size (MB) settings.
    let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
        // Rotate on whichever comes first: age or size (MB converted to bytes).
        (Some(time), Some(size)) => Criterion::AgeOrSize(parse_age(time), size * 1024 * 1024),
        // Cut by time only
        (Some(time), None) => Criterion::Age(parse_age(time)),
        // Cut by size only (MB converted to bytes)
        (None, Some(size)) => Criterion::Size(size * 1024 * 1024),
        // By default, it is cut by the day
        _ => Criterion::Age(Age::Day),
    };
    // write mode
    let write_mode = get_env_async_with();
    // Build
    let mut builder = flexi_logger::Logger::try_with_env_or_str(logger_level)
        .unwrap_or(flexi_logger::Logger::with(log_spec.clone()))
        .format_for_stderr(format_with_color)
        .format_for_stdout(format_with_color)
        .format_for_files(format_for_file)
        .log_to_file(
            FileSpec::default()
                .directory(log_directory)
                .basename(log_filename)
                .suppress_timestamp(),
        )
        .rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files))
        .write_mode(write_mode)
        .append()
        .use_utc();
    // Optional copy to stdout (for local observation)
    if config.log_stdout_enabled.unwrap_or(DEFAULT_OBS_LOG_STDOUT_ENABLED) || !is_production {
        builder = builder.duplicate_to_stdout(flexi_logger::Duplicate::All);
    } else {
        builder = builder.duplicate_to_stdout(flexi_logger::Duplicate::None);
    }
    // A failed start is best-effort: report it and return a guard without a handle.
    let handle = match builder.start() {
        Ok(h) => Some(h),
        Err(e) => {
            eprintln!("ERROR: start flexi_logger failed: {e}");
            None
        }
    };
    // No metrics export pipeline in file-logging mode.
    OBSERVABILITY_METRIC_ENABLED.set(false).ok();
    info!(
        "Init file logging at '{}', roll size {:?}MB, keep {}",
        log_directory, config.log_rotation_size_mb, keep_files
    );
    Ok(OtelGuard {
        tracer_provider: None,
        meter_provider: None,
        logger_provider: None,
        flexi_logger_handles: handle,
        tracing_guard: None,
    })
}
/// Observability (HTTP export, supports three sub-endpoints; if not, fallback to unified endpoint)
///
/// Builds OTLP/HTTP exporters for traces, metrics and logs, installs the
/// corresponding global providers and the tracing subscriber, and returns an
/// `OtelGuard` owning all three providers so they are shut down on drop.
fn init_observability_http(config: &OtelConfig, logger_level: &str, is_production: bool) -> Result<OtelGuard, TelemetryError> {
    // Resources and sampling
    let res = resource(config);
    let service_name = config.service_name.as_deref().unwrap_or(APP_NAME).to_owned();
    // Default: mirror telemetry to stdout only outside production.
    let use_stdout = config.use_stdout.unwrap_or(!is_production);
    let sample_ratio = config.sample_ratio.unwrap_or(SAMPLE_RATIO);
    // A ratio in [0, 1) enables probabilistic sampling; anything else samples everything.
    let sampler = if (0.0..1.0).contains(&sample_ratio) {
        Sampler::TraceIdRatioBased(sample_ratio)
    } else {
        Sampler::AlwaysOn
    };
    // Endpoint: per-signal endpoints fall back to `<root>/v1/{traces,metrics,logs}` when unset/empty.
    let root_ep = config.endpoint.clone(); // owned String
    let trace_ep: String = config
        .trace_endpoint
        .as_deref()
        .filter(|s| !s.is_empty())
        .map(|s| s.to_string())
        .unwrap_or_else(|| format!("{root_ep}/v1/traces"));
    let metric_ep: String = config
        .metric_endpoint
        .as_deref()
        .filter(|s| !s.is_empty())
        .map(|s| s.to_string())
        .unwrap_or_else(|| format!("{root_ep}/v1/metrics"));
    let log_ep: String = config
        .log_endpoint
        .as_deref()
        .filter(|s| !s.is_empty())
        .map(|s| s.to_string())
        .unwrap_or_else(|| format!("{root_ep}/v1/logs"));
    // Tracer(HTTP): built only when a trace endpoint resolved to a non-empty string.
    let tracer_provider = {
        if trace_ep.is_empty() {
            None
        } else {
            let exporter = opentelemetry_otlp::SpanExporter::builder()
                .with_http()
                .with_endpoint(trace_ep.as_str())
                .with_protocol(Protocol::HttpBinary)
                .with_compression(Compression::Gzip)
                .build()
                .map_err(|e| TelemetryError::BuildSpanExporter(e.to_string()))?;
            let mut builder = SdkTracerProvider::builder()
                .with_sampler(sampler)
                .with_id_generator(RandomIdGenerator::default())
                .with_resource(res.clone())
                .with_batch_exporter(exporter);
            if use_stdout {
                builder = builder.with_batch_exporter(opentelemetry_stdout::SpanExporter::default());
            }
            let provider = builder.build();
            global::set_tracer_provider(provider.clone());
            Some(provider)
        }
    };
    // Meter(HTTP): also installs the `metrics` crate's global recorder.
    let meter_provider = {
        if metric_ep.is_empty() {
            None
        } else {
            let exporter = opentelemetry_otlp::MetricExporter::builder()
                .with_http()
                .with_endpoint(metric_ep.as_str())
                .with_temporality(opentelemetry_sdk::metrics::Temporality::default())
                .with_protocol(Protocol::HttpBinary)
                .with_compression(Compression::Gzip)
                .build()
                .map_err(|e| TelemetryError::BuildMetricExporter(e.to_string()))?;
            let meter_interval = config.meter_interval.unwrap_or(METER_INTERVAL);
            let (provider, recorder) = Recorder::builder(service_name.clone())
                .with_meter_provider(|b| {
                    let b = b.with_resource(res.clone()).with_reader(
                        PeriodicReader::builder(exporter)
                            .with_interval(Duration::from_secs(meter_interval))
                            .build(),
                    );
                    if use_stdout {
                        b.with_reader(create_periodic_reader(meter_interval))
                    } else {
                        b
                    }
                })
                .build();
            global::set_meter_provider(provider.clone());
            metrics::set_global_recorder(recorder).map_err(|e| TelemetryError::InstallMetricsRecorder(e.to_string()))?;
            Some(provider)
        }
    };
    // Logger(HTTP)
    let logger_provider = {
        if log_ep.is_empty() {
            None
        } else {
            let exporter = opentelemetry_otlp::LogExporter::builder()
                .with_http()
                .with_endpoint(log_ep.as_str())
                .with_protocol(Protocol::HttpBinary)
                .with_compression(Compression::Gzip)
                .build()
                .map_err(|e| TelemetryError::BuildLogExporter(e.to_string()))?;
            let mut builder = SdkLoggerProvider::builder().with_resource(res);
            builder = builder.with_batch_exporter(exporter);
            if use_stdout {
                builder = builder.with_batch_exporter(opentelemetry_stdout::LogExporter::default());
            }
            Some(builder.build())
        }
    };
    // Tracing layer: optional human-readable stdout layer, controlled by config.
    let fmt_layer_opt = {
        if config.log_stdout_enabled.unwrap_or(DEFAULT_OBS_LOG_STDOUT_ENABLED) {
            let enable_color = std::io::stdout().is_terminal();
            let mut layer = tracing_subscriber::fmt::layer()
                .with_timer(LocalTime::rfc_3339())
                .with_target(true)
                .with_ansi(enable_color)
                .with_thread_names(true)
                .with_thread_ids(true)
                .with_file(true)
                .with_line_number(true)
                .json()
                .with_current_span(true)
                .with_span_list(true);
            let span_event = if is_production { FmtSpan::CLOSE } else { FmtSpan::FULL };
            layer = layer.with_span_events(span_event);
            Some(layer.with_filter(build_env_filter(logger_level, None)))
        } else {
            None
        }
    };
    let filter = build_env_filter(logger_level, None);
    // Bridge `tracing` events into the OTLP log pipeline when a logger provider exists.
    let otel_bridge = logger_provider
        .as_ref()
        .map(|p| OpenTelemetryTracingBridge::new(p).with_filter(build_env_filter(logger_level, None)));
    let tracer_layer = tracer_provider
        .as_ref()
        .map(|p| OpenTelemetryLayer::new(p.tracer(service_name.to_string())));
    let metrics_layer = meter_provider.as_ref().map(|p| MetricsLayer::new(p.clone()));
    tracing_subscriber::registry()
        .with(filter)
        .with(ErrorLayer::default())
        .with(fmt_layer_opt)
        .with(tracer_layer)
        .with(otel_bridge)
        .with(metrics_layer)
        .init();
    // Metrics export is active in this mode.
    OBSERVABILITY_METRIC_ENABLED.set(true).ok();
    counter!("rustfs.start.total").increment(1);
    info!(
        "Init observability (HTTP): trace='{}', metric='{}', log='{}'",
        trace_ep, metric_ep, log_ep
    );
    Ok(OtelGuard {
        tracer_provider,
        meter_provider,
        logger_provider,
        flexi_logger_handles: None,
        tracing_guard: None,
    })
}
/// Initialize Telemetry. Entry point applying three rules, highest priority first:
/// 1. Any non-empty OTLP endpoint -> full observability over HTTP.
/// 2. A user-set log directory (`ENV_OBS_LOG_DIRECTORY`) -> rolling file logging.
/// 3. Otherwise -> stdout logging at the default level.
pub(crate) fn init_telemetry(config: &OtelConfig) -> Result<OtelGuard, TelemetryError> {
    let environment = config.environment.as_deref().unwrap_or(ENVIRONMENT);
    let is_production = environment.eq_ignore_ascii_case(DEFAULT_OBS_ENVIRONMENT_PRODUCTION);
    let logger_level = config.logger_level.as_deref().unwrap_or(DEFAULT_LOG_LEVEL);
    // Rule 3: observability wins whenever at least one endpoint is configured.
    let non_empty = |s: Option<&str>| s.is_some_and(|v| !v.is_empty());
    let has_obs = !config.endpoint.is_empty()
        || non_empty(config.trace_endpoint.as_deref())
        || non_empty(config.metric_endpoint.as_deref())
        || non_empty(config.log_endpoint.as_deref());
    if has_obs {
        return init_observability_http(config, logger_level, is_production);
    }
    // Rule 2: the user explicitly customized the log directory via the environment.
    if get_env_opt_str(ENV_OBS_LOG_DIRECTORY).filter(|d| !d.is_empty()).is_some() {
        return init_file_logging(config, logger_level, is_production);
    }
    // Rule 1: default stdout logging at the default (error) level.
    Ok(init_stdout_logging(config, DEFAULT_LOG_LEVEL, is_production))
}
#[cfg(test)]
mod tests {
    use super::*;
    use rustfs_config::USE_STDOUT;

    // Verifies the case-insensitive "production" check used by `init_telemetry`.
    #[test]
    fn test_production_environment_detection() {
        // Test production environment logic
        let production_envs = vec!["production", "PRODUCTION", "Production"];
        for env_value in production_envs {
            let is_production = env_value.to_lowercase() == "production";
            assert!(is_production, "Should detect '{env_value}' as production environment");
        }
    }

    #[test]
    fn test_non_production_environment_detection() {
        // Test non-production environment logic
        let non_production_envs = vec!["development", "test", "staging", "dev", "local"];
        for env_value in non_production_envs {
            let is_production = env_value.to_lowercase() == "production";
            assert!(!is_production, "Should not detect '{env_value}' as production environment");
        }
    }

    // Exercises the stdout-defaulting rules (mirrors `init_observability_http`)
    // as a pure table-driven test, without touching process-global state.
    #[test]
    fn test_stdout_behavior_logic() {
        // Test the stdout behavior logic without environment manipulation
        struct TestCase {
            is_production: bool,
            config_use_stdout: Option<bool>,
            expected_use_stdout: bool,
            description: &'static str,
        }
        let test_cases = vec![
            TestCase {
                is_production: true,
                config_use_stdout: None,
                expected_use_stdout: false,
                description: "Production with no config should disable stdout",
            },
            TestCase {
                is_production: false,
                config_use_stdout: None,
                expected_use_stdout: USE_STDOUT,
                description: "Non-production with no config should use default",
            },
            TestCase {
                is_production: true,
                config_use_stdout: Some(true),
                expected_use_stdout: true,
                description: "Production with explicit true should enable stdout",
            },
            TestCase {
                is_production: true,
                config_use_stdout: Some(false),
                expected_use_stdout: false,
                description: "Production with explicit false should disable stdout",
            },
            TestCase {
                is_production: false,
                config_use_stdout: Some(true),
                expected_use_stdout: true,
                description: "Non-production with explicit true should enable stdout",
            },
        ];
        for case in test_cases {
            let default_use_stdout = if case.is_production { false } else { USE_STDOUT };
            let actual_use_stdout = case.config_use_stdout.unwrap_or(default_use_stdout);
            assert_eq!(actual_use_stdout, case.expected_use_stdout, "Test case failed: {}", case.description);
        }
    }

    #[test]
    fn test_log_level_filter_mapping_logic() {
        // Test the log level mapping logic used in the real implementation
        let test_cases = vec![
            ("trace", "Trace"),
            ("debug", "Debug"),
            ("info", "Info"),
            ("warn", "Warn"),
            ("warning", "Warn"),
            ("error", "Error"),
            ("off", "None"),
            ("invalid_level", "Info"), // Should default to Info
        ];
        for (input_level, expected_variant) in test_cases {
            let filter_variant = match input_level.to_lowercase().as_str() {
                "trace" => "Trace",
                "debug" => "Debug",
                "info" => "Info",
                "warn" | "warning" => "Warn",
                "error" => "Error",
                "off" => "None",
                _ => "Info", // default case
            };
            assert_eq!(
                filter_variant, expected_variant,
                "Log level '{input_level}' should map to '{expected_variant}'"
            );
        }
    }

    // Confirms OtelConfig's environment field round-trips through the same
    // defaulting logic `init_telemetry` applies.
    #[test]
    fn test_otel_config_environment_defaults() {
        // Test that OtelConfig properly handles environment detection logic
        let config = OtelConfig {
            endpoint: "".to_string(),
            use_stdout: None,
            environment: Some("production".to_string()),
            ..Default::default()
        };
        // Simulate the logic from init_telemetry
        let environment = config.environment.as_deref().unwrap_or(ENVIRONMENT);
        assert_eq!(environment, "production");
        // Test with development environment
        let dev_config = OtelConfig {
            endpoint: "".to_string(),
            use_stdout: None,
            environment: Some("development".to_string()),
            ..Default::default()
        };
        let dev_environment = dev_config.environment.as_deref().unwrap_or(ENVIRONMENT);
        assert_eq!(dev_environment, "development");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/error.rs | crates/obs/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::OtelGuard;
use std::sync::{Arc, Mutex};
use tokio::sync::SetError;
/// Error type for global guard operations
#[derive(Debug, thiserror::Error)]
pub enum GlobalError {
    /// Occurs when attempting to set a global recorder (e.g., via [`crate::Recorder::install_global`] or [`metrics::set_global_recorder`])
    /// but a global recorder is already initialized.
    ///
    /// [`crate::Recorder::install_global`]: crate::Recorder::install_global
    /// [`metrics::set_global_recorder`]: https://docs.rs/metrics/latest/metrics/fn.set_global_recorder.html
    #[error("Failed to set a global recorder: {0}")]
    SetRecorder(#[from] metrics::SetRecorderError<crate::Recorder>),
    /// The global `OtelGuard` was already set (returned by `set_global_guard`).
    #[error("Failed to set global guard: {0}")]
    SetError(#[from] SetError<Arc<Mutex<OtelGuard>>>),
    /// `get_global_guard` was called before `set_global_guard`.
    #[error("Global guard not initialized")]
    NotInitialized,
    /// Generic system-metrics failure with a human-readable message.
    #[error("Global system metrics err: {0}")]
    MetricsError(String),
    /// The current process PID could not be determined.
    #[error("Failed to get current PID: {0}")]
    PidError(String),
    /// No process with the given PID exists.
    #[error("Process with PID {0} not found")]
    ProcessNotFound(u32),
    /// The number of physical CPU cores could not be determined.
    #[error("Failed to get physical core count")]
    CoreCountError,
    /// GPU monitoring could not be initialized.
    #[error("GPU initialization failed: {0}")]
    GpuInitError(String),
    /// The requested GPU device was not found.
    #[error("GPU device not found: {0}")]
    GpuDeviceError(String),
    /// A log message could not be delivered.
    #[error("Failed to send log: {0}")]
    SendFailed(&'static str),
    /// An operation exceeded its deadline.
    #[error("Operation timed out: {0}")]
    Timeout(&'static str),
    /// Telemetry initialization failed (wraps [`TelemetryError`]).
    #[error("Telemetry initialization failed: {0}")]
    TelemetryError(#[from] TelemetryError),
}
/// Errors produced while setting up the telemetry pipeline
/// (exporters, recorder installation, log-directory handling).
#[derive(Debug, thiserror::Error)]
pub enum TelemetryError {
    /// The OTLP span (trace) exporter could not be built.
    #[error("Span exporter build failed: {0}")]
    BuildSpanExporter(String),
    /// The OTLP metric exporter could not be built.
    #[error("Metric exporter build failed: {0}")]
    BuildMetricExporter(String),
    /// The OTLP log exporter could not be built.
    #[error("Log exporter build failed: {0}")]
    BuildLogExporter(String),
    /// Installing the `metrics` crate's global recorder failed.
    #[error("Install metrics recorder failed: {0}")]
    InstallMetricsRecorder(String),
    /// The tracing subscriber could not be initialized.
    #[error("Tracing subscriber init failed: {0}")]
    SubscriberInit(String),
    /// An underlying I/O operation failed (message only; source is not retained).
    #[error("I/O error: {0}")]
    Io(String),
    /// Log-directory permissions could not be adjusted.
    #[error("Set permissions failed: {0}")]
    SetPermissions(String),
}
impl From<std::io::Error> for TelemetryError {
fn from(e: std::io::Error) -> Self {
TelemetryError::Io(e.to_string())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/recorder.rs | crates/obs/src/recorder.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::GlobalError;
use metrics::{Counter, CounterFn, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Metadata, SharedString, Unit};
use opentelemetry::{
InstrumentationScope, InstrumentationScopeBuilder, KeyValue, global,
metrics::{Meter, MeterProvider},
};
use opentelemetry_sdk::metrics::{MeterProviderBuilder, SdkMeterProvider};
use std::{
borrow::Cow,
collections::HashMap,
ops::Deref,
sync::{
Arc, Mutex,
atomic::{AtomicU64, Ordering},
},
};
/// A builder for constructing a [`Recorder`].
#[derive(Debug)]
pub struct Builder {
    /// Configures the SDK meter provider that will back the recorder.
    builder: MeterProviderBuilder,
    /// Metadata (name, version, attributes) for the instrumentation scope.
    scope: InstrumentationScopeBuilder,
}
impl Builder {
/// Runs the closure (`f`) to modify the [`MeterProviderBuilder`] to build a
/// [`MeterProvider`](MeterProvider).
pub fn with_meter_provider(mut self, f: impl FnOnce(MeterProviderBuilder) -> MeterProviderBuilder) -> Self {
self.builder = f(self.builder);
self
}
/// Modify the [`InstrumentationScope`] to provide additional metadata from the
/// closure (`f`).
pub fn with_instrumentation_scope(
mut self,
f: impl FnOnce(InstrumentationScopeBuilder) -> InstrumentationScopeBuilder,
) -> Self {
self.scope = f(self.scope);
self
}
/// Consumes the builder and builds a new [`Recorder`] and returns
/// a [`SdkMeterProvider`].
///
/// A [`SdkMeterProvider`] is provided so you have the responsibility to
/// do whatever you need to do with it.
///
/// This will not install the recorder as the global recorder for
/// the [`metrics`] crate, use [`Builder::install`]. This will not install a meter
/// provider to [`global`], use [`Builder::install_global`].
pub fn build(self) -> (SdkMeterProvider, Recorder) {
let provider = self.builder.build();
let meter = provider.meter_with_scope(self.scope.build());
(
provider,
Recorder {
meter,
metrics_metadata: Arc::new(Mutex::new(HashMap::new())),
},
)
}
/// Builds a [`Recorder`] and sets it as the global recorder for the [`metrics`]
/// crate.
///
/// This method will not call [`global::set_meter_provider`] for OpenTelemetry and
/// will be returned as the first element in the return's type tuple.
pub fn install(self) -> Result<(SdkMeterProvider, Recorder), GlobalError> {
let (provider, recorder) = self.build();
metrics::set_global_recorder(recorder.clone())?;
Ok((provider, recorder))
}
/// Builds the [`Recorder`] to record metrics to OpenTelemetry, set the global
/// recorder for the [`metrics`] crate, and calls [`global::set_meter_provider`]
/// to set the constructed [`SdkMeterProvider`].
pub fn install_global(self) -> Result<Recorder, GlobalError> {
let (provider, recorder) = self.install()?;
global::set_meter_provider(provider);
Ok(recorder)
}
}
/// Unit and description captured by a `describe_*` call, held until the
/// matching `register_*` call attaches it to the OpenTelemetry instrument.
#[derive(Debug)]
struct MetricMetadata {
    /// Optional measurement unit reported for the metric.
    unit: Option<Unit>,
    /// Human-readable description of the metric.
    description: SharedString,
}
/// A standard recorder that implements [`metrics::Recorder`].
///
/// This instance implements <code>[`Deref`]\<Target = [`Meter`]\></code>, so
/// you can still interact with the SDK's initialized [`Meter`] instance.
#[derive(Debug, Clone)]
pub struct Recorder {
    /// OpenTelemetry meter used to create the underlying instruments.
    meter: Meter,
    /// Descriptions/units recorded by `describe_*`, consumed by `register_*`.
    /// Shared so clones of the recorder see the same pending metadata.
    metrics_metadata: Arc<Mutex<HashMap<KeyName, MetricMetadata>>>,
}
impl Recorder {
    /// Starts a [`Builder`] whose instrumentation scope carries `name`.
    pub fn builder<S: Into<Cow<'static, str>>>(name: S) -> Builder {
        let scope = InstrumentationScope::builder(name.into());
        Builder {
            builder: MeterProviderBuilder::default(),
            scope,
        }
    }

    /// Wraps an already-established [`Meter`] in a [`Recorder`] with an empty
    /// metadata table.
    pub fn with_meter(meter: Meter) -> Self {
        Self {
            meter,
            metrics_metadata: Arc::new(Mutex::new(HashMap::new())),
        }
    }
}
impl Deref for Recorder {
    type Target = Meter;

    /// Exposes the wrapped [`Meter`] so callers can create instruments
    /// directly on the recorder.
    fn deref(&self) -> &Self::Target {
        &self.meter
    }
}
impl metrics::Recorder for Recorder {
fn describe_counter(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
let mut metrics_metadata = self.metrics_metadata.lock().unwrap();
metrics_metadata.insert(key, MetricMetadata { unit, description });
}
fn describe_gauge(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
let mut metrics_metadata = self.metrics_metadata.lock().unwrap();
metrics_metadata.insert(key, MetricMetadata { unit, description });
}
fn describe_histogram(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
let mut metrics_metadata = self.metrics_metadata.lock().unwrap();
metrics_metadata.insert(key, MetricMetadata { unit, description });
}
fn register_counter(&self, key: &Key, _metadata: &Metadata<'_>) -> Counter {
let mut builder = self.meter.u64_counter(key.name().to_owned());
if let Some(metadata) = self.metrics_metadata.lock().unwrap().remove(key.name()) {
if let Some(unit) = metadata.unit {
builder = builder.with_unit(unit.as_canonical_label());
}
builder = builder.with_description(metadata.description.to_string());
}
let counter = builder.build();
let labels = key
.labels()
.map(|label| KeyValue::new(label.key().to_owned(), label.value().to_owned()))
.collect();
Counter::from_arc(Arc::new(WrappedCounter {
counter,
labels,
value: AtomicU64::new(0),
}))
}
fn register_gauge(&self, key: &Key, _metadata: &Metadata<'_>) -> Gauge {
let mut builder = self.meter.f64_gauge(key.name().to_owned());
if let Some(metadata) = self.metrics_metadata.lock().unwrap().remove(key.name()) {
if let Some(unit) = metadata.unit {
builder = builder.with_unit(unit.as_canonical_label());
}
builder = builder.with_description(metadata.description.to_string());
}
let gauge = builder.build();
let labels = key
.labels()
.map(|label| KeyValue::new(label.key().to_owned(), label.value().to_owned()))
.collect();
Gauge::from_arc(Arc::new(WrappedGauge {
gauge,
labels,
value: AtomicU64::new(0),
}))
}
fn register_histogram(&self, key: &Key, _metadata: &Metadata<'_>) -> Histogram {
let mut builder = self.meter.f64_histogram(key.name().to_owned());
if let Some(metadata) = self.metrics_metadata.lock().unwrap().remove(key.name()) {
if let Some(unit) = metadata.unit {
builder = builder.with_unit(unit.as_canonical_label());
}
builder = builder.with_description(metadata.description.to_string());
}
let histogram = builder.build();
let labels = key
.labels()
.map(|label| KeyValue::new(label.key().to_owned(), label.value().to_owned()))
.collect();
Histogram::from_arc(Arc::new(WrappedHistogram { histogram, labels }))
}
}
/// Bridges a `metrics` crate counter onto an OpenTelemetry `Counter<u64>`.
struct WrappedCounter {
    /// Underlying OpenTelemetry instrument.
    counter: opentelemetry::metrics::Counter<u64>,
    /// Label set derived from the metrics `Key`, attached to every add.
    labels: Vec<KeyValue>,
    /// Local running total, used to turn `absolute` values into deltas.
    value: AtomicU64,
}
impl CounterFn for WrappedCounter {
    /// Adds `value` to both the local running total and the OTel counter.
    fn increment(&self, value: u64) {
        self.value.fetch_add(value, Ordering::Relaxed);
        self.counter.add(value, &self.labels);
    }

    /// Replaces the running total with `value` and emits the difference to
    /// OTel; a non-increasing absolute value emits 0 (`saturating_sub`).
    fn absolute(&self, value: u64) {
        let previous = self.value.swap(value, Ordering::Relaxed);
        self.counter.add(value.saturating_sub(previous), &self.labels);
    }
}
/// Bridges a `metrics` crate gauge onto an OpenTelemetry `Gauge<f64>`.
struct WrappedGauge {
    /// Underlying OpenTelemetry instrument.
    gauge: opentelemetry::metrics::Gauge<f64>,
    /// Label set derived from the metrics `Key`, attached to every record.
    labels: Vec<KeyValue>,
    /// Current gauge value stored as the raw bits of an `f64`
    /// (via `f64::to_bits`/`from_bits`) so it can be updated atomically.
    value: AtomicU64,
}
impl WrappedGauge {
    /// Atomically adds `delta` to the stored value (an `f64` kept as raw bits
    /// in an `AtomicU64`) and records the resulting value to OpenTelemetry.
    ///
    /// Replaces two hand-rolled, duplicated `compare_exchange` loops with
    /// `AtomicU64::fetch_update`, which performs the same retry loop.
    fn apply_delta(&self, delta: f64) {
        let previous = self
            .value
            .fetch_update(Ordering::AcqRel, Ordering::Relaxed, |bits| {
                Some((f64::from_bits(bits) + delta).to_bits())
            })
            .expect("update closure always returns Some");
        // `previous + delta` is exactly the value that was stored.
        self.gauge.record(f64::from_bits(previous) + delta, &self.labels);
    }
}

impl GaugeFn for WrappedGauge {
    /// Increases the gauge by `value` and records the new total.
    fn increment(&self, value: f64) {
        self.apply_delta(value);
    }

    /// Decreases the gauge by `value` and records the new total.
    /// IEEE-754 guarantees `x - v == x + (-v)`, so the shared delta path
    /// yields bit-identical results to a dedicated subtraction loop.
    fn decrement(&self, value: f64) {
        self.apply_delta(-value);
    }

    /// Overwrites the stored value and records it.
    fn set(&self, value: f64) {
        self.value.store(value.to_bits(), Ordering::Relaxed);
        self.gauge.record(value, &self.labels);
    }
}
/// Bridges a `metrics` crate histogram onto an OpenTelemetry `Histogram<f64>`.
struct WrappedHistogram {
    /// Underlying OpenTelemetry instrument.
    histogram: opentelemetry::metrics::Histogram<f64>,
    /// Label set derived from the metrics `Key`, attached to every record.
    labels: Vec<KeyValue>,
}
impl HistogramFn for WrappedHistogram {
    /// Records a single observation with the wrapped label set.
    fn record(&self, value: f64) {
        self.histogram.record(value, &self.labels);
    }

    /// Records the same observation `count` times by repeating the
    /// single-value path.
    fn record_many(&self, value: f64, count: usize) {
        (0..count).for_each(|_| self.histogram.record(value, &self.labels));
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use opentelemetry_sdk::metrics::Temporality;

    /// End-to-end smoke test: build a provider + recorder, install both
    /// globally, emit one counter increment through the `metrics` facade,
    /// and force-flush the exporter.
    #[test]
    fn standard_usage() {
        let exporter = opentelemetry_stdout::MetricExporterBuilder::default()
            .with_temporality(Temporality::Cumulative)
            .build();
        let (provider, recorder) = Recorder::builder("my-app")
            .with_meter_provider(|builder| builder.with_periodic_exporter(exporter))
            .build();
        // Install the provider for OpenTelemetry and the recorder for the
        // `metrics` facade; the counter below flows through both layers.
        global::set_meter_provider(provider.clone());
        metrics::set_global_recorder(recorder).unwrap();
        let counter = metrics::counter!("my-counter");
        counter.increment(1);
        // Flush so the periodic exporter emits before the test ends.
        provider.force_flush().unwrap();
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/system/gpu.rs | crates/obs/src/system/gpu.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::GlobalError;
use crate::system::attributes::ProcessAttributes;
use crate::system::metrics::Metrics;
use nvml_wrapper::Nvml;
use nvml_wrapper::enums::device::UsedGpuMemory;
use sysinfo::Pid;
use tracing::warn;
/// `GpuCollector` is responsible for collecting GPU memory usage metrics.
pub struct GpuCollector {
    /// NVML handle used to query devices and per-process GPU statistics.
    nvml: Nvml,
    /// PID of the observed process.
    pid: Pid,
}
impl GpuCollector {
    /// Initializes NVML and binds the collector to `pid`.
    pub fn new(pid: Pid) -> Result<Self, GlobalError> {
        let nvml = Nvml::init().map_err(|e| GlobalError::GpuInitError(e.to_string()))?;
        Ok(Self { nvml, pid })
    }

    /// Records GPU memory used by the observed process.
    ///
    /// Only device index 0 is inspected. When the process has no running
    /// compute context, or the stats cannot be read (a warning is logged),
    /// 0 is recorded; an error is returned only when no device is present.
    pub fn collect(&self, metrics: &Metrics, attributes: &ProcessAttributes) -> Result<(), GlobalError> {
        let Ok(device) = self.nvml.device_by_index(0) else {
            return Err(GlobalError::GpuDeviceError("No GPU device found".to_string()));
        };
        match device.running_compute_processes() {
            Ok(gpu_stats) => {
                for stat in &gpu_stats {
                    if stat.pid == self.pid.as_u32() {
                        let memory_used = match stat.used_gpu_memory {
                            UsedGpuMemory::Used(bytes) => bytes,
                            UsedGpuMemory::Unavailable => 0,
                        };
                        metrics.gpu_memory_usage.record(memory_used, &attributes.attributes);
                        return Ok(());
                    }
                }
            }
            Err(_) => warn!("Could not get GPU stats, recording 0 for GPU memory usage"),
        }
        // Fallback: process not found among compute processes, or stats
        // unavailable — report zero usage.
        metrics.gpu_memory_usage.record(0, &attributes.attributes);
        Ok(())
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/system/mod.rs | crates/obs/src/system/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{GlobalError, observability_metric_enabled};
use opentelemetry::{global::meter, metrics::Meter};
use sysinfo::Pid;
mod attributes;
mod collector;
#[cfg(feature = "gpu")]
mod gpu;
mod metrics;
/// Entry point for starting system/process metric collection.
pub struct SystemObserver {}
impl SystemObserver {
    /// Starts metric collection for the current process when the
    /// observability metric flag is enabled; otherwise returns immediately.
    /// Once started it runs indefinitely until the process is terminated.
    pub async fn init_process_observer() -> Result<(), GlobalError> {
        if !observability_metric_enabled() {
            return Ok(());
        }
        let pid = sysinfo::get_current_pid().map_err(|e| GlobalError::PidError(e.to_string()))?;
        SystemObserver::init_process_observer_for_pid(meter("system"), pid).await
    }

    /// Starts metric collection for the process identified by `pid`, polling
    /// at the interval (milliseconds) configured through the environment.
    /// Runs indefinitely until the process is terminated.
    pub async fn init_process_observer_for_pid(meter: Meter, pid: Pid) -> Result<(), GlobalError> {
        let interval_ms = rustfs_utils::get_env_u64(
            rustfs_config::observability::ENV_OBS_METRICS_SYSTEM_INTERVAL_MS,
            rustfs_config::observability::DEFAULT_METRICS_SYSTEM_INTERVAL_MS,
        );
        collector::Collector::new(pid, meter, interval_ms)?.run().await
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/system/attributes.rs | crates/obs/src/system/attributes.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::GlobalError;
use opentelemetry::KeyValue;
use sysinfo::{Pid, System};
/// OTel attribute key: numeric PID of the observed process.
pub(crate) const PROCESS_PID: opentelemetry::Key = opentelemetry::Key::from_static_str("process.pid");
/// OTel attribute key: executable file name of the process.
pub(crate) const PROCESS_EXECUTABLE_NAME: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.name");
/// OTel attribute key: full path to the process executable.
pub(crate) const PROCESS_EXECUTABLE_PATH: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.path");
/// OTel attribute key: command line the process was started with.
pub(crate) const PROCESS_COMMAND: opentelemetry::Key = opentelemetry::Key::from_static_str("process.command");
/// Struct to hold process attributes
pub struct ProcessAttributes {
    /// Pre-built attribute set (pid, executable name/path, command line)
    /// attached to every metric sample for the process.
    pub attributes: Vec<KeyValue>,
}
impl ProcessAttributes {
    /// Builds the OpenTelemetry attribute set for the process identified by
    /// `pid`, refreshing that process's entry in `system` first.
    ///
    /// Returns [`GlobalError::ProcessNotFound`] when the PID is unknown.
    pub fn new(pid: Pid, system: &mut System) -> Result<Self, GlobalError> {
        system.refresh_processes(sysinfo::ProcessesToUpdate::Some(&[pid]), true);
        let process = system
            .process(pid)
            .ok_or_else(|| GlobalError::ProcessNotFound(pid.as_u32()))?;

        let executable_name = process.name().to_os_string().into_string().unwrap_or_default();
        let executable_path = process
            .exe()
            .map(|path| path.to_string_lossy().into_owned())
            .unwrap_or_default();
        // Each argument is prefixed with a space, so the command string starts
        // with a leading space whenever arguments exist (kept as-is to match
        // the attribute values emitted previously).
        let mut command = String::new();
        for arg in process.cmd() {
            command.push(' ');
            command.push_str(arg.to_str().unwrap_or_default());
        }

        Ok(ProcessAttributes {
            attributes: vec![
                KeyValue::new(PROCESS_PID, pid.as_u32() as i64),
                KeyValue::new(PROCESS_EXECUTABLE_NAME, executable_name),
                KeyValue::new(PROCESS_EXECUTABLE_PATH, executable_path),
                KeyValue::new(PROCESS_COMMAND, command),
            ],
        })
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/system/collector.rs | crates/obs/src/system/collector.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::GlobalError;
use crate::system::attributes::ProcessAttributes;
use crate::system::metrics::{DIRECTION, INTERFACE, Metrics, STATUS};
use opentelemetry::KeyValue;
use std::time::SystemTime;
use sysinfo::{Networks, Pid, ProcessStatus, System};
use tokio::time::{Duration, sleep};
/// Collector is responsible for collecting system metrics and attributes.
/// It uses the sysinfo crate to gather information about the system and processes.
/// It also uses OpenTelemetry to record metrics.
pub struct Collector {
    /// Gauges used to publish the sampled values.
    metrics: Metrics,
    /// Pre-computed OTel attributes (pid, executable, command line).
    attributes: ProcessAttributes,
    /// NVML-backed collector for per-process GPU memory usage.
    #[cfg(feature = "gpu")]
    gpu_collector: crate::system::gpu::GpuCollector,
    /// PID of the observed process.
    pid: Pid,
    /// sysinfo handle; the observed process is refreshed each pass.
    system: System,
    /// Network interface statistics; refreshed each pass.
    networks: Networks,
    /// Physical core count, used to normalize CPU usage.
    core_count: usize,
    /// Sampling interval in milliseconds.
    interval_ms: u64,
}
impl Collector {
pub fn new(pid: Pid, meter: opentelemetry::metrics::Meter, interval_ms: u64) -> Result<Self, GlobalError> {
let mut system = System::new();
let attributes = ProcessAttributes::new(pid, &mut system)?;
let core_count = System::physical_core_count().ok_or(GlobalError::CoreCountError)?;
let metrics = Metrics::new(&meter);
#[cfg(feature = "gpu")]
let gpu_collector = crate::system::gpu::GpuCollector::new(pid)?;
let networks = Networks::new_with_refreshed_list();
Ok(Collector {
metrics,
attributes,
#[cfg(feature = "gpu")]
gpu_collector,
pid,
system,
networks,
core_count,
interval_ms,
})
}
pub async fn run(&mut self) -> Result<(), GlobalError> {
loop {
self.collect()?;
tracing::debug!("Collected metrics for PID: {} ,time: {:?}", self.pid, SystemTime::now());
sleep(Duration::from_millis(self.interval_ms)).await;
}
}
fn collect(&mut self) -> Result<(), GlobalError> {
self.system
.refresh_processes(sysinfo::ProcessesToUpdate::Some(&[self.pid]), false);
// refresh the network interface list and statistics
self.networks.refresh(false);
let process = self
.system
.process(self.pid)
.ok_or_else(|| GlobalError::ProcessNotFound(self.pid.as_u32()))?;
// CPU metrics
let cpu_usage = process.cpu_usage();
self.metrics.cpu_usage.record(cpu_usage as f64, &[]);
self.metrics
.cpu_utilization
.record((cpu_usage / self.core_count as f32) as f64, &self.attributes.attributes);
// Memory metrics
self.metrics
.memory_usage
.record(process.memory() as i64, &self.attributes.attributes);
self.metrics
.memory_virtual
.record(process.virtual_memory() as i64, &self.attributes.attributes);
// Disk I/O metrics
let disk_io = process.disk_usage();
self.metrics.disk_io.record(
disk_io.read_bytes as i64,
&[&self.attributes.attributes[..], &[KeyValue::new(DIRECTION, "read")]].concat(),
);
self.metrics.disk_io.record(
disk_io.written_bytes as i64,
&[&self.attributes.attributes[..], &[KeyValue::new(DIRECTION, "write")]].concat(),
);
// Network I/O indicators (corresponding to /system/network/internode)
let mut total_received: i64 = 0;
let mut total_transmitted: i64 = 0;
// statistics by interface
for (interface_name, data) in self.networks.iter() {
total_received += data.total_received() as i64;
total_transmitted += data.total_transmitted() as i64;
let received = data.received() as i64;
let transmitted = data.transmitted() as i64;
self.metrics.network_io_per_interface.record(
received,
&[
&self.attributes.attributes[..],
&[
KeyValue::new(INTERFACE, interface_name.to_string()),
KeyValue::new(DIRECTION, "received"),
],
]
.concat(),
);
self.metrics.network_io_per_interface.record(
transmitted,
&[
&self.attributes.attributes[..],
&[
KeyValue::new(INTERFACE, interface_name.to_string()),
KeyValue::new(DIRECTION, "transmitted"),
],
]
.concat(),
);
}
// global statistics
self.metrics.network_io.record(
total_received,
&[&self.attributes.attributes[..], &[KeyValue::new(DIRECTION, "received")]].concat(),
);
self.metrics.network_io.record(
total_transmitted,
&[&self.attributes.attributes[..], &[KeyValue::new(DIRECTION, "transmitted")]].concat(),
);
// Process status indicator (corresponding to /system/process)
let status_value = match process.status() {
ProcessStatus::Run => 0,
ProcessStatus::Sleep => 1,
ProcessStatus::Zombie => 2,
_ => 3, // other status
};
self.metrics.process_status.record(
status_value,
&[
&self.attributes.attributes[..],
&[KeyValue::new(STATUS, format!("{:?}", process.status()))],
]
.concat(),
);
// GPU Metrics (Optional) Non-MacOS
#[cfg(feature = "gpu")]
self.gpu_collector.collect(&self.metrics, &self.attributes)?;
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/system/metrics.rs | crates/obs/src/system/metrics.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opentelemetry::metrics::{Gauge, Meter};
// Instrument names for the process metrics, dotted OTel-style.
pub(crate) const PROCESS_CPU_USAGE: &str = "process.cpu.usage";
pub(crate) const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization";
pub(crate) const PROCESS_MEMORY_USAGE: &str = "process.memory.usage";
pub(crate) const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual";
pub(crate) const PROCESS_DISK_IO: &str = "process.disk.io";
pub(crate) const PROCESS_NETWORK_IO: &str = "process.network.io";
pub(crate) const PROCESS_NETWORK_IO_PER_INTERFACE: &str = "process.network.io.per_interface";
pub(crate) const PROCESS_STATUS: &str = "process.status";
/// Public (not crate-private) because the GPU collector lives in a
/// feature-gated sibling module.
#[cfg(feature = "gpu")]
pub const PROCESS_GPU_MEMORY_USAGE: &str = "process.gpu.memory.usage";
// Attribute keys attached to the recorded samples.
pub(crate) const DIRECTION: opentelemetry::Key = opentelemetry::Key::from_static_str("direction");
pub(crate) const STATUS: opentelemetry::Key = opentelemetry::Key::from_static_str("status");
pub(crate) const INTERFACE: opentelemetry::Key = opentelemetry::Key::from_static_str("interface");
/// `Metrics` struct holds the OpenTelemetry metrics for process monitoring.
/// It contains various metrics such as CPU usage, memory usage,
/// disk I/O, network I/O, and process status.
///
/// The `Metrics` struct is designed to be used with OpenTelemetry's
/// metrics API to record and export these metrics.
///
/// The `new` method initializes the metrics using the provided
/// `opentelemetry::metrics::Meter`.
pub struct Metrics {
    /// CPU percentage used by the process.
    pub cpu_usage: Gauge<f64>,
    /// CPU percentage normalized by the physical core count.
    pub cpu_utilization: Gauge<f64>,
    /// Physical memory in use, bytes.
    pub memory_usage: Gauge<i64>,
    /// Committed virtual memory, bytes.
    pub memory_virtual: Gauge<i64>,
    /// Disk bytes transferred, labelled by direction.
    pub disk_io: Gauge<i64>,
    /// Network bytes transferred (all interfaces), labelled by direction.
    pub network_io: Gauge<i64>,
    /// Network bytes transferred, labelled by interface and direction.
    pub network_io_per_interface: Gauge<i64>,
    /// Numeric process status code (see `Collector::collect`).
    pub process_status: Gauge<i64>,
    /// GPU memory in use by the process, bytes.
    #[cfg(feature = "gpu")]
    pub gpu_memory_usage: Gauge<u64>,
}
impl Metrics {
    /// Builds every process-monitoring gauge on the supplied `meter`.
    pub fn new(meter: &Meter) -> Self {
        // Two shapes recur: percentage-valued f64 gauges and
        // byte-valued i64 gauges. Build them through shared closures.
        let percent_gauge = |name: &'static str, description: &'static str| {
            meter
                .f64_gauge(name)
                .with_description(description)
                .with_unit("percent")
                .build()
        };
        let byte_gauge = |name: &'static str, description: &'static str| {
            meter
                .i64_gauge(name)
                .with_description(description)
                .with_unit("byte")
                .build()
        };

        let cpu_usage = percent_gauge(PROCESS_CPU_USAGE, "The percentage of CPU in use.");
        let cpu_utilization = percent_gauge(PROCESS_CPU_UTILIZATION, "The amount of CPU in use.");
        let memory_usage = byte_gauge(PROCESS_MEMORY_USAGE, "The amount of physical memory in use.");
        let memory_virtual = byte_gauge(PROCESS_MEMORY_VIRTUAL, "The amount of committed virtual memory.");
        let disk_io = byte_gauge(PROCESS_DISK_IO, "Disk bytes transferred.");
        let network_io = byte_gauge(PROCESS_NETWORK_IO, "Network bytes transferred.");
        let network_io_per_interface =
            byte_gauge(PROCESS_NETWORK_IO_PER_INTERFACE, "Network bytes transferred (per interface).");
        // Status is a unit-less code.
        let process_status = meter
            .i64_gauge(PROCESS_STATUS)
            .with_description("Process status (0: Running, 1: Sleeping, 2: Zombie, etc.)")
            .build();
        // GPU memory is u64-valued, so it cannot reuse the i64 helper.
        #[cfg(feature = "gpu")]
        let gpu_memory_usage = meter
            .u64_gauge(PROCESS_GPU_MEMORY_USAGE)
            .with_description("The amount of physical GPU memory in use.")
            .with_unit("byte")
            .build();

        Metrics {
            cpu_usage,
            cpu_utilization,
            memory_usage,
            memory_virtual,
            disk_io,
            network_io,
            network_io_per_interface,
            process_status,
            #[cfg(feature = "gpu")]
            gpu_memory_usage,
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_notification.rs | crates/obs/src/metrics/cluster_notification.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Notify the relevant metric descriptor
use crate::{MetricDescriptor, MetricName, new_counter_md, subsystems};
use std::sync::LazyLock;
/// Async `Send` calls currently in flight across all notification targets.
pub static NOTIFICATION_CURRENT_SEND_IN_PROGRESS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::NotificationCurrentSendInProgress,
        "Number of concurrent async Send calls active to all targets",
        &[],
        subsystems::NOTIFICATION,
    )
});
/// Events that could not be delivered to their targets.
pub static NOTIFICATION_EVENTS_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::NotificationEventsErrorsTotal,
        "Events that were failed to be sent to the targets",
        &[],
        subsystems::NOTIFICATION,
    )
});
/// Events successfully delivered to their targets.
pub static NOTIFICATION_EVENTS_SENT_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::NotificationEventsSentTotal,
        "Total number of events sent to the targets",
        &[],
        subsystems::NOTIFICATION,
    )
});
/// Events dropped because the in-memory queue was full.
pub static NOTIFICATION_EVENTS_SKIPPED_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::NotificationEventsSkippedTotal,
        "Events that were skipped to be sent to the targets due to the in-memory queue being full",
        &[],
        subsystems::NOTIFICATION,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/bucket_replication.rs | crates/obs/src/metrics/bucket_replication.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Bucket copy metric descriptor
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Bucket level replication metric descriptor
pub const BUCKET_L: &str = "bucket";
/// Replication operation
pub const OPERATION_L: &str = "operation";
/// Replication target ARN
pub const TARGET_ARN_L: &str = "targetArn";
/// Replication range
pub const RANGE_L: &str = "range";
pub static BUCKET_REPL_LAST_HR_FAILED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::LastHourFailedBytes,
"Total number of bytes failed at least once to replicate in the last hour on a bucket",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_LAST_HR_FAILED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::LastHourFailedCount,
"Total number of objects which failed replication in the last hour on a bucket",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_LAST_MIN_FAILED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::LastMinFailedBytes,
"Total number of bytes failed at least once to replicate in the last full minute on a bucket",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_LAST_MIN_FAILED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::LastMinFailedCount,
"Total number of objects which failed replication in the last full minute on a bucket",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_LATENCY_MS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::LatencyMilliSec,
"Replication latency on a bucket in milliseconds",
&[BUCKET_L, OPERATION_L, RANGE_L, TARGET_ARN_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_DELETE_TAGGING_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedDeleteTaggingRequestsTotal,
"Number of DELETE tagging requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_GET_REQUESTS_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedGetRequestsFailures,
"Number of failures in GET requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_GET_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedGetRequestsTotal,
"Number of GET requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
// TODO - add a metric for the number of PUT requests proxied to replication target
pub static BUCKET_REPL_PROXIED_GET_TAGGING_REQUESTS_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedGetTaggingRequestFailures,
"Number of failures in GET tagging requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_GET_TAGGING_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedGetTaggingRequestsTotal,
"Number of GET tagging requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_HEAD_REQUESTS_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedHeadRequestsFailures,
"Number of failures in HEAD requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
pub static BUCKET_REPL_PROXIED_HEAD_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProxiedHeadRequestsTotal,
"Number of HEAD requests proxied to replication target",
&[BUCKET_L],
subsystems::BUCKET_REPLICATION,
)
});
// TODO - add a metric for the number of PUT requests proxied to replication target
/// Counter: PUT tagging requests proxied to the replication target that failed, labeled per bucket.
pub static BUCKET_REPL_PROXIED_PUT_TAGGING_REQUESTS_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProxiedPutTaggingRequestFailures,
        "Number of failures in PUT tagging requests proxied to replication target",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
/// Counter: total PUT tagging requests proxied to the replication target, labeled per bucket.
pub static BUCKET_REPL_PROXIED_PUT_TAGGING_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProxiedPutTaggingRequestsTotal,
        "Number of PUT tagging requests proxied to replication target",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
/// Counter: bytes successfully replicated to the target, labeled per bucket.
pub static BUCKET_REPL_SENT_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::SentBytes,
        "Total number of bytes replicated to the target",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
/// Counter: objects successfully replicated to the target, labeled per bucket.
pub static BUCKET_REPL_SENT_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::SentCount,
        "Total number of objects replicated to the target",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
/// Counter: bytes that failed replication at least once since server start, labeled per bucket.
pub static BUCKET_REPL_TOTAL_FAILED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::TotalFailedBytes,
        "Total number of bytes failed at least once to replicate since server start",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
/// Counter: objects that failed replication since server start, labeled per bucket.
pub static BUCKET_REPL_TOTAL_FAILED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::TotalFailedCount,
        "Total number of objects which failed replication since server start",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
// TODO - add a metric for the number of DELETE requests proxied to replication target
/// Counter: DELETE tagging requests proxied to the replication target that failed, labeled per bucket.
/// Pairs with the `..._DELETE_TAGGING_REQUESTS_TOTAL_MD` descriptor declared earlier in this file.
pub static BUCKET_REPL_PROXIED_DELETE_TAGGING_REQUESTS_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProxiedDeleteTaggingRequestFailures,
        "Number of failures in DELETE tagging requests proxied to replication target",
        &[BUCKET_L],
        subsystems::BUCKET_REPLICATION,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_config.rs | crates/obs/src/metrics/cluster_config.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Metric descriptors related to cluster configuration
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Gauge: configured parity of the reduced-redundancy (RRS) storage class (no labels).
pub static CONFIG_RRS_PARITY_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ConfigRRSParity,
        "Reduced redundancy storage class parity",
        &[],
        subsystems::CLUSTER_CONFIG,
    )
});
/// Gauge: configured parity of the standard storage class (no labels).
pub static CONFIG_STANDARD_PARITY_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ConfigStandardParity,
        "Standard storage class parity",
        &[],
        subsystems::CLUSTER_CONFIG,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/system_memory.rs | crates/obs/src/metrics/system_memory.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Memory-related metric descriptors
///
/// This module provides a set of metric descriptors for system memory statistics.
/// These descriptors are initialized lazily using `std::sync::LazyLock` to ensure
/// they are only created when actually needed, improving performance and reducing
/// startup overhead.
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Total memory available on the node
pub static MEM_TOTAL_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemTotal, "Total memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Memory currently in use on the node
pub static MEM_USED_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemUsed, "Used memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Percentage of total memory currently in use
pub static MEM_USED_PERC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::MemUsedPerc,
"Used memory percentage on the node",
&[],
subsystems::SYSTEM_MEMORY,
)
});
/// Memory not currently in use and available for allocation
pub static MEM_FREE_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemFree, "Free memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Memory used for file buffers by the kernel
pub static MEM_BUFFERS_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemBuffers, "Buffers memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Memory used for caching file data by the kernel
pub static MEM_CACHE_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemCache, "Cache memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Memory shared between multiple processes
pub static MEM_SHARED_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemShared, "Shared memory on the node", &[], subsystems::SYSTEM_MEMORY));
/// Estimate of memory available for new applications without swapping
pub static MEM_AVAILABLE_MD: LazyLock<MetricDescriptor> =
LazyLock::new(|| new_gauge_md(MetricName::MemAvailable, "Available memory on the node", &[], subsystems::SYSTEM_MEMORY));
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/audit.rs | crates/obs/src/metrics/audit.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// audit related metric descriptors
///
/// This module contains the metric descriptors for the audit subsystem.
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Label identifying which configured audit target a message was routed to.
const TARGET_ID: &str = "target_id";
/// Outcome label name; expected values are `SUCCESS` / `FAILURE` below.
pub const RESULT: &str = "result"; // success / failure
/// Alternate outcome label name; expected values are `SUCCESS` / `FAILURE` below.
pub const STATUS: &str = "status"; // success / failure
/// Canonical label value for a successful operation.
pub const SUCCESS: &str = "success";
/// Canonical label value for a failed operation.
pub const FAILURE: &str = "failure";
/// Counter: audit messages that failed to send since start, labeled by target id.
pub static AUDIT_FAILED_MESSAGES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::AuditFailedMessages,
        "Total number of messages that failed to send since start",
        &[TARGET_ID],
        subsystems::AUDIT,
    )
});
/// Gauge: audit messages still queued (unsent) for a target, labeled by target id.
pub static AUDIT_TARGET_QUEUE_LENGTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::AuditTargetQueueLength,
        "Number of unsent messages in queue for target",
        &[TARGET_ID],
        subsystems::AUDIT,
    )
});
/// Counter: audit messages sent since start, labeled by target id.
pub static AUDIT_TOTAL_MESSAGES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::AuditTotalMessages,
        "Total number of messages sent since start",
        &[TARGET_ID],
        subsystems::AUDIT,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/scanner.rs | crates/obs/src/metrics/scanner.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Scanner-related metric descriptors
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Counter: bucket scans completed since server start (no labels).
pub static SCANNER_BUCKET_SCANS_FINISHED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ScannerBucketScansFinished,
        "Total number of bucket scans finished since server start",
        &[],
        subsystems::SCANNER,
    )
});
/// Counter: bucket scans started since server start (no labels).
pub static SCANNER_BUCKET_SCANS_STARTED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ScannerBucketScansStarted,
        "Total number of bucket scans started since server start",
        &[],
        subsystems::SCANNER,
    )
});
/// Counter: directories visited by the scanner since server start (no labels).
pub static SCANNER_DIRECTORIES_SCANNED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ScannerDirectoriesScanned,
        "Total number of directories scanned since server start",
        &[],
        subsystems::SCANNER,
    )
});
/// Counter: unique objects visited by the scanner since server start (no labels).
pub static SCANNER_OBJECTS_SCANNED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ScannerObjectsScanned,
        "Total number of unique objects scanned since server start",
        &[],
        subsystems::SCANNER,
    )
});
/// Counter: object versions visited by the scanner since server start (no labels).
pub static SCANNER_VERSIONS_SCANNED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ScannerVersionsScanned,
        "Total number of object versions scanned since server start",
        &[],
        subsystems::SCANNER,
    )
});
/// Gauge: seconds elapsed since the scanner last made progress (no labels).
pub static SCANNER_LAST_ACTIVITY_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ScannerLastActivitySeconds,
        "Time elapsed (in seconds) since last scan activity.",
        &[],
        subsystems::SCANNER,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/bucket.rs | crates/obs/src/metrics/bucket.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// bucket level s3 metric descriptor
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, new_histogram_md, subsystems};
use std::sync::LazyLock;
/// Counter: outbound (sent) API traffic bytes for a bucket, labeled by bucket and request type.
///
/// NOTE(review): the original help text read "received", contradicting the metric
/// name `ApiTrafficSentBytes` — the sent/recv descriptions were swapped; fixed here.
pub static BUCKET_API_TRAFFIC_SENT_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiTrafficSentBytes,
        "Total number of bytes sent for a bucket",
        &["bucket", "type"],
        subsystems::BUCKET_API,
    )
});
/// Counter: inbound (received) API traffic bytes for a bucket, labeled by bucket and request type.
///
/// NOTE(review): the original help text read "sent", contradicting the metric
/// name `ApiTrafficRecvBytes` — the sent/recv descriptions were swapped; fixed here.
pub static BUCKET_API_TRAFFIC_RECV_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiTrafficRecvBytes,
        "Total number of bytes received for a bucket",
        &["bucket", "type"],
        subsystems::BUCKET_API,
    )
});
/// Gauge: requests currently in flight for a bucket, labeled by bucket, API name and type.
pub static BUCKET_API_REQUESTS_IN_FLIGHT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ApiRequestsInFlightTotal,
        "Total number of requests currently in flight for a bucket",
        &["bucket", "name", "type"],
        subsystems::BUCKET_API,
    )
});
/// Counter: total requests served for a bucket, labeled by bucket, API name and type.
pub static BUCKET_API_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsTotal,
        "Total number of requests for a bucket",
        &["bucket", "name", "type"],
        subsystems::BUCKET_API,
    )
});
/// Counter: requests canceled client-side for a bucket, labeled by bucket, API name and type.
pub static BUCKET_API_REQUESTS_CANCELED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsCanceledTotal,
        "Total number of requests canceled by the client for a bucket",
        &["bucket", "name", "type"],
        subsystems::BUCKET_API,
    )
});
/// Counter: requests answered with a 4xx status for a bucket, labeled by bucket, API name and type.
pub static BUCKET_API_REQUESTS_4XX_ERRORS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequests4xxErrorsTotal,
        "Total number of requests with 4xx errors for a bucket",
        &["bucket", "name", "type"],
        subsystems::BUCKET_API,
    )
});
/// Counter: requests answered with a 5xx status for a bucket, labeled by bucket, API name and type.
pub static BUCKET_API_REQUESTS_5XX_ERRORS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequests5xxErrorsTotal,
        "Total number of requests with 5xx errors for a bucket",
        &["bucket", "name", "type"],
        subsystems::BUCKET_API,
    )
});
/// Histogram: time-to-first-byte distribution per bucket API call; `le` is the
/// conventional Prometheus histogram bucket-boundary label.
pub static BUCKET_API_REQUESTS_TTFB_SECONDS_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_histogram_md(
        MetricName::ApiRequestsTTFBSecondsDistribution,
        "Distribution of time to first byte across API calls for a bucket",
        &["bucket", "name", "le", "type"],
        subsystems::BUCKET_API,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/logger_webhook.rs | crates/obs/src/metrics/logger_webhook.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// A descriptor for metrics related to webhook logs
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Label constants shared by the webhook logger metrics below.
/// Name of the configured webhook target.
pub const NAME_LABEL: &str = "name";
/// Endpoint URL of the webhook target.
pub const ENDPOINT_LABEL: &str = "endpoint";
// Label set applied uniformly to every webhook metric in this module.
const ALL_WEBHOOK_LABELS: [&str; 2] = [NAME_LABEL, ENDPOINT_LABEL];
/// Counter: webhook log messages that failed to send, labeled by target name and endpoint.
pub static WEBHOOK_FAILED_MESSAGES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::WebhookFailedMessages,
        "Number of messages that failed to send",
        &ALL_WEBHOOK_LABELS[..],
        subsystems::LOGGER_WEBHOOK,
    )
});
/// Gauge: messages currently queued for a webhook target, labeled by target name and endpoint.
pub static WEBHOOK_QUEUE_LENGTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::WebhookQueueLength,
        "Webhook queue length",
        &ALL_WEBHOOK_LABELS[..],
        subsystems::LOGGER_WEBHOOK,
    )
});
/// Counter: messages sent to a webhook target, labeled by target name and endpoint.
pub static WEBHOOK_TOTAL_MESSAGES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::WebhookTotalMessages,
        "Total number of messages sent to this target",
        &ALL_WEBHOOK_LABELS[..],
        subsystems::LOGGER_WEBHOOK,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/ilm.rs | crates/obs/src/metrics/ilm.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// ILM-related metric descriptors
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Gauge: ILM expiry tasks waiting in the queue (no labels).
pub static ILM_EXPIRY_PENDING_TASKS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::IlmExpiryPendingTasks,
        "Number of pending ILM expiry tasks in the queue",
        &[],
        subsystems::ILM,
    )
});
/// Gauge: ILM transition tasks currently executing (no labels).
pub static ILM_TRANSITION_ACTIVE_TASKS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::IlmTransitionActiveTasks,
        "Number of active ILM transition tasks",
        &[],
        subsystems::ILM,
    )
});
/// Gauge: ILM transition tasks waiting in the queue (no labels).
pub static ILM_TRANSITION_PENDING_TASKS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::IlmTransitionPendingTasks,
        "Number of pending ILM transition tasks in the queue",
        &[],
        subsystems::ILM,
    )
});
/// Counter: immediate ILM transition tasks that were missed (no labels).
pub static ILM_TRANSITION_MISSED_IMMEDIATE_TASKS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::IlmTransitionMissedImmediateTasks,
        "Number of missed immediate ILM transition tasks",
        &[],
        subsystems::ILM,
    )
});
/// Counter: object versions evaluated for ILM actions since server start (no labels).
pub static ILM_VERSIONS_SCANNED_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::IlmVersionsScanned,
        "Total number of object versions checked for ILM actions since server start",
        &[],
        subsystems::ILM,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/system_network.rs | crates/obs/src/metrics/system_network.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Network-related metric descriptors
///
/// These metrics capture internode network communication statistics including:
/// - Error counts for connection and general internode calls
/// - Network dial performance metrics
/// - Data transfer volume in both directions
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Counter: internode calls that failed, across all peers (no labels).
pub static INTERNODE_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::InternodeErrorsTotal,
        "Total number of failed internode calls",
        &[],
        subsystems::SYSTEM_NETWORK_INTERNODE,
    )
});
/// Counter: internode TCP dial attempts that timed out or errored (no labels).
pub static INTERNODE_DIAL_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::InternodeDialErrorsTotal,
        "Total number of internode TCP dial timeouts and errors",
        &[],
        subsystems::SYSTEM_NETWORK_INTERNODE,
    )
});
/// Gauge: average internode TCP dial latency, in nanoseconds (no labels).
pub static INTERNODE_DIAL_AVG_TIME_NANOS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::InternodeDialAvgTimeNanos,
        "Average dial time of internode TCP calls in nanoseconds",
        &[],
        subsystems::SYSTEM_NETWORK_INTERNODE,
    )
});
/// Counter: bytes sent to peer nodes (no labels).
pub static INTERNODE_SENT_BYTES_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::InternodeSentBytesTotal,
        "Total number of bytes sent to other peer nodes",
        &[],
        subsystems::SYSTEM_NETWORK_INTERNODE,
    )
});
/// Counter: bytes received from peer nodes (no labels).
pub static INTERNODE_RECV_BYTES_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::InternodeRecvBytesTotal,
        "Total number of bytes received from other peer nodes",
        &[],
        subsystems::SYSTEM_NETWORK_INTERNODE,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_usage.rs | crates/obs/src/metrics/cluster_usage.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Descriptors of metrics related to cluster object and bucket usage
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Label carrying the bucket name for per-bucket usage metrics.
pub const BUCKET_LABEL: &str = "bucket";
/// Label carrying the size/count range for distribution metrics.
pub const RANGE_LABEL: &str = "range";
/// Gauge: seconds since the usage metrics were last refreshed (no labels).
pub static USAGE_SINCE_LAST_UPDATE_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageSinceLastUpdateSeconds,
        "Time since last update of usage metrics in seconds",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total bytes stored cluster-wide (no labels).
pub static USAGE_TOTAL_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageTotalBytes,
        "Total cluster usage in bytes",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total object count cluster-wide (no labels).
pub static USAGE_OBJECTS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageObjectsCount,
        "Total cluster objects count",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total object versions cluster-wide, delete markers included (no labels).
pub static USAGE_VERSIONS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageVersionsCount,
        "Total cluster object versions (including delete markers) count",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total delete markers cluster-wide (no labels).
pub static USAGE_DELETE_MARKERS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageDeleteMarkersCount,
        "Total cluster delete markers count",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total bucket count cluster-wide (no labels).
pub static USAGE_BUCKETS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketsCount,
        "Total cluster buckets count",
        &[],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: cluster-wide object size distribution, labeled by size range.
pub static USAGE_OBJECTS_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageSizeDistribution,
        "Cluster object size distribution",
        &[RANGE_LABEL],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: cluster-wide object version-count distribution, labeled by count range.
pub static USAGE_VERSIONS_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageVersionCountDistribution,
        "Cluster object version count distribution",
        &[RANGE_LABEL],
        subsystems::CLUSTER_USAGE_OBJECTS,
    )
});
/// Gauge: total size of a bucket in bytes, labeled by bucket.
pub static USAGE_BUCKET_TOTAL_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketTotalBytes,
        "Total bucket size in bytes",
        &[BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: object count in a bucket, labeled by bucket.
pub static USAGE_BUCKET_OBJECTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketObjectsCount,
        "Total objects count in bucket",
        &[BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: object versions in a bucket, delete markers included, labeled by bucket.
pub static USAGE_BUCKET_VERSIONS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketVersionsCount,
        "Total object versions (including delete markers) count in bucket",
        &[BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: delete markers in a bucket, labeled by bucket.
pub static USAGE_BUCKET_DELETE_MARKERS_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketDeleteMarkersCount,
        "Total delete markers count in bucket",
        &[BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: configured quota of a bucket in bytes, labeled by bucket.
pub static USAGE_BUCKET_QUOTA_TOTAL_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketQuotaTotalBytes,
        "Total bucket quota in bytes",
        &[BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: per-bucket object size distribution, labeled by range and bucket.
pub static USAGE_BUCKET_OBJECT_SIZE_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketObjectSizeDistribution,
        "Bucket object size distribution",
        &[RANGE_LABEL, BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
/// Gauge: per-bucket object version-count distribution, labeled by range and bucket.
pub static USAGE_BUCKET_OBJECT_VERSION_COUNT_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::UsageBucketObjectVersionCountDistribution,
        "Bucket object version count distribution",
        &[RANGE_LABEL, BUCKET_LABEL],
        subsystems::CLUSTER_USAGE_BUCKETS,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_iam.rs | crates/obs/src/metrics/cluster_iam.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// IAM related metric descriptors
use crate::{MetricDescriptor, MetricName, new_counter_md, subsystems};
use std::sync::LazyLock;
/// Duration (ms) of the last successful IAM data sync (no labels).
// NOTE(review): this and several descriptors below carry time/duration values but
// are declared via `new_counter_md`; a gauge would be more conventional — confirm
// whether the counter type is intentional (it mirrors the upstream metric set).
pub static LAST_SYNC_DURATION_MILLIS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::LastSyncDurationMillis,
        "Last successful IAM data sync duration in milliseconds",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Failed plugin-authn requests in the last full minute (no labels).
pub static PLUGIN_AUTHN_SERVICE_FAILED_REQUESTS_MINUTE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceFailedRequestsMinute,
        "When plugin authentication is configured, returns failed requests count in the last full minute",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Seconds since the last failed plugin-authn request (no labels).
pub static PLUGIN_AUTHN_SERVICE_LAST_FAIL_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceLastFailSeconds,
        "When plugin authentication is configured, returns time (in seconds) since the last failed request to the service",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Seconds since the last successful plugin-authn request (no labels).
pub static PLUGIN_AUTHN_SERVICE_LAST_SUCC_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceLastSuccSeconds,
        "When plugin authentication is configured, returns time (in seconds) since the last successful request to the service",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Average RTT (ms) of successful plugin-authn requests in the last full minute (no labels).
pub static PLUGIN_AUTHN_SERVICE_SUCC_AVG_RTT_MS_MINUTE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceSuccAvgRttMsMinute,
        "When plugin authentication is configured, returns average round-trip-time of successful requests in the last full minute",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Maximum RTT (ms) of successful plugin-authn requests in the last full minute (no labels).
pub static PLUGIN_AUTHN_SERVICE_SUCC_MAX_RTT_MS_MINUTE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceSuccMaxRttMsMinute,
        "When plugin authentication is configured, returns maximum round-trip-time of successful requests in the last full minute",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Total plugin-authn requests in the last full minute (no labels).
pub static PLUGIN_AUTHN_SERVICE_TOTAL_REQUESTS_MINUTE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::PluginAuthnServiceTotalRequestsMinute,
        "When plugin authentication is configured, returns total requests count in the last full minute",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Milliseconds since the last successful IAM data sync (no labels).
pub static SINCE_LAST_SYNC_MILLIS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::SinceLastSyncMillis,
        "Time (in milliseconds) since last successful IAM data sync.",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Counter: IAM data syncs that failed since server start (no labels).
pub static SYNC_FAILURES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::SyncFailures,
        "Number of failed IAM data syncs since server start.",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
/// Counter: IAM data syncs that succeeded since server start (no labels).
pub static SYNC_SUCCESSES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::SyncSuccesses,
        "Number of successful IAM data syncs since server start.",
        &[],
        subsystems::CLUSTER_IAM,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/system_process.rs | crates/obs/src/metrics/system_process.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Process related metric descriptors
///
/// This module defines various system process metrics used for monitoring
/// the RustFS process performance, resource usage, and system integration.
/// Metrics are implemented using std::sync::LazyLock for thread-safe lazy initialization.
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Gauge: READ locks currently held on this peer (no labels).
pub static PROCESS_LOCKS_READ_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ProcessLocksReadTotal,
        "Number of current READ locks on this peer",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Gauge: WRITE locks currently held on this peer (no labels).
pub static PROCESS_LOCKS_WRITE_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ProcessLocksWriteTotal,
        "Number of current WRITE locks on this peer",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Counter: cumulative user + system CPU time of the process, in seconds (no labels).
pub static PROCESS_CPU_TOTAL_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProcessCPUTotalSeconds,
        "Total user and system CPU time spent in seconds",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Gauge: number of "go routines" running — metric name inherited from the MinIO
/// metric set; presumably maps to the async-task/thread count here — TODO confirm.
pub static PROCESS_GO_ROUTINE_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ProcessGoRoutineTotal,
        "Total number of go routines running",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Counter: bytes read including page cache (Linux `/proc/[pid]/io` `rchar`) (no labels).
pub static PROCESS_IO_RCHAR_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProcessIORCharBytes,
        "Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Counter: bytes actually read from storage (Linux `/proc/[pid]/io` `read_bytes`) (no labels).
pub static PROCESS_IO_READ_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProcessIOReadBytes,
        "Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Counter: bytes written including page cache (Linux `/proc/[pid]/io` `wchar`) (no labels).
pub static PROCESS_IO_WCHAR_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProcessIOWCharBytes,
        "Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Counter: bytes actually written to storage (Linux `/proc/[pid]/io` `write_bytes`) (no labels).
pub static PROCESS_IO_WRITE_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ProcessIOWriteBytes,
        "Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Gauge: process start time, seconds since the Unix epoch (no labels).
pub static PROCESS_START_TIME_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ProcessStartTimeSeconds,
        "Start time for RustFS process in seconds since Unix epoch",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Gauge: process uptime in seconds (no labels).
pub static PROCESS_UPTIME_SECONDS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ProcessUptimeSeconds,
        "Uptime for RustFS process in seconds",
        &[],
        subsystems::SYSTEM_PROCESS,
    )
});
/// Limit on total number of open file descriptors for the RustFS Server process
pub static PROCESS_FILE_DESCRIPTOR_LIMIT_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::ProcessFileDescriptorLimitTotal,
"Limit on total number of open file descriptors for the RustFS Server process",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Total number of open file descriptors by the RustFS Server process
pub static PROCESS_FILE_DESCRIPTOR_OPEN_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::ProcessFileDescriptorOpenTotal,
"Total number of open file descriptors by the RustFS Server process",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Total read SysCalls to the kernel
pub static PROCESS_SYSCALL_READ_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProcessSyscallReadTotal,
"Total read SysCalls to the kernel. /proc/[pid]/io syscr",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Total write SysCalls to the kernel
pub static PROCESS_SYSCALL_WRITE_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_counter_md(
MetricName::ProcessSyscallWriteTotal,
"Total write SysCalls to the kernel. /proc/[pid]/io syscw",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Resident memory size in bytes
pub static PROCESS_RESIDENT_MEMORY_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::ProcessResidentMemoryBytes,
"Resident memory size in bytes",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Virtual memory size in bytes
pub static PROCESS_VIRTUAL_MEMORY_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::ProcessVirtualMemoryBytes,
"Virtual memory size in bytes",
&[],
subsystems::SYSTEM_PROCESS,
)
});
/// Maximum virtual memory size in bytes
pub static PROCESS_VIRTUAL_MEMORY_MAX_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
new_gauge_md(
MetricName::ProcessVirtualMemoryMaxBytes,
"Maximum virtual memory size in bytes",
&[],
subsystems::SYSTEM_PROCESS,
)
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_erasure_set.rs | crates/obs/src/metrics/cluster_erasure_set.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Erasure code set related metric descriptors
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// The label for the pool ID
pub const POOL_ID_L: &str = "pool_id";
/// The label for the set ID
pub const SET_ID_L: &str = "set_id";
/// Overall write quorum across all pools and sets
pub static ERASURE_SET_OVERALL_WRITE_QUORUM_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetOverallWriteQuorum,
        "Overall write quorum across pools and sets",
        &[],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Overall health across all pools and sets (1=healthy, 0=unhealthy)
pub static ERASURE_SET_OVERALL_HEALTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetOverallHealth,
        "Overall health across pools and sets (1=healthy, 0=unhealthy)",
        &[],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Read quorum for a single erasure set, labelled by pool and set
pub static ERASURE_SET_READ_QUORUM_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetReadQuorum,
        "Read quorum for the erasure set in a pool",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Write quorum for a single erasure set, labelled by pool and set
pub static ERASURE_SET_WRITE_QUORUM_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetWriteQuorum,
        "Write quorum for the erasure set in a pool",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Count of online drives in an erasure set
pub static ERASURE_SET_ONLINE_DRIVES_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetOnlineDrivesCount,
        "Count of online drives in the erasure set in a pool",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Count of drives currently healing in an erasure set
pub static ERASURE_SET_HEALING_DRIVES_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetHealingDrivesCount,
        "Count of healing drives in the erasure set in a pool",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Health of a single erasure set (1=healthy, 0=unhealthy)
pub static ERASURE_SET_HEALTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetHealth,
        "Health of the erasure set in a pool (1=healthy, 0=unhealthy)",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Number of drive failures tolerable without disrupting reads
pub static ERASURE_SET_READ_TOLERANCE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetReadTolerance,
        "No of drive failures that can be tolerated without disrupting read operations",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Number of drive failures tolerable without disrupting writes
pub static ERASURE_SET_WRITE_TOLERANCE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetWriteTolerance,
        "No of drive failures that can be tolerated without disrupting write operations",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Read-path health of a single erasure set (1=healthy, 0=unhealthy)
pub static ERASURE_SET_READ_HEALTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetReadHealth,
        "Health of the erasure set in a pool for read operations (1=healthy, 0=unhealthy)",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
/// Write-path health of a single erasure set (1=healthy, 0=unhealthy)
pub static ERASURE_SET_WRITE_HEALTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ErasureSetWriteHealth,
        "Health of the erasure set in a pool for write operations (1=healthy, 0=unhealthy)",
        &[POOL_ID_L, SET_ID_L],
        subsystems::CLUSTER_ERASURE_SET,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/mod.rs | crates/obs/src/metrics/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Crate-internal metric descriptor modules, one per monitored subsystem.
pub(crate) mod audit;
pub(crate) mod bucket;
pub(crate) mod bucket_replication;
pub(crate) mod cluster_config;
pub(crate) mod cluster_erasure_set;
pub(crate) mod cluster_health;
pub(crate) mod cluster_iam;
pub(crate) mod cluster_notification;
pub(crate) mod cluster_usage;
pub(crate) mod entry;
pub(crate) mod ilm;
pub(crate) mod logger_webhook;
pub(crate) mod replication;
pub(crate) mod request;
pub(crate) mod scanner;
pub(crate) mod system_cpu;
pub(crate) mod system_drive;
pub(crate) mod system_memory;
pub(crate) mod system_network;
pub(crate) mod system_process;
// Public re-exports: the descriptor types and constructor helpers that the
// subsystem modules above use to define their metric descriptors.
pub use entry::descriptor::MetricDescriptor;
pub use entry::metric_name::MetricName;
pub use entry::metric_type::MetricType;
pub use entry::namespace::MetricNamespace;
pub use entry::subsystem::MetricSubsystem;
pub use entry::subsystem::subsystems;
pub use entry::{new_counter_md, new_gauge_md, new_histogram_md};
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/cluster_health.rs | crates/obs/src/metrics/cluster_health.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Cluster health-related metric descriptors
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Count of offline drives across the whole cluster
pub static HEALTH_DRIVES_OFFLINE_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::HealthDrivesOfflineCount,
        "Count of offline drives in the cluster",
        &[],
        subsystems::CLUSTER_HEALTH,
    )
});
/// Count of online drives across the whole cluster
pub static HEALTH_DRIVES_ONLINE_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::HealthDrivesOnlineCount,
        "Count of online drives in the cluster",
        &[],
        subsystems::CLUSTER_HEALTH,
    )
});
/// Total count of drives (online + offline) across the whole cluster
pub static HEALTH_DRIVES_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::HealthDrivesCount,
        "Count of all drives in the cluster",
        &[],
        subsystems::CLUSTER_HEALTH,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/request.rs | crates/obs/src/metrics/request.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use crate::{MetricDescriptor, MetricName, MetricSubsystem, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
// All descriptors in this file reference the API-requests subsystem through the
// `subsystems::API_REQUESTS` constant, matching the convention used by every
// other metrics module in this crate (the file previously mixed it with the
// equivalent `MetricSubsystem::ApiRequests` variant).
/// Requests rejected because authentication failed
pub static API_REJECTED_AUTH_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRejectedAuthTotal,
        "Total number of requests rejected for auth failure",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests rejected because of an invalid header
pub static API_REJECTED_HEADER_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRejectedHeaderTotal,
        "Total number of requests rejected for invalid header",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests rejected because of an invalid timestamp
pub static API_REJECTED_TIMESTAMP_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRejectedTimestampTotal,
        "Total number of requests rejected for invalid timestamp",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests rejected as invalid for any other reason
pub static API_REJECTED_INVALID_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRejectedInvalidTotal,
        "Total number of invalid requests",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests currently parked in the waiting queue
pub static API_REQUESTS_WAITING_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ApiRequestsWaitingTotal,
        "Total number of requests in the waiting queue",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Incoming requests observed
pub static API_REQUESTS_INCOMING_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ApiRequestsIncomingTotal,
        "Total number of incoming requests",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests currently in flight, labelled by API name and type
pub static API_REQUESTS_IN_FLIGHT_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ApiRequestsInFlightTotal,
        "Total number of requests currently in flight",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Total requests served, labelled by API name and type
pub static API_REQUESTS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsTotal,
        "Total number of requests",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests that ended in any client or server error (4xx or 5xx)
pub static API_REQUESTS_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsErrorsTotal,
        "Total number of requests with (4xx and 5xx) errors",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests that ended in a server error (5xx)
pub static API_REQUESTS_5XX_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequests5xxErrorsTotal,
        "Total number of requests with 5xx errors",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests that ended in a client error (4xx)
pub static API_REQUESTS_4XX_ERRORS_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequests4xxErrorsTotal,
        "Total number of requests with 4xx errors",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Requests canceled by the client before completion
pub static API_REQUESTS_CANCELED_TOTAL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsCanceledTotal,
        "Total number of requests canceled by the client",
        &["name", "type"],
        subsystems::API_REQUESTS,
    )
});
/// Time-to-first-byte distribution across API calls.
// NOTE(review): registered via `new_counter_md` with an `le` label rather than
// `new_histogram_md` — presumably the buckets are exported as cumulative
// counters (Prometheus histogram-bucket style); confirm before changing.
pub static API_REQUESTS_TTFB_SECONDS_DISTRIBUTION_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiRequestsTTFBSecondsDistribution,
        "Distribution of time to first byte across API calls",
        &["name", "type", "le"],
        subsystems::API_REQUESTS,
    )
});
/// Total bytes sent to clients
pub static API_TRAFFIC_SENT_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiTrafficSentBytes,
        "Total number of bytes sent",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
/// Total bytes received from clients
pub static API_TRAFFIC_RECV_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::ApiTrafficRecvBytes,
        "Total number of bytes received",
        &["type"],
        subsystems::API_REQUESTS,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/replication.rs | crates/obs/src/metrics/replication.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Metrics for replication subsystem
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// Average number of active replication workers
pub static REPLICATION_AVERAGE_ACTIVE_WORKERS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationAverageActiveWorkers,
        "Average number of active replication workers",
        &[],
        subsystems::REPLICATION,
    )
});
/// Average bytes queued for replication since server start
pub static REPLICATION_AVERAGE_QUEUED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationAverageQueuedBytes,
        "Average number of bytes queued for replication since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// Average object count queued for replication since server start
pub static REPLICATION_AVERAGE_QUEUED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationAverageQueuedCount,
        "Average number of objects queued for replication since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// Average replication transfer rate (bytes/sec)
pub static REPLICATION_AVERAGE_DATA_TRANSFER_RATE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationAverageDataTransferRate,
        "Average replication data transfer rate in bytes/sec",
        &[],
        subsystems::REPLICATION,
    )
});
/// Currently active replication workers
pub static REPLICATION_CURRENT_ACTIVE_WORKERS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationCurrentActiveWorkers,
        "Total number of active replication workers",
        &[],
        subsystems::REPLICATION,
    )
});
/// Current replication transfer rate (bytes/sec)
pub static REPLICATION_CURRENT_DATA_TRANSFER_RATE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationCurrentDataTransferRate,
        "Current replication data transfer rate in bytes/sec",
        &[],
        subsystems::REPLICATION,
    )
});
/// Bytes queued for replication during the last full minute
pub static REPLICATION_LAST_MINUTE_QUEUED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationLastMinuteQueuedBytes,
        "Number of bytes queued for replication in the last full minute",
        &[],
        subsystems::REPLICATION,
    )
});
/// Objects queued for replication during the last full minute
pub static REPLICATION_LAST_MINUTE_QUEUED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationLastMinuteQueuedCount,
        "Number of objects queued for replication in the last full minute",
        &[],
        subsystems::REPLICATION,
    )
});
/// High-water mark of active replication workers since server start
pub static REPLICATION_MAX_ACTIVE_WORKERS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationMaxActiveWorkers,
        "Maximum number of active replication workers seen since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// High-water mark of queued bytes since server start
pub static REPLICATION_MAX_QUEUED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationMaxQueuedBytes,
        "Maximum number of bytes queued for replication since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// High-water mark of queued object count since server start
pub static REPLICATION_MAX_QUEUED_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationMaxQueuedCount,
        "Maximum number of objects queued for replication since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// High-water mark of transfer rate (bytes/sec) since server start
pub static REPLICATION_MAX_DATA_TRANSFER_RATE_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationMaxDataTransferRate,
        "Maximum replication data transfer rate in bytes/sec seen since server start",
        &[],
        subsystems::REPLICATION,
    )
});
/// Objects observed in the replication backlog over the last 5 minutes
pub static REPLICATION_RECENT_BACKLOG_COUNT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::ReplicationRecentBacklogCount,
        "Total number of objects seen in replication backlog in the last 5 minutes",
        &[],
        subsystems::REPLICATION,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/system_cpu.rs | crates/obs/src/metrics/system_cpu.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use crate::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
/// CPU system-related metric descriptors
use std::sync::LazyLock;
/// Average CPU idle time
pub static SYS_CPU_AVG_IDLE_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUAvgIdle, "Average CPU idle time", &[], subsystems::SYSTEM_CPU));
/// Average CPU IOWait time
pub static SYS_CPU_AVG_IOWAIT_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUAvgIOWait, "Average CPU IOWait time", &[], subsystems::SYSTEM_CPU));
/// 1-minute CPU load average
pub static SYS_CPU_LOAD_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPULoad, "CPU load average 1min", &[], subsystems::SYSTEM_CPU));
/// 1-minute CPU load average expressed as a percentage
pub static SYS_CPU_LOAD_PERC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::SysCPULoadPerc,
        "CPU load average 1min (percentage)",
        &[],
        subsystems::SYSTEM_CPU,
    )
});
/// CPU time spent on nice-priority processes
pub static SYS_CPU_NICE_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUNice, "CPU nice time", &[], subsystems::SYSTEM_CPU));
/// CPU time stolen by the hypervisor
pub static SYS_CPU_STEAL_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUSteal, "CPU steal time", &[], subsystems::SYSTEM_CPU));
/// CPU time spent in kernel space
pub static SYS_CPU_SYSTEM_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUSystem, "CPU system time", &[], subsystems::SYSTEM_CPU));
/// CPU time spent in user space
pub static SYS_CPU_USER_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::SysCPUUser, "CPU user time", &[], subsystems::SYSTEM_CPU));
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/system_drive.rs | crates/obs/src/metrics/system_drive.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
/// Drive-related metric descriptors
use crate::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
use std::sync::LazyLock;
/// drive related labels
pub const DRIVE_LABEL: &str = "drive";
/// pool index label
pub const POOL_INDEX_LABEL: &str = "pool_index";
/// set index label
pub const SET_INDEX_LABEL: &str = "set_index";
/// drive index label
pub const DRIVE_INDEX_LABEL: &str = "drive_index";
/// API label
pub const API_LABEL: &str = "api";
/// All drive-related labels
pub const ALL_DRIVE_LABELS: [&str; 4] = [DRIVE_LABEL, POOL_INDEX_LABEL, SET_INDEX_LABEL, DRIVE_INDEX_LABEL];
/// Storage used on a drive, in bytes
pub static DRIVE_USED_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveUsedBytes,
        "Total storage used on a drive in bytes",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Storage free on a drive, in bytes
pub static DRIVE_FREE_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveFreeBytes,
        "Total storage free on a drive in bytes",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Total storage capacity of a drive, in bytes
pub static DRIVE_TOTAL_BYTES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveTotalBytes,
        "Total storage available on a drive in bytes",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Inodes used on a drive
pub static DRIVE_USED_INODES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveUsedInodes,
        "Total used inodes on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Inodes free on a drive
pub static DRIVE_FREE_INODES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveFreeInodes,
        "Total free inodes on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Total inodes available on a drive
pub static DRIVE_TOTAL_INODES_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveTotalInodes,
        "Total inodes available on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Timeout errors observed on a drive
pub static DRIVE_TIMEOUT_ERRORS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::DriveTimeoutErrorsTotal,
        "Total timeout errors on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// I/O errors observed on a drive
pub static DRIVE_IO_ERRORS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::DriveIOErrorsTotal,
        "Total I/O errors on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Combined availability errors (I/O errors + timeouts) on a drive
pub static DRIVE_AVAILABILITY_ERRORS_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_counter_md(
        MetricName::DriveAvailabilityErrorsTotal,
        "Total availability errors (I/O errors, timeouts) on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// I/O operations currently waiting on a drive
pub static DRIVE_WAITING_IO_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveWaitingIO,
        "Total waiting I/O operations on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Per-API storage-operation latency on a drive (last-minute average, µs)
pub static DRIVE_API_LATENCY_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveAPILatencyMicros,
        "Average last minute latency in µs for drive API storage operations",
        // Drive labels plus the extra `api` label, joined into one owned slice.
        &[&ALL_DRIVE_LABELS[..], &[API_LABEL]].concat(),
        subsystems::SYSTEM_DRIVE,
    )
});
/// Drive health state (0 = offline, 1 = healthy, 2 = healing)
pub static DRIVE_HEALTH_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveHealth,
        "Drive health (0 = offline, 1 = healthy, 2 = healing)",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Count of offline drives (unlabelled node-wide total)
pub static DRIVE_OFFLINE_COUNT_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::DriveOfflineCount, "Count of offline drives", &[], subsystems::SYSTEM_DRIVE));
/// Count of online drives (unlabelled node-wide total)
pub static DRIVE_ONLINE_COUNT_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::DriveOnlineCount, "Count of online drives", &[], subsystems::SYSTEM_DRIVE));
/// Count of all drives (unlabelled node-wide total)
pub static DRIVE_COUNT_MD: LazyLock<MetricDescriptor> =
    LazyLock::new(|| new_gauge_md(MetricName::DriveCount, "Count of all drives", &[], subsystems::SYSTEM_DRIVE));
/// Read operations per second on a drive
pub static DRIVE_READS_PER_SEC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveReadsPerSec,
        "Reads per second on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Kilobytes read per second on a drive
pub static DRIVE_READS_KB_PER_SEC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveReadsKBPerSec,
        "Kilobytes read per second on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Average service time for read requests on a drive
pub static DRIVE_READS_AWAIT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveReadsAwait,
        "Average time for read requests served on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Write operations per second on a drive
pub static DRIVE_WRITES_PER_SEC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveWritesPerSec,
        "Writes per second on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Kilobytes written per second on a drive
pub static DRIVE_WRITES_KB_PER_SEC_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveWritesKBPerSec,
        "Kilobytes written per second on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Average service time for write requests on a drive
pub static DRIVE_WRITES_AWAIT_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DriveWritesAwait,
        "Average time for write requests served on a drive",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
/// Percentage of time the drive was busy
pub static DRIVE_PERC_UTIL_MD: LazyLock<MetricDescriptor> = LazyLock::new(|| {
    new_gauge_md(
        MetricName::DrivePercUtil,
        "Percentage of time the disk was busy",
        &ALL_DRIVE_LABELS[..],
        subsystems::SYSTEM_DRIVE,
    )
});
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/descriptor.rs | crates/obs/src/metrics/entry/descriptor.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{MetricName, MetricNamespace, MetricSubsystem, MetricType};
use std::collections::HashSet;
/// MetricDescriptor - Metric descriptors
///
/// Describes a single metric: its name, type (counter/gauge/...), help text,
/// variable label names, and the namespace/subsystem it is registered under.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct MetricDescriptor {
    /// Metric identifier (enum variant, rendered via `as_str`)
    pub name: MetricName,
    /// Counter, gauge, histogram, etc.
    pub metric_type: MetricType,
    /// Human-readable help text exported with the metric
    pub help: String,
    /// Names of the variable labels this metric carries
    pub variable_labels: Vec<String>,
    /// Top-level namespace the metric belongs to
    pub namespace: MetricNamespace,
    /// Subsystem segment of the full metric name
    pub subsystem: MetricSubsystem,
    // Internal management values
    // Lazily-built set view of `variable_labels`; populated on first
    // `get_label_set` call (see impl below).
    label_set: Option<HashSet<String>>,
}
impl MetricDescriptor {
    /// Create a new metric descriptor.
    ///
    /// `subsystem` accepts anything convertible into [`MetricSubsystem`], so
    /// callers can pass either an enum variant or a `subsystems` constant.
    pub fn new(
        name: MetricName,
        metric_type: MetricType,
        help: String,
        variable_labels: Vec<String>,
        namespace: MetricNamespace,
        subsystem: impl Into<MetricSubsystem>, // Modify the parameter type
    ) -> Self {
        Self {
            name,
            metric_type,
            help,
            variable_labels,
            namespace,
            subsystem: subsystem.into(),
            // Built lazily on first `get_label_set` call.
            label_set: None,
        }
    }
    /// Get the full metric name, including the prefix and formatting path:
    /// `<type prefix><namespace>_<subsystem>_<name>`.
    #[allow(dead_code)]
    pub fn get_full_metric_name(&self) -> String {
        let prefix = self.metric_type.as_prom();
        let namespace = self.namespace.as_str();
        let formatted_subsystem = self.subsystem.as_str();
        format!("{}{}_{}_{}", prefix, namespace, formatted_subsystem, self.name.as_str())
    }
    /// Check whether `label` is one of this descriptor's variable labels.
    ///
    /// Takes `&mut self` because the label set is built lazily on first use.
    #[allow(dead_code)]
    pub fn has_label(&mut self, label: &str) -> bool {
        self.get_label_set().contains(label)
    }
    /// Gets a collection of tags and creates them if they don't exist.
    pub fn get_label_set(&mut self) -> &HashSet<String> {
        if self.label_set.is_none() {
            // Idiomatic one-shot build via `collect` (FromIterator for HashSet)
            // instead of a manual with_capacity + insert loop.
            self.label_set = Some(self.variable_labels.iter().cloned().collect());
        }
        // Invariant: initialized just above when it was None.
        self.label_set.as_ref().expect("label_set initialized above")
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/metric_name.rs | crates/obs/src/metrics/entry/metric_name.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// The metric name is the individual name of the metric — the suffix that is
/// combined with a type prefix, namespace and subsystem to form the full
/// metric name (see `MetricDescriptor::get_full_metric_name`).
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MetricName {
    // The generic metric name
    AuthTotal,
    CanceledTotal,
    ErrorsTotal,
    HeaderTotal,
    HealTotal,
    HitsTotal,
    InflightTotal,
    InvalidTotal,
    LimitTotal,
    MissedTotal,
    WaitingTotal,
    IncomingTotal,
    ObjectTotal,
    VersionTotal,
    DeleteMarkerTotal,
    OfflineTotal,
    OnlineTotal,
    OpenTotal,
    ReadTotal,
    TimestampTotal,
    WriteTotal,
    Total,
    FreeInodes,
    // Failure statistical metrics
    LastMinFailedCount,
    LastMinFailedBytes,
    LastHourFailedCount,
    LastHourFailedBytes,
    TotalFailedCount,
    TotalFailedBytes,
    // Worker metrics
    CurrActiveWorkers,
    AvgActiveWorkers,
    MaxActiveWorkers,
    RecentBacklogCount,
    CurrInQueueCount,
    CurrInQueueBytes,
    ReceivedCount,
    SentCount,
    CurrTransferRate,
    AvgTransferRate,
    MaxTransferRate,
    CredentialErrors,
    // Link latency metrics
    CurrLinkLatency,
    AvgLinkLatency,
    MaxLinkLatency,
    // Link status metrics
    LinkOnline,
    LinkOfflineDuration,
    LinkDowntimeTotalDuration,
    // Queue metrics
    AvgInQueueCount,
    AvgInQueueBytes,
    MaxInQueueCount,
    MaxInQueueBytes,
    // Proxy request metrics
    ProxiedGetRequestsTotal,
    ProxiedHeadRequestsTotal,
    ProxiedPutTaggingRequestsTotal,
    ProxiedGetTaggingRequestsTotal,
    ProxiedDeleteTaggingRequestsTotal,
    ProxiedGetRequestsFailures,
    ProxiedHeadRequestsFailures,
    ProxiedPutTaggingRequestFailures,
    ProxiedGetTaggingRequestFailures,
    ProxiedDeleteTaggingRequestFailures,
    // Byte-related metrics
    FreeBytes,
    ReadBytes,
    RcharBytes,
    ReceivedBytes,
    LatencyMilliSec,
    SentBytes,
    TotalBytes,
    UsedBytes,
    WriteBytes,
    WcharBytes,
    // Latency metrics
    LatencyMicroSec,
    LatencyNanoSec,
    // Information metrics
    CommitInfo,
    UsageInfo,
    VersionInfo,
    // Distribution metrics
    SizeDistribution,
    VersionDistribution,
    TtfbDistribution,
    TtlbDistribution,
    // Time metrics
    LastActivityTime,
    StartTime,
    UpTime,
    Memory,
    Vmemory,
    Cpu,
    // Expiration and conversion metrics
    ExpiryMissedTasks,
    ExpiryMissedFreeVersions,
    ExpiryMissedTierJournalTasks,
    ExpiryNumWorkers,
    TransitionMissedTasks,
    TransitionedBytes,
    TransitionedObjects,
    TransitionedVersions,
    //Tier request metrics
    TierRequestsSuccess,
    TierRequestsFailure,
    // KMS metrics
    KmsOnline,
    KmsRequestsSuccess,
    KmsRequestsError,
    KmsRequestsFail,
    KmsUptime,
    // Webhook metrics
    WebhookOnline,
    // API rejection metrics
    ApiRejectedAuthTotal,
    ApiRejectedHeaderTotal,
    ApiRejectedTimestampTotal,
    ApiRejectedInvalidTotal,
    //API request metrics
    ApiRequestsWaitingTotal,
    ApiRequestsIncomingTotal,
    ApiRequestsInFlightTotal,
    ApiRequestsTotal,
    ApiRequestsErrorsTotal,
    ApiRequests5xxErrorsTotal,
    ApiRequests4xxErrorsTotal,
    ApiRequestsCanceledTotal,
    // API distribution metrics
    ApiRequestsTTFBSecondsDistribution,
    // API traffic metrics
    ApiTrafficSentBytes,
    ApiTrafficRecvBytes,
    // Audit metrics
    AuditFailedMessages,
    AuditTargetQueueLength,
    AuditTotalMessages,
    // Metrics related to cluster configurations
    ConfigRRSParity,
    ConfigStandardParity,
    // Erasure coding set related metrics
    ErasureSetOverallWriteQuorum,
    ErasureSetOverallHealth,
    ErasureSetReadQuorum,
    ErasureSetWriteQuorum,
    ErasureSetOnlineDrivesCount,
    ErasureSetHealingDrivesCount,
    ErasureSetHealth,
    ErasureSetReadTolerance,
    ErasureSetWriteTolerance,
    ErasureSetReadHealth,
    ErasureSetWriteHealth,
    // Cluster health-related metrics
    HealthDrivesOfflineCount,
    HealthDrivesOnlineCount,
    HealthDrivesCount,
    // IAM-related metrics
    LastSyncDurationMillis,
    PluginAuthnServiceFailedRequestsMinute,
    PluginAuthnServiceLastFailSeconds,
    PluginAuthnServiceLastSuccSeconds,
    PluginAuthnServiceSuccAvgRttMsMinute,
    PluginAuthnServiceSuccMaxRttMsMinute,
    PluginAuthnServiceTotalRequestsMinute,
    SinceLastSyncMillis,
    SyncFailures,
    SyncSuccesses,
    // Notify relevant metrics
    NotificationCurrentSendInProgress,
    NotificationEventsErrorsTotal,
    NotificationEventsSentTotal,
    NotificationEventsSkippedTotal,
    // Metrics related to the usage of cluster objects
    UsageSinceLastUpdateSeconds,
    UsageTotalBytes,
    UsageObjectsCount,
    UsageVersionsCount,
    UsageDeleteMarkersCount,
    UsageBucketsCount,
    UsageSizeDistribution,
    UsageVersionCountDistribution,
    // Metrics related to bucket usage
    UsageBucketQuotaTotalBytes,
    UsageBucketTotalBytes,
    UsageBucketObjectsCount,
    UsageBucketVersionsCount,
    UsageBucketDeleteMarkersCount,
    UsageBucketObjectSizeDistribution,
    UsageBucketObjectVersionCountDistribution,
    // ILM-related metrics
    IlmExpiryPendingTasks,
    IlmTransitionActiveTasks,
    IlmTransitionPendingTasks,
    IlmTransitionMissedImmediateTasks,
    IlmVersionsScanned,
    // Webhook logs
    WebhookQueueLength,
    WebhookTotalMessages,
    WebhookFailedMessages,
    // Copy the relevant metrics
    ReplicationAverageActiveWorkers,
    ReplicationAverageQueuedBytes,
    ReplicationAverageQueuedCount,
    ReplicationAverageDataTransferRate,
    ReplicationCurrentActiveWorkers,
    ReplicationCurrentDataTransferRate,
    ReplicationLastMinuteQueuedBytes,
    ReplicationLastMinuteQueuedCount,
    ReplicationMaxActiveWorkers,
    ReplicationMaxQueuedBytes,
    ReplicationMaxQueuedCount,
    ReplicationMaxDataTransferRate,
    ReplicationRecentBacklogCount,
    // Scanner-related metrics
    ScannerBucketScansFinished,
    ScannerBucketScansStarted,
    ScannerDirectoriesScanned,
    ScannerObjectsScanned,
    ScannerVersionsScanned,
    ScannerLastActivitySeconds,
    // CPU system-related metrics
    SysCPUAvgIdle,
    SysCPUAvgIOWait,
    SysCPULoad,
    SysCPULoadPerc,
    SysCPUNice,
    SysCPUSteal,
    SysCPUSystem,
    SysCPUUser,
    // Drive-related metrics
    DriveUsedBytes,
    DriveFreeBytes,
    DriveTotalBytes,
    DriveUsedInodes,
    DriveFreeInodes,
    DriveTotalInodes,
    DriveTimeoutErrorsTotal,
    DriveIOErrorsTotal,
    DriveAvailabilityErrorsTotal,
    DriveWaitingIO,
    DriveAPILatencyMicros,
    DriveHealth,
    DriveOfflineCount,
    DriveOnlineCount,
    DriveCount,
    // iostat related metrics
    DriveReadsPerSec,
    DriveReadsKBPerSec,
    DriveReadsAwait,
    DriveWritesPerSec,
    DriveWritesKBPerSec,
    DriveWritesAwait,
    DrivePercUtil,
    // Memory-related metrics
    MemTotal,
    MemUsed,
    MemUsedPerc,
    MemFree,
    MemBuffers,
    MemCache,
    MemShared,
    MemAvailable,
    // Network-related metrics
    InternodeErrorsTotal,
    InternodeDialErrorsTotal,
    InternodeDialAvgTimeNanos,
    InternodeSentBytesTotal,
    InternodeRecvBytesTotal,
    // Process-related metrics
    ProcessLocksReadTotal,
    ProcessLocksWriteTotal,
    ProcessCPUTotalSeconds,
    ProcessGoRoutineTotal,
    ProcessIORCharBytes,
    ProcessIOReadBytes,
    ProcessIOWCharBytes,
    ProcessIOWriteBytes,
    ProcessStartTimeSeconds,
    ProcessUptimeSeconds,
    ProcessFileDescriptorLimitTotal,
    ProcessFileDescriptorOpenTotal,
    ProcessSyscallReadTotal,
    ProcessSyscallWriteTotal,
    ProcessResidentMemoryBytes,
    ProcessVirtualMemoryBytes,
    ProcessVirtualMemoryMaxBytes,
    // Custom metrics
    // Free-form metric name produced by the `From<String>`/`From<&str>` impls.
    Custom(String),
}
impl MetricName {
    /// Return the metric-name suffix as an owned `String`.
    ///
    /// Note: several variants intentionally map to the same suffix (e.g.
    /// `KmsOnline` and `WebhookOnline` both yield "online"); the full metric
    /// name is disambiguated by the namespace and subsystem when composed in
    /// `MetricDescriptor::get_full_metric_name`.
    #[allow(dead_code)]
    pub fn as_str(&self) -> String {
        match self {
            Self::AuthTotal => "auth_total".to_string(),
            Self::CanceledTotal => "canceled_total".to_string(),
            Self::ErrorsTotal => "errors_total".to_string(),
            Self::HeaderTotal => "header_total".to_string(),
            Self::HealTotal => "heal_total".to_string(),
            Self::HitsTotal => "hits_total".to_string(),
            Self::InflightTotal => "inflight_total".to_string(),
            Self::InvalidTotal => "invalid_total".to_string(),
            Self::LimitTotal => "limit_total".to_string(),
            Self::MissedTotal => "missed_total".to_string(),
            Self::WaitingTotal => "waiting_total".to_string(),
            Self::IncomingTotal => "incoming_total".to_string(),
            Self::ObjectTotal => "object_total".to_string(),
            Self::VersionTotal => "version_total".to_string(),
            Self::DeleteMarkerTotal => "deletemarker_total".to_string(),
            Self::OfflineTotal => "offline_total".to_string(),
            Self::OnlineTotal => "online_total".to_string(),
            Self::OpenTotal => "open_total".to_string(),
            Self::ReadTotal => "read_total".to_string(),
            Self::TimestampTotal => "timestamp_total".to_string(),
            Self::WriteTotal => "write_total".to_string(),
            Self::Total => "total".to_string(),
            Self::FreeInodes => "free_inodes".to_string(),
            Self::LastMinFailedCount => "last_minute_failed_count".to_string(),
            Self::LastMinFailedBytes => "last_minute_failed_bytes".to_string(),
            Self::LastHourFailedCount => "last_hour_failed_count".to_string(),
            Self::LastHourFailedBytes => "last_hour_failed_bytes".to_string(),
            Self::TotalFailedCount => "total_failed_count".to_string(),
            Self::TotalFailedBytes => "total_failed_bytes".to_string(),
            Self::CurrActiveWorkers => "current_active_workers".to_string(),
            Self::AvgActiveWorkers => "average_active_workers".to_string(),
            Self::MaxActiveWorkers => "max_active_workers".to_string(),
            Self::RecentBacklogCount => "recent_backlog_count".to_string(),
            Self::CurrInQueueCount => "last_minute_queued_count".to_string(),
            Self::CurrInQueueBytes => "last_minute_queued_bytes".to_string(),
            Self::ReceivedCount => "received_count".to_string(),
            Self::SentCount => "sent_count".to_string(),
            Self::CurrTransferRate => "current_transfer_rate".to_string(),
            Self::AvgTransferRate => "average_transfer_rate".to_string(),
            Self::MaxTransferRate => "max_transfer_rate".to_string(),
            Self::CredentialErrors => "credential_errors".to_string(),
            Self::CurrLinkLatency => "current_link_latency_ms".to_string(),
            Self::AvgLinkLatency => "average_link_latency_ms".to_string(),
            Self::MaxLinkLatency => "max_link_latency_ms".to_string(),
            Self::LinkOnline => "link_online".to_string(),
            Self::LinkOfflineDuration => "link_offline_duration_seconds".to_string(),
            Self::LinkDowntimeTotalDuration => "link_downtime_duration_seconds".to_string(),
            Self::AvgInQueueCount => "average_queued_count".to_string(),
            Self::AvgInQueueBytes => "average_queued_bytes".to_string(),
            Self::MaxInQueueCount => "max_queued_count".to_string(),
            Self::MaxInQueueBytes => "max_queued_bytes".to_string(),
            Self::ProxiedGetRequestsTotal => "proxied_get_requests_total".to_string(),
            Self::ProxiedHeadRequestsTotal => "proxied_head_requests_total".to_string(),
            Self::ProxiedPutTaggingRequestsTotal => "proxied_put_tagging_requests_total".to_string(),
            Self::ProxiedGetTaggingRequestsTotal => "proxied_get_tagging_requests_total".to_string(),
            Self::ProxiedDeleteTaggingRequestsTotal => "proxied_delete_tagging_requests_total".to_string(),
            Self::ProxiedGetRequestsFailures => "proxied_get_requests_failures".to_string(),
            Self::ProxiedHeadRequestsFailures => "proxied_head_requests_failures".to_string(),
            Self::ProxiedPutTaggingRequestFailures => "proxied_put_tagging_requests_failures".to_string(),
            Self::ProxiedGetTaggingRequestFailures => "proxied_get_tagging_requests_failures".to_string(),
            Self::ProxiedDeleteTaggingRequestFailures => "proxied_delete_tagging_requests_failures".to_string(),
            Self::FreeBytes => "free_bytes".to_string(),
            Self::ReadBytes => "read_bytes".to_string(),
            Self::RcharBytes => "rchar_bytes".to_string(),
            Self::ReceivedBytes => "received_bytes".to_string(),
            Self::LatencyMilliSec => "latency_ms".to_string(),
            Self::SentBytes => "sent_bytes".to_string(),
            Self::TotalBytes => "total_bytes".to_string(),
            Self::UsedBytes => "used_bytes".to_string(),
            Self::WriteBytes => "write_bytes".to_string(),
            Self::WcharBytes => "wchar_bytes".to_string(),
            Self::LatencyMicroSec => "latency_us".to_string(),
            Self::LatencyNanoSec => "latency_ns".to_string(),
            Self::CommitInfo => "commit_info".to_string(),
            Self::UsageInfo => "usage_info".to_string(),
            Self::VersionInfo => "version_info".to_string(),
            Self::SizeDistribution => "size_distribution".to_string(),
            Self::VersionDistribution => "version_distribution".to_string(),
            Self::TtfbDistribution => "seconds_distribution".to_string(),
            Self::TtlbDistribution => "ttlb_seconds_distribution".to_string(),
            Self::LastActivityTime => "last_activity_nano_seconds".to_string(),
            Self::StartTime => "starttime_seconds".to_string(),
            Self::UpTime => "uptime_seconds".to_string(),
            Self::Memory => "resident_memory_bytes".to_string(),
            Self::Vmemory => "virtual_memory_bytes".to_string(),
            Self::Cpu => "cpu_total_seconds".to_string(),
            Self::ExpiryMissedTasks => "expiry_missed_tasks".to_string(),
            Self::ExpiryMissedFreeVersions => "expiry_missed_freeversions".to_string(),
            Self::ExpiryMissedTierJournalTasks => "expiry_missed_tierjournal_tasks".to_string(),
            Self::ExpiryNumWorkers => "expiry_num_workers".to_string(),
            Self::TransitionMissedTasks => "transition_missed_immediate_tasks".to_string(),
            Self::TransitionedBytes => "transitioned_bytes".to_string(),
            Self::TransitionedObjects => "transitioned_objects".to_string(),
            Self::TransitionedVersions => "transitioned_versions".to_string(),
            Self::TierRequestsSuccess => "requests_success".to_string(),
            Self::TierRequestsFailure => "requests_failure".to_string(),
            Self::KmsOnline => "online".to_string(),
            Self::KmsRequestsSuccess => "request_success".to_string(),
            Self::KmsRequestsError => "request_error".to_string(),
            Self::KmsRequestsFail => "request_failure".to_string(),
            Self::KmsUptime => "uptime".to_string(),
            Self::WebhookOnline => "online".to_string(),
            Self::ApiRejectedAuthTotal => "rejected_auth_total".to_string(),
            Self::ApiRejectedHeaderTotal => "rejected_header_total".to_string(),
            Self::ApiRejectedTimestampTotal => "rejected_timestamp_total".to_string(),
            Self::ApiRejectedInvalidTotal => "rejected_invalid_total".to_string(),
            Self::ApiRequestsWaitingTotal => "waiting_total".to_string(),
            Self::ApiRequestsIncomingTotal => "incoming_total".to_string(),
            Self::ApiRequestsInFlightTotal => "inflight_total".to_string(),
            Self::ApiRequestsTotal => "total".to_string(),
            Self::ApiRequestsErrorsTotal => "errors_total".to_string(),
            Self::ApiRequests5xxErrorsTotal => "5xx_errors_total".to_string(),
            Self::ApiRequests4xxErrorsTotal => "4xx_errors_total".to_string(),
            Self::ApiRequestsCanceledTotal => "canceled_total".to_string(),
            Self::ApiRequestsTTFBSecondsDistribution => "ttfb_seconds_distribution".to_string(),
            Self::ApiTrafficSentBytes => "traffic_sent_bytes".to_string(),
            Self::ApiTrafficRecvBytes => "traffic_received_bytes".to_string(),
            Self::AuditFailedMessages => "failed_messages".to_string(),
            Self::AuditTargetQueueLength => "target_queue_length".to_string(),
            Self::AuditTotalMessages => "total_messages".to_string(),
            // metrics related to cluster configurations
            Self::ConfigRRSParity => "rrs_parity".to_string(),
            Self::ConfigStandardParity => "standard_parity".to_string(),
            // Erasure coding set related metrics
            Self::ErasureSetOverallWriteQuorum => "overall_write_quorum".to_string(),
            Self::ErasureSetOverallHealth => "overall_health".to_string(),
            Self::ErasureSetReadQuorum => "read_quorum".to_string(),
            Self::ErasureSetWriteQuorum => "write_quorum".to_string(),
            Self::ErasureSetOnlineDrivesCount => "online_drives_count".to_string(),
            Self::ErasureSetHealingDrivesCount => "healing_drives_count".to_string(),
            Self::ErasureSetHealth => "health".to_string(),
            Self::ErasureSetReadTolerance => "read_tolerance".to_string(),
            Self::ErasureSetWriteTolerance => "write_tolerance".to_string(),
            Self::ErasureSetReadHealth => "read_health".to_string(),
            Self::ErasureSetWriteHealth => "write_health".to_string(),
            // Cluster health-related metrics
            Self::HealthDrivesOfflineCount => "drives_offline_count".to_string(),
            Self::HealthDrivesOnlineCount => "drives_online_count".to_string(),
            Self::HealthDrivesCount => "drives_count".to_string(),
            // IAM-related metrics
            Self::LastSyncDurationMillis => "last_sync_duration_millis".to_string(),
            Self::PluginAuthnServiceFailedRequestsMinute => "plugin_authn_service_failed_requests_minute".to_string(),
            Self::PluginAuthnServiceLastFailSeconds => "plugin_authn_service_last_fail_seconds".to_string(),
            Self::PluginAuthnServiceLastSuccSeconds => "plugin_authn_service_last_succ_seconds".to_string(),
            Self::PluginAuthnServiceSuccAvgRttMsMinute => "plugin_authn_service_succ_avg_rtt_ms_minute".to_string(),
            Self::PluginAuthnServiceSuccMaxRttMsMinute => "plugin_authn_service_succ_max_rtt_ms_minute".to_string(),
            Self::PluginAuthnServiceTotalRequestsMinute => "plugin_authn_service_total_requests_minute".to_string(),
            Self::SinceLastSyncMillis => "since_last_sync_millis".to_string(),
            Self::SyncFailures => "sync_failures".to_string(),
            Self::SyncSuccesses => "sync_successes".to_string(),
            // Notify relevant metrics
            Self::NotificationCurrentSendInProgress => "current_send_in_progress".to_string(),
            Self::NotificationEventsErrorsTotal => "events_errors_total".to_string(),
            Self::NotificationEventsSentTotal => "events_sent_total".to_string(),
            Self::NotificationEventsSkippedTotal => "events_skipped_total".to_string(),
            // Metrics related to the usage of cluster objects
            Self::UsageSinceLastUpdateSeconds => "since_last_update_seconds".to_string(),
            Self::UsageTotalBytes => "total_bytes".to_string(),
            Self::UsageObjectsCount => "count".to_string(),
            Self::UsageVersionsCount => "versions_count".to_string(),
            Self::UsageDeleteMarkersCount => "delete_markers_count".to_string(),
            Self::UsageBucketsCount => "buckets_count".to_string(),
            Self::UsageSizeDistribution => "size_distribution".to_string(),
            Self::UsageVersionCountDistribution => "version_count_distribution".to_string(),
            // Metrics related to bucket usage
            Self::UsageBucketQuotaTotalBytes => "quota_total_bytes".to_string(),
            Self::UsageBucketTotalBytes => "total_bytes".to_string(),
            Self::UsageBucketObjectsCount => "objects_count".to_string(),
            Self::UsageBucketVersionsCount => "versions_count".to_string(),
            Self::UsageBucketDeleteMarkersCount => "delete_markers_count".to_string(),
            Self::UsageBucketObjectSizeDistribution => "object_size_distribution".to_string(),
            Self::UsageBucketObjectVersionCountDistribution => "object_version_count_distribution".to_string(),
            // ILM-related metrics
            Self::IlmExpiryPendingTasks => "expiry_pending_tasks".to_string(),
            Self::IlmTransitionActiveTasks => "transition_active_tasks".to_string(),
            Self::IlmTransitionPendingTasks => "transition_pending_tasks".to_string(),
            Self::IlmTransitionMissedImmediateTasks => "transition_missed_immediate_tasks".to_string(),
            Self::IlmVersionsScanned => "versions_scanned".to_string(),
            // Webhook logs
            Self::WebhookQueueLength => "queue_length".to_string(),
            Self::WebhookTotalMessages => "total_messages".to_string(),
            Self::WebhookFailedMessages => "failed_messages".to_string(),
            // Copy the relevant metrics
            Self::ReplicationAverageActiveWorkers => "average_active_workers".to_string(),
            Self::ReplicationAverageQueuedBytes => "average_queued_bytes".to_string(),
            Self::ReplicationAverageQueuedCount => "average_queued_count".to_string(),
            Self::ReplicationAverageDataTransferRate => "average_data_transfer_rate".to_string(),
            Self::ReplicationCurrentActiveWorkers => "current_active_workers".to_string(),
            Self::ReplicationCurrentDataTransferRate => "current_data_transfer_rate".to_string(),
            Self::ReplicationLastMinuteQueuedBytes => "last_minute_queued_bytes".to_string(),
            Self::ReplicationLastMinuteQueuedCount => "last_minute_queued_count".to_string(),
            Self::ReplicationMaxActiveWorkers => "max_active_workers".to_string(),
            Self::ReplicationMaxQueuedBytes => "max_queued_bytes".to_string(),
            Self::ReplicationMaxQueuedCount => "max_queued_count".to_string(),
            Self::ReplicationMaxDataTransferRate => "max_data_transfer_rate".to_string(),
            Self::ReplicationRecentBacklogCount => "recent_backlog_count".to_string(),
            // Scanner-related metrics
            Self::ScannerBucketScansFinished => "bucket_scans_finished".to_string(),
            Self::ScannerBucketScansStarted => "bucket_scans_started".to_string(),
            Self::ScannerDirectoriesScanned => "directories_scanned".to_string(),
            Self::ScannerObjectsScanned => "objects_scanned".to_string(),
            Self::ScannerVersionsScanned => "versions_scanned".to_string(),
            Self::ScannerLastActivitySeconds => "last_activity_seconds".to_string(),
            // CPU system-related metrics
            Self::SysCPUAvgIdle => "avg_idle".to_string(),
            Self::SysCPUAvgIOWait => "avg_iowait".to_string(),
            Self::SysCPULoad => "load".to_string(),
            Self::SysCPULoadPerc => "load_perc".to_string(),
            Self::SysCPUNice => "nice".to_string(),
            Self::SysCPUSteal => "steal".to_string(),
            Self::SysCPUSystem => "system".to_string(),
            Self::SysCPUUser => "user".to_string(),
            // Drive-related metrics
            Self::DriveUsedBytes => "used_bytes".to_string(),
            Self::DriveFreeBytes => "free_bytes".to_string(),
            Self::DriveTotalBytes => "total_bytes".to_string(),
            Self::DriveUsedInodes => "used_inodes".to_string(),
            Self::DriveFreeInodes => "free_inodes".to_string(),
            Self::DriveTotalInodes => "total_inodes".to_string(),
            Self::DriveTimeoutErrorsTotal => "timeout_errors_total".to_string(),
            Self::DriveIOErrorsTotal => "io_errors_total".to_string(),
            Self::DriveAvailabilityErrorsTotal => "availability_errors_total".to_string(),
            Self::DriveWaitingIO => "waiting_io".to_string(),
            Self::DriveAPILatencyMicros => "api_latency_micros".to_string(),
            Self::DriveHealth => "health".to_string(),
            Self::DriveOfflineCount => "offline_count".to_string(),
            Self::DriveOnlineCount => "online_count".to_string(),
            Self::DriveCount => "count".to_string(),
            // iostat related metrics
            Self::DriveReadsPerSec => "reads_per_sec".to_string(),
            Self::DriveReadsKBPerSec => "reads_kb_per_sec".to_string(),
            Self::DriveReadsAwait => "reads_await".to_string(),
            Self::DriveWritesPerSec => "writes_per_sec".to_string(),
            Self::DriveWritesKBPerSec => "writes_kb_per_sec".to_string(),
            Self::DriveWritesAwait => "writes_await".to_string(),
            Self::DrivePercUtil => "perc_util".to_string(),
            // Memory-related metrics
            Self::MemTotal => "total".to_string(),
            Self::MemUsed => "used".to_string(),
            Self::MemUsedPerc => "used_perc".to_string(),
            Self::MemFree => "free".to_string(),
            Self::MemBuffers => "buffers".to_string(),
            Self::MemCache => "cache".to_string(),
            Self::MemShared => "shared".to_string(),
            Self::MemAvailable => "available".to_string(),
            // Network-related metrics
            Self::InternodeErrorsTotal => "errors_total".to_string(),
            Self::InternodeDialErrorsTotal => "dial_errors_total".to_string(),
            Self::InternodeDialAvgTimeNanos => "dial_avg_time_nanos".to_string(),
            Self::InternodeSentBytesTotal => "sent_bytes_total".to_string(),
            Self::InternodeRecvBytesTotal => "recv_bytes_total".to_string(),
            // Process-related metrics
            Self::ProcessLocksReadTotal => "locks_read_total".to_string(),
            Self::ProcessLocksWriteTotal => "locks_write_total".to_string(),
            Self::ProcessCPUTotalSeconds => "cpu_total_seconds".to_string(),
            Self::ProcessGoRoutineTotal => "go_routine_total".to_string(),
            Self::ProcessIORCharBytes => "io_rchar_bytes".to_string(),
            Self::ProcessIOReadBytes => "io_read_bytes".to_string(),
            Self::ProcessIOWCharBytes => "io_wchar_bytes".to_string(),
            Self::ProcessIOWriteBytes => "io_write_bytes".to_string(),
            Self::ProcessStartTimeSeconds => "start_time_seconds".to_string(),
            Self::ProcessUptimeSeconds => "uptime_seconds".to_string(),
            Self::ProcessFileDescriptorLimitTotal => "file_descriptor_limit_total".to_string(),
            Self::ProcessFileDescriptorOpenTotal => "file_descriptor_open_total".to_string(),
            Self::ProcessSyscallReadTotal => "syscall_read_total".to_string(),
            Self::ProcessSyscallWriteTotal => "syscall_write_total".to_string(),
            Self::ProcessResidentMemoryBytes => "resident_memory_bytes".to_string(),
            Self::ProcessVirtualMemoryBytes => "virtual_memory_bytes".to_string(),
            Self::ProcessVirtualMemoryMaxBytes => "virtual_memory_max_bytes".to_string(),
            // Custom names are returned verbatim.
            Self::Custom(name) => name.clone(),
        }
    }
}
impl From<String> for MetricName {
fn from(s: String) -> Self {
Self::Custom(s)
}
}
impl From<&str> for MetricName {
fn from(s: &str) -> Self {
Self::Custom(s.to_string())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/path_utils.rs | crates/obs/src/metrics/entry/path_utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Convert a subsystem path into a metric-name fragment.
///
/// Leading '/' characters are stripped, then every remaining '/' and '-'
/// is mapped to '_' (e.g. "/bucket-api" -> "bucket_api").
#[allow(dead_code)]
pub fn format_path_to_metric_name(path: &str) -> String {
    path.trim_start_matches('/')
        .chars()
        .map(|ch| match ch {
            '/' | '-' => '_',
            other => other,
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Verify that the leading '/' is stripped and both '/' and '-' are
    /// replaced by '_'; a path without a leading slash is formatted as-is.
    #[test]
    fn test_format_path_to_metric_name() {
        assert_eq!(format_path_to_metric_name("/api/requests"), "api_requests");
        assert_eq!(format_path_to_metric_name("/system/network/internode"), "system_network_internode");
        assert_eq!(format_path_to_metric_name("/bucket-api"), "bucket_api");
        assert_eq!(format_path_to_metric_name("cluster/health"), "cluster_health");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/mod.rs | crates/obs/src/metrics/entry/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{MetricDescriptor, MetricName, MetricNamespace, MetricSubsystem, MetricType};
pub(crate) mod descriptor;
pub(crate) mod metric_name;
pub(crate) mod metric_type;
pub(crate) mod namespace;
mod path_utils;
pub(crate) mod subsystem;
/// Create a new counter metric descriptor in the RustFS namespace.
///
/// `labels` are the variable label names the metric may carry;
/// `subsystem` accepts anything convertible into a `MetricSubsystem`.
pub fn new_counter_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    let variable_labels: Vec<String> = labels.iter().map(|label| (*label).to_owned()).collect();
    MetricDescriptor::new(
        name.into(),
        MetricType::Counter,
        help.into(),
        variable_labels,
        MetricNamespace::RustFS,
        subsystem,
    )
}
/// Create a new gauge metric descriptor in the RustFS namespace.
///
/// `labels` are the variable label names the metric may carry;
/// `subsystem` accepts anything convertible into a `MetricSubsystem`.
pub fn new_gauge_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    let variable_labels: Vec<String> = labels.iter().map(|label| (*label).to_owned()).collect();
    MetricDescriptor::new(
        name.into(),
        MetricType::Gauge,
        help.into(),
        variable_labels,
        MetricNamespace::RustFS,
        subsystem,
    )
}
/// Create a new histogram metric descriptor in the RustFS namespace.
///
/// `labels` are the variable label names the metric may carry;
/// `subsystem` accepts anything convertible into a `MetricSubsystem`.
#[allow(dead_code)]
pub fn new_histogram_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    let variable_labels: Vec<String> = labels.iter().map(|label| (*label).to_owned()).collect();
    MetricDescriptor::new(
        name.into(),
        MetricType::Histogram,
        help.into(),
        variable_labels,
        MetricNamespace::RustFS,
        subsystem,
    )
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::subsystems;
    /// End-to-end check of `new_histogram_md`: fields, label propagation,
    /// and full-name composition for both predefined and custom subsystems.
    #[test]
    fn test_new_histogram_md() {
        // create a histogram indicator descriptor
        let histogram_md = new_histogram_md(
            MetricName::TtfbDistribution,
            "test the response time distribution",
            &["api", "method", "le"],
            subsystems::API_REQUESTS,
        );
        // verify that the metric type is correct
        assert_eq!(histogram_md.metric_type, MetricType::Histogram);
        // verify that the metric name is correct
        assert_eq!(histogram_md.name.as_str(), "seconds_distribution");
        // verify that the help information is correct
        assert_eq!(histogram_md.help, "test the response time distribution");
        // Verify that the label is correct
        assert_eq!(histogram_md.variable_labels.len(), 3);
        assert!(histogram_md.variable_labels.contains(&"api".to_string()));
        assert!(histogram_md.variable_labels.contains(&"method".to_string()));
        assert!(histogram_md.variable_labels.contains(&"le".to_string()));
        // Verify that the namespace is correct
        assert_eq!(histogram_md.namespace, MetricNamespace::RustFS);
        // Verify that the subsystem is correct
        assert_eq!(histogram_md.subsystem, MetricSubsystem::ApiRequests);
        // Verify that the full metric name generated is formatted correctly
        assert_eq!(histogram_md.get_full_metric_name(), "histogram.rustfs_api_requests_seconds_distribution");
        // Tests use custom subsystems
        let custom_histogram_md = new_histogram_md(
            "custom_latency_distribution",
            "custom latency distribution",
            &["endpoint", "le"],
            MetricSubsystem::new("/custom/path-metrics"),
        );
        // Verify the custom name and subsystem ('/' and '-' become '_')
        assert_eq!(
            custom_histogram_md.get_full_metric_name(),
            "histogram.rustfs_custom_path_metrics_custom_latency_distribution"
        );
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/metric_type.rs | crates/obs/src/metrics/entry/metric_type.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// MetricType - Indicates the type of metric (counter, gauge or histogram).
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MetricType {
    Counter,
    Gauge,
    Histogram,
}
impl MetricType {
    /// Convert the metric type to its plain string representation.
    #[allow(dead_code)]
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Counter => "counter",
            Self::Gauge => "gauge",
            Self::Histogram => "histogram",
        }
    }
    /// Return the type as a dotted prefix (e.g. "counter.") used when
    /// composing the full metric name in `MetricDescriptor::get_full_metric_name`.
    #[allow(dead_code)]
    pub fn as_prom(&self) -> &'static str {
        match self {
            Self::Counter => "counter.",
            Self::Gauge => "gauge.",
            Self::Histogram => "histogram.",
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/namespace.rs | crates/obs/src/metrics/entry/namespace.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// The metric namespace, which represents the top-level grouping of the metric
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MetricNamespace {
    /// All RustFS metrics live under the "rustfs" namespace.
    RustFS,
}
impl MetricNamespace {
    /// Return the lowercase string form used when composing full metric names.
    #[allow(dead_code)]
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::RustFS => "rustfs",
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/src/metrics/entry/subsystem.rs | crates/obs/src/metrics/entry/subsystem.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::entry::path_utils::format_path_to_metric_name;
/// The metrics subsystem, which represents a subgroup of metrics within a namespace
/// (e.g. "/bucket/api" under the "rustfs" namespace).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum MetricSubsystem {
    // API related subsystems
    ApiRequests,
    // bucket related subsystems
    BucketApi,
    BucketReplication,
    // system related subsystems
    SystemNetworkInternode,
    SystemDrive,
    SystemMemory,
    SystemCpu,
    SystemProcess,
    // debug related subsystems
    DebugGo,
    // cluster related subsystems
    ClusterHealth,
    ClusterUsageObjects,
    ClusterUsageBuckets,
    ClusterErasureSet,
    ClusterIam,
    ClusterConfig,
    // other service related subsystems
    Ilm,
    Audit,
    LoggerWebhook,
    Replication,
    Notification,
    Scanner,
    // Custom paths not covered by the variants above; stores the raw path string.
    Custom(String),
}
impl MetricSubsystem {
/// Gets the original path string
pub fn path(&self) -> &str {
match self {
// api related subsystems
Self::ApiRequests => "/api/requests",
// bucket related subsystems
Self::BucketApi => "/bucket/api",
Self::BucketReplication => "/bucket/replication",
// system related subsystems
Self::SystemNetworkInternode => "/system/network/internode",
Self::SystemDrive => "/system/drive",
Self::SystemMemory => "/system/memory",
Self::SystemCpu => "/system/cpu",
Self::SystemProcess => "/system/process",
// debug related subsystems
Self::DebugGo => "/debug/go",
// cluster related subsystems
Self::ClusterHealth => "/cluster/health",
Self::ClusterUsageObjects => "/cluster/usage/objects",
Self::ClusterUsageBuckets => "/cluster/usage/buckets",
Self::ClusterErasureSet => "/cluster/erasure-set",
Self::ClusterIam => "/cluster/iam",
Self::ClusterConfig => "/cluster/config",
// other service related subsystems
Self::Ilm => "/ilm",
Self::Audit => "/audit",
Self::LoggerWebhook => "/logger/webhook",
Self::Replication => "/replication",
Self::Notification => "/notification",
Self::Scanner => "/scanner",
// Custom paths
Self::Custom(path) => path,
}
}
/// Get the formatted metric name format string
#[allow(dead_code)]
pub fn as_str(&self) -> String {
format_path_to_metric_name(self.path())
}
/// Create a subsystem enumeration from a path string
pub fn from_path(path: &str) -> Self {
match path {
// API-related subsystems
"/api/requests" => Self::ApiRequests,
// Bucket-related subsystems
"/bucket/api" => Self::BucketApi,
"/bucket/replication" => Self::BucketReplication,
// System-related subsystems
"/system/network/internode" => Self::SystemNetworkInternode,
"/system/drive" => Self::SystemDrive,
"/system/memory" => Self::SystemMemory,
"/system/cpu" => Self::SystemCpu,
"/system/process" => Self::SystemProcess,
// Debug related subsystems
"/debug/go" => Self::DebugGo,
// Cluster-related subsystems
"/cluster/health" => Self::ClusterHealth,
"/cluster/usage/objects" => Self::ClusterUsageObjects,
"/cluster/usage/buckets" => Self::ClusterUsageBuckets,
"/cluster/erasure-set" => Self::ClusterErasureSet,
"/cluster/iam" => Self::ClusterIam,
"/cluster/config" => Self::ClusterConfig,
// Other service-related subsystems
"/ilm" => Self::Ilm,
"/audit" => Self::Audit,
"/logger/webhook" => Self::LoggerWebhook,
"/replication" => Self::Replication,
"/notification" => Self::Notification,
"/scanner" => Self::Scanner,
// Treat other paths as custom subsystems
_ => Self::Custom(path.to_string()),
}
}
/// A convenient way to create custom subsystems directly
#[allow(dead_code)]
pub fn new(path: impl Into<String>) -> Self {
Self::Custom(path.into())
}
}
/// Implementations that facilitate conversion to and from strings
impl From<&str> for MetricSubsystem {
fn from(s: &str) -> Self {
Self::from_path(s)
}
}
impl From<String> for MetricSubsystem {
    fn from(s: String) -> Self {
        // `from_path` only needs a borrow, so hand it the string slice.
        Self::from_path(s.as_str())
    }
}
impl std::fmt::Display for MetricSubsystem {
    /// Renders the canonical path form (e.g. "/bucket/api").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.path())
    }
}
/// Convenience constants so call sites can write `subsystems::BUCKET_API`
/// instead of spelling out the enum variant.
#[allow(dead_code)]
pub mod subsystems {
    use super::MetricSubsystem;

    // cluster base path constant
    pub const CLUSTER_BASE_PATH: &str = "/cluster";

    // Quick access to constants for each subsystem
    pub const API_REQUESTS: MetricSubsystem = MetricSubsystem::ApiRequests;
    pub const BUCKET_API: MetricSubsystem = MetricSubsystem::BucketApi;
    pub const BUCKET_REPLICATION: MetricSubsystem = MetricSubsystem::BucketReplication;
    pub const SYSTEM_NETWORK_INTERNODE: MetricSubsystem = MetricSubsystem::SystemNetworkInternode;
    pub const SYSTEM_DRIVE: MetricSubsystem = MetricSubsystem::SystemDrive;
    pub const SYSTEM_MEMORY: MetricSubsystem = MetricSubsystem::SystemMemory;
    pub const SYSTEM_CPU: MetricSubsystem = MetricSubsystem::SystemCpu;
    pub const SYSTEM_PROCESS: MetricSubsystem = MetricSubsystem::SystemProcess;
    pub const DEBUG_GO: MetricSubsystem = MetricSubsystem::DebugGo;
    pub const CLUSTER_HEALTH: MetricSubsystem = MetricSubsystem::ClusterHealth;
    pub const CLUSTER_USAGE_OBJECTS: MetricSubsystem = MetricSubsystem::ClusterUsageObjects;
    pub const CLUSTER_USAGE_BUCKETS: MetricSubsystem = MetricSubsystem::ClusterUsageBuckets;
    pub const CLUSTER_ERASURE_SET: MetricSubsystem = MetricSubsystem::ClusterErasureSet;
    pub const CLUSTER_IAM: MetricSubsystem = MetricSubsystem::ClusterIam;
    pub const CLUSTER_CONFIG: MetricSubsystem = MetricSubsystem::ClusterConfig;
    pub const ILM: MetricSubsystem = MetricSubsystem::Ilm;
    pub const AUDIT: MetricSubsystem = MetricSubsystem::Audit;
    pub const LOGGER_WEBHOOK: MetricSubsystem = MetricSubsystem::LoggerWebhook;
    pub const REPLICATION: MetricSubsystem = MetricSubsystem::Replication;
    pub const NOTIFICATION: MetricSubsystem = MetricSubsystem::Notification;
    pub const SCANNER: MetricSubsystem = MetricSubsystem::Scanner;
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::MetricType;
    use crate::{MetricDescriptor, MetricName, MetricNamespace};

    // Verifies path -> metric-name normalization for built-in and custom subsystems.
    #[test]
    fn test_metric_subsystem_formatting() {
        assert_eq!(MetricSubsystem::ApiRequests.as_str(), "api_requests");
        assert_eq!(MetricSubsystem::SystemNetworkInternode.as_str(), "system_network_internode");
        assert_eq!(MetricSubsystem::BucketApi.as_str(), "bucket_api");
        assert_eq!(MetricSubsystem::ClusterHealth.as_str(), "cluster_health");
        // Test custom paths
        let custom = MetricSubsystem::new("/custom/path-test");
        assert_eq!(custom.as_str(), "custom_path_test");
    }

    // Full metric names combine type prefix, namespace, subsystem and metric name.
    #[test]
    fn test_metric_descriptor_name_generation() {
        let md = MetricDescriptor::new(
            MetricName::ApiRequestsTotal,
            MetricType::Counter,
            "Test help".to_string(),
            vec!["label1".to_string(), "label2".to_string()],
            MetricNamespace::RustFS,
            MetricSubsystem::ApiRequests,
        );
        assert_eq!(md.get_full_metric_name(), "counter.rustfs_api_requests_total");
        let custom_md = MetricDescriptor::new(
            MetricName::Custom("test_metric".to_string()),
            MetricType::Gauge,
            "Test help".to_string(),
            vec!["label1".to_string()],
            MetricNamespace::RustFS,
            MetricSubsystem::new("/custom/path-with-dash"),
        );
        assert_eq!(custom_md.get_full_metric_name(), "gauge.rustfs_custom_path_with_dash_test_metric");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/obs/examples/server.rs | crates/obs/examples/server.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opentelemetry::global;
use rustfs_obs::{SystemObserver, init_obs};
use std::time::{Duration, SystemTime};
use tracing::{Level, error, info, instrument};
#[tokio::main]
async fn main() {
    // OTLP endpoint of the local collector.
    let endpoint = Some("http://localhost:4317".to_string());
    // Bind the telemetry guard to a named variable so it lives for the whole
    // program (a bare `_` would drop it immediately).
    let _guard = init_obs(endpoint).await;

    let root_span = tracing::span!(Level::INFO, "main");
    let _entered = root_span.enter();

    info!("Program starts");
    // Simulate the operation
    tokio::time::sleep(Duration::from_millis(100)).await;
    run("service-demo".to_string()).await;
    info!("Program ends");
}
#[instrument(fields(bucket, object, user))]
async fn run(service_name: String) {
let start_time = SystemTime::now();
info!("Log module initialization is completed service_name: {:?}", service_name);
// Record Metrics
let meter = global::meter("rustfs");
let request_duration = meter.f64_histogram("s3_request_duration_seconds").build();
request_duration.record(
start_time.elapsed().unwrap().as_secs_f64(),
&[opentelemetry::KeyValue::new("operation", "run")],
);
match SystemObserver::init_process_observer().await {
Ok(_) => info!("Process observer initialized successfully"),
Err(e) => error!("Failed to initialize process observer: {:?}", e),
}
put_object("bucket".to_string(), "object".to_string(), "user".to_string()).await;
info!("Logging is completed");
tokio::time::sleep(Duration::from_secs(2)).await;
info!("Program run ends");
}
#[instrument(fields(bucket, object, user))]
async fn put_object(bucket: String, object: String, user: String) {
    let started = SystemTime::now();
    info!("Starting put_object operation time: {:?}", started);

    // Record the request-duration metric for this operation.
    let meter = global::meter("rustfs");
    let duration_histogram = meter.f64_histogram("s3_request_duration_seconds").build();
    duration_histogram.record(
        started.elapsed().unwrap().as_secs_f64(),
        &[opentelemetry::KeyValue::new("operation", "put_object")],
    );

    info!(
        "Starting PUT operation content: bucket = {}, object = {}, user = {},start_time = {}",
        bucket,
        object,
        user,
        started.elapsed().unwrap().as_secs_f64()
    );
    // Simulate the operation
    tokio::time::sleep(Duration::from_millis(100)).await;
    info!("PUT operation completed");
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/readiness.rs | crates/common/src/readiness.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::atomic::{AtomicU8, Ordering};
/// Represents the various stages of system startup
#[repr(u8)]
pub enum SystemStage {
    Booting = 0,
    StorageReady = 1, // Disks online, Quorum met
    IamReady = 2,     // Users and Policies loaded into cache
    FullReady = 3,    // System ready to serve all traffic
}

/// Global readiness tracker for the service.
///
/// Progress is stored as a single atomic `u8` holding the numeric value of
/// the highest `SystemStage` reached, so reads and updates are lock-free and
/// thread-safe.
pub struct GlobalReadiness {
    status: AtomicU8,
}

impl Default for GlobalReadiness {
    fn default() -> Self {
        GlobalReadiness::new()
    }
}

impl GlobalReadiness {
    /// Create a tracker starting at `SystemStage::Booting`.
    pub fn new() -> Self {
        GlobalReadiness {
            status: AtomicU8::new(SystemStage::Booting as u8),
        }
    }

    /// Record that `step` has been reached.
    ///
    /// `fetch_max` guarantees the stored stage never moves backwards, even
    /// when several threads report stages concurrently.
    pub fn mark_stage(&self, step: SystemStage) {
        self.status.fetch_max(step as u8, Ordering::SeqCst);
    }

    /// `true` once `SystemStage::FullReady` has been recorded.
    pub fn is_ready(&self) -> bool {
        self.status.load(Ordering::SeqCst) == SystemStage::FullReady as u8
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    // A fresh tracker starts in `Booting` and is not ready.
    #[test]
    fn test_initial_state() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::Booting as u8);
    }

    // Stages advance one by one; only the final stage flips `is_ready`.
    #[test]
    fn test_mark_stage_progression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::StorageReady as u8);
        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::IamReady as u8);
        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }

    // Marking an earlier stage after FullReady must not move the state back.
    #[test]
    fn test_no_regression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::FullReady);
        readiness.mark_stage(SystemStage::IamReady); // Should not regress
        assert!(readiness.is_ready());
    }

    // Many threads racing through the stages still converge on FullReady.
    #[test]
    fn test_concurrent_marking() {
        let readiness = Arc::new(GlobalReadiness::new());
        let mut handles = vec![];
        for _ in 0..10 {
            let r = Arc::clone(&readiness);
            handles.push(thread::spawn(move || {
                r.mark_stage(SystemStage::StorageReady);
                r.mark_stage(SystemStage::IamReady);
                r.mark_stage(SystemStage::FullReady);
            }));
        }
        for h in handles {
            h.join().unwrap();
        }
        assert!(readiness.is_ready());
    }

    // `is_ready` is false for every stage short of FullReady.
    #[test]
    fn test_is_ready_only_at_full_ready() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());
        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());
        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());
        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/last_minute.rs | crates/common/src/last_minute.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::{Duration, SystemTime, UNIX_EPOCH};
#[allow(dead_code)]
#[derive(Debug, Default)]
struct TimedAction {
    // Number of recorded actions.
    count: u64,
    // Accumulated time spent, in nanoseconds (see `avg`'s `from_nanos`).
    acc_time: u64,
    // Fastest single action; `None` until a sample is recorded.
    min_time: Option<u64>,
    // Slowest single action; `None` until a sample is recorded.
    max_time: Option<u64>,
    // Total bytes processed across all actions.
    bytes: u64,
}

#[allow(dead_code)]
impl TimedAction {
    // Avg returns the average time spent on the action, or `None` when
    // nothing has been recorded.
    pub fn avg(&self) -> Option<Duration> {
        if self.count == 0 {
            return None;
        }
        Some(Duration::from_nanos(self.acc_time / self.count))
    }

    // AvgBytes returns the average bytes processed (0 when empty).
    pub fn avg_bytes(&self) -> u64 {
        if self.count == 0 {
            return 0;
        }
        self.bytes / self.count
    }

    // Merge other into t: sum the totals and widen the min/max extrema.
    //
    // Fix: the previous version checked `self.count == 0` *after* already
    // adding `other.count`, an order-dependent dead branch that could clobber
    // `min_time`. The extrema are now combined explicitly: when both sides
    // have a value take the min/max, otherwise take whichever side has one.
    pub fn merge(&mut self, other: TimedAction) {
        self.count += other.count;
        self.acc_time += other.acc_time;
        self.bytes += other.bytes;
        self.min_time = match (self.min_time, other.min_time) {
            (Some(a), Some(b)) => Some(a.min(b)),
            (a, b) => a.or(b),
        };
        self.max_time = match (self.max_time, other.max_time) {
            (Some(a), Some(b)) => Some(a.max(b)),
            (a, b) => a.or(b),
        };
    }
}
/// Size buckets used to classify object sizes (see `size_to_tag`).
#[allow(dead_code)]
#[derive(Debug)]
enum SizeCategory {
    SizeLessThan1KiB = 0,
    SizeLessThan1MiB,
    SizeLessThan10MiB,
    SizeLessThan100MiB,
    SizeLessThan1GiB,
    SizeGreaterThan1GiB,
    // Add new entries here
    SizeLastElemMarker,
}

impl std::fmt::Display for SizeCategory {
    /// Writes the variant's exact identifier name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            SizeCategory::SizeLessThan1KiB => "SizeLessThan1KiB",
            SizeCategory::SizeLessThan1MiB => "SizeLessThan1MiB",
            SizeCategory::SizeLessThan10MiB => "SizeLessThan10MiB",
            SizeCategory::SizeLessThan100MiB => "SizeLessThan100MiB",
            SizeCategory::SizeLessThan1GiB => "SizeLessThan1GiB",
            SizeCategory::SizeGreaterThan1GiB => "SizeGreaterThan1GiB",
            SizeCategory::SizeLastElemMarker => "SizeLastElemMarker",
        })
    }
}
/// One bucket of accumulated samples: total whole seconds, total bytes and
/// sample count. All arithmetic wraps to avoid overflow panics.
#[derive(Clone, Debug, Default, Copy)]
pub struct AccElem {
    /// Accumulated duration, in whole seconds.
    pub total: u64,
    /// Accumulated payload size, in bytes.
    pub size: u64,
    /// Number of samples folded in.
    pub n: u64,
}

impl AccElem {
    /// Record one duration sample.
    ///
    /// Note: `as_secs()` truncates sub-second durations to 0 seconds.
    pub fn add(&mut self, dur: &Duration) {
        let secs = dur.as_secs();
        self.total = self.total.wrapping_add(secs);
        self.n = self.n.wrapping_add(1);
    }

    /// Fold another bucket into this one (component-wise wrapping sums).
    pub fn merge(&mut self, b: &AccElem) {
        self.n = self.n.wrapping_add(b.n);
        self.total = self.total.wrapping_add(b.total);
        self.size = self.size.wrapping_add(b.size);
    }

    /// Average duration per sample (integer seconds); zero when empty.
    pub fn avg(&self) -> Duration {
        if self.n >= 1 && self.total > 0 {
            return Duration::from_secs(self.total / self.n);
        }
        Duration::from_secs(0)
    }
}

/// Sliding one-minute window of `AccElem` buckets, one per wall-clock second
/// (slot index is `sec % 60`). `last_sec` is the newest second accounted for.
#[derive(Clone, Debug)]
pub struct LastMinuteLatency {
    pub totals: Vec<AccElem>,
    pub last_sec: u64,
}

impl Default for LastMinuteLatency {
    fn default() -> Self {
        Self {
            totals: vec![AccElem::default(); 60],
            last_sec: Default::default(),
        }
    }
}

impl LastMinuteLatency {
    /// Merge two windows into a new one aligned to the later `last_sec`.
    ///
    /// Whichever side is older is first forwarded to the newer timestamp so
    /// its expired slots are dropped before summing.
    pub fn merge(&mut self, o: &LastMinuteLatency) -> LastMinuteLatency {
        let mut merged = LastMinuteLatency::default();
        // Work on a copy of `o` so the borrowed argument is never mutated.
        let mut x = o.clone();
        if self.last_sec > x.last_sec {
            x.forward_to(self.last_sec);
            merged.last_sec = self.last_sec;
        } else {
            self.forward_to(x.last_sec);
            merged.last_sec = x.last_sec;
        }
        for i in 0..merged.totals.len() {
            // Bug fix: sum the *forwarded* copy `x`, not the stale `o`;
            // otherwise slots that `forward_to` just expired leak back in.
            let mut slot = self.totals[i];
            slot.merge(&x.totals[i]);
            merged.totals[i] = slot;
        }
        merged
    }

    /// Record one duration sample at the current wall-clock second.
    pub fn add(&mut self, t: &Duration) {
        let sec = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        self.forward_to(sec);
        let win_idx = sec % 60;
        self.totals[win_idx as usize].add(t);
        self.last_sec = sec;
    }

    /// Merge a pre-aggregated bucket into the slot for second `sec`.
    pub fn add_all(&mut self, sec: u64, a: &AccElem) {
        self.forward_to(sec);
        let win_idx = sec % 60;
        self.totals[win_idx as usize].merge(a);
        self.last_sec = sec;
    }

    /// Sum of every live slot after expiring entries older than one minute.
    pub fn get_total(&mut self) -> AccElem {
        let mut res = AccElem::default();
        let sec = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        self.forward_to(sec);
        for elem in self.totals.iter() {
            res.merge(elem);
        }
        res
    }

    /// Advance the window to second `t`, clearing slots for the seconds that
    /// were skipped. A jump of >= 60s clears the whole window; moving
    /// backwards (or to the same second) is a no-op.
    pub fn forward_to(&mut self, t: u64) {
        if self.last_sec >= t {
            return;
        }
        if t - self.last_sec >= 60 {
            self.totals = vec![AccElem::default(); 60];
            self.last_sec = t;
            return;
        }
        while self.last_sec != t {
            let idx = (self.last_sec + 1) % 60;
            self.totals[idx as usize] = AccElem::default();
            self.last_sec += 1;
        }
    }
}
#[cfg(test)]
mod tests {
    // Unit tests for `AccElem` accumulation and the `LastMinuteLatency`
    // rolling one-minute window (forwarding, merging, wraparound).
    use super::*;
    use std::time::Duration;

    #[test]
    fn test_acc_elem_default() {
        let elem = AccElem::default();
        assert_eq!(elem.total, 0);
        assert_eq!(elem.size, 0);
        assert_eq!(elem.n, 0);
    }

    #[test]
    fn test_acc_elem_add_single_duration() {
        let mut elem = AccElem::default();
        let duration = Duration::from_secs(5);
        elem.add(&duration);
        assert_eq!(elem.total, 5);
        assert_eq!(elem.n, 1);
        assert_eq!(elem.size, 0); // size is not modified by add
    }

    #[test]
    fn test_acc_elem_add_multiple_durations() {
        let mut elem = AccElem::default();
        elem.add(&Duration::from_secs(3));
        elem.add(&Duration::from_secs(7));
        elem.add(&Duration::from_secs(2));
        assert_eq!(elem.total, 12);
        assert_eq!(elem.n, 3);
        assert_eq!(elem.size, 0);
    }

    #[test]
    fn test_acc_elem_add_zero_duration() {
        let mut elem = AccElem::default();
        let duration = Duration::from_secs(0);
        elem.add(&duration);
        assert_eq!(elem.total, 0);
        assert_eq!(elem.n, 1);
    }

    #[test]
    fn test_acc_elem_add_subsecond_duration() {
        let mut elem = AccElem::default();
        // Duration less than 1 second should be truncated to 0
        let duration = Duration::from_millis(500);
        elem.add(&duration);
        assert_eq!(elem.total, 0); // as_secs() truncates subsecond values
        assert_eq!(elem.n, 1);
    }

    #[test]
    fn test_acc_elem_merge_empty_elements() {
        let mut elem1 = AccElem::default();
        let elem2 = AccElem::default();
        elem1.merge(&elem2);
        assert_eq!(elem1.total, 0);
        assert_eq!(elem1.size, 0);
        assert_eq!(elem1.n, 0);
    }

    #[test]
    fn test_acc_elem_merge_with_data() {
        let mut elem1 = AccElem {
            total: 10,
            size: 100,
            n: 2,
        };
        let elem2 = AccElem {
            total: 15,
            size: 200,
            n: 3,
        };
        elem1.merge(&elem2);
        assert_eq!(elem1.total, 25);
        assert_eq!(elem1.size, 300);
        assert_eq!(elem1.n, 5);
    }

    #[test]
    fn test_acc_elem_merge_one_empty() {
        let mut elem1 = AccElem {
            total: 10,
            size: 100,
            n: 2,
        };
        let elem2 = AccElem::default();
        elem1.merge(&elem2);
        assert_eq!(elem1.total, 10);
        assert_eq!(elem1.size, 100);
        assert_eq!(elem1.n, 2);
    }

    #[test]
    fn test_acc_elem_avg_with_data() {
        let elem = AccElem {
            total: 15,
            size: 0,
            n: 3,
        };
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(5)); // 15 / 3 = 5
    }

    #[test]
    fn test_acc_elem_avg_zero_count() {
        let elem = AccElem {
            total: 10,
            size: 0,
            n: 0,
        };
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_zero_total() {
        let elem = AccElem { total: 0, size: 0, n: 5 };
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_rounding() {
        let elem = AccElem {
            total: 10,
            size: 0,
            n: 3,
        };
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(3)); // 10 / 3 = 3 (integer division)
    }

    #[test]
    fn test_last_minute_latency_default() {
        let latency = LastMinuteLatency::default();
        assert_eq!(latency.totals.len(), 60);
        assert_eq!(latency.last_sec, 0);
        // All elements should be default (empty)
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_same_time() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        // Add some data to verify it's not cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;
        latency.forward_to(100); // Same time
        assert_eq!(latency.last_sec, 100);
        assert_eq!(latency.totals[0].total, 10); // Data should remain
        assert_eq!(latency.totals[0].n, 1);
    }

    #[test]
    fn test_last_minute_latency_forward_to_past_time() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        // Add some data to verify it's not cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;
        latency.forward_to(50); // Past time
        assert_eq!(latency.last_sec, 100); // Should not change
        assert_eq!(latency.totals[0].total, 10); // Data should remain
        assert_eq!(latency.totals[0].n, 1);
    }

    #[test]
    fn test_last_minute_latency_forward_to_large_gap() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        // Add some data to verify it's cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;
        latency.forward_to(200); // Gap >= 60 seconds
        assert_eq!(latency.last_sec, 200); // last_sec should be updated to target time
        // All data should be cleared
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_small_gap() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };
        // Add data at specific indices
        latency.totals[41].total = 10; // (100 + 1) % 60 = 41
        latency.totals[42].total = 20; // (100 + 2) % 60 = 42
        latency.forward_to(102); // Forward by 2 seconds
        assert_eq!(latency.last_sec, 102);
        // The slots that were advanced should be cleared
        assert_eq!(latency.totals[41].total, 0); // Cleared during forward
        assert_eq!(latency.totals[42].total, 0); // Cleared during forward
    }

    #[test]
    fn test_last_minute_latency_add_all() {
        let mut latency = LastMinuteLatency::default();
        let acc_elem = AccElem {
            total: 15,
            size: 100,
            n: 3,
        };
        latency.add_all(1000, &acc_elem);
        assert_eq!(latency.last_sec, 1000);
        let idx = 1000 % 60; // Should be 40
        assert_eq!(latency.totals[idx as usize].total, 15);
        assert_eq!(latency.totals[idx as usize].size, 100);
        assert_eq!(latency.totals[idx as usize].n, 3);
    }

    #[test]
    fn test_last_minute_latency_add_all_multiple() {
        let mut latency = LastMinuteLatency::default();
        let acc_elem1 = AccElem {
            total: 10,
            size: 50,
            n: 2,
        };
        let acc_elem2 = AccElem {
            total: 20,
            size: 100,
            n: 4,
        };
        latency.add_all(1000, &acc_elem1);
        latency.add_all(1000, &acc_elem2); // Same second
        let idx = 1000 % 60;
        assert_eq!(latency.totals[idx as usize].total, 30); // 10 + 20
        assert_eq!(latency.totals[idx as usize].size, 150); // 50 + 100
        assert_eq!(latency.totals[idx as usize].n, 6); // 2 + 4
    }

    #[test]
    fn test_last_minute_latency_merge_same_time() {
        let mut latency1 = LastMinuteLatency::default();
        let mut latency2 = LastMinuteLatency::default();
        latency1.last_sec = 1000;
        latency2.last_sec = 1000;
        // Add data to both
        latency1.totals[0].total = 10;
        latency1.totals[0].n = 2;
        latency2.totals[0].total = 20;
        latency2.totals[0].n = 3;
        let merged = latency1.merge(&latency2);
        assert_eq!(merged.last_sec, 1000);
        assert_eq!(merged.totals[0].total, 30); // 10 + 20
        assert_eq!(merged.totals[0].n, 5); // 2 + 3
    }

    #[test]
    fn test_last_minute_latency_merge_different_times() {
        let mut latency1 = LastMinuteLatency::default();
        let mut latency2 = LastMinuteLatency::default();
        latency1.last_sec = 1000;
        latency2.last_sec = 1010; // 10 seconds later
        // Add data to both
        latency1.totals[0].total = 10;
        latency2.totals[0].total = 20;
        let merged = latency1.merge(&latency2);
        assert_eq!(merged.last_sec, 1010); // Should use the later time
        assert_eq!(merged.totals[0].total, 30);
    }

    #[test]
    fn test_last_minute_latency_merge_empty() {
        let mut latency1 = LastMinuteLatency::default();
        let latency2 = LastMinuteLatency::default();
        let merged = latency1.merge(&latency2);
        assert_eq!(merged.last_sec, 0);
        for elem in &merged.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_window_wraparound() {
        let mut latency = LastMinuteLatency::default();
        // Test that indices wrap around correctly
        for sec in 0..120 {
            // Test for 2 minutes
            let acc_elem = AccElem {
                total: sec,
                size: 0,
                n: 1,
            };
            latency.add_all(sec, &acc_elem);
            let expected_idx = sec % 60;
            assert_eq!(latency.totals[expected_idx as usize].total, sec);
        }
    }

    #[test]
    fn test_last_minute_latency_time_progression() {
        let mut latency = LastMinuteLatency::default();
        // Add data at time 1000
        latency.add_all(
            1000,
            &AccElem {
                total: 10,
                size: 0,
                n: 1,
            },
        );
        // Forward to time 1030 (30 seconds later)
        latency.forward_to(1030);
        // Original data should still be there
        let idx_1000 = 1000 % 60;
        assert_eq!(latency.totals[idx_1000 as usize].total, 10);
        // Forward to time 1070 (70 seconds from original, > 60 seconds)
        latency.forward_to(1070);
        // All data should be cleared due to large gap
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_realistic_scenario() {
        let mut latency = LastMinuteLatency::default();
        let base_time = 1000u64;
        // Add data for exactly 60 seconds to fill the window
        for i in 0..60 {
            let current_time = base_time + i;
            let duration_secs = i % 10 + 1; // Varying durations 1-10 seconds
            let acc_elem = AccElem {
                total: duration_secs,
                size: 1024 * (i % 5 + 1), // Varying sizes
                n: 1,
            };
            latency.add_all(current_time, &acc_elem);
        }
        // Count non-empty slots after filling the window
        let mut non_empty_count = 0;
        let mut total_n = 0;
        let mut total_sum = 0;
        for elem in &latency.totals {
            if elem.n > 0 {
                non_empty_count += 1;
                total_n += elem.n;
                total_sum += elem.total;
            }
        }
        // We should have exactly 60 non-empty slots (one for each second in the window)
        assert_eq!(non_empty_count, 60);
        assert_eq!(total_n, 60); // 60 data points total
        assert!(total_sum > 0);
        // Test manual total calculation (get_total uses system time which interferes with test)
        let mut manual_total = AccElem::default();
        for elem in &latency.totals {
            manual_total.merge(elem);
        }
        assert_eq!(manual_total.n, 60);
        assert_eq!(manual_total.total, total_sum);
    }

    #[test]
    fn test_acc_elem_clone_and_debug() {
        let elem = AccElem {
            total: 100,
            size: 200,
            n: 5,
        };
        let cloned = elem;
        assert_eq!(elem.total, cloned.total);
        assert_eq!(elem.size, cloned.size);
        assert_eq!(elem.n, cloned.n);
        // Test Debug trait
        let debug_str = format!("{elem:?}");
        assert!(debug_str.contains("100"));
        assert!(debug_str.contains("200"));
        assert!(debug_str.contains("5"));
    }

    #[test]
    fn test_last_minute_latency_clone() {
        let mut latency = LastMinuteLatency {
            last_sec: 1000,
            ..Default::default()
        };
        latency.totals[0].total = 100;
        latency.totals[0].n = 5;
        let cloned = latency.clone();
        assert_eq!(latency.last_sec, cloned.last_sec);
        assert_eq!(latency.totals[0].total, cloned.totals[0].total);
        assert_eq!(latency.totals[0].n, cloned.totals[0].n);
    }

    #[test]
    fn test_edge_case_max_values() {
        let mut elem = AccElem {
            total: u64::MAX - 50,
            size: u64::MAX - 50,
            n: u64::MAX - 50,
        };
        let other = AccElem {
            total: 100,
            size: 100,
            n: 100,
        };
        // This should not panic due to overflow, values will wrap around
        elem.merge(&other);
        // Values should wrap around due to overflow (wrapping_add behavior)
        assert_eq!(elem.total, 49); // (u64::MAX - 50) + 100 wraps to 49
        assert_eq!(elem.size, 49);
        assert_eq!(elem.n, 49);
    }

    #[test]
    fn test_forward_to_boundary_conditions() {
        let mut latency = LastMinuteLatency {
            last_sec: 59,
            ..Default::default()
        };
        // Add data at the last slot
        latency.totals[59].total = 100;
        latency.totals[59].n = 1;
        // Forward exactly 60 seconds (boundary case)
        latency.forward_to(119);
        // All data should be cleared
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_get_total_with_data() {
        let mut latency = LastMinuteLatency::default();
        // Set a recent timestamp to avoid forward_to clearing data
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        latency.last_sec = current_time;
        // Add data to multiple slots
        latency.totals[0] = AccElem {
            total: 10,
            size: 100,
            n: 1,
        };
        latency.totals[1] = AccElem {
            total: 20,
            size: 200,
            n: 2,
        };
        latency.totals[59] = AccElem {
            total: 30,
            size: 300,
            n: 3,
        };
        let total = latency.get_total();
        assert_eq!(total.total, 60);
        assert_eq!(total.size, 600);
        assert_eq!(total.n, 6);
    }

    #[test]
    fn test_window_index_calculation() {
        // Test that window index calculation works correctly
        let _latency = LastMinuteLatency::default();
        let acc_elem = AccElem { total: 1, size: 1, n: 1 };
        // Test various timestamps
        let test_cases = [(0, 0), (1, 1), (59, 59), (60, 0), (61, 1), (119, 59), (120, 0)];
        for (timestamp, expected_idx) in test_cases {
            let mut test_latency = LastMinuteLatency::default();
            test_latency.add_all(timestamp, &acc_elem);
            assert_eq!(
                test_latency.totals[expected_idx].n, 1,
                "Failed for timestamp {timestamp} (expected index {expected_idx})"
            );
        }
    }

    #[test]
    fn test_concurrent_safety_simulation() {
        // Simulate concurrent access patterns
        let mut latency = LastMinuteLatency::default();
        // Use current time to ensure data doesn't get cleared by get_total
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        // Simulate rapid additions within a 60-second window
        for i in 0..1000 {
            let acc_elem = AccElem {
                total: (i % 10) + 1, // Ensure non-zero values
                size: (i % 100) + 1,
                n: 1,
            };
            // Keep all timestamps within the current minute window
            latency.add_all(current_time - (i % 60), &acc_elem);
        }
        let total = latency.get_total();
        assert!(total.n > 0, "Total count should be greater than 0");
        assert!(total.total > 0, "Total time should be greater than 0");
    }

    #[test]
    fn test_acc_elem_debug_format() {
        let elem = AccElem {
            total: 123,
            size: 456,
            n: 789,
        };
        let debug_str = format!("{elem:?}");
        assert!(debug_str.contains("123"));
        assert!(debug_str.contains("456"));
        assert!(debug_str.contains("789"));
    }

    #[test]
    fn test_large_values() {
        let mut elem = AccElem::default();
        // Test with large duration values
        let large_duration = Duration::from_secs(u64::MAX / 2);
        elem.add(&large_duration);
        assert_eq!(elem.total, u64::MAX / 2);
        assert_eq!(elem.n, 1);
        // Test average calculation with large values
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(u64::MAX / 2));
    }

    #[test]
    fn test_zero_duration_handling() {
        let mut elem = AccElem::default();
        let zero_duration = Duration::from_secs(0);
        elem.add(&zero_duration);
        assert_eq!(elem.total, 0);
        assert_eq!(elem.n, 1);
        assert_eq!(elem.avg(), Duration::from_secs(0));
    }
}
/// Number of size buckets a `LastMinuteHistogram` reports; `size_to_tag`
/// currently only produces indices 0..=5 — TODO confirm the remaining slots
/// are intentionally reserved.
const SIZE_LAST_ELEM_MARKER: usize = 10; // Assumed marker size is 10, modify according to actual situation
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct LastMinuteHistogram {
    // One rolling last-minute latency accumulator per object-size bucket.
    histogram: Vec<LastMinuteLatency>,
    // Declared bucket count; not consulted by the impl below.
    size: u32,
}
impl LastMinuteHistogram {
pub fn merge(&mut self, other: &LastMinuteHistogram) {
for i in 0..self.histogram.len() {
self.histogram[i].merge(&other.histogram[i]);
}
}
pub fn add(&mut self, size: i64, t: Duration) {
let index = size_to_tag(size);
self.histogram[index].add(&t);
}
pub fn get_avg_data(&mut self) -> [AccElem; SIZE_LAST_ELEM_MARKER] {
let mut res = [AccElem::default(); SIZE_LAST_ELEM_MARKER];
for (i, elem) in self.histogram.iter_mut().enumerate() {
res[i] = elem.get_total();
}
res
}
}
/// Map an object size in bytes to its latency-histogram bucket index.
///
/// Buckets: 0 => <1KiB, 1 => <1MiB, 2 => <10MiB, 3 => <100MiB, 4 => <1GiB,
/// 5 => >=1GiB. Negative sizes fall into bucket 0.
fn size_to_tag(size: i64) -> usize {
    const KIB: i64 = 1024;
    const MIB: i64 = 1024 * KIB;
    const GIB: i64 = 1024 * MIB;
    let upper_bounds = [KIB, MIB, 10 * MIB, 100 * MIB, GIB];
    upper_bounds
        .iter()
        .position(|&upper| size < upper)
        .unwrap_or(upper_bounds.len())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/lib.rs | crates/common/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod bucket_stats;
// pub mod error;
pub mod data_usage;
pub mod globals;
pub mod heal_channel;
pub mod last_minute;
pub mod metrics;
mod readiness;
pub use globals::*;
pub use readiness::{GlobalReadiness, SystemStage};
/// Default field delimiter: the ASCII comma (`,`, code 44). Using the byte
/// literal instead of the magic number makes the intent self-evident.
pub static DEFAULT_DELIMITER: u8 = b',';
/// Defers evaluation of a block of code until the end of the enclosing scope.
///
/// Expands to a guard value whose `Drop` implementation runs the given
/// statements, mirroring Go's `defer`. Multiple `defer!` invocations in the
/// same scope run in reverse declaration order (normal drop order).
#[macro_export]
macro_rules! defer {
    ($($body:tt)*) => {
        let _guard = {
            /// Scope guard: runs the stored closure exactly once on drop.
            pub struct Guard<F: FnOnce()>(Option<F>);
            impl<F: FnOnce()> Drop for Guard<F> {
                fn drop(&mut self) {
                    // `take` guarantees at-most-once invocation; `if let`
                    // replaces the previous `map(|f| f())` on a unit closure
                    // (clippy::option_map_unit_fn).
                    if let Some(f) = self.0.take() {
                        f();
                    }
                }
            }
            Guard(Some(|| {
                let _ = { $($body)* };
            }))
        };
    };
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/globals.rs | crates/common/src/globals.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(non_upper_case_globals)] // FIXME
use chrono::{DateTime, Utc};
use std::collections::HashMap;
use std::sync::LazyLock;
use tokio::sync::RwLock;
use tonic::transport::Channel;
/// Name of the local node within the cluster (empty until set).
pub static GLOBAL_LOCAL_NODE_NAME: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
/// Host part of this node's advertised address.
pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
/// Port this node serves on; defaults to "9000".
pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
/// Full RustFS address used for gRPC connections (e.g. "https://node1:9000").
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
/// Cache of established gRPC channels, keyed by peer address.
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
/// PEM-encoded root CA for validating server TLS certs; `None` = system roots.
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
/// Optional client identity presented by outbound gRPC clients for mTLS.
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));
/// Global initialization time of the RustFS node.
pub static GLOBAL_INIT_TIME: LazyLock<RwLock<Option<DateTime<Utc>>>> = LazyLock::new(|| RwLock::new(None));
/// Set the global local node name.
///
/// # Arguments
/// * `name` - A string slice representing the local node name.
pub async fn set_global_local_node_name(name: &str) {
    let mut slot = GLOBAL_LOCAL_NODE_NAME.write().await;
    *slot = name.to_owned();
}
/// Set the global RustFS initialization time to the current UTC time.
pub async fn set_global_init_time_now() {
    // `replace` stores Some(now); the previous value (if any) is discarded.
    let _ = GLOBAL_INIT_TIME.write().await.replace(Utc::now());
}
/// Get the global RustFS initialization time.
///
/// # Returns
/// * `Option<DateTime<Utc>>` - The initialization time if set.
pub async fn get_global_init_time() -> Option<DateTime<Utc>> {
    let guard = GLOBAL_INIT_TIME.read().await;
    *guard
}
/// Set the global RustFS address used for gRPC connections.
///
/// # Arguments
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
pub async fn set_global_addr(addr: &str) {
    let mut slot = GLOBAL_RUSTFS_ADDR.write().await;
    *slot = addr.to_owned();
}
/// Set the global root CA certificate for outbound gRPC clients.
/// This certificate is used to validate server TLS certificates.
/// When unset, clients use the system default root CAs.
///
/// # Arguments
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
pub async fn set_global_root_cert(cert: Vec<u8>) {
    let mut slot = GLOBAL_ROOT_CERT.write().await;
    *slot = Some(cert);
}
/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
/// When set, clients will present this identity to servers requesting/requiring mTLS.
/// When None, clients proceed with standard server-authenticated TLS.
///
/// # Arguments
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
    let mut slot = GLOBAL_MTLS_IDENTITY.write().await;
    *slot = identity;
}
/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
///
/// # Arguments
/// * `addr` - The address of the connection to evict.
pub async fn evict_connection(addr: &str) {
    if GLOBAL_CONN_MAP.write().await.remove(addr).is_some() {
        tracing::warn!("Evicted stale connection from cache: {}", addr);
    }
}
/// Check if a connection exists in the cache for the given address.
///
/// # Arguments
/// * `addr` - The address to check.
///
/// # Returns
/// * `bool` - True if a cached connection exists, false otherwise.
pub async fn has_cached_connection(addr: &str) -> bool {
    let conns = GLOBAL_CONN_MAP.read().await;
    conns.contains_key(addr)
}
/// Clear all cached connections. Useful for full cluster reset/recovery.
pub async fn clear_all_connections() {
    let mut conns = GLOBAL_CONN_MAP.write().await;
    let evicted = conns.len();
    conns.clear();
    if evicted > 0 {
        tracing::warn!("Cleared {} cached connections from global map", evicted);
    }
}
/// Optional client identity (cert+key PEM) for outbound mTLS.
///
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
/// When absent, clients proceed with standard server-authenticated TLS.
#[derive(Clone, Debug)]
pub struct MtlsIdentityPem {
    /// PEM-encoded client certificate.
    pub cert_pem: Vec<u8>,
    /// PEM-encoded private key for the certificate.
    pub key_pem: Vec<u8>,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/bucket_stats.rs | crates/common/src/bucket_stats.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::last_minute::{self};
use std::collections::HashMap;
/// Rolling last-minute upload latency for replication, bucketed by object size.
pub struct ReplicationLatency {
    // Delays for single and multipart PUT requests
    upload_histogram: last_minute::LastMinuteHistogram,
}
impl ReplicationLatency {
    /// Fold the histogram of `other` into this one and return `self`.
    pub fn merge(&mut self, other: &mut ReplicationLatency) -> &ReplicationLatency {
        self.upload_histogram.merge(&other.upload_histogram);
        self
    }
    /// Report the average upload latency per object-size bucket, in
    /// milliseconds, keyed by a human-readable bucket label.
    pub fn get_upload_latency(&mut self) -> HashMap<String, u64> {
        let averages = self.upload_histogram.get_avg_data();
        averages
            .iter()
            .enumerate()
            .map(|(tag, acc)| (self.size_tag_to_string(tag), acc.avg().as_millis() as u64))
            .collect()
    }
    /// Record one upload of `size` bytes that took `during`.
    pub fn update(&mut self, size: i64, during: std::time::Duration) {
        self.upload_histogram.add(size, during);
    }
    /// Human-readable label for a size-bucket index.
    fn size_tag_to_string(&self, tag: usize) -> String {
        let label = match tag {
            0 => "Size < 1 KiB",
            1 => "Size < 1 MiB",
            2 => "Size < 10 MiB",
            3 => "Size < 100 MiB",
            4 => "Size < 1 GiB",
            _ => "Size > 1 GiB",
        };
        label.to_string()
    }
}
// #[derive(Debug, Clone, Default)]
// pub struct ReplicationLastMinute {
// pub last_minute: LastMinuteLatency,
// }
// impl ReplicationLastMinute {
// pub fn merge(&mut self, other: ReplicationLastMinute) -> ReplicationLastMinute {
// let mut nl = ReplicationLastMinute::default();
// nl.last_minute = self.last_minute.merge(&mut other.last_minute);
// nl
// }
// pub fn add_size(&mut self, n: i64) {
// let t = SystemTime::now()
// .duration_since(UNIX_EPOCH)
// .expect("Time went backwards")
// .as_secs();
// self.last_minute.add_all(t - 1, &AccElem { total: t - 1, size: n as u64, n: 1 });
// }
// pub fn get_total(&self) -> AccElem {
// self.last_minute.get_total()
// }
// }
// impl fmt::Display for ReplicationLastMinute {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// let t = self.last_minute.get_total();
// write!(f, "ReplicationLastMinute sz= {}, n= {}, dur= {}", t.size, t.n, t.total)
// }
// }
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/data_usage.rs | crates/common/src/data_usage.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use path_clean::PathClean;
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
hash::{DefaultHasher, Hash, Hasher},
path::Path,
time::SystemTime,
};
/// Aggregate statistics for a single storage tier.
#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize, PartialEq)]
pub struct TierStats {
    /// Total bytes stored in the tier.
    pub total_size: u64,
    /// Number of object versions in the tier.
    pub num_versions: i32,
    /// Number of objects in the tier.
    pub num_objects: i32,
}
impl TierStats {
pub fn add(&self, u: &TierStats) -> TierStats {
TierStats {
total_size: self.total_size + u.total_size,
num_versions: self.num_versions + u.num_versions,
num_objects: self.num_objects + u.num_objects,
}
}
}
/// Statistics for every storage tier, keyed by tier name.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct AllTierStats {
    pub tiers: HashMap<String, TierStats>,
}
impl AllTierStats {
pub fn new() -> Self {
Self { tiers: HashMap::new() }
}
pub fn add_sizes(&mut self, tiers: HashMap<String, TierStats>) {
for (tier, st) in tiers {
self.tiers
.insert(tier.clone(), self.tiers.get(&tier).unwrap_or(&TierStats::default()).add(&st));
}
}
pub fn merge(&mut self, other: AllTierStats) {
for (tier, st) in other.tiers {
self.tiers
.insert(tier.clone(), self.tiers.get(&tier).unwrap_or(&TierStats::default()).add(&st));
}
}
pub fn populate_stats(&self, stats: &mut HashMap<String, TierStats>) {
for (tier, st) in &self.tiers {
stats.insert(
tier.clone(),
TierStats {
total_size: st.total_size,
num_versions: st.num_versions,
num_objects: st.num_objects,
},
);
}
}
}
/// Bucket target usage info provides replication statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketTargetUsageInfo {
    /// Bytes not yet replicated to this target.
    pub replication_pending_size: u64,
    /// Bytes whose replication to this target failed.
    pub replication_failed_size: u64,
    /// Bytes successfully replicated to this target.
    pub replicated_size: u64,
    /// Replica bytes accounted for this target.
    pub replica_size: u64,
    /// Objects not yet replicated to this target.
    pub replication_pending_count: u64,
    /// Objects whose replication to this target failed.
    pub replication_failed_count: u64,
    /// Objects successfully replicated to this target.
    pub replicated_count: u64,
}
/// Bucket usage info provides bucket-level statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketUsageInfo {
    /// Total bytes in the bucket.
    pub size: u64,
    // Following five fields suffixed with V1 are here for backward compatibility
    // Total Size for objects that have not yet been replicated
    pub replication_pending_size_v1: u64,
    // Total size for objects that have witness one or more failures and will be retried
    pub replication_failed_size_v1: u64,
    // Total size for objects that have been replicated to destination
    pub replicated_size_v1: u64,
    // Total number of objects pending replication
    pub replication_pending_count_v1: u64,
    // Total number of objects that failed replication
    pub replication_failed_count_v1: u64,
    /// Number of objects in the bucket.
    pub objects_count: u64,
    /// Object count per size-range bucket (see `SizeHistogram::to_map`).
    pub object_size_histogram: HashMap<String, u64>,
    /// Object count per version-count bucket (see `VersionsHistogram::to_map`).
    pub object_versions_histogram: HashMap<String, u64>,
    /// Number of object versions in the bucket.
    pub versions_count: u64,
    /// Number of delete markers in the bucket.
    pub delete_markers_count: u64,
    /// Replica bytes in the bucket.
    pub replica_size: u64,
    /// Replica object count in the bucket.
    pub replica_count: u64,
    /// Per-target replication statistics.
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,
}
/// DataUsageInfo represents data usage stats of the underlying storage
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DataUsageInfo {
    /// Total capacity
    pub total_capacity: u64,
    /// Total used capacity
    pub total_used_capacity: u64,
    /// Total free capacity
    pub total_free_capacity: u64,
    /// LastUpdate is the timestamp of when the data usage info was last updated
    pub last_update: Option<SystemTime>,
    /// Objects total count across all buckets
    pub objects_total_count: u64,
    /// Versions total count across all buckets
    pub versions_total_count: u64,
    /// Delete markers total count across all buckets
    pub delete_markers_total_count: u64,
    /// Objects total size across all buckets
    pub objects_total_size: u64,
    /// Replication info across all buckets
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,
    /// Total number of buckets in this cluster
    pub buckets_count: u64,
    /// Buckets usage info provides following information across all buckets
    pub buckets_usage: HashMap<String, BucketUsageInfo>,
    /// Deprecated kept here for backward compatibility reasons
    pub bucket_sizes: HashMap<String, u64>,
    /// Per-disk snapshot information when available
    #[serde(default)]
    pub disk_usage_status: Vec<DiskUsageStatus>,
}
/// Metadata describing the status of a disk-level data usage snapshot.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DiskUsageStatus {
    /// Identifier of the disk the snapshot belongs to.
    pub disk_id: String,
    /// Pool the disk belongs to, when known.
    pub pool_index: Option<usize>,
    /// Erasure set the disk belongs to, when known.
    pub set_index: Option<usize>,
    /// Position of the disk inside its set, when known.
    pub disk_index: Option<usize>,
    /// When the snapshot was last refreshed.
    pub last_update: Option<SystemTime>,
    /// Whether a snapshot file exists for this disk.
    pub snapshot_exists: bool,
}
/// Size summary for a single object or group of objects.
///
/// Consumed by `DataUsageEntry::add_sizes`, which folds these counters into a
/// cache entry.
#[derive(Debug, Default, Clone)]
pub struct SizeSummary {
    /// Total size
    pub total_size: usize,
    /// Number of versions
    pub versions: usize,
    /// Number of delete markers
    pub delete_markers: usize,
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Replica size
    pub replica_size: usize,
    /// Replica count
    pub replica_count: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
    /// Replication target stats, keyed by target ARN
    pub repl_target_stats: HashMap<String, ReplTargetSizeSummary>,
}
/// Replication size/count summary for a single replication target.
#[derive(Debug, Default, Clone)]
pub struct ReplTargetSizeSummary {
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
}
// ===== Cache-related data structures =====
/// Newtype over the string key of a path in the data-usage cache.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct DataUsageHash(pub String);
impl DataUsageHash {
    /// Owned copy of the underlying hash string.
    pub fn string(&self) -> String {
        self.0.clone()
    }
    /// Cache-lookup key form (identical to `string`).
    pub fn key(&self) -> String {
        self.0.clone()
    }
    /// Whether this hash falls into `cycle` out of `cycles` buckets, judged
    /// by the low 32 bits of the hash. `cycles == 0` is always false,
    /// `cycles == 1` always true.
    pub fn mod_(&self, cycle: u32, cycles: u32) -> bool {
        if cycles <= 1 {
            return cycles == 1;
        }
        self.calculate_hash() as u32 % cycles == cycle % cycles
    }
    /// Alternate bucketing using the high 32 bits of the hash.
    pub fn mod_alt(&self, cycle: u32, cycles: u32) -> bool {
        if cycles <= 1 {
            return cycles == 1;
        }
        (self.calculate_hash() >> 32) as u32 % cycles == cycle % cycles
    }
    /// 64-bit hash of the path via the std `DefaultHasher`.
    fn calculate_hash(&self) -> u64 {
        let mut hasher = DefaultHasher::new();
        Hash::hash(&self.0, &mut hasher);
        hasher.finish()
    }
}
/// Set of child keys (`DataUsageHash::key` values) attached to a cache entry.
pub type DataUsageHashMap = HashSet<String>;
/// Histogram of object counts bucketed by object size.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SizeHistogram(Vec<u64>);
impl Default for SizeHistogram {
    fn default() -> Self {
        // One slot per named size range; see `add` for the bucket bounds.
        Self(vec![0; 11]) // DATA_USAGE_BUCKET_LEN = 11
    }
}
impl SizeHistogram {
    /// Record one object of `size` bytes in its size bucket.
    pub fn add(&mut self, size: u64) {
        let intervals = [
            (0, 1024),                                  // LESS_THAN_1024_B
            (1024, 64 * 1024 - 1),                      // BETWEEN_1024_B_AND_64_KB
            (64 * 1024, 256 * 1024 - 1),                // BETWEEN_64_KB_AND_256_KB
            (256 * 1024, 512 * 1024 - 1),               // BETWEEN_256_KB_AND_512_KB
            (512 * 1024, 1024 * 1024 - 1),              // BETWEEN_512_KB_AND_1_MB
            (1024, 1024 * 1024 - 1),                    // BETWEEN_1024B_AND_1_MB
            (1024 * 1024, 10 * 1024 * 1024 - 1),        // BETWEEN_1_MB_AND_10_MB
            (10 * 1024 * 1024, 64 * 1024 * 1024 - 1),   // BETWEEN_10_MB_AND_64_MB
            (64 * 1024 * 1024, 128 * 1024 * 1024 - 1),  // BETWEEN_64_MB_AND_128_MB
            (128 * 1024 * 1024, 512 * 1024 * 1024 - 1), // BETWEEN_128_MB_AND_512_MB
            (512 * 1024 * 1024, u64::MAX),              // GREATER_THAN_512_MB
        ];
        // First matching interval wins. The synthetic BETWEEN_1024B_AND_1_MB
        // range (index 5) overlaps indices 1..=4, so it can never match here;
        // its value is derived in `to_map` instead.
        for (idx, (start, end)) in intervals.iter().enumerate() {
            if size >= *start && size <= *end {
                self.0[idx] += 1;
                break;
            }
        }
    }
    /// Render the histogram as a bucket-name -> count map.
    ///
    /// The synthetic `BETWEEN_1024B_AND_1_MB` bucket is reported as the sum of
    /// the four real buckets covering 1 KiB .. 1 MiB (indices 1..=4).
    ///
    /// Bug fix: the previous string-matching condition
    /// (`contains("_KB_") && contains("_MB")`) only matched
    /// `BETWEEN_512_KB_AND_1_MB`, so the synthetic bucket undercounted the
    /// 1 KiB .. 512 KiB ranges; summing by index includes all four.
    pub fn to_map(&self) -> HashMap<String, u64> {
        let names = [
            "LESS_THAN_1024_B",
            "BETWEEN_1024_B_AND_64_KB",
            "BETWEEN_64_KB_AND_256_KB",
            "BETWEEN_256_KB_AND_512_KB",
            "BETWEEN_512_KB_AND_1_MB",
            "BETWEEN_1024B_AND_1_MB",
            "BETWEEN_1_MB_AND_10_MB",
            "BETWEEN_10_MB_AND_64_MB",
            "BETWEEN_64_MB_AND_128_MB",
            "BETWEEN_128_MB_AND_512_MB",
            "GREATER_THAN_512_MB",
        ];
        let mut res = HashMap::new();
        let mut spl_count = 0;
        for (idx, (count, name)) in self.0.iter().zip(names.iter()).enumerate() {
            match idx {
                // Real buckets inside 1 KiB .. 1 MiB: report and accumulate.
                1..=4 => {
                    spl_count += count;
                    res.insert(name.to_string(), *count);
                }
                // Synthetic bucket: indices 1..=4 precede it, so the sum is complete.
                5 => {
                    res.insert(name.to_string(), spl_count);
                }
                _ => {
                    res.insert(name.to_string(), *count);
                }
            }
        }
        res
    }
}
/// Histogram of object counts bucketed by number of versions per object.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct VersionsHistogram(Vec<u64>);
impl Default for VersionsHistogram {
    fn default() -> Self {
        // One slot per named version-count range; see `add` for the bounds.
        Self(vec![0; 7]) // DATA_USAGE_VERSION_LEN = 7
    }
}
impl VersionsHistogram {
    /// Record an object that has `count` versions in the matching bucket.
    pub fn add(&mut self, count: u64) {
        let intervals = [
            (0, 0),            // UNVERSIONED
            (1, 1),            // SINGLE_VERSION
            (2, 9),            // BETWEEN_2_AND_10
            (10, 99),          // BETWEEN_10_AND_100
            (100, 999),        // BETWEEN_100_AND_1000
            (1000, 9999),      // BETWEEN_1000_AND_10000
            (10000, u64::MAX), // GREATER_THAN_10000
        ];
        // The intervals partition all of u64, so a slot always exists.
        if let Some(idx) = intervals.iter().position(|&(lo, hi)| (lo..=hi).contains(&count)) {
            self.0[idx] += 1;
        }
    }
    /// Render the histogram as a bucket-name -> count map.
    pub fn to_map(&self) -> HashMap<String, u64> {
        let names = [
            "UNVERSIONED",
            "SINGLE_VERSION",
            "BETWEEN_2_AND_10",
            "BETWEEN_10_AND_100",
            "BETWEEN_100_AND_1000",
            "BETWEEN_1000_AND_10000",
            "GREATER_THAN_10000",
        ];
        names
            .iter()
            .zip(self.0.iter())
            .map(|(name, count)| (name.to_string(), *count))
            .collect()
    }
}
/// Replication statistics for a single target.
///
/// Note: `empty()` below only inspects `replicated_size`, `failed_size` and
/// `failed_count`; the remaining counters do not affect emptiness.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ReplicationStats {
    pub pending_size: u64,
    pub replicated_size: u64,
    pub failed_size: u64,
    pub failed_count: u64,
    pub pending_count: u64,
    pub missed_threshold_size: u64,
    pub after_threshold_size: u64,
    pub missed_threshold_count: u64,
    pub after_threshold_count: u64,
    pub replicated_count: u64,
}
impl ReplicationStats {
    /// True when neither a successful nor a failed replication was recorded.
    pub fn empty(&self) -> bool {
        [self.replicated_size, self.failed_size, self.failed_count].iter().all(|&v| v == 0)
    }
}
/// Replication statistics across all targets, plus replica totals.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ReplicationAllStats {
    /// Per-target stats, keyed by target identifier (ARN).
    pub targets: HashMap<String, ReplicationStats>,
    /// Total replica bytes.
    pub replica_size: u64,
    /// Total replica object count.
    pub replica_count: u64,
}
impl ReplicationAllStats {
    /// True when no replica data and no per-target activity has been recorded.
    ///
    /// Bug fix: the original used `&&` in the replica check, so a stats object
    /// with, say, a non-zero `replica_count` but zero `replica_size` could
    /// still be reported as empty. Any non-zero replica counter means the
    /// stats are not empty.
    pub fn empty(&self) -> bool {
        if self.replica_size != 0 || self.replica_count != 0 {
            return false;
        }
        self.targets.values().all(|v| v.empty())
    }
}
/// Data usage cache entry
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DataUsageEntry {
    /// Keys of direct child entries in the cache.
    pub children: DataUsageHashMap,
    // These fields do not include any children.
    /// Total bytes at this level only.
    pub size: usize,
    /// Object count at this level only.
    pub objects: usize,
    /// Version count at this level only.
    pub versions: usize,
    /// Delete-marker count at this level only.
    pub delete_markers: usize,
    /// Size distribution of objects at this level.
    pub obj_sizes: SizeHistogram,
    /// Version-count distribution of objects at this level.
    pub obj_versions: VersionsHistogram,
    /// Replication counters; allocated lazily on first use.
    pub replication_stats: Option<ReplicationAllStats>,
    /// Whether this entry summarizes an already-compacted subtree.
    pub compacted: bool,
}
impl DataUsageEntry {
    /// Register `hash` as a direct child of this entry.
    pub fn add_child(&mut self, hash: &DataUsageHash) {
        // `HashSet::insert` is already a no-op for existing keys, so the
        // previous `contains` pre-check was a redundant second lookup.
        self.children.insert(hash.key());
    }
    /// Fold a scanned object's size/replication summary into this entry.
    pub fn add_sizes(&mut self, summary: &SizeSummary) {
        self.size += summary.total_size;
        self.versions += summary.versions;
        self.delete_markers += summary.delete_markers;
        self.obj_sizes.add(summary.total_size as u64);
        self.obj_versions.add(summary.versions as u64);
        // Lazily initialize replication stats on first use.
        let replication_stats = self.replication_stats.get_or_insert_with(ReplicationAllStats::default);
        replication_stats.replica_size += summary.replica_size as u64;
        replication_stats.replica_count += summary.replica_count as u64;
        for (arn, st) in &summary.repl_target_stats {
            let tgt_stat = replication_stats.targets.entry(arn.to_string()).or_default();
            tgt_stat.pending_size += st.pending_size as u64;
            tgt_stat.failed_size += st.failed_size as u64;
            tgt_stat.replicated_size += st.replicated_size as u64;
            tgt_stat.replicated_count += st.replicated_count as u64;
            tgt_stat.failed_count += st.failed_count as u64;
            tgt_stat.pending_count += st.pending_count as u64;
        }
    }
    /// Merge the (non-child) accumulators of `other` into `self`.
    pub fn merge(&mut self, other: &DataUsageEntry) {
        self.objects += other.objects;
        self.versions += other.versions;
        self.delete_markers += other.delete_markers;
        self.size += other.size;
        if let Some(o_rep) = &other.replication_stats {
            let s_rep = self.replication_stats.get_or_insert_with(ReplicationAllStats::default);
            // Bug fix: the previous implementation called
            // `s_rep.targets.clear()` here, discarding this entry's already
            // accumulated per-target stats and making the additions below
            // pointless (they always added to zero). Merging must accumulate.
            s_rep.replica_size += o_rep.replica_size;
            s_rep.replica_count += o_rep.replica_count;
            for (arn, stat) in o_rep.targets.iter() {
                let st = s_rep.targets.entry(arn.clone()).or_default();
                st.pending_size += stat.pending_size;
                st.failed_size += stat.failed_size;
                st.replicated_size += stat.replicated_size;
                st.pending_count += stat.pending_count;
                st.failed_count += stat.failed_count;
                st.replicated_count += stat.replicated_count;
            }
        }
        // Histograms merge slot-wise; both sides share the default lengths.
        for (i, v) in other.obj_sizes.0.iter().enumerate() {
            self.obj_sizes.0[i] += v;
        }
        for (i, v) in other.obj_versions.0.iter().enumerate() {
            self.obj_versions.0[i] += v;
        }
    }
}
/// Data usage cache info
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DataUsageCacheInfo {
    /// Root name of the cache (used as the root entry's path).
    pub name: String,
    // presumably the scanner cycle to run next — confirm against scanner code
    pub next_cycle: u32,
    /// When the cache was last refreshed.
    pub last_update: Option<SystemTime>,
    // NOTE(review): consumed by the healing scanner — confirm semantics there
    pub skip_healing: bool,
}
/// Data usage cache: entries keyed by `DataUsageHash::key()` of their path.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DataUsageCache {
    pub info: DataUsageCacheInfo,
    pub cache: HashMap<String, DataUsageEntry>,
}
impl DataUsageCache {
pub fn replace(&mut self, path: &str, parent: &str, e: DataUsageEntry) {
let hash = hash_path(path);
self.cache.insert(hash.key(), e);
if !parent.is_empty() {
let phash = hash_path(parent);
let p = {
let p = self.cache.entry(phash.key()).or_default();
p.add_child(&hash);
p.clone()
};
self.cache.insert(phash.key(), p);
}
}
    /// Insert `e` under `hash`, linking it as a child of `parent` when given.
    pub fn replace_hashed(&mut self, hash: &DataUsageHash, parent: &Option<DataUsageHash>, e: &DataUsageEntry) {
        self.cache.insert(hash.key(), e.clone());
        if let Some(parent) = parent {
            self.cache.entry(parent.key()).or_default().add_child(hash);
        }
    }
    /// Look up the entry for `path` (cloned), if present.
    pub fn find(&self, path: &str) -> Option<DataUsageEntry> {
        self.cache.get(&hash_path(path).key()).cloned()
    }
    /// Clone the child set of `h`, creating an empty entry if absent.
    pub fn find_children_copy(&mut self, h: DataUsageHash) -> DataUsageHashMap {
        self.cache.entry(h.string()).or_default().children.clone()
    }
pub fn flatten(&self, root: &DataUsageEntry) -> DataUsageEntry {
let mut root = root.clone();
for id in root.children.clone().iter() {
if let Some(e) = self.cache.get(id) {
let mut e = e.clone();
if !e.children.is_empty() {
e = self.flatten(&e);
}
root.merge(&e);
}
}
root.children.clear();
root
}
    /// Recursively copy the entry at `hash` and all of its descendants from
    /// `src` into `self`, linking the top entry under `parent` when given.
    pub fn copy_with_children(&mut self, src: &DataUsageCache, hash: &DataUsageHash, parent: &Option<DataUsageHash>) {
        if let Some(e) = src.cache.get(&hash.string()) {
            self.cache.insert(hash.key(), e.clone());
            for ch in e.children.iter() {
                // Guard against an entry listing itself as a child; bail out
                // instead of recursing forever.
                if *ch == hash.key() {
                    return;
                }
                self.copy_with_children(src, &DataUsageHash(ch.to_string()), &Some(hash.clone()));
            }
            if let Some(parent) = parent {
                let p = self.cache.entry(parent.key()).or_default();
                p.add_child(hash);
            }
        }
    }
pub fn delete_recursive(&mut self, hash: &DataUsageHash) {
let mut need_remove = Vec::new();
if let Some(v) = self.cache.get(&hash.string()) {
for child in v.children.iter() {
need_remove.push(child.clone());
}
}
self.cache.remove(&hash.string());
need_remove.iter().for_each(|child| {
self.delete_recursive(&DataUsageHash(child.to_string()));
});
}
pub fn size_recursive(&self, path: &str) -> Option<DataUsageEntry> {
match self.find(path) {
Some(root) => {
if root.children.is_empty() {
return Some(root);
}
let mut flat = self.flatten(&root);
if flat.replication_stats.is_some() && flat.replication_stats.as_ref().unwrap().empty() {
flat.replication_stats = None;
}
Some(flat)
}
None => None,
}
}
    /// Find the entry that lists `hash` among its children.
    ///
    /// Fast path: derive the candidate parent from the last `/` of the wanted
    /// key and verify the link; otherwise fall back to scanning the cache.
    pub fn search_parent(&self, hash: &DataUsageHash) -> Option<DataUsageHash> {
        let want = hash.key();
        if let Some(last_index) = want.rfind('/')
            && let Some(v) = self.find(&want[0..last_index])
            && v.children.contains(&want)
        {
            let found = hash_path(&want[0..last_index]);
            return Some(found);
        }
        for (k, v) in self.cache.iter() {
            if v.children.contains(&want) {
                let found = DataUsageHash(k.clone());
                return Some(found);
            }
        }
        None
    }
pub fn is_compacted(&self, hash: &DataUsageHash) -> bool {
match self.cache.get(&hash.key()) {
Some(due) => due.compacted,
None => false,
}
}
pub fn force_compact(&mut self, limit: usize) {
if self.cache.len() < limit {
return;
}
let top = hash_path(&self.info.name).key();
let top_e = match self.find(&top) {
Some(e) => e,
None => return,
};
// Note: DATA_SCANNER_FORCE_COMPACT_AT_FOLDERS constant would need to be passed as parameter
// or defined in common crate if needed
if top_e.children.len() > 250_000 {
// DATA_SCANNER_FORCE_COMPACT_AT_FOLDERS
self.reduce_children_of(&hash_path(&self.info.name), limit, true);
}
if self.cache.len() <= limit {
return;
}
let mut found = HashSet::new();
found.insert(top);
mark(self, &top_e, &mut found);
self.cache.retain(|k, _| {
if !found.contains(k) {
return false;
}
true
});
}
    /// Reduce the number of cached entries below `path` towards `limit` by
    /// compacting the subtrees with the fewest objects first.
    ///
    /// When `compact_self` is set and `path` has more than `limit` direct
    /// children, the whole subtree is flattened into one compacted entry.
    pub fn reduce_children_of(&mut self, path: &DataUsageHash, limit: usize, compact_self: bool) {
        let e = match self.cache.get(&path.key()) {
            Some(e) => e,
            None => return,
        };
        // Already compacted: nothing below this entry to reduce.
        if e.compacted {
            return;
        }
        if e.children.len() > limit && compact_self {
            // Collapse the entire subtree into a single compacted entry.
            let mut flat = self.size_recursive(&path.key()).unwrap_or_default();
            flat.compacted = true;
            self.delete_recursive(path);
            self.replace_hashed(path, &None, &flat);
            return;
        }
        let total = self.total_children_rec(&path.key());
        if total < limit {
            return;
        }
        // Candidates ordered by ascending object count; compact from the
        // front until enough entries have been removed.
        let mut leaves = Vec::new();
        let mut remove = total - limit;
        add(self, path, &mut leaves);
        leaves.sort_by(|a, b| a.objects.cmp(&b.objects));
        while remove > 0 && !leaves.is_empty() {
            let e = leaves.first().unwrap();
            let candidate = e.path.clone();
            if candidate == *path && !compact_self {
                break;
            }
            let removing = self.total_children_rec(&candidate.key());
            let mut flat = match self.size_recursive(&candidate.key()) {
                Some(flat) => flat,
                None => {
                    leaves.remove(0);
                    continue;
                }
            };
            flat.compacted = true;
            self.delete_recursive(&candidate);
            self.replace_hashed(&candidate, &None, &flat);
            remove -= removing;
            leaves.remove(0);
        }
    }
pub fn total_children_rec(&self, path: &str) -> usize {
let root = self.find(path);
if root.is_none() {
return 0;
}
let root = root.unwrap();
if root.children.is_empty() {
return 0;
}
let mut n = root.children.len();
for ch in root.children.iter() {
n += self.total_children_rec(ch);
}
n
}
pub fn merge(&mut self, o: &DataUsageCache) {
let mut existing_root = self.root();
let other_root = o.root();
if existing_root.is_none() && other_root.is_none() {
return;
}
if other_root.is_none() {
return;
}
if existing_root.is_none() {
*self = o.clone();
return;
}
if o.info.last_update.gt(&self.info.last_update) {
self.info.last_update = o.info.last_update;
}
existing_root.as_mut().unwrap().merge(other_root.as_ref().unwrap());
self.cache.insert(hash_path(&self.info.name).key(), existing_root.unwrap());
let e_hash = self.root_hash();
for key in other_root.as_ref().unwrap().children.iter() {
let entry = &o.cache[key];
let flat = o.flatten(entry);
let mut existing = self.cache[key].clone();
existing.merge(&flat);
self.replace_hashed(&DataUsageHash(key.clone()), &Some(e_hash.clone()), &existing);
}
}
    /// Hash of this cache's root name.
    pub fn root_hash(&self) -> DataUsageHash {
        hash_path(&self.info.name)
    }
    /// The root entry of this cache (cloned), if present.
    pub fn root(&self) -> Option<DataUsageEntry> {
        self.find(&self.info.name)
    }
    /// Convert cache to DataUsageInfo for a specific path.
    ///
    /// `path` supplies the cluster-wide totals (its subtree is flattened);
    /// each name in `buckets` that exists in the cache contributes a
    /// `BucketUsageInfo`. Returns a default value when `path` is unknown.
    pub fn dui(&self, path: &str, buckets: &[String]) -> DataUsageInfo {
        let e = match self.find(path) {
            Some(e) => e,
            None => return DataUsageInfo::default(),
        };
        // Totals over the whole subtree rooted at `path`.
        let flat = self.flatten(&e);
        let mut buckets_usage = HashMap::new();
        for bucket_name in buckets.iter() {
            let e = match self.find(bucket_name) {
                Some(e) => e,
                None => continue,
            };
            let flat = self.flatten(&e);
            let mut bui = BucketUsageInfo {
                size: flat.size as u64,
                versions_count: flat.versions as u64,
                objects_count: flat.objects as u64,
                delete_markers_count: flat.delete_markers as u64,
                object_size_histogram: flat.obj_sizes.to_map(),
                object_versions_histogram: flat.obj_versions.to_map(),
                ..Default::default()
            };
            // Copy replication counters per target when present.
            if let Some(rs) = &flat.replication_stats {
                bui.replica_size = rs.replica_size;
                bui.replica_count = rs.replica_count;
                for (arn, stat) in rs.targets.iter() {
                    bui.replication_info.insert(
                        arn.clone(),
                        BucketTargetUsageInfo {
                            replication_pending_size: stat.pending_size,
                            replicated_size: stat.replicated_size,
                            replication_failed_size: stat.failed_size,
                            replication_pending_count: stat.pending_count,
                            replication_failed_count: stat.failed_count,
                            replicated_count: stat.replicated_count,
                            ..Default::default()
                        },
                    );
                }
            }
            buckets_usage.insert(bucket_name.clone(), bui);
        }
        DataUsageInfo {
            last_update: self.info.last_update,
            objects_total_count: flat.objects as u64,
            versions_total_count: flat.versions as u64,
            delete_markers_total_count: flat.delete_markers as u64,
            objects_total_size: flat.size as u64,
            buckets_count: e.children.len() as u64,
            buckets_usage,
            ..Default::default()
        }
    }
/// Serialize this cache to MessagePack bytes.
pub fn marshal_msg(&self) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
    let mut out = Vec::new();
    let mut ser = rmp_serde::Serializer::new(&mut out);
    self.serialize(&mut ser)?;
    Ok(out)
}
/// Deserialize a cache from MessagePack bytes produced by `marshal_msg`.
pub fn unmarshal(buf: &[u8]) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
    Ok(rmp_serde::from_slice::<Self>(buf)?)
}
// Note: load and save methods are storage-specific and should be implemented
// in the ecstore crate where storage access is available
}
/// Trait for storage-specific operations on DataUsageCache
///
/// This crate only declares the contract; implementations live where
/// backend storage access is available (the ecstore crate, per the note
/// on the impl above).
#[async_trait::async_trait]
pub trait DataUsageCacheStorage {
    /// Load data usage cache from backend storage
    ///
    /// `store` is passed as `&dyn Any` so the trait stays storage-agnostic;
    /// implementations downcast to their concrete store type.
    async fn load(store: &dyn std::any::Any, name: &str) -> Result<Self, Box<dyn std::error::Error + Send + Sync>>
    where
        Self: Sized;
    /// Save data usage cache to backend storage
    async fn save(&self, name: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>>;
}
// Helper structs and functions for cache operations
#[derive(Default, Clone)]
struct Inner {
    // Recursive object count below `path` (filled from `size_recursive`
    // when `add` records a leaf entry).
    objects: usize,
    // Hashed cache key of the entry this record describes.
    path: DataUsageHash,
}
/// Collect compaction candidates below `path`.
///
/// Walks the tree rooted at `path`: every leaf entry (an entry with no
/// children) is recorded together with its recursive object count so
/// callers can order leaves for compaction.
fn add(data_usage_cache: &DataUsageCache, path: &DataUsageHash, leaves: &mut Vec<Inner>) {
    let e = match data_usage_cache.cache.get(&path.key()) {
        Some(e) => e,
        None => return,
    };
    if e.children.is_empty() {
        // Leaf: record it with its recursive size.
        let sz = data_usage_cache.size_recursive(&path.key()).unwrap_or_default();
        leaves.push(Inner {
            objects: sz.objects,
            path: path.clone(),
        });
        return;
    }
    // Interior node: descend into each child. The previous version returned
    // early whenever children existed, which made this recursion unreachable
    // and collected at most the starting entry itself.
    for ch in e.children.iter() {
        add(data_usage_cache, &DataUsageHash(ch.clone()), leaves);
    }
}
/// Record every cache key reachable from `entry` into `found`,
/// descending recursively through children present in the cache.
fn mark(duc: &DataUsageCache, entry: &DataUsageEntry, found: &mut HashSet<String>) {
    entry.children.iter().for_each(|key| {
        found.insert(key.to_string());
        if let Some(child) = duc.cache.get(key) {
            mark(duc, child, found);
        }
    });
}
/// Hash a path for data usage caching: the "hash" is the lexically
/// cleaned path string itself.
pub fn hash_path(data: &str) -> DataUsageHash {
    let cleaned = Path::new(data).clean();
    DataUsageHash(cleaned.to_string_lossy().into_owned())
}
impl DataUsageInfo {
/// Create a new DataUsageInfo
///
/// Equivalent to `DataUsageInfo::default()`: all counters zeroed and
/// all maps empty.
pub fn new() -> Self {
    Self::default()
}
/// Add object metadata to data usage statistics
///
/// Kept for backward compatibility; it assumes exactly one version per
/// object. For accurate version counting use `add_object_from_file_meta`
/// instead. Paths without a recognizable bucket are silently ignored.
pub fn add_object(&mut self, object_path: &str, meta_object: &rustfs_filemeta::MetaObject) {
    // Map a size in bytes to its histogram bucket label. Shared by both
    // branches below so the first object of a bucket is classified the
    // same way as subsequent ones (previously the new-bucket branch
    // always seeded "0-1KB" regardless of size).
    fn size_range_label(size: u64) -> &'static str {
        const KB: u64 = 1024;
        const MB: u64 = 1024 * KB;
        const GB: u64 = 1024 * MB;
        if size < KB {
            "0-1KB"
        } else if size < MB {
            "1KB-1MB"
        } else if size < 10 * MB {
            "1MB-10MB"
        } else if size < 100 * MB {
            "10MB-100MB"
        } else if size < GB {
            "100MB-1GB"
        } else {
            // Also covers u64::MAX, which the old range table missed.
            "1GB+"
        }
    }

    let bucket_name = match self.extract_bucket_from_path(object_path) {
        Ok(name) => name,
        Err(_) => return,
    };
    let total_size = meta_object.size as u64;
    let range_name = size_range_label(total_size);

    if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
        // Update existing bucket statistics.
        bucket_usage.size += total_size;
        bucket_usage.objects_count += 1;
        bucket_usage.versions_count += 1; // Simplified: assume 1 version per object
        *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
        // Update version histogram (simplified - count as single version)
        *bucket_usage
            .object_versions_histogram
            .entry("SINGLE_VERSION".to_string())
            .or_insert(0) += 1;
    } else {
        // First object seen for this bucket: create its usage record.
        let mut bucket_usage = BucketUsageInfo {
            size: total_size,
            objects_count: 1,
            versions_count: 1,
            ..Default::default()
        };
        bucket_usage.object_size_histogram.insert(range_name.to_string(), 1);
        bucket_usage.object_versions_histogram.insert("SINGLE_VERSION".to_string(), 1);
        self.buckets_usage.insert(bucket_name, bucket_usage);
    }

    // Update global statistics
    self.objects_total_size += total_size;
    self.objects_total_count += 1;
    self.versions_total_count += 1;
}
/// Add object from FileMeta for accurate version counting
pub fn add_object_from_file_meta(&mut self, object_path: &str, file_meta: &rustfs_filemeta::FileMeta) {
let bucket_name = match self.extract_bucket_from_path(object_path) {
Ok(name) => name,
Err(_) => return,
};
// Calculate accurate statistics from all versions
let mut total_size = 0u64;
let mut versions_count = 0u64;
let mut delete_markers_count = 0u64;
let mut latest_object_size = 0u64;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/metrics.rs | crates/common/src/metrics.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::last_minute::{AccElem, LastMinuteLatency};
use chrono::{DateTime, Utc};
use rustfs_madmin::metrics::ScannerMetrics as M_ScannerMetrics;
use std::{
collections::HashMap,
fmt::Display,
future::Future,
pin::Pin,
sync::{
Arc, OnceLock,
atomic::{AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::{Mutex, RwLock};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IlmAction {
NoneAction = 0,
DeleteAction,
DeleteVersionAction,
TransitionAction,
TransitionVersionAction,
DeleteRestoredAction,
DeleteRestoredVersionAction,
DeleteAllVersionsAction,
DelMarkerDeleteAllVersionsAction,
ActionCount,
}
impl IlmAction {
pub fn delete_restored(&self) -> bool {
*self == Self::DeleteRestoredAction || *self == Self::DeleteRestoredVersionAction
}
pub fn delete_versioned(&self) -> bool {
*self == Self::DeleteVersionAction || *self == Self::DeleteRestoredVersionAction
}
pub fn delete_all(&self) -> bool {
*self == Self::DeleteAllVersionsAction || *self == Self::DelMarkerDeleteAllVersionsAction
}
pub fn delete(&self) -> bool {
if self.delete_restored() {
return true;
}
*self == Self::DeleteVersionAction
|| *self == Self::DeleteAction
|| *self == Self::DeleteAllVersionsAction
|| *self == Self::DelMarkerDeleteAllVersionsAction
}
}
impl Display for IlmAction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{self:?}")
}
}
pub static GLOBAL_METRICS: OnceLock<Arc<Metrics>> = OnceLock::new();

/// Return the process-wide scanner metrics registry, creating it on
/// first use.
pub fn global_metrics() -> &'static Arc<Metrics> {
    GLOBAL_METRICS.get_or_init(|| Arc::new(Metrics::default()))
}
#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub enum Metric {
    // START Realtime metrics, that only records
    // last minute latencies and total operation count.
    ReadMetadata = 0,
    CheckMissing,
    SaveUsage,
    ApplyAll,
    ApplyVersion,
    TierObjSweep,
    HealCheck,
    Ilm,
    CheckReplication,
    Yield,
    CleanAbandoned,
    ApplyNonCurrent,
    HealAbandonedVersion,
    // START Trace metrics:
    StartTrace,
    ScanObject, // Scan object. All operations included.
    HealAbandonedObject,
    // END realtime metrics:
    LastRealtime,
    // Trace only metrics:
    ScanFolder,      // Scan a folder on disk, recursively.
    ScanCycle,       // Full cycle, cluster global.
    ScanBucketDrive, // Single bucket on one drive.
    CompactFolder,   // Folder compacted.
    // Must be last:
    Last,
}
impl Metric {
    /// Snake-case name used as the metric key in reports.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::ReadMetadata => "read_metadata",
            Self::CheckMissing => "check_missing",
            Self::SaveUsage => "save_usage",
            Self::ApplyAll => "apply_all",
            Self::ApplyVersion => "apply_version",
            Self::TierObjSweep => "tier_obj_sweep",
            Self::HealCheck => "heal_check",
            Self::Ilm => "ilm",
            Self::CheckReplication => "check_replication",
            Self::Yield => "yield",
            Self::CleanAbandoned => "clean_abandoned",
            Self::ApplyNonCurrent => "apply_non_current",
            Self::HealAbandonedVersion => "heal_abandoned_version",
            Self::StartTrace => "start_trace",
            Self::ScanObject => "scan_object",
            Self::HealAbandonedObject => "heal_abandoned_object",
            Self::LastRealtime => "last_realtime",
            Self::ScanFolder => "scan_folder",
            Self::ScanCycle => "scan_cycle",
            Self::ScanBucketDrive => "scan_bucket_drive",
            Self::CompactFolder => "compact_folder",
            Self::Last => "last",
        }
    }
    /// Convert a discriminant index back into a `Metric` (safe version).
    ///
    /// Returns `None` for the `Last` sentinel and anything beyond it,
    /// matching the old guard (the previous match also carried an
    /// unreachable `21 => Some(Self::Last)` arm, removed here).
    pub fn from_index(index: usize) -> Option<Self> {
        // Variants in discriminant order, excluding the `Last` sentinel.
        // Keep this list in sync with the enum declaration above.
        const ORDERED: [Metric; Metric::Last as usize] = [
            Metric::ReadMetadata,
            Metric::CheckMissing,
            Metric::SaveUsage,
            Metric::ApplyAll,
            Metric::ApplyVersion,
            Metric::TierObjSweep,
            Metric::HealCheck,
            Metric::Ilm,
            Metric::CheckReplication,
            Metric::Yield,
            Metric::CleanAbandoned,
            Metric::ApplyNonCurrent,
            Metric::HealAbandonedVersion,
            Metric::StartTrace,
            Metric::ScanObject,
            Metric::HealAbandonedObject,
            Metric::LastRealtime,
            Metric::ScanFolder,
            Metric::ScanCycle,
            Metric::ScanBucketDrive,
            Metric::CompactFolder,
        ];
        ORDERED.get(index).cloned()
    }
}
/// Thread-safe wrapper for LastMinuteLatency with atomic operations
///
/// Cloning is shallow: clones share the same inner `Arc<Mutex<..>>`, so
/// every handle records into one latency window. `#[derive(Clone)]`
/// generates exactly the Arc-sharing clone the previous hand-written
/// `impl Clone` performed, so the manual impl was redundant.
#[derive(Default, Clone)]
pub struct LockedLastMinuteLatency {
    latency: Arc<Mutex<LastMinuteLatency>>,
}
impl LockedLastMinuteLatency {
    /// Create a wrapper around a fresh, empty latency window.
    pub fn new() -> Self {
        Self {
            latency: Arc::new(Mutex::new(LastMinuteLatency::default())),
        }
    }
    /// Add a duration measurement
    pub async fn add(&self, duration: Duration) {
        self.add_size(duration, 0).await;
    }
    /// Add a duration measurement with size
    ///
    /// NOTE(review): `duration.as_secs()` truncates sub-second latencies
    /// to 0 — confirm `AccElem::total` is intended to be whole seconds.
    pub async fn add_size(&self, duration: Duration, size: u64) {
        let mut latency = self.latency.lock().await;
        // Seconds since the Unix epoch, used as the window bucket key.
        let now = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let elem = AccElem {
            n: 1,
            total: duration.as_secs(),
            size,
        };
        latency.add_all(now, &elem);
    }
    /// Get total accumulated metrics for the last minute
    pub async fn total(&self) -> AccElem {
        let mut latency = self.latency.lock().await;
        latency.get_total()
    }
}
/// Current path tracker for monitoring active scan paths
struct CurrentPathTracker {
    current_path: Arc<RwLock<String>>,
}
impl CurrentPathTracker {
    /// Build a tracker seeded with `initial_path`.
    fn new(initial_path: String) -> Self {
        let current_path = Arc::new(RwLock::new(initial_path));
        Self { current_path }
    }
    /// Replace the currently tracked path.
    async fn update_path(&self, path: String) {
        let mut guard = self.current_path.write().await;
        *guard = path;
    }
    /// Snapshot the currently tracked path.
    async fn get_path(&self) -> String {
        self.current_path.read().await.to_owned()
    }
}
/// Main scanner metrics structure
pub struct Metrics {
    // All fields must be accessed atomically and aligned.
    // One lifetime counter per `Metric` variant, indexed by discriminant.
    operations: Vec<AtomicU64>,
    // Rolling last-minute latency, one slot per realtime metric
    // (indices below `Metric::LastRealtime`).
    latency: Vec<LockedLastMinuteLatency>,
    // One counter per `IlmAction` variant.
    actions: Vec<AtomicU64>,
    // Rolling last-minute latency per ILM action.
    actions_latency: Vec<LockedLastMinuteLatency>,
    // Current paths contains disk -> tracker mappings
    current_paths: Arc<RwLock<HashMap<String, Arc<CurrentPathTracker>>>>,
    // Cycle information
    cycle_info: Arc<RwLock<Option<CurrentCycle>>>,
}
// This is a placeholder. We'll need to define this struct.
#[derive(Clone, Debug)]
pub struct CurrentCycle {
    // Identifier of the cycle currently running (reported as
    // `current_cycle` in `Metrics::report`).
    pub current: u64,
    // Completion timestamps of previous cycles.
    pub cycle_completed: Vec<DateTime<Utc>>,
    // When the current cycle started.
    pub started: DateTime<Utc>,
}
impl Metrics {
pub fn new() -> Self {
let operations = (0..Metric::Last as usize).map(|_| AtomicU64::new(0)).collect();
let latency = (0..Metric::LastRealtime as usize)
.map(|_| LockedLastMinuteLatency::new())
.collect();
Self {
operations,
latency,
actions: (0..IlmAction::ActionCount as usize).map(|_| AtomicU64::new(0)).collect(),
actions_latency: vec![LockedLastMinuteLatency::default(); IlmAction::ActionCount as usize],
current_paths: Arc::new(RwLock::new(HashMap::new())),
cycle_info: Arc::new(RwLock::new(None)),
}
}
/// Log scanner action with custom metadata - compatible with existing usage
pub fn log(metric: Metric) -> impl Fn(&HashMap<String, String>) {
let metric = metric as usize;
let start_time = SystemTime::now();
move |_custom: &HashMap<String, String>| {
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
// Update operation count
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
// Update latency for realtime metrics (spawn async task for this)
if (metric) < Metric::LastRealtime as usize {
let metric_index = metric;
tokio::spawn(async move {
global_metrics().latency[metric_index].add(duration).await;
});
}
// Log trace metrics
if metric as u8 > Metric::StartTrace as u8 {
//debug!(metric = metric.as_str(), duration_ms = duration.as_millis(), "Scanner trace metric");
}
}
}
/// Time scanner action with size - returns function that takes size
pub fn time_size(metric: Metric) -> impl Fn(u64) {
let metric = metric as usize;
let start_time = SystemTime::now();
move |size: u64| {
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
// Update operation count
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
// Update latency for realtime metrics with size (spawn async task)
if (metric) < Metric::LastRealtime as usize {
let metric_index = metric;
tokio::spawn(async move {
global_metrics().latency[metric_index].add_size(duration, size).await;
});
}
}
}
/// Time a scanner action - returns a closure to call when done
pub fn time(metric: Metric) -> impl Fn() {
let metric = metric as usize;
let start_time = SystemTime::now();
move || {
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
// Update operation count
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
// Update latency for realtime metrics (spawn async task)
if (metric) < Metric::LastRealtime as usize {
let metric_index = metric;
tokio::spawn(async move {
global_metrics().latency[metric_index].add(duration).await;
});
}
}
}
/// Time N scanner actions - returns function that takes count, then returns completion function
pub fn time_n(metric: Metric) -> Box<dyn Fn(usize) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
let metric = metric as usize;
let start_time = SystemTime::now();
Box::new(move |count: usize| {
Box::new(move || {
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
// Update operation count
global_metrics().operations[metric].fetch_add(count as u64, Ordering::Relaxed);
// Update latency for realtime metrics (spawn async task)
if (metric) < Metric::LastRealtime as usize {
let metric_index = metric;
tokio::spawn(async move {
global_metrics().latency[metric_index].add(duration).await;
});
}
})
})
}
/// Time ILM action with versions - returns function that takes versions, then returns completion function
pub fn time_ilm(a: IlmAction) -> Box<dyn Fn(u64) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
let a_clone = a as usize;
if a_clone == IlmAction::NoneAction as usize || a_clone >= IlmAction::ActionCount as usize {
return Box::new(move |_: u64| Box::new(move || {}));
}
let start = SystemTime::now();
Box::new(move |versions: u64| {
Box::new(move || {
let duration = SystemTime::now().duration_since(start).unwrap_or(Duration::from_secs(0));
tokio::spawn(async move {
global_metrics().actions[a_clone].fetch_add(versions, Ordering::Relaxed);
global_metrics().actions_latency[a_clone].add(duration).await;
});
})
})
}
/// Increment time with specific duration
pub async fn inc_time(metric: Metric, duration: Duration) {
let metric = metric as usize;
// Update operation count
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
// Update latency for realtime metrics
if (metric) < Metric::LastRealtime as usize {
global_metrics().latency[metric].add(duration).await;
}
}
/// Get lifetime operation count for a metric
pub fn lifetime(&self, metric: Metric) -> u64 {
let metric = metric as usize;
if (metric) >= Metric::Last as usize {
return 0;
}
self.operations[metric].load(Ordering::Relaxed)
}
/// Get last minute statistics for a metric
pub async fn last_minute(&self, metric: Metric) -> AccElem {
let metric = metric as usize;
if (metric) >= Metric::LastRealtime as usize {
return AccElem::default();
}
self.latency[metric].total().await
}
/// Set current cycle information
pub async fn set_cycle(&self, cycle: Option<CurrentCycle>) {
*self.cycle_info.write().await = cycle;
}
/// Get current cycle information
pub async fn get_cycle(&self) -> Option<CurrentCycle> {
self.cycle_info.read().await.clone()
}
/// Get current active paths
pub async fn get_current_paths(&self) -> Vec<String> {
let mut result = Vec::new();
let paths = self.current_paths.read().await;
for (disk, tracker) in paths.iter() {
let path = tracker.get_path().await;
result.push(format!("{disk}/{path}"));
}
result
}
/// Get number of active drives
pub async fn active_drives(&self) -> usize {
self.current_paths.read().await.len()
}
/// Generate metrics report
pub async fn report(&self) -> M_ScannerMetrics {
let mut metrics = M_ScannerMetrics::default();
// Set cycle information
if let Some(cycle) = self.get_cycle().await {
metrics.current_cycle = cycle.current;
metrics.cycles_completed_at = cycle.cycle_completed;
metrics.current_started = cycle.started;
}
// Replace default start time with global init time if it's the placeholder
if let Some(init_time) = crate::get_global_init_time().await {
metrics.current_started = init_time;
}
metrics.collected_at = Utc::now();
metrics.active_paths = self.get_current_paths().await;
// Lifetime operations
for i in 0..Metric::Last as usize {
let count = self.operations[i].load(Ordering::Relaxed);
if count > 0
&& let Some(metric) = Metric::from_index(i)
{
metrics.life_time_ops.insert(metric.as_str().to_string(), count);
}
}
// Last minute statistics for realtime metrics
for i in 0..Metric::LastRealtime as usize {
let last_min = self.latency[i].total().await;
if last_min.n > 0
&& let Some(_metric) = Metric::from_index(i)
{
// Convert to madmin TimedAction format if needed
// This would require implementing the conversion
}
}
metrics
}
}
// Type aliases for compatibility with existing code
/// Async callback invoked with each newly scanned path (see `current_path_updater`).
pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;
/// Async callback invoked when scanning of a disk finishes; deregisters its tracker.
pub type CloseDiskFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;
/// Create a current path updater for tracking scan progress
///
/// Returns an update closure (records the path currently scanned on
/// `disk`) and a close closure (removes the disk's tracker from the
/// global metrics map).
pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn, CloseDiskFn) {
    let tracker = Arc::new(CurrentPathTracker::new(initial.to_string()));
    let disk_name = disk.to_string();
    // Store the tracker in global metrics
    // NOTE(review): registration happens on a spawned task, so a caller
    // that queries get_current_paths() immediately afterwards may not yet
    // see this disk — confirm that is acceptable.
    let tracker_clone = Arc::clone(&tracker);
    let disk_clone = disk_name.clone();
    tokio::spawn(async move {
        global_metrics().current_paths.write().await.insert(disk_clone, tracker_clone);
    });
    let update_fn = {
        let tracker = Arc::clone(&tracker);
        Arc::new(move |path: &str| -> Pin<Box<dyn Future<Output = ()> + Send>> {
            // Clone per call so the returned future is 'static and Send.
            let tracker = Arc::clone(&tracker);
            let path = path.to_string();
            Box::pin(async move {
                tracker.update_path(path).await;
            })
        })
    };
    let done_fn = {
        let disk_name = disk_name.clone();
        Arc::new(move || -> Pin<Box<dyn Future<Output = ()> + Send>> {
            let disk_name = disk_name.clone();
            Box::pin(async move {
                global_metrics().current_paths.write().await.remove(&disk_name);
            })
        })
    };
    (update_fn, done_fn)
}
impl Default for Metrics {
    /// Equivalent to [`Metrics::new`]: zeroed counters, empty windows.
    fn default() -> Self {
        Self::new()
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/common/src/heal_channel.rs | crates/common/src/heal_channel.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use s3s::dto::{BucketLifecycleConfiguration, ExpirationStatus, LifecycleRule, ReplicationConfiguration, ReplicationRuleStatus};
use serde::{Deserialize, Serialize};
use std::{
fmt::{self, Display},
sync::OnceLock,
};
use tokio::sync::{broadcast, mpsc};
use uuid::Uuid;
// Presumably controls whether dangling data found during heal is removed
// — confirm semantics at the use sites.
pub const HEAL_DELETE_DANGLING: bool = true;
// Bucket name reserved for RustFS internal use.
pub const RUSTFS_RESERVED_BUCKET: &str = "rustfs";
// Path form of the reserved bucket.
pub const RUSTFS_RESERVED_BUCKET_PATH: &str = "/rustfs";
/// Category of the item a heal task operates on; see `to_str` for the
/// wire/display names.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum HealItemType {
    Metadata,
    Bucket,
    BucketMetadata,
    Object,
}
impl HealItemType {
pub fn to_str(&self) -> &str {
match self {
HealItemType::Metadata => "metadata",
HealItemType::Bucket => "bucket",
HealItemType::BucketMetadata => "bucket-metadata",
HealItemType::Object => "object",
}
}
}
impl Display for HealItemType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_str())
}
}
/// Health state of a drive as reported by heal operations; see `to_str`
/// for the canonical names.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum DriveState {
    Ok,
    Offline,
    Corrupt,
    Missing,
    PermissionDenied,
    Faulty,
    RootMount,
    Unknown,
    Unformatted, // only returned by disk
}
impl DriveState {
pub fn to_str(&self) -> &str {
match self {
DriveState::Ok => "ok",
DriveState::Offline => "offline",
DriveState::Corrupt => "corrupt",
DriveState::Missing => "missing",
DriveState::PermissionDenied => "permission-denied",
DriveState::Faulty => "faulty",
DriveState::RootMount => "root-mount",
DriveState::Unknown => "unknown",
DriveState::Unformatted => "unformatted",
}
}
}
impl Display for DriveState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_str())
}
}
/// Scan depth used during healing.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(u8)]
pub enum HealScanMode {
    Unknown = 0,
    #[default]
    Normal = 1,
    Deep = 2,
}
impl Serialize for HealScanMode {
    // Serialize as the raw u8 discriminant, matching the numeric forms
    // the Deserialize impl below accepts.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_u8(*self as u8)
    }
}
impl<'de> Deserialize<'de> for HealScanMode {
    /// Accepts integers (0-2), numeric strings ("1"), and named strings
    /// ("normal", "Deep"), since values may arrive URL-encoded as well
    /// as from binary formats.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct HealScanModeVisitor;
        impl<'de> serde::de::Visitor<'de> for HealScanModeVisitor {
            type Value = HealScanMode;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("an integer between 0 and 2")
            }
            // Canonical path: the discriminant value itself.
            fn visit_u8<E>(self, value: u8) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                match value {
                    0 => Ok(HealScanMode::Unknown),
                    1 => Ok(HealScanMode::Normal),
                    2 => Ok(HealScanMode::Deep),
                    _ => Err(E::custom(format!("invalid HealScanMode value: {value}"))),
                }
            }
            // Wider integers funnel into visit_u8 after a range check.
            fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                if value > u8::MAX as u64 {
                    return Err(E::custom(format!("HealScanMode value too large: {value}")));
                }
                self.visit_u8(value as u8)
            }
            fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                if value < 0 || value > u8::MAX as i64 {
                    return Err(E::custom(format!("invalid HealScanMode value: {value}")));
                }
                self.visit_u8(value as u8)
            }
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // Try parsing as number string first (for URL-encoded values)
                if let Ok(num) = value.parse::<u8>() {
                    return self.visit_u8(num);
                }
                // Try parsing as named string
                match value {
                    "Unknown" | "unknown" => Ok(HealScanMode::Unknown),
                    "Normal" | "normal" => Ok(HealScanMode::Normal),
                    "Deep" | "deep" => Ok(HealScanMode::Deep),
                    _ => Err(E::custom(format!("invalid HealScanMode string: {value}"))),
                }
            }
        }
        deserializer.deserialize_any(HealScanModeVisitor)
    }
}
/// Options controlling a heal operation; serde renames match the
/// camelCase wire format.
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
pub struct HealOpts {
    pub recursive: bool,
    #[serde(rename = "dryRun")]
    pub dry_run: bool,
    pub remove: bool,
    pub recreate: bool,
    #[serde(rename = "scanMode")]
    pub scan_mode: HealScanMode,
    #[serde(rename = "updateParity")]
    pub update_parity: bool,
    #[serde(rename = "nolock")]
    pub no_lock: bool,
    #[serde(rename = "pool", default)]
    pub pool: Option<usize>,
    #[serde(rename = "set", default)]
    pub set: Option<usize>,
}
/// Heal channel command type
#[derive(Debug, Clone)]
pub enum HealChannelCommand {
    /// Start a new heal task
    Start(HealChannelRequest),
    /// Query heal task status
    Query { heal_path: String, client_token: String },
    /// Cancel heal task
    Cancel { heal_path: String },
}
/// Heal request from admin to ahm
#[derive(Debug, Clone, Default)]
pub struct HealChannelRequest {
    /// Unique request ID
    pub id: String,
    /// Disk ID for heal disk/erasure set task
    pub disk: Option<String>,
    /// Bucket name
    pub bucket: String,
    /// Object prefix (optional)
    pub object_prefix: Option<String>,
    /// Force start heal
    pub force_start: bool,
    /// Priority
    pub priority: HealChannelPriority,
    /// Pool index (optional)
    pub pool_index: Option<usize>,
    /// Set index (optional)
    pub set_index: Option<usize>,
    /// Scan mode (optional)
    pub scan_mode: Option<HealScanMode>,
    /// Whether to remove corrupted data
    pub remove_corrupted: Option<bool>,
    /// Whether to recreate missing data
    pub recreate_missing: Option<bool>,
    /// Whether to update parity
    pub update_parity: Option<bool>,
    /// Whether to recursively process
    pub recursive: Option<bool>,
    /// Whether to dry run
    pub dry_run: Option<bool>,
    /// Timeout in seconds (optional)
    pub timeout_seconds: Option<u64>,
}
/// Heal response from ahm to admin
#[derive(Debug, Clone)]
pub struct HealChannelResponse {
    /// Request ID
    pub request_id: String,
    /// Success status
    pub success: bool,
    /// Response data (if successful)
    pub data: Option<Vec<u8>>,
    /// Error message (if failed)
    pub error: Option<String>,
}
/// Heal priority
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum HealChannelPriority {
    /// Low priority
    Low,
    /// Normal priority
    #[default]
    Normal,
    /// High priority
    High,
    /// Critical priority
    Critical,
}
/// Heal channel sender
pub type HealChannelSender = mpsc::UnboundedSender<HealChannelCommand>;
/// Heal channel receiver
pub type HealChannelReceiver = mpsc::UnboundedReceiver<HealChannelCommand>;
/// Global heal channel sender, set once by `init_heal_channel`.
static GLOBAL_HEAL_CHANNEL_SENDER: OnceLock<HealChannelSender> = OnceLock::new();
// Broadcast side: responses fan out to every subscriber.
type HealResponseSender = broadcast::Sender<HealChannelResponse>;
/// Global heal response broadcaster
static GLOBAL_HEAL_RESPONSE_SENDER: OnceLock<HealResponseSender> = OnceLock::new();
/// Initialize global heal channel
///
/// Stores the sending half in the global and returns the receiving half
/// to the caller.
///
/// # Panics
/// Panics if called more than once: the global sender can only be set once.
pub fn init_heal_channel() -> HealChannelReceiver {
    let (tx, rx) = mpsc::unbounded_channel();
    GLOBAL_HEAL_CHANNEL_SENDER
        .set(tx)
        .expect("Heal channel sender already initialized");
    rx
}
/// Get global heal channel sender
///
/// Returns `None` when `init_heal_channel` has not been called yet.
pub fn get_heal_channel_sender() -> Option<&'static HealChannelSender> {
    GLOBAL_HEAL_CHANNEL_SENDER.get()
}
/// Send heal command through global channel
///
/// Fails with a descriptive string when the channel has not been
/// initialized or when the receiving side has been dropped.
pub async fn send_heal_command(command: HealChannelCommand) -> Result<(), String> {
    match get_heal_channel_sender() {
        Some(sender) => sender
            .send(command)
            .map_err(|e| format!("Failed to send heal command: {e}")),
        None => Err("Heal channel not initialized".to_string()),
    }
}
// Lazily create the broadcast channel; capacity 1024 bounds how many
// responses a slow subscriber may buffer before lagging.
fn heal_response_sender() -> &'static HealResponseSender {
    GLOBAL_HEAL_RESPONSE_SENDER.get_or_init(|| {
        let (tx, _rx) = broadcast::channel(1024);
        tx
    })
}
/// Publish a heal response to subscribers.
///
/// Errors only when no subscriber is currently registered (tokio
/// broadcast send semantics).
pub fn publish_heal_response(response: HealChannelResponse) -> Result<(), broadcast::error::SendError<HealChannelResponse>> {
    heal_response_sender().send(response).map(|_| ())
}
/// Subscribe to heal responses.
pub fn subscribe_heal_responses() -> broadcast::Receiver<HealChannelResponse> {
    heal_response_sender().subscribe()
}
/// Send heal start request
///
/// Convenience wrapper around `send_heal_command` with a `Start` command.
pub async fn send_heal_request(request: HealChannelRequest) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Start(request)).await
}
/// Send heal query request
pub async fn query_heal_status(heal_path: String, client_token: String) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Query { heal_path, client_token }).await
}
/// Send heal cancel request
pub async fn cancel_heal_task(heal_path: String) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Cancel { heal_path }).await
}
/// Create a new heal request
///
/// Generates a fresh UUID for the request id; `priority` defaults to
/// `Normal` when `None`. All remaining fields take their `Default`
/// values, matching the style of `create_heal_request_with_options`
/// (the previous version spelled out every default field by hand).
pub fn create_heal_request(
    bucket: String,
    object_prefix: Option<String>,
    force_start: bool,
    priority: Option<HealChannelPriority>,
) -> HealChannelRequest {
    HealChannelRequest {
        id: Uuid::new_v4().to_string(),
        bucket,
        object_prefix,
        force_start,
        priority: priority.unwrap_or_default(),
        ..Default::default()
    }
}
/// Create a new heal request with advanced options
///
/// Like `create_heal_request`, but additionally pins the heal to a pool
/// and/or erasure set; remaining fields take their `Default` values.
pub fn create_heal_request_with_options(
    bucket: String,
    object_prefix: Option<String>,
    force_start: bool,
    priority: Option<HealChannelPriority>,
    pool_index: Option<usize>,
    set_index: Option<usize>,
) -> HealChannelRequest {
    HealChannelRequest {
        id: Uuid::new_v4().to_string(),
        bucket,
        object_prefix,
        force_start,
        priority: priority.unwrap_or_default(),
        pool_index,
        set_index,
        ..Default::default()
    }
}
/// Create a heal response
///
/// Simple constructor pairing a request id with its outcome; `data`
/// carries the payload on success, `error` the message on failure.
pub fn create_heal_response(
    request_id: String,
    success: bool,
    data: Option<Vec<u8>>,
    error: Option<String>,
) -> HealChannelResponse {
    HealChannelResponse {
        request_id,
        success,
        data,
        error,
    }
}
fn lc_get_prefix(rule: &LifecycleRule) -> String {
if let Some(p) = &rule.prefix {
return p.to_string();
} else if let Some(filter) = &rule.filter {
if let Some(p) = &filter.prefix {
return p.to_string();
} else if let Some(and) = &filter.and
&& let Some(p) = &and.prefix
{
return p.to_string();
}
}
"".into()
}
/// Returns true when `config` contains at least one enabled lifecycle
/// rule whose prefix overlaps `prefix` and that can still take effect
/// (expiration, transition, or noncurrent-version handling configured).
pub fn lc_has_active_rules(config: &BucketLifecycleConfiguration, prefix: &str) -> bool {
    if config.rules.is_empty() {
        return false;
    }
    for rule in config.rules.iter() {
        if rule.status == ExpirationStatus::from_static(ExpirationStatus::DISABLED) {
            continue;
        }
        // Skip rules whose prefix shares no ancestry with the tested prefix.
        let rule_prefix = lc_get_prefix(rule);
        if !prefix.is_empty() && !rule_prefix.is_empty() && !prefix.starts_with(&rule_prefix) && !rule_prefix.starts_with(prefix)
        {
            continue;
        }
        // Noncurrent-version expiration configured with a positive threshold.
        if let Some(e) = &rule.noncurrent_version_expiration {
            if let Some(true) = e.noncurrent_days.map(|d| d > 0) {
                return true;
            }
            if let Some(true) = e.newer_noncurrent_versions.map(|d| d > 0) {
                return true;
            }
        }
        if rule.noncurrent_version_transitions.is_some() {
            return true;
        }
        // Any expiration trigger: by date, by days, or delete-marker cleanup.
        if let Some(true) = rule.expiration.as_ref().map(|e| e.date.is_some()) {
            return true;
        }
        if let Some(true) = rule.expiration.as_ref().map(|e| e.days.is_some()) {
            return true;
        }
        if let Some(Some(true)) = rule.expiration.as_ref().map(|e| e.expired_object_delete_marker) {
            return true;
        }
        if let Some(true) = rule.transitions.as_ref().map(|t| !t.is_empty()) {
            return true;
        }
        // NOTE(review): this also fires for Some(empty) transitions, making
        // the non-empty check above redundant — confirm whether an empty
        // transitions list is meant to count as active.
        if rule.transitions.is_some() {
            return true;
        }
    }
    false
}
/// Returns true when `config` contains at least one enabled replication
/// rule matching `prefix`. With `recursive` set, a rule also matches when
/// its prefix lies below the tested prefix.
pub fn rep_has_active_rules(config: &ReplicationConfiguration, prefix: &str, recursive: bool) -> bool {
    if config.rules.is_empty() {
        return false;
    }
    for rule in config.rules.iter() {
        if rule
            .status
            .eq(&ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED))
        {
            continue;
        }
        // Only rules carrying a non-empty filter prefix constrain matching;
        // rules without one match any prefix.
        if !prefix.is_empty()
            && let Some(filter) = &rule.filter
            && let Some(r_prefix) = &filter.prefix
            && !r_prefix.is_empty()
        {
            // incoming prefix must be in rule prefix
            if !recursive && !prefix.starts_with(r_prefix) {
                continue;
            }
            // If recursive, we can skip this rule if it doesn't match the tested prefix or level below prefix
            // does not match
            if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
                continue;
            }
        }
        return true;
    }
    false
}
pub async fn send_heal_disk(set_disk_id: String, priority: Option<HealChannelPriority>) -> Result<(), String> {
let req = HealChannelRequest {
id: Uuid::new_v4().to_string(),
bucket: "".to_string(),
object_prefix: None,
disk: Some(set_disk_id),
force_start: false,
priority: priority.unwrap_or_default(),
pool_index: None,
set_index: None,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
};
send_heal_request(req).await
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn heal_response_broadcast_reaches_subscriber() {
let mut receiver = subscribe_heal_responses();
let response = create_heal_response("req-1".to_string(), true, None, None);
publish_heal_response(response.clone()).expect("publish should succeed");
let received = receiver.recv().await.expect("should receive heal response");
assert_eq!(received.request_id, response.request_id);
assert!(received.success);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/credentials/src/lib.rs | crates/credentials/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod constants;
mod credentials;
pub use constants::*;
pub use credentials::*;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/credentials/src/credentials.rs | crates/credentials/src/credentials.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io::Error;
use std::sync::OnceLock;
use time::OffsetDateTime;
/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
/// Global gRPC authentication token
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * `Result<(), Box<Credentials>>` - Ok if successful, Err with existing credentials if already initialized
///
/// # Panics
/// This function panics if automatic credential generation fails when `ak` or `sk`
/// are `None`, for example if the random number generator fails while calling
/// `gen_access_key` or `gen_secret_key`.
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));
let cred = Credentials {
access_key: ak,
secret_key: sk,
..Default::default()
};
GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
Box::new(Credentials {
access_key: e.access_key.clone(),
..Default::default()
})
})
}
/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
GLOBAL_ACTIVE_CRED.get().cloned()
}
/// Get the global secret key
///
/// # Returns
/// * `Option<String>` - The global secret key, if set
///
pub fn get_global_secret_key_opt() -> Option<String> {
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
}
/// Get the global secret key
///
/// # Returns
/// * `String` - The global secret key, or empty string if not set
///
pub fn get_global_secret_key() -> String {
GLOBAL_ACTIVE_CRED
.get()
.map(|cred| cred.secret_key.clone())
.unwrap_or_default()
}
/// Get the global access key
///
/// # Returns
/// * `Option<String>` - The global access key, if set
///
pub fn get_global_access_key_opt() -> Option<String> {
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
}
/// Get the global access key
///
/// # Returns
/// * `String` - The global access key, or empty string if not set
///
pub fn get_global_access_key() -> String {
GLOBAL_ACTIVE_CRED
.get()
.map(|cred| cred.access_key.clone())
.unwrap_or_default()
}
/// Generates a random access key of the specified length.
///
/// # Arguments
/// * `length` - The length of the access key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 3.
///
/// Examples
/// ```no_run
/// use rustfs_credentials::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
///
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
const ALPHA_NUMERIC_TABLE: [char; 36] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
];
if length < 3 {
return Err(Error::other("access key length is too short"));
}
let mut result = String::with_capacity(length);
let mut rng = rand::rng();
for _ in 0..length {
result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
}
Ok(result)
}
/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
///
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
use base64_simd::URL_SAFE_NO_PAD;
if length < 8 {
return Err(Error::other("secret key length is too short"));
}
let mut rng = rand::rng();
let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
rng.fill_bytes(&mut key);
let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
let key_str = encoded.replace("/", "+");
Ok(key_str)
}
/// Get the gRPC authentication token from environment variable
///
/// # Returns
/// * `String` - The gRPC authentication token
///
pub fn get_grpc_token() -> String {
GLOBAL_GRPC_AUTH_TOKEN
.get_or_init(|| {
env::var(ENV_GRPC_AUTH_TOKEN)
.unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
})
.clone()
}
/// Credentials structure
///
/// Fields:
/// - access_key: Access key string
/// - secret_key: Secret key string
/// - session_token: Session token string
/// - expiration: Optional expiration time as OffsetDateTime
/// - status: Status string (e.g., "active", "off")
/// - parent_user: Parent user string
/// - groups: Optional list of groups
/// - claims: Optional map of claims
/// - name: Optional name string
/// - description: Optional description string
///
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
pub access_key: String,
pub secret_key: String,
pub session_token: String,
pub expiration: Option<OffsetDateTime>,
pub status: String,
pub parent_user: String,
pub groups: Option<Vec<String>>,
pub claims: Option<HashMap<String, Value>>,
pub name: Option<String>,
pub description: Option<String>,
}
impl Credentials {
pub fn is_expired(&self) -> bool {
if self.expiration.is_none() {
return false;
}
self.expiration
.as_ref()
.map(|e| OffsetDateTime::now_utc() > *e)
.unwrap_or(false)
}
pub fn is_temp(&self) -> bool {
!self.session_token.is_empty() && !self.is_expired()
}
pub fn is_service_account(&self) -> bool {
self.claims
.as_ref()
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
.unwrap_or_default()
}
pub fn is_implied_policy(&self) -> bool {
if self.is_service_account() {
return self
.claims
.as_ref()
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
.unwrap_or_default();
}
false
}
pub fn is_valid(&self) -> bool {
if self.status == "off" {
return false;
}
self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
}
pub fn is_owner(&self) -> bool {
false
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use time::Duration;
#[test]
fn test_credentials_is_expired() {
let mut cred = Credentials::default();
assert!(!cred.is_expired());
cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
assert!(!cred.is_expired());
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
assert!(cred.is_expired());
}
#[test]
fn test_credentials_is_temp() {
let mut cred = Credentials::default();
assert!(!cred.is_temp());
cred.session_token = "token".to_string();
assert!(cred.is_temp());
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
assert!(!cred.is_temp());
}
#[test]
fn test_credentials_is_service_account() {
let mut cred = Credentials::default();
assert!(!cred.is_service_account());
let mut claims = HashMap::new();
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
cred.claims = Some(claims);
cred.parent_user = "parent".to_string();
assert!(cred.is_service_account());
}
#[test]
fn test_credentials_is_implied_policy() {
let mut cred = Credentials::default();
assert!(!cred.is_implied_policy());
let mut claims = HashMap::new();
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
cred.claims = Some(claims);
cred.parent_user = "parent".to_string();
assert!(cred.is_implied_policy());
}
#[test]
fn test_credentials_is_valid() {
let mut cred = Credentials::default();
assert!(!cred.is_valid());
cred.access_key = "abc".to_string();
cred.secret_key = "12345678".to_string();
assert!(cred.is_valid());
cred.status = "off".to_string();
assert!(!cred.is_valid());
}
#[test]
fn test_credentials_is_owner() {
let cred = Credentials::default();
assert!(!cred.is_owner());
}
#[test]
fn test_global_credentials_flow() {
// Since OnceLock can only be set once, we put together all globally related tests
// If it has already been initialized (possibly from other tests), we verify the results directly
if get_global_action_cred().is_none() {
// Verify that the initial state is empty
assert!(get_global_access_key_opt().is_none());
assert_eq!(get_global_access_key(), "");
assert!(get_global_secret_key_opt().is_none());
assert_eq!(get_global_secret_key(), "");
// Initialize
let test_ak = "test_access_key".to_string();
let test_sk = "test_secret_key_123456".to_string();
init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
}
// Verify the state after initialization
let cred = get_global_action_cred().expect("Global credentials should be set");
assert!(!cred.access_key.is_empty());
assert!(!cred.secret_key.is_empty());
assert!(get_global_access_key_opt().is_some());
assert!(!get_global_access_key().is_empty());
assert!(get_global_secret_key_opt().is_some());
assert!(!get_global_secret_key().is_empty());
}
#[test]
fn test_init_global_credentials_auto_gen() {
// If it hasn't already been initialized, the test automatically generates logic
if get_global_action_cred().is_none() {
init_global_action_credentials(None, None).ok();
let ak = get_global_access_key();
let sk = get_global_secret_key();
assert_eq!(ak.len(), 20);
assert_eq!(sk.len(), 32);
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/credentials/src/constants.rs | crates/credentials/src/constants.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "embedded-policy"
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "inherited-policy"
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";
/// IAM Policy Claim Name for Service Account
/// Used to identify the service account policy claim in JWT tokens
/// Example: "sa-policy"
/// Default value: "sa-policy"
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_security_constants() {
// Test security related constants
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// In production environment, access key and secret key should be different
// These are default values, so being the same is acceptable, but should be warned in documentation
println!("Warning: Default access key and secret key are the same. Change them in production!");
}
#[test]
fn test_security_best_practices() {
// Test security best practices
// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");
// Verify that key lengths meet minimum security requirements
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// Check if default credentials contain common insecure patterns
let _insecure_patterns = ["admin", "password", "123456", "default"];
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
// Note: More security check logic can be added here
// For example, check if keys contain insecure patterns
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/checksums/src/lib.rs | crates/checksums/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![allow(clippy::derive_partial_eq_without_eq)]
#![warn(
// missing_docs,
rustdoc::missing_crate_level_docs,
unreachable_pub,
rust_2018_idioms
)]
use crate::error::UnknownChecksumAlgorithmError;
use bytes::Bytes;
use std::{fmt::Debug, str::FromStr};
mod base64;
pub mod error;
pub mod http;
pub const CRC_32_NAME: &str = "crc32";
pub const CRC_32_C_NAME: &str = "crc32c";
pub const CRC_64_NVME_NAME: &str = "crc64nvme";
pub const SHA_1_NAME: &str = "sha1";
pub const SHA_256_NAME: &str = "sha256";
pub const MD5_NAME: &str = "md5";
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[non_exhaustive]
pub enum ChecksumAlgorithm {
#[default]
Crc32,
Crc32c,
#[deprecated]
Md5,
Sha1,
Sha256,
Crc64Nvme,
}
impl FromStr for ChecksumAlgorithm {
type Err = UnknownChecksumAlgorithmError;
fn from_str(checksum_algorithm: &str) -> Result<Self, Self::Err> {
if checksum_algorithm.eq_ignore_ascii_case(CRC_32_NAME) {
Ok(Self::Crc32)
} else if checksum_algorithm.eq_ignore_ascii_case(CRC_32_C_NAME) {
Ok(Self::Crc32c)
} else if checksum_algorithm.eq_ignore_ascii_case(SHA_1_NAME) {
Ok(Self::Sha1)
} else if checksum_algorithm.eq_ignore_ascii_case(SHA_256_NAME) {
Ok(Self::Sha256)
} else if checksum_algorithm.eq_ignore_ascii_case(MD5_NAME) {
// MD5 is now an alias for the default Crc32 since it is deprecated
Ok(Self::Crc32)
} else if checksum_algorithm.eq_ignore_ascii_case(CRC_64_NVME_NAME) {
Ok(Self::Crc64Nvme)
} else {
Err(UnknownChecksumAlgorithmError::new(checksum_algorithm))
}
}
}
impl ChecksumAlgorithm {
pub fn into_impl(self) -> Box<dyn http::HttpChecksum> {
match self {
Self::Crc32 => Box::<Crc32>::default(),
Self::Crc32c => Box::<Crc32c>::default(),
Self::Crc64Nvme => Box::<Crc64Nvme>::default(),
#[allow(deprecated)]
Self::Md5 => Box::<Crc32>::default(),
Self::Sha1 => Box::<Sha1>::default(),
Self::Sha256 => Box::<Sha256>::default(),
}
}
pub fn as_str(&self) -> &'static str {
match self {
Self::Crc32 => CRC_32_NAME,
Self::Crc32c => CRC_32_C_NAME,
Self::Crc64Nvme => CRC_64_NVME_NAME,
#[allow(deprecated)]
Self::Md5 => MD5_NAME,
Self::Sha1 => SHA_1_NAME,
Self::Sha256 => SHA_256_NAME,
}
}
}
pub trait Checksum: Send + Sync {
fn update(&mut self, bytes: &[u8]);
fn finalize(self: Box<Self>) -> Bytes;
fn size(&self) -> u64;
}
#[derive(Debug)]
struct Crc32 {
hasher: crc_fast::Digest,
}
impl Default for Crc32 {
fn default() -> Self {
Self {
hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc),
}
}
}
impl Crc32 {
fn update(&mut self, bytes: &[u8]) {
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
let checksum = self.hasher.finalize() as u32;
Bytes::copy_from_slice(checksum.to_be_bytes().as_slice())
}
fn size() -> u64 {
4
}
}
impl Checksum for Crc32 {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes)
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[derive(Debug)]
struct Crc32c {
hasher: crc_fast::Digest,
}
impl Default for Crc32c {
fn default() -> Self {
Self {
hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi),
}
}
}
impl Crc32c {
fn update(&mut self, bytes: &[u8]) {
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
let checksum = self.hasher.finalize() as u32;
Bytes::copy_from_slice(checksum.to_be_bytes().as_slice())
}
fn size() -> u64 {
4
}
}
impl Checksum for Crc32c {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes)
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[derive(Debug)]
struct Crc64Nvme {
hasher: crc_fast::Digest,
}
impl Default for Crc64Nvme {
fn default() -> Self {
Self {
hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme),
}
}
}
impl Crc64Nvme {
fn update(&mut self, bytes: &[u8]) {
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
Bytes::copy_from_slice(self.hasher.finalize().to_be_bytes().as_slice())
}
fn size() -> u64 {
8
}
}
impl Checksum for Crc64Nvme {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes)
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[derive(Debug, Default)]
struct Sha1 {
hasher: sha1::Sha1,
}
impl Sha1 {
fn update(&mut self, bytes: &[u8]) {
use sha1::Digest;
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
use sha1::Digest;
Bytes::copy_from_slice(self.hasher.finalize().as_slice())
}
fn size() -> u64 {
use sha1::Digest;
sha1::Sha1::output_size() as u64
}
}
impl Checksum for Sha1 {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes)
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[derive(Debug, Default)]
struct Sha256 {
hasher: sha2::Sha256,
}
impl Sha256 {
fn update(&mut self, bytes: &[u8]) {
use sha2::Digest;
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
use sha2::Digest;
Bytes::copy_from_slice(self.hasher.finalize().as_slice())
}
fn size() -> u64 {
use sha2::Digest;
sha2::Sha256::output_size() as u64
}
}
impl Checksum for Sha256 {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes);
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[allow(dead_code)]
#[derive(Debug, Default)]
struct Md5 {
hasher: md5::Md5,
}
impl Md5 {
fn update(&mut self, bytes: &[u8]) {
use md5::Digest;
self.hasher.update(bytes);
}
fn finalize(self) -> Bytes {
use md5::Digest;
Bytes::copy_from_slice(self.hasher.finalize().as_slice())
}
fn size() -> u64 {
use md5::Digest;
md5::Md5::output_size() as u64
}
}
impl Checksum for Md5 {
fn update(&mut self, bytes: &[u8]) {
Self::update(self, bytes)
}
fn finalize(self: Box<Self>) -> Bytes {
Self::finalize(*self)
}
fn size(&self) -> u64 {
Self::size()
}
}
#[cfg(test)]
mod tests {
use super::{
Crc32, Crc32c, Md5, Sha1, Sha256,
http::{CRC_32_C_HEADER_NAME, CRC_32_HEADER_NAME, MD5_HEADER_NAME, SHA_1_HEADER_NAME, SHA_256_HEADER_NAME},
};
use crate::ChecksumAlgorithm;
use crate::http::HttpChecksum;
use crate::base64;
use http::HeaderValue;
use pretty_assertions::assert_eq;
use std::fmt::Write;
const TEST_DATA: &str = r#"test data"#;
fn base64_encoded_checksum_to_hex_string(header_value: &HeaderValue) -> String {
let decoded_checksum = base64::decode(header_value.to_str().unwrap()).unwrap();
let decoded_checksum = decoded_checksum.into_iter().fold(String::new(), |mut acc, byte| {
write!(acc, "{byte:02X?}").expect("string will always be writeable");
acc
});
format!("0x{decoded_checksum}")
}
#[test]
fn test_crc32_checksum() {
let mut checksum = Crc32::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(CRC_32_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0xD308AEB2";
assert_eq!(decoded_checksum, expected_checksum);
}
#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
#[test]
fn test_crc32c_checksum() {
let mut checksum = Crc32c::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(CRC_32_C_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0x3379B4CA";
assert_eq!(decoded_checksum, expected_checksum);
}
#[test]
fn test_crc64nvme_checksum() {
use crate::{Crc64Nvme, http::CRC_64_NVME_HEADER_NAME};
let mut checksum = Crc64Nvme::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(CRC_64_NVME_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0xAECAF3AF9C98A855";
assert_eq!(decoded_checksum, expected_checksum);
}
#[test]
fn test_sha1_checksum() {
let mut checksum = Sha1::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(SHA_1_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0xF48DD853820860816C75D54D0F584DC863327A7C";
assert_eq!(decoded_checksum, expected_checksum);
}
#[test]
fn test_sha256_checksum() {
let mut checksum = Sha256::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(SHA_256_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0x916F0027A575074CE72A331777C3478D6513F786A591BD892DA1A577BF2335F9";
assert_eq!(decoded_checksum, expected_checksum);
}
#[test]
fn test_md5_checksum() {
let mut checksum = Md5::default();
checksum.update(TEST_DATA.as_bytes());
let checksum_result = Box::new(checksum).headers();
let encoded_checksum = checksum_result.get(MD5_HEADER_NAME).unwrap();
let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);
let expected_checksum = "0xEB733A00C0C9D336E65691A37AB54293";
assert_eq!(decoded_checksum, expected_checksum);
}
#[test]
fn test_checksum_algorithm_returns_error_for_unknown() {
let error = "some invalid checksum algorithm"
.parse::<ChecksumAlgorithm>()
.expect_err("it should error");
assert_eq!("some invalid checksum algorithm", error.checksum_algorithm());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/checksums/src/http.rs | crates/checksums/src/http.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::base64;
use http::header::{HeaderMap, HeaderValue};
use crate::Crc64Nvme;
use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, Checksum, Crc32, Crc32c, Md5, SHA_1_NAME, SHA_256_NAME, Sha1, Sha256};
pub const CRC_32_HEADER_NAME: &str = "x-amz-checksum-crc32";
pub const CRC_32_C_HEADER_NAME: &str = "x-amz-checksum-crc32c";
pub const SHA_1_HEADER_NAME: &str = "x-amz-checksum-sha1";
pub const SHA_256_HEADER_NAME: &str = "x-amz-checksum-sha256";
pub const CRC_64_NVME_HEADER_NAME: &str = "x-amz-checksum-crc64nvme";
#[allow(dead_code)]
pub(crate) static MD5_HEADER_NAME: &str = "content-md5";
pub const CHECKSUM_ALGORITHMS_IN_PRIORITY_ORDER: [&str; 5] =
[CRC_64_NVME_NAME, CRC_32_C_NAME, CRC_32_NAME, SHA_1_NAME, SHA_256_NAME];
pub trait HttpChecksum: Checksum + Send + Sync {
fn headers(self: Box<Self>) -> HeaderMap<HeaderValue> {
let mut header_map = HeaderMap::new();
header_map.insert(self.header_name(), self.header_value());
header_map
}
fn header_name(&self) -> &'static str;
fn header_value(self: Box<Self>) -> HeaderValue {
let hash = self.finalize();
HeaderValue::from_str(&base64::encode(&hash[..])).expect("base64 encoded bytes are always valid header values")
}
fn size(&self) -> u64 {
let trailer_name_size_in_bytes = self.header_name().len();
let base64_encoded_checksum_size_in_bytes = base64::encoded_length(Checksum::size(self) as usize);
let size = trailer_name_size_in_bytes + ":".len() + base64_encoded_checksum_size_in_bytes;
size as u64
}
}
impl HttpChecksum for Crc32 {
fn header_name(&self) -> &'static str {
CRC_32_HEADER_NAME
}
}
impl HttpChecksum for Crc32c {
fn header_name(&self) -> &'static str {
CRC_32_C_HEADER_NAME
}
}
impl HttpChecksum for Crc64Nvme {
fn header_name(&self) -> &'static str {
CRC_64_NVME_HEADER_NAME
}
}
impl HttpChecksum for Sha1 {
fn header_name(&self) -> &'static str {
SHA_1_HEADER_NAME
}
}
impl HttpChecksum for Sha256 {
fn header_name(&self) -> &'static str {
SHA_256_HEADER_NAME
}
}
impl HttpChecksum for Md5 {
fn header_name(&self) -> &'static str {
MD5_HEADER_NAME
}
}
#[cfg(test)]
mod tests {
    use crate::base64;
    use bytes::Bytes;
    use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, ChecksumAlgorithm, SHA_1_NAME, SHA_256_NAME};
    use super::HttpChecksum;
    // NOTE(review): the expected trailer sizes below are consistent with
    // `<header name length> + 1 (for ':') + base64(<digest length>)`
    // (e.g. 29 = 20 + 1 + 8 for CRC32, 66 = 21 + 1 + 44 for SHA-256) —
    // presumably how `HttpChecksum::size` is computed; confirm against its impl.
    #[test]
    fn test_trailer_length_of_crc32_checksum_body() {
        let checksum = CRC_32_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 29;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }
    #[test]
    fn test_trailer_value_of_crc32_checksum_body() {
        let checksum = CRC_32_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC32 of an empty string is all zeroes
        let expected_value = Bytes::from_static(b"\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
    #[test]
    fn test_trailer_length_of_crc32c_checksum_body() {
        let checksum = CRC_32_C_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 30;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }
    #[test]
    fn test_trailer_value_of_crc32c_checksum_body() {
        let checksum = CRC_32_C_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC32C of an empty string is all zeroes
        let expected_value = Bytes::from_static(b"\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
    #[test]
    fn test_trailer_length_of_crc64nvme_checksum_body() {
        let checksum = CRC_64_NVME_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 37;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }
    #[test]
    fn test_trailer_value_of_crc64nvme_checksum_body() {
        let checksum = CRC_64_NVME_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC64NVME of an empty string is all zeroes (8-byte digest)
        let expected_value = Bytes::from_static(b"\0\0\0\0\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
    #[test]
    fn test_trailer_length_of_sha1_checksum_body() {
        let checksum = SHA_1_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 48;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }
    #[test]
    fn test_trailer_value_of_sha1_checksum_body() {
        let checksum = SHA_1_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The SHA1 of an empty string is da39a3ee5e6b4b0d3255bfef95601890afd80709
        let expected_value = Bytes::from_static(&[
            0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07,
            0x09,
        ]);
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
    #[test]
    fn test_trailer_length_of_sha256_checksum_body() {
        let checksum = SHA_256_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 66;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }
    #[test]
    fn test_trailer_value_of_sha256_checksum_body() {
        let checksum = SHA_256_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The SHA-256 of an empty string (e3b0c442...b855)
        let expected_value = Bytes::from_static(&[
            0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41,
            0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
        ]);
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/checksums/src/base64.rs | crates/checksums/src/base64.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use base64_simd::STANDARD;
use std::error::Error;
/// Wrapper error returned when base64 decoding fails.
///
/// The underlying `base64_simd` error is exposed through `Error::source`
/// rather than in the `Display` message.
#[derive(Debug)]
pub(crate) struct DecodeError(base64_simd::Error);
impl Error for DecodeError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}
impl std::fmt::Display for DecodeError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "failed to decode base64")
    }
}
/// Decodes standard-alphabet base64 text into raw bytes.
pub(crate) fn decode(input: impl AsRef<str>) -> Result<Vec<u8>, DecodeError> {
    STANDARD.decode_to_vec(input.as_ref()).map_err(DecodeError)
}
/// Encodes raw bytes as standard-alphabet base64 text.
pub(crate) fn encode(input: impl AsRef<[u8]>) -> String {
    STANDARD.encode_to_string(input.as_ref())
}
/// Returns the encoded output length for `length` input bytes.
pub(crate) fn encoded_length(length: usize) -> usize {
    STANDARD.encoded_length(length)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/checksums/src/error.rs | crates/checksums/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::error::Error;
use std::fmt;
/// Error returned when an algorithm name fails to parse into a known
/// checksum algorithm.
#[derive(Debug)]
pub struct UnknownChecksumAlgorithmError {
    // The unrecognized name exactly as supplied by the caller.
    checksum_algorithm: String,
}
impl UnknownChecksumAlgorithmError {
    /// Creates the error from any string-like algorithm name.
    pub(crate) fn new(checksum_algorithm: impl Into<String>) -> Self {
        Self {
            checksum_algorithm: checksum_algorithm.into(),
        }
    }
    /// The algorithm name that failed to parse.
    pub fn checksum_algorithm(&self) -> &str {
        &self.checksum_algorithm
    }
}
impl fmt::Display for UnknownChecksumAlgorithmError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            r#"unknown checksum algorithm "{}", please pass a known algorithm name ("crc32", "crc32c", "sha1", "sha256", "md5")"#,
            self.checksum_algorithm
        )
    }
}
impl Error for UnknownChecksumAlgorithmError {}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/global.rs | crates/audit/src/global.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{AuditEntry, AuditResult, AuditSystem};
use rustfs_ecstore::config::Config;
use std::sync::{Arc, OnceLock};
use tracing::{debug, error, trace, warn};
/// Global audit system instance
static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new();
/// Initialize the global audit system
///
/// Idempotent: the first call creates the `AuditSystem`; every later call
/// returns a clone of the same `Arc`.
pub fn init_audit_system() -> Arc<AuditSystem> {
    AUDIT_SYSTEM.get_or_init(|| Arc::new(AuditSystem::new())).clone()
}
/// Get the global audit system instance
///
/// Returns `None` if the system has not been initialized yet.
pub fn audit_system() -> Option<Arc<AuditSystem>> {
    AUDIT_SYSTEM.get().cloned()
}
/// A helper macro for executing closures if the global audit system is initialized.
/// If not initialized, log a warning and return `Ok(())`.
macro_rules! with_audit_system {
    ($async_closure:expr) => {
        if let Some(system) = audit_system() {
            // NOTE(review): the `(async move { .. }).await` wrapper looks
            // redundant — `$async_closure(system).await` alone should be
            // equivalent; kept as-is since the behavior is identical.
            (async move { $async_closure(system).await }).await
        } else {
            warn!("Audit system not initialized, operation skipped.");
            Ok(())
        }
    };
}
/// Start the global audit system with configuration
///
/// Unlike the other lifecycle helpers, this initializes the global instance
/// on demand before starting it.
pub async fn start_audit_system(config: Config) -> AuditResult<()> {
    let system = init_audit_system();
    system.start(config).await
}
/// Stop the global audit system
pub async fn stop_audit_system() -> AuditResult<()> {
    with_audit_system!(|system: Arc<AuditSystem>| async move { system.close().await })
}
/// Pause the global audit system
pub async fn pause_audit_system() -> AuditResult<()> {
    with_audit_system!(|system: Arc<AuditSystem>| async move { system.pause().await })
}
/// Resume the global audit system
pub async fn resume_audit_system() -> AuditResult<()> {
    with_audit_system!(|system: Arc<AuditSystem>| async move { system.resume().await })
}
/// Dispatch an audit log entry to all targets.
///
/// Entries are silently dropped (returning `Ok(())`) when the system is
/// uninitialized or not currently running; only a running system forwards
/// the entry to its targets.
pub async fn dispatch_audit_log(entry: Arc<AuditEntry>) -> AuditResult<()> {
    // Resolve the global instance first. An uninitialized system is the more
    // significant state, so it is reported at `debug` level.
    let system = match audit_system() {
        Some(system) => system,
        None => {
            // If this should be a hard failure, this could instead return an
            // error such as AuditError::NotInitialized.
            debug!("Audit system not initialized, dropping audit entry.");
            return Ok(());
        }
    };
    if !system.is_running().await {
        // Initialized but not running (e.g. paused): drop the entry quietly,
        // leaving a trace-level breadcrumb for debugging.
        trace!("Audit system is not running, dropping audit entry.");
        return Ok(());
    }
    system.dispatch(entry).await
}
/// Reload the global audit system configuration
///
/// Becomes a warning-logged no-op (returning `Ok(())`) when the system is
/// not initialized, per `with_audit_system!`.
pub async fn reload_audit_config(config: Config) -> AuditResult<()> {
    with_audit_system!(|system: Arc<AuditSystem>| async move { system.reload_config(config).await })
}
/// Check if the global audit system is running.
///
/// An uninitialized system is reported as not running.
pub async fn is_audit_system_running() -> bool {
    match audit_system() {
        Some(system) => system.is_running().await,
        None => false,
    }
}
/// AuditLogger singleton for easy access
pub struct AuditLogger;
impl AuditLogger {
    /// Log an audit entry
    ///
    /// Dispatch failures are logged at `error` level and otherwise swallowed,
    /// so audit logging never surfaces an error to the caller.
    pub async fn log(entry: AuditEntry) {
        if let Err(e) = dispatch_audit_log(Arc::new(entry)).await {
            error!(error = %e, "Failed to dispatch audit log entry");
        }
    }
    /// Check if audit logging is enabled
    pub async fn is_enabled() -> bool {
        is_audit_system_running().await
    }
    /// Get singleton instance
    pub fn instance() -> &'static Self {
        // Zero-sized type; a single static instance suffices.
        static INSTANCE: AuditLogger = AuditLogger;
        &INSTANCE
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/lib.rs | crates/audit/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! RustFS Audit System
//!
//! This crate provides a comprehensive audit logging system with multi-target fan-out capabilities,
//! configuration management, and hot reload functionality. It is modeled after the notify system
//! but specifically designed for audit logging requirements.
pub mod entity;
pub mod error;
pub mod factory;
pub mod global;
pub mod observability;
pub mod registry;
pub mod system;
pub use entity::{ApiDetails, AuditEntry, ObjectVersion};
pub use error::{AuditError, AuditResult};
pub use global::*;
pub use observability::{AuditMetrics, AuditMetricsReport, PerformanceValidation};
pub use registry::AuditRegistry;
pub use system::AuditSystem;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/entity.rs | crates/audit/src/entity.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use chrono::{DateTime, Utc};
use hashbrown::HashMap;
use rustfs_targets::EventName;
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// ObjectVersion represents an object version with key and versionId
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct ObjectVersion {
    // Object key; serialized as "objectName" on the wire.
    #[serde(rename = "objectName")]
    pub object_name: String,
    // Optional version id; omitted from the serialized form when absent.
    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
}
impl ObjectVersion {
    /// Creates a new `ObjectVersion`.
    ///
    /// `object_name` accepts anything convertible into a `String` (e.g. `&str`
    /// or `String`), matching the `impl Into<String>` convention used by
    /// `ApiDetailsBuilder` and `AuditEntryBuilder` in this file. Existing
    /// callers passing an owned `String` remain source-compatible.
    ///
    /// `version_id` stays `Option<String>` so that `None` needs no type
    /// annotation at call sites.
    pub fn new(object_name: impl Into<String>, version_id: Option<String>) -> Self {
        Self {
            object_name: object_name.into(),
            version_id,
        }
    }
}
/// `ApiDetails` contains API information for the audit entry.
///
/// All fields are optional and skipped during serialization when `None`;
/// byte counters use short wire names ("rx"/"tx"/"txHeaders").
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ApiDetails {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objects: Option<Vec<ObjectVersion>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status_code: Option<i32>,
    // Bytes received; serialized as "rx".
    #[serde(rename = "rx", skip_serializing_if = "Option::is_none")]
    pub input_bytes: Option<i64>,
    // Bytes sent; serialized as "tx".
    #[serde(rename = "tx", skip_serializing_if = "Option::is_none")]
    pub output_bytes: Option<i64>,
    #[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")]
    pub header_bytes: Option<i64>,
    // Human-readable timings plus nanosecond-precision counterparts.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte: Option<String>,
    #[serde(rename = "timeToFirstByteInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte_in_ns: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub time_to_response: Option<String>,
    #[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_response_in_ns: Option<String>,
}
/// Builder for `ApiDetails`.
#[derive(Default, Clone)]
pub struct ApiDetailsBuilder(pub ApiDetails);
impl ApiDetailsBuilder {
    /// Starts from an all-`None` `ApiDetails`.
    pub fn new() -> Self {
        Self::default()
    }
    // Fluent setters: each wraps its value in `Some` and returns `self`.
    pub fn name(mut self, name: impl Into<String>) -> Self {
        self.0.name = Some(name.into());
        self
    }
    pub fn bucket(mut self, bucket: impl Into<String>) -> Self {
        self.0.bucket = Some(bucket.into());
        self
    }
    pub fn object(mut self, object: impl Into<String>) -> Self {
        self.0.object = Some(object.into());
        self
    }
    pub fn objects(mut self, objects: Vec<ObjectVersion>) -> Self {
        self.0.objects = Some(objects);
        self
    }
    pub fn status(mut self, status: impl Into<String>) -> Self {
        self.0.status = Some(status.into());
        self
    }
    pub fn status_code(mut self, code: i32) -> Self {
        self.0.status_code = Some(code);
        self
    }
    pub fn input_bytes(mut self, bytes: i64) -> Self {
        self.0.input_bytes = Some(bytes);
        self
    }
    pub fn output_bytes(mut self, bytes: i64) -> Self {
        self.0.output_bytes = Some(bytes);
        self
    }
    pub fn header_bytes(mut self, bytes: i64) -> Self {
        self.0.header_bytes = Some(bytes);
        self
    }
    pub fn time_to_first_byte(mut self, t: impl Into<String>) -> Self {
        self.0.time_to_first_byte = Some(t.into());
        self
    }
    pub fn time_to_first_byte_in_ns(mut self, t: impl Into<String>) -> Self {
        self.0.time_to_first_byte_in_ns = Some(t.into());
        self
    }
    pub fn time_to_response(mut self, t: impl Into<String>) -> Self {
        self.0.time_to_response = Some(t.into());
        self
    }
    pub fn time_to_response_in_ns(mut self, t: impl Into<String>) -> Self {
        self.0.time_to_response_in_ns = Some(t.into());
        self
    }
    /// Consumes the builder, yielding the accumulated `ApiDetails`.
    pub fn build(self) -> ApiDetails {
        self.0
    }
}
/// `AuditEntry` represents an audit log entry.
///
/// Optional fields are omitted from the serialized form when `None`;
/// `time` is serialized as epoch milliseconds via `ts_milliseconds`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditEntry {
    pub version: String,
    #[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")]
    pub deployment_id: Option<String>,
    #[serde(rename = "siteName", skip_serializing_if = "Option::is_none")]
    pub site_name: Option<String>,
    #[serde(with = "chrono::serde::ts_milliseconds")]
    pub time: DateTime<Utc>,
    pub event: EventName,
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub entry_type: Option<String>,
    pub trigger: String,
    pub api: ApiDetails,
    #[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")]
    pub remote_host: Option<String>,
    #[serde(rename = "requestID", skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
    #[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")]
    pub user_agent: Option<String>,
    #[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")]
    pub req_path: Option<String>,
    #[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")]
    pub req_host: Option<String>,
    #[serde(rename = "requestNode", skip_serializing_if = "Option::is_none")]
    pub req_node: Option<String>,
    #[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")]
    pub req_claims: Option<HashMap<String, Value>>,
    #[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")]
    pub req_query: Option<HashMap<String, String>>,
    #[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")]
    pub req_header: Option<HashMap<String, String>>,
    #[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")]
    pub resp_header: Option<HashMap<String, String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<HashMap<String, Value>>,
    #[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")]
    pub access_key: Option<String>,
    #[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")]
    pub parent_user: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
/// Constructor for `AuditEntry`.
pub struct AuditEntryBuilder(AuditEntry);
impl AuditEntryBuilder {
    /// Create a new builder with all required fields.
    ///
    /// `time` is stamped with `Utc::now()`; all optional fields start `None`.
    pub fn new(version: impl Into<String>, event: EventName, trigger: impl Into<String>, api: ApiDetails) -> Self {
        Self(AuditEntry {
            version: version.into(),
            time: Utc::now(),
            event,
            trigger: trigger.into(),
            api,
            ..Default::default()
        })
    }
    // Fluent setters — each overrides one field and returns `self`.
    pub fn version(mut self, version: impl Into<String>) -> Self {
        self.0.version = version.into();
        self
    }
    pub fn event(mut self, event: EventName) -> Self {
        self.0.event = event;
        self
    }
    pub fn api(mut self, api_details: ApiDetails) -> Self {
        self.0.api = api_details;
        self
    }
    pub fn deployment_id(mut self, id: impl Into<String>) -> Self {
        self.0.deployment_id = Some(id.into());
        self
    }
    pub fn site_name(mut self, name: impl Into<String>) -> Self {
        self.0.site_name = Some(name.into());
        self
    }
    pub fn time(mut self, time: DateTime<Utc>) -> Self {
        self.0.time = time;
        self
    }
    pub fn entry_type(mut self, entry_type: impl Into<String>) -> Self {
        self.0.entry_type = Some(entry_type.into());
        self
    }
    pub fn remote_host(mut self, host: impl Into<String>) -> Self {
        self.0.remote_host = Some(host.into());
        self
    }
    pub fn request_id(mut self, id: impl Into<String>) -> Self {
        self.0.request_id = Some(id.into());
        self
    }
    pub fn user_agent(mut self, agent: impl Into<String>) -> Self {
        self.0.user_agent = Some(agent.into());
        self
    }
    pub fn req_path(mut self, path: impl Into<String>) -> Self {
        self.0.req_path = Some(path.into());
        self
    }
    pub fn req_host(mut self, host: impl Into<String>) -> Self {
        self.0.req_host = Some(host.into());
        self
    }
    pub fn req_node(mut self, node: impl Into<String>) -> Self {
        self.0.req_node = Some(node.into());
        self
    }
    pub fn req_claims(mut self, claims: HashMap<String, Value>) -> Self {
        self.0.req_claims = Some(claims);
        self
    }
    pub fn req_query(mut self, query: HashMap<String, String>) -> Self {
        self.0.req_query = Some(query);
        self
    }
    pub fn req_header(mut self, header: HashMap<String, String>) -> Self {
        self.0.req_header = Some(header);
        self
    }
    pub fn resp_header(mut self, header: HashMap<String, String>) -> Self {
        self.0.resp_header = Some(header);
        self
    }
    pub fn tags(mut self, tags: HashMap<String, Value>) -> Self {
        self.0.tags = Some(tags);
        self
    }
    pub fn access_key(mut self, key: impl Into<String>) -> Self {
        self.0.access_key = Some(key.into());
        self
    }
    pub fn parent_user(mut self, user: impl Into<String>) -> Self {
        self.0.parent_user = Some(user.into());
        self
    }
    pub fn error(mut self, error: impl Into<String>) -> Self {
        self.0.error = Some(error.into());
        self
    }
    /// Construct the final `AuditEntry`.
    pub fn build(self) -> AuditEntry {
        self.0
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/observability.rs | crates/audit/src/observability.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Observability and metrics for the audit system
//!
//! This module provides comprehensive observability features including:
//! - Performance metrics (EPS, latency)
//! - Target health monitoring
//! - Configuration change tracking
//! - Error rate monitoring
//! - Queue depth monitoring
use metrics::{counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, OnceLock};
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use tracing::info;
// Metric names live under a shared "rustfs.audit." namespace, concatenated at
// compile time via `const_str::concat!`.
const RUSTFS_AUDIT_METRICS_NAMESPACE: &str = "rustfs.audit.";
const M_AUDIT_EVENTS_TOTAL: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "events.total");
const M_AUDIT_EVENTS_FAILED: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "events.failed");
const M_AUDIT_DISPATCH_NS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "dispatch.ns");
const M_AUDIT_EPS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "eps");
const M_AUDIT_TARGET_OPS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "target.ops");
const M_AUDIT_CONFIG_RELOADS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "config.reloads");
const M_AUDIT_SYSTEM_STARTS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "system.starts");
// Label keys and values shared across the metric call sites below.
const L_RESULT: &str = "result";
const L_STATUS: &str = "status";
const V_SUCCESS: &str = "success";
const V_FAILURE: &str = "failure";
/// One-time registration of indicator meta information
/// This function ensures that metric descriptors are registered only once.
pub fn init_observability_metrics() {
    // `OnceLock` guards against repeated describe_* calls from multiple callers.
    static METRICS_DESC_INIT: OnceLock<()> = OnceLock::new();
    METRICS_DESC_INIT.get_or_init(|| {
        // Event/Time-consuming
        describe_counter!(M_AUDIT_EVENTS_TOTAL, "Total audit events (labeled by result).");
        describe_counter!(M_AUDIT_EVENTS_FAILED, "Total failed audit events.");
        describe_histogram!(M_AUDIT_DISPATCH_NS, "Dispatch time per event (ns).");
        describe_gauge!(M_AUDIT_EPS, "Events per second since last reset.");
        // Target operation/system event
        describe_counter!(M_AUDIT_TARGET_OPS, "Total target operations (labeled by status).");
        describe_counter!(M_AUDIT_CONFIG_RELOADS, "Total configuration reloads.");
        describe_counter!(M_AUDIT_SYSTEM_STARTS, "Total system starts.");
    });
}
/// Metrics collector for audit system observability
///
/// Keeps in-process atomic counters alongside the exported `metrics` crate
/// series so derived values (EPS, averages, rates) can be computed locally.
#[derive(Debug)]
pub struct AuditMetrics {
    // Performance metrics
    total_events_processed: AtomicU64,
    total_events_failed: AtomicU64,
    total_dispatch_time_ns: AtomicU64,
    // Target metrics
    target_success_count: AtomicU64,
    target_failure_count: AtomicU64,
    // System metrics
    config_reload_count: AtomicU64,
    system_start_count: AtomicU64,
    // Performance tracking
    last_reset_time: Arc<RwLock<Instant>>,
}
impl Default for AuditMetrics {
    fn default() -> Self {
        Self::new()
    }
}
impl AuditMetrics {
    /// Creates a new metrics collector
    ///
    /// Also ensures the global metric descriptors are registered (idempotent).
    pub fn new() -> Self {
        init_observability_metrics();
        Self {
            total_events_processed: AtomicU64::new(0),
            total_events_failed: AtomicU64::new(0),
            total_dispatch_time_ns: AtomicU64::new(0),
            target_success_count: AtomicU64::new(0),
            target_failure_count: AtomicU64::new(0),
            config_reload_count: AtomicU64::new(0),
            system_start_count: AtomicU64::new(0),
            last_reset_time: Arc::new(RwLock::new(Instant::now())),
        }
    }
    // Emits the exported counter/histogram series for a successful dispatch;
    // the in-process atomics are updated by the public record_* methods.
    #[inline]
    fn emit_event_success_metrics(&self, dispatch_time: Duration) {
        // count + histogram
        counter!(M_AUDIT_EVENTS_TOTAL, L_RESULT => V_SUCCESS).increment(1);
        histogram!(M_AUDIT_DISPATCH_NS).record(dispatch_time.as_nanos() as f64);
    }
    // Emits the exported series for a failed dispatch (total with failure
    // label, dedicated failure counter, and the dispatch-time histogram).
    #[inline]
    fn emit_event_failure_metrics(&self, dispatch_time: Duration) {
        counter!(M_AUDIT_EVENTS_TOTAL, L_RESULT => V_FAILURE).increment(1);
        counter!(M_AUDIT_EVENTS_FAILED).increment(1);
        histogram!(M_AUDIT_DISPATCH_NS).record(dispatch_time.as_nanos() as f64);
    }
    /// Records a successful event dispatch
    pub fn record_event_success(&self, dispatch_time: Duration) {
        self.total_events_processed.fetch_add(1, Ordering::Relaxed);
        self.total_dispatch_time_ns
            .fetch_add(dispatch_time.as_nanos() as u64, Ordering::Relaxed);
        self.emit_event_success_metrics(dispatch_time);
    }
    /// Records a failed event dispatch
    ///
    /// Failures are tracked separately from `total_events_processed`; totals
    /// below are computed as processed + failed.
    pub fn record_event_failure(&self, dispatch_time: Duration) {
        self.total_events_failed.fetch_add(1, Ordering::Relaxed);
        self.total_dispatch_time_ns
            .fetch_add(dispatch_time.as_nanos() as u64, Ordering::Relaxed);
        self.emit_event_failure_metrics(dispatch_time);
    }
    /// Records a successful target operation
    pub fn record_target_success(&self) {
        self.target_success_count.fetch_add(1, Ordering::Relaxed);
        counter!(M_AUDIT_TARGET_OPS, L_STATUS => V_SUCCESS).increment(1);
    }
    /// Records a failed target operation
    pub fn record_target_failure(&self) {
        self.target_failure_count.fetch_add(1, Ordering::Relaxed);
        counter!(M_AUDIT_TARGET_OPS, L_STATUS => V_FAILURE).increment(1);
    }
    /// Records a configuration reload
    pub fn record_config_reload(&self) {
        self.config_reload_count.fetch_add(1, Ordering::Relaxed);
        counter!(M_AUDIT_CONFIG_RELOADS).increment(1);
        info!("Audit configuration reloaded");
    }
    /// Records a system start
    pub fn record_system_start(&self) {
        self.system_start_count.fetch_add(1, Ordering::Relaxed);
        counter!(M_AUDIT_SYSTEM_STARTS).increment(1);
        info!("Audit system started");
    }
    /// Gets the current events per second (EPS)
    ///
    /// EPS is averaged over the window since the last `reset()`; it is also
    /// published to the EPS gauge as a side effect.
    pub async fn get_events_per_second(&self) -> f64 {
        let reset_time = *self.last_reset_time.read().await;
        let elapsed = reset_time.elapsed();
        let total_events = self.total_events_processed.load(Ordering::Relaxed) + self.total_events_failed.load(Ordering::Relaxed);
        let eps = if elapsed.as_secs_f64() > 0.0 {
            total_events as f64 / elapsed.as_secs_f64()
        } else {
            0.0
        };
        // EPS is reported in gauge
        gauge!(M_AUDIT_EPS).set(eps);
        eps
    }
    /// Gets the average dispatch latency in milliseconds
    ///
    /// Returns 0.0 when no events have been recorded yet.
    pub fn get_average_latency_ms(&self) -> f64 {
        let total_events = self.total_events_processed.load(Ordering::Relaxed) + self.total_events_failed.load(Ordering::Relaxed);
        let total_time_ns = self.total_dispatch_time_ns.load(Ordering::Relaxed);
        if total_events > 0 {
            (total_time_ns as f64 / total_events as f64) / 1_000_000.0 // Convert ns to ms
        } else {
            0.0
        }
    }
    /// Gets the error rate as a percentage
    ///
    /// Returns 0.0 when no events have been recorded yet.
    pub fn get_error_rate(&self) -> f64 {
        let total_events = self.total_events_processed.load(Ordering::Relaxed) + self.total_events_failed.load(Ordering::Relaxed);
        let failed_events = self.total_events_failed.load(Ordering::Relaxed);
        if total_events > 0 {
            (failed_events as f64 / total_events as f64) * 100.0
        } else {
            0.0
        }
    }
    /// Gets target success rate as a percentage
    pub fn get_target_success_rate(&self) -> f64 {
        let total_ops = self.target_success_count.load(Ordering::Relaxed) + self.target_failure_count.load(Ordering::Relaxed);
        let success_ops = self.target_success_count.load(Ordering::Relaxed);
        if total_ops > 0 {
            (success_ops as f64 / total_ops as f64) * 100.0
        } else {
            100.0 // No operations = 100% success rate
        }
    }
    /// Resets all metrics and timing
    ///
    /// Only the in-process atomics and the EPS gauge are reset; exported
    /// counters/histograms are cumulative and cannot be rewound.
    pub async fn reset(&self) {
        self.total_events_processed.store(0, Ordering::Relaxed);
        self.total_events_failed.store(0, Ordering::Relaxed);
        self.total_dispatch_time_ns.store(0, Ordering::Relaxed);
        self.target_success_count.store(0, Ordering::Relaxed);
        self.target_failure_count.store(0, Ordering::Relaxed);
        self.config_reload_count.store(0, Ordering::Relaxed);
        self.system_start_count.store(0, Ordering::Relaxed);
        let mut reset_time = self.last_reset_time.write().await;
        *reset_time = Instant::now();
        // Reset EPS to zero after reset
        gauge!(M_AUDIT_EPS).set(0.0);
        info!("Audit metrics reset");
    }
    /// Generates a comprehensive metrics report
    pub async fn generate_report(&self) -> AuditMetricsReport {
        AuditMetricsReport {
            events_per_second: self.get_events_per_second().await,
            average_latency_ms: self.get_average_latency_ms(),
            error_rate_percent: self.get_error_rate(),
            target_success_rate_percent: self.get_target_success_rate(),
            total_events_processed: self.total_events_processed.load(Ordering::Relaxed),
            total_events_failed: self.total_events_failed.load(Ordering::Relaxed),
            config_reload_count: self.config_reload_count.load(Ordering::Relaxed),
            system_start_count: self.system_start_count.load(Ordering::Relaxed),
        }
    }
    /// Validates performance requirements
    ///
    /// Thresholds: EPS >= 3000, average latency <= 30ms, error rate <= 1%.
    pub async fn validate_performance_requirements(&self) -> PerformanceValidation {
        let eps = self.get_events_per_second().await;
        let avg_latency_ms = self.get_average_latency_ms();
        let error_rate = self.get_error_rate();
        let mut validation = PerformanceValidation {
            meets_eps_requirement: eps >= 3000.0,
            meets_latency_requirement: avg_latency_ms <= 30.0,
            meets_error_rate_requirement: error_rate <= 1.0, // Less than 1% error rate
            current_eps: eps,
            current_latency_ms: avg_latency_ms,
            current_error_rate: error_rate,
            recommendations: Vec::new(),
        };
        // Generate recommendations
        if !validation.meets_eps_requirement {
            validation.recommendations.push(format!(
                "EPS ({eps:.0}) is below requirement (3000). Consider optimizing target dispatch or adding more target instances."
            ));
        }
        if !validation.meets_latency_requirement {
            validation.recommendations.push(format!(
                "Average latency ({avg_latency_ms:.2}ms) exceeds requirement (30ms). Consider optimizing target responses or increasing timeout values."
            ));
        }
        if !validation.meets_error_rate_requirement {
            validation.recommendations.push(format!(
                "Error rate ({error_rate:.2}%) exceeds recommendation (1%). Check target connectivity and configuration."
            ));
        }
        if validation.meets_eps_requirement && validation.meets_latency_requirement && validation.meets_error_rate_requirement {
            validation
                .recommendations
                .push("All performance requirements are met.".to_string());
        }
        validation
    }
}
/// Comprehensive metrics report
///
/// Snapshot of derived and raw counters produced by `AuditMetrics::generate_report`.
#[derive(Debug, Clone)]
pub struct AuditMetricsReport {
    pub events_per_second: f64,
    pub average_latency_ms: f64,
    pub error_rate_percent: f64,
    pub target_success_rate_percent: f64,
    pub total_events_processed: u64,
    pub total_events_failed: u64,
    pub config_reload_count: u64,
    pub system_start_count: u64,
}
impl AuditMetricsReport {
    /// Formats the report as a human-readable string
    pub fn format(&self) -> String {
        format!(
            "Audit System Metrics Report:\n\
             Events per Second: {:.2}\n\
             Average Latency: {:.2}ms\n\
             Error Rate: {:.2}%\n\
             Target Success Rate: {:.2}%\n\
             Total Events Processed: {}\n\
             Total Events Failed: {}\n\
             Configuration Reloads: {}\n\
             System Starts: {}",
            self.events_per_second,
            self.average_latency_ms,
            self.error_rate_percent,
            self.target_success_rate_percent,
            self.total_events_processed,
            self.total_events_failed,
            self.config_reload_count,
            self.system_start_count
        )
    }
}
/// Performance validation results
///
/// Produced by `AuditMetrics::validate_performance_requirements`; each
/// `meets_*` flag pairs with the corresponding `current_*` measurement.
#[derive(Debug, Clone)]
pub struct PerformanceValidation {
    pub meets_eps_requirement: bool,
    pub meets_latency_requirement: bool,
    pub meets_error_rate_requirement: bool,
    pub current_eps: f64,
    pub current_latency_ms: f64,
    pub current_error_rate: f64,
    pub recommendations: Vec<String>,
}
impl PerformanceValidation {
    /// Checks if all performance requirements are met
    pub fn all_requirements_met(&self) -> bool {
        self.meets_eps_requirement && self.meets_latency_requirement && self.meets_error_rate_requirement
    }
    /// Formats the validation as a human-readable string
    pub fn format(&self) -> String {
        let status = if self.all_requirements_met() { "✅ PASS" } else { "❌ FAIL" };
        let mut result = format!(
            "Performance Requirements Validation: {}\n\
             EPS Requirement (≥3000): {} ({:.2})\n\
             Latency Requirement (≤30ms): {} ({:.2}ms)\n\
             Error Rate Requirement (≤1%): {} ({:.2}%)\n\
             \nRecommendations:",
            status,
            if self.meets_eps_requirement { "✅" } else { "❌" },
            self.current_eps,
            if self.meets_latency_requirement { "✅" } else { "❌" },
            self.current_latency_ms,
            if self.meets_error_rate_requirement { "✅" } else { "❌" },
            self.current_error_rate
        );
        // Append each recommendation as a bulleted line.
        for rec in &self.recommendations {
            result.push_str(&format!("\n• {rec}"));
        }
        result
    }
}
/// Global metrics instance, created lazily on first access.
static GLOBAL_METRICS: OnceLock<Arc<AuditMetrics>> = OnceLock::new();
/// Get or initialize the global metrics instance
///
/// Returns a cheap `Arc` clone of the process-wide singleton; all callers
/// share the same underlying metrics.
pub fn global_metrics() -> Arc<AuditMetrics> {
    GLOBAL_METRICS.get_or_init(|| Arc::new(AuditMetrics::new())).clone()
}
/// Record a successful audit event dispatch
///
/// `dispatch_time` is the measured latency of the dispatch that succeeded.
pub fn record_audit_success(dispatch_time: Duration) {
    global_metrics().record_event_success(dispatch_time);
}
/// Record a failed audit event dispatch
///
/// `dispatch_time` is the measured latency of the dispatch that failed.
pub fn record_audit_failure(dispatch_time: Duration) {
    global_metrics().record_event_failure(dispatch_time);
}
/// Record a successful target operation
pub fn record_target_success() {
    global_metrics().record_target_success();
}
/// Record a failed target operation
pub fn record_target_failure() {
    global_metrics().record_target_failure();
}
/// Record a configuration reload
pub fn record_config_reload() {
    global_metrics().record_config_reload();
}
/// Record a system start
pub fn record_system_start() {
    global_metrics().record_system_start();
}
/// Get the current metrics report from the global instance
pub async fn get_metrics_report() -> AuditMetricsReport {
    global_metrics().generate_report().await
}
/// Validate performance requirements against the global metrics
pub async fn validate_performance() -> PerformanceValidation {
    global_metrics().validate_performance_requirements().await
}
/// Reset all metrics on the global instance
pub async fn reset_metrics() {
    global_metrics().reset().await;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/error.rs | crates/audit/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use thiserror::Error;
/// Result type for audit operations
pub type AuditResult<T> = Result<T, AuditError>;
/// Errors that can occur during audit operations
#[derive(Error, Debug)]
pub enum AuditError {
    /// Invalid or incomplete configuration; the optional boxed error, when
    /// present, is exposed as the error's `source()`.
    #[error("Configuration error: {0}")]
    Configuration(String, #[source] Option<Box<dyn std::error::Error + Send + Sync>>),
    /// The audit configuration has not been loaded yet.
    #[error("config not loaded")]
    ConfigNotLoaded,
    /// Error propagated from an audit target implementation.
    #[error("Target error: {0}")]
    Target(#[from] rustfs_targets::TargetError),
    /// An operation required a running system; the message describes the context.
    #[error("System not initialized: {0}")]
    NotInitialized(String),
    /// Start was requested while the system is already running.
    #[error("System already initialized")]
    AlreadyInitialized,
    /// Backing storage for configuration is unavailable.
    #[error("Storage not available: {0}")]
    StorageNotAvailable(String),
    /// Persisting the audit configuration failed.
    #[error("Failed to save configuration: {0}")]
    SaveConfig(#[source] Box<dyn std::error::Error + Send + Sync>),
    /// Reading the audit configuration failed.
    #[error("Failed to load configuration: {0}")]
    LoadConfig(#[source] Box<dyn std::error::Error + Send + Sync>),
    /// JSON (de)serialization failure.
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),
    /// Underlying I/O failure.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    /// A spawned tokio task failed to join (panicked or was cancelled).
    #[error("Join error: {0}")]
    Join(#[from] tokio::task::JoinError),
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/system.rs | crates/audit/src/system.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{AuditEntry, AuditError, AuditRegistry, AuditResult, observability};
use rustfs_ecstore::config::Config;
use rustfs_targets::{
StoreError, Target, TargetError,
store::{Key, Store},
target::EntityTarget,
};
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};
use tracing::{error, info, warn};
/// State of the audit system
///
/// Lifecycle as driven by `AuditSystem`:
/// `Stopped` → `Starting` → `Running` ⇄ `Paused`, then `Stopping` → `Stopped`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AuditSystemState {
    /// Not started, or fully shut down.
    Stopped,
    /// Targets are being created and initialized.
    Starting,
    /// Actively dispatching audit entries to targets.
    Running,
    /// Temporarily suspended; dispatches are skipped while paused.
    Paused,
    /// Shutdown in progress; targets are being closed.
    Stopping,
}
/// Main audit system that manages target lifecycle and audit log dispatch
///
/// Cheap to clone: every field is shared behind an `Arc`, so clones operate
/// on the same registry, state, and configuration.
#[derive(Clone)]
pub struct AuditSystem {
    /// Live audit targets plus the factories used to build them.
    registry: Arc<Mutex<AuditRegistry>>,
    /// Current lifecycle state of the system.
    state: Arc<RwLock<AuditSystemState>>,
    /// Last configuration passed to `start`/`reload_config`, if any.
    config: Arc<RwLock<Option<Config>>>,
}
impl Default for AuditSystem {
    /// Same as [`AuditSystem::new`]: a stopped system with no configuration.
    fn default() -> Self {
        Self::new()
    }
}
impl AuditSystem {
    /// Creates a new audit system
    ///
    /// The system starts in the `Stopped` state with an empty registry and no
    /// configuration loaded; call `start` to create targets and begin dispatch.
    pub fn new() -> Self {
        Self {
            registry: Arc::new(Mutex::new(AuditRegistry::new())),
            state: Arc::new(RwLock::new(AuditSystemState::Stopped)),
            config: Arc::new(RwLock::new(None)),
        }
    }
    /// Starts the audit system with the given configuration
    ///
    /// Creates targets from `config`, initializes each one, spawns a
    /// store-draining task per enabled target that has a store, and finally
    /// transitions the state to `Running`. If no enabled targets are found,
    /// the system stays `Stopped` and `Ok(())` is returned.
    ///
    /// # Arguments
    /// * `config` - The configuration to use for starting the audit system
    ///
    /// # Returns
    /// * `AuditResult<()>` - `Err(AuditError::AlreadyInitialized)` if already
    ///   running; otherwise the result of target creation.
    pub async fn start(&self, config: Config) -> AuditResult<()> {
        let state = self.state.write().await;
        match *state {
            AuditSystemState::Running => {
                return Err(AuditError::AlreadyInitialized);
            }
            AuditSystemState::Starting => {
                warn!("Audit system is already starting");
                return Ok(());
            }
            _ => {}
        }
        // NOTE(review): the state lock is released here and `Starting` is only
        // set after target creation below, so two concurrent `start` calls
        // could both pass the check above — confirm callers serialize startup.
        drop(state);
        info!("Starting audit system");
        // Record system start
        observability::record_system_start();
        // Store configuration
        {
            let mut config_guard = self.config.write().await;
            *config_guard = Some(config.clone());
        }
        // Create targets from configuration
        let mut registry = self.registry.lock().await;
        match registry.create_audit_targets_from_config(&config).await {
            Ok(targets) => {
                if targets.is_empty() {
                    info!("No enabled audit targets found, keeping audit system stopped");
                    drop(registry);
                    return Ok(());
                }
                {
                    let mut state = self.state.write().await;
                    *state = AuditSystemState::Starting;
                }
                info!(target_count = targets.len(), "Created audit targets successfully");
                // Initialize all targets
                for target in targets {
                    let target_id = target.id().to_string();
                    if let Err(e) = target.init().await {
                        // Init failures are logged and the target is dropped;
                        // startup itself still succeeds.
                        error!(target_id = %target_id, error = %e, "Failed to initialize audit target");
                    } else {
                        // After successful initialization, if enabled and there is a store, start the send from storage task
                        if target.is_enabled() {
                            if let Some(store) = target.store() {
                                info!(target_id = %target_id, "Start audit stream processing for target");
                                let store_clone: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send> =
                                    store.boxed_clone();
                                let target_arc: Arc<dyn Target<AuditEntry> + Send + Sync> = Arc::from(target.clone_dyn());
                                self.start_audit_stream_with_batching(store_clone, target_arc);
                                info!(target_id = %target_id, "Audit stream processing started");
                            } else {
                                info!(target_id = %target_id, "No store configured, skip audit stream processing");
                            }
                        } else {
                            info!(target_id = %target_id, "Target disabled, skip audit stream processing");
                        }
                        registry.add_target(target_id, target);
                    }
                }
                // Update state to running
                let mut state = self.state.write().await;
                *state = AuditSystemState::Running;
                info!("Audit system started successfully");
                Ok(())
            }
            Err(e) => {
                error!(error = %e, "Failed to create audit targets");
                let mut state = self.state.write().await;
                *state = AuditSystemState::Stopped;
                Err(e)
            }
        }
    }
/// Pauses the audit system
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn pause(&self) -> AuditResult<()> {
let mut state = self.state.write().await;
match *state {
AuditSystemState::Running => {
*state = AuditSystemState::Paused;
info!("Audit system paused");
Ok(())
}
AuditSystemState::Paused => {
warn!("Audit system is already paused");
Ok(())
}
_ => Err(AuditError::Configuration("Cannot pause audit system in current state".to_string(), None)),
}
}
/// Resumes the audit system
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn resume(&self) -> AuditResult<()> {
let mut state = self.state.write().await;
match *state {
AuditSystemState::Paused => {
*state = AuditSystemState::Running;
info!("Audit system resumed");
Ok(())
}
AuditSystemState::Running => {
warn!("Audit system is already running");
Ok(())
}
_ => Err(AuditError::Configuration("Cannot resume audit system in current state".to_string(), None)),
}
}
    /// Stops the audit system and closes all targets
    ///
    /// Idempotent: returns `Ok(())` immediately when already stopped or
    /// stopping. On completion the state is `Stopped` and the stored
    /// configuration is cleared.
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn close(&self) -> AuditResult<()> {
        let mut state = self.state.write().await;
        match *state {
            AuditSystemState::Stopped => {
                warn!("Audit system is already stopped");
                return Ok(());
            }
            AuditSystemState::Stopping => {
                warn!("Audit system is already stopping");
                return Ok(());
            }
            _ => {}
        }
        // Mark as stopping before releasing the lock so background stream
        // tasks observe the transition and exit their loops.
        *state = AuditSystemState::Stopping;
        drop(state);
        info!("Stopping audit system");
        // Close all targets
        let mut registry = self.registry.lock().await;
        if let Err(e) = registry.close_all().await {
            // Best-effort: close failures are logged and shutdown continues.
            error!(error = %e, "Failed to close some audit targets");
        }
        // Update state to stopped
        let mut state = self.state.write().await;
        *state = AuditSystemState::Stopped;
        // Clear configuration
        let mut config_guard = self.config.write().await;
        *config_guard = None;
        info!("Audit system stopped");
        Ok(())
    }
    /// Gets the current state of the audit system
    ///
    /// Returns a clone of the state at the moment of the read; it may change
    /// immediately afterwards.
    pub async fn get_state(&self) -> AuditSystemState {
        self.state.read().await.clone()
    }
    /// Checks if the audit system is running
    ///
    /// # Returns
    /// * `bool` - True if running, false otherwise
    pub async fn is_running(&self) -> bool {
        matches!(*self.state.read().await, AuditSystemState::Running)
    }
    /// Dispatches an audit log entry to all active targets
    ///
    /// When the system is `Paused` the entry is silently dropped (`Ok(())`);
    /// when it is not running an error is returned. Per-target failures are
    /// logged and counted in the metrics, but never fail the dispatch as a
    /// whole.
    ///
    /// # Arguments
    /// * `entry` - The audit log entry to dispatch
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn dispatch(&self, entry: Arc<AuditEntry>) -> AuditResult<()> {
        let start_time = std::time::Instant::now();
        let state = self.state.read().await;
        match *state {
            AuditSystemState::Running => {
                // Continue with dispatch
                // NOTE(review): info-level log for every entry is noisy on hot
                // paths — consider debug!.
                info!("Dispatching audit log entry");
            }
            AuditSystemState::Paused => {
                // Skip dispatch when paused
                return Ok(());
            }
            _ => {
                // Don't dispatch when not running
                return Err(AuditError::NotInitialized("Audit system is not running".to_string()));
            }
        }
        drop(state);
        let registry = self.registry.lock().await;
        let target_ids = registry.list_targets();
        if target_ids.is_empty() {
            warn!("No audit targets configured for dispatch");
            return Ok(());
        }
        // Dispatch to all targets concurrently
        let mut tasks = Vec::new();
        for target_id in target_ids {
            if let Some(target) = registry.get_target(&target_id) {
                let entry_clone = Arc::clone(&entry);
                let target_id_clone = target_id.clone();
                // Create EntityTarget for the audit log entry
                let entity_target = EntityTarget {
                    object_name: entry.api.name.clone().unwrap_or_default(),
                    bucket_name: entry.api.bucket.clone().unwrap_or_default(),
                    event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
                    data: (*entry_clone).clone(),
                };
                let task = async move {
                    let result = target.save(Arc::new(entity_target)).await;
                    (target_id_clone, result)
                };
                tasks.push(task);
            }
        }
        // Execute all dispatch tasks
        let results = futures::future::join_all(tasks).await;
        let mut errors = Vec::new();
        let mut success_count = 0;
        for (target_id, result) in results {
            match result {
                Ok(_) => {
                    success_count += 1;
                    observability::record_target_success();
                }
                Err(e) => {
                    error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target");
                    errors.push(e);
                    observability::record_target_failure();
                }
            }
        }
        let dispatch_time = start_time.elapsed();
        if errors.is_empty() {
            observability::record_audit_success(dispatch_time);
        } else {
            observability::record_audit_failure(dispatch_time);
            // Log errors but don't fail the entire dispatch
            warn!(
                error_count = errors.len(),
                success_count = success_count,
                "Some audit targets failed to receive log entry"
            );
        }
        Ok(())
    }
    /// Dispatches a batch of audit log entries to all active targets
    ///
    /// Targets are processed concurrently; within one target, entries are
    /// sent sequentially. Per-entry failures are logged and counted, never
    /// propagated to the caller.
    ///
    /// NOTE(review): unlike `dispatch`, a `Paused` system returns an error
    /// here instead of silently skipping — confirm the asymmetry is intended.
    ///
    /// # Arguments
    /// * `entries` - A vector of audit log entries to dispatch
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn dispatch_batch(&self, entries: Vec<Arc<AuditEntry>>) -> AuditResult<()> {
        let start_time = std::time::Instant::now();
        let state = self.state.read().await;
        if *state != AuditSystemState::Running {
            return Err(AuditError::NotInitialized("Audit system is not running".to_string()));
        }
        drop(state);
        let registry = self.registry.lock().await;
        let target_ids = registry.list_targets();
        if target_ids.is_empty() {
            warn!("No audit targets configured for batch dispatch");
            return Ok(());
        }
        let mut tasks = Vec::new();
        for target_id in target_ids {
            if let Some(target) = registry.get_target(&target_id) {
                let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect();
                let target_id_clone = target_id.clone();
                let task = async move {
                    let mut success_count = 0;
                    let mut errors = Vec::new();
                    for entry in entries_clone {
                        // Event name is hardcoded here, same as in `dispatch`.
                        let entity_target = EntityTarget {
                            object_name: entry.api.name.clone().unwrap_or_default(),
                            bucket_name: entry.api.bucket.clone().unwrap_or_default(),
                            event_name: rustfs_targets::EventName::ObjectCreatedPut,
                            data: (*entry).clone(),
                        };
                        match target.save(Arc::new(entity_target)).await {
                            Ok(_) => success_count += 1,
                            Err(e) => errors.push(e),
                        }
                    }
                    (target_id_clone, success_count, errors)
                };
                tasks.push(task);
            }
        }
        let results = futures::future::join_all(tasks).await;
        let mut total_success = 0;
        let mut total_errors = 0;
        for (_target_id, success_count, errors) in results {
            total_success += success_count;
            total_errors += errors.len();
            for e in errors {
                error!("Batch dispatch error: {:?}", e);
            }
        }
        let dispatch_time = start_time.elapsed();
        info!(
            "Batch dispatched {} entries, success: {}, errors: {}, time: {:?}",
            entries.len(),
            total_success,
            total_errors,
            dispatch_time
        );
        Ok(())
    }
    /// Starts the audit stream processing for a target with batching and retry logic
    /// # Arguments
    /// * `store` - The store from which to read audit entries
    /// * `target` - The target to which audit entries will be sent
    ///
    /// This function spawns a background task that continuously reads audit entries from the provided store
    /// and attempts to send them to the specified target. It implements retry logic with exponential backoff
    /// for transient errors (`NotConnected`, `Timeout`); any other error is treated as permanent for the
    /// current pass. The task exits once the system leaves the `Running`/`Paused`/`Starting` states.
    fn start_audit_stream_with_batching(
        &self,
        store: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send>,
        target: Arc<dyn Target<AuditEntry> + Send + Sync>,
    ) {
        let state = self.state.clone();
        tokio::spawn(async move {
            use std::time::Duration;
            use tokio::time::sleep;
            info!("Starting audit stream for target: {}", target.id());
            const MAX_RETRIES: usize = 5;
            const BASE_RETRY_DELAY: Duration = Duration::from_secs(2);
            loop {
                // Exit once the system is stopping or stopped.
                // NOTE(review): `Paused` still drains the store — confirm that
                // pausing is only meant to gate new dispatches.
                match *state.read().await {
                    AuditSystemState::Running | AuditSystemState::Paused | AuditSystemState::Starting => {}
                    _ => {
                        info!("Audit stream stopped for target: {}", target.id());
                        break;
                    }
                }
                let keys: Vec<Key> = store.list();
                if keys.is_empty() {
                    // Nothing queued; poll again shortly.
                    sleep(Duration::from_millis(500)).await;
                    continue;
                }
                for key in keys {
                    let mut retries = 0usize;
                    let mut success = false;
                    while retries < MAX_RETRIES && !success {
                        match target.send_from_store(key.clone()).await {
                            Ok(_) => {
                                info!("Successfully sent audit entry, target: {}, key: {}", target.id(), key.to_string());
                                observability::record_target_success();
                                success = true;
                            }
                            Err(e) => {
                                match &e {
                                    TargetError::NotConnected => {
                                        warn!("Target {} not connected, retrying...", target.id());
                                    }
                                    TargetError::Timeout(_) => {
                                        warn!("Timeout sending to target {}, retrying...", target.id());
                                    }
                                    _ => {
                                        // Non-transient error: give up on this key for this pass.
                                        // NOTE(review): the key is not removed here, so a
                                        // permanently failing entry will be re-listed on every
                                        // pass — confirm `send_from_store` removes bad entries.
                                        error!("Permanent error for target {}: {}", target.id(), e);
                                        observability::record_target_failure();
                                        break;
                                    }
                                }
                                retries += 1;
                                // Exponential backoff; `retries` was already incremented,
                                // so the first retry waits 2 * BASE_RETRY_DELAY.
                                let backoff = BASE_RETRY_DELAY * (1 << retries);
                                sleep(backoff).await;
                            }
                        }
                    }
                    if retries >= MAX_RETRIES && !success {
                        warn!("Max retries exceeded for key {}, target: {}, skipping", key.to_string(), target.id());
                        observability::record_target_failure();
                    }
                }
                sleep(Duration::from_millis(100)).await;
            }
        });
    }
/// Enables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to enable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn enable_target(&self, target_id: &str) -> AuditResult<()> {
// This would require storing enabled/disabled state per target
// For now, just check if target exists
let registry = self.registry.lock().await;
if registry.get_target(target_id).is_some() {
info!(target_id = %target_id, "Target enabled");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
}
}
/// Disables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to disable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn disable_target(&self, target_id: &str) -> AuditResult<()> {
// This would require storing enabled/disabled state per target
// For now, just check if target exists
let registry = self.registry.lock().await;
if registry.get_target(target_id).is_some() {
info!(target_id = %target_id, "Target disabled");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
}
}
/// Removes a target from the system
///
/// # Arguments
/// * `target_id` - The ID of the target to remove
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn remove_target(&self, target_id: &str) -> AuditResult<()> {
let mut registry = self.registry.lock().await;
if let Some(target) = registry.remove_target(target_id) {
if let Err(e) = target.close().await {
error!(target_id = %target_id, error = %e, "Failed to close removed target");
}
info!(target_id = %target_id, "Target removed");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
}
}
    /// Updates or inserts a target
    ///
    /// The new target is initialized first; if initialization fails the
    /// existing target (if any) is left in place. Otherwise any old target
    /// with the same ID is closed (close failures are only logged) and
    /// replaced.
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to upsert
    /// * `target` - The target instance to insert or update
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn upsert_target(&self, target_id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) -> AuditResult<()> {
        let mut registry = self.registry.lock().await;
        // Initialize the target
        if let Err(e) = target.init().await {
            return Err(AuditError::Target(e));
        }
        // Remove existing target if present
        if let Some(old_target) = registry.remove_target(&target_id)
            && let Err(e) = old_target.close().await
        {
            error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
        }
        registry.add_target(target_id.clone(), target);
        info!(target_id = %target_id, "Target upserted");
        Ok(())
    }
/// Lists all targets
///
/// # Returns
/// * `Vec<String>` - List of target IDs
pub async fn list_targets(&self) -> Vec<String> {
let registry = self.registry.lock().await;
registry.list_targets()
}
/// Gets information about a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to retrieve
///
/// # Returns
/// * `Option<String>` - Target ID if found
pub async fn get_target(&self, target_id: &str) -> Option<String> {
let registry = self.registry.lock().await;
registry.get_target(target_id).map(|target| target.id().to_string())
}
    /// Reloads configuration and updates targets
    ///
    /// Closes every existing target, rebuilds the target set from
    /// `new_config`, and restarts the per-target store-draining tasks.
    ///
    /// NOTE(review): the lifecycle state is neither checked nor updated here;
    /// streams spawned while the system is `Stopped` exit immediately —
    /// confirm reload is only invoked on a running system.
    ///
    /// # Arguments
    /// * `new_config` - The new configuration to load
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn reload_config(&self, new_config: Config) -> AuditResult<()> {
        info!("Reloading audit system configuration");
        // Record config reload
        observability::record_config_reload();
        // Store new configuration
        {
            let mut config_guard = self.config.write().await;
            *config_guard = Some(new_config.clone());
        }
        // Close all existing targets
        let mut registry = self.registry.lock().await;
        if let Err(e) = registry.close_all().await {
            // Best-effort: continue the reload even if some targets fail to close.
            error!(error = %e, "Failed to close existing targets during reload");
        }
        // Create new targets from updated configuration
        match registry.create_audit_targets_from_config(&new_config).await {
            Ok(targets) => {
                info!(target_count = targets.len(), "Reloaded audit targets successfully");
                // Initialize all new targets
                for target in targets {
                    let target_id = target.id().to_string();
                    if let Err(e) = target.init().await {
                        // Init failures drop the target; the reload still succeeds.
                        error!(target_id = %target_id, error = %e, "Failed to initialize reloaded audit target");
                    } else {
                        // Same starts the storage stream after a heavy load
                        if target.is_enabled() {
                            if let Some(store) = target.store() {
                                info!(target_id = %target_id, "Start audit stream processing for target (reload)");
                                let store_clone: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send> =
                                    store.boxed_clone();
                                let target_arc: Arc<dyn Target<AuditEntry> + Send + Sync> = Arc::from(target.clone_dyn());
                                self.start_audit_stream_with_batching(store_clone, target_arc);
                                info!(target_id = %target_id, "Audit stream processing started (reload)");
                            } else {
                                info!(target_id = %target_id, "No store configured, skip audit stream processing (reload)");
                            }
                        } else {
                            info!(target_id = %target_id, "Target disabled, skip audit stream processing (reload)");
                        }
                        registry.add_target(target.id().to_string(), target);
                    }
                }
                info!("Audit configuration reloaded successfully");
                Ok(())
            }
            Err(e) => {
                error!(error = %e, "Failed to reload audit configuration");
                Err(e)
            }
        }
    }
    /// Gets current audit system metrics
    ///
    /// Delegates to the process-wide metrics singleton, so the report covers
    /// all audit activity in this process, not just this instance.
    ///
    /// # Returns
    /// * `AuditMetricsReport` - Current metrics report
    pub async fn get_metrics(&self) -> observability::AuditMetricsReport {
        observability::get_metrics_report().await
    }
    /// Validates system performance against requirements
    ///
    /// # Returns
    /// * `PerformanceValidation` - Performance validation results
    pub async fn validate_performance(&self) -> observability::PerformanceValidation {
        observability::validate_performance().await
    }
    /// Resets all metrics to initial state (process-wide singleton).
    pub async fn reset_metrics(&self) {
        observability::reset_metrics().await;
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/registry.rs | crates/audit/src/registry.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
AuditEntry, AuditError, AuditResult,
factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
};
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
use tracing::{debug, error, info, warn};
/// Registry for managing audit targets
///
/// Holds both the live target instances and the factories used to build
/// them from configuration.
pub struct AuditRegistry {
    /// Storage for created targets, keyed by target ID
    targets: HashMap<String, Box<dyn Target<AuditEntry> + Send + Sync>>,
    /// Factories for creating targets, keyed by target type (e.g. "webhook")
    factories: HashMap<String, Box<dyn TargetFactory>>,
}
impl Default for AuditRegistry {
    /// Same as [`AuditRegistry::new`]: an empty registry with the built-in
    /// factories registered.
    fn default() -> Self {
        Self::new()
    }
}
impl AuditRegistry {
    /// Creates a new AuditRegistry
    ///
    /// Webhook and MQTT factories are pre-registered; additional factories
    /// can be added with `register`.
    pub fn new() -> Self {
        let mut registry = AuditRegistry {
            factories: HashMap::new(),
            targets: HashMap::new(),
        };
        // Register built-in factories
        registry.register(ChannelTargetType::Webhook.as_str(), Box::new(WebhookTargetFactory));
        registry.register(ChannelTargetType::Mqtt.as_str(), Box::new(MQTTTargetFactory));
        registry
    }
/// Registers a new factory for a target type
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `factory` - The factory instance to create targets of this type.
pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
self.factories.insert(target_type.to_string(), factory);
}
    /// Creates a target of the specified type with the given ID and configuration
    ///
    /// The factory validates the configuration before the target is built.
    ///
    /// # Arguments
    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
    /// * `id` - The identifier for the target instance.
    /// * `config` - The configuration key-value store for the target.
    ///
    /// # Returns
    /// * `Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>` - The created target, or an error
    ///   (including `TargetError::Configuration` for an unregistered type).
    pub async fn create_target(
        &self,
        target_type: &str,
        id: String,
        config: &KVS,
    ) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        let factory = self
            .factories
            .get(target_type)
            .ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {target_type}")))?;
        // Validate configuration before creating target
        factory.validate_config(&id, config)?;
        // Create target
        factory.create_target(id, config).await
    }
/// Creates all targets from a configuration
/// Create all notification targets from system configuration and environment variables.
/// This method processes the creation of each target concurrently as follows:
/// 1. Iterate through all registered target types (e.g. webhooks, mqtt).
/// 2. For each type, resolve its configuration in the configuration file and environment variables.
/// 3. Identify all target instance IDs that need to be created.
/// 4. Combine the default configuration, file configuration, and environment variable configuration for each instance.
/// 5. If the instance is enabled, create an asynchronous task for it to instantiate.
/// 6. Concurrency executes all creation tasks and collects results.
pub async fn create_audit_targets_from_config(
&self,
config: &Config,
) -> AuditResult<Vec<Box<dyn Target<AuditEntry> + Send + Sync>>> {
// Collect only environment variables with the relevant prefix to reduce memory usage
let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();
// A collection of asynchronous tasks for concurrently executing target creation
let mut tasks = FuturesUnordered::new();
// let final_config = config.clone(); // Clone a configuration for aggregating the final result
// Record the defaults for each segment so that the segment can eventually be rebuilt
let mut section_defaults: HashMap<String, KVS> = HashMap::new();
// 1. Traverse all registered plants and process them by target type
for (target_type, factory) in &self.factories {
tracing::Span::current().record("target_type", target_type.as_str());
info!("Start working on target types...");
// 2. Prepare the configuration source
// 2.1. Get the configuration segment in the file, e.g. 'audit_webhook'
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
let file_configs = config.0.get(§ion_name).cloned().unwrap_or_default();
// 2.2. Get the default configuration for that type
let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
debug!(?default_cfg, "Get the default configuration");
// Save defaults for eventual write back
section_defaults.insert(section_name.clone(), default_cfg.clone());
// *** Optimization point 1: Get all legitimate fields of the current target type ***
let valid_fields = factory.get_valid_fields();
debug!(?valid_fields, "Get the legitimate configuration fields");
// 3. Resolve instance IDs and configuration overrides from environment variables
let mut instance_ids_from_env = HashSet::new();
// 3.1. Instance discovery: Based on the '..._ENABLE_INSTANCEID' format
let enable_prefix =
format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
.to_uppercase();
for (key, value) in &all_env {
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false)
&& let Some(id) = key.strip_prefix(&enable_prefix)
&& !id.is_empty()
{
instance_ids_from_env.insert(id.to_lowercase());
}
}
// 3.2. Parse all relevant environment variable configurations
// 3.2.1. Build environment variable prefixes such as 'RUSTFS_AUDIT_WEBHOOK_'
let env_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
// 3.2.2. 'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}}
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
for (key, value) in &all_env {
if let Some(rest) = key.strip_prefix(&env_prefix) {
// Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
// Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
// The first part from the right is INSTANCE_ID
let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
// The remaining part is FIELD_NAME
let field_name_part = parts.next();
let (field_name, instance_id) = match field_name_part {
// Case 1: The format is <FIELD_NAME>_<INSTANCE_ID>
// e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY"
Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()),
// Case 2: The format is <FIELD_NAME> (without INSTANCE_ID)
// e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id="" (Universal configuration `_ DEFAULT_DELIMITER`)
None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()),
};
// *** Optimization point 2: Verify whether the parsed field_name is legal ***
if !field_name.is_empty() && valid_fields.contains(&field_name) {
debug!(
instance_id = %if instance_id.is_empty() { DEFAULT_DELIMITER } else { &instance_id },
%field_name,
%value,
"Parsing to environment variables"
);
env_overrides
.entry(instance_id)
.or_default()
.insert(field_name, value.clone());
} else {
// Ignore illegal field names
warn!(
field_name = %field_name,
"Ignore environment variable fields, not found in the list of valid fields for target type {}",
target_type
);
}
}
}
debug!(?env_overrides, "Complete the environment variable analysis");
// 4. Determine all instance IDs that need to be processed
let mut all_instance_ids: HashSet<String> =
file_configs.keys().filter(|k| *k != DEFAULT_DELIMITER).cloned().collect();
all_instance_ids.extend(instance_ids_from_env);
debug!(?all_instance_ids, "Determine all instance IDs");
// 5. Merge configurations and create tasks for each instance
for id in all_instance_ids {
// 5.1. Merge configuration, priority: Environment variables > File instance configuration > File default configuration
let mut merged_config = default_cfg.clone();
// Instance-specific configuration in application files
if let Some(file_instance_cfg) = file_configs.get(&id) {
merged_config.extend(file_instance_cfg.clone());
}
// Application instance-specific environment variable configuration
if let Some(env_instance_cfg) = env_overrides.get(&id) {
// Convert HashMap<String, String> to KVS
let mut kvs_from_env = KVS::new();
for (k, v) in env_instance_cfg {
kvs_from_env.insert(k.clone(), v.clone());
}
merged_config.extend(kvs_from_env);
}
debug!(instance_id = %id, ?merged_config, "Complete configuration merge");
// 5.2. Check if the instance is enabled
let enabled = merged_config
.lookup(ENABLE_KEY)
.map(|v| {
EnableState::from_str(v.as_str())
.ok()
.map(|s| s.is_enabled())
.unwrap_or(false)
})
.unwrap_or(false);
if enabled {
info!(instance_id = %id, "Target is enabled, ready to create a task");
// 5.3. Create asynchronous tasks for enabled instances
let target_type_clone = target_type.clone();
let tid = id.clone();
let merged_config_arc = Arc::new(merged_config);
tasks.push(async move {
let result = factory.create_target(tid.clone(), &merged_config_arc).await;
(target_type_clone, tid, result, Arc::clone(&merged_config_arc))
});
} else {
info!(instance_id = %id, "Skip the disabled target and will be removed from the final configuration");
// Remove disabled target from final configuration
// final_config.0.entry(section_name.clone()).or_default().remove(&id);
}
}
}
// 6. Concurrently execute all creation tasks and collect results
let mut successful_targets = Vec::new();
let mut successful_configs = Vec::new();
while let Some((target_type, id, result, final_config)) = tasks.next().await {
match result {
Ok(target) => {
info!(target_type = %target_type, instance_id = %id, "Create a target successfully");
successful_targets.push(target);
successful_configs.push((target_type, id, final_config));
}
Err(e) => {
error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create a target");
}
}
}
// 7. Aggregate new configuration and write back to system configuration
if !successful_configs.is_empty() || !section_defaults.is_empty() {
info!(
"Prepare to update {} successfully created target configurations to the system configuration...",
successful_configs.len()
);
let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
for (target_type, id, kvs) in successful_configs {
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
successes_by_section
.entry(section_name)
.or_default()
.insert(id.to_lowercase(), (*kvs).clone());
}
let mut new_config = config.clone();
// Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist
let mut sections: HashSet<String> = HashSet::new();
sections.extend(section_defaults.keys().cloned());
sections.extend(successes_by_section.keys().cloned());
for section in sections {
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
// Add default item
if let Some(default_kvs) = section_defaults.get(§ion)
&& !default_kvs.is_empty()
{
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
// Add successful instance item
if let Some(instances) = successes_by_section.get(§ion) {
for (id, kvs) in instances {
section_map.insert(id.clone(), kvs.clone());
}
}
// Empty breaks are removed and non-empty breaks are replaced entirely.
if section_map.is_empty() {
new_config.0.remove(§ion);
} else {
new_config.0.insert(section, section_map);
}
}
let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
return Err(AuditError::StorageNotAvailable(
"Failed to save target configuration: server storage not initialized".to_string(),
));
};
match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
Ok(_) => {
info!("The new configuration was saved to the system successfully.")
}
Err(e) => {
error!("Failed to save the new configuration: {}", e);
return Err(AuditError::SaveConfig(Box::new(e)));
}
}
}
info!(count = successful_targets.len(), "All target processing completed");
Ok(successful_targets)
}
/// Registers a target in the registry under the given identifier.
///
/// An existing target with the same `id` is replaced (map-insert
/// semantics); the previous target, if any, is simply dropped.
///
/// # Arguments
/// * `id` - The identifier for the target.
/// * `target` - The target instance to be added.
pub fn add_target(&mut self, id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) {
    let _previous = self.targets.insert(id, target);
}
/// Removes a target from the registry.
///
/// # Arguments
/// * `id` - The identifier for the target to be removed.
///
/// # Returns
/// * `Option<Box<dyn Target<AuditEntry> + Send + Sync>>` - The removed
///   target if one was registered under `id`, otherwise `None`.
pub fn remove_target(&mut self, id: &str) -> Option<Box<dyn Target<AuditEntry> + Send + Sync>> {
    let removed = self.targets.remove(id);
    removed
}
/// Looks up a target in the registry without removing it.
///
/// # Arguments
/// * `id` - The identifier for the target to be retrieved.
///
/// # Returns
/// * `Option<&(dyn Target<AuditEntry> + Send + Sync)>` - A borrowed view
///   of the target if it exists.
pub fn get_target(&self, id: &str) -> Option<&(dyn Target<AuditEntry> + Send + Sync)> {
    let entry = self.targets.get(id);
    entry.map(|boxed| boxed.as_ref())
}
/// Lists the IDs of all registered targets.
///
/// # Returns
/// * `Vec<String>` - All target IDs currently in the registry
///   (iteration order of the underlying map; no ordering guarantee).
pub fn list_targets(&self) -> Vec<String> {
    let mut ids = Vec::with_capacity(self.targets.len());
    ids.extend(self.targets.keys().cloned());
    ids
}
/// Closes every registered target and clears the registry.
///
/// Each target's `close()` is awaited in turn; failures are logged and do
/// not stop the remaining targets from being closed. If any close failed,
/// the first error encountered is returned.
///
/// # Returns
/// * `AuditResult<()>` - `Ok(())` when every target closed cleanly,
///   otherwise the first close error wrapped in `AuditError::Target`.
pub async fn close_all(&mut self) -> AuditResult<()> {
    let mut first_error = None;
    for (id, target) in self.targets.drain() {
        if let Err(e) = target.close().await {
            error!(target_id = %id, error = %e, "Failed to close audit target");
            // Keep only the first failure; later ones are already logged.
            if first_error.is_none() {
                first_error = Some(e);
            }
        }
    }
    match first_error {
        Some(e) => Err(AuditError::Target(e)),
        None => Ok(()),
    }
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/src/factory.rs | crates/audit/src/factory.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::AuditEntry;
use async_trait::async_trait;
use hashbrown::HashSet;
use rumqttc::QoS;
use rustfs_config::audit::{AUDIT_MQTT_KEYS, AUDIT_WEBHOOK_KEYS, ENV_AUDIT_MQTT_KEYS, ENV_AUDIT_WEBHOOK_KEYS};
use rustfs_config::{
AUDIT_DEFAULT_DIR, DEFAULT_LIMIT, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_CLIENT_CERT,
WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
};
use rustfs_ecstore::config::KVS;
use rustfs_targets::{
Target,
error::TargetError,
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
};
use std::time::Duration;
use tracing::{debug, warn};
use url::Url;
/// Trait for creating targets from configuration.
///
/// Implemented once per audit target kind (webhook, MQTT). The audit
/// registry calls `create_target` with the fully merged configuration
/// (file defaults + file instance + environment overrides) for each
/// enabled instance.
#[async_trait]
pub trait TargetFactory: Send + Sync {
    /// Creates a target from configuration.
    ///
    /// `id` is the instance identifier; `config` is the merged KVS for
    /// that instance. Returns a boxed, sendable target on success.
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>;
    /// Validates target configuration without constructing the target.
    fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError>;
    /// Returns a set of valid configuration field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_fields(&self) -> HashSet<String>;
    /// Returns a set of valid configuration env field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_env_fields(&self) -> HashSet<String>;
}
/// Factory for creating Webhook targets.
pub struct WebhookTargetFactory;

#[async_trait]
impl TargetFactory for WebhookTargetFactory {
    /// Builds a webhook target from the merged configuration KVS.
    ///
    /// The endpoint is the only mandatory field; every other value falls
    /// back to a sensible default when absent or unparsable.
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        // Resolve and validate the mandatory endpoint first.
        let raw_endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        let trimmed = raw_endpoint.trim();
        let endpoint_url = Url::parse(trimmed)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{trimmed}')")))?;
        // A queue limit that is missing or not a valid u64 falls back to the default.
        let queue_limit = config
            .lookup(WEBHOOK_QUEUE_LIMIT)
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(DEFAULT_LIMIT);
        let args = WebhookArgs {
            enable: true, // Disabled instances never reach the factory.
            endpoint: endpoint_url,
            auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
            queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit,
            client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
            client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };
        Ok(Box::new(rustfs_targets::target::webhook::WebhookTarget::new(id, args)?))
    }

    /// Validates the webhook configuration: endpoint URL shape, paired TLS
    /// material, and an absolute queue directory.
    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        let endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        debug!("endpoint: {}", endpoint);
        let trimmed = endpoint.trim();
        Url::parse(trimmed)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{trimmed}')")))?;
        // TLS material is all-or-nothing: cert and key must come together.
        let cert = config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default();
        let key = config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default();
        if cert.is_empty() != key.is_empty() {
            return Err(TargetError::Configuration(
                "Both client_cert and client_key must be specified together".to_string(),
            ));
        }
        let queue_dir = config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string());
        if !queue_dir.is_empty() && !std::path::Path::new(&queue_dir).is_absolute() {
            return Err(TargetError::Configuration("Webhook queue directory must be an absolute path".to_string()));
        }
        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_WEBHOOK_KEYS.iter().map(|key| key.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_WEBHOOK_KEYS.iter().map(|key| key.to_string()).collect()
    }
}
/// Factory for creating MQTT targets.
pub struct MQTTTargetFactory;

#[async_trait]
impl TargetFactory for MQTTTargetFactory {
    /// Builds an MQTT target from the merged configuration KVS.
    ///
    /// `broker` and `topic` are mandatory; QoS, credentials, intervals and
    /// queue settings fall back to defaults when absent or unparsable.
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let broker_url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
        let topic = config
            .lookup(MQTT_TOPIC)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;
        // Numeric QoS -> rumqttc QoS; anything missing or out of range
        // falls back to at-least-once, matching the original mapping.
        let qos = match config.lookup(MQTT_QOS).and_then(|v| v.parse::<u8>().ok()) {
            Some(0) => QoS::AtMostOnce,
            Some(2) => QoS::ExactlyOnce,
            _ => QoS::AtLeastOnce,
        };
        // Shared helper: read a seconds-valued key with a default.
        let seconds = |key: &str, default_secs: u64| {
            config
                .lookup(key)
                .and_then(|v| v.parse::<u64>().ok())
                .map(Duration::from_secs)
                .unwrap_or_else(|| Duration::from_secs(default_secs))
        };
        let args = MQTTArgs {
            enable: true, // Only enabled instances are handed to the factory.
            broker: broker_url,
            topic,
            qos,
            username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
            password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
            max_reconnect_interval: seconds(MQTT_RECONNECT_INTERVAL, 5),
            keep_alive: seconds(MQTT_KEEP_ALIVE_INTERVAL, 30),
            queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit: config
                .lookup(MQTT_QUEUE_LIMIT)
                .and_then(|v| v.parse::<u64>().ok())
                .unwrap_or(DEFAULT_LIMIT),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };
        Ok(Box::new(rustfs_targets::target::mqtt::MQTTTarget::new(id, args)?))
    }

    /// Validates the MQTT configuration: broker URL and scheme, topic
    /// presence, QoS range, and an absolute queue directory.
    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
        // Only MQTT-capable transports are accepted.
        if !matches!(url.scheme(), "tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts") {
            return Err(TargetError::Configuration("Unsupported broker URL scheme".to_string()));
        }
        if config.lookup(MQTT_TOPIC).is_none() {
            return Err(TargetError::Configuration("Missing MQTT topic".to_string()));
        }
        let qos_value = config.lookup(MQTT_QOS);
        if let Some(qos_str) = qos_value.as_deref() {
            let qos: u8 = qos_str
                .parse()
                .map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
            if qos > 2 {
                return Err(TargetError::Configuration("QoS must be 0, 1, or 2".to_string()));
            }
        }
        let queue_dir = config.lookup(MQTT_QUEUE_DIR).unwrap_or_default();
        if !queue_dir.is_empty() {
            if !std::path::Path::new(&queue_dir).is_absolute() {
                return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
            }
            // QoS 0 gives no delivery guarantee, so on-disk queuing cannot
            // prevent loss; warn rather than reject.
            if qos_value.as_deref() == Some("0") {
                warn!("Using queue_dir with QoS 0 may result in event loss");
            }
        }
        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_MQTT_KEYS.iter().map(|key| key.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_MQTT_KEYS.iter().map(|key| key.to_string()).collect()
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/tests/config_parsing_test.rs | crates/audit/tests/config_parsing_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tests for audit configuration parsing and validation
use rustfs_ecstore::config::KVS;
#[test]
fn test_webhook_valid_fields() {
    // Webhook configuration keys supported by the audit system.
    let fields = [
        "enable",
        "endpoint",
        "auth_token",
        "client_cert",
        "client_key",
        "batch_size",
        "queue_size",
        "queue_dir",
        "max_retry",
        "retry_interval",
        "http_timeout",
    ];
    // Sanity-check naming: every key is non-empty and whitespace-free.
    for field in fields.iter() {
        assert!(!field.is_empty());
        assert!(!field.contains(' '));
    }
}
#[test]
fn test_mqtt_valid_fields() {
    // MQTT configuration keys supported by the audit system.
    let fields = [
        "enable",
        "broker",
        "topic",
        "username",
        "password",
        "qos",
        "keep_alive_interval",
        "reconnect_interval",
        "queue_dir",
        "queue_limit",
    ];
    // Sanity-check naming: every key is non-empty and whitespace-free.
    for field in fields.iter() {
        assert!(!field.is_empty());
        assert!(!field.contains(' '));
    }
}
#[test]
fn test_config_section_names() {
    // Section names used for audit targets in the server configuration.
    let webhook_section = "audit_webhook";
    let mqtt_section = "audit_mqtt";
    assert_eq!(webhook_section, "audit_webhook");
    assert_eq!(mqtt_section, "audit_mqtt");
    // Every audit section follows the `audit_<type>` naming pattern.
    for section in [webhook_section, mqtt_section] {
        assert!(section.starts_with("audit_"));
    }
}
#[test]
fn test_environment_variable_parsing() {
    // Environment variable prefix patterns used by the audit system.
    let env_prefix = "RUSTFS_";
    let audit_webhook_prefix = format!("{env_prefix}AUDIT_WEBHOOK_");
    let audit_mqtt_prefix = format!("{env_prefix}AUDIT_MQTT_");
    assert_eq!(audit_webhook_prefix, "RUSTFS_AUDIT_WEBHOOK_");
    assert_eq!(audit_mqtt_prefix, "RUSTFS_AUDIT_MQTT_");
    // Instance parsing: <PREFIX><FIELD>_<INSTANCE_ID>
    let example_env_var = "RUSTFS_AUDIT_WEBHOOK_ENABLE_PRIMARY";
    assert!(example_env_var.starts_with(&audit_webhook_prefix));
    let suffix = &example_env_var[audit_webhook_prefix.len()..];
    assert_eq!(suffix, "ENABLE_PRIMARY");
    // Split field and instance at the LAST underscore. The previous
    // `if let` silently skipped these assertions when no underscore was
    // found, letting the test pass vacuously; fail loudly instead.
    let last_underscore = suffix.rfind('_').expect("suffix must contain an underscore");
    let field = &suffix[..last_underscore];
    let instance = &suffix[last_underscore + 1..];
    assert_eq!(field, "ENABLE");
    assert_eq!(instance, "PRIMARY");
}
#[test]
fn test_configuration_merge() {
    // Precedence when merging: environment > file instance > file default.
    let mut default_config = KVS::new();
    default_config.insert("enable".to_string(), "off".to_string());
    default_config.insert("endpoint".to_string(), "http://default".to_string());
    let mut instance_config = KVS::new();
    instance_config.insert("endpoint".to_string(), "http://instance".to_string());
    let mut env_config = KVS::new();
    env_config.insert("enable".to_string(), "on".to_string());
    // Apply layers from lowest to highest priority.
    let mut merged = default_config;
    for layer in [instance_config, env_config] {
        merged.extend(layer);
    }
    // Environment wins for `enable`; the instance wins for `endpoint`.
    assert_eq!(merged.lookup("enable"), Some("on".to_string()));
    assert_eq!(merged.lookup("endpoint"), Some("http://instance".to_string()));
}
#[test]
fn test_duration_parsing_formats() {
    // (input, expected seconds) pairs; None means the input must not parse.
    let cases: Vec<(&str, Option<u64>)> = vec![
        ("3s", Some(3)),
        ("5m", Some(300)),   // minutes are converted to seconds
        ("1000ms", Some(1)), // milliseconds are converted to seconds
        ("60", Some(60)),    // a bare number defaults to seconds
        ("invalid", None),
        ("", None),
    ];
    for (input, expected_seconds) in cases {
        // Compare whole-second values so Some/None mismatches fail too.
        let parsed = parse_duration_test(input).map(|d| d.as_secs());
        assert_eq!(parsed, expected_seconds, "Failed for input: {input}");
    }
}
// Helper mirroring the duration-parsing logic in registry.rs.
// Recognized suffixes: "ms" (milliseconds), "s" (seconds), "m" (minutes);
// a bare integer is interpreted as seconds. Returns None on parse failure.
fn parse_duration_test(s: &str) -> Option<std::time::Duration> {
    use std::time::Duration;
    // Check "ms" before "s": every "ms" string also ends in 's'.
    if let Some(millis) = s.strip_suffix("ms") {
        return millis.parse::<u64>().ok().map(Duration::from_millis);
    }
    if let Some(secs) = s.strip_suffix('s') {
        return secs.parse::<u64>().ok().map(Duration::from_secs);
    }
    if let Some(mins) = s.strip_suffix('m') {
        return mins.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60));
    }
    s.parse::<u64>().ok().map(Duration::from_secs)
}
#[test]
fn test_url_validation() {
    use url::Url;
    // Endpoints and brokers that must parse successfully.
    let valid_urls = [
        "http://localhost:3020/webhook",
        "https://api.example.com/audit",
        "mqtt://broker.example.com:1883",
        "tcp://localhost:1883",
    ];
    for url_str in valid_urls {
        assert!(Url::parse(url_str).is_ok(), "Valid URL should parse: {url_str}");
    }
    // Strings the parser must reject. "ftp://unsupported.com" is excluded:
    // it is syntactically valid, just an unsupported scheme.
    let invalid_urls = ["", "not-a-url", "http://"];
    for url_str in invalid_urls {
        assert!(Url::parse(url_str).is_err(), "Invalid URL should not parse: {url_str}");
    }
}
#[test]
fn test_qos_parsing() {
    // Valid MQTT QoS levels are 0, 1 and 2; everything else is rejected.
    let cases = [
        ("0", Some(0u8)),
        ("1", Some(1)),
        ("2", Some(2)),
        ("3", None), // out of range
        ("invalid", None),
    ];
    for (input, expected) in cases {
        let parsed = input.parse::<u8>().ok().filter(|q| *q <= 2);
        assert_eq!(parsed, expected, "Failed for QoS input: {input}");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/tests/integration_test.rs | crates/audit/tests/integration_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_audit::*;
use rustfs_ecstore::config::{Config, KVS};
use std::collections::HashMap;
#[tokio::test]
async fn test_audit_system_creation() {
    // A freshly constructed audit system must start in the Stopped state.
    let system = AuditSystem::new();
    assert_eq!(system.get_state().await, rustfs_audit::system::AuditSystemState::Stopped);
}
#[tokio::test]
async fn test_audit_registry_creation() {
    // A brand-new registry contains no targets.
    let registry = AuditRegistry::new();
    assert!(registry.list_targets().is_empty());
}
#[tokio::test]
async fn test_config_parsing_webhook() {
    // Build a minimal config: one enabled default webhook instance ("_").
    let mut default_kvs = KVS::new();
    default_kvs.insert("enable".to_string(), "on".to_string());
    default_kvs.insert("endpoint".to_string(), "http://localhost:3020/webhook".to_string());
    let mut audit_webhook_section = HashMap::new();
    audit_webhook_section.insert("_".to_string(), default_kvs);
    let mut config = Config(HashMap::new());
    config.0.insert("audit_webhook".to_string(), audit_webhook_section);
    let registry = AuditRegistry::new();
    // Server storage is not initialized in the test environment, so target
    // creation is expected to fail with StorageNotAvailable. The point of
    // this test is that the configuration parsing itself does not panic.
    match registry.create_audit_targets_from_config(&config).await {
        Err(AuditError::StorageNotAvailable(_)) => {
            // Expected: no storage backend in tests.
        }
        Err(e) => {
            // Any other error may indicate a parsing problem worth a look.
            println!("Unexpected error: {e}");
        }
        Ok(_) => {
            // Unexpected success without server storage; tolerated.
        }
    }
}
#[test]
fn test_event_name_parsing() {
    use rustfs_targets::EventName;
    // Concrete event names parse to their matching enum variants.
    assert_eq!(EventName::parse("s3:ObjectCreated:Put").unwrap(), EventName::ObjectCreatedPut);
    // Wildcard names parse to the corresponding "All" variant.
    assert_eq!(EventName::parse("s3:ObjectAccessed:*").unwrap(), EventName::ObjectAccessedAll);
    // Expanding an "All" variant yields its concrete members.
    let expanded = EventName::ObjectCreatedAll.expand();
    for concrete in [EventName::ObjectCreatedPut, EventName::ObjectCreatedPost] {
        assert!(expanded.contains(&concrete));
    }
    // Every concrete event contributes a non-zero bit to the mask.
    assert!(EventName::ObjectCreatedPut.mask() > 0);
}
#[test]
fn test_enable_value_parsing() {
    // Accepted "truthy" spellings; anything else is treated as disabled.
    let cases = [
        ("1", true),
        ("on", true),
        ("true", true),
        ("yes", true),
        ("0", false),
        ("off", false),
        ("false", false),
        ("no", false),
        ("invalid", false),
    ];
    for (input, expected) in cases {
        let enabled = matches!(input.to_lowercase().as_str(), "1" | "on" | "true" | "yes");
        assert_eq!(enabled, expected, "Failed for input: {input}");
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/tests/observability_test.rs | crates/audit/tests/observability_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tests for audit system observability and metrics
use rustfs_audit::observability::*;
use std::time::Duration;
#[tokio::test]
async fn test_metrics_collection() {
    let metrics = AuditMetrics::new();
    // A fresh collector reports all-zero counters.
    let baseline = metrics.generate_report().await;
    assert_eq!(baseline.total_events_processed, 0);
    assert_eq!(baseline.total_events_failed, 0);
    assert_eq!(baseline.events_per_second, 0.0);
    // Record two successes and one failure.
    metrics.record_event_success(Duration::from_millis(10));
    metrics.record_event_success(Duration::from_millis(20));
    metrics.record_event_failure(Duration::from_millis(30));
    let report = metrics.generate_report().await;
    assert_eq!(report.total_events_processed, 2);
    assert_eq!(report.total_events_failed, 1);
    // 1 failure out of 3 events.
    assert_eq!(report.error_rate_percent, 33.33333333333333);
    // Latency averages across all three recordings: (10 + 20 + 30) / 3.
    assert_eq!(report.average_latency_ms, 20.0);
}
#[tokio::test]
async fn test_target_metrics() {
    let metrics = AuditMetrics::new();
    // Two successful deliveries followed by one failure.
    metrics.record_target_success();
    metrics.record_target_success();
    metrics.record_target_failure();
    // Success rate is 2/3, expressed as a percentage.
    assert_eq!(metrics.get_target_success_rate(), 66.66666666666666);
}
#[tokio::test]
async fn test_performance_validation_pass() {
    let metrics = AuditMetrics::new();
    // Simulate high EPS with low latency (5ms, well under the 30ms bound
    // asserted below).
    for _ in 0..5000 {
        metrics.record_event_success(Duration::from_millis(5));
    }
    // Small delay to make EPS calculation meaningful
    // NOTE(review): this test is timing-sensitive — the validation result
    // presumably depends on elapsed wall-clock time; confirm it is stable
    // on slow/loaded CI machines.
    tokio::time::sleep(Duration::from_millis(1)).await;
    let validation = metrics.validate_performance_requirements().await;
    // Should meet latency requirement (all recordings were 5ms <= 30ms).
    assert!(validation.meets_latency_requirement, "Latency requirement should be met");
    assert!(validation.current_latency_ms <= 30.0);
    // Should meet error rate requirement (no failures were recorded).
    assert!(validation.meets_error_rate_requirement, "Error rate requirement should be met");
    assert_eq!(validation.current_error_rate, 0.0);
}
#[tokio::test]
async fn test_performance_validation_fail() {
    let metrics = AuditMetrics::new();
    // Record latencies well above the 30ms requirement, plus one failure
    // so the error-rate requirement is violated as well.
    metrics.record_event_success(Duration::from_millis(50));
    metrics.record_event_failure(Duration::from_millis(60));
    let validation = metrics.validate_performance_requirements().await;
    // The latency requirement must fail.
    assert!(!validation.meets_latency_requirement, "Latency requirement should fail");
    assert!(validation.current_latency_ms > 30.0);
    // The error-rate requirement must fail (1 failure out of 2 events).
    assert!(!validation.meets_error_rate_requirement, "Error rate requirement should fail");
    assert!(validation.current_error_rate > 1.0);
    // A failing validation must carry at least one recommendation.
    assert!(!validation.recommendations.is_empty());
}
#[tokio::test]
async fn test_global_metrics() {
    // Exercise the free-function API that records into process-global metrics.
    // NOTE(review): this test mutates and resets shared global state; if
    // other tests in this binary touch the same globals concurrently, the
    // `> 0` assertions and the reset below could interfere — confirm the
    // test harness isolation.
    record_audit_success(Duration::from_millis(10));
    record_audit_failure(Duration::from_millis(20));
    record_target_success();
    record_target_failure();
    record_config_reload();
    record_system_start();
    // Every counter category recorded above must now be non-zero.
    let report = get_metrics_report().await;
    assert!(report.total_events_processed > 0);
    assert!(report.total_events_failed > 0);
    assert!(report.config_reload_count > 0);
    assert!(report.system_start_count > 0);
    // Reset metrics and verify the event counters are back to zero.
    reset_metrics().await;
    let report_after_reset = get_metrics_report().await;
    assert_eq!(report_after_reset.total_events_processed, 0);
    assert_eq!(report_after_reset.total_events_failed, 0);
}
#[test]
fn test_metrics_report_formatting() {
    let report = AuditMetricsReport {
        events_per_second: 1500.5,
        average_latency_ms: 25.75,
        error_rate_percent: 0.5,
        target_success_rate_percent: 99.5,
        total_events_processed: 10000,
        total_events_failed: 50,
        config_reload_count: 3,
        system_start_count: 1,
    };
    let formatted = report.format();
    // Every metric must appear in the rendered report: floats with two
    // decimals, counters as plain integers.
    for needle in ["1500.50", "25.75", "0.50", "99.50", "10000", "50"] {
        assert!(formatted.contains(needle), "missing {needle} in report");
    }
}
#[test]
fn test_performance_validation_formatting() {
    // One failed requirement (EPS), two passing ones.
    let validation = PerformanceValidation {
        meets_eps_requirement: false,
        meets_latency_requirement: true,
        meets_error_rate_requirement: true,
        current_eps: 2500.0,
        current_latency_ms: 15.0,
        current_error_rate: 0.1,
        recommendations: vec![
            "EPS too low, consider optimization".to_string(),
            "Latency is good".to_string(),
        ],
    };
    let formatted = validation.format();
    // A failed requirement renders as FAIL, metrics appear with two
    // decimals, and every recommendation is echoed verbatim.
    for needle in ["❌ FAIL", "2500.00", "15.00", "0.10", "EPS too low", "Latency is good"] {
        assert!(formatted.contains(needle), "missing {needle} in output");
    }
}
#[test]
fn test_performance_validation_all_pass() {
    // All three requirement flags set: the aggregate must report success.
    let validation = PerformanceValidation {
        meets_eps_requirement: true,
        meets_latency_requirement: true,
        meets_error_rate_requirement: true,
        current_eps: 5000.0,
        current_latency_ms: 10.0,
        current_error_rate: 0.01,
        recommendations: vec!["All requirements met".to_string()],
    };
    assert!(validation.all_requirements_met());
    let formatted = validation.format();
    // A fully-passing validation renders as PASS and echoes recommendations.
    for needle in ["✅ PASS", "All requirements met"] {
        assert!(formatted.contains(needle));
    }
}
#[tokio::test]
async fn test_eps_calculation() {
    let metrics = AuditMetrics::new();
    // Record 100 quick successes so there is something to rate.
    for _ in 0..100 {
        metrics.record_event_success(Duration::from_millis(1));
    }
    // Small delay to allow EPS calculation
    // NOTE(review): the assertions below depend on wall-clock timing —
    // 100 events over ~10ms gives a very high computed rate, but on a
    // heavily loaded machine the elapsed time could stretch; confirm the
    // `> 1000` threshold leaves enough margin for CI.
    tokio::time::sleep(Duration::from_millis(10)).await;
    let eps = metrics.get_events_per_second().await;
    // Should have some EPS value > 0 once events are recorded.
    assert!(eps > 0.0, "EPS should be greater than 0");
    // EPS should be reasonable (events / time).
    // With 100 events in ~10ms, should be very high.
    assert!(eps > 1000.0, "EPS should be high for short time period");
}
#[test]
fn test_error_rate_calculation() {
    let metrics = AuditMetrics::new();
    // With no recorded events the error rate reads 0%.
    assert_eq!(metrics.get_error_rate(), 0.0);
    // 7 successes followed by 3 failures => 30% error rate.
    for i in 0..10 {
        if i < 7 {
            metrics.record_event_success(Duration::from_millis(1));
        } else {
            metrics.record_event_failure(Duration::from_millis(1));
        }
    }
    assert_eq!(metrics.get_error_rate(), 30.0);
}
#[test]
fn test_target_success_rate_calculation() {
    let metrics = AuditMetrics::new();
    // With no recorded operations the success rate defaults to 100%.
    assert_eq!(metrics.get_target_success_rate(), 100.0);
    // 8 successes followed by 2 failures => 80% success rate.
    (0..8).for_each(|_| metrics.record_target_success());
    (0..2).for_each(|_| metrics.record_target_failure());
    assert_eq!(metrics.get_target_success_rate(), 80.0);
}
#[tokio::test]
async fn test_metrics_reset() {
    let metrics = AuditMetrics::new();
    // Populate every counter category once.
    metrics.record_event_success(Duration::from_millis(10));
    metrics.record_target_success();
    metrics.record_config_reload();
    metrics.record_system_start();
    // Confirm the counters registered before resetting.
    let before = metrics.generate_report().await;
    assert!(before.total_events_processed > 0);
    assert!(before.config_reload_count > 0);
    assert!(before.system_start_count > 0);
    // Reset wipes everything, including the reload/start counters.
    metrics.reset().await;
    let after = metrics.generate_report().await;
    assert_eq!(after.total_events_processed, 0);
    assert_eq!(after.total_events_failed, 0);
    assert_eq!(after.config_reload_count, 0);
    assert_eq!(after.system_start_count, 0);
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/tests/performance_test.rs | crates/audit/tests/performance_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Performance and observability tests for audit system
use rustfs_audit::*;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::timeout;
#[tokio::test]
async fn test_audit_system_startup_performance() {
// Test that audit system starts within reasonable time
let system = AuditSystem::new();
let start = Instant::now();
// Create minimal config for testing
let config = rustfs_ecstore::config::Config(std::collections::HashMap::new());
// System should start quickly even with empty config
let _result = timeout(Duration::from_secs(5), system.start(config)).await;
let elapsed = start.elapsed();
println!("Audit system startup took: {elapsed:?}");
// Should complete within 5 seconds
assert!(elapsed < Duration::from_secs(5), "Startup took too long: {elapsed:?}");
// Clean up
let _ = system.close().await;
}
#[tokio::test]
async fn test_concurrent_target_creation() {
// Test that multiple targets can be created concurrently
let registry = AuditRegistry::new();
// Create config with multiple webhook instances
let mut config = rustfs_ecstore::config::Config(std::collections::HashMap::new());
let mut webhook_section = std::collections::HashMap::new();
// Create multiple instances for concurrent creation test
for i in 1..=5 {
let mut kvs = rustfs_ecstore::config::KVS::new();
kvs.insert("enable".to_string(), "on".to_string());
kvs.insert("endpoint".to_string(), format!("http://localhost:302{i}/webhook"));
webhook_section.insert(format!("instance_{i}"), kvs);
}
config.0.insert("audit_webhook".to_string(), webhook_section);
let start = Instant::now();
// This will fail due to server storage not being initialized, but we can measure timing
let result = registry.create_audit_targets_from_config(&config).await;
let elapsed = start.elapsed();
println!("Concurrent target creation took: {elapsed:?}");
// Should complete quickly even with multiple targets
assert!(elapsed < Duration::from_secs(10), "Target creation took too long: {elapsed:?}");
// Verify it fails with expected error (server not initialized)
match result {
Err(AuditError::StorageNotAvailable(_)) => {
// Expected in test environment
}
Err(e) => {
println!("Unexpected error during concurrent creation: {e}");
}
Ok(_) => {
println!("Unexpected success in test environment");
}
}
}
#[tokio::test]
async fn test_audit_log_dispatch_performance() {
let system = AuditSystem::new();
// Create minimal config
let config = rustfs_ecstore::config::Config(HashMap::new());
let start_result = system.start(config).await;
if start_result.is_err() {
println!("AuditSystem failed to start: {start_result:?}");
return; // Alternatively: assert!(false, "AuditSystem failed to start");
}
use chrono::Utc;
use rustfs_targets::EventName;
use serde_json::json;
use std::collections::HashMap;
let id = 1;
let mut req_header = hashbrown::HashMap::new();
req_header.insert("authorization".to_string(), format!("Bearer test-token-{id}"));
req_header.insert("content-type".to_string(), "application/octet-stream".to_string());
let mut resp_header = hashbrown::HashMap::new();
resp_header.insert("x-response".to_string(), "ok".to_string());
let mut tags = hashbrown::HashMap::new();
tags.insert(format!("tag-{id}"), json!("sample"));
let mut req_query = hashbrown::HashMap::new();
req_query.insert("id".to_string(), id.to_string());
let api_details = ApiDetails {
name: Some("PutObject".to_string()),
bucket: Some("test-bucket".to_string()),
object: Some(format!("test-object-{id}")),
status: Some("success".to_string()),
status_code: Some(200),
input_bytes: Some(1024),
output_bytes: Some(0),
header_bytes: Some(128),
time_to_first_byte: Some("1ms".to_string()),
time_to_first_byte_in_ns: Some("1000000".to_string()),
time_to_response: Some("2ms".to_string()),
time_to_response_in_ns: Some("2000000".to_string()),
..Default::default()
};
// Create sample audit log entry
let audit_entry = AuditEntry {
version: "1".to_string(),
deployment_id: Some(format!("test-deployment-{id}")),
site_name: Some("test-site".to_string()),
time: Utc::now(),
event: EventName::ObjectCreatedPut,
entry_type: Some("object".to_string()),
trigger: "api".to_string(),
api: api_details,
remote_host: Some("127.0.0.1".to_string()),
request_id: Some(format!("test-request-{id}")),
user_agent: Some("test-agent".to_string()),
req_path: Some(format!("/test-bucket/test-object-{id}")),
req_host: Some("test-host".to_string()),
req_node: Some("node-1".to_string()),
req_claims: None,
req_query: Some(req_query),
req_header: Some(req_header),
resp_header: Some(resp_header),
tags: Some(tags),
access_key: Some(format!("AKIA{id}")),
parent_user: Some(format!("parent-{id}")),
error: None,
};
let start = Instant::now();
// Dispatch audit log (should be fast since no targets are configured)
let result = system.dispatch(Arc::new(audit_entry)).await;
let elapsed = start.elapsed();
println!("Audit log dispatch took: {elapsed:?}");
// Should be very fast (sub-millisecond for no targets)
assert!(elapsed < Duration::from_millis(100), "Dispatch took too long: {elapsed:?}");
// Should succeed even with no targets
assert!(result.is_ok(), "Dispatch should succeed with no targets");
// Clean up
let _ = system.close().await;
}
#[tokio::test]
async fn test_system_state_transitions() {
let system = AuditSystem::new();
// Initial state should be stopped
assert_eq!(system.get_state().await, rustfs_audit::system::AuditSystemState::Stopped);
// Start system
let config = rustfs_ecstore::config::Config(std::collections::HashMap::new());
let start_result = system.start(config).await;
// Should be running (or failed due to server storage)
let state = system.get_state().await;
match start_result {
Ok(_) => {
assert_eq!(state, rustfs_audit::system::AuditSystemState::Running);
}
Err(_) => {
// Expected in test environment due to server storage not being initialized
assert_eq!(state, rustfs_audit::system::AuditSystemState::Stopped);
}
}
// Clean up
let _ = system.close().await;
assert_eq!(system.get_state().await, rustfs_audit::system::AuditSystemState::Stopped);
}
#[test]
fn test_event_name_mask_performance() {
use rustfs_targets::EventName;
// Test that event name mask calculation is efficient
let events = vec![
EventName::ObjectCreatedPut,
EventName::ObjectAccessedGet,
EventName::ObjectRemovedDelete,
EventName::ObjectCreatedAll,
EventName::Everything,
];
let start = Instant::now();
// Calculate masks for many events
for _ in 0..1000 {
for event in &events {
let _mask = event.mask();
}
}
let elapsed = start.elapsed();
println!("Event mask calculation (5000 ops) took: {elapsed:?}");
// Should be very fast
assert!(elapsed < Duration::from_millis(100), "Mask calculation too slow: {elapsed:?}");
}
#[test]
fn test_event_name_expansion_performance() {
use rustfs_targets::EventName;
// Test that event name expansion is efficient
let compound_events = vec![
EventName::ObjectCreatedAll,
EventName::ObjectAccessedAll,
EventName::ObjectRemovedAll,
EventName::Everything,
];
let start = Instant::now();
// Expand events many times
for _ in 0..1000 {
for event in &compound_events {
let _expanded = event.expand();
}
}
let elapsed = start.elapsed();
println!("Event expansion (4000 ops) took: {elapsed:?}");
// Should be very fast
assert!(elapsed < Duration::from_millis(100), "Expansion too slow: {elapsed:?}");
}
#[tokio::test]
async fn test_registry_operations_performance() {
let registry = AuditRegistry::new();
let start = Instant::now();
// Test basic registry operations
for _ in 0..1000 {
let targets = registry.list_targets();
let _target = registry.get_target("nonexistent");
assert!(targets.is_empty());
}
let elapsed = start.elapsed();
println!("Registry operations (2000 ops) took: {elapsed:?}");
// Should be very fast for empty registry
assert!(elapsed < Duration::from_millis(100), "Registry ops too slow: {elapsed:?}");
}
// Performance requirements validation
#[test]
fn test_performance_requirements() {
// According to requirements: ≥ 3k EPS/node; P99 < 30ms (default)
// These are synthetic tests since we can't actually achieve 3k EPS
// without real server storage and network targets, but we can validate
// that our core algorithms are efficient enough
let start = Instant::now();
// Simulate processing 3000 events worth of operations
for i in 0..3000 {
// Simulate event name parsing and processing
let _event_id = format!("s3:ObjectCreated:Put_{i}");
let _timestamp = chrono::Utc::now().to_rfc3339();
// Simulate basic audit entry creation overhead
let _entry_size = 512; // bytes
let _processing_time = std::time::Duration::from_nanos(100); // simulated
}
let elapsed = start.elapsed();
let eps = 3000.0 / elapsed.as_secs_f64();
println!("Simulated 3000 events in {elapsed:?} ({eps:.0} EPS)");
// Our core processing should easily handle 3k EPS worth of CPU overhead
// The actual EPS limit will be determined by network I/O to targets
assert!(eps > 10000.0, "Core processing too slow for 3k EPS target: {eps} EPS");
// P99 latency requirement: < 30ms
// For core processing, we should be much faster than this
let avg_latency = elapsed / 3000;
println!("Average processing latency: {avg_latency:?}");
assert!(avg_latency < Duration::from_millis(1), "Processing latency too high: {avg_latency:?}");
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/audit/tests/system_integration_test.rs | crates/audit/tests/system_integration_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Comprehensive integration tests for the complete audit system
use rustfs_audit::*;
use rustfs_ecstore::config::{Config, KVS};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
#[tokio::test]
async fn test_complete_audit_system_lifecycle() {
// Test the complete lifecycle of the audit system
let system = AuditSystem::new();
// 1. Initial state should be stopped
assert_eq!(system.get_state().await, system::AuditSystemState::Stopped);
assert!(!system.is_running().await);
// 2. Start with empty config (will fail due to no server storage in test)
let config = Config(HashMap::new());
let start_result = system.start(config).await;
// Should fail in test environment but state handling should work
match start_result {
Err(AuditError::StorageNotAvailable(_)) => {
// Expected in test environment
assert_eq!(system.get_state().await, system::AuditSystemState::Stopped);
}
Ok(_) => {
// If it somehow succeeds, verify running state
assert_eq!(system.get_state().await, system::AuditSystemState::Running);
assert!(system.is_running().await);
// Test pause/resume
system.pause().await.expect("Should pause successfully");
assert_eq!(system.get_state().await, system::AuditSystemState::Paused);
system.resume().await.expect("Should resume successfully");
assert_eq!(system.get_state().await, system::AuditSystemState::Running);
}
Err(e) => {
panic!("Unexpected error: {e}");
}
}
// 3. Test close
system.close().await.expect("Should close successfully");
assert_eq!(system.get_state().await, system::AuditSystemState::Stopped);
assert!(!system.is_running().await);
}
#[tokio::test]
async fn test_audit_system_with_metrics() {
let system = AuditSystem::new();
// Reset metrics for clean test
system.reset_metrics().await;
// Try to start system (will fail but should record metrics)
let config = Config(HashMap::new());
let _ = system.start(config).await; // Ignore result
// Check metrics
let metrics = system.get_metrics().await;
assert!(metrics.system_start_count > 0, "Should have recorded system start attempt");
// Test performance validation
let validation = system.validate_performance().await;
assert!(validation.current_eps >= 0.0);
assert!(validation.current_latency_ms >= 0.0);
assert!(validation.current_error_rate >= 0.0);
}
#[tokio::test]
async fn test_audit_log_dispatch_with_no_targets() {
let system = AuditSystem::new();
// Create sample audit entry
let audit_entry = create_sample_audit_entry();
// Try to dispatch with no targets (should succeed but do nothing)
let result = system.dispatch(Arc::new(audit_entry)).await;
// Should succeed even with no targets configured
match result {
Ok(_) => {
// Success expected
}
Err(AuditError::NotInitialized(_)) => {
// Also acceptable since system not running
}
Err(e) => {
panic!("Unexpected error: {e}");
}
}
}
#[tokio::test]
async fn test_global_audit_functions() {
use rustfs_audit::*;
// Test global functions
let system = init_audit_system();
assert!(system.get_state().await == system::AuditSystemState::Stopped);
// Test audit logging function (should not panic even if system not running)
let entry = create_sample_audit_entry();
let result = dispatch_audit_log(Arc::new(entry)).await;
assert!(result.is_ok(), "Dispatch should succeed even with no running system");
// Test system status
assert!(!is_audit_system_running().await);
// Test AuditLogger singleton
let _logger = AuditLogger::instance();
assert!(!AuditLogger::is_enabled().await);
// Test logging (should not panic)
let entry = create_sample_audit_entry();
AuditLogger::log(entry).await; // Should not panic
}
#[tokio::test]
async fn test_config_parsing_with_multiple_instances() {
let registry = AuditRegistry::new();
// Create config with multiple webhook instances
let mut config = Config(HashMap::new());
let mut webhook_section = HashMap::new();
// Default instance
let mut default_kvs = KVS::new();
default_kvs.insert("enable".to_string(), "off".to_string());
default_kvs.insert("endpoint".to_string(), "http://default.example.com/audit".to_string());
webhook_section.insert("_".to_string(), default_kvs);
// Primary instance
let mut primary_kvs = KVS::new();
primary_kvs.insert("enable".to_string(), "on".to_string());
primary_kvs.insert("endpoint".to_string(), "http://primary.example.com/audit".to_string());
primary_kvs.insert("auth_token".to_string(), "primary-token-123".to_string());
webhook_section.insert("primary".to_string(), primary_kvs);
// Secondary instance
let mut secondary_kvs = KVS::new();
secondary_kvs.insert("enable".to_string(), "on".to_string());
secondary_kvs.insert("endpoint".to_string(), "http://secondary.example.com/audit".to_string());
secondary_kvs.insert("auth_token".to_string(), "secondary-token-456".to_string());
webhook_section.insert("secondary".to_string(), secondary_kvs);
config.0.insert("audit_webhook".to_string(), webhook_section);
// Try to create targets from config
let result = registry.create_audit_targets_from_config(&config).await;
// Should fail due to server storage not initialized, but parsing should work
match result {
Err(AuditError::StorageNotAvailable(_)) => {
// Expected - parsing worked but save failed
}
Err(e) => {
println!("Config parsing error: {e}");
// Other errors might indicate parsing issues, but not necessarily failures
}
Ok(_) => {
// Unexpected success in test environment
println!("Unexpected success - server storage somehow available");
}
}
}
#[test]
fn test_target_type_validation() {
use rustfs_targets::target::TargetType;
// Test that TargetType::AuditLog is properly defined
let audit_type = TargetType::AuditLog;
assert_eq!(audit_type.as_str(), "audit_log");
let notify_type = TargetType::NotifyEvent;
assert_eq!(notify_type.as_str(), "notify_event");
// Test that they are different
assert_ne!(audit_type.as_str(), notify_type.as_str());
}
#[tokio::test]
async fn test_concurrent_operations() {
let system = AuditSystem::new();
// Test concurrent state checks
let mut tasks = Vec::new();
for i in 0..10 {
let system_clone = system.clone();
let task = tokio::spawn(async move {
let state = system_clone.get_state().await;
let is_running = system_clone.is_running().await;
(i, state, is_running)
});
tasks.push(task);
}
// All tasks should complete without panic
for task in tasks {
let (i, state, is_running) = task.await.expect("Task should complete");
assert_eq!(state, system::AuditSystemState::Stopped);
assert!(!is_running);
println!("Task {i} completed successfully");
}
}
#[tokio::test]
async fn test_performance_under_load() {
use std::time::Instant;
let system = AuditSystem::new();
// Test multiple rapid dispatch calls
let start = Instant::now();
let mut tasks = Vec::new();
for i in 0..100 {
let system_clone = system.clone();
let entry = Arc::new(create_sample_audit_entry_with_id(i));
let task = tokio::spawn(async move { system_clone.dispatch(entry).await });
tasks.push(task);
}
// Wait for all dispatches to complete
let mut success_count = 0;
let mut error_count = 0;
for task in tasks {
match task.await.expect("Task should complete") {
Ok(_) => success_count += 1,
Err(_) => error_count += 1,
}
}
let elapsed = start.elapsed();
println!("100 concurrent dispatches took: {elapsed:?}");
println!("Successes: {success_count}, Errors: {error_count}");
// Should complete reasonably quickly
assert!(elapsed < Duration::from_secs(5), "Concurrent operations took too long");
// All should either succeed (if targets available) or fail consistently
assert_eq!(success_count + error_count, 100);
}
// Helper functions
fn create_sample_audit_entry() -> AuditEntry {
create_sample_audit_entry_with_id(0)
}
fn create_sample_audit_entry_with_id(id: u32) -> AuditEntry {
use chrono::Utc;
use rustfs_targets::EventName;
use serde_json::json;
let mut req_header = hashbrown::HashMap::new();
req_header.insert("authorization".to_string(), format!("Bearer test-token-{id}"));
req_header.insert("content-type".to_string(), "application/octet-stream".to_string());
let mut resp_header = hashbrown::HashMap::new();
resp_header.insert("x-response".to_string(), "ok".to_string());
let mut tags = hashbrown::HashMap::new();
tags.insert(format!("tag-{id}"), json!("sample"));
let mut req_query = hashbrown::HashMap::new();
req_query.insert("id".to_string(), id.to_string());
let api_details = ApiDetails {
name: Some("PutObject".to_string()),
bucket: Some("test-bucket".to_string()),
object: Some(format!("test-object-{id}")),
status: Some("success".to_string()),
status_code: Some(200),
input_bytes: Some(1024),
output_bytes: Some(0),
header_bytes: Some(128),
time_to_first_byte: Some("1ms".to_string()),
time_to_first_byte_in_ns: Some("1000000".to_string()),
time_to_response: Some("2ms".to_string()),
time_to_response_in_ns: Some("2000000".to_string()),
..Default::default()
};
AuditEntry {
version: "1".to_string(),
deployment_id: Some(format!("test-deployment-{id}")),
site_name: Some("test-site".to_string()),
time: Utc::now(),
event: EventName::ObjectCreatedPut,
entry_type: Some("object".to_string()),
trigger: "api".to_string(),
api: api_details,
remote_host: Some("127.0.0.1".to_string()),
request_id: Some(format!("test-request-{id}")),
user_agent: Some("test-agent".to_string()),
req_path: Some(format!("/test-bucket/test-object-{id}")),
req_host: Some("test-host".to_string()),
req_node: Some("node-1".to_string()),
req_claims: None,
req_query: Some(req_query),
req_header: Some(req_header),
resp_header: Some(resp_header),
tags: Some(tags),
access_key: Some(format!("AKIA{id}")),
parent_user: Some(format!("parent-{id}")),
error: None,
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/lib.rs | crates/s3select-api/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use datafusion::{common::DataFusionError, sql::sqlparser::parser::ParserError};
use snafu::{Backtrace, Location, Snafu};
use std::fmt::Display;
pub mod object_store;
pub mod query;
pub mod server;
#[cfg(test)]
mod test;
pub type QueryResult<T> = Result<T, QueryError>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum QueryError {
#[snafu(display("DataFusion error: {}", source))]
Datafusion {
source: Box<DataFusionError>,
location: Location,
backtrace: Backtrace,
},
#[snafu(display("This feature is not implemented: {}", err))]
NotImplemented { err: String },
#[snafu(display("Multi-statement not allow, found num:{}, sql:{}", num, sql))]
MultiStatement { num: usize, sql: String },
#[snafu(display("Failed to build QueryDispatcher. err: {}", err))]
BuildQueryDispatcher { err: String },
#[snafu(display("The query has been canceled"))]
Cancel,
#[snafu(display("{}", source))]
Parser { source: ParserError },
#[snafu(display("Udf not exists, name:{}.", name))]
FunctionNotExists { name: String },
#[snafu(display("Udf already exists, name:{}.", name))]
FunctionExists { name: String },
#[snafu(display("Store Error, e:{}.", e))]
StoreError { e: String },
}
impl From<DataFusionError> for QueryError {
fn from(value: DataFusionError) -> Self {
match value {
DataFusionError::External(e) if e.downcast_ref::<QueryError>().is_some() => *e.downcast::<QueryError>().unwrap(),
v => Self::Datafusion {
source: Box::new(v),
location: Default::default(),
backtrace: Backtrace::capture(),
},
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ResolvedTable {
// path
table: String,
}
impl ResolvedTable {
pub fn table(&self) -> &str {
&self.table
}
}
impl Display for ResolvedTable {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self { table } = self;
write!(f, "{table}")
}
}
#[cfg(test)]
mod tests {
use super::*;
use datafusion::common::DataFusionError;
use datafusion::sql::sqlparser::parser::ParserError;
#[test]
fn test_query_error_display() {
let err = QueryError::NotImplemented {
err: "feature X".to_string(),
};
assert_eq!(err.to_string(), "This feature is not implemented: feature X");
let err = QueryError::MultiStatement {
num: 2,
sql: "SELECT 1; SELECT 2;".to_string(),
};
assert_eq!(err.to_string(), "Multi-statement not allow, found num:2, sql:SELECT 1; SELECT 2;");
let err = QueryError::Cancel;
assert_eq!(err.to_string(), "The query has been canceled");
let err = QueryError::FunctionNotExists {
name: "my_func".to_string(),
};
assert_eq!(err.to_string(), "Udf not exists, name:my_func.");
let err = QueryError::StoreError {
e: "connection failed".to_string(),
};
assert_eq!(err.to_string(), "Store Error, e:connection failed.");
}
#[test]
fn test_query_error_from_datafusion_error() {
let df_error = DataFusionError::Plan("invalid plan".to_string());
let query_error: QueryError = df_error.into();
match query_error {
QueryError::Datafusion { source, .. } => {
assert!(source.to_string().contains("invalid plan"));
}
_ => panic!("Expected Datafusion error"),
}
}
#[test]
fn test_query_error_from_parser_error() {
let parser_error = ParserError::ParserError("syntax error".to_string());
let query_error = QueryError::Parser { source: parser_error };
assert!(query_error.to_string().contains("syntax error"));
}
#[test]
fn test_resolved_table() {
let table = ResolvedTable {
table: "my_table".to_string(),
};
assert_eq!(table.table(), "my_table");
assert_eq!(table.to_string(), "my_table");
}
#[test]
fn test_resolved_table_clone_and_eq() {
let table1 = ResolvedTable {
table: "table1".to_string(),
};
let table2 = table1.clone();
let table3 = ResolvedTable {
table: "table2".to_string(),
};
assert_eq!(table1, table2);
assert_ne!(table1, table3);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/object_store.rs | crates/s3select-api/src/object_store.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use bytes::Bytes;
use chrono::Utc;
use futures::pin_mut;
use futures::{Stream, StreamExt};
use futures_core::stream::BoxStream;
use http::HeaderMap;
use object_store::{
Attributes, Error as o_Error, GetOptions, GetResult, ListResult, MultipartUpload, ObjectMeta, ObjectStore,
PutMultipartOptions, PutOptions, PutPayload, PutResult, Result, path::Path,
};
use pin_project_lite::pin_project;
use rustfs_common::DEFAULT_DELIMITER;
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::DEFAULT_READ_BUFFER_SIZE;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::ObjectIO;
use rustfs_ecstore::store_api::ObjectOptions;
use s3s::S3Result;
use s3s::dto::SelectObjectContentInput;
use s3s::s3_error;
use std::ops::Range;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Poll;
use std::task::ready;
use tokio::io::AsyncRead;
use tokio_util::io::ReaderStream;
use tracing::info;
use transform_stream::AsyncTryStream;
#[derive(Debug)]
pub struct EcObjectStore {
input: Arc<SelectObjectContentInput>,
need_convert: bool,
delimiter: String,
store: Arc<ECStore>,
}
impl EcObjectStore {
pub fn new(input: Arc<SelectObjectContentInput>) -> S3Result<Self> {
let Some(store) = new_object_layer_fn() else {
return Err(s3_error!(InternalError, "ec store not inited"));
};
let (need_convert, delimiter) = if let Some(csv) = input.request.input_serialization.csv.as_ref() {
if let Some(delimiter) = csv.field_delimiter.as_ref() {
if delimiter.len() > 1 {
(true, delimiter.to_owned())
} else {
(false, String::new())
}
} else {
(false, String::new())
}
} else {
(false, String::new())
};
Ok(Self {
input,
need_convert,
delimiter,
store,
})
}
}
impl std::fmt::Display for EcObjectStore {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("EcObjectStore")
}
}
#[async_trait]
impl ObjectStore for EcObjectStore {
async fn put_opts(&self, _location: &Path, _payload: PutPayload, _opts: PutOptions) -> Result<PutResult> {
unimplemented!()
}
async fn put_multipart_opts(&self, _location: &Path, _opts: PutMultipartOptions) -> Result<Box<dyn MultipartUpload>> {
unimplemented!()
}
async fn get_opts(&self, location: &Path, _options: GetOptions) -> Result<GetResult> {
info!("{:?}", location);
let opts = ObjectOptions::default();
let h = HeaderMap::new();
let reader = self
.store
.get_object_reader(&self.input.bucket, &self.input.key, None, h, &opts)
.await
.map_err(|_| o_Error::NotFound {
path: format!("{}/{}", self.input.bucket, self.input.key),
source: "can not get object info".into(),
})?;
let meta = ObjectMeta {
location: location.clone(),
last_modified: Utc::now(),
size: reader.object_info.size as u64,
e_tag: reader.object_info.etag,
version: None,
};
let attributes = Attributes::default();
let payload = if self.need_convert {
object_store::GetResultPayload::Stream(
bytes_stream(
ReaderStream::with_capacity(
ConvertStream::new(reader.stream, self.delimiter.clone()),
DEFAULT_READ_BUFFER_SIZE,
),
reader.object_info.size as usize,
)
.boxed(),
)
} else {
object_store::GetResultPayload::Stream(
bytes_stream(
ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
reader.object_info.size as usize,
)
.boxed(),
)
};
Ok(GetResult {
payload,
meta,
range: 0..reader.object_info.size as u64,
attributes,
})
}
async fn get_ranges(&self, _location: &Path, _ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
unimplemented!()
}
async fn head(&self, location: &Path) -> Result<ObjectMeta> {
info!("{:?}", location);
let opts = ObjectOptions::default();
let info = self
.store
.get_object_info(&self.input.bucket, &self.input.key, &opts)
.await
.map_err(|_| o_Error::NotFound {
path: format!("{}/{}", self.input.bucket, self.input.key),
source: "can not get object info".into(),
})?;
Ok(ObjectMeta {
location: location.clone(),
last_modified: Utc::now(),
size: info.size as u64,
e_tag: info.etag,
version: None,
})
}
async fn delete(&self, _location: &Path) -> Result<()> {
unimplemented!()
}
fn list(&self, _prefix: Option<&Path>) -> BoxStream<'static, Result<ObjectMeta>> {
unimplemented!()
}
async fn list_with_delimiter(&self, _prefix: Option<&Path>) -> Result<ListResult> {
unimplemented!()
}
async fn copy(&self, _from: &Path, _to: &Path) -> Result<()> {
unimplemented!()
}
async fn copy_if_not_exists(&self, _from: &Path, _too: &Path) -> Result<()> {
unimplemented!()
}
}
pin_project! {
struct ConvertStream<R> {
inner: R,
delimiter: Vec<u8>,
}
}
impl<R> ConvertStream<R> {
fn new(inner: R, delimiter: String) -> Self {
ConvertStream {
inner,
delimiter: delimiter.as_bytes().to_vec(),
}
}
}
impl<R: AsyncRead + Unpin> AsyncRead for ConvertStream<R> {
#[tracing::instrument(level = "debug", skip_all)]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
let me = self.project();
ready!(Pin::new(&mut *me.inner).poll_read(cx, buf))?;
let bytes = buf.filled();
let replaced = replace_symbol(me.delimiter, bytes);
buf.clear();
buf.put_slice(&replaced);
Poll::Ready(Ok(()))
}
}
fn replace_symbol(delimiter: &[u8], slice: &[u8]) -> Vec<u8> {
let mut result = Vec::with_capacity(slice.len());
let mut i = 0;
while i < slice.len() {
if slice[i..].starts_with(delimiter) {
result.push(DEFAULT_DELIMITER);
i += delimiter.len();
} else {
result.push(slice[i]);
i += 1;
}
}
result
}
pub fn bytes_stream<S>(stream: S, content_length: usize) -> impl Stream<Item = Result<Bytes>> + Send + 'static
where
S: Stream<Item = Result<Bytes, std::io::Error>> + Send + 'static,
{
AsyncTryStream::<Bytes, o_Error, _>::new(|mut y| async move {
pin_mut!(stream);
let mut remaining: usize = content_length;
while let Some(result) = stream.next().await {
let mut bytes = result.map_err(|e| o_Error::Generic {
store: "",
source: Box::new(e),
})?;
if bytes.len() > remaining {
bytes.truncate(remaining);
}
remaining -= bytes.len();
y.yield_ok(bytes).await;
}
Ok(())
})
}
#[cfg(test)]
mod test {
use super::replace_symbol;
#[test]
fn test_replace() {
let ss = String::from("dandan&&is&&best");
let slice = ss.as_bytes();
let delimiter = b"&&";
println!("len: {}", "╦".len());
let result = replace_symbol(delimiter, slice);
match String::from_utf8(result) {
Ok(s) => println!("slice: {s}"),
Err(e) => eprintln!("Error converting to string: {e}"),
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/ast.rs | crates/s3select-api/src/query/ast.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use datafusion::sql::sqlparser::ast::Statement;
/// Top-level statement in the extended SQL dialect accepted by s3select.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExtStatement {
    /// ANSI SQL AST node
    SqlStatement(Box<Statement>),
    // we can expand command
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/analyzer.rs | crates/s3select-api/src/query/analyzer.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use datafusion::logical_expr::LogicalPlan;
use super::session::SessionCtx;
use crate::QueryResult;
/// Shared, thread-safe handle to an [`Analyzer`].
pub type AnalyzerRef = Arc<dyn Analyzer + Send + Sync>;

/// Semantic analysis pass over a DataFusion logical plan.
pub trait Analyzer {
    /// Validates/rewrites `plan` for the given `session`, returning the
    /// analyzed plan (or an error if the plan is rejected).
    fn analyze(&self, plan: &LogicalPlan, session: &SessionCtx) -> QueryResult<LogicalPlan>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/parser.rs | crates/s3select-api/src/query/parser.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::VecDeque;
use super::ast::ExtStatement;
use crate::QueryResult;
/// SQL text -> AST parser for the s3select dialect.
pub trait Parser {
    /// Parses `sql` into zero or more statements, in source order.
    fn parse(&self, sql: &str) -> QueryResult<VecDeque<ExtStatement>>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/session.rs | crates/s3select-api/src/query/session.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::query::Context;
use crate::{QueryError, QueryResult, object_store::EcObjectStore};
use datafusion::{
execution::{SessionStateBuilder, context::SessionState, runtime_env::RuntimeEnvBuilder},
parquet::data_type::AsBytes,
prelude::SessionContext,
};
use object_store::{ObjectStore, memory::InMemory, path::Path};
use std::sync::Arc;
use tracing::error;
/// Per-request query session wrapping a DataFusion `SessionState`.
#[derive(Clone)]
pub struct SessionCtx {
    // Reserved descriptor slot; currently carries no data.
    _desc: Arc<SessionCtxDesc>,
    inner: SessionState,
}

impl SessionCtx {
    /// Borrows the underlying DataFusion session state.
    pub fn inner(&self) -> &SessionState {
        &self.inner
    }
}
/// Placeholder for per-session metadata; intentionally empty for now.
#[derive(Clone)]
pub struct SessionCtxDesc {
    // maybe we need some info
}
/// Factory for [`SessionCtx`] values.
#[derive(Default)]
pub struct SessionCtxFactory {
    // When true, an in-memory object store with a CSV fixture is registered
    // instead of the real erasure-coded store.
    pub is_test: bool,
}
impl SessionCtxFactory {
pub async fn create_session_ctx(&self, context: &Context) -> QueryResult<SessionCtx> {
let df_session_ctx = self.build_df_session_context(context).await?;
Ok(SessionCtx {
_desc: Arc::new(SessionCtxDesc {}),
inner: df_session_ctx.state(),
})
}
async fn build_df_session_context(&self, context: &Context) -> QueryResult<SessionContext> {
let path = format!("s3://{}", context.input.bucket);
let store_url = url::Url::parse(&path).unwrap();
let rt = RuntimeEnvBuilder::new().build()?;
let df_session_state = SessionStateBuilder::new()
.with_runtime_env(Arc::new(rt))
.with_default_features();
let df_session_state = if self.is_test {
let store: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
let data = b"id,name,age,department,salary
1,Alice,25,HR,5000
2,Bob,30,IT,6000
3,Charlie,35,Finance,7000
4,Diana,22,Marketing,4500
5,Eve,28,IT,5500
6,Frank,40,Finance,8000
7,Grace,26,HR,5200
8,Henry,32,IT,6200
9,Ivy,24,Marketing,4800
10,Jack,38,Finance,7500";
let data_bytes = data.as_bytes();
// let data = r#""year"╦"gender"╦"ethnicity"╦"firstname"╦"count"╦"rank"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"SOPHIA"╦"119"╦"1"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"CHLOE"╦"106"╦"2"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMILY"╦"93"╦"3"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"OLIVIA"╦"89"╦"4"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMMA"╦"75"╦"5"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ISABELLA"╦"67"╦"6"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"TIFFANY"╦"54"╦"7"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ASHLEY"╦"52"╦"8"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"FIONA"╦"48"╦"9"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ANGELA"╦"47"╦"10""#;
// let data_bytes = Bytes::from(data);
let path = Path::from(context.input.key.clone());
store.put(&path, data_bytes.into()).await.map_err(|e| {
error!("put data into memory failed: {}", e.to_string());
QueryError::StoreError { e: e.to_string() }
})?;
df_session_state.with_object_store(&store_url, Arc::new(store)).build()
} else {
let store =
EcObjectStore::new(context.input.clone()).map_err(|_| QueryError::NotImplemented { err: String::new() })?;
df_session_state.with_object_store(&store_url, Arc::new(store)).build()
};
let df_session_ctx = SessionContext::new_with_state(df_session_state);
Ok(df_session_ctx)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/dispatcher.rs | crates/s3select-api/src/query/dispatcher.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use crate::QueryResult;
use super::{
Query,
execution::{Output, QueryStateMachine},
logical_planner::Plan,
};
/// Coordinates the full query lifecycle: state-machine creation, logical
/// planning, and execution.
#[async_trait]
pub trait QueryDispatcher: Send + Sync {
    // fn create_query_id(&self) -> QueryId;
    // fn query_info(&self, id: &QueryId);
    /// Runs `query` end to end and returns its output.
    async fn execute_query(&self, query: &Query) -> QueryResult<Output>;
    /// Parses and plans the query tracked by `query_state_machine`.
    async fn build_logical_plan(&self, query_state_machine: Arc<QueryStateMachine>) -> QueryResult<Option<Plan>>;
    /// Executes a previously built logical plan.
    async fn execute_logical_plan(&self, logical_plan: Plan, query_state_machine: Arc<QueryStateMachine>) -> QueryResult<Output>;
    /// Creates the lifecycle-tracking state machine for `query`.
    async fn build_query_state_machine(&self, query: Query) -> QueryResult<Arc<QueryStateMachine>>;
    // fn running_query_infos(&self) -> Vec<QueryInfo>;
    // fn running_query_status(&self) -> Vec<QueryStatus>;
    // fn cancel_query(&self, id: &QueryId);
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/function.rs | crates/s3select-api/src/query/function.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use datafusion::logical_expr::{AggregateUDF, ScalarUDF, WindowUDF};
use crate::QueryResult;
/// Shared, thread-safe handle to the UDF/UDAF/UDWF registry.
pub type FuncMetaManagerRef = Arc<dyn FunctionMetadataManager + Send + Sync>;

/// Registry of user-defined scalar, aggregate, and window functions.
pub trait FunctionMetadataManager {
    /// Registers a scalar UDF.
    fn register_udf(&mut self, udf: Arc<ScalarUDF>) -> QueryResult<()>;
    /// Registers an aggregate UDF.
    fn register_udaf(&mut self, udaf: Arc<AggregateUDF>) -> QueryResult<()>;
    /// Registers a window UDF.
    fn register_udwf(&mut self, udwf: Arc<WindowUDF>) -> QueryResult<()>;
    /// Looks up a scalar UDF by name.
    fn udf(&self, name: &str) -> QueryResult<Arc<ScalarUDF>>;
    /// Looks up an aggregate UDF by name.
    fn udaf(&self, name: &str) -> QueryResult<Arc<AggregateUDF>>;
    /// Looks up a window UDF by name.
    fn udwf(&self, name: &str) -> QueryResult<Arc<WindowUDF>>;
    /// Names of all registered scalar UDFs.
    fn udfs(&self) -> Vec<String>;
    /// Names of all registered aggregate UDFs.
    fn udafs(&self) -> Vec<String>;
    /// Names of all registered window UDFs.
    fn udwfs(&self) -> Vec<String>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/optimizer.rs | crates/s3select-api/src/query/optimizer.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use datafusion::physical_plan::ExecutionPlan;
use super::logical_planner::QueryPlan;
use super::session::SessionCtx;
use crate::QueryResult;
/// Shared, thread-safe handle to an [`Optimizer`].
pub type OptimizerRef = Arc<dyn Optimizer + Send + Sync>;

/// Turns an analyzed logical plan into an executable physical plan.
#[async_trait]
pub trait Optimizer {
    /// Optimizes `plan` and lowers it to a DataFusion `ExecutionPlan`.
    async fn optimize(&self, plan: &QueryPlan, session: &SessionCtx) -> QueryResult<Arc<dyn ExecutionPlan>>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/logical_planner.rs | crates/s3select-api/src/query/logical_planner.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use datafusion::arrow::datatypes::SchemaRef;
use datafusion::logical_expr::LogicalPlan as DFPlan;
use std::sync::Arc;
use crate::QueryResult;
use super::ast::ExtStatement;
use super::session::SessionCtx;
/// Planned statement, ready for optimization and execution.
#[derive(Clone)]
pub enum Plan {
    // only support query sql
    /// Query plan
    Query(QueryPlan),
}

impl Plan {
    /// Arrow schema of the plan's output.
    pub fn schema(&self) -> SchemaRef {
        match self {
            Self::Query(p) => Arc::new(p.df_plan.schema().as_arrow().clone()),
        }
    }
}
/// A DataFusion logical plan plus s3select-specific flags.
#[derive(Debug, Clone)]
pub struct QueryPlan {
    /// The underlying DataFusion logical plan.
    pub df_plan: DFPlan,
    // NOTE(review): semantics of "tag scan" are defined elsewhere — this
    // module only carries the flag.
    pub is_tag_scan: bool,
}

impl QueryPlan {
    /// True when the plan is an EXPLAIN / EXPLAIN ANALYZE wrapper rather
    /// than a real query.
    pub fn is_explain(&self) -> bool {
        matches!(self.df_plan, DFPlan::Explain(_) | DFPlan::Analyze(_))
    }
}
/// AST -> logical plan lowering.
#[async_trait]
pub trait LogicalPlanner {
    /// Builds a [`Plan`] from a parsed statement within `session`.
    async fn create_logical_plan(&self, statement: ExtStatement, session: &SessionCtx) -> QueryResult<Plan>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/mod.rs | crates/s3select-api/src/query/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use s3s::dto::SelectObjectContentInput;
use std::sync::Arc;
pub mod analyzer;
pub mod ast;
pub mod dispatcher;
pub mod execution;
pub mod function;
pub mod logical_planner;
pub mod optimizer;
pub mod parser;
pub mod physical_planner;
pub mod scheduler;
pub mod session;
/// Per-request context carried through parsing, planning, and execution.
#[derive(Clone)]
pub struct Context {
    // maybe we need transfer some info?
    /// The originating S3 SelectObjectContent request.
    pub input: Arc<SelectObjectContentInput>,
}
/// A single SQL query together with the request context it belongs to.
#[derive(Clone)]
pub struct Query {
    context: Context,
    content: String,
}

impl Query {
    /// Bundles a request context with its SQL text.
    #[inline(always)]
    pub fn new(context: Context, content: String) -> Self {
        Query { context, content }
    }

    /// The request context this query runs under.
    pub fn context(&self) -> &Context {
        &self.context
    }

    /// The raw SQL text.
    pub fn content(&self) -> &str {
        &self.content
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/physical_planner.rs | crates/s3select-api/src/query/physical_planner.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use datafusion::logical_expr::LogicalPlan;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::physical_planner::ExtensionPlanner;
use super::session::SessionCtx;
use crate::QueryResult;
/// Logical plan -> physical plan lowering, with pluggable extension rules.
#[async_trait]
pub trait PhysicalPlanner {
    /// Given a `LogicalPlan`, create an `ExecutionPlan` suitable for execution
    async fn create_physical_plan(
        &self,
        logical_plan: &LogicalPlan,
        session_state: &SessionCtx,
    ) -> QueryResult<Arc<dyn ExecutionPlan>>;
    /// Adds an extension planner consulted during physical planning.
    fn inject_physical_transform_rule(&mut self, rule: Arc<dyn ExtensionPlanner + Send + Sync>);
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/execution.rs | crates/s3select-api/src/query/execution.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Display;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use parking_lot::RwLock;
use async_trait::async_trait;
use datafusion::arrow::datatypes::{Schema, SchemaRef};
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::physical_plan::SendableRecordBatchStream;
use futures::{Stream, StreamExt, TryStreamExt};
use crate::{QueryError, QueryResult};
use super::Query;
use super::logical_planner::Plan;
use super::session::SessionCtx;
/// Shared handle to a runnable query execution.
pub type QueryExecutionRef = Arc<dyn QueryExecution>;
/// Execution mode of a query: one-shot batch or continuous stream.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QueryType {
    Batch,
    Stream,
}

impl Display for QueryType {
    /// Lowercase label used in logs and reporting.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Batch => "batch",
            Self::Stream => "stream",
        };
        f.write_str(label)
    }
}
/// A runnable (and cancellable) query.
#[async_trait]
pub trait QueryExecution: Send + Sync {
    /// Execution mode; defaults to batch.
    fn query_type(&self) -> QueryType {
        QueryType::Batch
    }
    // Start
    /// Begins execution and returns the query output.
    async fn start(&self) -> QueryResult<Output>;
    // Stop
    /// Requests cancellation of a running execution.
    fn cancel(&self) -> QueryResult<()>;
}
/// Result of a query execution.
pub enum Output {
    /// A stream of Arrow record batches.
    StreamData(SendableRecordBatchStream),
    /// No rows to return.
    Nil(()),
}
impl Output {
    /// Schema of the output; the empty schema for `Nil`.
    pub fn schema(&self) -> SchemaRef {
        match self {
            Self::StreamData(stream) => stream.schema(),
            Self::Nil(_) => Arc::new(Schema::empty()),
        }
    }

    /// Drains the stream into a `Vec<RecordBatch>`.
    ///
    /// An exhausted stream yields a single empty batch so callers always get
    /// the schema; `Nil` yields an empty vector.
    pub async fn chunk_result(self) -> QueryResult<Vec<RecordBatch>> {
        match self {
            Self::Nil(_) => Ok(vec![]),
            Self::StreamData(stream) => {
                let schema = stream.schema();
                let mut res: Vec<RecordBatch> = stream.try_collect::<Vec<RecordBatch>>().await?;
                if res.is_empty() {
                    res.push(RecordBatch::new_empty(schema));
                }
                Ok(res)
            }
        }
    }

    /// Total row count across all batches.
    ///
    /// NOTE(review): errors while draining are swallowed and reported as 0.
    pub async fn num_rows(self) -> usize {
        match self.chunk_result().await {
            Ok(rb) => rb.iter().map(|e| e.num_rows()).sum(),
            Err(_) => 0,
        }
    }

    /// Returns the number of records affected by the query operation
    ///
    /// If it is a select statement, returns the number of rows in the result set
    ///
    /// -1 means unknown
    ///
    /// NOTE(review): the `as i64` cast wraps rather than panicking if the row
    /// count ever exceeded `i64::MAX` (practically unreachable).
    pub async fn affected_rows(self) -> i64 {
        self.num_rows().await as i64
    }
}
// Lets `Output` be consumed as a stream of record batches directly;
// `Nil` behaves as an immediately-exhausted stream.
impl Stream for Output {
    type Item = Result<RecordBatch, QueryError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        match this {
            // Forward the inner stream, converting its error type.
            Output::StreamData(stream) => stream.poll_next_unpin(cx).map_err(|e| e.into()),
            Output::Nil(_) => Poll::Ready(None),
        }
    }
}
/// Builds a runnable [`QueryExecution`] from a plan and its state machine.
#[async_trait]
pub trait QueryExecutionFactory {
    async fn create_query_execution(
        &self,
        plan: Plan,
        query_state_machine: QueryStateMachineRef,
    ) -> QueryResult<QueryExecutionRef>;
}
/// Shared handle to a [`QueryStateMachine`].
pub type QueryStateMachineRef = Arc<QueryStateMachine>;

/// Tracks a query's lifecycle state and elapsed wall-clock time.
pub struct QueryStateMachine {
    pub session: SessionCtx,
    pub query: Query,
    // Current lifecycle state; RwLock so transitions only need `&self`.
    state: RwLock<QueryState>,
    // Creation instant, used by `duration`.
    start: Instant,
}
impl QueryStateMachine {
    /// Creates a machine in `ACCEPTING` state and starts the timer.
    pub fn begin(query: Query, session: SessionCtx) -> Self {
        Self {
            session,
            query,
            state: RwLock::new(QueryState::ACCEPTING),
            start: Instant::now(),
        }
    }

    /// Marks the start of semantic analysis.
    pub fn begin_analyze(&self) {
        // TODO record time
        self.translate_to(QueryState::RUNNING(RUNNING::ANALYZING));
    }

    /// Marks the end of semantic analysis (per-phase timing not recorded yet).
    pub fn end_analyze(&self) {
        // TODO record time
    }

    /// Marks the start of plan optimization.
    pub fn begin_optimize(&self) {
        // TODO record time
        self.translate_to(QueryState::RUNNING(RUNNING::OPTIMIZING));
    }

    /// Marks the end of plan optimization.
    pub fn end_optimize(&self) {
        // TODO
    }

    /// Marks the start of scheduling.
    pub fn begin_schedule(&self) {
        // TODO
        self.translate_to(QueryState::RUNNING(RUNNING::SCHEDULING));
    }

    /// Marks the end of scheduling.
    pub fn end_schedule(&self) {
        // TODO
    }

    /// Transitions to the terminal `FINISHED` state.
    pub fn finish(&self) {
        // TODO
        self.translate_to(QueryState::DONE(DONE::FINISHED));
    }

    /// Transitions to the terminal `CANCELLED` state.
    pub fn cancel(&self) {
        // TODO
        self.translate_to(QueryState::DONE(DONE::CANCELLED));
    }

    /// Transitions to the terminal `FAILED` state.
    pub fn fail(&self) {
        // TODO
        self.translate_to(QueryState::DONE(DONE::FAILED));
    }

    /// Snapshot of the current state.
    pub fn state(&self) -> QueryState {
        self.state.read().clone()
    }

    /// Time elapsed since the machine was created.
    pub fn duration(&self) -> Duration {
        self.start.elapsed()
    }

    // Unconditionally overwrites the state; no transition validation is done.
    fn translate_to(&self, state: QueryState) {
        *self.state.write() = state;
    }
}
/// Lifecycle state of a query: accepted, running a phase, or terminal.
#[derive(Debug, Clone)]
pub enum QueryState {
    /// Accepted but not yet started.
    ACCEPTING,
    /// Actively executing; the payload names the current phase.
    RUNNING(RUNNING),
    /// Reached a terminal state.
    DONE(DONE),
}

impl AsRef<str> for QueryState {
    /// Human-readable label; compound states delegate to the inner phase.
    fn as_ref(&self) -> &str {
        match self {
            QueryState::ACCEPTING => "ACCEPTING",
            QueryState::RUNNING(e) => e.as_ref(),
            QueryState::DONE(e) => e.as_ref(),
        }
    }
}
/// Phase of an actively-running query.
#[derive(Debug, Clone)]
pub enum RUNNING {
    DISPATCHING,
    ANALYZING,
    OPTIMIZING,
    SCHEDULING,
}

impl AsRef<str> for RUNNING {
    /// Static label used when reporting query progress.
    fn as_ref(&self) -> &str {
        match self {
            RUNNING::DISPATCHING => "DISPATCHING",
            RUNNING::ANALYZING => "ANALYZING",
            RUNNING::OPTIMIZING => "OPTIMIZING",
            RUNNING::SCHEDULING => "SCHEDULING",
        }
    }
}
/// Terminal outcome of a query.
#[derive(Debug, Clone)]
pub enum DONE {
    FINISHED,
    FAILED,
    CANCELLED,
}

impl AsRef<str> for DONE {
    /// Static label used when reporting the final query status.
    fn as_ref(&self) -> &str {
        match self {
            DONE::FINISHED => "FINISHED",
            DONE::FAILED => "FAILED",
            DONE::CANCELLED => "CANCELLED",
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/query/scheduler.rs | crates/s3select-api/src/query/scheduler.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use async_trait::async_trait;
use datafusion::common::Result;
use datafusion::execution::context::TaskContext;
use datafusion::physical_plan::{ExecutionPlan, SendableRecordBatchStream};
/// Shared, thread-safe handle to a [`Scheduler`].
pub type SchedulerRef = Arc<dyn Scheduler + Send + Sync>;

#[async_trait]
pub trait Scheduler {
    /// Schedule the provided [`ExecutionPlan`] on this [`Scheduler`].
    ///
    /// Returns a [`ExecutionResults`] that can be used to receive results as they are produced,
    /// as a [`futures::Stream`] of [`RecordBatch`]
    async fn schedule(&self, plan: Arc<dyn ExecutionPlan>, context: Arc<TaskContext>) -> Result<ExecutionResults>;
}
/// Record-batch stream produced by a scheduled plan.
pub struct ExecutionResults {
    stream: SendableRecordBatchStream,
}

impl ExecutionResults {
    /// Wraps an already-running result stream.
    pub fn new(stream: SendableRecordBatchStream) -> Self {
        Self { stream }
    }

    /// Returns a [`SendableRecordBatchStream`] of this execution
    pub fn stream(self) -> SendableRecordBatchStream {
        self.stream
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/test/mod.rs | crates/s3select-api/src/test/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test modules for s3select-api
pub mod query_execution_test;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/test/query_execution_test.rs | crates/s3select-api/src/test/query_execution_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Unit tests for the execution module's public state and output types, plus a
// mock `QueryExecution` covering the trait's success/failure/cancel paths.
#[cfg(test)]
mod tests {
    use crate::query::execution::{DONE, Output, QueryExecution, QueryState, QueryType, RUNNING};
    use crate::{QueryError, QueryResult};
    use async_trait::async_trait;

    #[test]
    fn test_query_type_display() {
        assert_eq!(format!("{}", QueryType::Batch), "batch");
        assert_eq!(format!("{}", QueryType::Stream), "stream");
    }

    #[test]
    fn test_query_type_equality() {
        assert_eq!(QueryType::Batch, QueryType::Batch);
        assert_ne!(QueryType::Batch, QueryType::Stream);
        assert_eq!(QueryType::Stream, QueryType::Stream);
    }

    // The Output methods consume `self`, hence a fresh value per assertion.
    #[tokio::test]
    async fn test_output_nil_methods() {
        let output = Output::Nil(());
        let result = output.chunk_result().await;
        assert!(result.is_ok(), "Output::Nil result should be Ok");
        let output2 = Output::Nil(());
        let rows = output2.num_rows().await;
        assert_eq!(rows, 0, "Output::Nil should have 0 rows");
        let output3 = Output::Nil(());
        let affected = output3.affected_rows().await;
        assert_eq!(affected, 0, "Output::Nil should have 0 affected rows");
    }

    #[test]
    fn test_query_state_as_ref() {
        let accepting = QueryState::ACCEPTING;
        assert_eq!(accepting.as_ref(), "ACCEPTING");
        let running = QueryState::RUNNING(RUNNING::ANALYZING);
        assert_eq!(running.as_ref(), "ANALYZING");
        let done = QueryState::DONE(DONE::FINISHED);
        assert_eq!(done.as_ref(), "FINISHED");
    }

    #[test]
    fn test_running_state_as_ref() {
        assert_eq!(RUNNING::DISPATCHING.as_ref(), "DISPATCHING");
        assert_eq!(RUNNING::ANALYZING.as_ref(), "ANALYZING");
        assert_eq!(RUNNING::OPTIMIZING.as_ref(), "OPTIMIZING");
        assert_eq!(RUNNING::SCHEDULING.as_ref(), "SCHEDULING");
    }

    #[test]
    fn test_done_state_as_ref() {
        assert_eq!(DONE::FINISHED.as_ref(), "FINISHED");
        assert_eq!(DONE::FAILED.as_ref(), "FAILED");
        assert_eq!(DONE::CANCELLED.as_ref(), "CANCELLED");
    }

    // Mock implementation for testing
    // Flags select which of the three `start` outcomes is simulated;
    // `should_cancel` takes precedence over `should_succeed`.
    struct MockQueryExecution {
        should_succeed: bool,
        should_cancel: bool,
    }

    #[async_trait]
    impl QueryExecution for MockQueryExecution {
        async fn start(&self) -> QueryResult<Output> {
            if self.should_cancel {
                return Err(QueryError::Cancel);
            }
            if self.should_succeed {
                Ok(Output::Nil(()))
            } else {
                Err(QueryError::NotImplemented {
                    err: "Mock execution failed".to_string(),
                })
            }
        }

        fn cancel(&self) -> QueryResult<()> {
            Ok(())
        }
    }

    #[tokio::test]
    async fn test_mock_query_execution_success() {
        let execution = MockQueryExecution {
            should_succeed: true,
            should_cancel: false,
        };
        let result = execution.start().await;
        assert!(result.is_ok(), "Mock execution should succeed");
        if let Ok(Output::Nil(_)) = result {
            // Expected result
        } else {
            panic!("Expected Output::Nil");
        }
    }

    #[tokio::test]
    async fn test_mock_query_execution_failure() {
        let execution = MockQueryExecution {
            should_succeed: false,
            should_cancel: false,
        };
        let result = execution.start().await;
        assert!(result.is_err(), "Mock execution should fail");
        if let Err(QueryError::NotImplemented { .. }) = result {
            // Expected error
        } else {
            panic!("Expected NotImplemented error");
        }
    }

    #[tokio::test]
    async fn test_mock_query_execution_cancel() {
        let execution = MockQueryExecution {
            should_succeed: false,
            should_cancel: true,
        };
        let result = execution.start().await;
        assert!(result.is_err(), "Cancelled execution should fail");
        if let Err(QueryError::Cancel) = result {
            // Expected cancellation error
        } else {
            panic!("Expected Cancel error");
        }
        let cancel_result = execution.cancel();
        assert!(cancel_result.is_ok(), "Cancel should succeed");
    }

    // The trait's default `query_type` must report Batch.
    #[test]
    fn test_query_execution_default_type() {
        let execution = MockQueryExecution {
            should_succeed: true,
            should_cancel: false,
        };
        assert_eq!(execution.query_type(), QueryType::Batch);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/server/dbms.rs | crates/s3select-api/src/server/dbms.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use crate::{
QueryResult,
query::{
Query,
execution::{Output, QueryStateMachineRef},
logical_planner::Plan,
},
};
/// Pairs an executed query with the output its execution produced.
pub struct QueryHandle {
    query: Query,
    result: Output,
}

impl QueryHandle {
    /// Bundles a query with its execution output.
    pub fn new(query: Query, result: Output) -> Self {
        Self { query, result }
    }

    /// The query this handle belongs to.
    pub fn query(&self) -> &Query {
        &self.query
    }

    /// Consumes the handle, yielding the query output.
    pub fn result(self) -> Output {
        self.result
    }
}
/// Facade over the query engine: state-machine setup, planning, execution.
#[async_trait]
pub trait DatabaseManagerSystem {
    /// Runs `query` end to end, returning a handle with its output.
    async fn execute(&self, query: &Query) -> QueryResult<QueryHandle>;
    /// Creates the lifecycle state machine for `query`.
    async fn build_query_state_machine(&self, query: Query) -> QueryResult<QueryStateMachineRef>;
    /// Parses and plans the query tracked by `query_state_machine`.
    async fn build_logical_plan(&self, query_state_machine: QueryStateMachineRef) -> QueryResult<Option<Plan>>;
    /// Executes a previously built logical plan.
    async fn execute_logical_plan(
        &self,
        logical_plan: Plan,
        query_state_machine: QueryStateMachineRef,
    ) -> QueryResult<QueryHandle>;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/s3select-api/src/server/mod.rs | crates/s3select-api/src/server/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod dbms;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/user.rs | crates/madmin/src/user.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::value::RawValue;
use std::collections::HashMap;
use time::OffsetDateTime;
use crate::BackendInfo;
/// Whether an account is usable; serialized as "enabled"/"disabled".
/// Defaults to `Disabled` (fail closed).
#[derive(Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
pub enum AccountStatus {
    #[serde(rename = "enabled")]
    Enabled,
    #[serde(rename = "disabled")]
    #[default]
    Disabled,
}
impl AsRef<str> for AccountStatus {
    /// Borrow the canonical lowercase wire label for this status.
    fn as_ref(&self) -> &str {
        if matches!(self, AccountStatus::Enabled) {
            "enabled"
        } else {
            "disabled"
        }
    }
}
impl TryFrom<&str> for AccountStatus {
    type Error = String;

    /// Parse a wire label back into a status; anything other than
    /// "enabled"/"disabled" is rejected with a descriptive message.
    fn try_from(s: &str) -> Result<Self, Self::Error> {
        if s == "enabled" {
            Ok(AccountStatus::Enabled)
        } else if s == "disabled" {
            Ok(AccountStatus::Disabled)
        } else {
            Err(format!("invalid account status: {s}"))
        }
    }
}
/// How a user authenticates: locally managed credentials or LDAP.
#[derive(Debug, Serialize, Deserialize)]
pub enum UserAuthType {
    #[serde(rename = "builtin")]
    Builtin,
    #[serde(rename = "ldap")]
    Ldap,
}
/// Authentication details for a user; server/user-id fields only apply to
/// external providers such as LDAP.
#[derive(Debug, Serialize, Deserialize)]
pub struct UserAuthInfo {
    #[serde(rename = "type")]
    pub auth_type: UserAuthType,
    #[serde(rename = "authServer", skip_serializing_if = "Option::is_none")]
    pub auth_server: Option<String>,
    #[serde(rename = "authServerUserID", skip_serializing_if = "Option::is_none")]
    pub auth_server_user_id: Option<String>,
}
/// Administrative view of a user account.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct UserInfo {
    #[serde(rename = "userAuthInfo", skip_serializing_if = "Option::is_none")]
    pub auth_info: Option<UserAuthInfo>,
    #[serde(rename = "secretKey", skip_serializing_if = "Option::is_none")]
    pub secret_key: Option<String>,
    #[serde(rename = "policyName", skip_serializing_if = "Option::is_none")]
    pub policy_name: Option<String>,
    #[serde(rename = "status")]
    pub status: AccountStatus,
    /// Groups the user belongs to.
    #[serde(rename = "memberOf", skip_serializing_if = "Option::is_none")]
    pub member_of: Option<Vec<String>>,
    // NOTE(review): unlike the other Option fields above this one has no
    // `skip_serializing_if`, so `None` is emitted as `"updatedAt": null` —
    // confirm the asymmetry is intentional.
    #[serde(rename = "updatedAt")]
    pub updated_at: Option<OffsetDateTime>,
}
/// Request body for creating a user or updating an existing one.
#[derive(Debug, Serialize, Deserialize)]
pub struct AddOrUpdateUserReq {
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    /// Policy to attach; omitted when unchanged/unset.
    #[serde(rename = "policy", skip_serializing_if = "Option::is_none")]
    pub policy: Option<String>,
    #[serde(rename = "status")]
    pub status: AccountStatus,
}
/// Summary of one service account as returned by listing APIs.
#[derive(Debug, Serialize, Deserialize)]
pub struct ServiceAccountInfo {
    /// User the service account was created under.
    #[serde(rename = "parentUser")]
    pub parent_user: String,
    #[serde(rename = "accountStatus")]
    pub account_status: String,
    /// True when the account inherits its policy from the parent user.
    #[serde(rename = "impliedPolicy")]
    pub implied_policy: bool,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    // RFC 3339 on the wire; `None` serializes as null (no skip attribute).
    #[serde(rename = "expiration", with = "time::serde::rfc3339::option")]
    pub expiration: Option<OffsetDateTime>,
}
/// Response wrapper for the list-service-accounts API.
#[derive(Debug, Serialize, Deserialize)]
pub struct ListServiceAccountsResp {
    #[serde(rename = "accounts")]
    pub accounts: Vec<ServiceAccountInfo>,
}
/// Request body for creating a service account. See
/// `AddServiceAccountReq::validate` for the accepted shape.
#[derive(Debug, Serialize, Deserialize)]
pub struct AddServiceAccountReq {
    #[serde(rename = "policy", skip_serializing_if = "Option::is_none")]
    pub policy: Option<String>,
    /// User to create the account for; defaults to the caller when absent.
    /// (Inferred from the field name — confirm against the handler.)
    #[serde(rename = "targetUser", skip_serializing_if = "Option::is_none")]
    pub target_user: Option<String>,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    // NOTE(review): optional but without `skip_serializing_if`, so `None`
    // serializes as `"name": null` — inconsistent with the other options here.
    #[serde(rename = "name")]
    pub name: Option<String>,
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "expiration", with = "time::serde::rfc3339::option")]
    pub expiration: Option<OffsetDateTime>,
}
impl AddServiceAccountReq {
    /// Validate the request payload.
    ///
    /// Rejects an empty access key, an empty secret key, and a missing or
    /// empty name. Returns a human-readable message naming the first field
    /// that failed.
    pub fn validate(&self) -> Result<(), String> {
        if self.access_key.is_empty() {
            return Err("accessKey is empty".to_string());
        }
        if self.secret_key.is_empty() {
            return Err("secretKey is empty".to_string());
        }
        // Fix: previously only `None` was rejected, so `Some("")` slipped
        // through even though the error text says "name is empty". Treat an
        // explicitly empty string the same as a missing name, matching the
        // emptiness checks on the keys above.
        if self.name.as_deref().map_or(true, str::is_empty) {
            return Err("name is empty".to_string());
        }
        // TODO: validate remaining fields (key format, expiration, ...)
        Ok(())
    }
}
/// Borrowed credential material returned after creating a service account.
/// Serialize-only: this type is never read back from the wire.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Credentials<'a> {
    pub access_key: &'a str,
    pub secret_key: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_token: Option<&'a str>,
    // RFC 3339 on the wire; omitted entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "time::serde::rfc3339::option")]
    pub expiration: Option<OffsetDateTime>,
}
/// Response wrapper for the add-service-account API.
#[derive(Serialize)]
pub struct AddServiceAccountResp<'a> {
    pub credentials: Credentials<'a>,
}
/// Detailed view of a single service account (serialize-only response).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct InfoServiceAccountResp {
    pub parent_user: String,
    pub account_status: String,
    /// True when the policy is inherited from the parent user.
    pub implied_policy: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub policy: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "time::serde::rfc3339::option")]
    pub expiration: Option<OffsetDateTime>,
}
/// Partial-update request for a service account; every field is optional and
/// `None` means "leave unchanged".
#[derive(Debug, Serialize, Deserialize)]
pub struct UpdateServiceAccountReq {
    #[serde(rename = "newPolicy", skip_serializing_if = "Option::is_none")]
    pub new_policy: Option<String>,
    #[serde(rename = "newSecretKey", skip_serializing_if = "Option::is_none")]
    pub new_secret_key: Option<String>,
    #[serde(rename = "newStatus", skip_serializing_if = "Option::is_none")]
    pub new_status: Option<String>,
    #[serde(rename = "newName", skip_serializing_if = "Option::is_none")]
    pub new_name: Option<String>,
    #[serde(rename = "newDescription", skip_serializing_if = "Option::is_none")]
    pub new_description: Option<String>,
    #[serde(rename = "newExpiration", skip_serializing_if = "Option::is_none")]
    #[serde(with = "time::serde::rfc3339::option")]
    pub new_expiration: Option<OffsetDateTime>,
}
impl UpdateServiceAccountReq {
    /// Validate the update payload.
    ///
    /// Currently accepts everything; field-level checks are still TODO.
    pub fn validate(&self) -> Result<(), String> {
        // TODO: validate
        Ok(())
    }
}
/// Account-level usage summary: identity, backend info, effective policy
/// and per-bucket access details.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct AccountInfo {
    pub account_name: String,
    pub server: BackendInfo,
    pub policy: serde_json::Value, // Use iam/policy::parse to parse the result, to be done by the caller.
    pub buckets: Vec<BucketAccessInfo>,
}
/// Usage and access summary for a single bucket.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct BucketAccessInfo {
    pub name: String,
    /// Aggregate size of the bucket's objects.
    pub size: u64,
    /// Object count.
    pub objects: u64,
    pub object_sizes_histogram: HashMap<String, u64>,
    pub object_versions_histogram: HashMap<String, u64>,
    pub details: Option<BucketDetails>,
    pub prefix_usage: HashMap<String, u64>,
    // NOTE(review): this field holds the creation time but is serialized
    // under the key "expiration" — this looks like a copy-paste slip from a
    // sibling struct. Left as-is because renaming changes the wire format;
    // confirm with consumers before fixing.
    #[serde(rename = "expiration", with = "time::serde::rfc3339::option")]
    pub created: Option<OffsetDateTime>,
    pub access: AccountAccess,
}
/// Feature flags enabled on a bucket.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct BucketDetails {
    pub versioning: bool,
    pub versioning_suspended: bool,
    pub locking: bool,
    pub replication: bool,
    // pub tagging: Option<Tagging>,
}
/// Read/write permissions the account has on a bucket.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct AccountAccess {
    pub read: bool,
    pub write: bool,
}
/// SRSessionPolicy - represents a session policy to be replicated.
/// Wraps the raw, unparsed JSON document; `None` stands for JSON `null`.
#[derive(Debug, Clone)]
pub struct SRSessionPolicy(Option<Box<RawValue>>);
impl SRSessionPolicy {
pub fn new() -> Self {
SRSessionPolicy(None)
}
pub fn from_json(json: &str) -> Result<Self, serde_json::Error> {
if json == "null" {
Ok(SRSessionPolicy(None))
} else {
let raw_value = serde_json::from_str(json)?;
Ok(SRSessionPolicy(Some(raw_value)))
}
}
pub fn is_null(&self) -> bool {
self.0.is_none()
}
pub fn as_str(&self) -> Option<&str> {
self.0.as_ref().map(|v| v.get())
}
}
impl Default for SRSessionPolicy {
fn default() -> Self {
Self::new()
}
}
impl PartialEq for SRSessionPolicy {
fn eq(&self, other: &Self) -> bool {
self.0.as_ref().map(|v| v.get()) == other.0.as_ref().map(|v| v.get())
}
}
impl Serialize for SRSessionPolicy {
    /// Emit the raw JSON document verbatim, or an explicit `null` when empty.
    /// The inner value is serialized directly (not via `Option::serialize`)
    /// so no `serialize_some` wrapper is involved.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match &self.0 {
            Some(raw_value) => raw_value.serialize(serializer),
            None => serializer.serialize_none(),
        }
    }
}
impl<'de> Deserialize<'de> for SRSessionPolicy {
    /// Capture the incoming JSON without parsing it; `null` becomes `None`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let raw_value: Option<Box<RawValue>> = Option::deserialize(deserializer)?;
        Ok(SRSessionPolicy(raw_value))
    }
}
/// SRSvcAccCreate - create operation
/// Payload describing a service account to create during replication.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SRSvcAccCreate {
    /// User the service account belongs to.
    pub parent: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub groups: Vec<String>,
    pub claims: HashMap<String, serde_json::Value>,
    /// Raw session policy document (see [`SRSessionPolicy`]).
    #[serde(rename = "sessionPolicy")]
    pub session_policy: SRSessionPolicy,
    pub status: String,
    pub name: String,
    pub description: String,
    // NOTE(review): other OffsetDateTime fields in this module serialize via
    // `time::serde::rfc3339::option`; this one uses the type's default serde
    // representation — confirm the asymmetry is intentional.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expiration: Option<OffsetDateTime>,
    #[serde(rename = "apiVersion", skip_serializing_if = "Option::is_none")]
    pub api_version: Option<String>,
}
/// ImportIAMResult - represents the structure of the IAM import response.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ImportIAMResult {
    /// Entries skipped during import.
    /// This could be due to groups, policies etc. missing for imported
    /// entries. We don't fail hard in this case.
    pub skipped: IAMEntities,
    /// Removed entries - this mostly happens for policies
    /// where empty documents might be getting imported and that's invalid.
    pub removed: IAMEntities,
    /// Newly added entries.
    pub added: IAMEntities,
    /// Entries that failed to import, with the respective error for each.
    pub failed: IAMErrEntities,
}
/// IAMEntities - represents different IAM entities
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct IAMEntities {
    /// List of policy names
    pub policies: Vec<String>,
    /// List of user names
    pub users: Vec<String>,
    /// List of group names
    pub groups: Vec<String>,
    /// List of Service Account names
    #[serde(rename = "serviceAccounts")]
    pub service_accounts: Vec<String>,
    /// List of user policies, each entry in map represents list of policies
    /// applicable to the user
    #[serde(rename = "userPolicies")]
    pub user_policies: Vec<HashMap<String, Vec<String>>>,
    /// List of group policies, each entry in map represents list of policies
    /// applicable to the group
    #[serde(rename = "groupPolicies")]
    pub group_policies: Vec<HashMap<String, Vec<String>>>,
    /// List of STS policies, each entry in map represents list of policies
    /// applicable to the STS
    #[serde(rename = "stsPolicies")]
    pub sts_policies: Vec<HashMap<String, Vec<String>>>,
}
/// IAMErrEntities - represents errored-out IAM entries during import,
/// grouped by entity kind, each carrying its error.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct IAMErrEntities {
    /// List of errored out policies with errors
    pub policies: Vec<IAMErrEntity>,
    /// List of errored out users with errors
    pub users: Vec<IAMErrEntity>,
    /// List of errored out groups with errors
    pub groups: Vec<IAMErrEntity>,
    /// List of errored out service accounts with errors
    #[serde(rename = "serviceAccounts")]
    pub service_accounts: Vec<IAMErrEntity>,
    /// List of errored out user policies with errors
    #[serde(rename = "userPolicies")]
    pub user_policies: Vec<IAMErrPolicyEntity>,
    /// List of errored out group policies with errors
    #[serde(rename = "groupPolicies")]
    pub group_policies: Vec<IAMErrPolicyEntity>,
    /// List of errored out STS policies with errors
    #[serde(rename = "stsPolicies")]
    pub sts_policies: Vec<IAMErrPolicyEntity>,
}
/// IAMErrEntity - represents an errored IAM entity with error details
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IAMErrEntity {
    /// Name of the entity that failed.
    pub name: String,
    /// Human-readable error description.
    pub error: String,
}
/// IAMErrPolicyEntity - represents an errored policy entity with error details
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IAMErrPolicyEntity {
    /// Name of the entity the policies were being attached to.
    pub name: String,
    /// Policies involved in the failed operation.
    pub policies: Vec<String>,
    /// Human-readable error description.
    pub error: String,
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use time::OffsetDateTime;
#[test]
fn test_account_status_default() {
let status = AccountStatus::default();
assert_eq!(status, AccountStatus::Disabled);
}
#[test]
fn test_account_status_as_ref() {
assert_eq!(AccountStatus::Enabled.as_ref(), "enabled");
assert_eq!(AccountStatus::Disabled.as_ref(), "disabled");
}
#[test]
fn test_account_status_try_from_valid() {
assert_eq!(AccountStatus::try_from("enabled").unwrap(), AccountStatus::Enabled);
assert_eq!(AccountStatus::try_from("disabled").unwrap(), AccountStatus::Disabled);
}
#[test]
fn test_account_status_try_from_invalid() {
    // An unknown label must be rejected with a descriptive message.
    let err = AccountStatus::try_from("invalid").expect_err("unknown label must be rejected");
    assert!(err.contains("invalid account status"));
}
#[test]
fn test_account_status_serialization() {
let enabled = AccountStatus::Enabled;
let disabled = AccountStatus::Disabled;
let enabled_json = serde_json::to_string(&enabled).unwrap();
let disabled_json = serde_json::to_string(&disabled).unwrap();
assert_eq!(enabled_json, "\"enabled\"");
assert_eq!(disabled_json, "\"disabled\"");
}
#[test]
fn test_account_status_deserialization() {
let enabled: AccountStatus = serde_json::from_str("\"enabled\"").unwrap();
let disabled: AccountStatus = serde_json::from_str("\"disabled\"").unwrap();
assert_eq!(enabled, AccountStatus::Enabled);
assert_eq!(disabled, AccountStatus::Disabled);
}
#[test]
fn test_user_auth_type_serialization() {
let builtin = UserAuthType::Builtin;
let ldap = UserAuthType::Ldap;
let builtin_json = serde_json::to_string(&builtin).unwrap();
let ldap_json = serde_json::to_string(&ldap).unwrap();
assert_eq!(builtin_json, "\"builtin\"");
assert_eq!(ldap_json, "\"ldap\"");
}
#[test]
fn test_user_auth_info_creation() {
let auth_info = UserAuthInfo {
auth_type: UserAuthType::Ldap,
auth_server: Some("ldap.example.com".to_string()),
auth_server_user_id: Some("user123".to_string()),
};
assert!(matches!(auth_info.auth_type, UserAuthType::Ldap));
assert_eq!(auth_info.auth_server.unwrap(), "ldap.example.com");
assert_eq!(auth_info.auth_server_user_id.unwrap(), "user123");
}
#[test]
fn test_user_auth_info_serialization() {
let auth_info = UserAuthInfo {
auth_type: UserAuthType::Builtin,
auth_server: None,
auth_server_user_id: None,
};
let json = serde_json::to_string(&auth_info).unwrap();
assert!(json.contains("builtin"));
assert!(!json.contains("authServer"), "None fields should be skipped");
}
#[test]
fn test_user_info_default() {
let user_info = UserInfo::default();
assert!(user_info.auth_info.is_none());
assert!(user_info.secret_key.is_none());
assert!(user_info.policy_name.is_none());
assert_eq!(user_info.status, AccountStatus::Disabled);
assert!(user_info.member_of.is_none());
assert!(user_info.updated_at.is_none());
}
#[test]
fn test_user_info_with_values() {
let now = OffsetDateTime::now_utc();
let user_info = UserInfo {
auth_info: Some(UserAuthInfo {
auth_type: UserAuthType::Builtin,
auth_server: None,
auth_server_user_id: None,
}),
secret_key: Some("secret123".to_string()),
policy_name: Some("ReadOnlyAccess".to_string()),
status: AccountStatus::Enabled,
member_of: Some(vec!["group1".to_string(), "group2".to_string()]),
updated_at: Some(now),
};
assert!(user_info.auth_info.is_some());
assert_eq!(user_info.secret_key.unwrap(), "secret123");
assert_eq!(user_info.policy_name.unwrap(), "ReadOnlyAccess");
assert_eq!(user_info.status, AccountStatus::Enabled);
assert_eq!(user_info.member_of.unwrap().len(), 2);
assert!(user_info.updated_at.is_some());
}
#[test]
fn test_add_or_update_user_req_creation() {
let req = AddOrUpdateUserReq {
secret_key: "newsecret".to_string(),
policy: Some("FullAccess".to_string()),
status: AccountStatus::Enabled,
};
assert_eq!(req.secret_key, "newsecret");
assert_eq!(req.policy.unwrap(), "FullAccess");
assert_eq!(req.status, AccountStatus::Enabled);
}
#[test]
fn test_service_account_info_creation() {
let now = OffsetDateTime::now_utc();
let service_account = ServiceAccountInfo {
parent_user: "admin".to_string(),
account_status: "enabled".to_string(),
implied_policy: true,
access_key: "AKIAIOSFODNN7EXAMPLE".to_string(),
name: Some("test-service".to_string()),
description: Some("Test service account".to_string()),
expiration: Some(now),
};
assert_eq!(service_account.parent_user, "admin");
assert_eq!(service_account.account_status, "enabled");
assert!(service_account.implied_policy);
assert_eq!(service_account.access_key, "AKIAIOSFODNN7EXAMPLE");
assert_eq!(service_account.name.unwrap(), "test-service");
assert!(service_account.expiration.is_some());
}
#[test]
fn test_list_service_accounts_resp_creation() {
let resp = ListServiceAccountsResp {
accounts: vec![
ServiceAccountInfo {
parent_user: "user1".to_string(),
account_status: "enabled".to_string(),
implied_policy: false,
access_key: "KEY1".to_string(),
name: Some("service1".to_string()),
description: None,
expiration: None,
},
ServiceAccountInfo {
parent_user: "user2".to_string(),
account_status: "disabled".to_string(),
implied_policy: true,
access_key: "KEY2".to_string(),
name: Some("service2".to_string()),
description: Some("Second service".to_string()),
expiration: None,
},
],
};
assert_eq!(resp.accounts.len(), 2);
assert_eq!(resp.accounts[0].parent_user, "user1");
assert_eq!(resp.accounts[1].account_status, "disabled");
}
#[test]
fn test_add_service_account_req_validate_success() {
let req = AddServiceAccountReq {
policy: Some("ReadOnlyAccess".to_string()),
target_user: Some("testuser".to_string()),
access_key: "AKIAIOSFODNN7EXAMPLE".to_string(),
secret_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY".to_string(),
name: Some("test-service".to_string()),
description: Some("Test service account".to_string()),
expiration: None,
};
let result = req.validate();
assert!(result.is_ok());
}
#[test]
fn test_add_service_account_req_validate_empty_access_key() {
let req = AddServiceAccountReq {
policy: None,
target_user: None,
access_key: "".to_string(),
secret_key: "secret".to_string(),
name: Some("test".to_string()),
description: None,
expiration: None,
};
let result = req.validate();
assert!(result.is_err());
assert!(result.unwrap_err().contains("accessKey is empty"));
}
#[test]
fn test_add_service_account_req_validate_empty_secret_key() {
let req = AddServiceAccountReq {
policy: None,
target_user: None,
access_key: "AKIAIOSFODNN7EXAMPLE".to_string(),
secret_key: "".to_string(),
name: Some("test".to_string()),
description: None,
expiration: None,
};
let result = req.validate();
assert!(result.is_err());
assert!(result.unwrap_err().contains("secretKey is empty"));
}
#[test]
fn test_add_service_account_req_validate_empty_name() {
let req = AddServiceAccountReq {
policy: None,
target_user: None,
access_key: "AKIAIOSFODNN7EXAMPLE".to_string(),
secret_key: "secret".to_string(),
name: None,
description: None,
expiration: None,
};
let result = req.validate();
assert!(result.is_err());
assert!(result.unwrap_err().contains("name is empty"));
}
#[test]
fn test_credentials_serialization() {
let now = OffsetDateTime::now_utc();
let credentials = Credentials {
access_key: "AKIAIOSFODNN7EXAMPLE",
secret_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
session_token: Some("session123"),
expiration: Some(now),
};
let json = serde_json::to_string(&credentials).unwrap();
assert!(json.contains("AKIAIOSFODNN7EXAMPLE"));
assert!(json.contains("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"));
assert!(json.contains("session123"));
}
#[test]
fn test_credentials_without_optional_fields() {
let credentials = Credentials {
access_key: "AKIAIOSFODNN7EXAMPLE",
secret_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
session_token: None,
expiration: None,
};
let json = serde_json::to_string(&credentials).unwrap();
assert!(json.contains("AKIAIOSFODNN7EXAMPLE"));
assert!(!json.contains("sessionToken"), "None fields should be skipped");
assert!(!json.contains("expiration"), "None fields should be skipped");
}
#[test]
fn test_add_service_account_resp_creation() {
let credentials = Credentials {
access_key: "AKIAIOSFODNN7EXAMPLE",
secret_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
session_token: None,
expiration: None,
};
let resp = AddServiceAccountResp { credentials };
assert_eq!(resp.credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
assert_eq!(resp.credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
}
#[test]
fn test_info_service_account_resp_creation() {
let now = OffsetDateTime::now_utc();
let resp = InfoServiceAccountResp {
parent_user: "admin".to_string(),
account_status: "enabled".to_string(),
implied_policy: true,
policy: Some("ReadOnlyAccess".to_string()),
name: Some("test-service".to_string()),
description: Some("Test service account".to_string()),
expiration: Some(now),
};
assert_eq!(resp.parent_user, "admin");
assert_eq!(resp.account_status, "enabled");
assert!(resp.implied_policy);
assert_eq!(resp.policy.unwrap(), "ReadOnlyAccess");
assert_eq!(resp.name.unwrap(), "test-service");
assert!(resp.expiration.is_some());
}
#[test]
fn test_update_service_account_req_validate() {
let req = UpdateServiceAccountReq {
new_policy: Some("FullAccess".to_string()),
new_secret_key: Some("newsecret".to_string()),
new_status: Some("enabled".to_string()),
new_name: Some("updated-service".to_string()),
new_description: Some("Updated description".to_string()),
new_expiration: None,
};
let result = req.validate();
assert!(result.is_ok());
}
#[test]
fn test_account_info_creation() {
use crate::BackendInfo;
let account_info = AccountInfo {
account_name: "testuser".to_string(),
server: BackendInfo::default(),
policy: serde_json::json!({"Version": "2012-10-17"}),
buckets: vec![],
};
assert_eq!(account_info.account_name, "testuser");
assert!(account_info.buckets.is_empty());
assert!(account_info.policy.is_object());
}
#[test]
fn test_bucket_access_info_creation() {
let now = OffsetDateTime::now_utc();
let mut sizes_histogram = HashMap::new();
sizes_histogram.insert("small".to_string(), 100);
sizes_histogram.insert("large".to_string(), 50);
let mut versions_histogram = HashMap::new();
versions_histogram.insert("v1".to_string(), 80);
versions_histogram.insert("v2".to_string(), 70);
let mut prefix_usage = HashMap::new();
prefix_usage.insert("logs/".to_string(), 1000000);
prefix_usage.insert("data/".to_string(), 5000000);
let bucket_info = BucketAccessInfo {
name: "test-bucket".to_string(),
size: 6000000,
objects: 150,
object_sizes_histogram: sizes_histogram,
object_versions_histogram: versions_histogram,
details: Some(BucketDetails {
versioning: true,
versioning_suspended: false,
locking: true,
replication: false,
}),
prefix_usage,
created: Some(now),
access: AccountAccess {
read: true,
write: false,
},
};
assert_eq!(bucket_info.name, "test-bucket");
assert_eq!(bucket_info.size, 6000000);
assert_eq!(bucket_info.objects, 150);
assert_eq!(bucket_info.object_sizes_histogram.len(), 2);
assert_eq!(bucket_info.object_versions_histogram.len(), 2);
assert!(bucket_info.details.is_some());
assert_eq!(bucket_info.prefix_usage.len(), 2);
assert!(bucket_info.created.is_some());
assert!(bucket_info.access.read);
assert!(!bucket_info.access.write);
}
#[test]
fn test_bucket_details_creation() {
let details = BucketDetails {
versioning: true,
versioning_suspended: false,
locking: true,
replication: true,
};
assert!(details.versioning);
assert!(!details.versioning_suspended);
assert!(details.locking);
assert!(details.replication);
}
#[test]
fn test_account_access_creation() {
let read_only = AccountAccess {
read: true,
write: false,
};
let full_access = AccountAccess { read: true, write: true };
let no_access = AccountAccess {
read: false,
write: false,
};
assert!(read_only.read && !read_only.write);
assert!(full_access.read && full_access.write);
assert!(!no_access.read && !no_access.write);
}
#[test]
fn test_serialization_deserialization_roundtrip() {
let user_info = UserInfo {
auth_info: Some(UserAuthInfo {
auth_type: UserAuthType::Ldap,
auth_server: Some("ldap.example.com".to_string()),
auth_server_user_id: Some("user123".to_string()),
}),
secret_key: Some("secret123".to_string()),
policy_name: Some("ReadOnlyAccess".to_string()),
status: AccountStatus::Enabled,
member_of: Some(vec!["group1".to_string()]),
updated_at: None,
};
let json = serde_json::to_string(&user_info).unwrap();
let deserialized: UserInfo = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.secret_key.unwrap(), "secret123");
assert_eq!(deserialized.policy_name.unwrap(), "ReadOnlyAccess");
assert_eq!(deserialized.status, AccountStatus::Enabled);
assert_eq!(deserialized.member_of.unwrap().len(), 1);
}
#[test]
fn test_debug_format_all_structures() {
let account_status = AccountStatus::Enabled;
let user_auth_type = UserAuthType::Builtin;
let user_info = UserInfo::default();
let service_account = ServiceAccountInfo {
parent_user: "test".to_string(),
account_status: "enabled".to_string(),
implied_policy: false,
access_key: "key".to_string(),
name: None,
description: None,
expiration: None,
};
// Test that all structures can be formatted with Debug
assert!(!format!("{account_status:?}").is_empty());
assert!(!format!("{user_auth_type:?}").is_empty());
assert!(!format!("{user_info:?}").is_empty());
assert!(!format!("{service_account:?}").is_empty());
}
#[test]
fn test_memory_efficiency() {
// Test that structures don't use excessive memory
assert!(std::mem::size_of::<AccountStatus>() < 100);
assert!(std::mem::size_of::<UserAuthType>() < 100);
assert!(std::mem::size_of::<UserInfo>() < 2000);
assert!(std::mem::size_of::<ServiceAccountInfo>() < 2000);
assert!(std::mem::size_of::<AccountAccess>() < 100);
}
#[test]
fn test_edge_cases() {
// Test empty strings and edge cases
let req = AddServiceAccountReq {
policy: Some("".to_string()),
target_user: Some("".to_string()),
access_key: "valid_key".to_string(),
secret_key: "valid_secret".to_string(),
name: Some("valid_name".to_string()),
description: Some("".to_string()),
expiration: None,
};
// Should still validate successfully with empty optional strings
assert!(req.validate().is_ok());
// Test very long strings
let long_string = "a".repeat(1000);
let long_req = AddServiceAccountReq {
policy: Some(long_string.clone()),
target_user: Some(long_string.clone()),
access_key: long_string.clone(),
secret_key: long_string.clone(),
name: Some(long_string.clone()),
description: Some(long_string),
expiration: None,
};
assert!(long_req.validate().is_ok());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/lib.rs | crates/madmin/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod group;
pub mod heal_commands;
pub mod health;
pub mod info_commands;
pub mod metrics;
pub mod net;
pub mod policy;
pub mod service_commands;
pub mod trace;
pub mod user;
pub mod utils;
pub use group::*;
pub use info_commands::*;
pub use policy::*;
pub use user::*;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/group.rs | crates/madmin/src/group.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use serde::Serialize;
use time::OffsetDateTime;
/// Whether a group is active; serialized lowercase ("enabled"/"disabled").
#[derive(Debug, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum GroupStatus {
    #[default]
    Enabled,
    Disabled,
}
/// Request to change a group's membership or status.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct GroupAddRemove {
    pub group: String,
    pub members: Vec<String>,
    #[serde(rename = "groupStatus")]
    pub status: GroupStatus,
    /// Presumably true marks this as a removal rather than an addition of
    /// `members` — confirm at the call sites.
    #[serde(rename = "isRemove")]
    pub is_remove: bool,
}
/// Descriptive view of a group: status, membership, attached policy.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct GroupDesc {
    pub name: String,
    pub status: String,
    pub members: Vec<String>,
    pub policy: String,
    #[serde(rename = "updatedAt", skip_serializing_if = "Option::is_none")]
    pub updated_at: Option<OffsetDateTime>,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/info_commands.rs | crates/madmin/src/info_commands.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, time::SystemTime};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
use crate::metrics::TimedAction;
/// Availability state of an item (disk, server, ...).
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ItemState {
    Offline,
    Initializing,
    Online,
}

impl ItemState {
    /// Canonical lowercase label for this state.
    ///
    /// NOTE: despite the name this is not `Display::to_string`; it returns
    /// a borrowed `&str`. The name is kept for caller compatibility even
    /// though clippy's `inherent_to_string` would flag it.
    pub fn to_string(&self) -> &str {
        match *self {
            ItemState::Offline => "offline",
            ItemState::Initializing => "initializing",
            ItemState::Online => "online",
        }
    }

    /// Inverse of [`ItemState::to_string`]; unknown labels yield `None`.
    pub fn from_string(s: &str) -> Option<ItemState> {
        const ALL: [ItemState; 3] = [ItemState::Offline, ItemState::Initializing, ItemState::Online];
        ALL.into_iter().find(|state| state.to_string() == s)
    }
}
/// Per-disk operation metrics keyed by API name.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct DiskMetrics {
    /// Recent activity per API, keyed by API name.
    pub last_minute: HashMap<String, TimedAction>,
    /// Call counts per API, keyed by API name.
    pub api_calls: HashMap<String, u64>,
    pub total_waiting: u32,
    pub total_errors_availability: u64,
    pub total_errors_timeout: u64,
    pub total_writes: u64,
    pub total_deletes: u64,
}
/// Full status and statistics of a single drive.
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
pub struct Disk {
    pub endpoint: String,
    #[serde(rename = "rootDisk")]
    pub root_disk: bool,
    #[serde(rename = "path")]
    pub drive_path: String,
    pub healing: bool,
    pub scanning: bool,
    /// One of the `ITEM_*` state labels.
    pub state: String,
    pub uuid: String,
    pub major: u32,
    pub minor: u32,
    pub model: Option<String>,
    #[serde(rename = "totalspace")]
    pub total_space: u64,
    #[serde(rename = "usedspace")]
    pub used_space: u64,
    #[serde(rename = "availspace")]
    pub available_space: u64,
    #[serde(rename = "readthroughput")]
    pub read_throughput: f64,
    #[serde(rename = "writethroughput")]
    pub write_throughput: f64,
    #[serde(rename = "readlatency")]
    pub read_latency: f64,
    #[serde(rename = "writelatency")]
    pub write_latency: f64,
    pub utilization: f64,
    pub metrics: Option<DiskMetrics>,
    pub heal_info: Option<HealingDisk>,
    pub used_inodes: u64,
    pub free_inodes: u64,
    /// True when the drive is attached to this node.
    pub local: bool,
    // Topology coordinates; -1 presumably means "unassigned" given the
    // signed types — confirm against producers.
    pub pool_index: i32,
    pub set_index: i32,
    pub disk_index: i32,
}
/// Progress of a healing operation on one drive.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct HealingDisk {
    pub id: String,
    pub heal_id: String,
    pub pool_index: Option<usize>,
    pub set_index: Option<usize>,
    pub disk_index: Option<usize>,
    pub endpoint: String,
    pub path: String,
    // NOTE(review): `started` is OffsetDateTime while `last_update` is
    // SystemTime — mixed time types; confirm this is deliberate.
    pub started: Option<OffsetDateTime>,
    pub last_update: Option<SystemTime>,
    pub retry_attempts: u64,
    pub objects_total_count: u64,
    pub objects_total_size: u64,
    pub items_healed: u64,
    pub items_failed: u64,
    // NOTE(review): singular `item_skipped` breaks the `items_*` naming of
    // its siblings; renaming would change the serialized key.
    pub item_skipped: u64,
    pub bytes_done: u64,
    pub bytes_failed: u64,
    pub bytes_skipped: u64,
    pub objects_healed: u64,
    pub objects_failed: u64,
    /// Bucket/object currently being healed.
    pub bucket: String,
    pub object: String,
    pub queue_buckets: Vec<String>,
    pub healed_buckets: Vec<String>,
    pub finished: bool,
}
/// Storage backend kind. NOTE(review): the variants (FS/Erasure) suggest
/// this is really a backend *type* discriminant; the name `BackendByte`
/// looks odd — confirm whether a rename was intended.
#[derive(Debug, Default, Serialize, Deserialize)]
pub enum BackendByte {
    #[default]
    Unknown,
    FS,
    Erasure,
}
/// Cluster-wide storage snapshot: all drives plus backend layout info.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct StorageInfo {
    pub disks: Vec<Disk>,
    pub backend: BackendInfo,
}
/// Per-endpoint disk counts (endpoint name -> number of disks).
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct BackendDisks(pub HashMap<String, usize>);

impl BackendDisks {
    /// An empty disk-count map.
    pub fn new() -> Self {
        Self::default()
    }

    /// Total disk count across all endpoints.
    pub fn sum(&self) -> usize {
        self.0.values().fold(0, |acc, n| acc + n)
    }
}
/// Detailed information about the storage backend.
///
/// Serialized with PascalCase field names; `default` lets partial JSON
/// deserialize into zeroed fields.
// "SC" presumably abbreviates "storage class" and "RR" "reduced redundancy"
// (S3 storage-class naming) — TODO confirm against the producing side.
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase", default)]
pub struct BackendInfo {
pub backend_type: BackendByte,
/// Online disk counts, grouped per key (see `BackendDisks`).
pub online_disks: BackendDisks,
pub offline_disks: BackendDisks,
#[serde(rename = "StandardSCData")]
pub standard_sc_data: Vec<usize>,
#[serde(rename = "StandardSCParities")]
pub standard_sc_parities: Vec<usize>,
#[serde(rename = "StandardSCParity")]
pub standard_sc_parity: Option<usize>,
#[serde(rename = "RRSCData")]
pub rr_sc_data: Vec<usize>,
#[serde(rename = "RRSCParities")]
pub rr_sc_parities: Vec<usize>,
#[serde(rename = "RRSCParity")]
pub rr_sc_parity: Option<usize>,
pub total_sets: Vec<usize>,
pub drives_per_set: Vec<usize>,
}
/// State string for an offline item/drive (see `Disk::state`).
pub const ITEM_OFFLINE: &str = "offline";
/// State string for an item/drive that is still initializing.
pub const ITEM_INITIALIZING: &str = "initializing";
/// State string for an online item/drive.
pub const ITEM_ONLINE: &str = "online";
/// Process memory statistics reported by a server.
// Field names mirror Go's runtime.MemStats vocabulary (alloc/mallocs/frees);
// units are presumably bytes for the *_alloc fields — TODO confirm.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct MemStats {
pub alloc: u64,
pub total_alloc: u64,
pub mallocs: u64,
pub frees: u64,
pub heap_alloc: u64,
}
/// Properties and runtime statistics of a single server in the cluster.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct ServerProperties {
/// Server state string; see the ITEM_* constants in this module.
pub state: String,
pub endpoint: String,
/// URL scheme the server is reachable over (e.g. "http").
pub scheme: String,
/// Uptime in seconds — TODO confirm the unit against the producer.
pub uptime: u64,
pub version: String,
#[serde(rename = "commitID")]
pub commit_id: String,
/// Network connectivity map (peer endpoint -> status string).
pub network: HashMap<String, String>,
#[serde(rename = "drives")]
pub disks: Vec<Disk>,
#[serde(rename = "poolNumber")]
pub pool_number: i32,
#[serde(rename = "poolNumbers")]
pub pool_numbers: Vec<i32>,
pub mem_stats: MemStats,
pub max_procs: u64,
pub num_cpu: u64,
pub runtime_version: String,
/// RustFS-related environment variables set on this server.
pub rustfs_env_vars: HashMap<String, String>,
}
/// Status of a KMS (key management service) endpoint.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Kms {
pub status: Option<String>,
/// Result of the KMS encrypt self-test, if performed.
pub encrypt: Option<String>,
/// Result of the KMS decrypt self-test, if performed.
pub decrypt: Option<String>,
pub endpoint: Option<String>,
pub version: Option<String>,
}
/// Status of the LDAP integration.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Ldap {
pub status: Option<String>,
}
/// Generic status wrapper used by service/target status maps below.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Status {
pub status: Option<String>,
}
/// Map of audit target name to its status.
pub type Audit = HashMap<String, Status>;
/// Map of logger target name to its status.
pub type Logger = HashMap<String, Status>;
/// Map of notification target ID to its status.
pub type TargetIDStatus = HashMap<String, Status>;
/// Status of the auxiliary services configured on the cluster.
#[derive(Serialize, Deserialize, Default, Debug)]
pub struct Services {
pub kms: Option<Kms>, // deprecated july 2023
#[serde(rename = "kmsStatus")]
pub kms_status: Option<Vec<Kms>>,
pub ldap: Option<Ldap>,
pub logger: Option<Vec<Logger>>,
pub audit: Option<Vec<Audit>>,
/// Per notification system: map of system name to its targets' statuses.
pub notifications: Option<Vec<HashMap<String, Vec<TargetIDStatus>>>>,
}
/// Bucket count, with an optional error if counting failed.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Buckets {
pub count: u64,
pub error: Option<String>,
}
/// Object count, with an optional error if counting failed.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Objects {
pub count: u64,
pub error: Option<String>,
}
/// Object-version count, with an optional error if counting failed.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Versions {
pub count: u64,
pub error: Option<String>,
}
/// Delete-marker count, with an optional error if counting failed.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct DeleteMarkers {
pub count: u64,
pub error: Option<String>,
}
/// Total used size, with an optional error if the scan failed.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct Usage {
pub size: u64,
pub error: Option<String>,
}
/// Usage and health summary of a single erasure set.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct ErasureSetInfo {
pub id: i32,
/// Raw bytes used on disk (before erasure-coding accounting).
#[serde(rename = "rawUsage")]
pub raw_usage: u64,
#[serde(rename = "rawCapacity")]
pub raw_capacity: u64,
/// Logical usage in bytes.
pub usage: u64,
#[serde(rename = "objectsCount")]
pub objects_count: u64,
#[serde(rename = "versionsCount")]
pub versions_count: u64,
#[serde(rename = "deleteMarkersCount")]
pub delete_markers_count: u64,
/// Number of disks in this set currently being healed.
#[serde(rename = "healDisks")]
pub heal_disks: i32,
}
/// Backend type tag carried inside backend info messages.
// Overlaps conceptually with `BackendByte` above but serializes as the
// strings "FS" / "Erasure".
#[derive(Serialize, Deserialize, Debug, Default)]
pub enum BackendType {
#[default]
#[serde(rename = "FS")]
FsType,
#[serde(rename = "Erasure")]
ErasureType,
}
/// Backend information for the plain-FS (single-drive) backend.
// `Debug` and `Default` added for consistency with `ErasureBackend` and the
// rest of this module; `BackendType` already implements `Default`, so this is
// purely additive and backward-compatible.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct FSBackend {
    #[serde(rename = "backendType")]
    pub backend_type: BackendType,
}
/// Backend information for the erasure-coded backend.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct ErasureBackend {
#[serde(rename = "backendType")]
pub backend_type: BackendType,
#[serde(rename = "onlineDisks")]
pub online_disks: usize,
#[serde(rename = "offlineDisks")]
pub offline_disks: usize,
/// Parity drives for the standard storage class, when configured.
#[serde(rename = "standardSCParity")]
pub standard_sc_parity: Option<usize>,
/// Parity drives for the reduced-redundancy storage class, when configured.
#[serde(rename = "rrSCParity")]
pub rr_sc_parity: Option<usize>,
#[serde(rename = "totalSets")]
pub total_sets: Vec<usize>,
#[serde(rename = "totalDrivesPerSet")]
pub drives_per_set: Vec<usize>,
}
#[derive(Serialize, Deserialize)]
pub struct InfoMessage {
pub mode: Option<String>,
pub domain: Option<Vec<String>>,
pub region: Option<String>,
#[serde(rename = "sqsARN")]
pub sqs_arn: Option<Vec<String>>,
#[serde(rename = "deploymentID")]
pub deployment_id: Option<String>,
pub buckets: Option<Buckets>,
pub objects: Option<Objects>,
pub versions: Option<Versions>,
#[serde(rename = "deletemarkers")]
pub delete_markers: Option<DeleteMarkers>,
pub usage: Option<Usage>,
pub services: Option<Services>,
pub backend: Option<ErasureBackend>,
pub servers: Option<Vec<ServerProperties>>,
pub pools: Option<std::collections::HashMap<i32, std::collections::HashMap<i32, ErasureSetInfo>>>,
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::collections::HashMap;
use time::OffsetDateTime;
// ItemState <-> string conversion round-trips against the ITEM_* constants.
#[test]
fn test_item_state_to_string() {
assert_eq!(ItemState::Offline.to_string(), ITEM_OFFLINE);
assert_eq!(ItemState::Initializing.to_string(), ITEM_INITIALIZING);
assert_eq!(ItemState::Online.to_string(), ITEM_ONLINE);
}
#[test]
fn test_item_state_from_string_valid() {
assert_eq!(ItemState::from_string(ITEM_OFFLINE), Some(ItemState::Offline));
assert_eq!(ItemState::from_string(ITEM_INITIALIZING), Some(ItemState::Initializing));
assert_eq!(ItemState::from_string(ITEM_ONLINE), Some(ItemState::Online));
}
// Unknown, empty and wrongly-cased inputs must all map to None.
#[test]
fn test_item_state_from_string_invalid() {
assert_eq!(ItemState::from_string("invalid"), None);
assert_eq!(ItemState::from_string(""), None);
assert_eq!(ItemState::from_string("OFFLINE"), None); // Case sensitive
}
// Default-constructed DiskMetrics has empty maps and zeroed counters.
#[test]
fn test_disk_metrics_default() {
let metrics = DiskMetrics::default();
assert!(metrics.last_minute.is_empty());
assert!(metrics.api_calls.is_empty());
assert_eq!(metrics.total_waiting, 0);
assert_eq!(metrics.total_errors_availability, 0);
assert_eq!(metrics.total_errors_timeout, 0);
assert_eq!(metrics.total_writes, 0);
assert_eq!(metrics.total_deletes, 0);
}
// Field values round-trip through a fully-populated DiskMetrics literal.
#[test]
fn test_disk_metrics_with_values() {
let mut last_minute = HashMap::new();
last_minute.insert("read".to_string(), TimedAction::default());
let mut api_calls = HashMap::new();
api_calls.insert("GET".to_string(), 100);
api_calls.insert("PUT".to_string(), 50);
let metrics = DiskMetrics {
last_minute,
api_calls,
total_waiting: 5,
total_errors_availability: 2,
total_errors_timeout: 1,
total_writes: 1000,
total_deletes: 50,
};
assert_eq!(metrics.last_minute.len(), 1);
assert_eq!(metrics.api_calls.len(), 2);
assert_eq!(metrics.total_waiting, 5);
assert_eq!(metrics.total_writes, 1000);
assert_eq!(metrics.total_deletes, 50);
}
// Default-constructed Disk is empty/zeroed across every field.
#[test]
fn test_disk_default() {
let disk = Disk::default();
assert!(disk.endpoint.is_empty());
assert!(!disk.root_disk);
assert!(disk.drive_path.is_empty());
assert!(!disk.healing);
assert!(!disk.scanning);
assert!(disk.state.is_empty());
assert!(disk.uuid.is_empty());
assert_eq!(disk.major, 0);
assert_eq!(disk.minor, 0);
assert!(disk.model.is_none());
assert_eq!(disk.total_space, 0);
assert_eq!(disk.used_space, 0);
assert_eq!(disk.available_space, 0);
assert_eq!(disk.read_throughput, 0.0);
assert_eq!(disk.write_throughput, 0.0);
assert_eq!(disk.read_latency, 0.0);
assert_eq!(disk.write_latency, 0.0);
assert_eq!(disk.utilization, 0.0);
assert!(disk.metrics.is_none());
assert!(disk.heal_info.is_none());
assert_eq!(disk.used_inodes, 0);
assert_eq!(disk.free_inodes, 0);
assert!(!disk.local);
assert_eq!(disk.pool_index, 0);
assert_eq!(disk.set_index, 0);
assert_eq!(disk.disk_index, 0);
}
// Field values round-trip through a fully-populated Disk literal.
#[test]
fn test_disk_with_values() {
let disk = Disk {
endpoint: "http://localhost:9000".to_string(),
root_disk: true,
drive_path: "/data/disk1".to_string(),
healing: false,
scanning: true,
state: "online".to_string(),
uuid: "12345678-1234-1234-1234-123456789abc".to_string(),
major: 8,
minor: 1,
model: Some("Samsung SSD 980".to_string()),
total_space: 1000000000000,
used_space: 500000000000,
available_space: 500000000000,
read_throughput: 100.5,
write_throughput: 80.3,
read_latency: 5.2,
write_latency: 7.8,
utilization: 50.0,
metrics: Some(DiskMetrics::default()),
heal_info: None,
used_inodes: 1000000,
free_inodes: 9000000,
local: true,
pool_index: 0,
set_index: 1,
disk_index: 2,
};
assert_eq!(disk.endpoint, "http://localhost:9000");
assert!(disk.root_disk);
assert_eq!(disk.drive_path, "/data/disk1");
assert!(disk.scanning);
assert_eq!(disk.state, "online");
assert_eq!(disk.major, 8);
assert_eq!(disk.minor, 1);
assert_eq!(disk.model.unwrap(), "Samsung SSD 980");
assert_eq!(disk.total_space, 1000000000000);
assert_eq!(disk.utilization, 50.0);
assert!(disk.metrics.is_some());
assert!(disk.local);
}
// Default-constructed HealingDisk is empty/zeroed across every field.
#[test]
fn test_healing_disk_default() {
let healing_disk = HealingDisk::default();
assert!(healing_disk.id.is_empty());
assert!(healing_disk.heal_id.is_empty());
assert!(healing_disk.pool_index.is_none());
assert!(healing_disk.set_index.is_none());
assert!(healing_disk.disk_index.is_none());
assert!(healing_disk.endpoint.is_empty());
assert!(healing_disk.path.is_empty());
assert!(healing_disk.started.is_none());
assert!(healing_disk.last_update.is_none());
assert_eq!(healing_disk.retry_attempts, 0);
assert_eq!(healing_disk.objects_total_count, 0);
assert_eq!(healing_disk.objects_total_size, 0);
assert_eq!(healing_disk.items_healed, 0);
assert_eq!(healing_disk.items_failed, 0);
assert_eq!(healing_disk.item_skipped, 0);
assert_eq!(healing_disk.bytes_done, 0);
assert_eq!(healing_disk.bytes_failed, 0);
assert_eq!(healing_disk.bytes_skipped, 0);
assert_eq!(healing_disk.objects_healed, 0);
assert_eq!(healing_disk.objects_failed, 0);
assert!(healing_disk.bucket.is_empty());
assert!(healing_disk.object.is_empty());
assert!(healing_disk.queue_buckets.is_empty());
assert!(healing_disk.healed_buckets.is_empty());
assert!(!healing_disk.finished);
}
// Field values round-trip through a fully-populated HealingDisk literal,
// including both timestamp types (OffsetDateTime and SystemTime).
#[test]
fn test_healing_disk_with_values() {
let now = OffsetDateTime::now_utc();
let system_time = std::time::SystemTime::now();
let healing_disk = HealingDisk {
id: "heal-001".to_string(),
heal_id: "heal-session-123".to_string(),
pool_index: Some(0),
set_index: Some(1),
disk_index: Some(2),
endpoint: "http://node1:9000".to_string(),
path: "/data/disk1".to_string(),
started: Some(now),
last_update: Some(system_time),
retry_attempts: 3,
objects_total_count: 10000,
objects_total_size: 1000000000,
items_healed: 8000,
items_failed: 100,
item_skipped: 50,
bytes_done: 800000000,
bytes_failed: 10000000,
bytes_skipped: 5000000,
objects_healed: 7900,
objects_failed: 100,
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
queue_buckets: vec!["bucket1".to_string(), "bucket2".to_string()],
healed_buckets: vec!["bucket3".to_string()],
finished: false,
};
assert_eq!(healing_disk.id, "heal-001");
assert_eq!(healing_disk.heal_id, "heal-session-123");
assert_eq!(healing_disk.pool_index.unwrap(), 0);
assert_eq!(healing_disk.set_index.unwrap(), 1);
assert_eq!(healing_disk.disk_index.unwrap(), 2);
assert_eq!(healing_disk.retry_attempts, 3);
assert_eq!(healing_disk.objects_total_count, 10000);
assert_eq!(healing_disk.items_healed, 8000);
assert_eq!(healing_disk.queue_buckets.len(), 2);
assert_eq!(healing_disk.healed_buckets.len(), 1);
assert!(!healing_disk.finished);
}
// BackendByte defaults to Unknown.
#[test]
fn test_backend_byte_default() {
let backend = BackendByte::default();
assert!(matches!(backend, BackendByte::Unknown));
}
#[test]
fn test_backend_byte_variants() {
let unknown = BackendByte::Unknown;
let fs = BackendByte::FS;
let erasure = BackendByte::Erasure;
// Test that all variants can be created
assert!(matches!(unknown, BackendByte::Unknown));
assert!(matches!(fs, BackendByte::FS));
assert!(matches!(erasure, BackendByte::Erasure));
}
// StorageInfo composes a disk list with backend info.
#[test]
fn test_storage_info_creation() {
let storage_info = StorageInfo {
disks: vec![
Disk {
endpoint: "node1:9000".to_string(),
state: "online".to_string(),
..Default::default()
},
Disk {
endpoint: "node2:9000".to_string(),
state: "offline".to_string(),
..Default::default()
},
],
backend: BackendInfo::default(),
};
assert_eq!(storage_info.disks.len(), 2);
assert_eq!(storage_info.disks[0].endpoint, "node1:9000");
assert_eq!(storage_info.disks[1].state, "offline");
}
// BackendDisks::new starts empty.
#[test]
fn test_backend_disks_new() {
let backend_disks = BackendDisks::new();
assert!(backend_disks.0.is_empty());
}
// sum() totals the counts across all keys.
#[test]
fn test_backend_disks_sum() {
let mut backend_disks = BackendDisks::new();
backend_disks.0.insert("pool1".to_string(), 4);
backend_disks.0.insert("pool2".to_string(), 6);
backend_disks.0.insert("pool3".to_string(), 2);
assert_eq!(backend_disks.sum(), 12);
}
// sum() of an empty map is zero.
#[test]
fn test_backend_disks_sum_empty() {
let backend_disks = BackendDisks::new();
assert_eq!(backend_disks.sum(), 0);
}
// Default-constructed BackendInfo is empty/zeroed across every field.
#[test]
fn test_backend_info_default() {
let backend_info = BackendInfo::default();
assert!(matches!(backend_info.backend_type, BackendByte::Unknown));
assert_eq!(backend_info.online_disks.sum(), 0);
assert_eq!(backend_info.offline_disks.sum(), 0);
assert!(backend_info.standard_sc_data.is_empty());
assert!(backend_info.standard_sc_parities.is_empty());
assert!(backend_info.standard_sc_parity.is_none());
assert!(backend_info.rr_sc_data.is_empty());
assert!(backend_info.rr_sc_parities.is_empty());
assert!(backend_info.rr_sc_parity.is_none());
assert!(backend_info.total_sets.is_empty());
assert!(backend_info.drives_per_set.is_empty());
}
// Field values round-trip through a fully-populated BackendInfo literal.
#[test]
fn test_backend_info_with_values() {
let mut online_disks = BackendDisks::new();
online_disks.0.insert("set1".to_string(), 4);
online_disks.0.insert("set2".to_string(), 4);
let mut offline_disks = BackendDisks::new();
offline_disks.0.insert("set1".to_string(), 0);
offline_disks.0.insert("set2".to_string(), 1);
let backend_info = BackendInfo {
backend_type: BackendByte::Erasure,
online_disks,
offline_disks,
standard_sc_data: vec![4, 4],
standard_sc_parities: vec![2, 2],
standard_sc_parity: Some(2),
rr_sc_data: vec![2, 2],
rr_sc_parities: vec![1, 1],
rr_sc_parity: Some(1),
total_sets: vec![2],
drives_per_set: vec![6, 6],
};
assert!(matches!(backend_info.backend_type, BackendByte::Erasure));
assert_eq!(backend_info.online_disks.sum(), 8);
assert_eq!(backend_info.offline_disks.sum(), 1);
assert_eq!(backend_info.standard_sc_data.len(), 2);
assert_eq!(backend_info.standard_sc_parity.unwrap(), 2);
assert_eq!(backend_info.total_sets.len(), 1);
assert_eq!(backend_info.drives_per_set.len(), 2);
}
// Default-constructed MemStats is zeroed.
#[test]
fn test_mem_stats_default() {
let mem_stats = MemStats::default();
assert_eq!(mem_stats.alloc, 0);
assert_eq!(mem_stats.total_alloc, 0);
assert_eq!(mem_stats.mallocs, 0);
assert_eq!(mem_stats.frees, 0);
assert_eq!(mem_stats.heap_alloc, 0);
}
// Field values round-trip through a populated MemStats literal.
#[test]
fn test_mem_stats_with_values() {
let mem_stats = MemStats {
alloc: 1024000,
total_alloc: 5120000,
mallocs: 1000,
frees: 800,
heap_alloc: 2048000,
};
assert_eq!(mem_stats.alloc, 1024000);
assert_eq!(mem_stats.total_alloc, 5120000);
assert_eq!(mem_stats.mallocs, 1000);
assert_eq!(mem_stats.frees, 800);
assert_eq!(mem_stats.heap_alloc, 2048000);
}
// Default-constructed ServerProperties is empty/zeroed across every field.
#[test]
fn test_server_properties_default() {
let server_props = ServerProperties::default();
assert!(server_props.state.is_empty());
assert!(server_props.endpoint.is_empty());
assert!(server_props.scheme.is_empty());
assert_eq!(server_props.uptime, 0);
assert!(server_props.version.is_empty());
assert!(server_props.commit_id.is_empty());
assert!(server_props.network.is_empty());
assert!(server_props.disks.is_empty());
assert_eq!(server_props.pool_number, 0);
assert!(server_props.pool_numbers.is_empty());
assert_eq!(server_props.mem_stats.alloc, 0);
assert_eq!(server_props.max_procs, 0);
assert_eq!(server_props.num_cpu, 0);
assert!(server_props.runtime_version.is_empty());
assert!(server_props.rustfs_env_vars.is_empty());
}
// Field values round-trip through a fully-populated ServerProperties literal,
// including nested MemStats and the two HashMap fields.
#[test]
fn test_server_properties_with_values() {
let mut network = HashMap::new();
network.insert("interface".to_string(), "eth0".to_string());
network.insert("ip".to_string(), "192.168.1.100".to_string());
let mut env_vars = HashMap::new();
env_vars.insert("RUSTFS_ROOT_USER".to_string(), "admin".to_string());
env_vars.insert("RUSTFS_ROOT_PASSWORD".to_string(), "password".to_string());
let server_props = ServerProperties {
state: "online".to_string(),
endpoint: "http://localhost:9000".to_string(),
scheme: "http".to_string(),
uptime: 3600,
version: "1.0.0".to_string(),
commit_id: "abc123def456".to_string(),
network,
disks: vec![Disk::default()],
pool_number: 1,
pool_numbers: vec![0, 1],
mem_stats: MemStats {
alloc: 1024000,
total_alloc: 5120000,
mallocs: 1000,
frees: 800,
heap_alloc: 2048000,
},
max_procs: 8,
num_cpu: 4,
runtime_version: "1.70.0".to_string(),
rustfs_env_vars: env_vars,
};
assert_eq!(server_props.state, "online");
assert_eq!(server_props.endpoint, "http://localhost:9000");
assert_eq!(server_props.uptime, 3600);
assert_eq!(server_props.version, "1.0.0");
assert_eq!(server_props.network.len(), 2);
assert_eq!(server_props.disks.len(), 1);
assert_eq!(server_props.pool_number, 1);
assert_eq!(server_props.pool_numbers.len(), 2);
assert_eq!(server_props.mem_stats.alloc, 1024000);
assert_eq!(server_props.max_procs, 8);
assert_eq!(server_props.num_cpu, 4);
assert_eq!(server_props.rustfs_env_vars.len(), 2);
}
// Default-constructed Kms has all-None fields.
#[test]
fn test_kms_default() {
let kms = Kms::default();
assert!(kms.status.is_none());
assert!(kms.encrypt.is_none());
assert!(kms.decrypt.is_none());
assert!(kms.endpoint.is_none());
assert!(kms.version.is_none());
}
// Field values round-trip through a populated Kms literal.
#[test]
fn test_kms_with_values() {
let kms = Kms {
status: Some("enabled".to_string()),
encrypt: Some("AES256".to_string()),
decrypt: Some("AES256".to_string()),
endpoint: Some("https://kms.example.com".to_string()),
version: Some("1.0".to_string()),
};
assert_eq!(kms.status.unwrap(), "enabled");
assert_eq!(kms.encrypt.unwrap(), "AES256");
assert_eq!(kms.decrypt.unwrap(), "AES256");
assert_eq!(kms.endpoint.unwrap(), "https://kms.example.com");
assert_eq!(kms.version.unwrap(), "1.0");
}
// Ldap default and populated forms.
#[test]
fn test_ldap_default() {
let ldap = Ldap::default();
assert!(ldap.status.is_none());
}
#[test]
fn test_ldap_with_values() {
let ldap = Ldap {
status: Some("enabled".to_string()),
};
assert_eq!(ldap.status.unwrap(), "enabled");
}
// Status default and populated forms.
#[test]
fn test_status_default() {
let status = Status::default();
assert!(status.status.is_none());
}
#[test]
fn test_status_with_values() {
let status = Status {
status: Some("active".to_string()),
};
assert_eq!(status.status.unwrap(), "active");
}
// Default-constructed Services has all-None fields.
#[test]
fn test_services_default() {
let services = Services::default();
assert!(services.kms.is_none());
assert!(services.kms_status.is_none());
assert!(services.ldap.is_none());
assert!(services.logger.is_none());
assert!(services.audit.is_none());
assert!(services.notifications.is_none());
}
// Field values round-trip through a populated Services literal.
#[test]
fn test_services_with_values() {
let services = Services {
kms: Some(Kms::default()),
kms_status: Some(vec![Kms::default()]),
ldap: Some(Ldap::default()),
logger: Some(vec![HashMap::new()]),
audit: Some(vec![HashMap::new()]),
notifications: Some(vec![HashMap::new()]),
};
assert!(services.kms.is_some());
assert_eq!(services.kms_status.unwrap().len(), 1);
assert!(services.ldap.is_some());
assert_eq!(services.logger.unwrap().len(), 1);
assert_eq!(services.audit.unwrap().len(), 1);
assert_eq!(services.notifications.unwrap().len(), 1);
}
// Buckets default and populated forms.
#[test]
fn test_buckets_default() {
let buckets = Buckets::default();
assert_eq!(buckets.count, 0);
assert!(buckets.error.is_none());
}
#[test]
fn test_buckets_with_values() {
let buckets = Buckets {
count: 10,
error: Some("Access denied".to_string()),
};
assert_eq!(buckets.count, 10);
assert_eq!(buckets.error.unwrap(), "Access denied");
}
// The remaining count/error wrappers default to zero count and no error.
#[test]
fn test_objects_default() {
let objects = Objects::default();
assert_eq!(objects.count, 0);
assert!(objects.error.is_none());
}
#[test]
fn test_versions_default() {
let versions = Versions::default();
assert_eq!(versions.count, 0);
assert!(versions.error.is_none());
}
#[test]
fn test_delete_markers_default() {
let delete_markers = DeleteMarkers::default();
assert_eq!(delete_markers.count, 0);
assert!(delete_markers.error.is_none());
}
#[test]
fn test_usage_default() {
let usage = Usage::default();
assert_eq!(usage.size, 0);
assert!(usage.error.is_none());
}
// Default-constructed ErasureSetInfo is zeroed.
#[test]
fn test_erasure_set_info_default() {
let erasure_set = ErasureSetInfo::default();
assert_eq!(erasure_set.id, 0);
assert_eq!(erasure_set.raw_usage, 0);
assert_eq!(erasure_set.raw_capacity, 0);
assert_eq!(erasure_set.usage, 0);
assert_eq!(erasure_set.objects_count, 0);
assert_eq!(erasure_set.versions_count, 0);
assert_eq!(erasure_set.delete_markers_count, 0);
assert_eq!(erasure_set.heal_disks, 0);
}
// Field values round-trip through a populated ErasureSetInfo literal.
#[test]
fn test_erasure_set_info_with_values() {
let erasure_set = ErasureSetInfo {
id: 1,
raw_usage: 1000000000,
raw_capacity: 2000000000,
usage: 800000000,
objects_count: 10000,
versions_count: 15000,
delete_markers_count: 500,
heal_disks: 2,
};
assert_eq!(erasure_set.id, 1);
assert_eq!(erasure_set.raw_usage, 1000000000);
assert_eq!(erasure_set.raw_capacity, 2000000000);
assert_eq!(erasure_set.usage, 800000000);
assert_eq!(erasure_set.objects_count, 10000);
assert_eq!(erasure_set.versions_count, 15000);
assert_eq!(erasure_set.delete_markers_count, 500);
assert_eq!(erasure_set.heal_disks, 2);
}
// BackendType defaults to FsType.
#[test]
fn test_backend_type_default() {
let backend_type = BackendType::default();
assert!(matches!(backend_type, BackendType::FsType));
}
#[test]
fn test_backend_type_variants() {
let fs_type = BackendType::FsType;
let erasure_type = BackendType::ErasureType;
assert!(matches!(fs_type, BackendType::FsType));
assert!(matches!(erasure_type, BackendType::ErasureType));
}
// FSBackend carries its backend type tag.
#[test]
fn test_fs_backend_creation() {
let fs_backend = FSBackend {
backend_type: BackendType::FsType,
};
assert!(matches!(fs_backend.backend_type, BackendType::FsType));
}
// Default-constructed ErasureBackend is zeroed/empty (FsType by default).
#[test]
fn test_erasure_backend_default() {
let erasure_backend = ErasureBackend::default();
assert!(matches!(erasure_backend.backend_type, BackendType::FsType));
assert_eq!(erasure_backend.online_disks, 0);
assert_eq!(erasure_backend.offline_disks, 0);
assert!(erasure_backend.standard_sc_parity.is_none());
assert!(erasure_backend.rr_sc_parity.is_none());
assert!(erasure_backend.total_sets.is_empty());
assert!(erasure_backend.drives_per_set.is_empty());
}
// Field values round-trip through a populated ErasureBackend literal.
#[test]
fn test_erasure_backend_with_values() {
let erasure_backend = ErasureBackend {
backend_type: BackendType::ErasureType,
online_disks: 8,
offline_disks: 0,
standard_sc_parity: Some(2),
rr_sc_parity: Some(1),
total_sets: vec![2],
drives_per_set: vec![4, 4],
};
assert!(matches!(erasure_backend.backend_type, BackendType::ErasureType));
assert_eq!(erasure_backend.online_disks, 8);
assert_eq!(erasure_backend.offline_disks, 0);
assert_eq!(erasure_backend.standard_sc_parity.unwrap(), 2);
assert_eq!(erasure_backend.rr_sc_parity.unwrap(), 1);
assert_eq!(erasure_backend.total_sets.len(), 1);
assert_eq!(erasure_backend.drives_per_set.len(), 2);
}
#[test]
fn test_info_message_creation() {
let mut pools = HashMap::new();
let mut pool_sets = HashMap::new();
pool_sets.insert(0, ErasureSetInfo::default());
pools.insert(0, pool_sets);
let info_message = InfoMessage {
mode: Some("distributed".to_string()),
domain: Some(vec!["example.com".to_string()]),
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/policy.rs | crates/madmin/src/policy.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use time::OffsetDateTime;
/// A named policy document together with its lifecycle timestamps.
#[derive(Debug, Serialize, Deserialize)]
pub struct PolicyInfo {
pub policy_name: String,
/// The policy document itself, kept as raw JSON.
pub policy: Value,
/// Creation time; omitted from the serialized form when unknown.
#[serde(skip_serializing_if = "Option::is_none")]
pub create_date: Option<OffsetDateTime>,
/// Last-update time; omitted from the serialized form when unknown.
#[serde(skip_serializing_if = "Option::is_none")]
pub update_date: Option<OffsetDateTime>,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/trace.rs | crates/madmin/src/trace.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, time::Duration};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use crate::heal_commands::HealResultItem;
/// Bitmask of trace event categories; each constant below is one bit.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TraceType(u64);

impl TraceType {
    // Define some constants
    pub const OS: TraceType = TraceType(1 << 0);
    pub const STORAGE: TraceType = TraceType(1 << 1);
    pub const S3: TraceType = TraceType(1 << 2);
    pub const INTERNAL: TraceType = TraceType(1 << 3);
    pub const SCANNER: TraceType = TraceType(1 << 4);
    pub const DECOMMISSION: TraceType = TraceType(1 << 5);
    pub const HEALING: TraceType = TraceType(1 << 6);
    pub const BATCH_REPLICATION: TraceType = TraceType(1 << 7);
    pub const BATCH_KEY_ROTATION: TraceType = TraceType(1 << 8);
    pub const BATCH_EXPIRE: TraceType = TraceType(1 << 9);
    pub const REBALANCE: TraceType = TraceType(1 << 10);
    pub const REPLICATION_RESYNC: TraceType = TraceType(1 << 11);
    pub const BOOTSTRAP: TraceType = TraceType(1 << 12);
    pub const FTP: TraceType = TraceType(1 << 13);
    pub const ILM: TraceType = TraceType(1 << 14);
    // MetricsAll must be last.
    pub const ALL: TraceType = TraceType((1 << 15) - 1);

    /// Creates a trace-type mask from a raw bit pattern.
    pub fn new(t: u64) -> Self {
        Self(t)
    }
}

impl TraceType {
    /// Returns true when every bit set in `x` is also set in `self`.
    pub fn contains(&self, x: &TraceType) -> bool {
        (self.0 & x.0) == x.0
    }

    /// Returns true when `self` and `x` share at least one bit.
    pub fn overlaps(&self, x: &TraceType) -> bool {
        (self.0 & x.0) != 0
    }

    /// Returns true when exactly one trace category bit is set.
    ///
    /// Was previously an unimplemented `todo!()`, which panicked at runtime
    /// for every caller.
    pub fn single_type(&self) -> bool {
        self.0.count_ones() == 1
    }

    /// Ors the bits of `other` into `self`.
    pub fn merge(&mut self, other: &TraceType) {
        self.0 |= other.0
    }

    /// Ors the bits of `other` into `self` only when `b` is true.
    pub fn set_if(&mut self, b: bool, other: &TraceType) {
        if b {
            self.0 |= other.0
        }
    }

    /// Raw bitmask value.
    pub fn mask(&self) -> u64 {
        self.0
    }
}
/// A single trace event emitted by a server.
// NOTE(review): `duration` serializes with serde's default representation for
// std `Duration` ({secs, nanos}) under the "dur" key — confirm consumers
// expect that shape.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceInfo {
/// Raw trace category bitmask; interpret via `TraceType`.
#[serde(rename = "type")]
trace_type: u64,
#[serde(rename = "nodename")]
node_name: String,
#[serde(rename = "funcname")]
func_name: String,
#[serde(rename = "time")]
time: DateTime<Utc>,
#[serde(rename = "path")]
path: String,
#[serde(rename = "dur")]
duration: Duration,
#[serde(rename = "bytes", skip_serializing_if = "Option::is_none")]
bytes: Option<i64>,
#[serde(rename = "msg", skip_serializing_if = "Option::is_none")]
message: Option<String>,
#[serde(rename = "error", skip_serializing_if = "Option::is_none")]
error: Option<String>,
#[serde(rename = "custom", skip_serializing_if = "Option::is_none")]
custom: Option<HashMap<String, String>>,
/// HTTP request/response details, present for HTTP-level traces.
#[serde(rename = "http", skip_serializing_if = "Option::is_none")]
http: Option<TraceHTTPStats>,
/// Heal outcome, present for healing traces.
#[serde(rename = "healResult", skip_serializing_if = "Option::is_none")]
heal_result: Option<HealResultItem>,
}
impl TraceInfo {
/// The trace-category bitmask of this event (see `TraceType::mask`).
pub fn mask(&self) -> u64 {
TraceType::new(self.trace_type).mask()
}
}
/// Legacy trace wire format: a `TraceInfo` with the HTTP/storage/OS details
/// flattened alongside it rather than nested under "http".
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceInfoLegacy {
trace_info: TraceInfo,
#[serde(rename = "request")]
req_info: Option<TraceRequestInfo>,
#[serde(rename = "response")]
resp_info: Option<TraceResponseInfo>,
#[serde(rename = "stats")]
call_stats: Option<TraceCallStats>,
#[serde(rename = "storageStats")]
storage_stats: Option<StorageStats>,
#[serde(rename = "osStats")]
os_stats: Option<OSStats>,
}
/// Path and elapsed time of a storage-layer call (legacy trace format).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct StorageStats {
path: String,
duration: Duration,
}
/// Path and elapsed time of an OS-level call (legacy trace format).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct OSStats {
path: String,
duration: Duration,
}
/// HTTP request, response and timing details attached to a trace event.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceHTTPStats {
req_info: TraceRequestInfo,
resp_info: TraceResponseInfo,
call_stats: TraceCallStats,
}
/// Byte counts and latency figures for a single traced call.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceCallStats {
input_bytes: i32,
output_bytes: i32,
/// Total call latency.
latency: Duration,
/// Time until the first response byte was produced.
time_to_first_byte: Duration,
}
/// Details of the HTTP request side of a traced call.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceRequestInfo {
time: DateTime<Utc>,
/// HTTP protocol version string.
proto: String,
method: String,
#[serde(skip_serializing_if = "Option::is_none")]
path: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
raw_query: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
headers: Option<HashMap<String, String>>,
/// Raw request body bytes, if captured.
#[serde(skip_serializing_if = "Option::is_none")]
body: Option<Vec<u8>>,
/// Client address/identifier as seen by the server.
client: String,
}
/// Details of the HTTP response side of a traced call.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TraceResponseInfo {
time: DateTime<Utc>,
#[serde(skip_serializing_if = "Option::is_none")]
headers: Option<HashMap<String, String>>,
/// Raw response body bytes, if captured.
#[serde(skip_serializing_if = "Option::is_none")]
body: Option<Vec<u8>>,
#[serde(skip_serializing_if = "Option::is_none")]
status_code: Option<i32>,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/service_commands.rs | crates/madmin/src/service_commands.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, time::Duration};
use hyper::Uri;
use crate::{trace::TraceType, utils::parse_duration};
/// Selection of trace categories for a service-trace request, typically
/// populated from the request's query string (see `parse_params`).
#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct ServiceTraceOpts {
s3: bool,
internal: bool,
storage: bool,
os: bool,
scanner: bool,
decommission: bool,
healing: bool,
batch_replication: bool,
batch_key_rotation: bool,
batch_expire: bool,
/// Shorthand that enables all three batch_* categories at once.
batch_all: bool,
rebalance: bool,
replication_resync: bool,
bootstrap: bool,
ftp: bool,
ilm: bool,
/// When true, only failed calls are traced.
only_errors: bool,
/// Minimum call duration for a trace to be reported.
threshold: Duration,
}
#[allow(dead_code)]
impl ServiceTraceOpts {
/// Builds the `TraceType` bitmask corresponding to the enabled flags.
fn trace_types(&self) -> TraceType {
let mut tt = TraceType::default();
tt.set_if(self.s3, &TraceType::S3);
tt.set_if(self.internal, &TraceType::INTERNAL);
tt.set_if(self.storage, &TraceType::STORAGE);
tt.set_if(self.os, &TraceType::OS);
tt.set_if(self.scanner, &TraceType::SCANNER);
tt.set_if(self.decommission, &TraceType::DECOMMISSION);
tt.set_if(self.healing, &TraceType::HEALING);
// `batch_all` wins over the individual batch flags.
if self.batch_all {
tt.set_if(true, &TraceType::BATCH_REPLICATION);
tt.set_if(true, &TraceType::BATCH_KEY_ROTATION);
tt.set_if(true, &TraceType::BATCH_EXPIRE);
} else {
tt.set_if(self.batch_replication, &TraceType::BATCH_REPLICATION);
tt.set_if(self.batch_key_rotation, &TraceType::BATCH_KEY_ROTATION);
tt.set_if(self.batch_expire, &TraceType::BATCH_EXPIRE);
}
tt.set_if(self.rebalance, &TraceType::REBALANCE);
tt.set_if(self.replication_resync, &TraceType::REPLICATION_RESYNC);
tt.set_if(self.bootstrap, &TraceType::BOOTSTRAP);
tt.set_if(self.ftp, &TraceType::FTP);
tt.set_if(self.ilm, &TraceType::ILM);
tt
}
/// Populates the flags from the query string of `uri`.
///
/// A parameter counts as enabled only when its value is exactly "true";
/// a key without `=` is treated as "false".
/// NOTE(review): values are not URL-decoded — confirm callers never send
/// percent-encoded values (relevant for `threshold`).
/// NOTE(review): `all=true` sets s3/internal/storage/os, but the later
/// unconditional assignments to `storage` and `internal` overwrite that
/// expansion unless those keys are also present — looks unintended; kept
/// as-is pending confirmation.
/// NOTE(review): `batch_all` is never set here, only the individual
/// batch-* flags are parsed.
pub fn parse_params(&mut self, uri: &Uri) -> Result<(), String> {
// Flatten the query string into key -> value, defaulting bare keys
// to "false".
let query_pairs: HashMap<_, _> = uri
.query()
.unwrap_or("")
.split('&')
.filter_map(|pair| {
let mut split = pair.split('=');
let key = split.next()?.to_string();
let value = split.next().map(|v| v.to_string()).unwrap_or_else(|| "false".to_string());
Some((key, value))
})
.collect();
self.s3 = query_pairs.get("s3").is_some_and(|v| v == "true");
self.os = query_pairs.get("os").is_some_and(|v| v == "true");
self.scanner = query_pairs.get("scanner").is_some_and(|v| v == "true");
self.decommission = query_pairs.get("decommission").is_some_and(|v| v == "true");
self.healing = query_pairs.get("healing").is_some_and(|v| v == "true");
self.batch_replication = query_pairs.get("batch-replication").is_some_and(|v| v == "true");
self.batch_key_rotation = query_pairs.get("batch-keyrotation").is_some_and(|v| v == "true");
self.batch_expire = query_pairs.get("batch-expire").is_some_and(|v| v == "true");
// "all" expands to the four core categories.
if query_pairs.get("all").is_some_and(|v| v == "true") {
self.s3 = true;
self.internal = true;
self.storage = true;
self.os = true;
}
self.rebalance = query_pairs.get("rebalance").is_some_and(|v| v == "true");
self.storage = query_pairs.get("storage").is_some_and(|v| v == "true");
self.internal = query_pairs.get("internal").is_some_and(|v| v == "true");
self.only_errors = query_pairs.get("err").is_some_and(|v| v == "true");
self.replication_resync = query_pairs.get("replication-resync").is_some_and(|v| v == "true");
self.bootstrap = query_pairs.get("bootstrap").is_some_and(|v| v == "true");
self.ftp = query_pairs.get("ftp").is_some_and(|v| v == "true");
self.ilm = query_pairs.get("ilm").is_some_and(|v| v == "true");
// Optional minimum-duration filter, e.g. "threshold=5s".
if let Some(threshold) = query_pairs.get("threshold") {
let duration = parse_duration(threshold)?;
self.threshold = duration;
}
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/utils.rs | crates/madmin/src/utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::time::Duration;
/// Parses a human-readable duration string (e.g. `"3s"`, `"250ms"`, `"2h"`)
/// into a [`Duration`], mapping any parse failure to its display string.
pub fn parse_duration(s: &str) -> Result<Duration, String> {
    // Delegate the actual parsing to humantime; only the error type is
    // adapted so callers deal in plain strings.
    match humantime::parse_duration(s) {
        Ok(duration) => Ok(duration),
        Err(err) => Err(err.to_string()),
    }
}
#[cfg(test)]
mod test {
    use std::time::Duration;

    use super::parse_duration;

    /// Table-driven check of the supported duration suffixes.
    #[test]
    fn test_parse_dur() {
        let cases = [
            ("3s", Duration::from_secs(3)),
            ("3ms", Duration::from_millis(3)),
            ("3m", Duration::from_secs(3 * 60)),
            ("3h", Duration::from_secs(3 * 60 * 60)),
        ];
        for (input, want) in cases {
            let got = parse_duration(input);
            println!("{got:?}");
            assert_eq!(Ok(want), got);
        }
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/health.rs | crates/madmin/src/health.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
/// Fields shared by every per-node report payload: the node's address plus an
/// optional error message (omitted from JSON when `None`).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct NodeCommon {
    pub addr: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
/// Description of one CPU package on the host.
// NOTE(review): field names mirror /proc/cpuinfo-style data — confirm exact
// semantics against the producer before relying on them.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Cpu {
    pub vendor_id: String,
    pub family: String,
    pub model: String,
    pub stepping: i32,
    pub physical_id: String,
    pub model_name: String,
    pub mhz: f64,
    pub cache_size: i32,
    pub flags: Vec<String>,
    pub microcode: String,
    pub cores: u64,
}
/// Frequency/scaling statistics for one logical CPU; the optional counters
/// stay `None` when the host does not expose them.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CpuFreqStats {
    name: String,
    cpuinfo_current_frequency: Option<u64>,
    cpuinfo_minimum_frequency: Option<u64>,
    cpuinfo_maximum_frequency: Option<u64>,
    cpuinfo_transition_latency: Option<u64>,
    scaling_current_frequency: Option<u64>,
    scaling_minimum_frequency: Option<u64>,
    scaling_maximum_frequency: Option<u64>,
    available_governors: String,
    driver: String,
    governor: String,
    related_cpus: String,
    set_speed: String,
}
/// Aggregate CPU report for one node: common node info, the CPU list and
/// per-CPU frequency statistics.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Cpus {
    node_common: NodeCommon,
    cpus: Vec<Cpu>,
    cpu_freq_stats: Vec<CpuFreqStats>,
}
pub fn get_cpus() -> Cpus {
// todo
Cpus::default()
}
/// One disk partition: device identity, mount information and space/inode
/// capacity counters.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Partition {
    pub error: String,
    device: String,
    model: String,
    revision: String,
    mountpoint: String,
    fs_type: String,
    mount_options: String,
    space_total: u64,
    space_free: u64,
    inode_total: u64,
    inode_free: u64,
}
/// Per-node partition report: common node info plus the partition list.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Partitions {
    node_common: NodeCommon,
    partitions: Vec<Partition>,
}
pub fn get_partitions() -> Partitions {
Partitions::default()
}
/// Operating-system report for one node; currently only carries the common
/// node fields.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct OsInfo {
    node_common: NodeCommon,
}
pub fn get_os_info() -> OsInfo {
OsInfo::default()
}
/// Snapshot of one process on a node (pid, resource usage, identity).
// NOTE(review): the commented-out fields below (io_counters, mem_info, …)
// are placeholders from the original port; kept as-is.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct ProcInfo {
    node_common: NodeCommon,
    pid: i32,
    is_background: bool,
    cpu_percent: f64,
    children_pids: Vec<i32>,
    cmd_line: String,
    num_connections: usize,
    create_time: u64,
    cwd: String,
    exec_path: String,
    gids: Vec<i32>,
    // io_counters:
    is_running: bool,
    // mem_info:
    // mem_maps:
    mem_percent: f32,
    name: String,
    nice: i32,
    //num_ctx_switches:
    num_fds: i32,
    num_threads: i32,
    // page_faults:
    ppid: i32,
    status: String,
    tgid: i32,
    uids: Vec<i32>,
    username: String,
}
pub fn get_proc_info(_addr: &str) -> ProcInfo {
ProcInfo::default()
}
/// One system service entry: its name and current status string.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct SysService {
    name: String,
    status: String,
}
/// Per-node system-services report.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct SysServices {
    node_common: NodeCommon,
    services: Vec<SysService>,
}
pub fn get_sys_services(_add: &str) -> SysServices {
SysServices::default()
}
/// Per-node system configuration as a flat key/value map.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct SysConfig {
    node_common: NodeCommon,
    config: HashMap<String, String>,
}
pub fn get_sys_config(_addr: &str) -> SysConfig {
SysConfig::default()
}
/// Per-node list of system error strings.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct SysErrors {
    node_common: NodeCommon,
    errors: Vec<String>,
}
pub fn get_sys_errors(_add: &str) -> SysErrors {
SysErrors::default()
}
/// Per-node memory statistics; every counter is optional and omitted from
/// JSON when `None`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct MemInfo {
    node_common: NodeCommon,
    #[serde(skip_serializing_if = "Option::is_none")]
    total: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    used: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    free: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    available: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    shared: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    cache: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    buffers: Option<u64>,
    #[serde(rename = "swap_space_total", skip_serializing_if = "Option::is_none")]
    swap_space_total: Option<u64>,
    #[serde(rename = "swap_space_free", skip_serializing_if = "Option::is_none")]
    swap_space_free: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    limit: Option<u64>,
}
pub fn get_mem_info(_addr: &str) -> MemInfo {
MemInfo::default()
}
#[cfg(test)]
mod tests {
    //! Unit tests for the health report types: defaults, construction with
    //! explicit values, serde round-trips (including `skip_serializing_if`
    //! behavior), the stub collector functions, and struct-size sanity checks.
    use super::*;
    use serde_json;

    // --- NodeCommon -------------------------------------------------------

    #[test]
    fn test_node_common_creation() {
        let node = NodeCommon::default();
        assert!(node.addr.is_empty(), "Default addr should be empty");
        assert!(node.error.is_none(), "Default error should be None");
    }

    #[test]
    fn test_node_common_with_values() {
        let node = NodeCommon {
            addr: "127.0.0.1:9000".to_string(),
            error: Some("Connection failed".to_string()),
        };
        assert_eq!(node.addr, "127.0.0.1:9000");
        assert_eq!(node.error.unwrap(), "Connection failed");
    }

    #[test]
    fn test_node_common_serialization() {
        let node = NodeCommon {
            addr: "localhost:8080".to_string(),
            error: None,
        };
        let json = serde_json::to_string(&node).unwrap();
        assert!(json.contains("localhost:8080"));
        // `error` carries skip_serializing_if, so None must not appear at all.
        assert!(!json.contains("error"), "None error should be skipped in serialization");
    }

    #[test]
    fn test_node_common_deserialization() {
        let json = r#"{"addr":"test.example.com:9000","error":"Test error"}"#;
        let node: NodeCommon = serde_json::from_str(json).unwrap();
        assert_eq!(node.addr, "test.example.com:9000");
        assert_eq!(node.error.unwrap(), "Test error");
    }

    // --- Cpu / CpuFreqStats / Cpus ---------------------------------------

    #[test]
    fn test_cpu_default() {
        let cpu = Cpu::default();
        assert!(cpu.vendor_id.is_empty());
        assert!(cpu.family.is_empty());
        assert!(cpu.model.is_empty());
        assert_eq!(cpu.stepping, 0);
        assert_eq!(cpu.mhz, 0.0);
        assert_eq!(cpu.cache_size, 0);
        assert!(cpu.flags.is_empty());
        assert_eq!(cpu.cores, 0);
    }

    #[test]
    fn test_cpu_with_values() {
        let cpu = Cpu {
            vendor_id: "GenuineIntel".to_string(),
            family: "6".to_string(),
            model: "142".to_string(),
            stepping: 12,
            physical_id: "0".to_string(),
            model_name: "Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz".to_string(),
            mhz: 1800.0,
            cache_size: 8192,
            flags: vec!["fpu".to_string(), "vme".to_string(), "de".to_string()],
            microcode: "0xf0".to_string(),
            cores: 4,
        };
        assert_eq!(cpu.vendor_id, "GenuineIntel");
        assert_eq!(cpu.cores, 4);
        assert_eq!(cpu.flags.len(), 3);
        assert!(cpu.flags.contains(&"fpu".to_string()));
    }

    #[test]
    fn test_cpu_serialization() {
        let cpu = Cpu {
            vendor_id: "AMD".to_string(),
            model_name: "AMD Ryzen 7".to_string(),
            cores: 8,
            ..Default::default()
        };
        let json = serde_json::to_string(&cpu).unwrap();
        assert!(json.contains("AMD"));
        assert!(json.contains("AMD Ryzen 7"));
        assert!(json.contains("8"));
    }

    #[test]
    fn test_cpu_freq_stats_default() {
        let stats = CpuFreqStats::default();
        assert!(stats.name.is_empty());
        assert!(stats.cpuinfo_current_frequency.is_none());
        assert!(stats.available_governors.is_empty());
        assert!(stats.driver.is_empty());
    }

    #[test]
    fn test_cpus_structure() {
        let cpus = Cpus {
            node_common: NodeCommon {
                addr: "node1".to_string(),
                error: None,
            },
            cpus: vec![Cpu {
                vendor_id: "Intel".to_string(),
                cores: 4,
                ..Default::default()
            }],
            cpu_freq_stats: vec![CpuFreqStats {
                name: "cpu0".to_string(),
                cpuinfo_current_frequency: Some(2400),
                ..Default::default()
            }],
        };
        assert_eq!(cpus.node_common.addr, "node1");
        assert_eq!(cpus.cpus.len(), 1);
        assert_eq!(cpus.cpu_freq_stats.len(), 1);
        assert_eq!(cpus.cpus[0].cores, 4);
    }

    #[test]
    fn test_get_cpus_function() {
        // The collector is currently a stub, so everything must be empty.
        let cpus = get_cpus();
        assert!(cpus.node_common.addr.is_empty());
        assert!(cpus.cpus.is_empty());
        assert!(cpus.cpu_freq_stats.is_empty());
    }

    // --- Partition / Partitions ------------------------------------------

    #[test]
    fn test_partition_default() {
        let partition = Partition::default();
        assert!(partition.error.is_empty());
        assert!(partition.device.is_empty());
        assert_eq!(partition.space_total, 0);
        assert_eq!(partition.space_free, 0);
        assert_eq!(partition.inode_total, 0);
        assert_eq!(partition.inode_free, 0);
    }

    #[test]
    fn test_partition_with_values() {
        let partition = Partition {
            error: "".to_string(),
            device: "/dev/sda1".to_string(),
            model: "Samsung SSD".to_string(),
            revision: "1.0".to_string(),
            mountpoint: "/".to_string(),
            fs_type: "ext4".to_string(),
            mount_options: "rw,relatime".to_string(),
            space_total: 1000000000,
            space_free: 500000000,
            inode_total: 1000000,
            inode_free: 800000,
        };
        assert_eq!(partition.device, "/dev/sda1");
        assert_eq!(partition.fs_type, "ext4");
        assert_eq!(partition.space_total, 1000000000);
        assert_eq!(partition.space_free, 500000000);
    }

    #[test]
    fn test_partitions_structure() {
        let partitions = Partitions {
            node_common: NodeCommon {
                addr: "storage-node".to_string(),
                error: None,
            },
            partitions: vec![
                Partition {
                    device: "/dev/sda1".to_string(),
                    mountpoint: "/".to_string(),
                    space_total: 1000000,
                    space_free: 500000,
                    ..Default::default()
                },
                Partition {
                    device: "/dev/sdb1".to_string(),
                    mountpoint: "/data".to_string(),
                    space_total: 2000000,
                    space_free: 1500000,
                    ..Default::default()
                },
            ],
        };
        assert_eq!(partitions.partitions.len(), 2);
        assert_eq!(partitions.partitions[0].device, "/dev/sda1");
        assert_eq!(partitions.partitions[1].mountpoint, "/data");
    }

    #[test]
    fn test_get_partitions_function() {
        let partitions = get_partitions();
        assert!(partitions.node_common.addr.is_empty());
        assert!(partitions.partitions.is_empty());
    }

    // --- OsInfo -----------------------------------------------------------

    #[test]
    fn test_os_info_default() {
        let os_info = OsInfo::default();
        assert!(os_info.node_common.addr.is_empty());
        assert!(os_info.node_common.error.is_none());
    }

    #[test]
    fn test_get_os_info_function() {
        let os_info = get_os_info();
        assert!(os_info.node_common.addr.is_empty());
    }

    // --- ProcInfo ---------------------------------------------------------

    #[test]
    fn test_proc_info_default() {
        let proc_info = ProcInfo::default();
        assert_eq!(proc_info.pid, 0);
        assert!(!proc_info.is_background);
        assert_eq!(proc_info.cpu_percent, 0.0);
        assert!(proc_info.children_pids.is_empty());
        assert!(proc_info.cmd_line.is_empty());
        assert_eq!(proc_info.num_connections, 0);
        assert!(!proc_info.is_running);
        assert_eq!(proc_info.mem_percent, 0.0);
        assert!(proc_info.name.is_empty());
        assert_eq!(proc_info.nice, 0);
        assert_eq!(proc_info.num_fds, 0);
        assert_eq!(proc_info.num_threads, 0);
        assert_eq!(proc_info.ppid, 0);
        assert!(proc_info.status.is_empty());
        assert_eq!(proc_info.tgid, 0);
        assert!(proc_info.uids.is_empty());
        assert!(proc_info.username.is_empty());
    }

    #[test]
    fn test_proc_info_with_values() {
        let proc_info = ProcInfo {
            node_common: NodeCommon {
                addr: "worker-node".to_string(),
                error: None,
            },
            pid: 1234,
            is_background: true,
            cpu_percent: 15.5,
            children_pids: vec![1235, 1236],
            cmd_line: "rustfs --config /etc/rustfs.conf".to_string(),
            num_connections: 10,
            create_time: 1640995200,
            cwd: "/opt/rustfs".to_string(),
            exec_path: "/usr/bin/rustfs".to_string(),
            gids: vec![1000, 1001],
            is_running: true,
            mem_percent: 8.2,
            name: "rustfs".to_string(),
            nice: 0,
            num_fds: 25,
            num_threads: 4,
            ppid: 1,
            status: "running".to_string(),
            tgid: 1234,
            uids: vec![1000],
            username: "rustfs".to_string(),
        };
        assert_eq!(proc_info.pid, 1234);
        assert!(proc_info.is_background);
        assert_eq!(proc_info.cpu_percent, 15.5);
        assert_eq!(proc_info.children_pids.len(), 2);
        assert_eq!(proc_info.name, "rustfs");
        assert!(proc_info.is_running);
    }

    #[test]
    fn test_get_proc_info_function() {
        let proc_info = get_proc_info("127.0.0.1:9000");
        assert_eq!(proc_info.pid, 0);
        assert!(!proc_info.is_running);
    }

    // --- SysService / SysServices ----------------------------------------

    #[test]
    fn test_sys_service_default() {
        let service = SysService::default();
        assert!(service.name.is_empty());
        assert!(service.status.is_empty());
    }

    #[test]
    fn test_sys_service_with_values() {
        let service = SysService {
            name: "rustfs".to_string(),
            status: "active".to_string(),
        };
        assert_eq!(service.name, "rustfs");
        assert_eq!(service.status, "active");
    }

    #[test]
    fn test_sys_services_structure() {
        let services = SysServices {
            node_common: NodeCommon {
                addr: "service-node".to_string(),
                error: None,
            },
            services: vec![
                SysService {
                    name: "rustfs".to_string(),
                    status: "active".to_string(),
                },
                SysService {
                    name: "nginx".to_string(),
                    status: "inactive".to_string(),
                },
            ],
        };
        assert_eq!(services.services.len(), 2);
        assert_eq!(services.services[0].name, "rustfs");
        assert_eq!(services.services[1].status, "inactive");
    }

    #[test]
    fn test_get_sys_services_function() {
        let services = get_sys_services("localhost");
        assert!(services.node_common.addr.is_empty());
        assert!(services.services.is_empty());
    }

    // --- SysConfig --------------------------------------------------------

    #[test]
    fn test_sys_config_default() {
        let config = SysConfig::default();
        assert!(config.node_common.addr.is_empty());
        assert!(config.config.is_empty());
    }

    #[test]
    fn test_sys_config_with_values() {
        let mut config_map = HashMap::new();
        config_map.insert("max_connections".to_string(), "1000".to_string());
        config_map.insert("timeout".to_string(), "30".to_string());
        let config = SysConfig {
            node_common: NodeCommon {
                addr: "config-node".to_string(),
                error: None,
            },
            config: config_map,
        };
        assert_eq!(config.config.len(), 2);
        assert_eq!(config.config.get("max_connections").unwrap(), "1000");
        assert_eq!(config.config.get("timeout").unwrap(), "30");
    }

    #[test]
    fn test_get_sys_config_function() {
        let config = get_sys_config("192.168.1.100");
        assert!(config.node_common.addr.is_empty());
        assert!(config.config.is_empty());
    }

    // --- SysErrors --------------------------------------------------------

    #[test]
    fn test_sys_errors_default() {
        let errors = SysErrors::default();
        assert!(errors.node_common.addr.is_empty());
        assert!(errors.errors.is_empty());
    }

    #[test]
    fn test_sys_errors_with_values() {
        let errors = SysErrors {
            node_common: NodeCommon {
                addr: "error-node".to_string(),
                error: None,
            },
            errors: vec![
                "Connection timeout".to_string(),
                "Memory allocation failed".to_string(),
                "Disk full".to_string(),
            ],
        };
        assert_eq!(errors.errors.len(), 3);
        assert!(errors.errors.contains(&"Connection timeout".to_string()));
        assert!(errors.errors.contains(&"Disk full".to_string()));
    }

    #[test]
    fn test_get_sys_errors_function() {
        let errors = get_sys_errors("test-node");
        assert!(errors.node_common.addr.is_empty());
        assert!(errors.errors.is_empty());
    }

    // --- MemInfo ----------------------------------------------------------

    #[test]
    fn test_mem_info_default() {
        let mem_info = MemInfo::default();
        assert!(mem_info.node_common.addr.is_empty());
        assert!(mem_info.total.is_none());
        assert!(mem_info.used.is_none());
        assert!(mem_info.free.is_none());
        assert!(mem_info.available.is_none());
        assert!(mem_info.shared.is_none());
        assert!(mem_info.cache.is_none());
        assert!(mem_info.buffers.is_none());
        assert!(mem_info.swap_space_total.is_none());
        assert!(mem_info.swap_space_free.is_none());
        assert!(mem_info.limit.is_none());
    }

    #[test]
    fn test_mem_info_with_values() {
        let mem_info = MemInfo {
            node_common: NodeCommon {
                addr: "memory-node".to_string(),
                error: None,
            },
            total: Some(16777216000),
            used: Some(8388608000),
            free: Some(4194304000),
            available: Some(12582912000),
            shared: Some(1048576000),
            cache: Some(2097152000),
            buffers: Some(524288000),
            swap_space_total: Some(4294967296),
            swap_space_free: Some(2147483648),
            limit: Some(16777216000),
        };
        assert_eq!(mem_info.total.unwrap(), 16777216000);
        assert_eq!(mem_info.used.unwrap(), 8388608000);
        assert_eq!(mem_info.free.unwrap(), 4194304000);
        assert_eq!(mem_info.swap_space_total.unwrap(), 4294967296);
    }

    #[test]
    fn test_mem_info_serialization() {
        let mem_info = MemInfo {
            node_common: NodeCommon {
                addr: "test-node".to_string(),
                error: None,
            },
            total: Some(8000000000),
            used: Some(4000000000),
            free: None,
            available: Some(6000000000),
            ..Default::default()
        };
        let json = serde_json::to_string(&mem_info).unwrap();
        assert!(json.contains("8000000000"));
        assert!(json.contains("4000000000"));
        assert!(json.contains("6000000000"));
        assert!(!json.contains("free"), "None values should be skipped");
    }

    #[test]
    fn test_get_mem_info_function() {
        let mem_info = get_mem_info("memory-server");
        assert!(mem_info.node_common.addr.is_empty());
        assert!(mem_info.total.is_none());
        assert!(mem_info.used.is_none());
    }

    // --- Cross-cutting sanity checks --------------------------------------

    #[test]
    fn test_all_structures_debug_format() {
        let node = NodeCommon::default();
        let cpu = Cpu::default();
        let partition = Partition::default();
        let proc_info = ProcInfo::default();
        let service = SysService::default();
        let mem_info = MemInfo::default();
        // Test that all structures can be formatted with Debug
        assert!(!format!("{node:?}").is_empty());
        assert!(!format!("{cpu:?}").is_empty());
        assert!(!format!("{partition:?}").is_empty());
        assert!(!format!("{proc_info:?}").is_empty());
        assert!(!format!("{service:?}").is_empty());
        assert!(!format!("{mem_info:?}").is_empty());
    }

    #[test]
    fn test_memory_efficiency() {
        // Test that structures don't use excessive memory
        assert!(std::mem::size_of::<NodeCommon>() < 1000);
        assert!(std::mem::size_of::<Cpu>() < 2000);
        assert!(std::mem::size_of::<Partition>() < 2000);
        assert!(std::mem::size_of::<MemInfo>() < 1000);
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/heal_commands.rs | crates/madmin/src/heal_commands.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
// Alias kept as a plain string so heal item types serialize as bare strings.
pub type HealItemType = String;
/// Identity and state of one drive involved in a heal operation.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct HealDriveInfo {
    pub uuid: String,
    pub endpoint: String,
    pub state: String,
}
/// Drive list wrapper used for the before/after views of a heal result.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Infos {
    #[serde(rename = "drives")]
    pub drives: Vec<HealDriveInfo>,
}
/// Outcome of healing a single item: what was healed, its erasure-coding
/// layout, and the drive states before and after the operation.
// NOTE(review): the wire names (camelCase, and "resultId" for result_index)
// look chosen for compatibility with an existing admin API — confirm against
// the consumer before changing any rename.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct HealResultItem {
    #[serde(rename = "resultId")]
    pub result_index: usize,
    #[serde(rename = "type")]
    pub heal_item_type: HealItemType,
    #[serde(rename = "bucket")]
    pub bucket: String,
    #[serde(rename = "object")]
    pub object: String,
    #[serde(rename = "versionId")]
    pub version_id: String,
    #[serde(rename = "detail")]
    pub detail: String,
    #[serde(rename = "parityBlocks")]
    pub parity_blocks: usize,
    #[serde(rename = "dataBlocks")]
    pub data_blocks: usize,
    #[serde(rename = "diskCount")]
    pub disk_count: usize,
    #[serde(rename = "setCount")]
    pub set_count: usize,
    // Drive states before and after the heal.
    #[serde(rename = "before")]
    pub before: Infos,
    #[serde(rename = "after")]
    pub after: Infos,
    #[serde(rename = "objectSize")]
    pub object_size: usize,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/metrics.rs | crates/madmin/src/metrics.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use crate::health::MemInfo;
/// Accumulated statistics for one kind of timed action: occurrence count,
/// total accumulated time in nanoseconds (`acc_time_ns` on the wire) and
/// total bytes involved.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct TimedAction {
    #[serde(rename = "count")]
    pub count: u64,
    #[serde(rename = "acc_time_ns")]
    pub acc_time: u64,
    #[serde(rename = "bytes")]
    pub bytes: u64,
}
impl TimedAction {
pub fn merge(&mut self, other: &TimedAction) {
self.count += other.count;
self.acc_time += other.acc_time;
self.bytes += other.bytes;
}
}
/// Cumulative block-device I/O counters for a node's disks.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DiskIOStats {
    #[serde(rename = "read_ios")]
    pub read_ios: u64,
    #[serde(rename = "read_merges")]
    pub read_merges: u64,
    #[serde(rename = "read_sectors")]
    pub read_sectors: u64,
    #[serde(rename = "read_ticks")]
    pub read_ticks: u64,
    #[serde(rename = "write_ios")]
    pub write_ios: u64,
    #[serde(rename = "write_merges")]
    pub write_merges: u64,
    #[serde(rename = "write_sectors")]
    pub write_sectors: u64,
    #[serde(rename = "write_ticks")]
    pub write_ticks: u64,
    #[serde(rename = "current_ios")]
    pub current_ios: u64,
    #[serde(rename = "total_ticks")]
    pub total_ticks: u64,
    #[serde(rename = "req_ticks")]
    pub req_ticks: u64,
    #[serde(rename = "discard_ios")]
    pub discard_ios: u64,
    #[serde(rename = "discard_merges")]
    pub discard_merges: u64,
    // NOTE(review): "discard_secotrs" is misspelled on the wire; it is kept
    // as-is because changing the serde rename would break the JSON format —
    // confirm it matches the peer implementation.
    #[serde(rename = "discard_secotrs")]
    pub discard_sectors: u64,
    #[serde(rename = "discard_ticks")]
    pub discard_ticks: u64,
    #[serde(rename = "flush_ios")]
    pub flush_ios: u64,
    #[serde(rename = "flush_ticks")]
    pub flush_ticks: u64,
}
/// Aggregated disk metrics for a node: disk counts, lifetime operation
/// counters, last-minute operation stats and raw I/O counters.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DiskMetric {
    // Timestamp of the snapshot this metric was collected at.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    #[serde(rename = "n_disks")]
    pub n_disks: usize,
    #[serde(rename = "offline")]
    pub offline: usize,
    #[serde(rename = "healing")]
    pub healing: usize,
    // Lifetime operation counts keyed by operation name.
    #[serde(rename = "life_time_ops")]
    pub life_time_ops: HashMap<String, u64>,
    #[serde(rename = "last_minute")]
    pub last_minute: Operations,
    #[serde(rename = "iostats")]
    pub io_stats: DiskIOStats,
}
impl DiskMetric {
    /// Folds `other` into `self`: keeps the most recent collection timestamp,
    /// sums the disk counts, and accumulates lifetime and last-minute
    /// per-operation counters.
    ///
    /// NOTE(review): `io_stats` is deliberately(?) not merged here — confirm
    /// before relying on aggregated I/O counters from merged metrics.
    pub fn merge(&mut self, other: &DiskMetric) {
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        self.n_disks += other.n_disks;
        self.offline += other.offline;
        self.healing += other.healing;
        for (k, v) in other.life_time_ops.iter() {
            *self.life_time_ops.entry(k.clone()).or_insert(0) += v;
        }
        for (k, v) in other.last_minute.operations.iter() {
            self.last_minute.operations.entry(k.clone()).or_default().merge(v);
        }
    }
}
/// Last-minute activity: timed-action stats keyed by action name, with a
/// separate map for ILM actions.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct LastMinute {
    #[serde(rename = "actions")]
    pub actions: HashMap<String, TimedAction>,
    #[serde(rename = "ilm")]
    pub ilm: HashMap<String, TimedAction>,
}
/// Scanner activity metrics: cycle progress, lifetime/last-minute operation
/// counters and the paths currently being scanned.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
    // Timestamp of the snapshot this metric was collected at.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    #[serde(rename = "current_cycle")]
    pub current_cycle: u64,
    #[serde(rename = "current_started")]
    pub current_started: DateTime<Utc>,
    #[serde(rename = "cycle_complete_times")]
    pub cycles_completed_at: Vec<DateTime<Utc>>,
    #[serde(rename = "ongoing_buckets")]
    pub ongoing_buckets: usize,
    #[serde(rename = "life_time_ops")]
    pub life_time_ops: HashMap<String, u64>,
    #[serde(rename = "ilm_ops")]
    pub life_time_ilm: HashMap<String, u64>,
    #[serde(rename = "last_minute")]
    pub last_minute: LastMinute,
    #[serde(rename = "active")]
    pub active_paths: Vec<String>,
}
impl ScannerMetrics {
    /// Folds `other` into `self`.
    ///
    /// Timestamps and cycle counters keep the most recent/largest value;
    /// per-operation counters are summed; active paths are concatenated and
    /// kept sorted.
    pub fn merge(&mut self, other: &Self) {
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        if self.ongoing_buckets < other.ongoing_buckets {
            self.ongoing_buckets = other.ongoing_buckets;
        }
        // A newer cycle on `other` supersedes our cycle bookkeeping entirely.
        if self.current_cycle < other.current_cycle {
            self.current_cycle = other.current_cycle;
            self.cycles_completed_at = other.cycles_completed_at.clone();
            self.current_started = other.current_started;
        }
        if other.cycles_completed_at.len() > self.cycles_completed_at.len() {
            self.cycles_completed_at = other.cycles_completed_at.clone();
        }
        // Bug fix: the previous implementation first cloned
        // `other.life_time_ops` wholesale when `self`'s map was empty and then
        // added the same values again in the loop below, double-counting every
        // operation on the first merge. The entry API alone is sufficient.
        for (k, v) in other.life_time_ops.iter() {
            *self.life_time_ops.entry(k.clone()).or_default() += v;
        }
        for (k, v) in other.last_minute.actions.iter() {
            self.last_minute.actions.entry(k.clone()).or_default().merge(v);
        }
        for (k, v) in other.life_time_ilm.iter() {
            *self.life_time_ilm.entry(k.clone()).or_default() += v;
        }
        for (k, v) in other.last_minute.ilm.iter() {
            self.last_minute.ilm.entry(k.clone()).or_default().merge(v);
        }
        // Keep the union of active paths in deterministic (sorted) order.
        self.active_paths.extend(other.active_paths.clone());
        self.active_paths.sort();
    }
}
/// Top-level realtime metrics payload. Every section is optional and is
/// omitted from the JSON output when absent.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Metrics {
    #[serde(rename = "scanner", skip_serializing_if = "Option::is_none")]
    pub scanner: Option<ScannerMetrics>,
    #[serde(rename = "disk", skip_serializing_if = "Option::is_none")]
    pub disk: Option<DiskMetric>,
    #[serde(rename = "os", skip_serializing_if = "Option::is_none")]
    pub os: Option<OsMetrics>,
    #[serde(rename = "batchJobs", skip_serializing_if = "Option::is_none")]
    pub batch_jobs: Option<BatchJobMetrics>,
    #[serde(rename = "siteResync", skip_serializing_if = "Option::is_none")]
    pub site_resync: Option<SiteResyncMetrics>,
    #[serde(rename = "net", skip_serializing_if = "Option::is_none")]
    pub net: Option<NetMetrics>,
    #[serde(rename = "mem", skip_serializing_if = "Option::is_none")]
    pub mem: Option<MemMetrics>,
    #[serde(rename = "cpu", skip_serializing_if = "Option::is_none")]
    pub cpu: Option<CPUMetrics>,
    #[serde(rename = "rpc", skip_serializing_if = "Option::is_none")]
    pub rpc: Option<RPCMetrics>,
}
impl Metrics {
pub fn merge(&mut self, other: &Self) {
if let Some(scanner) = other.scanner.as_ref() {
match self.scanner {
Some(ref mut s_scanner) => s_scanner.merge(scanner),
None => self.scanner = Some(scanner.clone()),
}
}
if let Some(disk) = other.disk.as_ref() {
match self.disk {
Some(ref mut s_disk) => s_disk.merge(disk),
None => self.disk = Some(disk.clone()),
}
}
if let Some(os) = other.os.as_ref() {
match self.os {
Some(ref mut s_os) => s_os.merge(os),
None => self.os = Some(os.clone()),
}
}
if let Some(batch_jobs) = other.batch_jobs.as_ref() {
match self.batch_jobs {
Some(ref mut s_batch_jobs) => s_batch_jobs.merge(batch_jobs),
None => self.batch_jobs = Some(batch_jobs.clone()),
}
}
if let Some(site_resync) = other.site_resync.as_ref() {
match self.site_resync {
Some(ref mut s_site_resync) => s_site_resync.merge(site_resync),
None => self.site_resync = Some(site_resync.clone()),
}
}
if let Some(net) = other.net.as_ref() {
match self.net {
Some(ref mut s_net) => s_net.merge(net),
None => self.net = Some(net.clone()),
}
}
if let Some(rpc) = other.rpc.as_ref() {
match self.rpc {
Some(ref mut s_rpc) => s_rpc.merge(rpc),
None => self.rpc = Some(rpc.clone()),
}
}
}
}
/// RPC connection metrics: connection/stream/message counters, ping latency,
/// and optional per-destination / per-caller breakdowns (each entry is itself
/// an `RPCMetrics`).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct RPCMetrics {
    #[serde(rename = "collectedAt")]
    pub collected_at: DateTime<Utc>,
    pub connected: i32,
    #[serde(rename = "reconnectCount")]
    pub reconnect_count: i32,
    pub disconnected: i32,
    #[serde(rename = "outgoingStreams")]
    pub outgoing_streams: i32,
    #[serde(rename = "incomingStreams")]
    pub incoming_streams: i32,
    #[serde(rename = "outgoingBytes")]
    pub outgoing_bytes: i64,
    #[serde(rename = "incomingBytes")]
    pub incoming_bytes: i64,
    #[serde(rename = "outgoingMessages")]
    pub outgoing_messages: i64,
    #[serde(rename = "incomingMessages")]
    pub incoming_messages: i64,
    pub out_queue: i32,
    #[serde(rename = "lastPongTime")]
    pub last_pong_time: DateTime<Utc>,
    #[serde(rename = "lastPingMS")]
    pub last_ping_ms: f64,
    #[serde(rename = "maxPingDurMS")]
    pub max_ping_dur_ms: f64, // Maximum across all merged entries.
    #[serde(rename = "lastConnectTime")]
    pub last_connect_time: DateTime<Utc>,
    #[serde(rename = "byDestination", skip_serializing_if = "Option::is_none")]
    pub by_destination: Option<HashMap<String, RPCMetrics>>,
    #[serde(rename = "byCaller", skip_serializing_if = "Option::is_none")]
    pub by_caller: Option<HashMap<String, RPCMetrics>>,
}
impl RPCMetrics {
    /// Merges `other` into `self`.
    ///
    /// Timestamps keep the most recent value, counters are summed, and the
    /// last-ping latency travels with whichever entry saw the latest pong.
    /// The nested per-destination / per-caller maps are merged recursively.
    pub fn merge(&mut self, other: &Self) {
        // Keep the most recent collection / connection timestamps.
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        if self.last_connect_time < other.last_connect_time {
            self.last_connect_time = other.last_connect_time;
        }
        // Counters are additive across merged entries.
        self.connected += other.connected;
        self.disconnected += other.disconnected;
        self.reconnect_count += other.reconnect_count;
        self.outgoing_streams += other.outgoing_streams;
        self.incoming_streams += other.incoming_streams;
        self.outgoing_bytes += other.outgoing_bytes;
        self.incoming_bytes += other.incoming_bytes;
        self.outgoing_messages += other.outgoing_messages;
        self.incoming_messages += other.incoming_messages;
        self.out_queue += other.out_queue;
        // The ping latency is only meaningful together with its pong time.
        if self.last_pong_time < other.last_pong_time {
            self.last_pong_time = other.last_pong_time;
            self.last_ping_ms = other.last_ping_ms;
        }
        if self.max_ping_dur_ms < other.max_ping_dur_ms {
            self.max_ping_dur_ms = other.max_ping_dur_ms;
        }
        // Recursively merge the nested maps. `or_insert_with` keeps the
        // clone lazy, so existing entries are merged in place without an
        // up-front allocation (previously `or_insert(value.clone())`
        // cloned even when the key was already present).
        if let Some(by_destination) = other.by_destination.as_ref() {
            match self.by_destination.as_mut() {
                Some(s_by_dest) => {
                    for (key, value) in by_destination {
                        s_by_dest
                            .entry(key.clone())
                            .and_modify(|v| v.merge(value))
                            .or_insert_with(|| value.clone());
                    }
                }
                None => self.by_destination = Some(by_destination.clone()),
            }
        }
        if let Some(by_caller) = other.by_caller.as_ref() {
            match self.by_caller.as_mut() {
                Some(s_by_caller) => {
                    for (key, value) in by_caller {
                        s_by_caller
                            .entry(key.clone())
                            .and_modify(|v| v.merge(value))
                            .or_insert_with(|| value.clone());
                    }
                }
                None => self.by_caller = Some(by_caller.clone()),
            }
        }
    }
}
/// CPU statistics placeholder; currently carries no fields.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct CPUMetrics {}
/// Network interface counters for one node at a point in time.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct NetMetrics {
    /// When this snapshot was collected.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    /// Name of the interface the stats belong to.
    #[serde(rename = "interfaceName")]
    pub interface_name: String,
    /// Cumulative RX/TX counters for the interface.
    #[serde(rename = "netstats")]
    pub net_stats: NetDevLine,
}
impl NetMetrics {
    /// Accumulates the interface counters from `other` into `self`,
    /// keeping the most recent collection timestamp.
    pub fn merge(&mut self, other: &Self) {
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        // Bind both counter blocks once instead of repeating the field path.
        let dst = &mut self.net_stats;
        let src = &other.net_stats;
        dst.rx_bytes += src.rx_bytes;
        dst.rx_packets += src.rx_packets;
        dst.rx_errors += src.rx_errors;
        dst.rx_dropped += src.rx_dropped;
        dst.rx_fifo += src.rx_fifo;
        dst.rx_frame += src.rx_frame;
        dst.rx_compressed += src.rx_compressed;
        dst.rx_multicast += src.rx_multicast;
        dst.tx_bytes += src.tx_bytes;
        dst.tx_packets += src.tx_packets;
        dst.tx_errors += src.tx_errors;
        dst.tx_dropped += src.tx_dropped;
        dst.tx_fifo += src.tx_fifo;
        dst.tx_collisions += src.tx_collisions;
        dst.tx_carrier += src.tx_carrier;
        dst.tx_compressed += src.tx_compressed;
    }
}
/// One row of `/proc/net/dev`-style cumulative interface counters.
///
/// The former `#[serde(rename = "...")]` attributes each repeated the Rust
/// field name verbatim, so they were no-ops; they have been removed. The
/// serialized representation is unchanged.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct NetDevLine {
    /// The name of the interface.
    pub name: String,
    /// Cumulative count of bytes received.
    pub rx_bytes: u64,
    /// Cumulative count of packets received.
    pub rx_packets: u64,
    /// Cumulative count of receive errors encountered.
    pub rx_errors: u64,
    /// Cumulative count of packets dropped while receiving.
    pub rx_dropped: u64,
    /// Cumulative count of FIFO buffer errors.
    pub rx_fifo: u64,
    /// Cumulative count of packet framing errors.
    pub rx_frame: u64,
    /// Cumulative count of compressed packets received by the device driver.
    pub rx_compressed: u64,
    /// Cumulative count of multicast frames received by the device driver.
    pub rx_multicast: u64,
    /// Cumulative count of bytes transmitted.
    pub tx_bytes: u64,
    /// Cumulative count of packets transmitted.
    pub tx_packets: u64,
    /// Cumulative count of transmit errors encountered.
    pub tx_errors: u64,
    /// Cumulative count of packets dropped while transmitting.
    pub tx_dropped: u64,
    /// Cumulative count of FIFO buffer errors.
    pub tx_fifo: u64,
    /// Cumulative count of collisions detected on the interface.
    pub tx_collisions: u64,
    /// Cumulative count of carrier losses detected by the device driver.
    pub tx_carrier: u64,
    /// Cumulative count of compressed packets transmitted by the device driver.
    pub tx_compressed: u64,
}
/// Memory statistics for one node at a point in time.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct MemMetrics {
    /// When this snapshot was collected.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    /// Raw memory information for the node.
    #[serde(rename = "memInfo")]
    pub info: MemInfo,
}
/// Snapshot of a site-level resync operation's progress.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct SiteResyncMetrics {
    /// When this snapshot was collected.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    /// Current status of the resync, if known.
    #[serde(rename = "resyncStatus", skip_serializing_if = "Option::is_none")]
    pub resync_status: Option<String>,
    /// When the resync run started.
    #[serde(rename = "startTime")]
    pub start_time: DateTime<Utc>,
    /// Last time progress was recorded.
    #[serde(rename = "lastUpdate")]
    pub last_update: DateTime<Utc>,
    /// Total number of buckets covered by the resync.
    #[serde(rename = "numBuckets")]
    pub num_buckets: i64,
    /// Identifier of this resync run.
    #[serde(rename = "resyncID")]
    pub resync_id: String,
    /// Deployment identifier the resync targets.
    #[serde(rename = "deplID")]
    pub depl_id: String,
    /// Bytes successfully replicated so far.
    #[serde(rename = "completedReplicationSize")]
    pub replicated_size: i64,
    /// Objects successfully replicated so far.
    #[serde(rename = "replicationCount")]
    pub replicated_count: i64,
    /// Bytes that failed to replicate.
    #[serde(rename = "failedReplicationSize")]
    pub failed_size: i64,
    /// Objects that failed to replicate.
    #[serde(rename = "failedReplicationCount")]
    pub failed_count: i64,
    /// Buckets whose resync failed.
    #[serde(rename = "failedBuckets")]
    pub failed_buckets: Vec<String>,
    /// Bucket currently being processed, if any.
    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    /// Object currently being processed, if any.
    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
}
impl SiteResyncMetrics {
    /// Adopts `other` wholesale when it carries a newer snapshot;
    /// otherwise `self` is left untouched.
    pub fn merge(&mut self, other: &Self) {
        if other.collected_at > self.collected_at {
            // `clone_from` reuses `self`'s existing allocations where possible.
            self.clone_from(other);
        }
    }
}
/// Snapshot of all currently tracked batch jobs, keyed by job ID.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct BatchJobMetrics {
    /// When this snapshot was collected.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    /// Per-job metrics keyed by job identifier.
    #[serde(rename = "Jobs")]
    pub jobs: HashMap<String, JobMetric>,
}
impl BatchJobMetrics {
    /// Merges job snapshots from `other` into `self`.
    ///
    /// Jobs in `other` overwrite same-keyed jobs in `self`. An `other`
    /// with no jobs is ignored entirely — its timestamp is deliberately
    /// not taken either, matching the original behavior.
    pub fn merge(&mut self, other: &BatchJobMetrics) {
        if other.jobs.is_empty() {
            return;
        }
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        // Clone per entry instead of cloning the entire map up front
        // (the previous `other.jobs.clone().into_iter()` allocated a full
        // temporary HashMap just to move entries out of it).
        self.jobs
            .extend(other.jobs.iter().map(|(k, v)| (k.clone(), v.clone())));
    }
}
/// Progress and outcome of one batch job.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct JobMetric {
    /// Unique identifier of the job.
    #[serde(rename = "jobID")]
    pub job_id: String,
    /// Kind of job (e.g. replicate / key-rotate / expire).
    #[serde(rename = "jobType")]
    pub job_type: String,
    /// When the job started.
    #[serde(rename = "startTime")]
    pub start_time: DateTime<Utc>,
    /// Last time the job reported progress.
    #[serde(rename = "lastUpdate")]
    pub last_update: DateTime<Utc>,
    /// Number of retry attempts performed so far.
    #[serde(rename = "retryAttempts")]
    pub retry_attempts: i32,
    /// Whether the job has finished.
    pub complete: bool,
    /// Whether the job ended in failure.
    pub failed: bool,
    // Specific job type data — at most one of the following is populated,
    // depending on `job_type`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub replicate: Option<ReplicateInfo>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key_rotate: Option<KeyRotationInfo>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expired: Option<ExpirationInfo>,
}
/// Progress counters for a replication batch job.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ReplicateInfo {
    /// Bucket most recently processed.
    #[serde(rename = "lastBucket")]
    pub bucket: String,
    /// Object most recently processed.
    #[serde(rename = "lastObject")]
    pub object: String,
    /// Objects replicated so far.
    #[serde(rename = "objects")]
    pub objects: i64,
    /// Objects that failed to replicate.
    #[serde(rename = "objectsFailed")]
    pub objects_failed: i64,
    /// Bytes transferred so far.
    #[serde(rename = "bytesTransferred")]
    pub bytes_transferred: i64,
    /// Bytes that failed to transfer.
    #[serde(rename = "bytesFailed")]
    pub bytes_failed: i64,
}
/// Progress counters for an object-expiration batch job.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExpirationInfo {
    /// Bucket most recently processed.
    #[serde(rename = "lastBucket")]
    pub bucket: String,
    /// Object most recently processed.
    #[serde(rename = "lastObject")]
    pub object: String,
    /// Objects expired so far.
    #[serde(rename = "objects")]
    pub objects: i64,
    /// Objects that failed to expire.
    #[serde(rename = "objectsFailed")]
    pub objects_failed: i64,
}
/// Progress counters for a key-rotation batch job.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct KeyRotationInfo {
    /// Bucket most recently processed.
    #[serde(rename = "lastBucket")]
    pub bucket: String,
    /// Object most recently processed.
    #[serde(rename = "lastObject")]
    pub object: String,
    /// Objects rotated so far.
    #[serde(rename = "objects")]
    pub objects: i64,
    /// Objects that failed rotation.
    #[serde(rename = "objectsFailed")]
    pub objects_failed: i64,
}
/// Aggregated realtime metrics across a set of hosts.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct RealtimeMetrics {
    /// Collection errors encountered while gathering metrics.
    #[serde(rename = "errors")]
    pub errors: Vec<String>,
    /// Hosts contributing to this aggregate (kept sorted after merges).
    #[serde(rename = "hosts")]
    pub hosts: Vec<String>,
    /// Metrics aggregated over all hosts.
    #[serde(rename = "aggregated")]
    pub aggregated: Metrics,
    /// Per-host metric breakdown.
    #[serde(rename = "by_host")]
    pub by_host: HashMap<String, Metrics>,
    /// Per-disk metric breakdown.
    #[serde(rename = "by_disk")]
    pub by_disk: HashMap<String, DiskMetric>,
    /// True when this is the final message of a metrics stream.
    #[serde(rename = "final")]
    pub finally: bool,
}
impl RealtimeMetrics {
    /// Folds another (typically per-node) snapshot into this one.
    ///
    /// `other` is taken by value, so its collections are moved rather than
    /// cloned. Host and disk entries from `other` overwrite same-keyed
    /// entries in `self`; the host list stays sorted.
    pub fn merge(&mut self, other: Self) {
        if !other.errors.is_empty() {
            self.errors.extend(other.errors);
        }
        // `HashMap::extend` inserts-or-overwrites per key — equivalent to the
        // previous `entry().or_default() = v` / `and_modify().or_insert()`
        // dances, minus the redundant `k.to_string()` (k is already a String)
        // and `v.clone()` allocations.
        self.by_host.extend(other.by_host);
        self.hosts.extend(other.hosts);
        self.aggregated.merge(&other.aggregated);
        // Keep the host list sorted for stable output.
        self.hosts.sort();
        self.by_disk.extend(other.by_disk);
    }
}
/// Operating-system level operation statistics.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct OsMetrics {
    /// When this snapshot was collected.
    #[serde(rename = "collected")]
    pub collected_at: DateTime<Utc>,
    /// Lifetime operation counts keyed by operation name.
    #[serde(rename = "life_time_ops")]
    pub life_time_ops: HashMap<String, u64>,
    /// Timed statistics for the most recent minute.
    #[serde(rename = "last_minute")]
    pub last_minute: Operations,
}
impl OsMetrics {
    /// Combines OS-level operation statistics from `other` into `self`.
    pub fn merge(&mut self, other: &Self) {
        if self.collected_at < other.collected_at {
            self.collected_at = other.collected_at;
        }
        // Lifetime totals are additive per operation name.
        for (name, count) in &other.life_time_ops {
            *self.life_time_ops.entry(name.clone()).or_insert(0) += count;
        }
        // Last-minute windows are merged per operation.
        for (name, action) in &other.last_minute.operations {
            self.last_minute
                .operations
                .entry(name.clone())
                .or_default()
                .merge(action);
        }
    }
}
/// Last-minute operation timings, keyed by operation name.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Operations {
    #[serde(rename = "operations")]
    pub operations: HashMap<String, TimedAction>,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/madmin/src/net/mod.rs | crates/madmin/src/net/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use crate::health::NodeCommon;
#[cfg(target_os = "linux")]
pub fn get_net_info(addr: &str, iface: &str) -> NetInfo {
let mut ni = NetInfo::default();
ni.node_common.addr = addr.to_string();
ni.interface = iface.to_string();
ni
}
#[cfg(not(target_os = "linux"))]
// Fallback for non-Linux targets: returns a stub NetInfo whose `error`
// field records that collection is not implemented on this platform.
pub fn get_net_info(addr: &str, iface: &str) -> NetInfo {
    NetInfo {
        node_common: NodeCommon {
            addr: addr.to_owned(),
            error: Some("Not implemented for non-linux platforms".to_string()),
        },
        interface: iface.to_owned(),
        ..Default::default()
    }
}
/// Network interface information reported per node.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct NetInfo {
    // Shared per-node fields (address, optional error).
    node_common: NodeCommon,
    // Interface name this record describes.
    interface: String,
    // NOTE(review): `driver` and `firmware_version` are never populated by
    // `get_net_info` on any platform in this file — confirm whether they are
    // filled elsewhere or are placeholders.
    driver: String,
    firmware_version: String,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/protos/src/lib.rs | crates/protos/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[allow(unsafe_code)]
mod generated;
use proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_MTLS_IDENTITY, GLOBAL_ROOT_CERT, evict_connection};
use std::{error::Error, time::Duration};
use tonic::{
Request, Status,
metadata::MetadataValue,
service::interceptor::InterceptedService,
transport::{Certificate, Channel, ClientTlsConfig, Endpoint},
};
use tracing::{debug, error, warn};
/// Alias for the fully-specified NodeService client type (channel wrapped in
/// the auth-token interceptor); prefer this over spelling out the generics.
pub type NodeServiceClientType = NodeServiceClient<
    InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
>;
pub use generated::*;
/// Maximum gRPC message size accepted by the server. Default 100 MB.
pub const DEFAULT_GRPC_SERVER_MESSAGE_LEN: usize = 100 * 1024 * 1024;
/// Timeout for connection establishment - reduced for faster failure detection
const CONNECT_TIMEOUT_SECS: u64 = 3;
/// TCP keepalive interval - how often to probe the connection
const TCP_KEEPALIVE_SECS: u64 = 10;
/// HTTP/2 keepalive interval - application-layer heartbeat
const HTTP2_KEEPALIVE_INTERVAL_SECS: u64 = 5;
/// HTTP/2 keepalive timeout - how long to wait for PING ACK
const HTTP2_KEEPALIVE_TIMEOUT_SECS: u64 = 3;
/// Overall RPC timeout - maximum time for any single RPC operation
const RPC_TIMEOUT_SECS: u64 = 30;
/// Default HTTPS prefix for rustfs
/// This is the default HTTPS prefix for rustfs.
/// It is used to identify HTTPS URLs.
/// Default value: https://
const RUSTFS_HTTPS_PREFIX: &str = "https://";
/// Creates a new gRPC channel with optimized keepalive settings for cluster resilience.
///
/// This function is designed to detect dead peers quickly:
/// - Fast connection timeout (3s instead of default 30s+)
/// - Aggressive TCP keepalive (10s)
/// - HTTP/2 PING every 5s, timeout at 3s
/// - Overall RPC timeout of 30s (reduced from 60s)
///
/// On success the channel is also inserted into `GLOBAL_CONN_MAP` so
/// subsequent callers reuse it.
async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
    debug!("Creating new gRPC channel to: {}", addr);
    let mut connector = Endpoint::from_shared(addr.to_string())?
        // Fast connection timeout for dead peer detection
        .connect_timeout(Duration::from_secs(CONNECT_TIMEOUT_SECS))
        // TCP-level keepalive - OS will probe connection
        .tcp_keepalive(Some(Duration::from_secs(TCP_KEEPALIVE_SECS)))
        // HTTP/2 PING frames for application-layer health check
        .http2_keep_alive_interval(Duration::from_secs(HTTP2_KEEPALIVE_INTERVAL_SECS))
        // How long to wait for PING ACK before considering connection dead
        .keep_alive_timeout(Duration::from_secs(HTTP2_KEEPALIVE_TIMEOUT_SECS))
        // Send PINGs even when no active streams (critical for idle connections)
        .keep_alive_while_idle(true)
        // Overall timeout for any RPC - fail fast on unresponsive peers
        .timeout(Duration::from_secs(RPC_TIMEOUT_SECS));
    let root_cert = GLOBAL_ROOT_CERT.read().await;
    if addr.starts_with(RUSTFS_HTTPS_PREFIX) {
        // NOTE(review): when no custom root certificate is configured, the
        // `is_none` branch below enables system-root TLS, but control then
        // falls into the `else` of the following `if let` and returns an
        // error — making the system-roots configuration unreachable dead
        // code. Confirm whether HTTPS with no custom CA should fall back to
        // system roots (as the debug log claims) or hard-fail as it does now.
        if root_cert.is_none() {
            debug!("No custom root certificate configured; using system roots for TLS: {}", addr);
            // If no custom root cert is configured, try to use system roots.
            connector = connector.tls_config(ClientTlsConfig::new())?;
        }
        if let Some(cert_pem) = root_cert.as_ref() {
            let ca = Certificate::from_pem(cert_pem);
            // Derive the hostname from the HTTPS URL for TLS hostname verification.
            let domain = addr
                .trim_start_matches(RUSTFS_HTTPS_PREFIX)
                .split('/')
                .next()
                .unwrap_or("")
                .split(':')
                .next()
                .unwrap_or("");
            let tls = if !domain.is_empty() {
                let mut cfg = ClientTlsConfig::new().ca_certificate(ca).domain_name(domain);
                // Attach a client identity when mTLS is configured.
                let mtls_identity = GLOBAL_MTLS_IDENTITY.read().await;
                if let Some(id) = mtls_identity.as_ref() {
                    let identity = tonic::transport::Identity::from_pem(id.cert_pem.clone(), id.key_pem.clone());
                    cfg = cfg.identity(identity);
                }
                cfg
            } else {
                // Fallback: configure TLS without explicit domain if parsing fails.
                ClientTlsConfig::new().ca_certificate(ca)
            };
            connector = connector.tls_config(tls)?;
            debug!("Configured TLS with custom root certificate for: {}", addr);
        } else {
            return Err(std::io::Error::other(
                "HTTPS requested but no trusted roots are configured. Provide tls/ca.crt (or enable system roots via RUSTFS_TRUST_SYSTEM_CA=true)."
            ).into());
        }
    }
    let channel = connector.connect().await?;
    // Cache the new connection
    {
        GLOBAL_CONN_MAP.write().await.insert(addr.to_string(), channel.clone());
    }
    debug!("Successfully created and cached gRPC channel to: {}", addr);
    Ok(channel)
}
/// Get a gRPC client for the NodeService with robust connection handling.
///
/// This function implements several resilience features:
/// 1. Connection caching for performance
/// 2. Automatic eviction of stale/dead connections on error
/// 3. Optimized keepalive settings for fast dead peer detection
/// 4. Reduced timeouts to fail fast when peers are unresponsive
///
/// # Connection Lifecycle
/// - Cached connections are reused for subsequent calls
/// - On any connection error, the cached connection is evicted
/// - Fresh connections are established with aggressive keepalive settings
///
/// # Cluster Power-Off Recovery
/// When a node experiences abrupt power-off:
/// 1. The cached connection will fail on next use
/// 2. The connection is automatically evicted from cache
/// 3. Subsequent calls will attempt fresh connections
/// 4. If node is still down, connection will fail fast (3s timeout)
pub async fn node_service_time_out_client(
addr: &String,
) -> Result<
NodeServiceClient<
InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
>,
Box<dyn Error>,
> {
debug!("Obtaining gRPC client for NodeService at: {}", addr);
let token_str = rustfs_credentials::get_grpc_token();
let token: MetadataValue<_> = token_str.parse().map_err(|e| {
error!(
"Failed to parse gRPC auth token into MetadataValue: {:?}; env={} token_len={} token_prefix={}",
e,
rustfs_credentials::ENV_GRPC_AUTH_TOKEN,
token_str.len(),
token_str.chars().take(2).collect::<String>(),
);
e
})?;
// Try to get cached channel
let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };
let channel = match cached_channel {
Some(channel) => {
debug!("Using cached gRPC channel for: {}", addr);
channel
}
None => {
// No cached connection, create new one
create_new_channel(addr).await?
}
};
Ok(NodeServiceClient::with_interceptor(
channel,
Box::new(move |mut req: Request<()>| {
req.metadata_mut().insert("authorization", token.clone());
Ok(req)
}),
))
}
/// Get a gRPC client with automatic connection eviction on failure.
///
/// This is the preferred method for cluster operations as it ensures
/// that failed connections are automatically cleaned up from the cache.
///
/// Returns the client and the address for later eviction if needed.
pub async fn node_service_client_with_eviction(
addr: &String,
) -> Result<
(
NodeServiceClient<
InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
>,
String,
),
Box<dyn Error>,
> {
let client = node_service_time_out_client(addr).await?;
Ok((client, addr.clone()))
}
/// Evict a connection from the cache after a failure.
/// This should be called when an RPC fails to ensure fresh connections are tried.
pub async fn evict_failed_connection(addr: &str) {
    // Warn-level so repeated evictions of the same peer are visible in logs.
    warn!("Evicting failed gRPC connection: {}", addr);
    // Delegates to the shared connection cache in rustfs_common.
    evict_connection(addr).await;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/protos/src/main.rs | crates/protos/src/main.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{cmp, env, fs, io::Write, path::Path, process::Command};
/// Convenience alias for boxed dynamic errors used throughout this tool.
type AnyError = Box<dyn std::error::Error>;
/// Expected version of `protoc` compiler.
const VERSION_PROTOBUF: Version = Version(33, 1, 0); // 33.1.0
/// Expected version of `flatc` compiler.
const VERSION_FLATBUFFERS: Version = Version(25, 9, 23); // 25.9.23
/// Build protos if the major version of `flatc` or `protoc` is greater
/// or lesser than the expected version.
const ENV_BUILD_PROTOS: &str = "BUILD_PROTOS";
/// Path of `flatc` binary.
const ENV_FLATC_PATH: &str = "FLATC_PATH";
// Entry point of the proto/flatbuffers code generator: checks tool versions,
// regenerates Rust sources from node.proto and the flatbuffers schemas, then
// rewrites the generated mod.rs files and runs cargo fmt.
fn main() -> Result<(), AnyError> {
    // Only regenerate when the installed protoc's major version is >= pinned.
    let version = protobuf_compiler_version()?;
    let need_compile = match version.compare_ext(&VERSION_PROTOBUF) {
        Ok(cmp::Ordering::Greater) => true,
        Ok(cmp::Ordering::Equal) => true,
        Ok(_) => {
            if let Some(version_err) = Version::build_error_message(&version, &VERSION_PROTOBUF) {
                println!("cargo:warning=Tool `protoc` {version_err}, skip compiling.");
            }
            false
        }
        Err(version_err) => {
            // Version mismatch is reported as a warning rather than an error.
            // return Err(format!("Tool `protoc` {version_err}, please update it.").into());
            println!("cargo:warning=Tool `protoc` {version_err}, please update it.");
            false
        }
    };
    if !need_compile {
        println!("no need to compile protos.{need_compile}");
        return Ok(());
    }
    // path of proto file
    let project_root_dir = env::current_dir()?.join("crates/protos/src");
    let proto_dir = project_root_dir.clone();
    println!("proto_dir: {proto_dir:?}");
    let proto_files = &["node.proto"];
    let proto_out_dir = project_root_dir.join("generated").join("proto_gen");
    let flatbuffer_out_dir = project_root_dir.join("generated").join("flatbuffers_generated");
    // let descriptor_set_path = PathBuf::from(env::var(ENV_OUT_DIR).unwrap()).join("proto-descriptor.bin");
    // Generate the tonic/prost Rust sources from node.proto.
    tonic_prost_build::configure()
        .out_dir(proto_out_dir)
        // .file_descriptor_set_path(descriptor_set_path)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile_well_known_types(true)
        .bytes(".")
        .emit_rerun_if_changed(false)
        .compile_protos(proto_files, &[proto_dir.to_string_lossy().as_ref()])
        .map_err(|e| format!("Failed to generate protobuf file: {e}."))?;
    // protos/gen/mod.rs — write the license header plus module declaration.
    let generated_mod_rs_path = project_root_dir.join("generated").join("proto_gen").join("mod.rs");
    let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?;
    writeln!(
        &mut generated_mod_rs,
        r#"// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License."#
    )?;
    writeln!(&mut generated_mod_rs, "\n")?;
    writeln!(&mut generated_mod_rs, "pub mod node_service;")?;
    generated_mod_rs.flush()?;
    // generated/mod.rs — the umbrella module re-exporting everything.
    let generated_mod_rs_path = project_root_dir.join("generated").join("mod.rs")
;
    let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?;
    writeln!(
        &mut generated_mod_rs,
        r#"// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License."#
    )?;
    writeln!(&mut generated_mod_rs, "\n")?;
    writeln!(&mut generated_mod_rs, "#![allow(unused_imports)]")?;
    writeln!(&mut generated_mod_rs, "\n")?;
    writeln!(&mut generated_mod_rs, "#![allow(clippy::all)]")?;
    writeln!(&mut generated_mod_rs, "pub mod proto_gen;")?;
    generated_mod_rs.flush()?;
    // flatc can be overridden through FLATC_PATH; default to PATH lookup.
    let flatc_path = match env::var(ENV_FLATC_PATH) {
        Ok(path) => {
            println!("cargo:warning=Specified flatc path by environment {ENV_FLATC_PATH}={path}");
            path
        }
        Err(_) => "flatc".to_string(),
    };
    match compile_flatbuffers_models(
        &mut generated_mod_rs,
        &flatc_path,
        proto_dir.clone(),
        flatbuffer_out_dir.clone(),
        vec!["models"],
    ) {
        Ok(_) => {
            println!("Successfully compiled flatbuffers models.");
        }
        Err(e) => {
            return Err(format!("Failed to compile flatbuffers models: {e}").into());
        }
    }
    // Normalize formatting of the freshly generated sources.
    fmt();
    Ok(())
}
/// Compile proto/**.fbs files.
///
/// Writes module declarations for each schema into both the passed
/// `generated_mod_rs` and a fresh `mod.rs` inside `out_rust_dir`, and — when
/// the installed `flatc` major version is new enough — invokes `flatc` to
/// regenerate the Rust sources for each schema in `mod_names`.
fn compile_flatbuffers_models<P: AsRef<Path>, S: AsRef<str>>(
    generated_mod_rs: &mut fs::File,
    flatc_path: &str,
    in_fbs_dir: P,
    out_rust_dir: P,
    mod_names: Vec<S>,
) -> Result<(), AnyError> {
    let version = flatbuffers_compiler_version(flatc_path)?;
    let need_compile = match version.compare_ext(&VERSION_FLATBUFFERS) {
        Ok(cmp::Ordering::Greater) => true,
        Ok(cmp::Ordering::Equal) => true,
        Ok(_) => {
            if let Some(version_err) = Version::build_error_message(&version, &VERSION_FLATBUFFERS) {
                println!("cargo:warning=Tool `{flatc_path}` {version_err}, skip compiling.");
            }
            false
        }
        Err(version_err) => {
            return Err(format!("Tool `{flatc_path}` {version_err}, please update it.").into());
        }
    };
    let fbs_dir = in_fbs_dir.as_ref();
    let rust_dir = out_rust_dir.as_ref();
    fs::create_dir_all(rust_dir)?;
    // $rust_dir/mod.rs — license header for the generated submodule file.
    let mut sub_mod_rs = fs::File::create(rust_dir.join("mod.rs"))?;
    writeln!(
        &mut sub_mod_rs,
        r#"// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License."#
    )?;
    writeln!(&mut sub_mod_rs, "\n")?;
    writeln!(generated_mod_rs)?;
    writeln!(generated_mod_rs, "mod flatbuffers_generated;")?;
    for mod_name in mod_names.iter() {
        let mod_name = mod_name.as_ref();
        // Declare and re-export each schema module even when compilation is
        // skipped, so the checked-in generated sources keep building.
        writeln!(generated_mod_rs, "pub use flatbuffers_generated::{mod_name}::*;")?;
        writeln!(&mut sub_mod_rs, "pub mod {mod_name};")?;
        if need_compile {
            let fbs_file_path = fbs_dir.join(format!("{mod_name}.fbs"));
            let output = Command::new(flatc_path)
                .arg("-o")
                .arg(rust_dir)
                .arg("--rust")
                .arg("--gen-mutable")
                .arg("--gen-onefile")
                .arg("--gen-name-strings")
                .arg("--filename-suffix")
                .arg("")
                .arg(&fbs_file_path)
                .output()
                .map_err(|e| format!("Failed to execute process of flatc: {e}"))?;
            if !output.status.success() {
                return Err(format!(
                    "Failed to generate file '{}' by flatc(path: '{flatc_path}'): {}.",
                    fbs_file_path.display(),
                    String::from_utf8_lossy(&output.stderr),
                )
                .into());
            }
        }
    }
    generated_mod_rs.flush()?;
    sub_mod_rs.flush()?;
    Ok(())
}
/// Run command `flatc --version` to get the version of flatc.
///
/// ```ignore
/// $ flatc --version
/// flatc version 24.3.25
/// ```
fn flatbuffers_compiler_version(flatc_path: impl AsRef<Path>) -> Result<Version, String> {
    let exe = flatc_path.as_ref().display().to_string();
    Version::try_get(exe, |output| {
        let trimmed = output.trim();
        // Expected stdout shape: "flatc version <X.Y.Z>".
        trimmed
            .strip_prefix("flatc version ")
            .map(str::to_string)
            .ok_or_else(|| format!("Failed to get flatc version: {trimmed}"))
    })
}
/// A `major.minor.patch` semantic version triple.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Version(u32, u32, u32);
impl Version {
    /// Runs `{exe} --version` and parses its stdout into a [`Version`] via
    /// `output_to_version_string`.
    ///
    /// # Errors
    /// Returns a descriptive message when the command cannot be executed,
    /// prints non-UTF-8 output, exits non-zero, or yields an unparsable
    /// version string.
    fn try_get<F: FnOnce(&str) -> Result<String, String>>(exe: String, output_to_version_string: F) -> Result<Self, String> {
        let cmd = format!("{exe} --version");
        let output = std::process::Command::new(exe)
            .arg("--version")
            .output()
            .map_err(|e| format!("Failed to execute `{cmd}`: {e}",))?;
        let output_utf8 = String::from_utf8(output.stdout).map_err(|e| {
            let output_lossy = String::from_utf8_lossy(e.as_bytes());
            format!("Command `{cmd}` returned invalid UTF-8('{output_lossy}'): {e}")
        })?;
        if output.status.success() {
            let version_string = output_to_version_string(&output_utf8)?;
            Ok(version_string.parse::<Self>()?)
        } else {
            Err(format!("Failed to get version by command `{cmd}`: {output_utf8}"))
        }
    }

    /// Returns a human-readable mismatch message when the major versions
    /// differ, or `None` when they match.
    fn build_error_message(version: &Self, expected: &Self) -> Option<String> {
        match version.compare_major_version(expected) {
            cmp::Ordering::Equal => None,
            cmp::Ordering::Greater => Some(format!("version({version}) is greater than version({expected})")),
            cmp::Ordering::Less => Some(format!("version({version}) is lesser than version({expected})")),
        }
    }

    /// Compares major versions, honoring the `BUILD_PROTOS` env override.
    ///
    /// When `BUILD_PROTOS` is set to a non-empty value other than `"0"`,
    /// an installed major version *older* than `expected_version` becomes a
    /// hard error instead of a skip.
    fn compare_ext(&self, expected_version: &Self) -> Result<cmp::Ordering, String> {
        let ordering = self.compare_major_version(expected_version);
        match env::var(ENV_BUILD_PROTOS) {
            Ok(build_protos) if !build_protos.is_empty() && build_protos != "0" => match ordering {
                // BUG FIX: previously only `Greater` was accepted here, so an
                // exactly-matching major version fell through to an
                // "Unknown version comparison error". Equal versions are fine.
                cmp::Ordering::Greater | cmp::Ordering::Equal => Ok(ordering),
                cmp::Ordering::Less => Err(Self::build_error_message(self, expected_version)
                    .unwrap_or_else(|| "Unknown version comparison error".to_string())),
            },
            // Env var unset, empty, or "0": report the ordering, never error.
            _ => Ok(ordering),
        }
    }

    /// Orders two versions by their major component only.
    fn compare_major_version(&self, other: &Self) -> cmp::Ordering {
        self.0.cmp(&other.0)
    }
}
impl std::str::FromStr for Version {
    type Err = String;

    /// Parses up to three dot-separated numeric components; components that
    /// are absent default to 0 (so `"27.0"` parses as `27.0.0`). Components
    /// beyond the third are ignored.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = [0_u32; 3];
        // `zip` stops at the shorter side, mirroring the old `take(3)`.
        for (slot, text) in parts.iter_mut().zip(s.split('.')) {
            *slot = text.parse().map_err(|e| format!("Failed to parse version string '{s}': {e}"))?;
        }
        Ok(Version(parts[0], parts[1], parts[2]))
    }
}
impl std::fmt::Display for Version {
    /// Formats the version as `major.minor.patch`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}.{}.{}", self.0, self.1, self.2)
    }
}
/// Run command `protoc --version` to get the version of protoc.
///
/// ```ignore
/// $ protoc --version
/// libprotoc 27.0
/// ```
fn protobuf_compiler_version() -> Result<Version, String> {
    Version::try_get("protoc".to_string(), |output| {
        let trimmed = output.trim();
        // Expected stdout shape: "libprotoc <X.Y[.Z]>".
        trimmed
            .strip_prefix("libprotoc ")
            .map(str::to_string)
            .ok_or_else(|| format!("Failed to get protoc version: {trimmed}"))
    })
}
/// Runs `cargo fmt -p rustfs-protos` over the freshly generated sources,
/// reporting (but never failing on) errors.
fn fmt() {
    match Command::new("cargo").args(["fmt", "-p", "rustfs-protos"]).status() {
        Ok(status) if status.success() => println!("cargo fmt executed successfully."),
        Ok(status) => eprintln!("cargo fmt failed with status: {status:?}"),
        Err(e) => eprintln!("Failed to execute cargo fmt: {e}"),
    }
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/protos/src/generated/mod.rs | crates/protos/src/generated/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(clippy::all)]
pub mod proto_gen;
mod flatbuffers_generated;
pub use flatbuffers_generated::models::*;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/protos/src/generated/flatbuffers_generated/mod.rs | crates/protos/src/generated/flatbuffers_generated/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod models;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.