repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/rio/src/compress_reader.rs | crates/rio/src/compress_reader.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::compress_index::{Index, TryGetIndex};
use crate::{EtagResolvable, HashReaderDetector};
use crate::{HashReaderMut, Reader};
use pin_project_lite::pin_project;
use rustfs_utils::compress::{CompressionAlgorithm, compress_block, decompress_block};
use rustfs_utils::{put_uvarint, uvarint};
use std::cmp::min;
use std::io::{self};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
// use tracing::error;
const COMPRESS_TYPE_COMPRESSED: u8 = 0x00;
const COMPRESS_TYPE_UNCOMPRESSED: u8 = 0x01;
const COMPRESS_TYPE_END: u8 = 0xFF;
const DEFAULT_BLOCK_SIZE: usize = 1 << 20; // 1MB
const HEADER_LEN: usize = 8;
pin_project! {
#[derive(Debug)]
/// A reader wrapper that compresses data on the fly using DEFLATE algorithm.
pub struct CompressReader<R> {
#[pin]
pub inner: R,
buffer: Vec<u8>,
pos: usize,
done: bool,
block_size: usize,
compression_algorithm: CompressionAlgorithm,
index: Index,
written: usize,
uncomp_written: usize,
temp_buffer: Vec<u8>,
temp_pos: usize,
}
}
impl<R> CompressReader<R>
where
R: Reader,
{
pub fn new(inner: R, compression_algorithm: CompressionAlgorithm) -> Self {
Self {
inner,
buffer: Vec::new(),
pos: 0,
done: false,
compression_algorithm,
block_size: DEFAULT_BLOCK_SIZE,
index: Index::new(),
written: 0,
uncomp_written: 0,
temp_buffer: Vec::with_capacity(DEFAULT_BLOCK_SIZE), // Pre-allocate capacity
temp_pos: 0,
}
}
/// Optional: allow users to customize block_size
pub fn with_block_size(inner: R, block_size: usize, compression_algorithm: CompressionAlgorithm) -> Self {
Self {
inner,
buffer: Vec::new(),
pos: 0,
done: false,
compression_algorithm,
block_size,
index: Index::new(),
written: 0,
uncomp_written: 0,
temp_buffer: Vec::with_capacity(block_size),
temp_pos: 0,
}
}
}
impl<R> TryGetIndex for CompressReader<R>
where
R: Reader,
{
fn try_get_index(&self) -> Option<&Index> {
Some(&self.index)
}
}
impl<R> AsyncRead for CompressReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
let mut this = self.project();
// Copy from buffer first if available
if *this.pos < this.buffer.len() {
let to_copy = min(buf.remaining(), this.buffer.len() - *this.pos);
buf.put_slice(&this.buffer[*this.pos..*this.pos + to_copy]);
*this.pos += to_copy;
if *this.pos == this.buffer.len() {
this.buffer.clear();
*this.pos = 0;
}
return Poll::Ready(Ok(()));
}
if *this.done {
return Poll::Ready(Ok(()));
}
// Fill temporary buffer
while this.temp_buffer.len() < *this.block_size {
let remaining = *this.block_size - this.temp_buffer.len();
let mut temp = vec![0u8; remaining];
let mut temp_buf = ReadBuf::new(&mut temp);
match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
Poll::Pending => {
if this.temp_buffer.is_empty() {
return Poll::Pending;
}
break;
}
Poll::Ready(Ok(())) => {
let n = temp_buf.filled().len();
if n == 0 {
if this.temp_buffer.is_empty() {
return Poll::Ready(Ok(()));
}
break;
}
this.temp_buffer.extend_from_slice(&temp[..n]);
}
Poll::Ready(Err(e)) => {
// error!("CompressReader poll_read: read inner error: {e}");
return Poll::Ready(Err(e));
}
}
}
// Process accumulated data
if !this.temp_buffer.is_empty() {
let uncompressed_data = &this.temp_buffer;
let out = build_compressed_block(uncompressed_data, *this.compression_algorithm);
*this.written += out.len();
*this.uncomp_written += uncompressed_data.len();
if let Err(e) = this.index.add(*this.written as i64, *this.uncomp_written as i64) {
// error!("CompressReader index add error: {e}");
return Poll::Ready(Err(e));
}
*this.buffer = out;
*this.pos = 0;
this.temp_buffer.truncate(0); // More efficient way to clear
let to_copy = min(buf.remaining(), this.buffer.len());
buf.put_slice(&this.buffer[..to_copy]);
*this.pos += to_copy;
if *this.pos == this.buffer.len() {
this.buffer.clear();
*this.pos = 0;
}
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
}
impl<R> EtagResolvable for CompressReader<R>
where
R: EtagResolvable,
{
fn try_resolve_etag(&mut self) -> Option<String> {
self.inner.try_resolve_etag()
}
}
impl<R> HashReaderDetector for CompressReader<R>
where
R: HashReaderDetector,
{
fn is_hash_reader(&self) -> bool {
self.inner.is_hash_reader()
}
fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
self.inner.as_hash_reader_mut()
}
}
pin_project! {
/// A reader wrapper that decompresses data on the fly using DEFLATE algorithm.
/// Header format:
/// - First byte: compression type (00 = compressed, 01 = uncompressed, FF = end)
/// - Bytes 1-3: length of compressed data (little-endian)
/// - Bytes 4-7: CRC32 checksum of uncompressed data (little-endian)
#[derive(Debug)]
pub struct DecompressReader<R> {
#[pin]
pub inner: R,
buffer: Vec<u8>,
buffer_pos: usize,
finished: bool,
// Fields for saving header read progress across polls
header_buf: [u8; 8],
header_read: usize,
header_done: bool,
// Fields for saving compressed block read progress across polls
compressed_buf: Option<Vec<u8>>,
compressed_read: usize,
compressed_len: usize,
compression_algorithm: CompressionAlgorithm,
}
}
impl<R> DecompressReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
pub fn new(inner: R, compression_algorithm: CompressionAlgorithm) -> Self {
Self {
inner,
buffer: Vec::new(),
buffer_pos: 0,
finished: false,
header_buf: [0u8; 8],
header_read: 0,
header_done: false,
compressed_buf: None,
compressed_read: 0,
compressed_len: 0,
compression_algorithm,
}
}
}
impl<R> AsyncRead for DecompressReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
let mut this = self.project();
// Copy from buffer first if available
if *this.buffer_pos < this.buffer.len() {
let to_copy = min(buf.remaining(), this.buffer.len() - *this.buffer_pos);
buf.put_slice(&this.buffer[*this.buffer_pos..*this.buffer_pos + to_copy]);
*this.buffer_pos += to_copy;
if *this.buffer_pos == this.buffer.len() {
this.buffer.clear();
*this.buffer_pos = 0;
}
return Poll::Ready(Ok(()));
}
if *this.finished {
return Poll::Ready(Ok(()));
}
// Read header
while !*this.header_done && *this.header_read < HEADER_LEN {
let mut temp = [0u8; HEADER_LEN];
let mut temp_buf = ReadBuf::new(&mut temp[0..HEADER_LEN - *this.header_read]);
match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Ok(())) => {
let n = temp_buf.filled().len();
if n == 0 {
break;
}
this.header_buf[*this.header_read..*this.header_read + n].copy_from_slice(&temp_buf.filled()[..n]);
*this.header_read += n;
}
Poll::Ready(Err(e)) => {
// error!("DecompressReader poll_read: read header error: {e}");
return Poll::Ready(Err(e));
}
}
if *this.header_read < HEADER_LEN {
return Poll::Pending;
}
}
if !*this.header_done && *this.header_read == 0 {
return Poll::Ready(Ok(()));
}
let typ = this.header_buf[0];
let len = (this.header_buf[1] as usize) | ((this.header_buf[2] as usize) << 8) | ((this.header_buf[3] as usize) << 16);
let crc = (this.header_buf[4] as u32)
| ((this.header_buf[5] as u32) << 8)
| ((this.header_buf[6] as u32) << 16)
| ((this.header_buf[7] as u32) << 24);
*this.header_read = 0;
*this.header_done = true;
if this.compressed_buf.is_none() {
*this.compressed_len = len;
*this.compressed_buf = Some(vec![0u8; *this.compressed_len]);
*this.compressed_read = 0;
}
let compressed_buf = this.compressed_buf.as_mut().unwrap();
while *this.compressed_read < *this.compressed_len {
let mut temp_buf = ReadBuf::new(&mut compressed_buf[*this.compressed_read..]);
match this.inner.as_mut().poll_read(cx, &mut temp_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Ok(())) => {
let n = temp_buf.filled().len();
if n == 0 {
break;
}
*this.compressed_read += n;
}
Poll::Ready(Err(e)) => {
// error!("DecompressReader poll_read: read compressed block error: {e}");
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
return Poll::Ready(Err(e));
}
}
}
let (uncompress_len, uvarint) = uvarint(&compressed_buf[0..16]);
let compressed_data = &compressed_buf[uvarint as usize..];
let decompressed = if typ == COMPRESS_TYPE_COMPRESSED {
match decompress_block(compressed_data, *this.compression_algorithm) {
Ok(out) => out,
Err(e) => {
// error!("DecompressReader decompress_block error: {e}");
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
return Poll::Ready(Err(e));
}
}
} else if typ == COMPRESS_TYPE_UNCOMPRESSED {
compressed_data.to_vec()
} else if typ == COMPRESS_TYPE_END {
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
*this.finished = true;
return Poll::Ready(Ok(()));
} else {
// error!("DecompressReader unknown compression type: {typ}");
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
return Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidData, "Unknown compression type")));
};
if decompressed.len() != uncompress_len as usize {
// error!("DecompressReader decompressed length mismatch: {} != {}", decompressed.len(), uncompress_len);
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
return Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidData, "Decompressed length mismatch")));
}
let actual_crc = {
let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
hasher.update(&decompressed);
hasher.finalize() as u32
};
if actual_crc != crc {
// error!("DecompressReader CRC32 mismatch: actual {actual_crc} != expected {crc}");
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
return Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidData, "CRC32 mismatch")));
}
*this.buffer = decompressed;
*this.buffer_pos = 0;
this.compressed_buf.take();
*this.compressed_read = 0;
*this.compressed_len = 0;
*this.header_done = false;
let to_copy = min(buf.remaining(), this.buffer.len());
buf.put_slice(&this.buffer[..to_copy]);
*this.buffer_pos += to_copy;
if *this.buffer_pos == this.buffer.len() {
this.buffer.clear();
*this.buffer_pos = 0;
}
Poll::Ready(Ok(()))
}
}
impl<R> EtagResolvable for DecompressReader<R>
where
R: EtagResolvable,
{
fn try_resolve_etag(&mut self) -> Option<String> {
self.inner.try_resolve_etag()
}
}
impl<R> HashReaderDetector for DecompressReader<R>
where
R: HashReaderDetector,
{
fn is_hash_reader(&self) -> bool {
self.inner.is_hash_reader()
}
fn as_hash_reader_mut(&mut self) -> Option<&mut dyn HashReaderMut> {
self.inner.as_hash_reader_mut()
}
}
/// Build compressed block with header + uvarint + compressed data
fn build_compressed_block(uncompressed_data: &[u8], compression_algorithm: CompressionAlgorithm) -> Vec<u8> {
let crc = {
let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
hasher.update(uncompressed_data);
hasher.finalize() as u32
};
let compressed_data = compress_block(uncompressed_data, compression_algorithm);
let uncompressed_len = uncompressed_data.len();
let mut uncompressed_len_buf = [0u8; 10];
let int_len = put_uvarint(&mut uncompressed_len_buf[..], uncompressed_len as u64);
let len = compressed_data.len() + int_len;
let mut header = [0u8; HEADER_LEN];
header[0] = COMPRESS_TYPE_COMPRESSED;
header[1] = (len & 0xFF) as u8;
header[2] = ((len >> 8) & 0xFF) as u8;
header[3] = ((len >> 16) & 0xFF) as u8;
header[4] = (crc & 0xFF) as u8;
header[5] = ((crc >> 8) & 0xFF) as u8;
header[6] = ((crc >> 16) & 0xFF) as u8;
header[7] = ((crc >> 24) & 0xFF) as u8;
let mut out = Vec::with_capacity(len + HEADER_LEN);
out.extend_from_slice(&header);
out.extend_from_slice(&uncompressed_len_buf[..int_len]);
out.extend_from_slice(&compressed_data);
out
}
#[cfg(test)]
mod tests {
use crate::WarpReader;
use super::*;
use std::io::Cursor;
use tokio::io::{AsyncReadExt, BufReader};
#[tokio::test]
async fn test_compress_reader_basic() {
let data = b"hello world, hello world, hello world!";
let reader = Cursor::new(&data[..]);
let mut compress_reader = CompressReader::new(WarpReader::new(reader), CompressionAlgorithm::Gzip);
let mut compressed = Vec::new();
compress_reader.read_to_end(&mut compressed).await.unwrap();
// DecompressReader unpacking
let mut decompress_reader = DecompressReader::new(Cursor::new(compressed.clone()), CompressionAlgorithm::Gzip);
let mut decompressed = Vec::new();
decompress_reader.read_to_end(&mut decompressed).await.unwrap();
assert_eq!(&decompressed, data);
}
#[tokio::test]
async fn test_compress_reader_basic_deflate() {
let data = b"hello world, hello world, hello world!";
let reader = BufReader::new(&data[..]);
let mut compress_reader = CompressReader::new(WarpReader::new(reader), CompressionAlgorithm::Deflate);
let mut compressed = Vec::new();
compress_reader.read_to_end(&mut compressed).await.unwrap();
// DecompressReader unpacking
let mut decompress_reader = DecompressReader::new(Cursor::new(compressed.clone()), CompressionAlgorithm::Deflate);
let mut decompressed = Vec::new();
decompress_reader.read_to_end(&mut decompressed).await.unwrap();
assert_eq!(&decompressed, data);
}
#[tokio::test]
async fn test_compress_reader_empty() {
let data = b"";
let reader = BufReader::new(&data[..]);
let mut compress_reader = CompressReader::new(WarpReader::new(reader), CompressionAlgorithm::Gzip);
let mut compressed = Vec::new();
compress_reader.read_to_end(&mut compressed).await.unwrap();
let mut decompress_reader = DecompressReader::new(Cursor::new(compressed.clone()), CompressionAlgorithm::Gzip);
let mut decompressed = Vec::new();
decompress_reader.read_to_end(&mut decompressed).await.unwrap();
assert_eq!(&decompressed, data);
}
#[tokio::test]
async fn test_compress_reader_large() {
use rand::Rng;
// Generate 1MB of random bytes
let mut data = vec![0u8; 1024 * 1024 * 32];
rand::rng().fill(&mut data[..]);
let reader = Cursor::new(data.clone());
let mut compress_reader = CompressReader::new(WarpReader::new(reader), CompressionAlgorithm::Gzip);
let mut compressed = Vec::new();
compress_reader.read_to_end(&mut compressed).await.unwrap();
let mut decompress_reader = DecompressReader::new(Cursor::new(compressed.clone()), CompressionAlgorithm::Gzip);
let mut decompressed = Vec::new();
decompress_reader.read_to_end(&mut decompressed).await.unwrap();
assert_eq!(&decompressed, &data);
}
#[tokio::test]
async fn test_compress_reader_large_deflate() {
use rand::Rng;
// Generate 1MB of random bytes
let mut data = vec![0u8; 1024 * 1024 * 3 + 512];
rand::rng().fill(&mut data[..]);
let reader = Cursor::new(data.clone());
let mut compress_reader = CompressReader::new(WarpReader::new(reader), CompressionAlgorithm::default());
let mut compressed = Vec::new();
compress_reader.read_to_end(&mut compressed).await.unwrap();
let mut decompress_reader = DecompressReader::new(Cursor::new(compressed.clone()), CompressionAlgorithm::default());
let mut decompressed = Vec::new();
decompress_reader.read_to_end(&mut decompressed).await.unwrap();
assert_eq!(&decompressed, &data);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/workers/src/lib.rs | crates/workers/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod workers;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/workers/src/workers.rs | crates/workers/src/workers.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use tokio::sync::{Mutex, Notify};
use tracing::info;
pub struct Workers {
available: Mutex<usize>, // Available working slots
notify: Notify, // Used to notify waiting tasks
limit: usize, // Maximum number of concurrent jobs
}
impl Workers {
// Create a Workers object that allows up to n jobs to execute concurrently
pub fn new(n: usize) -> Result<Arc<Workers>, &'static str> {
if n == 0 {
return Err("n must be > 0");
}
Ok(Arc::new(Workers {
available: Mutex::new(n),
notify: Notify::new(),
limit: n,
}))
}
// Give a job a chance to be executed
pub async fn take(&self) {
loop {
let mut available = self.available.lock().await;
info!("worker take, {}", *available);
if *available == 0 {
drop(available);
self.notify.notified().await;
} else {
*available -= 1;
break;
}
}
}
// Release a job's slot
pub async fn give(&self) {
let mut available = self.available.lock().await;
info!("worker give, {}", *available);
*available += 1; // Increase available slots
self.notify.notify_one(); // Notify a waiting task
}
// Wait for all concurrent jobs to complete
pub async fn wait(&self) {
loop {
{
let available = self.available.lock().await;
if *available == self.limit {
break;
}
}
// Wait until all slots are freed
self.notify.notified().await;
}
info!("worker wait end");
}
pub async fn available(&self) -> usize {
*self.available.lock().await
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
use tokio::time::sleep;
#[tokio::test]
async fn test_workers() {
let workers = Arc::new(Workers::new(5).unwrap());
for _ in 0..5 {
let workers = workers.clone();
tokio::spawn(async move {
workers.take().await;
sleep(Duration::from_secs(3)).await;
});
}
for _ in 0..5 {
workers.give().await;
}
// Sleep: wait for spawn task started
sleep(Duration::from_secs(1)).await;
workers.wait().await;
if workers.available().await != workers.limit {
unreachable!();
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/lib.rs | crates/filemeta/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod error;
mod fileinfo;
mod filemeta;
mod filemeta_inline;
// pub mod headers;
mod metacache;
mod replication;
pub mod test_data;
pub use error::*;
pub use fileinfo::*;
pub use filemeta::*;
pub use filemeta_inline::*;
pub use metacache::*;
pub use replication::*;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/test_data.rs | crates/filemeta/src/test_data.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{ChecksumAlgo, FileMeta, FileMetaShallowVersion, FileMetaVersion, MetaDeleteMarker, MetaObject, Result, VersionType};
use std::collections::HashMap;
use time::OffsetDateTime;
use uuid::Uuid;
/// Create real xl.meta file data for testing
pub fn create_real_xlmeta() -> Result<Vec<u8>> {
let mut fm = FileMeta::new();
// Create a real object version
let version_id = Uuid::parse_str("01234567-89ab-cdef-0123-456789abcdef")?;
let data_dir = Uuid::parse_str("fedcba98-7654-3210-fedc-ba9876543210")?;
let mut metadata = HashMap::new();
metadata.insert("Content-Type".to_string(), "text/plain".to_string());
metadata.insert("X-Amz-Meta-Author".to_string(), "test-user".to_string());
metadata.insert("X-Amz-Meta-Created".to_string(), "2024-01-15T10:30:00Z".to_string());
let object_version = MetaObject {
version_id: Some(version_id),
data_dir: Some(data_dir),
erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon,
erasure_m: 4,
erasure_n: 2,
erasure_block_size: 1024 * 1024, // 1MB
erasure_index: 1,
erasure_dist: vec![0, 1, 2, 3, 4, 5],
bitrot_checksum_algo: ChecksumAlgo::HighwayHash,
part_numbers: vec![1],
part_etags: vec!["d41d8cd98f00b204e9800998ecf8427e".to_string()],
part_sizes: vec![1024],
part_actual_sizes: vec![1024],
part_indices: Vec::new(),
size: 1024,
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200)?), // 2024-01-15 10:30:00 UTC
meta_sys: HashMap::new(),
meta_user: metadata,
};
let file_version = FileMetaVersion {
version_type: VersionType::Object,
object: Some(object_version),
delete_marker: None,
write_version: 1,
};
let shallow_version = FileMetaShallowVersion::try_from(file_version)?;
fm.versions.push(shallow_version);
// Add a delete marker version
let delete_version_id = Uuid::parse_str("11111111-2222-3333-4444-555555555555")?;
let delete_marker = MetaDeleteMarker {
version_id: Some(delete_version_id),
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312260)?), // 1 minute later
meta_sys: HashMap::new(),
};
let delete_file_version = FileMetaVersion {
version_type: VersionType::Delete,
object: None,
delete_marker: Some(delete_marker),
write_version: 2,
};
let delete_shallow_version = FileMetaShallowVersion::try_from(delete_file_version)?;
fm.versions.push(delete_shallow_version);
// Add a Legacy version for testing
let legacy_version_id = Uuid::parse_str("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")?;
let legacy_version = FileMetaVersion {
version_type: VersionType::Legacy,
object: None,
delete_marker: None,
write_version: 3,
};
let mut legacy_shallow = FileMetaShallowVersion::try_from(legacy_version)?;
legacy_shallow.header.version_id = Some(legacy_version_id);
legacy_shallow.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(1705312140)?); // earlier time
fm.versions.push(legacy_shallow);
// Sort by modification time (newest first)
fm.versions.sort_by(|a, b| b.header.mod_time.cmp(&a.header.mod_time));
fm.marshal_msg()
}
/// Create a complex xl.meta file with multiple versions
pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
let mut fm = FileMeta::new();
// Create 10 object versions
for i in 0i64..10i64 {
let version_id = Uuid::new_v4();
let data_dir = if i % 3 == 0 { Some(Uuid::new_v4()) } else { None };
let mut metadata = HashMap::new();
metadata.insert("Content-Type".to_string(), "application/octet-stream".to_string());
metadata.insert("X-Amz-Meta-Version".to_string(), i.to_string());
metadata.insert("X-Amz-Meta-Test".to_string(), format!("test-value-{i}"));
let object_version = MetaObject {
version_id: Some(version_id),
data_dir,
erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon,
erasure_m: 4,
erasure_n: 2,
erasure_block_size: 1024 * 1024,
erasure_index: (i % 6) as usize,
erasure_dist: vec![0, 1, 2, 3, 4, 5],
bitrot_checksum_algo: ChecksumAlgo::HighwayHash,
part_numbers: vec![1],
part_etags: vec![format!("etag-{:08x}", i)],
part_sizes: vec![1024 * (i + 1) as usize],
part_actual_sizes: vec![1024 * (i + 1)],
part_indices: Vec::new(),
size: 1024 * (i + 1),
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200 + i * 60)?),
meta_sys: HashMap::new(),
meta_user: metadata,
};
let file_version = FileMetaVersion {
version_type: VersionType::Object,
object: Some(object_version),
delete_marker: None,
write_version: (i + 1) as u64,
};
let shallow_version = FileMetaShallowVersion::try_from(file_version)?;
fm.versions.push(shallow_version);
// Add a delete marker every 3 versions
if i % 3 == 2 {
let delete_version_id = Uuid::new_v4();
let delete_marker = MetaDeleteMarker {
version_id: Some(delete_version_id),
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200 + i * 60 + 30)?),
meta_sys: HashMap::new(),
};
let delete_file_version = FileMetaVersion {
version_type: VersionType::Delete,
object: None,
delete_marker: Some(delete_marker),
write_version: (i + 100) as u64,
};
let delete_shallow_version = FileMetaShallowVersion::try_from(delete_file_version)?;
fm.versions.push(delete_shallow_version);
}
}
// Sort by modification time (newest first)
fm.versions.sort_by(|a, b| b.header.mod_time.cmp(&a.header.mod_time));
fm.marshal_msg()
}
/// Create a corrupted xl.meta file for error handling tests
pub fn create_corrupted_xlmeta() -> Vec<u8> {
let mut data = vec![
// Correct file header
b'X', b'L', b'2', b' ', // version
1, 0, 3, 0, // version
0xc6, 0x00, 0x00, 0x00, 0x10, // correct bin32 length marker, but data length mismatch
];
// Add insufficient data (less than declared length)
data.extend_from_slice(&[0x42; 8]); // only 8 bytes, but declared 16 bytes
data
}
/// Create an empty xl.meta file
pub fn create_empty_xlmeta() -> Result<Vec<u8>> {
let fm = FileMeta::new();
fm.marshal_msg()
}
/// Helper function to verify parsing results
pub fn verify_parsed_metadata(fm: &FileMeta, expected_versions: usize) -> Result<()> {
assert_eq!(fm.versions.len(), expected_versions, "Version count mismatch");
assert_eq!(fm.meta_ver, crate::filemeta::XL_META_VERSION, "Metadata version mismatch");
// Verify versions are sorted by modification time
for i in 1..fm.versions.len() {
let prev_time = fm.versions[i - 1].header.mod_time;
let curr_time = fm.versions[i].header.mod_time;
if let (Some(prev), Some(curr)) = (prev_time, curr_time) {
assert!(prev >= curr, "Versions not sorted correctly by modification time");
}
}
Ok(())
}
/// Create an xl.meta file with inline data
pub fn create_xlmeta_with_inline_data() -> Result<Vec<u8>> {
let mut fm = FileMeta::new();
// Add inline data
let inline_data = b"This is inline data for testing purposes";
let version_id = Uuid::new_v4();
fm.data.replace(&version_id.to_string(), inline_data.to_vec())?;
let object_version = MetaObject {
version_id: Some(version_id),
data_dir: None,
erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon,
erasure_m: 1,
erasure_n: 1,
erasure_block_size: 64 * 1024,
erasure_index: 0,
erasure_dist: vec![0, 1],
bitrot_checksum_algo: ChecksumAlgo::HighwayHash,
part_numbers: vec![1],
part_etags: Vec::new(),
part_sizes: vec![inline_data.len()],
part_actual_sizes: Vec::new(),
part_indices: Vec::new(),
size: inline_data.len() as i64,
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: HashMap::new(),
meta_user: HashMap::new(),
};
let file_version = FileMetaVersion {
version_type: VersionType::Object,
object: Some(object_version),
delete_marker: None,
write_version: 1,
};
let shallow_version = FileMetaShallowVersion::try_from(file_version)?;
fm.versions.push(shallow_version);
fm.marshal_msg()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::FileMeta;
#[test]
fn test_create_real_xlmeta() {
let data = create_real_xlmeta().expect("Failed to create test data");
assert!(!data.is_empty(), "Generated data should not be empty");
// Verify file header
assert_eq!(&data[0..4], b"XL2 ", "Incorrect file header");
// Try to parse
let fm = FileMeta::load(&data).expect("Failed to parse");
verify_parsed_metadata(&fm, 3).expect("Verification failed");
}
#[test]
fn test_create_complex_xlmeta() {
let data = create_complex_xlmeta().expect("Failed to create complex test data");
assert!(!data.is_empty(), "Generated data should not be empty");
let fm = FileMeta::load(&data).expect("Failed to parse");
assert!(fm.versions.len() >= 10, "Should have at least 10 versions");
}
#[test]
fn test_create_xlmeta_with_inline_data() {
let data = create_xlmeta_with_inline_data().expect("Failed to create inline data test");
assert!(!data.is_empty(), "Generated data should not be empty");
let fm = FileMeta::load(&data).expect("Failed to parse");
assert_eq!(fm.versions.len(), 1, "Should have 1 version");
assert!(!fm.data.as_slice().is_empty(), "Should contain inline data");
}
#[test]
fn test_corrupted_xlmeta_handling() {
let data = create_corrupted_xlmeta();
let result = FileMeta::load(&data);
assert!(result.is_err(), "Corrupted data should fail to parse");
}
#[test]
fn test_empty_xlmeta() {
let data = create_empty_xlmeta().expect("Failed to create empty test data");
let fm = FileMeta::load(&data).expect("Failed to parse empty data");
assert_eq!(fm.versions.len(), 0, "Empty file should have no versions");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/error.rs | crates/filemeta/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// FileMeta error type and Result alias.
/// This module defines a custom error type `Error` for handling various
/// error scenarios related to file metadata operations. It also provides
/// a `Result` type alias for convenience.
pub type Result<T> = core::result::Result<T, Error>;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("File not found")]
FileNotFound,
#[error("File version not found")]
FileVersionNotFound,
#[error("Volume not found")]
VolumeNotFound,
#[error("File corrupt")]
FileCorrupt,
#[error("Done for now")]
DoneForNow,
#[error("Method not allowed")]
MethodNotAllowed,
#[error("Unexpected error")]
Unexpected,
#[error("I/O error: {0}")]
Io(std::io::Error),
#[error("rmp serde decode error: {0}")]
RmpSerdeDecode(String),
#[error("rmp serde encode error: {0}")]
RmpSerdeEncode(String),
#[error("Invalid UTF-8: {0}")]
FromUtf8(String),
#[error("rmp decode value read error: {0}")]
RmpDecodeValueRead(String),
#[error("rmp encode value write error: {0}")]
RmpEncodeValueWrite(String),
#[error("rmp decode num value read error: {0}")]
RmpDecodeNumValueRead(String),
#[error("rmp decode marker read error: {0}")]
RmpDecodeMarkerRead(String),
#[error("time component range error: {0}")]
TimeComponentRange(String),
#[error("uuid parse error: {0}")]
UuidParse(String),
}
impl Error {
pub fn other<E>(error: E) -> Error
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::other(error).into()
}
}
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Error::FileCorrupt, Error::FileCorrupt) => true,
(Error::DoneForNow, Error::DoneForNow) => true,
(Error::MethodNotAllowed, Error::MethodNotAllowed) => true,
(Error::FileNotFound, Error::FileNotFound) => true,
(Error::FileVersionNotFound, Error::FileVersionNotFound) => true,
(Error::VolumeNotFound, Error::VolumeNotFound) => true,
(Error::Io(e1), Error::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
(Error::RmpSerdeDecode(e1), Error::RmpSerdeDecode(e2)) => e1 == e2,
(Error::RmpSerdeEncode(e1), Error::RmpSerdeEncode(e2)) => e1 == e2,
(Error::RmpDecodeValueRead(e1), Error::RmpDecodeValueRead(e2)) => e1 == e2,
(Error::RmpEncodeValueWrite(e1), Error::RmpEncodeValueWrite(e2)) => e1 == e2,
(Error::RmpDecodeNumValueRead(e1), Error::RmpDecodeNumValueRead(e2)) => e1 == e2,
(Error::TimeComponentRange(e1), Error::TimeComponentRange(e2)) => e1 == e2,
(Error::UuidParse(e1), Error::UuidParse(e2)) => e1 == e2,
(Error::Unexpected, Error::Unexpected) => true,
(a, b) => a.to_string() == b.to_string(),
}
}
}
impl Clone for Error {
fn clone(&self) -> Self {
match self {
Error::FileNotFound => Error::FileNotFound,
Error::FileVersionNotFound => Error::FileVersionNotFound,
Error::FileCorrupt => Error::FileCorrupt,
Error::DoneForNow => Error::DoneForNow,
Error::MethodNotAllowed => Error::MethodNotAllowed,
Error::VolumeNotFound => Error::VolumeNotFound,
Error::Io(e) => Error::Io(std::io::Error::new(e.kind(), e.to_string())),
Error::RmpSerdeDecode(s) => Error::RmpSerdeDecode(s.clone()),
Error::RmpSerdeEncode(s) => Error::RmpSerdeEncode(s.clone()),
Error::FromUtf8(s) => Error::FromUtf8(s.clone()),
Error::RmpDecodeValueRead(s) => Error::RmpDecodeValueRead(s.clone()),
Error::RmpEncodeValueWrite(s) => Error::RmpEncodeValueWrite(s.clone()),
Error::RmpDecodeNumValueRead(s) => Error::RmpDecodeNumValueRead(s.clone()),
Error::RmpDecodeMarkerRead(s) => Error::RmpDecodeMarkerRead(s.clone()),
Error::TimeComponentRange(s) => Error::TimeComponentRange(s.clone()),
Error::UuidParse(s) => Error::UuidParse(s.clone()),
Error::Unexpected => Error::Unexpected,
}
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
match e.kind() {
std::io::ErrorKind::UnexpectedEof => Error::Unexpected,
_ => Error::Io(e),
}
}
}
impl From<Error> for std::io::Error {
fn from(e: Error) -> Self {
match e {
Error::Unexpected => std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "Unexpected EOF"),
Error::Io(e) => e,
_ => std::io::Error::other(e.to_string()),
}
}
}
impl From<rmp_serde::decode::Error> for Error {
fn from(e: rmp_serde::decode::Error) -> Self {
Error::RmpSerdeDecode(e.to_string())
}
}
impl From<rmp_serde::encode::Error> for Error {
fn from(e: rmp_serde::encode::Error) -> Self {
Error::RmpSerdeEncode(e.to_string())
}
}
impl From<std::string::FromUtf8Error> for Error {
fn from(e: std::string::FromUtf8Error) -> Self {
Error::FromUtf8(e.to_string())
}
}
impl From<rmp::decode::ValueReadError> for Error {
fn from(e: rmp::decode::ValueReadError) -> Self {
Error::RmpDecodeValueRead(e.to_string())
}
}
impl From<rmp::encode::ValueWriteError> for Error {
fn from(e: rmp::encode::ValueWriteError) -> Self {
Error::RmpEncodeValueWrite(e.to_string())
}
}
impl From<rmp::decode::NumValueReadError> for Error {
fn from(e: rmp::decode::NumValueReadError) -> Self {
Error::RmpDecodeNumValueRead(e.to_string())
}
}
impl From<time::error::ComponentRange> for Error {
fn from(e: time::error::ComponentRange) -> Self {
Error::TimeComponentRange(e.to_string())
}
}
impl From<uuid::Error> for Error {
fn from(e: uuid::Error) -> Self {
Error::UuidParse(e.to_string())
}
}
impl From<rmp::decode::MarkerReadError> for Error {
fn from(e: rmp::decode::MarkerReadError) -> Self {
let serr = format!("{e:?}");
Error::RmpDecodeMarkerRead(serr)
}
}
pub fn is_io_eof(e: &Error) -> bool {
match e {
Error::Io(e) => e.kind() == std::io::ErrorKind::UnexpectedEof,
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Error as IoError, ErrorKind};
#[test]
fn test_filemeta_error_from_io_error() {
let io_error = IoError::new(ErrorKind::PermissionDenied, "permission denied");
let filemeta_error: Error = io_error.into();
match filemeta_error {
Error::Io(inner_io) => {
assert_eq!(inner_io.kind(), ErrorKind::PermissionDenied);
assert!(inner_io.to_string().contains("permission denied"));
}
_ => panic!("Expected Io variant"),
}
}
#[test]
fn test_filemeta_error_other_function() {
let custom_error = "Custom filemeta error";
let filemeta_error = Error::other(custom_error);
match filemeta_error {
Error::Io(io_error) => {
assert!(io_error.to_string().contains(custom_error));
assert_eq!(io_error.kind(), ErrorKind::Other);
}
_ => panic!("Expected Io variant"),
}
}
#[test]
fn test_filemeta_error_conversions() {
// Test various error conversions
let serde_decode_err =
rmp_serde::decode::Error::InvalidMarkerRead(std::io::Error::new(ErrorKind::InvalidData, "invalid"));
let filemeta_error: Error = serde_decode_err.into();
assert!(matches!(filemeta_error, Error::RmpSerdeDecode(_)));
// Test with string-based error that we can actually create
let encode_error_string = "test encode error";
let filemeta_error = Error::RmpSerdeEncode(encode_error_string.to_string());
assert!(matches!(filemeta_error, Error::RmpSerdeEncode(_)));
let utf8_err = std::string::String::from_utf8(vec![0xFF]).unwrap_err();
let filemeta_error: Error = utf8_err.into();
assert!(matches!(filemeta_error, Error::FromUtf8(_)));
}
#[test]
fn test_filemeta_error_clone() {
let test_cases = vec![
Error::FileNotFound,
Error::FileVersionNotFound,
Error::VolumeNotFound,
Error::FileCorrupt,
Error::DoneForNow,
Error::MethodNotAllowed,
Error::Unexpected,
Error::Io(IoError::new(ErrorKind::NotFound, "test")),
Error::RmpSerdeDecode("test decode error".to_string()),
Error::RmpSerdeEncode("test encode error".to_string()),
Error::FromUtf8("test utf8 error".to_string()),
Error::RmpDecodeValueRead("test value read error".to_string()),
Error::RmpEncodeValueWrite("test value write error".to_string()),
Error::RmpDecodeNumValueRead("test num read error".to_string()),
Error::RmpDecodeMarkerRead("test marker read error".to_string()),
Error::TimeComponentRange("test time error".to_string()),
Error::UuidParse("test uuid error".to_string()),
];
for original_error in test_cases {
let cloned_error = original_error.clone();
assert_eq!(original_error, cloned_error);
}
}
#[test]
fn test_filemeta_error_partial_eq() {
// Test equality for simple variants
assert_eq!(Error::FileNotFound, Error::FileNotFound);
assert_ne!(Error::FileNotFound, Error::FileVersionNotFound);
// Test equality for Io variants
let io1 = Error::Io(IoError::new(ErrorKind::NotFound, "test"));
let io2 = Error::Io(IoError::new(ErrorKind::NotFound, "test"));
let io3 = Error::Io(IoError::new(ErrorKind::PermissionDenied, "test"));
assert_eq!(io1, io2);
assert_ne!(io1, io3);
// Test equality for string variants
let decode1 = Error::RmpSerdeDecode("error message".to_string());
let decode2 = Error::RmpSerdeDecode("error message".to_string());
let decode3 = Error::RmpSerdeDecode("different message".to_string());
assert_eq!(decode1, decode2);
assert_ne!(decode1, decode3);
}
#[test]
fn test_filemeta_error_display() {
let test_cases = vec![
(Error::FileNotFound, "File not found"),
(Error::FileVersionNotFound, "File version not found"),
(Error::VolumeNotFound, "Volume not found"),
(Error::FileCorrupt, "File corrupt"),
(Error::DoneForNow, "Done for now"),
(Error::MethodNotAllowed, "Method not allowed"),
(Error::Unexpected, "Unexpected error"),
(Error::RmpSerdeDecode("test".to_string()), "rmp serde decode error: test"),
(Error::RmpSerdeEncode("test".to_string()), "rmp serde encode error: test"),
(Error::FromUtf8("test".to_string()), "Invalid UTF-8: test"),
(Error::TimeComponentRange("test".to_string()), "time component range error: test"),
(Error::UuidParse("test".to_string()), "uuid parse error: test"),
];
for (error, expected_message) in test_cases {
assert_eq!(error.to_string(), expected_message);
}
}
#[test]
fn test_rmp_conversions() {
// Test rmp value read error (this one works since it has the same signature)
let value_read_err = rmp::decode::ValueReadError::InvalidMarkerRead(std::io::Error::new(ErrorKind::InvalidData, "test"));
let filemeta_error: Error = value_read_err.into();
assert!(matches!(filemeta_error, Error::RmpDecodeValueRead(_)));
// Test rmp num value read error
let num_value_err =
rmp::decode::NumValueReadError::InvalidMarkerRead(std::io::Error::new(ErrorKind::InvalidData, "test"));
let filemeta_error: Error = num_value_err.into();
assert!(matches!(filemeta_error, Error::RmpDecodeNumValueRead(_)));
}
#[test]
fn test_time_and_uuid_conversions() {
// Test time component range error
use time::{Date, Month};
let time_result = Date::from_calendar_date(2023, Month::January, 32); // Invalid day
assert!(time_result.is_err());
let time_error = time_result.unwrap_err();
let filemeta_error: Error = time_error.into();
assert!(matches!(filemeta_error, Error::TimeComponentRange(_)));
// Test UUID parse error
let uuid_result = uuid::Uuid::parse_str("invalid-uuid");
assert!(uuid_result.is_err());
let uuid_error = uuid_result.unwrap_err();
let filemeta_error: Error = uuid_error.into();
assert!(matches!(filemeta_error, Error::UuidParse(_)));
}
#[test]
fn test_marker_read_error_conversion() {
// Test rmp marker read error conversion
let marker_err = rmp::decode::MarkerReadError(std::io::Error::new(ErrorKind::InvalidData, "marker test"));
let filemeta_error: Error = marker_err.into();
assert!(matches!(filemeta_error, Error::RmpDecodeMarkerRead(_)));
assert!(filemeta_error.to_string().contains("marker"));
}
#[test]
fn test_is_io_eof_function() {
// Test is_io_eof helper function
let eof_error = Error::Io(IoError::new(ErrorKind::UnexpectedEof, "eof"));
assert!(is_io_eof(&eof_error));
let not_eof_error = Error::Io(IoError::new(ErrorKind::NotFound, "not found"));
assert!(!is_io_eof(¬_eof_error));
let non_io_error = Error::FileNotFound;
assert!(!is_io_eof(&non_io_error));
}
#[test]
fn test_filemeta_error_to_io_error_conversion() {
// Test conversion from FileMeta Error to io::Error through other function
let original_io_error = IoError::new(ErrorKind::InvalidData, "test data");
let filemeta_error = Error::other(original_io_error);
match filemeta_error {
Error::Io(io_err) => {
assert_eq!(io_err.kind(), ErrorKind::Other);
assert!(io_err.to_string().contains("test data"));
}
_ => panic!("Expected Io variant"),
}
}
#[test]
fn test_filemeta_error_roundtrip_conversion() {
// Test roundtrip conversion: io::Error -> FileMeta Error -> io::Error
let original_io_error = IoError::new(ErrorKind::PermissionDenied, "permission test");
// Convert to FileMeta Error
let filemeta_error: Error = original_io_error.into();
// Extract the io::Error back
match filemeta_error {
Error::Io(extracted_io_error) => {
assert_eq!(extracted_io_error.kind(), ErrorKind::PermissionDenied);
assert!(extracted_io_error.to_string().contains("permission test"));
}
_ => panic!("Expected Io variant"),
}
}
#[test]
fn test_filemeta_error_io_error_kinds_preservation() {
let io_error_kinds = vec![
ErrorKind::NotFound,
ErrorKind::PermissionDenied,
ErrorKind::ConnectionRefused,
ErrorKind::ConnectionReset,
ErrorKind::ConnectionAborted,
ErrorKind::NotConnected,
ErrorKind::AddrInUse,
ErrorKind::AddrNotAvailable,
ErrorKind::BrokenPipe,
ErrorKind::AlreadyExists,
ErrorKind::WouldBlock,
ErrorKind::InvalidInput,
ErrorKind::InvalidData,
ErrorKind::TimedOut,
ErrorKind::WriteZero,
ErrorKind::Interrupted,
ErrorKind::UnexpectedEof,
ErrorKind::Other,
];
for kind in io_error_kinds {
let io_error = IoError::new(kind, format!("test error for {kind:?}"));
let filemeta_error: Error = io_error.into();
match filemeta_error {
Error::Unexpected => {
assert_eq!(kind, ErrorKind::UnexpectedEof);
}
Error::Io(extracted_io_error) => {
assert_eq!(extracted_io_error.kind(), kind);
assert!(extracted_io_error.to_string().contains("test error"));
}
_ => panic!("Expected Io variant for kind {kind:?}"),
}
}
}
#[test]
fn test_filemeta_error_downcast_chain() {
// Test error downcast chain functionality
let original_io_error = IoError::new(ErrorKind::InvalidData, "original error");
let filemeta_error = Error::other(original_io_error);
// The error should be wrapped as an Io variant
if let Error::Io(io_err) = filemeta_error {
// The wrapped error should be Other kind (from std::io::Error::other)
assert_eq!(io_err.kind(), ErrorKind::Other);
// But the message should still contain the original error information
assert!(io_err.to_string().contains("original error"));
} else {
panic!("Expected Io variant");
}
}
#[test]
fn test_filemeta_error_maintains_error_information() {
let test_cases = vec![
(ErrorKind::NotFound, "file not found"),
(ErrorKind::PermissionDenied, "access denied"),
(ErrorKind::InvalidData, "corrupt data"),
(ErrorKind::TimedOut, "operation timed out"),
];
for (kind, message) in test_cases {
let io_error = IoError::new(kind, message);
let error_message = io_error.to_string();
let filemeta_error: Error = io_error.into();
match filemeta_error {
Error::Io(extracted_io_error) => {
assert_eq!(extracted_io_error.kind(), kind);
assert_eq!(extracted_io_error.to_string(), error_message);
}
_ => panic!("Expected Io variant"),
}
}
}
#[test]
fn test_filemeta_error_complex_conversion_chain() {
// Test conversion from string error types that we can actually create
// Test with UUID error conversion
let uuid_result = uuid::Uuid::parse_str("invalid-uuid-format");
assert!(uuid_result.is_err());
let uuid_error = uuid_result.unwrap_err();
let filemeta_error: Error = uuid_error.into();
match filemeta_error {
Error::UuidParse(message) => {
assert!(message.contains("invalid"));
}
_ => panic!("Expected UuidParse variant"),
}
// Test with time error conversion
use time::{Date, Month};
let time_result = Date::from_calendar_date(2023, Month::January, 32); // Invalid day
assert!(time_result.is_err());
let time_error = time_result.unwrap_err();
let filemeta_error2: Error = time_error.into();
match filemeta_error2 {
Error::TimeComponentRange(message) => {
assert!(message.contains("range"));
}
_ => panic!("Expected TimeComponentRange variant"),
}
// Test with UTF8 error conversion
let utf8_result = std::string::String::from_utf8(vec![0xFF]);
assert!(utf8_result.is_err());
let utf8_error = utf8_result.unwrap_err();
let filemeta_error3: Error = utf8_error.into();
match filemeta_error3 {
Error::FromUtf8(message) => {
assert!(message.contains("utf"));
}
_ => panic!("Expected FromUtf8 variant"),
}
}
#[test]
fn test_filemeta_error_equality_with_io_errors() {
// Test equality comparison for Io variants
let io_error1 = IoError::new(ErrorKind::NotFound, "test message");
let io_error2 = IoError::new(ErrorKind::NotFound, "test message");
let io_error3 = IoError::new(ErrorKind::PermissionDenied, "test message");
let io_error4 = IoError::new(ErrorKind::NotFound, "different message");
let filemeta_error1 = Error::Io(io_error1);
let filemeta_error2 = Error::Io(io_error2);
let filemeta_error3 = Error::Io(io_error3);
let filemeta_error4 = Error::Io(io_error4);
// Same kind and message should be equal
assert_eq!(filemeta_error1, filemeta_error2);
// Different kinds should not be equal
assert_ne!(filemeta_error1, filemeta_error3);
// Different messages should not be equal
assert_ne!(filemeta_error1, filemeta_error4);
}
#[test]
fn test_filemeta_error_clone_io_variants() {
let io_error = IoError::new(ErrorKind::ConnectionReset, "connection lost");
let original_error = Error::Io(io_error);
let cloned_error = original_error.clone();
// Cloned error should be equal to original
assert_eq!(original_error, cloned_error);
// Both should maintain the same properties
match (original_error, cloned_error) {
(Error::Io(orig_io), Error::Io(cloned_io)) => {
assert_eq!(orig_io.kind(), cloned_io.kind());
assert_eq!(orig_io.to_string(), cloned_io.to_string());
}
_ => panic!("Both should be Io variants"),
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/metacache.rs | crates/filemeta/src/metacache.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, FileInfo, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, merge_file_meta_versions};
use rmp::Marker;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::str::from_utf8;
use std::{
fmt::Debug,
future::Future,
pin::Pin,
ptr,
sync::{
Arc,
atomic::{AtomicPtr, AtomicU64, Ordering as AtomicOrdering},
},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::spawn;
use tokio::sync::Mutex;
use tracing::warn;
const SLASH_SEPARATOR: &str = "/";
#[derive(Clone, Debug, Default)]
pub struct MetadataResolutionParams {
pub dir_quorum: usize,
pub obj_quorum: usize,
pub requested_versions: usize,
pub bucket: String,
pub strict: bool,
pub candidates: Vec<Vec<FileMetaShallowVersion>>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct MetaCacheEntry {
/// name is the full name of the object including prefixes
pub name: String,
/// Metadata. If none is present it is not an object but only a prefix.
/// Entries without metadata will only be present in non-recursive scans.
pub metadata: Vec<u8>,
/// cached contains the metadata if decoded.
#[serde(skip)]
pub cached: Option<FileMeta>,
/// Indicates the entry can be reused and only one reference to metadata is expected.
pub reusable: bool,
}
impl MetaCacheEntry {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut wr = Vec::new();
rmp::encode::write_bool(&mut wr, true)?;
rmp::encode::write_str(&mut wr, &self.name)?;
rmp::encode::write_bin(&mut wr, &self.metadata)?;
Ok(wr)
}
pub fn is_dir(&self) -> bool {
self.metadata.is_empty() && self.name.ends_with('/')
}
pub fn is_in_dir(&self, dir: &str, separator: &str) -> bool {
if dir.is_empty() {
let idx = self.name.find(separator);
return idx.is_none() || idx.unwrap() == self.name.len() - separator.len();
}
let ext = self.name.trim_start_matches(dir);
if ext.len() != self.name.len() {
let idx = ext.find(separator);
return idx.is_none() || idx.unwrap() == ext.len() - separator.len();
}
false
}
pub fn is_object(&self) -> bool {
!self.metadata.is_empty()
}
pub fn is_object_dir(&self) -> bool {
!self.metadata.is_empty() && self.name.ends_with(SLASH_SEPARATOR)
}
pub fn is_latest_delete_marker(&mut self) -> bool {
if let Some(cached) = &self.cached {
if cached.versions.is_empty() {
return true;
}
return cached.versions[0].header.version_type == VersionType::Delete;
}
if !FileMeta::is_xl2_v1_format(&self.metadata) {
return false;
}
match FileMeta::is_indexed_meta(&self.metadata) {
Ok((meta, _inline_data)) => {
if !meta.is_empty() {
return FileMeta::is_latest_delete_marker(meta);
}
}
Err(_) => return true,
}
match self.xl_meta() {
Ok(res) => {
if res.versions.is_empty() {
return true;
}
res.versions[0].header.version_type == VersionType::Delete
}
Err(_) => true,
}
}
#[tracing::instrument(level = "debug", skip(self))]
pub fn to_fileinfo(&self, bucket: &str) -> Result<FileInfo> {
if self.is_dir() {
return Ok(FileInfo {
volume: bucket.to_owned(),
name: self.name.clone(),
..Default::default()
});
}
if self.cached.is_some() {
let fm = self.cached.as_ref().unwrap();
if fm.versions.is_empty() {
return Ok(FileInfo {
volume: bucket.to_owned(),
name: self.name.clone(),
deleted: true,
is_latest: true,
mod_time: Some(OffsetDateTime::UNIX_EPOCH),
..Default::default()
});
}
let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
return Ok(fi);
}
let mut fm = FileMeta::new();
fm.unmarshal_msg(&self.metadata)?;
let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
Ok(fi)
}
pub fn file_info_versions(&self, bucket: &str) -> Result<FileInfoVersions> {
if self.is_dir() {
return Ok(FileInfoVersions {
volume: bucket.to_string(),
name: self.name.clone(),
versions: vec![FileInfo {
volume: bucket.to_string(),
name: self.name.clone(),
..Default::default()
}],
..Default::default()
});
}
let mut fm = FileMeta::new();
fm.unmarshal_msg(&self.metadata)?;
fm.into_file_info_versions(bucket, self.name.as_str(), false)
}
pub fn matches(&self, other: Option<&MetaCacheEntry>, strict: bool) -> (Option<MetaCacheEntry>, bool) {
if other.is_none() {
return (None, false);
}
let other = other.unwrap();
if self.name != other.name {
if self.name < other.name {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
if other.is_dir() || self.is_dir() {
if self.is_dir() {
return (Some(self.clone()), other.is_dir() == self.is_dir());
}
return (Some(other.clone()), other.is_dir() == self.is_dir());
}
let self_vers = match &self.cached {
Some(file_meta) => file_meta.clone(),
None => match FileMeta::load(&self.metadata) {
Ok(meta) => meta,
Err(_) => return (None, false),
},
};
let other_vers = match &other.cached {
Some(file_meta) => file_meta.clone(),
None => match FileMeta::load(&other.metadata) {
Ok(meta) => meta,
Err(_) => return (None, false),
},
};
if self_vers.versions.len() != other_vers.versions.len() {
match self_vers.latest_mod_time().cmp(&other_vers.latest_mod_time()) {
Ordering::Greater => return (Some(self.clone()), false),
Ordering::Less => return (Some(other.clone()), false),
_ => {}
}
if self_vers.versions.len() > other_vers.versions.len() {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
let mut prefer = None;
for (s_version, o_version) in self_vers.versions.iter().zip(other_vers.versions.iter()) {
if s_version.header != o_version.header {
if s_version.header.has_ec() != o_version.header.has_ec() {
// One version has EC and the other doesn't - may have been written later.
// Compare without considering EC.
let (mut a, mut b) = (s_version.header.clone(), o_version.header.clone());
(a.ec_n, a.ec_m, b.ec_n, b.ec_m) = (0, 0, 0, 0);
if a == b {
continue;
}
}
if !strict && s_version.header.matches_not_strict(&o_version.header) {
if prefer.is_none() {
if s_version.header.sorts_before(&o_version.header) {
prefer = Some(self.clone());
} else {
prefer = Some(other.clone());
}
}
continue;
}
if prefer.is_some() {
return (prefer, false);
}
if s_version.header.sorts_before(&o_version.header) {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
}
if prefer.is_none() {
prefer = Some(self.clone());
}
(prefer, true)
}
pub fn xl_meta(&mut self) -> Result<FileMeta> {
if self.is_dir() {
return Err(Error::FileNotFound);
}
if let Some(meta) = &self.cached {
Ok(meta.clone())
} else {
if self.metadata.is_empty() {
return Err(Error::FileNotFound);
}
let meta = FileMeta::load(&self.metadata)?;
self.cached = Some(meta.clone());
Ok(meta)
}
}
}
#[derive(Debug, Default)]
pub struct MetaCacheEntries(pub Vec<Option<MetaCacheEntry>>);
impl MetaCacheEntries {
#[allow(clippy::should_implement_trait)]
pub fn as_ref(&self) -> &[Option<MetaCacheEntry>] {
&self.0
}
pub fn resolve(&self, mut params: MetadataResolutionParams) -> Option<MetaCacheEntry> {
if self.0.is_empty() {
warn!("decommission_pool: entries resolve empty");
return None;
}
let mut dir_exists = 0;
let mut selected = None;
params.candidates.clear();
let mut objs_agree = 0;
let mut objs_valid = 0;
for entry in self.0.iter().flatten() {
let mut entry = entry.clone();
warn!("decommission_pool: entries resolve entry {:?}", entry.name);
if entry.name.is_empty() {
continue;
}
if entry.is_dir() {
dir_exists += 1;
selected = Some(entry.clone());
warn!("decommission_pool: entries resolve entry dir {:?}", entry.name);
continue;
}
let xl = match entry.xl_meta() {
Ok(xl) => xl,
Err(e) => {
warn!("decommission_pool: entries resolve entry xl_meta {:?}", e);
continue;
}
};
objs_valid += 1;
params.candidates.push(xl.versions.clone());
if selected.is_none() {
selected = Some(entry.clone());
objs_agree = 1;
warn!("decommission_pool: entries resolve entry selected {:?}", entry.name);
continue;
}
if let (prefer, true) = entry.matches(selected.as_ref(), params.strict) {
selected = prefer;
objs_agree += 1;
warn!("decommission_pool: entries resolve entry prefer {:?}", entry.name);
continue;
}
}
let Some(selected) = selected else {
warn!("decommission_pool: entries resolve entry no selected");
return None;
};
if selected.is_dir() && dir_exists >= params.dir_quorum {
warn!("decommission_pool: entries resolve entry dir selected {:?}", selected.name);
return Some(selected);
}
// If we would never be able to reach read quorum.
if objs_valid < params.obj_quorum {
warn!(
"decommission_pool: entries resolve entry not enough objects {} < {}",
objs_valid, params.obj_quorum
);
return None;
}
if objs_agree == objs_valid {
warn!("decommission_pool: entries resolve entry all agree {} == {}", objs_agree, objs_valid);
return Some(selected);
}
let Some(cached) = selected.cached else {
warn!("decommission_pool: entries resolve entry no cached");
return None;
};
let versions = merge_file_meta_versions(params.obj_quorum, params.strict, params.requested_versions, ¶ms.candidates);
if versions.is_empty() {
warn!("decommission_pool: entries resolve entry no versions");
return None;
}
let metadata = match cached.marshal_msg() {
Ok(meta) => meta,
Err(e) => {
warn!("decommission_pool: entries resolve entry marshal_msg {:?}", e);
return None;
}
};
// Merge if we have disagreement.
// Create a new merged result.
let new_selected = MetaCacheEntry {
name: selected.name.clone(),
cached: Some(FileMeta {
meta_ver: cached.meta_ver,
versions,
..Default::default()
}),
reusable: true,
metadata,
};
warn!("decommission_pool: entries resolve entry selected {:?}", new_selected.name);
Some(new_selected)
}
pub fn first_found(&self) -> (Option<MetaCacheEntry>, usize) {
(self.0.iter().find(|x| x.is_some()).cloned().unwrap_or_default(), self.0.len())
}
}
#[derive(Debug, Default)]
pub struct MetaCacheEntriesSortedResult {
pub entries: Option<MetaCacheEntriesSorted>,
pub err: Option<Error>,
}
#[derive(Debug, Default)]
pub struct MetaCacheEntriesSorted {
pub o: MetaCacheEntries,
pub list_id: Option<String>,
pub reuse: bool,
pub last_skipped_entry: Option<String>,
}
impl MetaCacheEntriesSorted {
pub fn entries(&self) -> Vec<&MetaCacheEntry> {
let entries: Vec<&MetaCacheEntry> = self.o.0.iter().flatten().collect();
entries
}
pub fn forward_past(&mut self, marker: Option<String>) {
if let Some(val) = marker
&& let Some(idx) = self.o.0.iter().flatten().position(|v| v.name > val)
{
self.o.0 = self.o.0.split_off(idx);
}
}
}
const METACACHE_STREAM_VERSION: u8 = 2;
#[derive(Debug)]
pub struct MetacacheWriter<W> {
wr: W,
created: bool,
buf: Vec<u8>,
}
impl<W: AsyncWrite + Unpin> MetacacheWriter<W> {
pub fn new(wr: W) -> Self {
Self {
wr,
created: false,
buf: Vec::new(),
}
}
pub async fn flush(&mut self) -> Result<()> {
self.wr.write_all(&self.buf).await?;
self.buf.clear();
Ok(())
}
pub async fn init(&mut self) -> Result<()> {
if !self.created {
rmp::encode::write_u8(&mut self.buf, METACACHE_STREAM_VERSION).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
self.created = true;
}
Ok(())
}
pub async fn write(&mut self, objs: &[MetaCacheEntry]) -> Result<()> {
if objs.is_empty() {
return Ok(());
}
self.init().await?;
for obj in objs.iter() {
if obj.name.is_empty() {
return Err(Error::other("metacacheWriter: no name"));
}
self.write_obj(obj).await?;
}
Ok(())
}
pub async fn write_obj(&mut self, obj: &MetaCacheEntry) -> Result<()> {
self.init().await?;
rmp::encode::write_bool(&mut self.buf, true).map_err(|e| Error::other(format!("{e:?}")))?;
rmp::encode::write_str(&mut self.buf, &obj.name).map_err(|e| Error::other(format!("{e:?}")))?;
rmp::encode::write_bin(&mut self.buf, &obj.metadata).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
Ok(())
}
pub async fn close(&mut self) -> Result<()> {
rmp::encode::write_bool(&mut self.buf, false).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
Ok(())
}
}
pub struct MetacacheReader<R> {
rd: R,
init: bool,
err: Option<Error>,
buf: Vec<u8>,
offset: usize,
current: Option<MetaCacheEntry>,
}
impl<R: AsyncRead + Unpin> MetacacheReader<R> {
pub fn new(rd: R) -> Self {
Self {
rd,
init: false,
err: None,
buf: Vec::new(),
offset: 0,
current: None,
}
}
pub async fn read_more(&mut self, read_size: usize) -> Result<&[u8]> {
let ext_size = read_size + self.offset;
let extra = ext_size - self.offset;
if self.buf.capacity() >= ext_size {
// Extend the buffer if we have enough space.
self.buf.resize(ext_size, 0);
} else {
self.buf.extend(vec![0u8; extra]);
}
let pref = self.offset;
self.rd.read_exact(&mut self.buf[pref..ext_size]).await?;
self.offset += read_size;
let data = &self.buf[pref..ext_size];
Ok(data)
}
fn reset(&mut self) {
self.buf.clear();
self.offset = 0;
}
async fn check_init(&mut self) -> Result<()> {
if !self.init {
let ver = match rmp::decode::read_u8(&mut self.read_more(2).await?) {
Ok(res) => res,
Err(err) => {
self.err = Some(Error::other(format!("{err:?}")));
0
}
};
match ver {
1 | 2 => (),
_ => {
self.err = Some(Error::other("invalid version"));
}
}
self.init = true;
}
Ok(())
}
async fn read_str_len(&mut self) -> Result<u32> {
let mark = match rmp::decode::read_marker(&mut self.read_more(1).await?) {
Ok(res) => res,
Err(err) => {
let err: Error = err.into();
self.err = Some(err.clone());
return Err(err);
}
};
match mark {
Marker::FixStr(size) => Ok(u32::from(size)),
Marker::Str8 => Ok(u32::from(self.read_u8().await?)),
Marker::Str16 => Ok(u32::from(self.read_u16().await?)),
Marker::Str32 => Ok(self.read_u32().await?),
_marker => Err(Error::other("str marker err")),
}
}
async fn read_bin_len(&mut self) -> Result<u32> {
let mark = match rmp::decode::read_marker(&mut self.read_more(1).await?) {
Ok(res) => res,
Err(err) => {
let err: Error = err.into();
self.err = Some(err.clone());
return Err(err);
}
};
match mark {
Marker::Bin8 => Ok(u32::from(self.read_u8().await?)),
Marker::Bin16 => Ok(u32::from(self.read_u16().await?)),
Marker::Bin32 => Ok(self.read_u32().await?),
_ => Err(Error::other("bin marker err")),
}
}
async fn read_u8(&mut self) -> Result<u8> {
let buf = self.read_more(1).await?;
Ok(u8::from_be_bytes(buf.try_into().expect("Slice with incorrect length")))
}
async fn read_u16(&mut self) -> Result<u16> {
let buf = self.read_more(2).await?;
Ok(u16::from_be_bytes(buf.try_into().expect("Slice with incorrect length")))
}
async fn read_u32(&mut self) -> Result<u32> {
let buf = self.read_more(4).await?;
Ok(u32::from_be_bytes(buf.try_into().expect("Slice with incorrect length")))
}
pub async fn skip(&mut self, size: usize) -> Result<()> {
self.check_init().await?;
if let Some(err) = &self.err {
return Err(err.clone());
}
let mut n = size;
if self.current.is_some() {
n -= 1;
self.current = None;
}
while n > 0 {
match rmp::decode::read_bool(&mut self.read_more(1).await?) {
Ok(res) => {
if !res {
return Ok(());
}
}
Err(err) => {
let err: Error = err.into();
self.err = Some(err.clone());
return Err(err);
}
};
let l = self.read_str_len().await?;
let _ = self.read_more(l as usize).await?;
let l = self.read_bin_len().await?;
let _ = self.read_more(l as usize).await?;
n -= 1;
}
Ok(())
}
pub async fn peek(&mut self) -> Result<Option<MetaCacheEntry>> {
self.check_init().await?;
if let Some(err) = &self.err {
return Err(err.clone());
}
match rmp::decode::read_bool(&mut self.read_more(1).await?) {
Ok(res) => {
if !res {
return Ok(None);
}
}
Err(err) => {
let err: Error = err.into();
self.err = Some(err.clone());
return Err(err);
}
};
let l = self.read_str_len().await?;
let buf = self.read_more(l as usize).await?;
let name_buf = buf.to_vec();
let name = match from_utf8(&name_buf) {
Ok(decoded) => decoded.to_owned(),
Err(err) => {
self.err = Some(Error::other(err.to_string()));
return Err(Error::other(err.to_string()));
}
};
let l = self.read_bin_len().await?;
let buf = self.read_more(l as usize).await?;
let metadata = buf.to_vec();
self.reset();
let entry = Some(MetaCacheEntry {
name,
metadata,
cached: None,
reusable: false,
});
self.current = entry.clone();
Ok(entry)
}
pub async fn read_all(&mut self) -> Result<Vec<MetaCacheEntry>> {
let mut ret = Vec::new();
loop {
if let Some(entry) = self.peek().await? {
ret.push(entry);
continue;
}
break;
}
Ok(ret)
}
}
pub type UpdateFn<T> = Box<dyn Fn() -> Pin<Box<dyn Future<Output = std::io::Result<T>> + Send>> + Send + Sync + 'static>;
#[derive(Clone, Debug, Default)]
pub struct Opts {
pub return_last_good: bool,
pub no_wait: bool,
}
pub struct Cache<T: Clone + Debug + Send> {
update_fn: UpdateFn<T>,
ttl: Duration,
opts: Opts,
val: AtomicPtr<T>,
last_update_ms: AtomicU64,
updating: Arc<Mutex<bool>>,
}
impl<T: Clone + Debug + Send + 'static> Cache<T> {
pub fn new(update_fn: UpdateFn<T>, ttl: Duration, opts: Opts) -> Self {
let val = AtomicPtr::new(ptr::null_mut());
Self {
update_fn,
ttl,
opts,
val,
last_update_ms: AtomicU64::new(0),
updating: Arc::new(Mutex::new(false)),
}
}
#[allow(unsafe_code)]
pub async fn get(self: Arc<Self>) -> std::io::Result<T> {
let v_ptr = self.val.load(AtomicOrdering::SeqCst);
let v = if v_ptr.is_null() {
None
} else {
Some(unsafe { (*v_ptr).clone() })
};
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs();
if now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs()
&& let Some(v) = v
{
return Ok(v);
}
if self.opts.no_wait
&& now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() * 2
&& let Some(value) = v
{
if self.updating.try_lock().is_ok() {
let this = Arc::clone(&self);
spawn(async move {
let _ = this.update().await;
});
}
return Ok(value);
}
let _ = self.updating.lock().await;
if let (Ok(duration), Some(value)) = (
SystemTime::now().duration_since(UNIX_EPOCH + Duration::from_secs(self.last_update_ms.load(AtomicOrdering::SeqCst))),
v,
) && duration < self.ttl
{
return Ok(value);
}
match self.update().await {
Ok(_) => {
let v_ptr = self.val.load(AtomicOrdering::SeqCst);
let v = if v_ptr.is_null() {
None
} else {
Some(unsafe { (*v_ptr).clone() })
};
Ok(v.unwrap())
}
Err(err) => Err(err),
}
}
#[allow(unsafe_code)]
async fn update(&self) -> std::io::Result<()> {
match (self.update_fn)().await {
Ok(val) => {
let old = self.val.swap(Box::into_raw(Box::new(val)), AtomicOrdering::SeqCst);
if !old.is_null() {
unsafe {
drop(Box::from_raw(old));
}
}
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs();
self.last_update_ms.store(now, AtomicOrdering::SeqCst);
Ok(())
}
Err(err) => {
let v_ptr = self.val.load(AtomicOrdering::SeqCst);
if self.opts.return_last_good && !v_ptr.is_null() {
return Ok(());
}
Err(err)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
#[tokio::test]
async fn test_writer() {
let mut f = Cursor::new(Vec::new());
let mut w = MetacacheWriter::new(&mut f);
let mut objs = Vec::new();
for i in 0..10 {
let info = MetaCacheEntry {
name: format!("item{i}"),
metadata: vec![0u8, 10],
cached: None,
reusable: false,
};
objs.push(info);
}
w.write(&objs).await.unwrap();
w.close().await.unwrap();
let data = f.into_inner();
let nf = Cursor::new(data);
let mut r = MetacacheReader::new(nf);
let nobjs = r.read_all().await.unwrap();
assert_eq!(objs, nobjs);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/fileinfo.rs | crates/filemeta/src/fileinfo.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, ReplicationState, ReplicationStatusType, Result, TRANSITION_COMPLETE, VersionPurgeStatusType};
use bytes::Bytes;
use rmp_serde::Serializer;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::http::headers::{RESERVED_METADATA_PREFIX_LOWER, RUSTFS_HEALING};
use s3s::dto::{RestoreStatus, Timestamp};
use s3s::header::X_AMZ_RESTORE;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
use uuid::Uuid;
pub const ERASURE_ALGORITHM: &str = "rs-vandermonde";
pub const BLOCK_SIZE_V2: usize = 1024 * 1024; // 1M
// Additional constants from Go version
pub const NULL_VERSION_ID: &str = "null";
// pub const RUSTFS_ERASURE_UPGRADED: &str = "x-rustfs-internal-erasure-upgraded";
pub const TIER_FV_ID: &str = "tier-free-versionID";
pub const TIER_FV_MARKER: &str = "tier-free-marker";
pub const TIER_SKIP_FV_ID: &str = "tier-skip-fvid";
const ERR_RESTORE_HDR_MALFORMED: &str = "x-amz-restore header malformed";
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
pub struct ObjectPartInfo {
pub etag: String,
pub number: usize,
pub size: usize,
pub actual_size: i64, // Original data size
pub mod_time: Option<OffsetDateTime>,
// Index holds the index of the part in the erasure coding
pub index: Option<Bytes>,
// Checksums holds checksums of the part
pub checksums: Option<HashMap<String, String>>,
pub error: Option<String>,
}
impl ObjectPartInfo {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut Serializer::new(&mut buf))?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: ObjectPartInfo = rmp_serde::from_slice(buf)?;
Ok(t)
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)]
// ChecksumInfo - carries checksums of individual scattered parts per disk.
pub struct ChecksumInfo {
pub part_number: usize,
pub algorithm: HashAlgorithm,
pub hash: Bytes,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)]
pub enum ErasureAlgo {
#[default]
Invalid = 0,
ReedSolomon = 1,
}
impl ErasureAlgo {
pub fn valid(&self) -> bool {
*self > ErasureAlgo::Invalid
}
pub fn to_u8(&self) -> u8 {
match self {
ErasureAlgo::Invalid => 0,
ErasureAlgo::ReedSolomon => 1,
}
}
pub fn from_u8(u: u8) -> Self {
match u {
1 => ErasureAlgo::ReedSolomon,
_ => ErasureAlgo::Invalid,
}
}
}
impl std::fmt::Display for ErasureAlgo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErasureAlgo::Invalid => write!(f, "Invalid"),
ErasureAlgo::ReedSolomon => write!(f, "{ERASURE_ALGORITHM}"),
}
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)]
// ErasureInfo holds erasure coding and bitrot related information.
pub struct ErasureInfo {
// Algorithm is the String representation of erasure-coding-algorithm
pub algorithm: String,
// DataBlocks is the number of data blocks for erasure-coding
pub data_blocks: usize,
// ParityBlocks is the number of parity blocks for erasure-coding
pub parity_blocks: usize,
// BlockSize is the size of one erasure-coded block
pub block_size: usize,
// Index is the index of the current disk
pub index: usize,
// Distribution is the distribution of the data and parity blocks
pub distribution: Vec<usize>,
// Checksums holds all bitrot checksums of all erasure encoded blocks
pub checksums: Vec<ChecksumInfo>,
}
pub fn calc_shard_size(block_size: usize, data_shards: usize) -> usize {
(block_size.div_ceil(data_shards) + 1) & !1
}
impl ErasureInfo {
pub fn get_checksum_info(&self, part_number: usize) -> ChecksumInfo {
for sum in &self.checksums {
if sum.part_number == part_number {
return sum.clone();
}
}
ChecksumInfo {
algorithm: HashAlgorithm::HighwayHash256S,
..Default::default()
}
}
/// Calculate the size of each shard.
pub fn shard_size(&self) -> usize {
calc_shard_size(self.block_size, self.data_blocks)
}
/// Calculate the total erasure file size for a given original size.
// Returns the final erasure size from the original size
pub fn shard_file_size(&self, total_length: i64) -> i64 {
if total_length == 0 {
return 0;
}
if total_length < 0 {
return total_length;
}
let total_length = total_length as usize;
let num_shards = total_length / self.block_size;
let last_block_size = total_length % self.block_size;
let last_shard_size = calc_shard_size(last_block_size, self.data_blocks);
(num_shards * self.shard_size() + last_shard_size) as i64
}
/// Check if this ErasureInfo equals another ErasureInfo
pub fn equals(&self, other: &ErasureInfo) -> bool {
self.algorithm == other.algorithm
&& self.data_blocks == other.data_blocks
&& self.parity_blocks == other.parity_blocks
&& self.block_size == other.block_size
&& self.index == other.index
&& self.distribution == other.distribution
}
}
// #[derive(Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
pub struct FileInfo {
pub volume: String,
pub name: String,
pub version_id: Option<Uuid>,
pub is_latest: bool,
pub deleted: bool,
pub transition_status: String,
pub transitioned_objname: String,
pub transition_tier: String,
pub transition_version_id: Option<Uuid>,
pub expire_restored: bool,
pub data_dir: Option<Uuid>,
pub mod_time: Option<OffsetDateTime>,
pub size: i64,
// File mode bits
pub mode: Option<u32>,
// WrittenByVersion is the unix time stamp of the version that created this version of the object
pub written_by_version: Option<u64>,
pub metadata: HashMap<String, String>,
pub parts: Vec<ObjectPartInfo>,
pub erasure: ErasureInfo,
// MarkDeleted marks this version as deleted
pub mark_deleted: bool,
// ReplicationState - Internal replication state to be passed back in ObjectInfo
pub replication_state_internal: Option<ReplicationState>,
pub data: Option<Bytes>,
pub num_versions: usize,
pub successor_mod_time: Option<OffsetDateTime>,
pub fresh: bool,
pub idx: usize,
// Combined checksum when object was uploaded
pub checksum: Option<Bytes>,
pub versioned: bool,
}
impl FileInfo {
pub fn new(object: &str, data_blocks: usize, parity_blocks: usize) -> Self {
let indices = {
let cardinality = data_blocks + parity_blocks;
let mut nums = vec![0; cardinality];
let key_crc = {
let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
hasher.update(object.as_bytes());
hasher.finalize() as u32
};
let start = key_crc as usize % cardinality;
for i in 1..=cardinality {
nums[i - 1] = 1 + ((start + i) % cardinality);
}
nums
};
Self {
erasure: ErasureInfo {
algorithm: String::from(ERASURE_ALGORITHM),
data_blocks,
parity_blocks,
block_size: BLOCK_SIZE_V2,
distribution: indices,
..Default::default()
},
..Default::default()
}
}
pub fn is_valid(&self) -> bool {
if self.deleted {
return true;
}
let data_blocks = self.erasure.data_blocks;
let parity_blocks = self.erasure.parity_blocks;
(data_blocks >= parity_blocks)
&& (data_blocks > 0)
&& (self.erasure.index > 0
&& self.erasure.index <= data_blocks + parity_blocks
&& self.erasure.distribution.len() == (data_blocks + parity_blocks))
}
pub fn get_etag(&self) -> Option<String> {
self.metadata.get("etag").cloned()
}
pub fn write_quorum(&self, quorum: usize) -> usize {
if self.deleted {
return quorum;
}
if self.erasure.data_blocks == self.erasure.parity_blocks {
return self.erasure.data_blocks + 1;
}
self.erasure.data_blocks
}
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut Serializer::new(&mut buf))?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: FileInfo = rmp_serde::from_slice(buf)?;
Ok(t)
}
#[allow(clippy::too_many_arguments)]
pub fn add_object_part(
&mut self,
num: usize,
etag: String,
part_size: usize,
mod_time: Option<OffsetDateTime>,
actual_size: i64,
index: Option<Bytes>,
checksums: Option<HashMap<String, String>>,
) {
let part = ObjectPartInfo {
etag,
number: num,
size: part_size,
mod_time,
actual_size,
index,
checksums,
error: None,
};
for p in self.parts.iter_mut() {
if p.number == num {
*p = part;
return;
}
}
self.parts.push(part);
self.parts.sort_by(|a, b| a.number.cmp(&b.number));
}
// to_part_offset gets the part index where offset is located, returns part index and offset
pub fn to_part_offset(&self, offset: usize) -> Result<(usize, usize)> {
if offset == 0 {
return Ok((0, 0));
}
let mut part_offset = offset;
for (i, part) in self.parts.iter().enumerate() {
let part_index = i;
if part_offset < part.size {
return Ok((part_index, part_offset));
}
part_offset -= part.size
}
Err(Error::other("part not found"))
}
pub fn set_healing(&mut self) {
self.metadata.insert(RUSTFS_HEALING.to_string(), "true".to_string());
}
pub fn set_tier_free_version_id(&mut self, version_id: &str) {
self.metadata
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}"), version_id.to_string());
}
pub fn tier_free_version_id(&self) -> String {
self.metadata[&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}")].clone()
}
pub fn set_tier_free_version(&mut self) {
self.metadata
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}"), "".to_string());
}
pub fn set_skip_tier_free_version(&mut self) {
self.metadata
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_SKIP_FV_ID}"), "".to_string());
}
pub fn skip_tier_free_version(&self) -> bool {
self.metadata
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_SKIP_FV_ID}"))
}
pub fn tier_free_version(&self) -> bool {
self.metadata
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}"))
}
pub fn set_inline_data(&mut self) {
self.metadata
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").to_owned(), "true".to_owned());
}
pub fn set_data_moved(&mut self) {
self.metadata
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}data-moved").to_owned(), "true".to_owned());
}
pub fn inline_data(&self) -> bool {
self.metadata
.contains_key(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").as_str())
&& !self.is_remote()
}
/// Check if the object is compressed
pub fn is_compressed(&self) -> bool {
self.metadata
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
}
/// Check if the object is remote (transitioned to another tier)
pub fn is_remote(&self) -> bool {
if self.transition_status != TRANSITION_COMPLETE {
return false;
}
!is_restored_object_on_disk(&self.metadata)
}
/// Get the data directory for this object
pub fn get_data_dir(&self) -> String {
if self.deleted {
return "delete-marker".to_string();
}
self.data_dir.map_or("".to_string(), |dir| dir.to_string())
}
/// Read quorum returns expected read quorum for this FileInfo
pub fn read_quorum(&self, dquorum: usize) -> usize {
if self.deleted {
return dquorum;
}
self.erasure.data_blocks
}
/// Create a shallow copy with minimal information for READ MRF checks
pub fn shallow_copy(&self) -> Self {
Self {
volume: self.volume.clone(),
name: self.name.clone(),
version_id: self.version_id,
deleted: self.deleted,
erasure: self.erasure.clone(),
..Default::default()
}
}
/// Check if this FileInfo equals another FileInfo
pub fn equals(&self, other: &FileInfo) -> bool {
// Check if both are compressed or both are not compressed
if self.is_compressed() != other.is_compressed() {
return false;
}
// Check transition info
if !self.transition_info_equals(other) {
return false;
}
// Check mod time
if self.mod_time != other.mod_time {
return false;
}
// Check erasure info
self.erasure.equals(&other.erasure)
}
/// Check if transition related information are equal
pub fn transition_info_equals(&self, other: &FileInfo) -> bool {
self.transition_status == other.transition_status
&& self.transition_tier == other.transition_tier
&& self.transitioned_objname == other.transitioned_objname
&& self.transition_version_id == other.transition_version_id
}
/// Check if metadata maps are equal
pub fn metadata_equals(&self, other: &FileInfo) -> bool {
if self.metadata.len() != other.metadata.len() {
return false;
}
for (k, v) in &self.metadata {
if other.metadata.get(k) != Some(v) {
return false;
}
}
true
}
/// Check if replication related fields are equal
pub fn replication_info_equals(&self, other: &FileInfo) -> bool {
self.mark_deleted == other.mark_deleted
// TODO: Add replication_state comparison when implemented
// && self.replication_state == other.replication_state
}
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_version_purge_status())
.unwrap_or(VersionPurgeStatusType::Empty)
}
pub fn replication_status(&self) -> ReplicationStatusType {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
}
pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
if self.deleted {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
} else {
ReplicationStatusType::Empty
}
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct FileInfoVersions {
// Name of the volume.
pub volume: String,
// Name of the file.
pub name: String,
// Represents the latest mod time of the
// latest version.
pub latest_mod_time: Option<OffsetDateTime>,
pub versions: Vec<FileInfo>,
pub free_versions: Vec<FileInfo>,
}
impl FileInfoVersions {
pub fn find_version_index(&self, vid: Uuid) -> Option<usize> {
self.versions.iter().position(|v| v.version_id == Some(vid))
}
/// Calculate the total size of all versions for this object
pub fn size(&self) -> i64 {
self.versions.iter().map(|v| v.size).sum()
}
}
#[derive(Default, Serialize, Deserialize)]
pub struct RawFileInfo {
pub buf: Vec<u8>,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct FilesInfo {
pub files: Vec<FileInfo>,
pub is_truncated: bool,
}
pub trait RestoreStatusOps {
fn expiry(&self) -> Option<OffsetDateTime>;
fn on_going(&self) -> bool;
fn on_disk(&self) -> bool;
fn to_string(&self) -> String;
}
impl RestoreStatusOps for RestoreStatus {
fn expiry(&self) -> Option<OffsetDateTime> {
if self.on_going() {
return None;
}
self.restore_expiry_date.clone().map(OffsetDateTime::from)
}
fn on_going(&self) -> bool {
if let Some(on_going) = self.is_restore_in_progress {
return on_going;
}
false
}
fn on_disk(&self) -> bool {
let expiry = self.expiry();
if let Some(expiry0) = expiry
&& OffsetDateTime::now_utc().unix_timestamp() < expiry0.unix_timestamp()
{
return true;
}
false
}
fn to_string(&self) -> String {
if self.on_going() {
return "ongoing-request=\"true\"".to_string();
}
format!(
"ongoing-request=\"false\", expiry-date=\"{}\"",
OffsetDateTime::from(self.restore_expiry_date.clone().unwrap())
.format(&Rfc3339)
.unwrap()
)
}
}
fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
let tokens: Vec<&str> = restore_hdr.splitn(2, ",").collect();
let progress_tokens: Vec<&str> = tokens[0].splitn(2, "=").collect();
if progress_tokens.len() != 2 {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}
if progress_tokens[0].trim() != "ongoing-request" {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}
match progress_tokens[1] {
"true" | "\"true\"" => {
if tokens.len() == 1 {
return Ok(RestoreStatus {
is_restore_in_progress: Some(true),
..Default::default()
});
}
}
"false" | "\"false\"" => {
if tokens.len() != 2 {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}
let expiry_tokens: Vec<&str> = tokens[1].splitn(2, "=").collect();
if expiry_tokens.len() != 2 {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}
if expiry_tokens[0].trim() != "expiry-date" {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}
let expiry = OffsetDateTime::parse(expiry_tokens[1].trim_matches('"'), &Rfc3339).unwrap();
/*if err != nil {
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
}*/
return Ok(RestoreStatus {
is_restore_in_progress: Some(false),
restore_expiry_date: Some(Timestamp::from(expiry)),
});
}
_ => (),
}
Err(Error::other(ERR_RESTORE_HDR_MALFORMED))
}
pub fn is_restored_object_on_disk(meta: &HashMap<String, String>) -> bool {
if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str())
&& let Ok(restore_status) = parse_restore_obj_status(restore_hdr)
{
return restore_status.on_disk();
}
false
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/replication.rs | crates/filemeta/src/replication.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use core::fmt;
use regex::Regex;
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::HashMap;
use std::sync::LazyLock;
use std::time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
pub const REPLICATION_RESET: &str = "replication-reset";
pub const REPLICATION_STATUS: &str = "replication-status";
// ReplicateQueued - replication being queued trail
pub const REPLICATE_QUEUED: &str = "replicate:queue";
// ReplicateExisting - audit trail for existing objects replication
pub const REPLICATE_EXISTING: &str = "replicate:existing";
// ReplicateExistingDelete - audit trail for delete replication triggered for existing delete markers
pub const REPLICATE_EXISTING_DELETE: &str = "replicate:existing:delete";
// ReplicateMRF - audit trail for replication from Most Recent Failures (MRF) queue
pub const REPLICATE_MRF: &str = "replicate:mrf";
// ReplicateIncoming - audit trail of inline replication
pub const REPLICATE_INCOMING: &str = "replicate:incoming";
// ReplicateIncomingDelete - audit trail of inline replication of deletes.
pub const REPLICATE_INCOMING_DELETE: &str = "replicate:incoming:delete";
// ReplicateHeal - audit trail for healing of failed/pending replications
pub const REPLICATE_HEAL: &str = "replicate:heal";
// ReplicateHealDelete - audit trail of healing of failed/pending delete replications.
pub const REPLICATE_HEAL_DELETE: &str = "replicate:heal:delete";
/// StatusType of Replication for x-amz-replication-status header
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Hash)]
pub enum ReplicationStatusType {
/// Pending - replication is pending.
Pending,
/// Completed - replication completed ok.
Completed,
/// CompletedLegacy was called "COMPLETE" incorrectly.
CompletedLegacy,
/// Failed - replication failed.
Failed,
/// Replica - this is a replica.
Replica,
#[default]
Empty,
}
impl ReplicationStatusType {
/// Returns string representation of status
pub fn as_str(&self) -> &'static str {
match self {
ReplicationStatusType::Pending => "PENDING",
ReplicationStatusType::Completed => "COMPLETED",
ReplicationStatusType::CompletedLegacy => "COMPLETE",
ReplicationStatusType::Failed => "FAILED",
ReplicationStatusType::Replica => "REPLICA",
ReplicationStatusType::Empty => "",
}
}
pub fn is_empty(&self) -> bool {
matches!(self, ReplicationStatusType::Empty)
}
}
impl fmt::Display for ReplicationStatusType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationStatusType {
fn from(s: &str) -> Self {
match s {
"PENDING" => ReplicationStatusType::Pending,
"COMPLETED" => ReplicationStatusType::Completed,
"COMPLETE" => ReplicationStatusType::CompletedLegacy,
"FAILED" => ReplicationStatusType::Failed,
"REPLICA" => ReplicationStatusType::Replica,
_ => ReplicationStatusType::Empty,
}
}
}
impl From<VersionPurgeStatusType> for ReplicationStatusType {
fn from(status: VersionPurgeStatusType) -> Self {
match status {
VersionPurgeStatusType::Pending => ReplicationStatusType::Pending,
VersionPurgeStatusType::Complete => ReplicationStatusType::Completed,
VersionPurgeStatusType::Failed => ReplicationStatusType::Failed,
VersionPurgeStatusType::Empty => ReplicationStatusType::Empty,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum VersionPurgeStatusType {
Pending,
Complete,
Failed,
#[default]
Empty,
}
impl VersionPurgeStatusType {
/// Returns string representation of version purge status
pub fn as_str(&self) -> &'static str {
match self {
VersionPurgeStatusType::Pending => "PENDING",
VersionPurgeStatusType::Complete => "COMPLETE",
VersionPurgeStatusType::Failed => "FAILED",
VersionPurgeStatusType::Empty => "",
}
}
/// Returns true if the version is pending purge.
pub fn is_pending(&self) -> bool {
matches!(self, VersionPurgeStatusType::Pending | VersionPurgeStatusType::Failed)
}
pub fn is_empty(&self) -> bool {
matches!(self, VersionPurgeStatusType::Empty)
}
}
impl fmt::Display for VersionPurgeStatusType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for VersionPurgeStatusType {
fn from(s: &str) -> Self {
match s {
"PENDING" => VersionPurgeStatusType::Pending,
"COMPLETE" => VersionPurgeStatusType::Complete,
"FAILED" => VersionPurgeStatusType::Failed,
_ => VersionPurgeStatusType::Empty,
}
}
}
/// Type - replication type enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationType {
#[default]
Unset,
Object,
Delete,
Metadata,
Heal,
ExistingObject,
Resync,
All,
}
impl ReplicationType {
pub fn as_str(&self) -> &'static str {
match self {
ReplicationType::Unset => "",
ReplicationType::Object => "OBJECT",
ReplicationType::Delete => "DELETE",
ReplicationType::Metadata => "METADATA",
ReplicationType::Heal => "HEAL",
ReplicationType::ExistingObject => "EXISTING_OBJECT",
ReplicationType::Resync => "RESYNC",
ReplicationType::All => "ALL",
}
}
pub fn is_valid(&self) -> bool {
matches!(
self,
ReplicationType::Object
| ReplicationType::Delete
| ReplicationType::Metadata
| ReplicationType::Heal
| ReplicationType::ExistingObject
| ReplicationType::Resync
| ReplicationType::All
)
}
pub fn is_data_replication(&self) -> bool {
matches!(self, ReplicationType::Object | ReplicationType::Delete | ReplicationType::Heal)
}
}
impl fmt::Display for ReplicationType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationType {
fn from(s: &str) -> Self {
match s {
"UNSET" => ReplicationType::Unset,
"OBJECT" => ReplicationType::Object,
"DELETE" => ReplicationType::Delete,
"METADATA" => ReplicationType::Metadata,
"HEAL" => ReplicationType::Heal,
"EXISTING_OBJECT" => ReplicationType::ExistingObject,
"RESYNC" => ReplicationType::Resync,
"ALL" => ReplicationType::All,
_ => ReplicationType::Unset,
}
}
}
/// ReplicationState represents internal replication state
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)]
pub struct ReplicationState {
pub replica_timestamp: Option<OffsetDateTime>,
pub replica_status: ReplicationStatusType,
pub delete_marker: bool,
pub replication_timestamp: Option<OffsetDateTime>,
pub replication_status_internal: Option<String>,
pub version_purge_status_internal: Option<String>,
pub replicate_decision_str: String,
pub targets: HashMap<String, ReplicationStatusType>,
pub purge_targets: HashMap<String, VersionPurgeStatusType>,
pub reset_statuses_map: HashMap<String, String>,
}
impl ReplicationState {
pub fn new() -> Self {
Self::default()
}
/// Returns true if replication state is identical for version purge statuses and replication statuses
pub fn equal(&self, other: &ReplicationState) -> bool {
self.replica_status == other.replica_status
&& self.replication_status_internal == other.replication_status_internal
&& self.version_purge_status_internal == other.version_purge_status_internal
}
/// Returns overall replication status for the object version being replicated
pub fn composite_replication_status(&self) -> ReplicationStatusType {
if let Some(replication_status_internal) = &self.replication_status_internal {
match ReplicationStatusType::from(replication_status_internal.as_str()) {
ReplicationStatusType::Pending
| ReplicationStatusType::Completed
| ReplicationStatusType::Failed
| ReplicationStatusType::Replica => {
return ReplicationStatusType::from(replication_status_internal.as_str());
}
_ => {
let repl_status = get_composite_replication_status(&self.targets);
if self.replica_timestamp.is_none() {
return repl_status;
}
if repl_status == ReplicationStatusType::Completed
&& let (Some(replica_timestamp), Some(replication_timestamp)) =
(self.replica_timestamp, self.replication_timestamp)
&& replica_timestamp > replication_timestamp
{
return self.replica_status.clone();
}
return repl_status;
}
}
} else if self.replica_status != ReplicationStatusType::default() {
return self.replica_status.clone();
}
ReplicationStatusType::default()
}
/// Returns overall replication purge status for the permanent delete being replicated
pub fn composite_version_purge_status(&self) -> VersionPurgeStatusType {
match VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str()) {
VersionPurgeStatusType::Pending | VersionPurgeStatusType::Complete | VersionPurgeStatusType::Failed => {
VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str())
}
_ => get_composite_version_purge_status(&self.purge_targets),
}
}
/// Returns replicatedInfos struct initialized with the previous state of replication
pub fn target_state(&self, arn: &str) -> ReplicatedTargetInfo {
ReplicatedTargetInfo {
arn: arn.to_string(),
prev_replication_status: self.targets.get(arn).cloned().unwrap_or_default(),
version_purge_status: self.purge_targets.get(arn).cloned().unwrap_or_default(),
resync_timestamp: self.reset_statuses_map.get(arn).cloned().unwrap_or_default(),
..Default::default()
}
}
}
pub fn get_composite_replication_status(targets: &HashMap<String, ReplicationStatusType>) -> ReplicationStatusType {
if targets.is_empty() {
return ReplicationStatusType::Empty;
}
let mut completed = 0;
for status in targets.values() {
match status {
ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
ReplicationStatusType::Completed => completed += 1,
_ => {}
}
}
if completed == targets.len() {
ReplicationStatusType::Completed
} else {
ReplicationStatusType::Pending
}
}
pub fn get_composite_version_purge_status(targets: &HashMap<String, VersionPurgeStatusType>) -> VersionPurgeStatusType {
if targets.is_empty() {
return VersionPurgeStatusType::default();
}
let mut completed = 0;
for status in targets.values() {
match status {
VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
VersionPurgeStatusType::Complete => completed += 1,
_ => {}
}
}
if completed == targets.len() {
VersionPurgeStatusType::Complete
} else {
VersionPurgeStatusType::Pending
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationAction {
/// Replicate all data
All,
/// Replicate only metadata
Metadata,
/// Do not replicate
#[default]
None,
}
impl ReplicationAction {
/// Returns string representation of replication action
pub fn as_str(&self) -> &'static str {
match self {
ReplicationAction::All => "all",
ReplicationAction::Metadata => "metadata",
ReplicationAction::None => "none",
}
}
}
impl fmt::Display for ReplicationAction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationAction {
fn from(s: &str) -> Self {
match s {
"all" => ReplicationAction::All,
"metadata" => ReplicationAction::Metadata,
"none" => ReplicationAction::None,
_ => ReplicationAction::None,
}
}
}
/// ReplicatedTargetInfo struct represents replication info on a target
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicatedTargetInfo {
pub arn: String,
pub size: i64,
pub duration: Duration,
pub replication_action: ReplicationAction,
pub op_type: ReplicationType,
pub replication_status: ReplicationStatusType,
pub prev_replication_status: ReplicationStatusType,
pub version_purge_status: VersionPurgeStatusType,
pub resync_timestamp: String,
pub replication_resynced: bool,
pub endpoint: String,
pub secure: bool,
pub error: Option<String>,
}
impl ReplicatedTargetInfo {
/// Returns true for a target if arn is empty
pub fn is_empty(&self) -> bool {
self.arn.is_empty()
}
}
/// ReplicatedInfos struct contains replication information for multiple targets
#[derive(Debug, Clone)]
pub struct ReplicatedInfos {
pub replication_timestamp: Option<OffsetDateTime>,
pub targets: Vec<ReplicatedTargetInfo>,
}
impl ReplicatedInfos {
/// Returns the total size of completed replications
pub fn completed_size(&self) -> i64 {
let mut sz = 0i64;
for target in &self.targets {
if target.is_empty() {
continue;
}
if target.replication_status == ReplicationStatusType::Completed
&& target.prev_replication_status != ReplicationStatusType::Completed
{
sz += target.size;
}
}
sz
}
/// Returns true if replication was attempted on any of the targets for the object version queued
pub fn replication_resynced(&self) -> bool {
for target in &self.targets {
if target.is_empty() || !target.replication_resynced {
continue;
}
return true;
}
false
}
/// Returns internal representation of replication status for all targets
pub fn replication_status_internal(&self) -> Option<String> {
let mut result = String::new();
for target in &self.targets {
if target.is_empty() {
continue;
}
result.push_str(&format!("{}={};", target.arn, target.replication_status));
}
if result.is_empty() { None } else { Some(result) }
}
/// Returns overall replication status across all targets
pub fn replication_status(&self) -> ReplicationStatusType {
if self.targets.is_empty() {
return ReplicationStatusType::Empty;
}
let mut completed = 0;
for target in &self.targets {
match target.replication_status {
ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
ReplicationStatusType::Completed => completed += 1,
_ => {}
}
}
if completed == self.targets.len() {
ReplicationStatusType::Completed
} else {
ReplicationStatusType::Pending
}
}
/// Returns overall version purge status across all targets
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
if self.targets.is_empty() {
return VersionPurgeStatusType::Empty;
}
let mut completed = 0;
for target in &self.targets {
match target.version_purge_status {
VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
VersionPurgeStatusType::Complete => completed += 1,
_ => {}
}
}
if completed == self.targets.len() {
VersionPurgeStatusType::Complete
} else {
VersionPurgeStatusType::Pending
}
}
/// Returns internal representation of version purge status for all targets
pub fn version_purge_status_internal(&self) -> Option<String> {
let mut result = String::new();
for target in &self.targets {
if target.is_empty() || target.version_purge_status.is_empty() {
continue;
}
result.push_str(&format!("{}={};", target.arn, target.version_purge_status));
}
if result.is_empty() { None } else { Some(result) }
}
/// Returns replication action based on target that actually performed replication
pub fn action(&self) -> ReplicationAction {
for target in &self.targets {
if target.is_empty() {
continue;
}
// rely on replication action from target that actually performed replication now.
if target.prev_replication_status != ReplicationStatusType::Completed {
return target.replication_action;
}
}
ReplicationAction::None
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MrfReplicateEntry {
#[serde(rename = "bucket")]
pub bucket: String,
#[serde(rename = "object")]
pub object: String,
#[serde(skip_serializing, skip_deserializing)]
pub version_id: Option<Uuid>,
#[serde(rename = "retryCount")]
pub retry_count: i32,
#[serde(skip_serializing, skip_deserializing)]
pub size: i64,
}
pub trait ReplicationWorkerOperation: Any + Send + Sync {
fn to_mrf_entry(&self) -> MrfReplicateEntry;
fn as_any(&self) -> &dyn Any;
fn get_bucket(&self) -> &str;
fn get_object(&self) -> &str;
fn get_size(&self) -> i64;
fn is_delete_marker(&self) -> bool;
fn get_op_type(&self) -> ReplicationType;
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicateTargetDecision {
pub replicate: bool,
pub synchronous: bool,
pub arn: String,
pub id: String,
}
impl ReplicateTargetDecision {
pub fn new(arn: String, replicate: bool, sync: bool) -> Self {
Self {
replicate,
synchronous: sync,
arn,
id: String::new(),
}
}
}
impl fmt::Display for ReplicateTargetDecision {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{};{};{};{}", self.replicate, self.synchronous, self.arn, self.id)
}
}
/// ReplicateDecision represents replication decision for each target
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateDecision {
pub targets_map: HashMap<String, ReplicateTargetDecision>,
}
impl ReplicateDecision {
pub fn new() -> Self {
Self {
targets_map: HashMap::new(),
}
}
/// Returns true if at least one target qualifies for replication
pub fn replicate_any(&self) -> bool {
self.targets_map.values().any(|t| t.replicate)
}
/// Returns true if at least one target qualifies for synchronous replication
pub fn is_synchronous(&self) -> bool {
self.targets_map.values().any(|t| t.synchronous)
}
/// Updates ReplicateDecision with target's replication decision
pub fn set(&mut self, target: ReplicateTargetDecision) {
self.targets_map.insert(target.arn.clone(), target);
}
/// Returns a stringified representation of internal replication status with all targets marked as `PENDING`
pub fn pending_status(&self) -> Option<String> {
let mut result = String::new();
for target in self.targets_map.values() {
if target.replicate {
result.push_str(&format!("{}={};", target.arn, ReplicationStatusType::Pending.as_str()));
}
}
if result.is_empty() { None } else { Some(result) }
}
}
impl fmt::Display for ReplicateDecision {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut result = String::new();
for (key, value) in &self.targets_map {
result.push_str(&format!("{key}={value},"));
}
write!(f, "{}", result.trim_end_matches(','))
}
}
impl Default for ReplicateDecision {
fn default() -> Self {
Self::new()
}
}
// parse k-v pairs of target ARN to stringified ReplicateTargetDecision delimited by ',' into a
// ReplicateDecision struct
pub fn parse_replicate_decision(_bucket: &str, s: &str) -> std::io::Result<ReplicateDecision> {
let mut decision = ReplicateDecision::new();
if s.is_empty() {
return Ok(decision);
}
for p in s.split(',') {
if p.is_empty() {
continue;
}
let slc = p.split('=').collect::<Vec<&str>>();
if slc.len() != 2 {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("invalid replicate decision format: {s}"),
));
}
let tgt_str = slc[1].trim_matches('"');
let tgt = tgt_str.split(';').collect::<Vec<&str>>();
if tgt.len() != 4 {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("invalid replicate decision format: {s}"),
));
}
let tgt = ReplicateTargetDecision {
replicate: tgt[0] == "true",
synchronous: tgt[1] == "true",
arn: tgt[2].to_string(),
id: tgt[3].to_string(),
};
decision.targets_map.insert(slc[0].to_string(), tgt);
}
Ok(decision)
// r = ReplicateDecision{
// targetsMap: make(map[string]replicateTargetDecision),
// }
// if len(s) == 0 {
// return
// }
// for _, p := range strings.Split(s, ",") {
// if p == "" {
// continue
// }
// slc := strings.Split(p, "=")
// if len(slc) != 2 {
// return r, errInvalidReplicateDecisionFormat
// }
// tgtStr := strings.TrimSuffix(strings.TrimPrefix(slc[1], `"`), `"`)
// tgt := strings.Split(tgtStr, ";")
// if len(tgt) != 4 {
// return r, errInvalidReplicateDecisionFormat
// }
// r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
// }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateObjectInfo {
pub name: String,
pub size: i64,
pub actual_size: i64,
pub bucket: String,
pub version_id: Option<Uuid>,
pub etag: Option<String>,
pub mod_time: Option<OffsetDateTime>,
pub replication_status: ReplicationStatusType,
pub replication_status_internal: Option<String>,
pub delete_marker: bool,
pub version_purge_status_internal: Option<String>,
pub version_purge_status: VersionPurgeStatusType,
pub replication_state: Option<ReplicationState>,
pub op_type: ReplicationType,
pub event_type: String,
pub dsc: ReplicateDecision,
pub existing_obj_resync: ResyncDecision,
pub target_statuses: HashMap<String, ReplicationStatusType>,
pub target_purge_statuses: HashMap<String, VersionPurgeStatusType>,
pub replication_timestamp: Option<OffsetDateTime>,
pub ssec: bool,
pub user_tags: String,
pub checksum: Option<Bytes>,
pub retry_count: u32,
}
impl ReplicationWorkerOperation for ReplicateObjectInfo {
fn as_any(&self) -> &dyn Any {
self
}
fn to_mrf_entry(&self) -> MrfReplicateEntry {
MrfReplicateEntry {
bucket: self.bucket.clone(),
object: self.name.clone(),
version_id: self.version_id,
retry_count: self.retry_count as i32,
size: self.size,
}
}
fn get_bucket(&self) -> &str {
&self.bucket
}
fn get_object(&self) -> &str {
&self.name
}
fn get_size(&self) -> i64 {
self.size
}
fn is_delete_marker(&self) -> bool {
self.delete_marker
}
fn get_op_type(&self) -> ReplicationType {
self.op_type
}
}
static REPL_STATUS_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"([^=].*?)=([^,].*?);").unwrap());
impl ReplicateObjectInfo {
/// Returns replication status of a target
pub fn target_replication_status(&self, arn: &str) -> ReplicationStatusType {
let binding = self.replication_status_internal.clone().unwrap_or_default();
let captures = REPL_STATUS_REGEX.captures_iter(&binding);
for cap in captures {
if cap.len() == 3 && &cap[1] == arn {
return ReplicationStatusType::from(&cap[2]);
}
}
ReplicationStatusType::default()
}
/// Returns the relevant info needed by MRF
pub fn to_mrf_entry(&self) -> MrfReplicateEntry {
MrfReplicateEntry {
bucket: self.bucket.clone(),
object: self.name.clone(),
version_id: self.version_id,
retry_count: self.retry_count as i32,
size: self.size,
}
}
}
// constructs a replication status map from string representation
pub fn replication_statuses_map(s: &str) -> HashMap<String, ReplicationStatusType> {
let mut targets = HashMap::new();
let rep_stat_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
for (_, [arn, status]) in rep_stat_matches {
if arn.is_empty() {
continue;
}
let status = ReplicationStatusType::from(status);
targets.insert(arn.to_string(), status);
}
targets
}
// constructs a version purge status map from string representation
pub fn version_purge_statuses_map(s: &str) -> HashMap<String, VersionPurgeStatusType> {
let mut targets = HashMap::new();
let purge_status_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
for (_, [arn, status]) in purge_status_matches {
if arn.is_empty() {
continue;
}
let status = VersionPurgeStatusType::from(status);
targets.insert(arn.to_string(), status);
}
targets
}
pub fn get_replication_state(rinfos: &ReplicatedInfos, prev_state: &ReplicationState, _vid: Option<String>) -> ReplicationState {
let reset_status_map: Vec<(String, String)> = rinfos
.targets
.iter()
.filter(|v| !v.resync_timestamp.is_empty())
.map(|t| (target_reset_header(t.arn.as_str()), t.resync_timestamp.clone()))
.collect();
let repl_statuses = rinfos.replication_status_internal();
let vpurge_statuses = rinfos.version_purge_status_internal();
let mut reset_statuses_map = prev_state.reset_statuses_map.clone();
for (key, value) in reset_status_map {
reset_statuses_map.insert(key, value);
}
ReplicationState {
replicate_decision_str: prev_state.replicate_decision_str.clone(),
reset_statuses_map,
replica_timestamp: prev_state.replica_timestamp,
replica_status: prev_state.replica_status.clone(),
targets: replication_statuses_map(&repl_statuses.clone().unwrap_or_default()),
replication_status_internal: repl_statuses,
replication_timestamp: rinfos.replication_timestamp,
purge_targets: version_purge_statuses_map(&vpurge_statuses.clone().unwrap_or_default()),
version_purge_status_internal: vpurge_statuses,
..Default::default()
}
}
pub fn target_reset_header(arn: &str) -> String {
format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-{arn}")
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResyncTargetDecision {
pub replicate: bool,
pub reset_id: String,
pub reset_before_date: Option<OffsetDateTime>,
}
/// ResyncDecision is a struct representing a map with target's individual resync decisions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResyncDecision {
pub targets: HashMap<String, ResyncTargetDecision>,
}
impl ResyncDecision {
pub fn new() -> Self {
Self { targets: HashMap::new() }
}
/// Returns true if no targets with resync decision present
pub fn is_empty(&self) -> bool {
self.targets.is_empty()
}
pub fn must_resync(&self) -> bool {
self.targets.values().any(|v| v.replicate)
}
pub fn must_resync_target(&self, tgt_arn: &str) -> bool {
self.targets.get(tgt_arn).map(|v| v.replicate).unwrap_or(false)
}
}
impl Default for ResyncDecision {
fn default() -> Self {
Self::new()
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/filemeta_inline.rs | crates/filemeta/src/filemeta_inline.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};
use std::io::{Cursor, Read};
use uuid::Uuid;
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct InlineData(Vec<u8>);
const INLINE_DATA_VER: u8 = 1;
impl InlineData {
pub fn new() -> Self {
Self(Vec::new())
}
pub fn update(&mut self, buf: &[u8]) {
self.0 = buf.to_vec()
}
pub fn as_slice(&self) -> &[u8] {
self.0.as_slice()
}
pub fn version_ok(&self) -> bool {
if self.0.is_empty() {
return true;
}
self.0[0] > 0 && self.0[0] <= INLINE_DATA_VER
}
pub fn after_version(&self) -> &[u8] {
if self.0.is_empty() { &self.0 } else { &self.0[1..] }
}
pub fn entries(&self) -> Result<usize> {
if self.0.is_empty() || !self.version_ok() {
return Ok(0);
}
let buf = self.after_version();
let mut cur = Cursor::new(buf);
let fields_len = rmp::decode::read_map_len(&mut cur)?;
Ok(fields_len as usize)
}
pub fn find(&self, key: &str) -> Result<Option<Vec<u8>>> {
if self.0.is_empty() || !self.version_ok() {
return Ok(None);
}
let buf = self.after_version();
let mut cur = Cursor::new(buf);
let mut fields_len = rmp::decode::read_map_len(&mut cur)?;
while fields_len > 0 {
fields_len -= 1;
let str_len = rmp::decode::read_str_len(&mut cur)?;
let mut field_buff = vec![0u8; str_len as usize];
cur.read_exact(&mut field_buff)?;
let field = String::from_utf8(field_buff)?;
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
cur.set_position(end as u64);
if field.as_str() == key {
let buf = &buf[start..end];
return Ok(Some(buf.to_vec()));
}
}
Ok(None)
}
pub fn validate(&self) -> Result<()> {
if self.0.is_empty() {
return Ok(());
}
let mut cur = Cursor::new(self.after_version());
let mut fields_len = rmp::decode::read_map_len(&mut cur)?;
while fields_len > 0 {
fields_len -= 1;
let str_len = rmp::decode::read_str_len(&mut cur)?;
let mut field_buff = vec![0u8; str_len as usize];
cur.read_exact(&mut field_buff)?;
let field = String::from_utf8(field_buff)?;
if field.is_empty() {
return Err(Error::other("InlineData key empty"));
}
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
cur.set_position(end as u64);
}
Ok(())
}
pub fn replace(&mut self, key: &str, value: Vec<u8>) -> Result<()> {
if self.after_version().is_empty() {
let mut keys = Vec::with_capacity(1);
let mut values = Vec::with_capacity(1);
keys.push(key.to_owned());
values.push(value);
return self.serialize(keys, values);
}
let buf = self.after_version();
let mut cur = Cursor::new(buf);
let mut fields_len = rmp::decode::read_map_len(&mut cur)? as usize;
let mut keys = Vec::with_capacity(fields_len + 1);
let mut values = Vec::with_capacity(fields_len + 1);
let mut replaced = false;
while fields_len > 0 {
fields_len -= 1;
let str_len = rmp::decode::read_str_len(&mut cur)?;
let mut field_buff = vec![0u8; str_len as usize];
cur.read_exact(&mut field_buff)?;
let find_key = String::from_utf8(field_buff)?;
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
cur.set_position(end as u64);
let find_value = &buf[start..end];
if find_key.as_str() == key {
values.push(value.clone());
replaced = true
} else {
values.push(find_value.to_vec());
}
keys.push(find_key);
}
if !replaced {
keys.push(key.to_owned());
values.push(value);
}
self.serialize(keys, values)
}
pub fn remove(&mut self, remove_keys: Vec<Uuid>) -> Result<bool> {
let buf = self.after_version();
if buf.is_empty() {
return Ok(false);
}
let mut cur = Cursor::new(buf);
let mut fields_len = rmp::decode::read_map_len(&mut cur)? as usize;
let mut keys = Vec::with_capacity(fields_len + 1);
let mut values = Vec::with_capacity(fields_len + 1);
let remove_key = |found_key: &str| {
for key in remove_keys.iter() {
if key.to_string().as_str() == found_key {
return true;
}
}
false
};
let mut found = false;
while fields_len > 0 {
fields_len -= 1;
let str_len = rmp::decode::read_str_len(&mut cur)?;
let mut field_buff = vec![0u8; str_len as usize];
cur.read_exact(&mut field_buff)?;
let find_key = String::from_utf8(field_buff)?;
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
cur.set_position(end as u64);
let find_value = &buf[start..end];
if !remove_key(&find_key) {
values.push(find_value.to_vec());
keys.push(find_key);
} else {
found = true;
}
}
if !found {
return Ok(false);
}
if keys.is_empty() {
self.0 = Vec::new();
return Ok(true);
}
self.serialize(keys, values)?;
Ok(true)
}
fn serialize(&mut self, keys: Vec<String>, values: Vec<Vec<u8>>) -> Result<()> {
assert_eq!(keys.len(), values.len(), "InlineData serialize: keys/values not match");
if keys.is_empty() {
self.0 = Vec::new();
return Ok(());
}
let mut wr = Vec::new();
wr.push(INLINE_DATA_VER);
let map_len = keys.len();
rmp::encode::write_map_len(&mut wr, map_len as u32)?;
for i in 0..map_len {
rmp::encode::write_str(&mut wr, keys[i].as_str())?;
rmp::encode::write_bin(&mut wr, values[i].as_slice())?;
}
self.0 = wr;
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/src/filemeta.rs | crates/filemeta/src/filemeta.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
ErasureAlgo, ErasureInfo, Error, FileInfo, FileInfoVersions, InlineData, ObjectPartInfo, RawFileInfo, ReplicationState,
ReplicationStatusType, Result, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
};
use byteorder::ByteOrder;
use bytes::Bytes;
use rustfs_utils::http::AMZ_BUCKET_REPLICATION_STATUS;
use rustfs_utils::http::headers::{
self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_RESTORE_EXPIRY_DAYS,
AMZ_RESTORE_REQUEST_DATE, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER,
VERSION_PURGE_STATUS_KEY,
};
use s3s::header::X_AMZ_RESTORE;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::convert::TryFrom;
use std::hash::Hasher;
use std::io::{Read, Write};
use std::{collections::HashMap, io::Cursor};
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::io::AsyncRead;
use tracing::{error, warn};
use uuid::Uuid;
use xxhash_rust::xxh64;
// XL header specifies the format
pub static XL_FILE_HEADER: [u8; 4] = [b'X', b'L', b'2', b' '];
// pub static XL_FILE_VERSION_CURRENT: [u8; 4] = [0; 4];
// Current version being written.
// static XL_FILE_VERSION: [u8; 4] = [1, 0, 3, 0];
static XL_FILE_VERSION_MAJOR: u16 = 1;
static XL_FILE_VERSION_MINOR: u16 = 3;
static XL_HEADER_VERSION: u8 = 3;
pub static XL_META_VERSION: u8 = 2;
static XXHASH_SEED: u64 = 0;
const XL_FLAG_FREE_VERSION: u8 = 1 << 0;
// const XL_FLAG_USES_DATA_DIR: u8 = 1 << 1;
const _XL_FLAG_INLINE_DATA: u8 = 1 << 2;
const META_DATA_READ_DEFAULT: usize = 4 << 10;
const MSGP_UINT32_SIZE: usize = 5;
pub const TRANSITION_COMPLETE: &str = "complete";
pub const TRANSITION_PENDING: &str = "pending";
pub const FREE_VERSION: &str = "free-version";
pub const TRANSITION_STATUS: &str = "transition-status";
pub const TRANSITIONED_OBJECTNAME: &str = "transitioned-object";
pub const TRANSITIONED_VERSION_ID: &str = "transitioned-versionID";
pub const TRANSITION_TIER: &str = "transition-tier";
// type ScanHeaderVersionFn = Box<dyn Fn(usize, &[u8], &[u8]) -> Result<()>>;
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct FileMeta {
pub versions: Vec<FileMetaShallowVersion>,
pub data: InlineData, // TODO: xlMetaInlineData
pub meta_ver: u8,
}
impl FileMeta {
pub fn new() -> Self {
Self {
meta_ver: XL_META_VERSION,
data: InlineData::new(),
..Default::default()
}
}
pub fn is_xl2_v1_format(buf: &[u8]) -> bool {
!matches!(Self::check_xl2_v1(buf), Err(_e))
}
pub fn load(buf: &[u8]) -> Result<FileMeta> {
let mut xl = FileMeta::default();
xl.unmarshal_msg(buf)?;
Ok(xl)
}
pub fn check_xl2_v1(buf: &[u8]) -> Result<(&[u8], u16, u16)> {
if buf.len() < 8 {
return Err(Error::other("xl file header not exists"));
}
if buf[0..4] != XL_FILE_HEADER {
return Err(Error::other("xl file header err"));
}
let major = byteorder::LittleEndian::read_u16(&buf[4..6]);
let minor = byteorder::LittleEndian::read_u16(&buf[6..8]);
if major > XL_FILE_VERSION_MAJOR {
return Err(Error::other("xl file version err"));
}
Ok((&buf[8..], major, minor))
}
// Returns (meta, inline_data)
pub fn is_indexed_meta(buf: &[u8]) -> Result<(&[u8], &[u8])> {
let (buf, major, minor) = Self::check_xl2_v1(buf)?;
if major != 1 || minor < 3 {
return Ok((&[], &[]));
}
let (mut size_buf, buf) = buf.split_at(5);
// Get meta data, buf = crc + data
let bin_len = rmp::decode::read_bin_len(&mut size_buf)?;
if buf.len() < bin_len as usize {
return Ok((&[], &[]));
}
let (meta, buf) = buf.split_at(bin_len as usize);
if buf.len() < 5 {
return Err(Error::other("insufficient data for CRC"));
}
let (mut crc_buf, inline_data) = buf.split_at(5);
// crc check
let crc = rmp::decode::read_u32(&mut crc_buf)?;
let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32;
if crc != meta_crc {
return Err(Error::other("xl file crc check failed"));
}
Ok((meta, inline_data))
}
// Fixed u32
pub fn read_bytes_header(buf: &[u8]) -> Result<(u32, &[u8])> {
let (mut size_buf, _) = buf.split_at(5);
// Get meta data, buf = crc + data
let bin_len = rmp::decode::read_bin_len(&mut size_buf)?;
Ok((bin_len, &buf[5..]))
}
pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result<u64> {
let i = buf.len() as u64;
// check version, buf = buf[8..]
let (buf, _, _) = Self::check_xl2_v1(buf).map_err(|e| {
error!("failed to check XL2 v1 format: {}", e);
e
})?;
let (mut size_buf, buf) = buf.split_at(5);
// Get meta data, buf = crc + data
let bin_len = rmp::decode::read_bin_len(&mut size_buf).map_err(|e| {
error!("failed to read binary length for metadata: {}", e);
Error::other(format!("failed to read binary length for metadata: {e}"))
})?;
if buf.len() < bin_len as usize {
error!("insufficient data for metadata: expected {} bytes, got {} bytes", bin_len, buf.len());
return Err(Error::other("insufficient data for metadata"));
}
let (meta, buf) = buf.split_at(bin_len as usize);
if buf.len() < 5 {
error!("insufficient data for CRC: expected 5 bytes, got {} bytes", buf.len());
return Err(Error::other("insufficient data for CRC"));
}
let (mut crc_buf, buf) = buf.split_at(5);
// crc check
let crc = rmp::decode::read_u32(&mut crc_buf).map_err(|e| {
error!("failed to read CRC value: {}", e);
Error::other(format!("failed to read CRC value: {e}"))
})?;
let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32;
if crc != meta_crc {
error!("xl file crc check failed: expected CRC {:#x}, got {:#x}", meta_crc, crc);
return Err(Error::other("xl file crc check failed"));
}
if !buf.is_empty() {
self.data.update(buf);
self.data.validate().map_err(|e| {
error!("data validation failed: {}", e);
e
})?;
}
// Parse meta
if !meta.is_empty() {
let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta).map_err(|e| {
error!("failed to decode XL headers: {}", e);
e
})?;
// let (_, meta) = meta.split_at(read_size as usize);
self.meta_ver = meta_ver;
self.versions = Vec::with_capacity(versions_len);
let mut cur: Cursor<&[u8]> = Cursor::new(meta);
for _ in 0..versions_len {
let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
error!("failed to read binary length for version header: {}", e);
Error::other(format!("failed to read binary length for version header: {e}"))
})? as usize;
let mut header_buf = vec![0u8; bin_len];
cur.read_exact(&mut header_buf)?;
let mut ver = FileMetaShallowVersion::default();
ver.header.unmarshal_msg(&header_buf).map_err(|e| {
error!("failed to unmarshal version header: {}", e);
e
})?;
let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
error!("failed to read binary length for version metadata: {}", e);
Error::other(format!("failed to read binary length for version metadata: {e}"))
})? as usize;
let mut ver_meta_buf = vec![0u8; bin_len];
cur.read_exact(&mut ver_meta_buf)?;
ver.meta.extend_from_slice(&ver_meta_buf);
self.versions.push(ver);
}
}
Ok(i)
}
// decode_xl_headers parses meta header, returns (versions count, xl_header_version, xl_meta_version, read data length)
fn decode_xl_headers(buf: &[u8]) -> Result<(usize, u8, u8, &[u8])> {
let mut cur = Cursor::new(buf);
let header_ver: u8 = rmp::decode::read_int(&mut cur)?;
if header_ver > XL_HEADER_VERSION {
return Err(Error::other("xl header version invalid"));
}
let meta_ver: u8 = rmp::decode::read_int(&mut cur)?;
if meta_ver > XL_META_VERSION {
return Err(Error::other("xl meta version invalid"));
}
let versions_len: usize = rmp::decode::read_int(&mut cur)?;
Ok((versions_len, header_ver, meta_ver, &buf[cur.position() as usize..]))
}
fn decode_versions<F: FnMut(usize, &[u8], &[u8]) -> Result<()>>(buf: &[u8], versions: usize, mut fnc: F) -> Result<()> {
let mut cur: Cursor<&[u8]> = Cursor::new(buf);
for i in 0..versions {
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
let header_buf = &buf[start..end];
cur.set_position(end as u64);
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
let ver_meta_buf = &buf[start..end];
cur.set_position(end as u64);
if let Err(err) = fnc(i, header_buf, ver_meta_buf) {
if err == Error::DoneForNow {
return Ok(());
}
return Err(err);
}
}
Ok(())
}
pub fn is_latest_delete_marker(buf: &[u8]) -> bool {
let header = Self::decode_xl_headers(buf).ok();
if let Some((versions, _hdr_v, _meta_v, meta)) = header {
if versions == 0 {
return false;
}
let mut is_delete_marker = false;
let _ = Self::decode_versions(meta, versions, |_: usize, hdr: &[u8], _: &[u8]| {
let mut header = FileMetaVersionHeader::default();
if header.unmarshal_msg(hdr).is_err() {
return Err(Error::DoneForNow);
}
is_delete_marker = header.version_type == VersionType::Delete;
Err(Error::DoneForNow)
});
is_delete_marker
} else {
false
}
}
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut wr = Vec::new();
// header
wr.write_all(XL_FILE_HEADER.as_slice())?;
let mut major = [0u8; 2];
byteorder::LittleEndian::write_u16(&mut major, XL_FILE_VERSION_MAJOR);
wr.write_all(major.as_slice())?;
let mut minor = [0u8; 2];
byteorder::LittleEndian::write_u16(&mut minor, XL_FILE_VERSION_MINOR);
wr.write_all(minor.as_slice())?;
// size bin32 reserved for write_bin_len
wr.write_all(&[0xc6, 0, 0, 0, 0])?;
let offset = wr.len();
// xl header
rmp::encode::write_uint8(&mut wr, XL_HEADER_VERSION)?;
rmp::encode::write_uint8(&mut wr, XL_META_VERSION)?;
// versions
rmp::encode::write_sint(&mut wr, self.versions.len() as i64)?;
for ver in self.versions.iter() {
let hmsg = ver.header.marshal_msg()?;
rmp::encode::write_bin(&mut wr, &hmsg)?;
rmp::encode::write_bin(&mut wr, &ver.meta)?;
}
// Update bin length
let data_len = wr.len() - offset;
byteorder::BigEndian::write_u32(&mut wr[offset - 4..offset], data_len as u32);
let crc = xxh64::xxh64(&wr[offset..], XXHASH_SEED) as u32;
let mut crc_buf = [0u8; 5];
crc_buf[0] = 0xce; // u32
byteorder::BigEndian::write_u32(&mut crc_buf[1..], crc);
wr.write_all(&crc_buf)?;
wr.write_all(self.data.as_slice())?;
Ok(wr)
}
// pub fn unmarshal(buf: &[u8]) -> Result<Self> {
// let mut s = Self::default();
// s.unmarshal_msg(buf)?;
// Ok(s)
// // let t: FileMeta = rmp_serde::from_slice(buf)?;
// // Ok(t)
// }
// pub fn marshal_msg(&self) -> Result<Vec<u8>> {
// let mut buf = Vec::new();
// self.serialize(&mut Serializer::new(&mut buf))?;
// Ok(buf)
// }
fn get_idx(&self, idx: usize) -> Result<FileMetaVersion> {
if idx > self.versions.len() {
return Err(Error::FileNotFound);
}
FileMetaVersion::try_from(self.versions[idx].meta.as_slice())
}
fn set_idx(&mut self, idx: usize, ver: FileMetaVersion) -> Result<()> {
if idx >= self.versions.len() {
return Err(Error::FileNotFound);
}
// TODO: use old buf
let meta_buf = ver.marshal_msg()?;
let pre_mod_time = self.versions[idx].header.mod_time;
self.versions[idx].header = ver.header();
self.versions[idx].meta = meta_buf;
if pre_mod_time != self.versions[idx].header.mod_time {
self.sort_by_mod_time();
}
Ok(())
}
fn sort_by_mod_time(&mut self) {
if self.versions.len() <= 1 {
return;
}
self.versions.sort_by(|a, b| {
if a.header.mod_time != b.header.mod_time {
b.header.mod_time.cmp(&a.header.mod_time)
} else if a.header.version_type != b.header.version_type {
b.header.version_type.cmp(&a.header.version_type)
} else if a.header.version_id != b.header.version_id {
b.header.version_id.cmp(&a.header.version_id)
} else if a.header.flags != b.header.flags {
b.header.flags.cmp(&a.header.flags)
} else {
b.cmp(a)
}
});
}
// Find version
pub fn find_version(&self, vid: Option<Uuid>) -> Result<(usize, FileMetaVersion)> {
let vid = vid.unwrap_or_default();
for (i, fver) in self.versions.iter().enumerate() {
if fver.header.version_id == Some(vid) {
let version = self.get_idx(i)?;
return Ok((i, version));
}
}
Err(Error::FileVersionNotFound)
}
// shard_data_dir_count queries the count of data_dir under vid
pub fn shard_data_dir_count(&self, vid: &Option<Uuid>, data_dir: &Option<Uuid>) -> usize {
let vid = vid.unwrap_or_default();
self.versions
.iter()
.filter(|v| {
v.header.version_type == VersionType::Object && v.header.version_id != Some(vid) && v.header.user_data_dir()
})
.map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).unwrap_or_default())
.filter(|v| v == data_dir)
.count()
}
pub fn update_object_version(&mut self, fi: FileInfo) -> Result<()> {
for version in self.versions.iter_mut() {
match version.header.version_type {
VersionType::Invalid | VersionType::Legacy => (),
VersionType::Object => {
// For non-versioned buckets, treat None as Uuid::nil()
let fi_vid = fi.version_id.or(Some(Uuid::nil()));
let ver_vid = version.header.version_id.or(Some(Uuid::nil()));
if ver_vid == fi_vid {
let mut ver = FileMetaVersion::try_from(version.meta.as_slice())?;
if let Some(ref mut obj) = ver.object {
for (k, v) in fi.metadata.iter() {
// Split metadata into meta_user and meta_sys based on prefix
// This logic must match From<FileInfo> for MetaObject
if k.len() > RESERVED_METADATA_PREFIX.len()
&& (k.starts_with(RESERVED_METADATA_PREFIX) || k.starts_with(RESERVED_METADATA_PREFIX_LOWER))
{
// Skip internal flags that shouldn't be persisted
if k == headers::X_RUSTFS_HEALING || k == headers::X_RUSTFS_DATA_MOV {
continue;
}
// Insert into meta_sys
obj.meta_sys.insert(k.clone(), v.as_bytes().to_vec());
} else {
// Insert into meta_user
obj.meta_user.insert(k.clone(), v.clone());
}
}
if let Some(mod_time) = fi.mod_time {
obj.mod_time = Some(mod_time);
}
}
// Update
version.header = ver.header();
version.meta = ver.marshal_msg()?;
}
}
VersionType::Delete => {
if version.header.version_id == fi.version_id {
return Err(Error::MethodNotAllowed);
}
}
}
}
self.versions.sort_by(|a, b| {
if a.header.mod_time != b.header.mod_time {
b.header.mod_time.cmp(&a.header.mod_time)
} else if a.header.version_type != b.header.version_type {
b.header.version_type.cmp(&a.header.version_type)
} else if a.header.version_id != b.header.version_id {
b.header.version_id.cmp(&a.header.version_id)
} else if a.header.flags != b.header.flags {
b.header.flags.cmp(&a.header.flags)
} else {
b.cmp(a)
}
});
Ok(())
}
pub fn add_version(&mut self, mut fi: FileInfo) -> Result<()> {
if fi.version_id.is_none() {
fi.version_id = Some(Uuid::nil());
}
if let Some(ref data) = fi.data {
let key = fi.version_id.unwrap_or_default().to_string();
self.data.replace(&key, data.to_vec())?;
}
let version = FileMetaVersion::from(fi);
self.add_version_filemata(version)
}
pub fn add_version_filemata(&mut self, version: FileMetaVersion) -> Result<()> {
if !version.valid() {
return Err(Error::other("file meta version invalid"));
}
// TODO: make it configurable
// 1000 is the limit of versions
// if self.versions.len() + 1 > 1000 {
// return Err(Error::other(
// "You've exceeded the limit on the number of versions you can create on this object",
// ));
// }
if self.versions.is_empty() {
self.versions.push(FileMetaShallowVersion::try_from(version)?);
return Ok(());
}
let vid = version.get_version_id();
if let Some(fidx) = self.versions.iter().position(|v| v.header.version_id == vid) {
return self.set_idx(fidx, version);
}
let mod_time = version.get_mod_time();
for (idx, exist) in self.versions.iter().enumerate() {
if let Some(ref ex_mt) = exist.header.mod_time
&& let Some(ref in_md) = mod_time
&& ex_mt <= in_md
{
self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
return Ok(());
}
}
Err(Error::other("add_version failed"))
// if !ver.valid() {
// return Err(Error::other("attempted to add invalid version"));
// }
// if self.versions.len() + 1 >= 100 {
// return Err(Error::other(
// "You've exceeded the limit on the number of versions you can create on this object",
// ));
// }
// let mod_time = ver.get_mod_time();
// let encoded = ver.marshal_msg()?;
// let new_version = FileMetaShallowVersion {
// header: ver.header(),
// meta: encoded,
// };
// // Find the insertion position: insert before the first element with mod_time >= new mod_time
// // This maintains descending order by mod_time (newest first)
// let insert_pos = self
// .versions
// .iter()
// .position(|existing| existing.header.mod_time <= mod_time)
// .unwrap_or(self.versions.len());
// self.versions.insert(insert_pos, new_version);
// Ok(())
}
// delete_version deletes version, returns data_dir
#[tracing::instrument(skip(self))]
pub fn delete_version(&mut self, fi: &FileInfo) -> Result<Option<Uuid>> {
let vid = if fi.version_id.is_none() {
Some(Uuid::nil())
} else {
Some(fi.version_id.unwrap())
};
let mut ventry = FileMetaVersion::default();
if fi.deleted {
ventry.version_type = VersionType::Delete;
ventry.delete_marker = Some(MetaDeleteMarker {
version_id: vid,
mod_time: fi.mod_time,
..Default::default()
});
if !fi.is_valid() {
return Err(Error::other("invalid file meta version"));
}
}
let mut update_version = false;
if fi.version_purge_status().is_empty()
&& (fi.delete_marker_replication_status() == ReplicationStatusType::Replica
|| fi.delete_marker_replication_status() == ReplicationStatusType::Empty)
{
update_version = fi.mark_deleted;
} else {
if fi.deleted
&& fi.version_purge_status() != VersionPurgeStatusType::Complete
&& (!fi.version_purge_status().is_empty() || fi.delete_marker_replication_status().is_empty())
{
update_version = true;
}
if !fi.version_purge_status().is_empty() && fi.version_purge_status() != VersionPurgeStatusType::Complete {
update_version = true;
}
}
if fi.deleted {
if !fi.delete_marker_replication_status().is_empty()
&& let Some(delete_marker) = ventry.delete_marker.as_mut()
{
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
if !fi.version_purge_status().is_empty()
&& let Some(delete_marker) = ventry.delete_marker.as_mut()
{
delete_marker.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
}
let mut found_index = None;
for (i, ver) in self.versions.iter().enumerate() {
if ver.header.version_id != vid {
continue;
}
match ver.header.version_type {
VersionType::Invalid | VersionType::Legacy => return Err(Error::other("invalid file meta version")),
VersionType::Delete => {
if update_version {
let mut v = self.get_idx(i)?;
if v.delete_marker.is_none() {
v.delete_marker = Some(MetaDeleteMarker {
version_id: vid,
mod_time: fi.mod_time,
meta_sys: HashMap::new(),
});
}
if let Some(delete_marker) = v.delete_marker.as_mut() {
if !fi.delete_marker_replication_status().is_empty() {
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
self.set_idx(i, v)?;
return Ok(None);
}
self.versions.remove(i);
if (fi.mark_deleted && fi.version_purge_status() != VersionPurgeStatusType::Complete)
|| (fi.deleted && vid == Some(Uuid::nil()))
{
self.add_version_filemata(ventry)?;
}
return Ok(None);
}
VersionType::Object => {
if update_version && !fi.deleted {
let mut v = self.get_idx(i)?;
if let Some(obj) = v.object.as_mut() {
obj.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
obj.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
let old_dir = v.object.as_ref().map(|v| v.data_dir).unwrap_or_default();
self.set_idx(i, v)?;
return Ok(old_dir);
}
found_index = Some(i);
}
}
}
let Some(i) = found_index else {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/filemeta/benches/xl_meta_bench.rs | crates/filemeta/benches/xl_meta_bench.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use criterion::{Criterion, criterion_group, criterion_main};
use rustfs_filemeta::{FileMeta, test_data::*};
use std::hint::black_box;
fn bench_create_real_xlmeta(c: &mut Criterion) {
c.bench_function("create_real_xlmeta", |b| b.iter(|| black_box(create_real_xlmeta().unwrap())));
}
fn bench_create_complex_xlmeta(c: &mut Criterion) {
c.bench_function("create_complex_xlmeta", |b| b.iter(|| black_box(create_complex_xlmeta().unwrap())));
}
fn bench_parse_real_xlmeta(c: &mut Criterion) {
let data = create_real_xlmeta().unwrap();
c.bench_function("parse_real_xlmeta", |b| b.iter(|| black_box(FileMeta::load(&data).unwrap())));
}
fn bench_parse_complex_xlmeta(c: &mut Criterion) {
let data = create_complex_xlmeta().unwrap();
c.bench_function("parse_complex_xlmeta", |b| b.iter(|| black_box(FileMeta::load(&data).unwrap())));
}
fn bench_serialize_real_xlmeta(c: &mut Criterion) {
let data = create_real_xlmeta().unwrap();
let fm = FileMeta::load(&data).unwrap();
c.bench_function("serialize_real_xlmeta", |b| b.iter(|| black_box(fm.marshal_msg().unwrap())));
}
fn bench_serialize_complex_xlmeta(c: &mut Criterion) {
let data = create_complex_xlmeta().unwrap();
let fm = FileMeta::load(&data).unwrap();
c.bench_function("serialize_complex_xlmeta", |b| b.iter(|| black_box(fm.marshal_msg().unwrap())));
}
fn bench_round_trip_real_xlmeta(c: &mut Criterion) {
let original_data = create_real_xlmeta().unwrap();
c.bench_function("round_trip_real_xlmeta", |b| {
b.iter(|| {
let fm = FileMeta::load(&original_data).unwrap();
let serialized = fm.marshal_msg().unwrap();
black_box(FileMeta::load(&serialized).unwrap())
})
});
}
fn bench_round_trip_complex_xlmeta(c: &mut Criterion) {
let original_data = create_complex_xlmeta().unwrap();
c.bench_function("round_trip_complex_xlmeta", |b| {
b.iter(|| {
let fm = FileMeta::load(&original_data).unwrap();
let serialized = fm.marshal_msg().unwrap();
black_box(FileMeta::load(&serialized).unwrap())
})
});
}
fn bench_version_stats(c: &mut Criterion) {
let data = create_complex_xlmeta().unwrap();
let fm = FileMeta::load(&data).unwrap();
c.bench_function("version_stats", |b| b.iter(|| black_box(fm.get_version_stats())));
}
fn bench_validate_integrity(c: &mut Criterion) {
let data = create_real_xlmeta().unwrap();
let fm = FileMeta::load(&data).unwrap();
c.bench_function("validate_integrity", |b| {
b.iter(|| {
fm.validate_integrity().unwrap();
black_box(())
})
});
}
criterion_group!(
benches,
bench_create_real_xlmeta,
bench_create_complex_xlmeta,
bench_parse_real_xlmeta,
bench_parse_complex_xlmeta,
bench_serialize_real_xlmeta,
bench_serialize_complex_xlmeta,
bench_round_trip_real_xlmeta,
bench_round_trip_complex_xlmeta,
bench_version_stats,
bench_validate_integrity
);
criterion_main!(benches);
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/lib.rs | crates/ahm/src/lib.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod error;
pub mod heal;
pub mod scanner;
pub use error::{Error, Result};
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
pub use scanner::Scanner;
use std::sync::{Arc, OnceLock};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
/// Initialize the global AHM services cancellation token
pub fn init_ahm_services_cancel_token(cancel_token: CancellationToken) -> Result<()> {
GLOBAL_AHM_SERVICES_CANCEL_TOKEN
.set(cancel_token)
.map_err(|_| Error::Config("AHM services cancel token already initialized".to_string()))
}
/// Get the global AHM services cancellation token
pub fn get_ahm_services_cancel_token() -> Option<&'static CancellationToken> {
GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get()
}
/// Create and initialize the global AHM services cancellation token
pub fn create_ahm_services_cancel_token() -> CancellationToken {
let cancel_token = CancellationToken::new();
init_ahm_services_cancel_token(cancel_token.clone()).expect("AHM services cancel token already initialized");
cancel_token
}
/// Shutdown all AHM services gracefully
pub fn shutdown_ahm_services() {
if let Some(cancel_token) = GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get() {
cancel_token.cancel();
}
}
/// Global heal manager instance
static GLOBAL_HEAL_MANAGER: OnceLock<Arc<HealManager>> = OnceLock::new();
/// Global heal channel processor instance
static GLOBAL_HEAL_CHANNEL_PROCESSOR: OnceLock<Arc<tokio::sync::Mutex<HealChannelProcessor>>> = OnceLock::new();
/// Initialize and start heal manager with channel processor
pub async fn init_heal_manager(
storage: Arc<dyn heal::storage::HealStorageAPI>,
config: Option<heal::manager::HealConfig>,
) -> Result<Arc<HealManager>> {
// Create heal manager
let heal_manager = Arc::new(HealManager::new(storage, config));
// Start heal manager
heal_manager.start().await?;
// Store global instance
GLOBAL_HEAL_MANAGER
.set(heal_manager.clone())
.map_err(|_| Error::Config("Heal manager already initialized".to_string()))?;
// Initialize heal channel
let channel_receiver = rustfs_common::heal_channel::init_heal_channel();
// Create channel processor
let channel_processor = HealChannelProcessor::new(heal_manager.clone());
// Store channel processor instance first
GLOBAL_HEAL_CHANNEL_PROCESSOR
.set(Arc::new(tokio::sync::Mutex::new(channel_processor)))
.map_err(|_| Error::Config("Heal channel processor already initialized".to_string()))?;
// Start channel processor in background
let receiver = channel_receiver;
tokio::spawn(async move {
if let Some(processor_guard) = GLOBAL_HEAL_CHANNEL_PROCESSOR.get() {
let mut processor = processor_guard.lock().await;
if let Err(e) = processor.start(receiver).await {
error!("Heal channel processor failed: {}", e);
}
}
});
info!("Heal manager with channel processor initialized successfully");
Ok(heal_manager)
}
/// Get global heal manager instance
pub fn get_heal_manager() -> Option<&'static Arc<HealManager>> {
GLOBAL_HEAL_MANAGER.get()
}
/// Get global heal channel processor instance
pub fn get_heal_channel_processor() -> Option<&'static Arc<tokio::sync::Mutex<HealChannelProcessor>>> {
GLOBAL_HEAL_CHANNEL_PROCESSOR.get()
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/error.rs | crates/ahm/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use thiserror::Error;
/// Custom error type for AHM operations
/// This enum defines various error variants that can occur during
/// the execution of AHM-related tasks, such as I/O errors, storage errors,
/// configuration errors, and specific errors related to healing operations.
#[derive(Debug, Error)]
pub enum Error {
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("Storage error: {0}")]
Storage(#[from] rustfs_ecstore::error::Error),
#[error("Disk error: {0}")]
Disk(#[from] rustfs_ecstore::disk::error::DiskError),
#[error("Configuration error: {0}")]
Config(String),
#[error("Heal configuration error: {message}")]
ConfigurationError { message: String },
#[error("Other error: {0}")]
Other(String),
#[error(transparent)]
Anyhow(#[from] anyhow::Error),
// Scanner
#[error("Scanner error: {0}")]
Scanner(String),
#[error("Metrics error: {0}")]
Metrics(String),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("IO error: {0}")]
IO(String),
#[error("Not found: {0}")]
NotFound(String),
#[error("Invalid checkpoint: {0}")]
InvalidCheckpoint(String),
// Heal
#[error("Heal task not found: {task_id}")]
TaskNotFound { task_id: String },
#[error("Heal task already exists: {task_id}")]
TaskAlreadyExists { task_id: String },
#[error("Heal manager is not running")]
ManagerNotRunning,
#[error("Heal task execution failed: {message}")]
TaskExecutionFailed { message: String },
#[error("Invalid heal type: {heal_type}")]
InvalidHealType { heal_type: String },
#[error("Heal task cancelled")]
TaskCancelled,
#[error("Heal task timeout")]
TaskTimeout,
#[error("Heal event processing failed: {message}")]
EventProcessingFailed { message: String },
#[error("Heal progress tracking failed: {message}")]
ProgressTrackingFailed { message: String },
}
/// A specialized Result type for AHM operations
///This type is a convenient alias for results returned by functions in the AHM crate,
/// using the custom Error type defined above.
pub type Result<T, E = Error> = std::result::Result<T, E>;
impl Error {
/// Create an Other error from any error type
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Error::Other(error.into().to_string())
}
}
impl From<Error> for std::io::Error {
fn from(err: Error) -> Self {
std::io::Error::other(err)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/data_scanner.rs | crates/ahm/src/scanner/data_scanner.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// IO throttling component is integrated into NodeScanner
use crate::{
Error, HealRequest, Result, get_ahm_services_cancel_token,
heal::HealManager,
scanner::{
BucketMetrics, DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig, DiskMetrics, MetricsCollector,
NodeScanner, NodeScannerConfig, ScannerMetrics,
lifecycle::ScannerItem,
local_scan::{self, LocalObjectRecord, LocalScanOutcome},
},
};
use rustfs_common::data_usage::{DataUsageInfo, SizeSummary};
use rustfs_common::metrics::{Metric, Metrics, global_metrics};
use rustfs_ecstore::{
self as ecstore, StorageAPI,
bucket::versioning::VersioningApi,
bucket::versioning_sys::BucketVersioningSys,
data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
disk::{DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
set_disk::SetDisks,
store_api::ObjectInfo,
};
use rustfs_filemeta::{MetacacheReader, VersionType};
use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
use std::{
collections::HashMap,
sync::Arc,
time::{Duration, SystemTime},
};
use time::OffsetDateTime;
use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use uuid;
/// Custom scan mode enum for AHM scanner
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ScanMode {
/// Normal scan - basic object discovery and metadata collection
#[default]
Normal,
/// Deep scan - includes EC verification and integrity checks
Deep,
}
/// Scanner configuration
#[derive(Debug, Clone)]
pub struct ScannerConfig {
/// Scan interval between cycles
pub scan_interval: Duration,
/// Deep scan interval (how often to perform deep scan)
pub deep_scan_interval: Duration,
/// Maximum concurrent scans
pub max_concurrent_scans: usize,
/// Whether to enable healing
pub enable_healing: bool,
/// Whether to enable metrics collection
pub enable_metrics: bool,
/// Current scan mode (normal, deep)
pub scan_mode: ScanMode,
/// Whether to enable data usage statistics collection
pub enable_data_usage_stats: bool,
}
impl Default for ScannerConfig {
fn default() -> Self {
Self {
scan_interval: Duration::from_secs(300), // 5 minutes
deep_scan_interval: Duration::from_secs(3600), // 1 hour
max_concurrent_scans: 20,
enable_healing: true,
enable_metrics: true,
scan_mode: ScanMode::Normal,
enable_data_usage_stats: true,
}
}
}
/// Scanner state
#[derive(Debug, Default)]
pub struct ScannerState {
/// Whether scanner is running
pub is_running: bool,
/// Current scan cycle
pub current_cycle: u64,
/// Last scan start time
pub last_scan_start: Option<SystemTime>,
/// Last scan end time
pub last_scan_end: Option<SystemTime>,
/// Current scan duration
pub current_scan_duration: Option<Duration>,
/// Last deep scan time
pub last_deep_scan_time: Option<SystemTime>,
/// Buckets being scanned
pub scanning_buckets: Vec<String>,
/// Disks being scanned
pub scanning_disks: Vec<String>,
}
/// AHM Scanner - Automatic Health Management Scanner (Optimized Version)
///
/// This is the new optimized scanner that uses the decentralized node-based architecture
/// for minimal business IO impact. It wraps the NodeScanner and provides backward
/// compatibility with the original Scanner interface.
///
/// Key optimizations:
/// - Node-local serial disk scanning instead of global parallel scanning
/// - Intelligent IO throttling based on business load
/// - Decentralized statistics aggregation
/// - Checkpoint-based resume functionality
pub struct Scanner {
/// Scanner configuration (legacy compatibility)
config: Arc<RwLock<ScannerConfig>>,
/// Scanner state (legacy compatibility)
state: Arc<RwLock<ScannerState>>,
/// Local metrics collector (for backward compatibility)
metrics: Arc<MetricsCollector>,
/// Bucket metrics cache
bucket_metrics: Arc<Mutex<HashMap<String, BucketMetrics>>>,
/// Disk metrics cache
disk_metrics: Arc<Mutex<HashMap<String, DiskMetrics>>>,
/// Data usage statistics cache
data_usage_stats: Arc<Mutex<HashMap<String, DataUsageInfo>>>,
/// Last data usage statistics collection time
last_data_usage_collection: Arc<RwLock<Option<SystemTime>>>,
/// Backoff timestamp for heavy fallback collection
fallback_backoff_until: Arc<RwLock<Option<SystemTime>>>,
/// Heal manager for auto-heal integration
heal_manager: Option<Arc<HealManager>>,
// NEW: Optimized scanner components
/// Node scanner for local disk scanning
node_scanner: Arc<NodeScanner>,
/// Statistics aggregator for global view
stats_aggregator: Arc<DecentralizedStatsAggregator>,
/// Node ID for this scanner instance
node_id: String,
}
impl Scanner {
/// Create a new optimized scanner
pub fn new(config: Option<ScannerConfig>, heal_manager: Option<Arc<HealManager>>) -> Self {
let config = config.unwrap_or_default();
info!("Creating optimized AHM scanner with decentralized architecture");
// Generate unique node ID
let node_id = format!("scanner-node-{}", uuid::Uuid::new_v4().simple());
// Create node scanner configuration - we'll set the data directory properly later
let data_dir = std::env::temp_dir().join("rustfs_scanner");
let node_config = NodeScannerConfig {
scan_interval: config.scan_interval,
disk_scan_delay: Duration::from_secs(10), // 10s delay between disks
enable_smart_scheduling: true,
enable_checkpoint: true,
checkpoint_save_interval: Duration::from_secs(30),
data_dir,
max_retry_attempts: 3,
};
// Create node scanner
let node_scanner = Arc::new(NodeScanner::new(node_id.clone(), node_config));
// Create stats aggregator configuration
let aggregator_config = DecentralizedStatsAggregatorConfig {
aggregation_interval: Duration::from_secs(30),
cache_ttl: Duration::from_secs(3),
node_timeout: Duration::from_secs(5),
max_concurrent_aggregations: 10,
};
// Create stats aggregator
let stats_aggregator = Arc::new(DecentralizedStatsAggregator::new(aggregator_config));
Self {
config: Arc::new(RwLock::new(config)),
state: Arc::new(RwLock::new(ScannerState::default())),
metrics: Arc::new(MetricsCollector::new()),
bucket_metrics: Arc::new(Mutex::new(HashMap::new())),
disk_metrics: Arc::new(Mutex::new(HashMap::new())),
data_usage_stats: Arc::new(Mutex::new(HashMap::new())),
last_data_usage_collection: Arc::new(RwLock::new(None)),
fallback_backoff_until: Arc::new(RwLock::new(None)),
heal_manager,
node_scanner,
stats_aggregator,
node_id,
}
}
/// Set configuration options for the scanner
pub async fn set_config_enable_healing(&self, enable: bool) {
let mut config = self.config.write().await;
config.enable_healing = enable;
}
/// Set scan mode for the scanner
pub async fn set_config_scan_mode(&self, mode: ScanMode) {
let mut config = self.config.write().await;
config.scan_mode = mode;
}
/// Set enable data usage stats
pub async fn set_config_enable_data_usage_stats(&self, enable: bool) {
let mut config = self.config.write().await;
config.enable_data_usage_stats = enable;
}
/// Set the heal manager after construction
pub fn set_heal_manager(&mut self, heal_manager: Arc<HealManager>) {
self.heal_manager = Some(heal_manager);
}
/// Initialize scanner with ECStore disks (for testing and runtime)
pub async fn initialize_with_ecstore(&self) {
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
info!("Initializing scanner with ECStore disks");
let mut disk_count = 0;
// Get all local disks from ECStore and add them to the node scanner
for pool in &ecstore.pools {
for set_disks in &pool.disk_set {
let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
for disk in disks {
// Add the disk to the node scanner
self.node_scanner.add_local_disk(Arc::new(disk.clone())).await;
info!("Added disk to scanner: {:?}", disk.path());
disk_count += 1;
}
}
}
info!("Scanner initialized with {} disks", disk_count);
} else {
warn!("ECStore not available during scanner initialization");
}
}
/// Perform basic test scan for testing environments
async fn perform_basic_test_scan(&self) -> Result<()> {
debug!("Starting basic test scan using ECStore directly");
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
let mut total_objects_scanned = 0u64;
// Check if deep scan mode is enabled
let config = self.config.read().await;
let enable_deep_scan = config.scan_mode == ScanMode::Deep;
let enable_healing = config.enable_healing;
drop(config);
let scan_outcome = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
Ok(outcome) => outcome,
Err(err) => {
warn!("Local usage scan failed: {}", err);
LocalScanOutcome::default()
}
};
let bucket_objects_map = &scan_outcome.bucket_objects;
// List all buckets
debug!("Listing buckets");
match ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions::default())
.await
{
Ok(buckets) => {
debug!("Found {} buckets", buckets.len());
for bucket_info in buckets {
let bucket_name = &bucket_info.name;
// Skip system buckets
if bucket_name.starts_with('.') {
debug!("Skipping system bucket: {}", bucket_name);
continue;
}
// Get bucket lifecycle configuration
let lifecycle_config = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name)
.await
.ok()
.map(|(c, _)| Arc::new(c));
// Get bucket versioning configuration
let versioning_config = Arc::new(VersioningConfiguration {
status: if bucket_info.versioning {
Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
} else {
None
},
..Default::default()
});
let records = match bucket_objects_map.get(bucket_name) {
Some(records) => records,
None => {
debug!(
"No local snapshot entries found for bucket {}; skipping lifecycle/integrity",
bucket_name
);
continue;
}
};
let live_objects = records.iter().filter(|record| record.usage.has_live_object).count() as u64;
total_objects_scanned = total_objects_scanned.saturating_add(live_objects);
debug!("Counted {} objects in bucket {} using local snapshots", live_objects, bucket_name);
// Process objects for lifecycle actions
if let Some(lifecycle_config) = &lifecycle_config {
debug!("Processing lifecycle actions for bucket: {}", bucket_name);
let mut scanner_item = ScannerItem::new(
bucket_name.to_string(),
Some(lifecycle_config.clone()),
Some(versioning_config.clone()),
);
match self
.process_bucket_objects_for_lifecycle(bucket_name, &mut scanner_item, records)
.await
{
Ok(processed_count) => {
debug!("Processed {} objects for lifecycle in bucket {}", processed_count, bucket_name);
}
Err(e) => {
warn!("Failed to process lifecycle actions for bucket {}: {}", bucket_name, e);
}
}
}
// If deep scan is enabled, verify each object's integrity
if enable_deep_scan && enable_healing {
debug!("Deep scan enabled, verifying object integrity in bucket {}", bucket_name);
if let Err(e) = self
.deep_scan_bucket_objects_with_records(&ecstore, bucket_name, records)
.await
{
warn!("Deep scan failed for bucket {}: {}", bucket_name, e);
}
}
}
self.update_data_usage_statistics(&scan_outcome, &ecstore).await;
}
Err(e) => {
error!("Failed to list buckets: {}", e);
}
}
debug!("Total objects scanned: {}", total_objects_scanned);
if total_objects_scanned > 0 {
// Update metrics directly
self.metrics.increment_objects_scanned(total_objects_scanned);
debug!("Updated metrics with {} objects", total_objects_scanned);
} else {
warn!("No objects found during basic test scan");
}
} else {
warn!("ECStore not available");
}
Ok(())
}
/// Update data usage statistics based on scan results
async fn update_data_usage_statistics(
&self,
outcome: &LocalScanOutcome,
ecstore: &std::sync::Arc<rustfs_ecstore::store::ECStore>,
) {
let enabled = {
let cfg = self.config.read().await;
cfg.enable_data_usage_stats
};
if !enabled {
debug!("Data usage statistics disabled; skipping refresh");
return;
}
if outcome.snapshots.is_empty() {
warn!("No local usage snapshots available; skipping data usage aggregation");
return;
}
let mut aggregated = DataUsageInfo::default();
let mut latest_update: Option<SystemTime> = None;
for snapshot in &outcome.snapshots {
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
latest_update = Some(update);
}
aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
aggregated.versions_total_count = aggregated.versions_total_count.saturating_add(snapshot.versions_total_count);
aggregated.delete_markers_total_count = aggregated
.delete_markers_total_count
.saturating_add(snapshot.delete_markers_total_count);
aggregated.objects_total_size = aggregated.objects_total_size.saturating_add(snapshot.objects_total_size);
for (bucket, usage) in &snapshot.buckets_usage {
let size = usage.size;
match aggregated.buckets_usage.entry(bucket.clone()) {
std::collections::hash_map::Entry::Occupied(mut entry) => entry.get_mut().merge(usage),
std::collections::hash_map::Entry::Vacant(entry) => {
entry.insert(usage.clone());
}
}
aggregated
.bucket_sizes
.entry(bucket.clone())
.and_modify(|existing| *existing = existing.saturating_add(size))
.or_insert(size);
}
}
aggregated.buckets_count = aggregated.buckets_usage.len() as u64;
aggregated.last_update = latest_update;
self.node_scanner.update_data_usage(aggregated.clone()).await;
let local_stats = self.node_scanner.get_stats_summary().await;
self.stats_aggregator.set_local_stats(local_stats).await;
let mut guard = self.data_usage_stats.lock().await;
guard.clear();
for (bucket, usage) in &aggregated.buckets_usage {
let mut bucket_data = DataUsageInfo::new();
bucket_data.last_update = aggregated.last_update;
bucket_data.buckets_count = 1;
bucket_data.objects_total_count = usage.objects_count;
bucket_data.versions_total_count = usage.versions_count;
bucket_data.delete_markers_total_count = usage.delete_markers_count;
bucket_data.objects_total_size = usage.size;
bucket_data.bucket_sizes.insert(bucket.clone(), usage.size);
bucket_data.buckets_usage.insert(bucket.clone(), usage.clone());
guard.insert(bucket.clone(), bucket_data);
}
drop(guard);
let info_clone = aggregated.clone();
let store_clone = ecstore.clone();
tokio::spawn(async move {
if let Err(err) = store_data_usage_in_backend(info_clone, store_clone).await {
warn!("Failed to persist aggregated usage: {}", err);
}
});
}
fn convert_record_to_object_info(record: &LocalObjectRecord) -> ObjectInfo {
if let Some(info) = &record.object_info {
return info.clone();
}
let usage = &record.usage;
ObjectInfo {
bucket: usage.bucket.clone(),
name: usage.object.clone(),
size: usage.total_size as i64,
delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
mod_time: usage.last_modified_ns.and_then(Self::ns_to_offset_datetime),
// Set is_latest to true for live objects - required for lifecycle expiration evaluation
is_latest: usage.has_live_object,
..Default::default()
}
}
fn ns_to_offset_datetime(ns: i128) -> Option<OffsetDateTime> {
OffsetDateTime::from_unix_timestamp_nanos(ns).ok()
}
async fn deep_scan_bucket_objects_with_records(
&self,
ecstore: &std::sync::Arc<rustfs_ecstore::store::ECStore>,
bucket_name: &str,
records: &[LocalObjectRecord],
) -> Result<()> {
if records.is_empty() {
return self.deep_scan_bucket_objects(ecstore, bucket_name).await;
}
for record in records {
if !record.usage.has_live_object {
continue;
}
let object_name = &record.usage.object;
if let Err(err) = self.verify_object_integrity(bucket_name, object_name).await {
warn!(
"Object integrity verification failed for {}/{} during deep scan: {}",
bucket_name, object_name, err
);
}
}
Ok(())
}
/// Deep scan objects in a bucket for integrity verification
async fn deep_scan_bucket_objects(
&self,
ecstore: &std::sync::Arc<rustfs_ecstore::store::ECStore>,
bucket_name: &str,
) -> Result<()> {
debug!("Starting deep scan for bucket: {}", bucket_name);
// Get list of objects in this bucket by scanning the filesystem
if let Some(pool) = ecstore.pools.first() {
for set_disks in &pool.disk_set {
let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
if let Some(disk) = disks.first() {
let bucket_path = disk.path().join(bucket_name);
if bucket_path.exists()
&& let Ok(entries) = std::fs::read_dir(&bucket_path)
{
for entry in entries.flatten() {
if let Ok(file_type) = entry.file_type()
&& file_type.is_dir()
&& let Some(object_name) = entry.file_name().to_str()
&& !object_name.starts_with('.')
{
debug!("Deep scanning object: {}/{}", bucket_name, object_name);
if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
warn!("Object integrity verification failed for {}/{}: {}", bucket_name, object_name, e);
} else {
debug!("Object integrity verification passed for {}/{}", bucket_name, object_name);
}
}
}
}
break; // Only scan first disk to avoid duplicates
}
}
}
Ok(())
}
/// Process bucket objects for lifecycle actions
async fn process_bucket_objects_for_lifecycle(
&self,
bucket_name: &str,
scanner_item: &mut ScannerItem,
records: &[LocalObjectRecord],
) -> Result<u64> {
info!("Processing objects for lifecycle in bucket: {}", bucket_name);
let mut processed_count = 0u64;
for record in records {
if !record.usage.has_live_object {
continue;
}
let object_info = Self::convert_record_to_object_info(record);
let mut size_summary = SizeSummary::default();
let (deleted, _size) = scanner_item.apply_actions(&object_info, &mut size_summary).await;
if deleted {
info!("Object {}/{} was deleted by lifecycle action", bucket_name, object_info.name);
}
processed_count = processed_count.saturating_add(1);
}
info!("Processed {} objects for lifecycle in bucket {}", processed_count, bucket_name);
Ok(processed_count)
}
/// Start the optimized scanner
pub async fn start(&self) -> Result<()> {
let mut state = self.state.write().await;
if state.is_running {
warn!("Scanner is already running");
return Ok(());
}
state.is_running = true;
state.last_scan_start = Some(SystemTime::now());
info!("Starting optimized AHM scanner with node ID: {}", self.node_id);
// Initialize and start the node scanner
self.node_scanner.initialize_stats().await?;
// update object count and size for each bucket
self.node_scanner.start().await?;
// Set local stats in aggregator
let local_stats = self.node_scanner.get_stats_summary().await;
self.stats_aggregator.set_local_stats(local_stats).await;
// Start background legacy scan loop for backward compatibility
let scanner = self.clone_for_background();
tokio::spawn(async move {
if let Err(e) = scanner.legacy_scan_loop().await {
error!("Legacy scanner loop failed: {}", e);
}
});
Ok(())
}
/// Stop the optimized scanner gracefully
pub async fn stop(&self) -> Result<()> {
let mut state = self.state.write().await;
if !state.is_running {
warn!("Scanner is not running");
return Ok(());
}
info!("Stopping optimized AHM scanner gracefully...");
// Stop the node scanner first
self.node_scanner.stop().await?;
// Trigger cancellation using global cancel token
if let Some(cancel_token) = get_ahm_services_cancel_token() {
cancel_token.cancel();
}
state.is_running = false;
state.last_scan_end = Some(SystemTime::now());
if let Some(start_time) = state.last_scan_start {
state.current_scan_duration = Some(SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO));
}
drop(state);
// Clear any cached data
self.stats_aggregator.clear_cache().await;
info!("Optimized AHM scanner stopped successfully");
Ok(())
}
/// Get integrated data usage statistics for DataUsageInfoHandler
pub async fn get_data_usage_info(&self) -> Result<DataUsageInfo> {
let mut integrated_info = DataUsageInfo::new();
// Collect data from all buckets
{
let data_usage_guard = self.data_usage_stats.lock().await;
debug!("get_data_usage_info: Found {} bucket entries in cache", data_usage_guard.len());
for (bucket_name, bucket_data) in data_usage_guard.iter() {
debug!(
"get_data_usage_info: Processing bucket {}: objects_total_count={}, buckets_usage.len()={}",
bucket_name,
bucket_data.objects_total_count,
bucket_data.buckets_usage.len()
);
// Merge bucket data into integrated info
integrated_info.merge(bucket_data);
debug!(
"get_data_usage_info: After merging bucket {}: integrated_info.objects_total_count={}",
bucket_name, integrated_info.objects_total_count
);
}
}
self.update_capacity_info(&mut integrated_info).await;
Ok(integrated_info)
}
/// Update capacity information in DataUsageInfo
async fn update_capacity_info(&self, integrated_info: &mut DataUsageInfo) {
// Update capacity information from storage info
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
let mut total_capacity = 0u64;
let mut total_used_capacity = 0u64;
let mut total_free_capacity = 0u64;
// Collect capacity info from all SetDisks
for pool in &ecstore.pools {
for set_disks in &pool.disk_set {
let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
for disk in disks {
if let Ok(disk_info) = disk
.disk_info(&ecstore::disk::DiskInfoOptions {
disk_id: disk.path().to_string_lossy().to_string(),
metrics: true,
noop: false,
})
.await
{
total_capacity += disk_info.total;
total_used_capacity += disk_info.used;
total_free_capacity += disk_info.free;
}
}
}
}
if total_capacity > 0 {
integrated_info.update_capacity(total_capacity, total_used_capacity, total_free_capacity);
}
}
}
/// Get current scanner metrics
pub async fn get_metrics(&self) -> ScannerMetrics {
let mut metrics = self.metrics.get_metrics();
// Add bucket metrics
let bucket_metrics: HashMap<String, BucketMetrics> = {
let bucket_metrics_guard = self.bucket_metrics.lock().await;
bucket_metrics_guard
.iter()
.map(|(key, value)| (key.clone(), value.clone()))
.collect()
};
metrics.bucket_metrics = bucket_metrics;
// Add disk metrics
let disk_metrics: HashMap<String, DiskMetrics> = {
let disk_metrics_guard = self.disk_metrics.lock().await;
disk_metrics_guard
.iter()
.map(|(key, value)| (key.clone(), value.clone()))
.collect()
};
metrics.disk_metrics = disk_metrics;
// Add current scan duration
let state = self.state.read().await;
metrics.current_scan_duration = state.current_scan_duration;
metrics
}
/// Get global metrics from common crate
pub async fn get_global_metrics(&self) -> rustfs_madmin::metrics::ScannerMetrics {
global_metrics().report().await
}
/// Perform a single scan cycle using optimized node scanner
pub async fn scan_cycle(&self) -> Result<()> {
let start_time = SystemTime::now();
// Start global metrics collection for this cycle
let stop_fn = Metrics::time(Metric::ScanCycle);
info!(
"Starting optimized scan cycle {} using node scanner",
self.metrics.get_metrics().current_cycle + 1
);
// Update state
{
let mut state = self.state.write().await;
state.current_cycle += 1;
state.last_scan_start = Some(start_time);
state.scanning_buckets.clear();
state.scanning_disks.clear();
}
// Update global metrics cycle information
let cycle_info = rustfs_common::metrics::CurrentCycle {
current: self.state.read().await.current_cycle,
cycle_completed: vec![chrono::Utc::now()],
started: chrono::Utc::now(),
};
global_metrics().set_cycle(Some(cycle_info)).await;
self.metrics.set_current_cycle(self.state.read().await.current_cycle);
self.metrics.increment_total_cycles();
// Use the optimized node scanner instead of the old global scan
// The node scanner handles serial disk scanning with intelligent throttling
// Force a checkpoint save to ensure progress is tracked
if let Err(e) = self.node_scanner.force_save_checkpoint().await {
warn!("Failed to save checkpoint: {}", e);
}
// Always trigger data usage collection during scan cycle
let config = self.config.read().await;
if config.enable_data_usage_stats {
info!("Data usage stats enabled, collecting data");
if let Err(e) = self.collect_and_persist_data_usage().await {
error!("Failed to collect data usage during scan cycle: {}", e);
}
}
drop(config);
// Get aggregated statistics from all nodes
debug!("About to get aggregated stats");
match self.stats_aggregator.get_aggregated_stats().await {
Ok(aggregated_stats) => {
debug!(
"Successfully got aggregated stats: {} objects scanned",
aggregated_stats.total_objects_scanned
);
info!(
"Aggregated stats: total_objects_scanned={}, online_node_count={}",
aggregated_stats.total_objects_scanned, aggregated_stats.online_node_count
);
// Update legacy metrics with aggregated data
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/lifecycle.rs | crates/ahm/src/scanner/lifecycle.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Result;
use rustfs_common::data_usage::SizeSummary;
use rustfs_common::metrics::IlmAction;
use rustfs_ecstore::bucket::{
lifecycle::{
bucket_lifecycle_audit::LcEventSrc,
bucket_lifecycle_ops::{GLOBAL_ExpiryState, apply_lifecycle_action, eval_action_from_lifecycle},
lifecycle,
lifecycle::Lifecycle,
},
metadata_sys::get_object_lock_config,
object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion},
versioning::VersioningApi,
versioning_sys::BucketVersioningSys,
};
use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
use rustfs_filemeta::FileInfo;
use s3s::dto::{BucketLifecycleConfiguration as LifecycleConfig, VersioningConfiguration};
use std::sync::{
Arc,
atomic::{AtomicU64, Ordering},
};
use time::OffsetDateTime;
use tracing::info;
static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100);
static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB
#[derive(Clone)]
pub struct ScannerItem {
pub bucket: String,
pub object_name: String,
pub lifecycle: Option<Arc<LifecycleConfig>>,
pub versioning: Option<Arc<VersioningConfiguration>>,
}
impl ScannerItem {
pub fn new(
bucket: String,
lifecycle: Option<Arc<LifecycleConfig>>,
versioning: Option<Arc<VersioningConfiguration>>,
) -> Self {
Self {
bucket,
object_name: "".to_string(),
lifecycle,
versioning,
}
}
pub async fn apply_versions_actions(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
let obj_infos = self.apply_newer_noncurrent_version_limit(fivs).await?;
if obj_infos.len() >= SCANNER_EXCESS_OBJECT_VERSIONS.load(Ordering::SeqCst) as usize {
// todo
}
let mut cumulative_size = 0;
for obj_info in obj_infos.iter() {
cumulative_size += obj_info.size;
}
if cumulative_size >= SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE.load(Ordering::SeqCst) as i64 {
//todo
}
Ok(obj_infos)
}
pub async fn apply_newer_noncurrent_version_limit(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
let lock_enabled = if let Some(rcfg) = BucketObjectLockSys::get(&self.bucket).await {
rcfg.mode.is_some()
} else {
false
};
let _vcfg = BucketVersioningSys::get(&self.bucket).await?;
let versioned = match BucketVersioningSys::get(&self.bucket).await {
Ok(vcfg) => vcfg.versioned(&self.object_name),
Err(_) => false,
};
let mut object_infos = Vec::with_capacity(fivs.len());
if self.lifecycle.is_none() {
for info in fivs.iter() {
object_infos.push(ObjectInfo::from_file_info(info, &self.bucket, &self.object_name, versioned));
}
return Ok(object_infos);
}
let event = self
.lifecycle
.as_ref()
.expect("lifecycle err.")
.clone()
.noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts {
name: self.object_name.clone(),
..Default::default()
})
.await;
let lim = event.newer_noncurrent_versions;
if lim == 0 || fivs.len() <= lim + 1 {
for fi in fivs.iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
}
return Ok(object_infos);
}
let overflow_versions = &fivs[lim + 1..];
for fi in fivs[..lim + 1].iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
}
let mut to_del = Vec::<ObjectToDelete>::with_capacity(overflow_versions.len());
for fi in overflow_versions.iter() {
let obj = ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned);
if lock_enabled && enforce_retention_for_deletion(&obj) {
//if enforce_retention_for_deletion(&obj) {
/*if self.debug {
if obj.version_id.is_some() {
info!("lifecycle: {} v({}) is locked, not deleting\n", obj.name, obj.version_id.expect("err"));
} else {
info!("lifecycle: {} is locked, not deleting\n", obj.name);
}
}*/
object_infos.push(obj);
continue;
}
if OffsetDateTime::now_utc().unix_timestamp()
< lifecycle::expected_expiry_time(obj.successor_mod_time.expect("err"), event.noncurrent_days as i32)
.unix_timestamp()
{
object_infos.push(obj);
continue;
}
to_del.push(ObjectToDelete {
object_name: obj.name,
version_id: obj.version_id,
..Default::default()
});
}
if !to_del.is_empty() {
let mut expiry_state = GLOBAL_ExpiryState.write().await;
expiry_state.enqueue_by_newer_noncurrent(&self.bucket, to_del, event).await;
}
Ok(object_infos)
}
pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, i64) {
let (action, _size) = self.apply_lifecycle(oi).await;
info!(
"apply_actions {} {} {:?} {:?}",
oi.bucket.clone(),
oi.name.clone(),
oi.version_id.clone(),
oi.user_defined.clone()
);
// Create a mutable clone if you need to modify fields
/*let mut oi = oi.clone();
oi.replication_status = ReplicationStatusType::from(
oi.user_defined
.get("x-amz-bucket-replication-status")
.unwrap_or(&"PENDING".to_string()),
);
info!("apply status is: {:?}", oi.replication_status);
self.heal_replication(&oi, _size_s).await;*/
if action.delete_all() {
return (true, 0);
}
(false, oi.size)
}
async fn apply_lifecycle(&mut self, oi: &ObjectInfo) -> (IlmAction, i64) {
let size = oi.size;
if self.lifecycle.is_none() {
info!("apply_lifecycle: No lifecycle config for object: {}", oi.name);
return (IlmAction::NoneAction, size);
}
info!("apply_lifecycle: Lifecycle config exists for object: {}", oi.name);
let (olcfg, rcfg) = if self.bucket != ".minio.sys" {
(
get_object_lock_config(&self.bucket).await.ok(),
None, // FIXME: replication config
)
} else {
(None, None)
};
info!("apply_lifecycle: Evaluating lifecycle for object: {}", oi.name);
let lifecycle = match self.lifecycle.as_ref() {
Some(lc) => lc,
None => {
info!("No lifecycle configuration found for object: {}", oi.name);
return (IlmAction::NoneAction, 0);
}
};
let lc_evt = eval_action_from_lifecycle(
lifecycle,
olcfg
.as_ref()
.and_then(|(c, _)| c.rule.as_ref().and_then(|r| r.default_retention.clone())),
rcfg.clone(),
oi, // Pass oi directly
)
.await;
info!("lifecycle: {} Initial scan: {} (action: {:?})", oi.name, lc_evt.action, lc_evt.action);
let mut new_size = size;
match lc_evt.action {
IlmAction::DeleteVersionAction | IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction => {
info!("apply_lifecycle: Object {} marked for version deletion, new_size=0", oi.name);
new_size = 0;
}
IlmAction::DeleteAction => {
info!("apply_lifecycle: Object {} marked for deletion", oi.name);
if let Some(vcfg) = &self.versioning {
if !vcfg.enabled() {
info!("apply_lifecycle: Versioning disabled, setting new_size=0");
new_size = 0;
}
} else {
info!("apply_lifecycle: No versioning config, setting new_size=0");
new_size = 0;
}
}
IlmAction::NoneAction => {
info!("apply_lifecycle: No action for object {}", oi.name);
}
_ => {
info!("apply_lifecycle: Other action {:?} for object {}", lc_evt.action, oi.name);
}
}
if lc_evt.action != IlmAction::NoneAction {
info!("apply_lifecycle: Applying lifecycle action {:?} for object {}", lc_evt.action, oi.name);
apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await;
} else {
info!("apply_lifecycle: Skipping lifecycle action for object {} as no action is needed", oi.name);
}
(lc_evt.action, new_size)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/io_monitor.rs | crates/ahm/src/scanner/io_monitor.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Result;
use crate::scanner::LoadLevel;
use serde::{Deserialize, Serialize};
use std::{
collections::VecDeque,
sync::{
Arc,
atomic::{AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
/// IO monitor config
#[derive(Debug, Clone)]
pub struct IOMonitorConfig {
/// monitor interval
pub monitor_interval: Duration,
/// history data retention time
pub history_retention: Duration,
/// load evaluation window size
pub load_window_size: usize,
/// whether to enable actual system monitoring
pub enable_system_monitoring: bool,
/// disk path list (for monitoring specific disks)
pub disk_paths: Vec<String>,
}
impl Default for IOMonitorConfig {
fn default() -> Self {
Self {
monitor_interval: Duration::from_secs(1), // 1 second monitor interval
history_retention: Duration::from_secs(300), // keep 5 minutes history
load_window_size: 30, // 30 sample points sliding window
enable_system_monitoring: false, // default use simulated data
disk_paths: Vec::new(),
}
}
}
/// IO monitor metrics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IOMetrics {
/// timestamp
pub timestamp: SystemTime,
/// disk IOPS (read + write)
pub iops: u64,
/// read IOPS
pub read_iops: u64,
/// write IOPS
pub write_iops: u64,
/// disk queue depth
pub queue_depth: u64,
/// average latency (milliseconds)
pub avg_latency: u64,
/// read latency (milliseconds)
pub read_latency: u64,
/// write latency (milliseconds)
pub write_latency: u64,
/// CPU usage (0-100)
pub cpu_usage: u8,
/// memory usage (0-100)
pub memory_usage: u8,
/// disk usage (0-100)
pub disk_utilization: u8,
/// network IO (Mbps)
pub network_io: u64,
}
impl Default for IOMetrics {
fn default() -> Self {
Self {
timestamp: SystemTime::now(),
iops: 0,
read_iops: 0,
write_iops: 0,
queue_depth: 0,
avg_latency: 0,
read_latency: 0,
write_latency: 0,
cpu_usage: 0,
memory_usage: 0,
disk_utilization: 0,
network_io: 0,
}
}
}
/// load level stats
#[derive(Debug, Clone, Default)]
pub struct LoadLevelStats {
/// low load duration (seconds)
pub low_load_duration: u64,
/// medium load duration (seconds)
pub medium_load_duration: u64,
/// high load duration (seconds)
pub high_load_duration: u64,
/// critical load duration (seconds)
pub critical_load_duration: u64,
/// load transitions
pub load_transitions: u64,
}
/// advanced IO monitor
pub struct AdvancedIOMonitor {
/// config
config: Arc<RwLock<IOMonitorConfig>>,
/// current metrics
current_metrics: Arc<RwLock<IOMetrics>>,
/// history metrics (sliding window)
history_metrics: Arc<RwLock<VecDeque<IOMetrics>>>,
/// current load level
current_load_level: Arc<RwLock<LoadLevel>>,
/// load level history
load_level_history: Arc<RwLock<VecDeque<(SystemTime, LoadLevel)>>>,
/// load level stats
load_stats: Arc<RwLock<LoadLevelStats>>,
/// business IO metrics (updated by external)
business_metrics: Arc<BusinessIOMetrics>,
/// cancel token
cancel_token: CancellationToken,
}
/// business IO metrics
pub struct BusinessIOMetrics {
/// business request latency (milliseconds)
pub request_latency: AtomicU64,
/// business request QPS
pub request_qps: AtomicU64,
/// business error rate (0-10000, 0.00%-100.00%)
pub error_rate: AtomicU64,
/// active connections
pub active_connections: AtomicU64,
/// last update time
pub last_update: Arc<RwLock<SystemTime>>,
}
impl Default for BusinessIOMetrics {
fn default() -> Self {
Self {
request_latency: AtomicU64::new(0),
request_qps: AtomicU64::new(0),
error_rate: AtomicU64::new(0),
active_connections: AtomicU64::new(0),
last_update: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
}
}
}
impl AdvancedIOMonitor {
/// create new advanced IO monitor
pub fn new(config: IOMonitorConfig) -> Self {
Self {
config: Arc::new(RwLock::new(config)),
current_metrics: Arc::new(RwLock::new(IOMetrics::default())),
history_metrics: Arc::new(RwLock::new(VecDeque::new())),
current_load_level: Arc::new(RwLock::new(LoadLevel::Low)),
load_level_history: Arc::new(RwLock::new(VecDeque::new())),
load_stats: Arc::new(RwLock::new(LoadLevelStats::default())),
business_metrics: Arc::new(BusinessIOMetrics::default()),
cancel_token: CancellationToken::new(),
}
}
/// start monitoring
pub async fn start(&self) -> Result<()> {
info!("start advanced IO monitor");
let monitor = self.clone_for_background();
tokio::spawn(async move {
if let Err(e) = monitor.monitoring_loop().await {
error!("IO monitoring loop failed: {}", e);
}
});
Ok(())
}
/// stop monitoring
pub async fn stop(&self) {
info!("stop IO monitor");
self.cancel_token.cancel();
}
/// monitoring loop
async fn monitoring_loop(&self) -> Result<()> {
let mut interval = {
let config = self.config.read().await;
tokio::time::interval(config.monitor_interval)
};
let mut last_load_level = LoadLevel::Low;
let mut load_level_start_time = SystemTime::now();
loop {
tokio::select! {
_ = self.cancel_token.cancelled() => {
info!("IO monitoring loop cancelled");
break;
}
_ = interval.tick() => {
// collect system metrics
let metrics = self.collect_system_metrics().await;
// update current metrics
*self.current_metrics.write().await = metrics.clone();
// update history metrics
self.update_metrics_history(metrics.clone()).await;
// calculate load level
let new_load_level = self.calculate_load_level(&metrics).await;
// check if load level changed
if new_load_level != last_load_level {
self.handle_load_level_change(last_load_level, new_load_level, load_level_start_time).await;
last_load_level = new_load_level;
load_level_start_time = SystemTime::now();
}
// update current load level
*self.current_load_level.write().await = new_load_level;
debug!("IO monitor updated: IOPS={}, queue depth={}, latency={}ms, load level={:?}",
metrics.iops, metrics.queue_depth, metrics.avg_latency, new_load_level);
}
}
}
Ok(())
}
/// collect system metrics
async fn collect_system_metrics(&self) -> IOMetrics {
let config = self.config.read().await;
if config.enable_system_monitoring {
// actual system monitoring implementation
self.collect_real_system_metrics().await
} else {
// simulated data
self.generate_simulated_metrics().await
}
}
/// collect real system metrics (need to be implemented according to specific system)
async fn collect_real_system_metrics(&self) -> IOMetrics {
// TODO: implement actual system metrics collection
// can use procfs, sysfs or other system API
let metrics = IOMetrics {
timestamp: SystemTime::now(),
..Default::default()
};
// example: read /proc/diskstats
if let Ok(diskstats) = tokio::fs::read_to_string("/proc/diskstats").await {
// parse disk stats info
// here need to implement specific parsing logic
debug!("read disk stats info: {} bytes", diskstats.len());
}
// example: read /proc/stat to get CPU info
if let Ok(stat) = tokio::fs::read_to_string("/proc/stat").await {
// parse CPU stats info
debug!("read CPU stats info: {} bytes", stat.len());
}
// example: read /proc/meminfo to get memory info
if let Ok(meminfo) = tokio::fs::read_to_string("/proc/meminfo").await {
// parse memory stats info
debug!("read memory stats info: {} bytes", meminfo.len());
}
metrics
}
/// generate simulated metrics (for testing and development)
async fn generate_simulated_metrics(&self) -> IOMetrics {
use rand::Rng;
let mut rng = rand::rng();
// get business metrics impact
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
let business_qps = self.business_metrics.request_qps.load(Ordering::Relaxed);
// generate simulated system metrics based on business load
let base_iops = 100 + (business_qps / 10);
let base_latency = 5 + (business_latency / 10);
IOMetrics {
timestamp: SystemTime::now(),
iops: base_iops + rng.random_range(0..50),
read_iops: (base_iops * 6 / 10) + rng.random_range(0..20),
write_iops: (base_iops * 4 / 10) + rng.random_range(0..20),
queue_depth: rng.random_range(1..20),
avg_latency: base_latency + rng.random_range(0..10),
read_latency: base_latency + rng.random_range(0..5),
write_latency: base_latency + rng.random_range(0..15),
cpu_usage: rng.random_range(10..70),
memory_usage: rng.random_range(30..80),
disk_utilization: rng.random_range(20..90),
network_io: rng.random_range(10..1000),
}
}
/// update metrics history
async fn update_metrics_history(&self, metrics: IOMetrics) {
let mut history = self.history_metrics.write().await;
let config = self.config.read().await;
// add new metrics
history.push_back(metrics);
// clean expired data
let retention_cutoff = SystemTime::now() - config.history_retention;
while let Some(front) = history.front() {
if front.timestamp < retention_cutoff {
history.pop_front();
} else {
break;
}
}
// limit window size
while history.len() > config.load_window_size {
history.pop_front();
}
}
/// calculate load level
async fn calculate_load_level(&self, metrics: &IOMetrics) -> LoadLevel {
// multi-dimensional load evaluation algorithm
let mut load_score = 0u32;
// IOPS load evaluation (weight: 25%)
let iops_score = match metrics.iops {
0..=200 => 0,
201..=500 => 15,
501..=1000 => 25,
_ => 35,
};
load_score += iops_score;
// latency load evaluation (weight: 30%)
let latency_score = match metrics.avg_latency {
0..=10 => 0,
11..=50 => 20,
51..=100 => 30,
_ => 40,
};
load_score += latency_score;
// queue depth evaluation (weight: 20%)
let queue_score = match metrics.queue_depth {
0..=5 => 0,
6..=15 => 10,
16..=30 => 20,
_ => 25,
};
load_score += queue_score;
// CPU usage evaluation (weight: 15%)
let cpu_score = match metrics.cpu_usage {
0..=30 => 0,
31..=60 => 8,
61..=80 => 12,
_ => 15,
};
load_score += cpu_score;
// disk usage evaluation (weight: 10%)
let disk_score = match metrics.disk_utilization {
0..=50 => 0,
51..=75 => 5,
76..=90 => 8,
_ => 10,
};
load_score += disk_score;
// business metrics impact
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
let business_error_rate = self.business_metrics.error_rate.load(Ordering::Relaxed);
if business_latency > 100 {
load_score += 20; // business latency too high
}
if business_error_rate > 100 {
// > 1%
load_score += 15; // business error rate too high
}
// history trend analysis
let trend_score = self.calculate_trend_score().await;
load_score += trend_score;
// determine load level based on total score
match load_score {
0..=30 => LoadLevel::Low,
31..=60 => LoadLevel::Medium,
61..=90 => LoadLevel::High,
_ => LoadLevel::Critical,
}
}
/// calculate trend score
async fn calculate_trend_score(&self) -> u32 {
let history = self.history_metrics.read().await;
if history.len() < 5 {
return 0; // data insufficient, cannot analyze trend
}
// analyze trend of last 5 samples
let recent: Vec<_> = history.iter().rev().take(5).collect();
// check IOPS rising trend
let mut iops_trend = 0;
for i in 1..recent.len() {
if recent[i - 1].iops > recent[i].iops {
iops_trend += 1;
}
}
// check latency rising trend
let mut latency_trend = 0;
for i in 1..recent.len() {
if recent[i - 1].avg_latency > recent[i].avg_latency {
latency_trend += 1;
}
}
// if IOPS and latency are both rising, increase load score
if iops_trend >= 3 && latency_trend >= 3 {
15 // obvious rising trend
} else if iops_trend >= 2 || latency_trend >= 2 {
5 // slight rising trend
} else {
0 // no obvious trend
}
}
/// handle load level change
async fn handle_load_level_change(&self, old_level: LoadLevel, new_level: LoadLevel, start_time: SystemTime) {
let duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);
// update stats
{
let mut stats = self.load_stats.write().await;
match old_level {
LoadLevel::Low => stats.low_load_duration += duration.as_secs(),
LoadLevel::Medium => stats.medium_load_duration += duration.as_secs(),
LoadLevel::High => stats.high_load_duration += duration.as_secs(),
LoadLevel::Critical => stats.critical_load_duration += duration.as_secs(),
}
stats.load_transitions += 1;
}
// update history
{
let mut history = self.load_level_history.write().await;
history.push_back((SystemTime::now(), new_level));
// keep history record in reasonable range
while history.len() > 100 {
history.pop_front();
}
}
info!("load level changed: {:?} -> {:?}, duration: {:?}", old_level, new_level, duration);
// if enter critical load state, record warning
if new_level == LoadLevel::Critical {
warn!("system entered critical load state, Scanner will pause running");
}
}
/// get current load level
pub async fn get_business_load_level(&self) -> LoadLevel {
*self.current_load_level.read().await
}
/// get current metrics
pub async fn get_current_metrics(&self) -> IOMetrics {
self.current_metrics.read().await.clone()
}
/// get history metrics
pub async fn get_history_metrics(&self) -> Vec<IOMetrics> {
self.history_metrics.read().await.iter().cloned().collect()
}
/// get load stats
pub async fn get_load_stats(&self) -> LoadLevelStats {
self.load_stats.read().await.clone()
}
/// update business IO metrics
pub async fn update_business_metrics(&self, latency: u64, qps: u64, error_rate: u64, connections: u64) {
self.business_metrics.request_latency.store(latency, Ordering::Relaxed);
self.business_metrics.request_qps.store(qps, Ordering::Relaxed);
self.business_metrics.error_rate.store(error_rate, Ordering::Relaxed);
self.business_metrics.active_connections.store(connections, Ordering::Relaxed);
*self.business_metrics.last_update.write().await = SystemTime::now();
debug!(
"update business metrics: latency={}ms, QPS={}, error rate={}‰, connections={}",
latency, qps, error_rate, connections
);
}
/// clone for background task
fn clone_for_background(&self) -> Self {
Self {
config: self.config.clone(),
current_metrics: self.current_metrics.clone(),
history_metrics: self.history_metrics.clone(),
current_load_level: self.current_load_level.clone(),
load_level_history: self.load_level_history.clone(),
load_stats: self.load_stats.clone(),
business_metrics: self.business_metrics.clone(),
cancel_token: self.cancel_token.clone(),
}
}
/// reset stats
pub async fn reset_stats(&self) {
*self.load_stats.write().await = LoadLevelStats::default();
self.load_level_history.write().await.clear();
self.history_metrics.write().await.clear();
info!("IO monitor stats reset");
}
/// get load level history
pub async fn get_load_level_history(&self) -> Vec<(SystemTime, LoadLevel)> {
self.load_level_history.read().await.iter().cloned().collect()
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/node_scanner.rs | crates/ahm/src/scanner/node_scanner.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Result;
use crate::scanner::{
AdvancedIOMonitor, AdvancedIOThrottler, BatchScanResult, CheckpointManager, IOMonitorConfig, IOThrottlerConfig,
LocalStatsManager, MetricsSnapshot, ScanResultEntry,
};
use rustfs_common::data_usage::DataUsageInfo;
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::disk::{DiskAPI, DiskStore};
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
path::{Path, PathBuf},
sync::{
Arc,
atomic::{AtomicU8, AtomicU64, AtomicUsize, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
/// SystemTime serde
mod system_time_serde {
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::time::{SystemTime, UNIX_EPOCH};
pub fn serialize<S>(time: &SystemTime, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let duration = time.duration_since(UNIX_EPOCH).unwrap_or_default();
duration.as_secs().serialize(serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<SystemTime, D::Error>
where
D: Deserializer<'de>,
{
let secs = u64::deserialize(deserializer)?;
Ok(UNIX_EPOCH + std::time::Duration::from_secs(secs))
}
}
/// Option<SystemTime> serde
mod option_system_time_serde {
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::time::{SystemTime, UNIX_EPOCH};
pub fn serialize<S>(time: &Option<SystemTime>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match time {
Some(t) => {
let duration = t.duration_since(UNIX_EPOCH).unwrap_or_default();
Some(duration.as_secs()).serialize(serializer)
}
None => None::<u64>.serialize(serializer),
}
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<SystemTime>, D::Error>
where
D: Deserializer<'de>,
{
let secs = Option::<u64>::deserialize(deserializer)?;
Ok(secs.map(|s| UNIX_EPOCH + std::time::Duration::from_secs(s)))
}
}
/// temporary BucketStats definition, for backward compatibility
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketStats {
pub name: String,
pub object_count: u64,
pub total_size: u64,
#[serde(with = "system_time_serde")]
pub last_update: SystemTime,
}
impl Default for BucketStats {
fn default() -> Self {
Self {
name: String::new(),
object_count: 0,
total_size: 0,
last_update: SystemTime::now(),
}
}
}
/// business load level enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum LoadLevel {
/// low load (<30%)
Low,
/// medium load (30-60%)
Medium,
/// high load (60-80%)
High,
/// ultra high load (>80%)
Critical,
}
/// node scanner config
#[derive(Debug, Clone)]
pub struct NodeScannerConfig {
/// scan interval
pub scan_interval: Duration,
/// disk scan delay
pub disk_scan_delay: Duration,
/// whether to enable smart scheduling
pub enable_smart_scheduling: bool,
/// whether to enable checkpoint resume
pub enable_checkpoint: bool,
/// checkpoint save interval
pub checkpoint_save_interval: Duration,
/// data directory path
pub data_dir: PathBuf,
/// max scan retry attempts
pub max_retry_attempts: u32,
}
impl Default for NodeScannerConfig {
fn default() -> Self {
// use a user-writable directory for scanner data
let data_dir = std::env::temp_dir().join("rustfs_scanner");
Self {
scan_interval: Duration::from_secs(300), // 5 minutes base interval
disk_scan_delay: Duration::from_secs(10), // disk scan delay 10 seconds
enable_smart_scheduling: true,
enable_checkpoint: true,
checkpoint_save_interval: Duration::from_secs(30), // checkpoint save interval 30 seconds
data_dir,
max_retry_attempts: 3,
}
}
}
/// local scan stats data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalScanStats {
/// scanned objects count
pub objects_scanned: u64,
/// healthy objects count
pub healthy_objects: u64,
/// corrupted objects count
pub corrupted_objects: u64,
/// data usage
pub data_usage: DataUsageInfo,
/// last update time
#[serde(with = "system_time_serde")]
pub last_update: SystemTime,
/// buckets stats
pub buckets_stats: HashMap<String, BucketStats>,
/// disks stats
pub disks_stats: HashMap<String, DiskStats>,
/// scan progress
pub scan_progress: ScanProgress,
/// checkpoint timestamp
#[serde(with = "system_time_serde")]
pub checkpoint_timestamp: SystemTime,
}
impl Default for LocalScanStats {
fn default() -> Self {
Self {
objects_scanned: 0,
healthy_objects: 0,
corrupted_objects: 0,
data_usage: DataUsageInfo::default(),
last_update: SystemTime::now(),
buckets_stats: HashMap::new(),
disks_stats: HashMap::new(),
scan_progress: ScanProgress::default(),
checkpoint_timestamp: SystemTime::now(),
}
}
}
/// disk stats info
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiskStats {
/// disk id
pub disk_id: String,
/// scanned objects count
pub objects_scanned: u64,
/// errors count
pub errors_count: u64,
/// last scan time
#[serde(with = "system_time_serde")]
pub last_scan_time: SystemTime,
/// scan duration
pub scan_duration: Duration,
/// whether scan is completed
pub scan_completed: bool,
}
impl Default for DiskStats {
fn default() -> Self {
Self {
disk_id: String::new(),
objects_scanned: 0,
errors_count: 0,
last_scan_time: SystemTime::now(),
scan_duration: Duration::from_secs(0),
scan_completed: false,
}
}
}
/// scan progress state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScanProgress {
/// current scan cycle
pub current_cycle: u64,
/// current disk index
pub current_disk_index: usize,
/// current bucket
pub current_bucket: Option<String>,
/// current object prefix
pub current_object_prefix: Option<String>,
/// completed disks
pub completed_disks: HashSet<String>,
/// completed buckets scan state
pub completed_buckets: HashMap<String, BucketScanState>,
/// last scanned object key
pub last_scan_key: Option<String>,
/// scan start time
#[serde(with = "system_time_serde")]
pub scan_start_time: SystemTime,
/// estimated completion time
#[serde(with = "option_system_time_serde")]
pub estimated_completion: Option<SystemTime>,
/// data usage statistics
pub data_usage: Option<DataUsageInfo>,
}
impl Default for ScanProgress {
fn default() -> Self {
Self {
current_cycle: 0,
current_disk_index: 0,
current_bucket: None,
current_object_prefix: None,
completed_disks: HashSet::new(),
completed_buckets: HashMap::new(),
last_scan_key: None,
scan_start_time: SystemTime::now(),
estimated_completion: None,
data_usage: None,
}
}
}
/// bucket scan state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketScanState {
/// whether completed
pub completed: bool,
/// last scanned object key
pub last_object_key: Option<String>,
/// scanned objects count
pub objects_scanned: u64,
/// scan timestamp
pub scan_timestamp: SystemTime,
}
/// IO monitor
pub struct IOMonitor {
/// current disk IOPS
current_iops: Arc<AtomicU64>,
/// disk queue depth
queue_depth: Arc<AtomicU64>,
/// business request latency (milliseconds)
business_latency: Arc<AtomicU64>,
/// CPU usage
cpu_usage: Arc<AtomicU8>,
/// memory usage
/// memory usage (reserved field)
#[allow(dead_code)]
memory_usage: Arc<AtomicU8>,
}
impl IOMonitor {
pub fn new() -> Self {
Self {
current_iops: Arc::new(AtomicU64::new(0)),
queue_depth: Arc::new(AtomicU64::new(0)),
business_latency: Arc::new(AtomicU64::new(0)),
cpu_usage: Arc::new(AtomicU8::new(0)),
memory_usage: Arc::new(AtomicU8::new(0)),
}
}
/// get current business load level
pub async fn get_business_load_level(&self) -> LoadLevel {
let iops = self.current_iops.load(Ordering::Relaxed);
let queue_depth = self.queue_depth.load(Ordering::Relaxed);
let latency = self.business_latency.load(Ordering::Relaxed);
let cpu = self.cpu_usage.load(Ordering::Relaxed);
// comprehensive evaluation of load level
let load_score = self.calculate_load_score(iops, queue_depth, latency, cpu);
match load_score {
0..=30 => LoadLevel::Low,
31..=60 => LoadLevel::Medium,
61..=80 => LoadLevel::High,
_ => LoadLevel::Critical,
}
}
fn calculate_load_score(&self, iops: u64, queue_depth: u64, latency: u64, cpu: u8) -> u8 {
// simplified load score algorithm, actual implementation needs to adjust based on specific hardware metrics
let iops_score = std::cmp::min(iops / 100, 25) as u8; // IOPS weight 25%
let queue_score = std::cmp::min(queue_depth * 5, 25) as u8; // queue depth weight 25%
let latency_score = std::cmp::min(latency / 10, 25) as u8; // latency weight 25%
let cpu_score = std::cmp::min(cpu / 4, 25); // CPU weight 25%
iops_score + queue_score + latency_score + cpu_score
}
/// update system metrics
pub async fn update_metrics(&self, iops: u64, queue_depth: u64, latency: u64, cpu: u8) {
self.current_iops.store(iops, Ordering::Relaxed);
self.queue_depth.store(queue_depth, Ordering::Relaxed);
self.business_latency.store(latency, Ordering::Relaxed);
self.cpu_usage.store(cpu, Ordering::Relaxed);
}
}
/// IO throttler
pub struct IOThrottler {
/// max IOPS limit (reserved field)
#[allow(dead_code)]
max_iops: Arc<AtomicU64>,
/// current IOPS usage (reserved field)
#[allow(dead_code)]
current_iops: Arc<AtomicU64>,
/// business priority weight (0-100)
business_priority: Arc<AtomicU8>,
/// scan operation delay (milliseconds)
scan_delay: Arc<AtomicU64>,
}
impl IOThrottler {
pub fn new() -> Self {
Self {
max_iops: Arc::new(AtomicU64::new(1000)), // default max 1000 IOPS
current_iops: Arc::new(AtomicU64::new(0)),
business_priority: Arc::new(AtomicU8::new(95)), // business priority 95%
scan_delay: Arc::new(AtomicU64::new(100)), // default 100ms delay
}
}
/// adjust scanning delay based on load level
pub async fn adjust_for_load_level(&self, load_level: LoadLevel) -> Duration {
let delay_ms = match load_level {
LoadLevel::Low => {
self.scan_delay.store(100, Ordering::Relaxed); // 100ms delay
self.business_priority.store(90, Ordering::Relaxed);
100
}
LoadLevel::Medium => {
self.scan_delay.store(500, Ordering::Relaxed); // 500ms delay
self.business_priority.store(95, Ordering::Relaxed);
500
}
LoadLevel::High => {
self.scan_delay.store(2000, Ordering::Relaxed); // 2s
self.business_priority.store(98, Ordering::Relaxed);
2000
}
LoadLevel::Critical => {
self.scan_delay.store(10000, Ordering::Relaxed); // 10s delay (actually will pause scanning)
self.business_priority.store(99, Ordering::Relaxed);
10000
}
};
Duration::from_millis(delay_ms)
}
/// check whether should pause scanning
pub async fn should_pause_scanning(&self, load_level: LoadLevel) -> bool {
matches!(load_level, LoadLevel::Critical)
}
}
/// decentralized node scanner
///
/// responsible for serial scanning of local disks, implementing smart scheduling and checkpoint resume functionality
pub struct NodeScanner {
/// node id
node_id: String,
/// local disks list
local_disks: Arc<RwLock<Vec<Arc<DiskStore>>>>,
/// IO monitor
io_monitor: Arc<AdvancedIOMonitor>,
/// IO throttler
throttler: Arc<AdvancedIOThrottler>,
/// config
config: Arc<RwLock<NodeScannerConfig>>,
/// current scanned disk index
current_disk_index: Arc<AtomicUsize>,
/// local stats data
local_stats: Arc<RwLock<LocalScanStats>>,
/// stats data manager
stats_manager: Arc<LocalStatsManager>,
/// scan progress state
scan_progress: Arc<RwLock<ScanProgress>>,
/// checkpoint manager mapping (one for each disk)
checkpoint_managers: Arc<RwLock<HashMap<String, Arc<CheckpointManager>>>>,
/// cancel token
cancel_token: CancellationToken,
}
impl NodeScanner {
/// create a new node scanner
pub fn new(node_id: String, config: NodeScannerConfig) -> Self {
// Ensure data directory exists
if !config.data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(&config.data_dir)
{
error!("create data directory failed {:?}: {}", config.data_dir, e);
}
let stats_manager = Arc::new(LocalStatsManager::new(&node_id, &config.data_dir));
let monitor_config = IOMonitorConfig {
monitor_interval: Duration::from_secs(1),
enable_system_monitoring: true,
..Default::default()
};
let io_monitor = Arc::new(AdvancedIOMonitor::new(monitor_config));
let throttler_config = IOThrottlerConfig {
max_iops: 1000,
base_business_priority: 95,
min_scan_delay: 5000,
max_scan_delay: 60000,
enable_dynamic_adjustment: true,
adjustment_response_time: 5,
};
let throttler = Arc::new(AdvancedIOThrottler::new(throttler_config));
Self {
node_id,
local_disks: Arc::new(RwLock::new(Vec::new())),
io_monitor,
throttler,
config: Arc::new(RwLock::new(config)),
current_disk_index: Arc::new(AtomicUsize::new(0)),
local_stats: Arc::new(RwLock::new(LocalScanStats::default())),
stats_manager,
scan_progress: Arc::new(RwLock::new(ScanProgress::default())),
checkpoint_managers: Arc::new(RwLock::new(HashMap::new())),
cancel_token: CancellationToken::new(),
}
}
/// add local disk and create checkpoint manager for it
pub async fn add_local_disk(&self, disk: Arc<DiskStore>) {
// get disk path and create corresponding scanner directory
let disk_path = disk.path();
let sys_dir = disk_path.join(".rustfs.sys").join("scanner");
// ensure directory exists
if let Err(e) = std::fs::create_dir_all(&sys_dir) {
error!("Failed to create scanner directory on disk {:?}: {}", disk_path, e);
return;
}
// create checkpoint manager for the disk
let disk_id = disk_path.to_string_lossy().to_string();
let checkpoint_manager = Arc::new(CheckpointManager::new(&self.node_id, &sys_dir));
// store checkpoint manager
self.checkpoint_managers
.write()
.await
.insert(disk_id.clone(), checkpoint_manager);
// add disk to local disks list
self.local_disks.write().await.push(disk.clone());
info!("Added disk {} with checkpoint manager to node {}", disk_id, self.node_id);
}
/// get checkpoint manager for the disk
async fn get_checkpoint_manager_for_disk(&self, disk_id: &str) -> Option<Arc<CheckpointManager>> {
self.checkpoint_managers.read().await.get(disk_id).cloned()
}
/// get default checkpoint manager (for the case when there is no local disk)
async fn get_default_checkpoint_manager(&self) -> Option<Arc<CheckpointManager>> {
let config = self.config.read().await;
let data_dir = &config.data_dir;
// ensure data directory exists
if let Err(e) = std::fs::create_dir_all(data_dir) {
error!("Failed to create data directory {:?}: {}", data_dir, e);
return None;
}
// create default checkpoint manager
Some(Arc::new(CheckpointManager::new(&self.node_id, data_dir)))
}
/// create checkpoint manager for all disks (called during initialization)
pub async fn initialize_checkpoint_managers(&self) {
let local_disks = self.local_disks.read().await;
let mut checkpoint_managers = self.checkpoint_managers.write().await;
for disk in local_disks.iter() {
let disk_path = disk.path();
let sys_dir = disk_path.join(".rustfs.sys").join("scanner");
// ensure directory exists
if let Err(e) = std::fs::create_dir_all(&sys_dir) {
error!("Failed to create scanner directory on disk {:?}: {}", disk_path, e);
continue;
}
// create checkpoint manager for the disk
let disk_id = disk_path.to_string_lossy().to_string();
let checkpoint_manager = Arc::new(CheckpointManager::new(&self.node_id, &sys_dir));
checkpoint_managers.insert(disk_id, checkpoint_manager);
}
}
/// set data directory
pub async fn set_data_dir(&self, data_dir: &Path) {
// Update the checkpoint manager with the new data directory
let _new_checkpoint_manager = CheckpointManager::new(&self.node_id, data_dir);
// Note: We can't directly replace the Arc, so we would need to redesign this
info!("Would set data directory to: {:?}", data_dir);
// TODO: Implement proper data directory management
}
/// get node id
pub fn node_id(&self) -> &str {
&self.node_id
}
/// get local stats data
pub async fn get_local_stats(&self) -> LocalScanStats {
self.local_stats.read().await.clone()
}
/// get scan progress
pub async fn get_scan_progress(&self) -> ScanProgress {
self.scan_progress.read().await.clone()
}
/// start scanner
pub async fn start(&self) -> Result<()> {
info!("start scanner for node {}", self.node_id);
// try to resume from checkpoint
self.start_with_resume().await?;
Ok(())
}
/// try to resume from checkpoint when node starts
pub async fn start_with_resume(&self) -> Result<()> {
info!("node {} start, try to resume checkpoint", self.node_id);
// initialize checkpoint managers
self.initialize_checkpoint_managers().await;
// try to resume scanning progress (from the first disk)
let local_disks = self.local_disks.read().await;
if !local_disks.is_empty() {
let first_disk = &local_disks[0];
let disk_id = first_disk.path().to_string_lossy().to_string();
if let Some(checkpoint_manager) = self.get_checkpoint_manager_for_disk(&disk_id).await {
match checkpoint_manager.load_checkpoint().await {
Ok(Some(progress)) => {
info!(
"success to resume scanning from disk {}: cycle={}, disk={}, last_key={:?}",
disk_id, progress.current_cycle, progress.current_disk_index, progress.last_scan_key
);
*self.scan_progress.write().await = progress;
// use the resumed progress to start scanning
self.resume_scanning_from_checkpoint().await
}
Ok(None) => {
info!("disk {} has no valid checkpoint, start fresh scanning", disk_id);
self.start_fresh_scanning().await
}
Err(e) => {
warn!("failed to resume scanning from disk {}: {}, start fresh scanning", disk_id, e);
self.start_fresh_scanning().await
}
}
} else {
info!("cannot get checkpoint manager for disk {}, start fresh scanning", disk_id);
self.start_fresh_scanning().await
}
} else {
info!("no local disk, try to resume from default checkpoint manager");
// try to load from default checkpoint manager
if let Some(default_checkpoint_manager) = self.get_default_checkpoint_manager().await {
match default_checkpoint_manager.load_checkpoint().await {
Ok(Some(scan_progress)) => {
info!(
"resume scanning from default checkpoint: cycle={}, last_key={:?}",
scan_progress.current_cycle, scan_progress.last_scan_key
);
// resume scanning progress
*self.scan_progress.write().await = scan_progress;
return self.resume_scanning_from_checkpoint().await;
}
Ok(None) => {
info!("no default checkpoint file");
}
Err(e) => {
warn!("load default checkpoint failed: {}", e);
}
}
}
// if no checkpoint, check if there is scanning progress in memory (for test scenario)
let current_progress = self.scan_progress.read().await;
if current_progress.current_cycle > 0 || current_progress.last_scan_key.is_some() {
info!(
"found scanning progress in memory: cycle={}, disk={}, last_key={:?}",
current_progress.current_cycle, current_progress.current_disk_index, current_progress.last_scan_key
);
drop(current_progress);
self.resume_scanning_from_checkpoint().await
} else {
drop(current_progress);
self.start_fresh_scanning().await
}
}
}
/// resume scanning from checkpoint
async fn resume_scanning_from_checkpoint(&self) -> Result<()> {
let progress = self.scan_progress.read().await;
let disk_index = progress.current_disk_index;
let last_scan_key = progress.last_scan_key.clone();
drop(progress);
info!("resume scanning from disk {}: last_scan_key={:?}", disk_index, last_scan_key);
// update current disk index
self.current_disk_index
.store(disk_index, std::sync::atomic::Ordering::Relaxed);
// start IO monitoring
self.start_io_monitoring().await?;
// start scanning loop
let scanner_clone = self.clone_for_background();
tokio::spawn(async move {
if let Err(e) = scanner_clone.scan_loop_with_resume(last_scan_key).await {
error!("scanning loop failed: {}", e);
}
});
Ok(())
}
/// start fresh scanning
async fn start_fresh_scanning(&self) -> Result<()> {
info!("start fresh scanning loop");
// initialize scanning progress
{
let mut progress = self.scan_progress.write().await;
progress.current_cycle += 1;
progress.current_disk_index = 0;
progress.scan_start_time = SystemTime::now();
progress.last_scan_key = None;
progress.completed_disks.clear();
progress.completed_buckets.clear();
}
self.current_disk_index.store(0, std::sync::atomic::Ordering::Relaxed);
// start IO monitoring
self.start_io_monitoring().await?;
// start scanning loop
let scanner_clone = self.clone_for_background();
tokio::spawn(async move {
// update object count and size for each bucket
if let Err(e) = scanner_clone.scan_loop_with_resume(None).await {
error!("scanning loop failed: {}", e);
}
});
Ok(())
}
/// stop scanner
pub async fn stop(&self) -> Result<()> {
info!("stop scanner for node {}", self.node_id);
self.cancel_token.cancel();
Ok(())
}
/// clone for background task
fn clone_for_background(&self) -> Self {
Self {
node_id: self.node_id.clone(),
local_disks: self.local_disks.clone(),
io_monitor: self.io_monitor.clone(),
throttler: self.throttler.clone(),
config: self.config.clone(),
current_disk_index: self.current_disk_index.clone(),
local_stats: self.local_stats.clone(),
stats_manager: self.stats_manager.clone(),
scan_progress: self.scan_progress.clone(),
checkpoint_managers: self.checkpoint_managers.clone(),
cancel_token: self.cancel_token.clone(),
}
}
/// start IO monitoring
async fn start_io_monitoring(&self) -> Result<()> {
info!("start advanced IO monitoring");
self.io_monitor.start().await?;
Ok(())
}
/// main scanning loop (not supported checkpoint resume)
#[allow(dead_code)]
async fn scan_loop(&self) -> Result<()> {
self.scan_loop_with_resume(None).await
}
/// main scanning loop with checkpoint resume
async fn scan_loop_with_resume(&self, resume_from_key: Option<String>) -> Result<()> {
info!("node {} start scanning loop, resume from key: {:?}", self.node_id, resume_from_key);
while !self.cancel_token.is_cancelled() {
// check business load
let load_level = self.io_monitor.get_business_load_level().await;
// get current system metrics
let current_metrics = self.io_monitor.get_current_metrics().await;
let metrics_snapshot = MetricsSnapshot {
iops: current_metrics.iops,
latency: current_metrics.avg_latency,
cpu_usage: current_metrics.cpu_usage,
memory_usage: current_metrics.memory_usage,
};
// get throttle decision
let throttle_decision = self
.throttler
.make_throttle_decision(load_level, Some(metrics_snapshot))
.await;
// according to decision action
if throttle_decision.should_pause {
warn!("pause scanning according to throttle decision: {}", throttle_decision.reason);
tokio::time::sleep(Duration::from_secs(600)).await; // pause 10 minutes
continue;
}
// execute serial disk scanning
if let Err(e) = self.scan_all_disks_serially().await {
error!("disk scanning failed: {}", e);
}
// save checkpoint
if let Err(e) = self.save_checkpoint().await {
warn!("save checkpoint failed: {}", e);
}
// use throttle decision suggested delay
let scan_interval = throttle_decision.suggested_delay;
info!("scan completed, wait {:?} for next round", scan_interval);
info!(
"resource allocation: business {}%, scanner {}%",
throttle_decision.resource_allocation.business_percentage,
throttle_decision.resource_allocation.scanner_percentage
);
tokio::select! {
_ = self.cancel_token.cancelled() => {
info!("scanning loop cancelled");
break;
}
_ = tokio::time::sleep(scan_interval) => {
// continue next round scanning
}
}
}
Ok(())
}
/// serial scanning all local disks
async fn scan_all_disks_serially(&self) -> Result<()> {
let local_disks = self.local_disks.read().await;
info!("start serial scanning node {} of {} disks", self.node_id, local_disks.len());
for (index, disk) in local_disks.iter().enumerate() {
// check again whether should pause
let load_level = self.io_monitor.get_business_load_level().await;
let should_pause = self.throttler.should_pause_scanning(load_level).await;
if should_pause {
warn!("business load too high, interrupt disk scanning");
break;
}
info!("start scanning disk {:?} (index: {})", disk.path(), index);
// scan single disk
if let Err(e) = self.scan_single_disk(disk.clone()).await {
error!("scan disk {:?} failed: {}", disk.path(), e);
continue;
}
// update scan progress
self.update_disk_scan_progress(index, &disk.path().to_string_lossy()).await;
// disk inter-delay (using smart throttle decision)
if index < local_disks.len() - 1 {
let delay = self.throttler.adjust_for_load_level(load_level).await;
info!("disk {:?} scan completed, smart delay {:?} for next", disk.path(), delay);
tokio::time::sleep(delay).await;
}
}
// update cycle scan progress
self.complete_scan_cycle().await;
Ok(())
}
/// scan single disk
async fn scan_single_disk(&self, disk: Arc<DiskStore>) -> Result<()> {
info!("scan disk: path={:?}", disk.path());
let scan_start = SystemTime::now();
let mut scan_entries = Vec::new();
// get ECStore instance for real disk scanning
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
// get all buckets on disk
match ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions::default())
.await
{
Ok(buckets) => {
for bucket_info in buckets {
let bucket_name = &bucket_info.name;
// skip system internal buckets
if bucket_name == ".minio.sys" {
continue;
}
// scan objects in bucket
match self.scan_bucket_on_disk(&disk, bucket_name, &mut scan_entries).await {
Ok(object_count) => {
debug!(
"disk {:?} bucket {} scan completed, found {} objects",
disk.path(),
bucket_name,
object_count
);
}
Err(e) => {
warn!("disk {:?} bucket {} scan failed: {}", disk.path(), bucket_name, e);
}
}
}
}
Err(e) => {
warn!("get bucket list failed: {}", e);
// Fallback: simulate some scan results for testing to continue
self.generate_fallback_scan_results(&disk, &mut scan_entries).await;
}
}
} else {
warn!("ECStore instance not available, use simulated scan data");
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/mod.rs | crates/ahm/src/scanner/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod checkpoint;
pub mod data_scanner;
pub mod histogram;
pub mod io_monitor;
pub mod io_throttler;
pub mod lifecycle;
pub mod local_scan;
pub mod local_stats;
pub mod metrics;
pub mod node_scanner;
pub mod stats_aggregator;
pub use checkpoint::{CheckpointData, CheckpointInfo, CheckpointManager};
pub use data_scanner::{ScanMode, Scanner, ScannerConfig, ScannerState};
pub use io_monitor::{AdvancedIOMonitor, IOMetrics, IOMonitorConfig};
pub use io_throttler::{AdvancedIOThrottler, IOThrottlerConfig, MetricsSnapshot, ResourceAllocation, ThrottleDecision};
pub use local_stats::{BatchScanResult, LocalStatsManager, ScanResultEntry, StatsSummary};
pub use metrics::{BucketMetrics, DiskMetrics, MetricsCollector, ScannerMetrics};
pub use node_scanner::{IOMonitor, IOThrottler, LoadLevel, LocalScanStats, NodeScanner, NodeScannerConfig};
pub use stats_aggregator::{
AggregatedStats, DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig, NodeClient, NodeInfo,
};
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/checkpoint.rs | crates/ahm/src/scanner/checkpoint.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::node_scanner::ScanProgress;
use crate::{Error, Result};
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CheckpointData {
pub version: u32,
pub timestamp: SystemTime,
pub progress: ScanProgress,
pub node_id: String,
pub checksum: u64,
}
impl CheckpointData {
pub fn new(progress: ScanProgress, node_id: String) -> Self {
let mut checkpoint = Self {
version: 1,
timestamp: SystemTime::now(),
progress,
node_id,
checksum: 0,
};
checkpoint.checksum = checkpoint.calculate_checksum();
checkpoint
}
fn calculate_checksum(&self) -> u64 {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
self.version.hash(&mut hasher);
self.node_id.hash(&mut hasher);
self.progress.current_cycle.hash(&mut hasher);
self.progress.current_disk_index.hash(&mut hasher);
if let Some(ref bucket) = self.progress.current_bucket {
bucket.hash(&mut hasher);
}
if let Some(ref key) = self.progress.last_scan_key {
key.hash(&mut hasher);
}
hasher.finish()
}
pub fn verify_integrity(&self) -> bool {
let calculated_checksum = self.calculate_checksum();
self.checksum == calculated_checksum
}
}
pub struct CheckpointManager {
checkpoint_file: PathBuf,
backup_file: PathBuf,
temp_file: PathBuf,
save_interval: Duration,
last_save: RwLock<SystemTime>,
node_id: String,
}
impl CheckpointManager {
pub fn new(node_id: &str, data_dir: &Path) -> Self {
if !data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(data_dir)
{
error!("create data dir failed {:?}: {}", data_dir, e);
}
let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));
let backup_file = data_dir.join(format!("scanner_checkpoint_{node_id}.backup"));
let temp_file = data_dir.join(format!("scanner_checkpoint_{node_id}.tmp"));
Self {
checkpoint_file,
backup_file,
temp_file,
save_interval: Duration::from_secs(30), // 30s
last_save: RwLock::new(SystemTime::UNIX_EPOCH),
node_id: node_id.to_string(),
}
}
pub async fn save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
let now = SystemTime::now();
let last_save = *self.last_save.read().await;
if now.duration_since(last_save).unwrap_or(Duration::ZERO) < self.save_interval {
return Ok(());
}
let checkpoint_data = CheckpointData::new(progress.clone(), self.node_id.clone());
let json_data = serde_json::to_string_pretty(&checkpoint_data)
.map_err(|e| Error::Serialization(format!("serialize checkpoint failed: {e}")))?;
tokio::fs::write(&self.temp_file, json_data)
.await
.map_err(|e| Error::IO(format!("write temp checkpoint file failed: {e}")))?;
if self.checkpoint_file.exists() {
tokio::fs::copy(&self.checkpoint_file, &self.backup_file)
.await
.map_err(|e| Error::IO(format!("backup checkpoint file failed: {e}")))?;
}
tokio::fs::rename(&self.temp_file, &self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("replace checkpoint file failed: {e}")))?;
*self.last_save.write().await = now;
debug!(
"save checkpoint to {:?}, cycle: {}, disk index: {}",
self.checkpoint_file, checkpoint_data.progress.current_cycle, checkpoint_data.progress.current_disk_index
);
Ok(())
}
pub async fn load_checkpoint(&self) -> Result<Option<ScanProgress>> {
// first try main checkpoint file
match self.load_checkpoint_from_file(&self.checkpoint_file).await {
Ok(checkpoint) => {
info!(
"restore scan progress from main checkpoint file: cycle={}, disk index={}, last scan key={:?}",
checkpoint.current_cycle, checkpoint.current_disk_index, checkpoint.last_scan_key
);
Ok(Some(checkpoint))
}
Err(e) => {
warn!("main checkpoint file is corrupted or not exists: {}", e);
// try backup file
match self.load_checkpoint_from_file(&self.backup_file).await {
Ok(checkpoint) => {
warn!(
"restore scan progress from backup file: cycle={}, disk index={}",
checkpoint.current_cycle, checkpoint.current_disk_index
);
// copy backup file to main checkpoint file
if let Err(copy_err) = tokio::fs::copy(&self.backup_file, &self.checkpoint_file).await {
warn!("restore main checkpoint file failed: {}", copy_err);
}
Ok(Some(checkpoint))
}
Err(backup_e) => {
warn!("backup file is corrupted or not exists: {}", backup_e);
info!("cannot restore scan progress, will start fresh scan");
Ok(None)
}
}
}
}
}
/// load checkpoint from file
async fn load_checkpoint_from_file(&self, file_path: &Path) -> Result<ScanProgress> {
if !file_path.exists() {
return Err(Error::NotFound(format!("checkpoint file not exists: {file_path:?}")));
}
// read file content
let content = tokio::fs::read_to_string(file_path)
.await
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
// deserialize
let checkpoint_data: CheckpointData =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
// validate checkpoint data
self.validate_checkpoint(&checkpoint_data)?;
Ok(checkpoint_data.progress)
}
/// validate checkpoint data
fn validate_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> {
// validate data integrity
if !checkpoint.verify_integrity() {
return Err(Error::InvalidCheckpoint(
"checkpoint data verification failed, may be corrupted".to_string(),
));
}
// validate node id match
if checkpoint.node_id != self.node_id {
return Err(Error::InvalidCheckpoint(format!(
"checkpoint node id not match: expected {}, actual {}",
self.node_id, checkpoint.node_id
)));
}
let now = SystemTime::now();
let checkpoint_age = now.duration_since(checkpoint.timestamp).unwrap_or(Duration::MAX);
// checkpoint is too old (more than 24 hours), may be data expired
if checkpoint_age > Duration::from_secs(24 * 3600) {
return Err(Error::InvalidCheckpoint(format!("checkpoint data is too old: {checkpoint_age:?}")));
}
// validate version compatibility
if checkpoint.version > 1 {
return Err(Error::InvalidCheckpoint(format!(
"unsupported checkpoint version: {}",
checkpoint.version
)));
}
Ok(())
}
/// clean checkpoint file
///
/// called when scanner stops or resets
pub async fn cleanup_checkpoint(&self) -> Result<()> {
// delete main file
if self.checkpoint_file.exists() {
tokio::fs::remove_file(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("delete main checkpoint file failed: {e}")))?;
}
// delete backup file
if self.backup_file.exists() {
tokio::fs::remove_file(&self.backup_file)
.await
.map_err(|e| Error::IO(format!("delete backup checkpoint file failed: {e}")))?;
}
// delete temp file
if self.temp_file.exists() {
tokio::fs::remove_file(&self.temp_file)
.await
.map_err(|e| Error::IO(format!("delete temp checkpoint file failed: {e}")))?;
}
info!("cleaned up all checkpoint files");
Ok(())
}
/// get checkpoint file info
pub async fn get_checkpoint_info(&self) -> Result<Option<CheckpointInfo>> {
if !self.checkpoint_file.exists() {
return Ok(None);
}
let metadata = tokio::fs::metadata(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("get checkpoint file metadata failed: {e}")))?;
let content = tokio::fs::read_to_string(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
let checkpoint_data: CheckpointData =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
Ok(Some(CheckpointInfo {
file_size: metadata.len(),
last_modified: metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH),
checkpoint_timestamp: checkpoint_data.timestamp,
current_cycle: checkpoint_data.progress.current_cycle,
current_disk_index: checkpoint_data.progress.current_disk_index,
completed_disks_count: checkpoint_data.progress.completed_disks.len(),
is_valid: checkpoint_data.verify_integrity(),
}))
}
/// force save checkpoint (ignore time interval limit)
pub async fn force_save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
// temporarily reset last save time, force save
*self.last_save.write().await = SystemTime::UNIX_EPOCH;
self.save_checkpoint(progress).await
}
/// set save interval
pub async fn set_save_interval(&mut self, interval: Duration) {
self.save_interval = interval;
info!("checkpoint save interval set to: {:?}", interval);
}
}
/// checkpoint info
#[derive(Debug, Clone)]
pub struct CheckpointInfo {
/// file size
pub file_size: u64,
/// file last modified time
pub last_modified: SystemTime,
/// checkpoint creation time
pub checkpoint_timestamp: SystemTime,
/// current scan cycle
pub current_cycle: u64,
/// current disk index
pub current_disk_index: usize,
/// completed disks count
pub completed_disks_count: usize,
/// checkpoint is valid
pub is_valid: bool,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/metrics.rs | crates/ahm/src/scanner/metrics.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
sync::atomic::{AtomicU64, Ordering},
time::{Duration, SystemTime},
};
use tracing::info;
/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
/// Total objects scanned since server start
pub objects_scanned: u64,
/// Total object versions scanned since server start
pub versions_scanned: u64,
/// Total directories scanned since server start
pub directories_scanned: u64,
/// Total bucket scans started since server start
pub bucket_scans_started: u64,
/// Total bucket scans finished since server start
pub bucket_scans_finished: u64,
/// Total objects with health issues found
pub objects_with_issues: u64,
/// Total heal tasks queued
pub heal_tasks_queued: u64,
/// Total heal tasks completed
pub heal_tasks_completed: u64,
/// Total heal tasks failed
pub heal_tasks_failed: u64,
/// Total healthy objects found
pub healthy_objects: u64,
/// Total corrupted objects found
pub corrupted_objects: u64,
/// Last scan activity time
pub last_activity: Option<SystemTime>,
/// Current scan cycle
pub current_cycle: u64,
/// Total scan cycles completed
pub total_cycles: u64,
/// Current scan duration
pub current_scan_duration: Option<Duration>,
/// Average scan duration
pub avg_scan_duration: Duration,
/// Objects scanned per second
pub objects_per_second: f64,
/// Buckets scanned per second
pub buckets_per_second: f64,
/// Storage metrics by bucket
pub bucket_metrics: HashMap<String, BucketMetrics>,
/// Disk metrics
pub disk_metrics: HashMap<String, DiskMetrics>,
}
/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
/// Bucket name
pub bucket: String,
/// Total objects in bucket
pub total_objects: u64,
/// Total size of objects in bucket (bytes)
pub total_size: u64,
/// Objects with health issues
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Scan duration
pub scan_duration: Option<Duration>,
/// Heal tasks queued for this bucket
pub heal_tasks_queued: u64,
/// Heal tasks completed for this bucket
pub heal_tasks_completed: u64,
/// Heal tasks failed for this bucket
pub heal_tasks_failed: u64,
}
/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
/// Disk path
pub disk_path: String,
/// Total disk space (bytes)
pub total_space: u64,
/// Used disk space (bytes)
pub used_space: u64,
/// Free disk space (bytes)
pub free_space: u64,
/// Objects scanned on this disk
pub objects_scanned: u64,
/// Objects with issues on this disk
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Whether disk is online
pub is_online: bool,
/// Whether disk is being scanned
pub is_scanning: bool,
}
/// Thread-safe metrics collector
pub struct MetricsCollector {
/// Atomic counters for real-time metrics
objects_scanned: AtomicU64,
versions_scanned: AtomicU64,
directories_scanned: AtomicU64,
bucket_scans_started: AtomicU64,
bucket_scans_finished: AtomicU64,
objects_with_issues: AtomicU64,
heal_tasks_queued: AtomicU64,
heal_tasks_completed: AtomicU64,
heal_tasks_failed: AtomicU64,
current_cycle: AtomicU64,
total_cycles: AtomicU64,
healthy_objects: AtomicU64,
corrupted_objects: AtomicU64,
}
impl MetricsCollector {
/// Create a new metrics collector
pub fn new() -> Self {
Self {
objects_scanned: AtomicU64::new(0),
versions_scanned: AtomicU64::new(0),
directories_scanned: AtomicU64::new(0),
bucket_scans_started: AtomicU64::new(0),
bucket_scans_finished: AtomicU64::new(0),
objects_with_issues: AtomicU64::new(0),
heal_tasks_queued: AtomicU64::new(0),
heal_tasks_completed: AtomicU64::new(0),
heal_tasks_failed: AtomicU64::new(0),
current_cycle: AtomicU64::new(0),
total_cycles: AtomicU64::new(0),
healthy_objects: AtomicU64::new(0),
corrupted_objects: AtomicU64::new(0),
}
}
/// Increment objects scanned count
pub fn increment_objects_scanned(&self, count: u64) {
self.objects_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment versions scanned count
pub fn increment_versions_scanned(&self, count: u64) {
self.versions_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment directories scanned count
pub fn increment_directories_scanned(&self, count: u64) {
self.directories_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans started count
pub fn increment_bucket_scans_started(&self, count: u64) {
self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans finished count
pub fn increment_bucket_scans_finished(&self, count: u64) {
self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
}
/// Increment objects with issues count
pub fn increment_objects_with_issues(&self, count: u64) {
self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks queued count
pub fn increment_heal_tasks_queued(&self, count: u64) {
self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks completed count
pub fn increment_heal_tasks_completed(&self, count: u64) {
self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks failed count
pub fn increment_heal_tasks_failed(&self, count: u64) {
self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
}
/// Set current cycle
pub fn set_current_cycle(&self, cycle: u64) {
self.current_cycle.store(cycle, Ordering::Relaxed);
}
/// Increment total cycles
pub fn increment_total_cycles(&self) {
self.total_cycles.fetch_add(1, Ordering::Relaxed);
}
/// Increment healthy objects count
pub fn increment_healthy_objects(&self) {
self.healthy_objects.fetch_add(1, Ordering::Relaxed);
}
/// Increment corrupted objects count
pub fn increment_corrupted_objects(&self) {
self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
}
/// Get current metrics snapshot
pub fn get_metrics(&self) -> ScannerMetrics {
ScannerMetrics {
objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
last_activity: Some(SystemTime::now()),
current_cycle: self.current_cycle.load(Ordering::Relaxed),
total_cycles: self.total_cycles.load(Ordering::Relaxed),
current_scan_duration: None, // Will be set by scanner
avg_scan_duration: Duration::ZERO, // Will be calculated
objects_per_second: 0.0, // Will be calculated
buckets_per_second: 0.0, // Will be calculated
bucket_metrics: HashMap::new(), // Will be populated by scanner
disk_metrics: HashMap::new(), // Will be populated by scanner
}
}
/// Reset all metrics
pub fn reset(&self) {
self.objects_scanned.store(0, Ordering::Relaxed);
self.versions_scanned.store(0, Ordering::Relaxed);
self.directories_scanned.store(0, Ordering::Relaxed);
self.bucket_scans_started.store(0, Ordering::Relaxed);
self.bucket_scans_finished.store(0, Ordering::Relaxed);
self.objects_with_issues.store(0, Ordering::Relaxed);
self.heal_tasks_queued.store(0, Ordering::Relaxed);
self.heal_tasks_completed.store(0, Ordering::Relaxed);
self.heal_tasks_failed.store(0, Ordering::Relaxed);
self.current_cycle.store(0, Ordering::Relaxed);
self.total_cycles.store(0, Ordering::Relaxed);
self.healthy_objects.store(0, Ordering::Relaxed);
self.corrupted_objects.store(0, Ordering::Relaxed);
info!("Scanner metrics reset");
}
}
impl Default for MetricsCollector {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_metrics_collector_creation() {
let collector = MetricsCollector::new();
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
assert_eq!(metrics.versions_scanned, 0);
}
#[test]
fn test_metrics_increment() {
let collector = MetricsCollector::new();
collector.increment_objects_scanned(10);
collector.increment_versions_scanned(5);
collector.increment_objects_with_issues(2);
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 10);
assert_eq!(metrics.versions_scanned, 5);
assert_eq!(metrics.objects_with_issues, 2);
}
#[test]
fn test_metrics_reset() {
let collector = MetricsCollector::new();
collector.increment_objects_scanned(10);
collector.reset();
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/io_throttler.rs | crates/ahm/src/scanner/io_throttler.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::LoadLevel;
use std::{
sync::{
Arc,
atomic::{AtomicU8, AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
/// IO throttler config
#[derive(Debug, Clone)]
pub struct IOThrottlerConfig {
/// max IOPS limit
pub max_iops: u64,
/// business priority baseline (percentage)
pub base_business_priority: u8,
/// scanner minimum delay (milliseconds)
pub min_scan_delay: u64,
/// scanner maximum delay (milliseconds)
pub max_scan_delay: u64,
/// whether enable dynamic adjustment
pub enable_dynamic_adjustment: bool,
/// adjustment response time (seconds)
pub adjustment_response_time: u64,
}
impl Default for IOThrottlerConfig {
fn default() -> Self {
Self {
max_iops: 1000, // default max 1000 IOPS
base_business_priority: 95, // business priority 95%
min_scan_delay: 5000, // minimum 5s delay
max_scan_delay: 60000, // maximum 60s delay
enable_dynamic_adjustment: true,
adjustment_response_time: 5, // 5 seconds response time
}
}
}
/// resource allocation strategy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResourceAllocationStrategy {
/// business priority strategy
BusinessFirst,
/// balanced strategy
Balanced,
/// maintenance priority strategy (only used in special cases)
MaintenanceFirst,
}
/// throttle decision
#[derive(Debug, Clone)]
pub struct ThrottleDecision {
/// whether should pause scanning
pub should_pause: bool,
/// suggested scanning delay
pub suggested_delay: Duration,
/// resource allocation suggestion
pub resource_allocation: ResourceAllocation,
/// decision reason
pub reason: String,
}
/// resource allocation
#[derive(Debug, Clone)]
pub struct ResourceAllocation {
/// business IO allocation percentage (0-100)
pub business_percentage: u8,
/// scanner IO allocation percentage (0-100)
pub scanner_percentage: u8,
/// allocation strategy
pub strategy: ResourceAllocationStrategy,
}
/// enhanced IO throttler
///
/// dynamically adjust the resource usage of the scanner based on real-time system load and business demand,
/// ensure business IO gets priority protection.
pub struct AdvancedIOThrottler {
/// config
config: Arc<RwLock<IOThrottlerConfig>>,
/// current IOPS usage (reserved field)
#[allow(dead_code)]
current_iops: Arc<AtomicU64>,
/// business priority weight (0-100)
business_priority: Arc<AtomicU8>,
/// scanning operation delay (milliseconds)
scan_delay: Arc<AtomicU64>,
/// resource allocation strategy
allocation_strategy: Arc<RwLock<ResourceAllocationStrategy>>,
/// throttle history record
throttle_history: Arc<RwLock<Vec<ThrottleRecord>>>,
/// last adjustment time (reserved field)
#[allow(dead_code)]
last_adjustment: Arc<RwLock<SystemTime>>,
}
/// throttle record
#[derive(Debug, Clone)]
pub struct ThrottleRecord {
/// timestamp
pub timestamp: SystemTime,
/// load level
pub load_level: LoadLevel,
/// decision
pub decision: ThrottleDecision,
/// system metrics snapshot
pub metrics_snapshot: MetricsSnapshot,
}
/// metrics snapshot
#[derive(Debug, Clone)]
pub struct MetricsSnapshot {
/// IOPS
pub iops: u64,
/// latency
pub latency: u64,
/// CPU usage
pub cpu_usage: u8,
/// memory usage
pub memory_usage: u8,
}
impl AdvancedIOThrottler {
/// create new advanced IO throttler
pub fn new(config: IOThrottlerConfig) -> Self {
Self {
config: Arc::new(RwLock::new(config)),
current_iops: Arc::new(AtomicU64::new(0)),
business_priority: Arc::new(AtomicU8::new(95)),
scan_delay: Arc::new(AtomicU64::new(5000)),
allocation_strategy: Arc::new(RwLock::new(ResourceAllocationStrategy::BusinessFirst)),
throttle_history: Arc::new(RwLock::new(Vec::new())),
last_adjustment: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
}
}
/// adjust scanning delay based on load level
pub async fn adjust_for_load_level(&self, load_level: LoadLevel) -> Duration {
let config = self.config.read().await;
let delay_ms = match load_level {
LoadLevel::Low => {
// low load: use minimum delay
self.scan_delay.store(config.min_scan_delay, Ordering::Relaxed);
self.business_priority
.store(config.base_business_priority.saturating_sub(5), Ordering::Relaxed);
config.min_scan_delay
}
LoadLevel::Medium => {
// medium load: increase delay moderately
let delay = config.min_scan_delay * 5; // 500ms
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority.store(config.base_business_priority, Ordering::Relaxed);
delay
}
LoadLevel::High => {
// high load: increase delay significantly
let delay = config.min_scan_delay * 10; // 50s
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority
.store(config.base_business_priority.saturating_add(3), Ordering::Relaxed);
delay
}
LoadLevel::Critical => {
// critical load: maximum delay or pause
let delay = config.max_scan_delay; // 60s
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority.store(99, Ordering::Relaxed);
delay
}
};
let duration = Duration::from_millis(delay_ms);
debug!("Adjust scanning delay based on load level {:?}: {:?}", load_level, duration);
duration
}
/// create throttle decision
pub async fn make_throttle_decision(&self, load_level: LoadLevel, metrics: Option<MetricsSnapshot>) -> ThrottleDecision {
let _config = self.config.read().await;
let should_pause = matches!(load_level, LoadLevel::Critical);
let suggested_delay = self.adjust_for_load_level(load_level).await;
let resource_allocation = self.calculate_resource_allocation(load_level).await;
let reason = match load_level {
LoadLevel::Low => "system load is low, scanner can run normally".to_string(),
LoadLevel::Medium => "system load is moderate, scanner is running at reduced speed".to_string(),
LoadLevel::High => "system load is high, scanner is running at significantly reduced speed".to_string(),
LoadLevel::Critical => "system load is too high, scanner is paused".to_string(),
};
let decision = ThrottleDecision {
should_pause,
suggested_delay,
resource_allocation,
reason,
};
// record decision history
if let Some(snapshot) = metrics {
self.record_throttle_decision(load_level, decision.clone(), snapshot).await;
}
decision
}
/// calculate resource allocation
async fn calculate_resource_allocation(&self, load_level: LoadLevel) -> ResourceAllocation {
let strategy = *self.allocation_strategy.read().await;
let (business_pct, scanner_pct) = match (strategy, load_level) {
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Low) => (90, 10),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Medium) => (95, 5),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::High) => (98, 2),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Critical) => (99, 1),
(ResourceAllocationStrategy::Balanced, LoadLevel::Low) => (80, 20),
(ResourceAllocationStrategy::Balanced, LoadLevel::Medium) => (85, 15),
(ResourceAllocationStrategy::Balanced, LoadLevel::High) => (90, 10),
(ResourceAllocationStrategy::Balanced, LoadLevel::Critical) => (95, 5),
(ResourceAllocationStrategy::MaintenanceFirst, _) => (70, 30), // special maintenance mode
};
ResourceAllocation {
business_percentage: business_pct,
scanner_percentage: scanner_pct,
strategy,
}
}
/// check whether should pause scanning
pub async fn should_pause_scanning(&self, load_level: LoadLevel) -> bool {
match load_level {
LoadLevel::Critical => {
warn!("System load reached critical level, pausing scanner");
true
}
_ => false,
}
}
/// record throttle decision
async fn record_throttle_decision(&self, load_level: LoadLevel, decision: ThrottleDecision, metrics: MetricsSnapshot) {
let record = ThrottleRecord {
timestamp: SystemTime::now(),
load_level,
decision,
metrics_snapshot: metrics,
};
let mut history = self.throttle_history.write().await;
history.push(record);
// keep history record in reasonable range (last 1000 records)
while history.len() > 1000 {
history.remove(0);
}
}
/// set resource allocation strategy
pub async fn set_allocation_strategy(&self, strategy: ResourceAllocationStrategy) {
*self.allocation_strategy.write().await = strategy;
info!("Set resource allocation strategy: {:?}", strategy);
}
/// get current resource allocation
pub async fn get_current_allocation(&self) -> ResourceAllocation {
let current_load = LoadLevel::Low; // need to get from external
self.calculate_resource_allocation(current_load).await
}
/// get throttle history
pub async fn get_throttle_history(&self) -> Vec<ThrottleRecord> {
self.throttle_history.read().await.clone()
}
/// get throttle stats
pub async fn get_throttle_stats(&self) -> ThrottleStats {
let history = self.throttle_history.read().await;
let total_decisions = history.len();
let pause_decisions = history.iter().filter(|r| r.decision.should_pause).count();
let mut delay_sum = Duration::ZERO;
for record in history.iter() {
delay_sum += record.decision.suggested_delay;
}
let avg_delay = if total_decisions > 0 {
delay_sum / total_decisions as u32
} else {
Duration::ZERO
};
// count by load level
let low_count = history.iter().filter(|r| r.load_level == LoadLevel::Low).count();
let medium_count = history.iter().filter(|r| r.load_level == LoadLevel::Medium).count();
let high_count = history.iter().filter(|r| r.load_level == LoadLevel::High).count();
let critical_count = history.iter().filter(|r| r.load_level == LoadLevel::Critical).count();
ThrottleStats {
total_decisions,
pause_decisions,
average_delay: avg_delay,
load_level_distribution: LoadLevelDistribution {
low_count,
medium_count,
high_count,
critical_count,
},
}
}
/// reset throttle history
pub async fn reset_history(&self) {
self.throttle_history.write().await.clear();
info!("Reset throttle history");
}
/// update config
pub async fn update_config(&self, new_config: IOThrottlerConfig) {
*self.config.write().await = new_config;
info!("Updated IO throttler configuration");
}
/// get current scanning delay
pub fn get_current_scan_delay(&self) -> Duration {
let delay_ms = self.scan_delay.load(Ordering::Relaxed);
Duration::from_millis(delay_ms)
}
/// get current business priority
pub fn get_current_business_priority(&self) -> u8 {
self.business_priority.load(Ordering::Relaxed)
}
/// simulate business load pressure test
pub async fn simulate_business_pressure(&self, duration: Duration) -> SimulationResult {
info!("Start simulating business load pressure test, duration: {:?}", duration);
let start_time = SystemTime::now();
let mut simulation_records = Vec::new();
// simulate different load level changes
let load_levels = [
LoadLevel::Low,
LoadLevel::Medium,
LoadLevel::High,
LoadLevel::Critical,
LoadLevel::High,
LoadLevel::Medium,
LoadLevel::Low,
];
let step_duration = duration / load_levels.len() as u32;
for (i, &load_level) in load_levels.iter().enumerate() {
let _step_start = SystemTime::now();
// simulate metrics for this load level
let metrics = MetricsSnapshot {
iops: match load_level {
LoadLevel::Low => 200,
LoadLevel::Medium => 500,
LoadLevel::High => 800,
LoadLevel::Critical => 1200,
},
latency: match load_level {
LoadLevel::Low => 10,
LoadLevel::Medium => 25,
LoadLevel::High => 60,
LoadLevel::Critical => 150,
},
cpu_usage: match load_level {
LoadLevel::Low => 30,
LoadLevel::Medium => 50,
LoadLevel::High => 75,
LoadLevel::Critical => 95,
},
memory_usage: match load_level {
LoadLevel::Low => 40,
LoadLevel::Medium => 60,
LoadLevel::High => 80,
LoadLevel::Critical => 90,
},
};
let decision = self.make_throttle_decision(load_level, Some(metrics.clone())).await;
simulation_records.push(SimulationRecord {
step: i + 1,
load_level,
metrics,
decision: decision.clone(),
step_duration,
});
info!(
"simulate step {}: load={:?}, delay={:?}, pause={}",
i + 1,
load_level,
decision.suggested_delay,
decision.should_pause
);
// wait for step duration
tokio::time::sleep(step_duration).await;
}
let total_duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);
SimulationResult {
total_duration,
simulation_records,
final_stats: self.get_throttle_stats().await,
}
}
}
/// throttle stats
#[derive(Debug, Clone)]
pub struct ThrottleStats {
/// total decisions
pub total_decisions: usize,
/// pause decisions
pub pause_decisions: usize,
/// average delay
pub average_delay: Duration,
/// load level distribution
pub load_level_distribution: LoadLevelDistribution,
}
/// load level distribution
#[derive(Debug, Clone)]
pub struct LoadLevelDistribution {
/// low load count
pub low_count: usize,
/// medium load count
pub medium_count: usize,
/// high load count
pub high_count: usize,
/// critical load count
pub critical_count: usize,
}
/// simulation result
#[derive(Debug, Clone)]
pub struct SimulationResult {
/// total duration
pub total_duration: Duration,
/// simulation records
pub simulation_records: Vec<SimulationRecord>,
/// final stats
pub final_stats: ThrottleStats,
}
/// simulation record
#[derive(Debug, Clone)]
pub struct SimulationRecord {
/// step number
pub step: usize,
/// load level
pub load_level: LoadLevel,
/// metrics snapshot
pub metrics: MetricsSnapshot,
/// throttle decision
pub decision: ThrottleDecision,
/// step duration
pub step_duration: Duration,
}
impl Default for AdvancedIOThrottler {
fn default() -> Self {
Self::new(IOThrottlerConfig::default())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/stats_aggregator.rs | crates/ahm/src/scanner/stats_aggregator.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::{
local_stats::StatsSummary,
node_scanner::{BucketStats, LoadLevel, ScanProgress},
};
use crate::{Error, Result};
use rustfs_common::data_usage::DataUsageInfo;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
sync::Arc,
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
/// node client config
#[derive(Debug, Clone)]
pub struct NodeClientConfig {
/// connect timeout
pub connect_timeout: Duration,
/// request timeout
pub request_timeout: Duration,
/// retry times
pub max_retries: u32,
/// retry interval
pub retry_interval: Duration,
}
impl Default for NodeClientConfig {
fn default() -> Self {
Self {
connect_timeout: Duration::from_secs(5),
request_timeout: Duration::from_secs(10),
max_retries: 3,
retry_interval: Duration::from_secs(1),
}
}
}
/// node info
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInfo {
/// node id
pub node_id: String,
/// node address
pub address: String,
/// node port
pub port: u16,
/// is online
pub is_online: bool,
/// last heartbeat time
pub last_heartbeat: SystemTime,
/// node version
pub version: String,
}
/// aggregated stats
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedStats {
/// aggregation timestamp
pub aggregation_timestamp: SystemTime,
/// number of nodes participating in aggregation
pub node_count: usize,
/// number of online nodes
pub online_node_count: usize,
/// total scanned objects
pub total_objects_scanned: u64,
/// total healthy objects
pub total_healthy_objects: u64,
/// total corrupted objects
pub total_corrupted_objects: u64,
/// total scanned bytes
pub total_bytes_scanned: u64,
/// total scan errors
pub total_scan_errors: u64,
/// total heal triggered
pub total_heal_triggered: u64,
/// total disks
pub total_disks: usize,
/// total buckets
pub total_buckets: usize,
/// aggregated data usage
pub aggregated_data_usage: DataUsageInfo,
/// node summaries
pub node_summaries: HashMap<String, StatsSummary>,
/// aggregated bucket stats
pub aggregated_bucket_stats: HashMap<String, BucketStats>,
/// aggregated scan progress
pub scan_progress_summary: ScanProgressSummary,
/// load level distribution
pub load_level_distribution: HashMap<LoadLevel, usize>,
}
impl Default for AggregatedStats {
fn default() -> Self {
Self {
aggregation_timestamp: SystemTime::now(),
node_count: 0,
online_node_count: 0,
total_objects_scanned: 0,
total_healthy_objects: 0,
total_corrupted_objects: 0,
total_bytes_scanned: 0,
total_scan_errors: 0,
total_heal_triggered: 0,
total_disks: 0,
total_buckets: 0,
aggregated_data_usage: DataUsageInfo::default(),
node_summaries: HashMap::new(),
aggregated_bucket_stats: HashMap::new(),
scan_progress_summary: ScanProgressSummary::default(),
load_level_distribution: HashMap::new(),
}
}
}
/// scan progress summary
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScanProgressSummary {
/// average current cycle
pub average_current_cycle: f64,
/// total completed disks
pub total_completed_disks: usize,
/// total completed buckets
pub total_completed_buckets: usize,
/// latest scan start time
pub earliest_scan_start: Option<SystemTime>,
/// estimated completion time
pub estimated_completion: Option<SystemTime>,
/// node progress
pub node_progress: HashMap<String, ScanProgress>,
}
/// node client
///
/// responsible for communicating with other nodes, getting stats data
pub struct NodeClient {
/// node info
node_info: NodeInfo,
/// config
config: NodeClientConfig,
/// HTTP client
http_client: reqwest::Client,
}
impl NodeClient {
/// create new node client
pub fn new(node_info: NodeInfo, config: NodeClientConfig) -> Self {
let http_client = reqwest::Client::builder()
.timeout(config.request_timeout)
.connect_timeout(config.connect_timeout)
.build()
.expect("Failed to create HTTP client");
Self {
node_info,
config,
http_client,
}
}
/// get node stats summary
pub async fn get_stats_summary(&self) -> Result<StatsSummary> {
let url = format!("http://{}:{}/internal/scanner/stats", self.node_info.address, self.node_info.port);
for attempt in 1..=self.config.max_retries {
match self.try_get_stats_summary(&url).await {
Ok(summary) => return Ok(summary),
Err(e) => {
warn!("try to get node {} stats failed: {}", self.node_info.node_id, e);
if attempt < self.config.max_retries {
tokio::time::sleep(self.config.retry_interval).await;
}
}
}
}
Err(Error::Other(format!("cannot get stats data from node {}", self.node_info.node_id)))
}
/// try to get stats summary
async fn try_get_stats_summary(&self, url: &str) -> Result<StatsSummary> {
let response = self
.http_client
.get(url)
.send()
.await
.map_err(|e| Error::Other(format!("HTTP request failed: {e}")))?;
if !response.status().is_success() {
return Err(Error::Other(format!("HTTP status error: {}", response.status())));
}
let summary = response
.json::<StatsSummary>()
.await
.map_err(|e| Error::Serialization(format!("deserialize stats data failed: {e}")))?;
Ok(summary)
}
/// check node health status
pub async fn check_health(&self) -> bool {
let url = format!("http://{}:{}/internal/health", self.node_info.address, self.node_info.port);
match self.http_client.get(&url).send().await {
Ok(response) => response.status().is_success(),
Err(_) => false,
}
}
/// get node info
pub fn get_node_info(&self) -> &NodeInfo {
&self.node_info
}
/// update node online status
pub fn update_online_status(&mut self, is_online: bool) {
self.node_info.is_online = is_online;
if is_online {
self.node_info.last_heartbeat = SystemTime::now();
}
}
}
/// decentralized stats aggregator config
#[derive(Debug, Clone)]
pub struct DecentralizedStatsAggregatorConfig {
/// aggregation interval
pub aggregation_interval: Duration,
/// cache ttl
pub cache_ttl: Duration,
/// node timeout
pub node_timeout: Duration,
/// max concurrent aggregations
pub max_concurrent_aggregations: usize,
}
impl Default for DecentralizedStatsAggregatorConfig {
fn default() -> Self {
Self {
aggregation_interval: Duration::from_secs(30), // 30 seconds to aggregate
cache_ttl: Duration::from_secs(3), // 3 seconds to cache
node_timeout: Duration::from_secs(5), // 5 seconds to node timeout
max_concurrent_aggregations: 10, // max 10 nodes to aggregate concurrently
}
}
}
/// decentralized stats aggregator
///
/// real-time aggregate stats data from all nodes, provide global view
pub struct DecentralizedStatsAggregator {
/// config
config: Arc<RwLock<DecentralizedStatsAggregatorConfig>>,
/// node clients
node_clients: Arc<RwLock<HashMap<String, Arc<NodeClient>>>>,
/// cached aggregated stats
cached_stats: Arc<RwLock<Option<AggregatedStats>>>,
/// cache timestamp
cache_timestamp: Arc<RwLock<SystemTime>>,
/// local node stats summary
local_stats_summary: Arc<RwLock<Option<StatsSummary>>>,
}
impl DecentralizedStatsAggregator {
/// create new decentralized stats aggregator
pub fn new(config: DecentralizedStatsAggregatorConfig) -> Self {
Self {
config: Arc::new(RwLock::new(config)),
node_clients: Arc::new(RwLock::new(HashMap::new())),
cached_stats: Arc::new(RwLock::new(None)),
cache_timestamp: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
local_stats_summary: Arc::new(RwLock::new(None)),
}
}
/// add node client
pub async fn add_node(&self, node_info: NodeInfo) {
let client_config = NodeClientConfig::default();
let client = Arc::new(NodeClient::new(node_info.clone(), client_config));
self.node_clients.write().await.insert(node_info.node_id.clone(), client);
info!("add node to aggregator: {}", node_info.node_id);
}
/// remove node client
pub async fn remove_node(&self, node_id: &str) {
self.node_clients.write().await.remove(node_id);
info!("remove node from aggregator: {}", node_id);
}
/// set local node stats summary
pub async fn set_local_stats(&self, stats: StatsSummary) {
*self.local_stats_summary.write().await = Some(stats);
}
/// get aggregated stats data (with cache)
pub async fn get_aggregated_stats(&self) -> Result<AggregatedStats> {
let config = self.config.read().await;
let cache_ttl = config.cache_ttl;
drop(config);
// check cache validity
let cache_timestamp = *self.cache_timestamp.read().await;
let now = SystemTime::now();
debug!(
"cache check: cache_timestamp={:?}, now={:?}, cache_ttl={:?}",
cache_timestamp, now, cache_ttl
);
// Check cache validity if timestamp is not initial value (UNIX_EPOCH)
if cache_timestamp != SystemTime::UNIX_EPOCH
&& let Ok(elapsed) = now.duration_since(cache_timestamp)
{
if elapsed < cache_ttl {
if let Some(cached) = self.cached_stats.read().await.as_ref() {
debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
return Ok(cached.clone());
}
} else {
debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
}
}
// cache expired, re-aggregate
info!("cache expired, start re-aggregating stats data");
let aggregation_timestamp = now;
let aggregated = self.aggregate_stats_from_all_nodes(aggregation_timestamp).await?;
// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
// Use the time when aggregation completes as cache timestamp to avoid premature expiry during long runs
*self.cache_timestamp.write().await = SystemTime::now();
Ok(aggregated)
}
/// force refresh aggregated stats (ignore cache)
pub async fn force_refresh_aggregated_stats(&self) -> Result<AggregatedStats> {
let now = SystemTime::now();
let aggregated = self.aggregate_stats_from_all_nodes(now).await?;
// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
// Cache timestamp should reflect completion time rather than aggregation start
*self.cache_timestamp.write().await = SystemTime::now();
Ok(aggregated)
}
/// aggregate stats data from all nodes
async fn aggregate_stats_from_all_nodes(&self, aggregation_timestamp: SystemTime) -> Result<AggregatedStats> {
let node_clients = self.node_clients.read().await;
let config = self.config.read().await;
// concurrent get stats data from all nodes
let mut tasks = Vec::new();
let semaphore = Arc::new(tokio::sync::Semaphore::new(config.max_concurrent_aggregations));
// add local node stats
let mut node_summaries = HashMap::new();
if let Some(local_stats) = self.local_stats_summary.read().await.as_ref() {
node_summaries.insert(local_stats.node_id.clone(), local_stats.clone());
}
// get remote node stats
for (node_id, client) in node_clients.iter() {
let client = client.clone();
let semaphore = semaphore.clone();
let node_id = node_id.clone();
let task = tokio::spawn(async move {
let _permit = match semaphore.acquire().await {
Ok(permit) => permit,
Err(e) => {
warn!("Failed to acquire semaphore for node {}: {}", node_id, e);
return None;
}
};
match client.get_stats_summary().await {
Ok(summary) => {
debug!("successfully get node {} stats data", node_id);
Some((node_id, summary))
}
Err(e) => {
warn!("get node {} stats data failed: {}", node_id, e);
None
}
}
});
tasks.push(task);
}
// wait for all tasks to complete
for task in tasks {
if let Ok(Some((node_id, summary))) = task.await {
node_summaries.insert(node_id, summary);
}
}
drop(node_clients);
drop(config);
// aggregate stats data
let aggregated = self.aggregate_node_summaries(node_summaries, aggregation_timestamp).await;
info!(
"aggregate stats completed: {} nodes, {} online",
aggregated.node_count, aggregated.online_node_count
);
Ok(aggregated)
}
/// aggregate node summaries
async fn aggregate_node_summaries(
&self,
node_summaries: HashMap<String, StatsSummary>,
aggregation_timestamp: SystemTime,
) -> AggregatedStats {
let mut aggregated = AggregatedStats {
aggregation_timestamp,
node_count: node_summaries.len(),
online_node_count: node_summaries.len(), // assume all nodes with data are online
node_summaries: node_summaries.clone(),
..Default::default()
};
// aggregate numeric stats
for (node_id, summary) in &node_summaries {
aggregated.total_objects_scanned += summary.total_objects_scanned;
aggregated.total_healthy_objects += summary.total_healthy_objects;
aggregated.total_corrupted_objects += summary.total_corrupted_objects;
aggregated.total_bytes_scanned += summary.total_bytes_scanned;
aggregated.total_scan_errors += summary.total_scan_errors;
aggregated.total_heal_triggered += summary.total_heal_triggered;
aggregated.total_disks += summary.total_disks;
aggregated.total_buckets += summary.total_buckets;
aggregated.aggregated_data_usage.merge(&summary.data_usage);
// aggregate scan progress
aggregated
.scan_progress_summary
.node_progress
.insert(node_id.clone(), summary.scan_progress.clone());
aggregated.scan_progress_summary.total_completed_disks += summary.scan_progress.completed_disks.len();
aggregated.scan_progress_summary.total_completed_buckets += summary.scan_progress.completed_buckets.len();
}
// calculate average scan cycle
if !node_summaries.is_empty() {
let total_cycles: u64 = node_summaries.values().map(|s| s.scan_progress.current_cycle).sum();
aggregated.scan_progress_summary.average_current_cycle = total_cycles as f64 / node_summaries.len() as f64;
}
// find earliest scan start time
aggregated.scan_progress_summary.earliest_scan_start =
node_summaries.values().map(|s| s.scan_progress.scan_start_time).min();
// TODO: aggregate bucket stats and data usage
// here we need to implement it based on the specific BucketStats and DataUsageInfo structure
aggregated
}
/// get nodes health status
pub async fn get_nodes_health(&self) -> HashMap<String, bool> {
let node_clients = self.node_clients.read().await;
let mut health_status = HashMap::new();
// concurrent check all nodes health status
let mut tasks = Vec::new();
for (node_id, client) in node_clients.iter() {
let client = client.clone();
let node_id = node_id.clone();
let task = tokio::spawn(async move {
let is_healthy = client.check_health().await;
(node_id, is_healthy)
});
tasks.push(task);
}
// collect results
for task in tasks {
if let Ok((node_id, is_healthy)) = task.await {
health_status.insert(node_id, is_healthy);
}
}
health_status
}
/// get online nodes list
pub async fn get_online_nodes(&self) -> Vec<String> {
let health_status = self.get_nodes_health().await;
health_status
.into_iter()
.filter_map(|(node_id, is_healthy)| if is_healthy { Some(node_id) } else { None })
.collect()
}
/// clear cache
pub async fn clear_cache(&self) {
*self.cached_stats.write().await = None;
*self.cache_timestamp.write().await = SystemTime::UNIX_EPOCH;
info!("clear aggregated stats cache");
}
/// get cache status
pub async fn get_cache_status(&self) -> CacheStatus {
let cached_stats = self.cached_stats.read().await;
let cache_timestamp = *self.cache_timestamp.read().await;
let config = self.config.read().await;
let is_valid = if let Ok(elapsed) = SystemTime::now().duration_since(cache_timestamp) {
elapsed < config.cache_ttl
} else {
false
};
CacheStatus {
has_cached_data: cached_stats.is_some(),
cache_timestamp,
is_valid,
ttl: config.cache_ttl,
}
}
/// update config
pub async fn update_config(&self, new_config: DecentralizedStatsAggregatorConfig) {
*self.config.write().await = new_config;
info!("update aggregator config");
}
}
/// cache status
#[derive(Debug, Clone)]
pub struct CacheStatus {
/// has cached data
pub has_cached_data: bool,
/// cache timestamp
pub cache_timestamp: SystemTime,
/// cache is valid
pub is_valid: bool,
/// cache ttl
pub ttl: Duration,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scanner::node_scanner::{BucketScanState, ScanProgress};
use rustfs_common::data_usage::{BucketUsageInfo, DataUsageInfo};
use std::collections::{HashMap, HashSet};
use std::time::Duration;
#[tokio::test]
async fn aggregated_stats_merge_data_usage() {
let aggregator = DecentralizedStatsAggregator::new(DecentralizedStatsAggregatorConfig::default());
let mut data_usage = DataUsageInfo::default();
let bucket_usage = BucketUsageInfo {
objects_count: 5,
size: 1024,
..Default::default()
};
data_usage.buckets_usage.insert("bucket".to_string(), bucket_usage);
data_usage.objects_total_count = 5;
data_usage.objects_total_size = 1024;
let summary = StatsSummary {
node_id: "local-node".to_string(),
total_objects_scanned: 10,
total_healthy_objects: 9,
total_corrupted_objects: 1,
total_bytes_scanned: 2048,
total_scan_errors: 0,
total_heal_triggered: 0,
total_disks: 2,
total_buckets: 1,
last_update: SystemTime::now(),
scan_progress: ScanProgress::default(),
data_usage: data_usage.clone(),
};
aggregator.set_local_stats(summary).await;
// Wait briefly to ensure async cache writes settle in high-concurrency environments
tokio::time::sleep(Duration::from_millis(10)).await;
let aggregated = aggregator.get_aggregated_stats().await.expect("aggregated stats");
assert_eq!(aggregated.node_count, 1);
assert!(aggregated.node_summaries.contains_key("local-node"));
assert_eq!(aggregated.aggregated_data_usage.objects_total_count, 5);
assert_eq!(
aggregated
.aggregated_data_usage
.buckets_usage
.get("bucket")
.expect("bucket usage present")
.objects_count,
5
);
}
#[tokio::test]
async fn aggregated_stats_merge_multiple_nodes() {
let aggregator = DecentralizedStatsAggregator::new(DecentralizedStatsAggregatorConfig::default());
let mut local_usage = DataUsageInfo::default();
let local_bucket = BucketUsageInfo {
objects_count: 3,
versions_count: 3,
size: 150,
..Default::default()
};
local_usage.buckets_usage.insert("local-bucket".to_string(), local_bucket);
local_usage.calculate_totals();
local_usage.buckets_count = local_usage.buckets_usage.len() as u64;
local_usage.last_update = Some(SystemTime::now());
let local_progress = ScanProgress {
current_cycle: 1,
completed_disks: {
let mut set = std::collections::HashSet::new();
set.insert("disk-local".to_string());
set
},
completed_buckets: {
let mut map = std::collections::HashMap::new();
map.insert(
"local-bucket".to_string(),
BucketScanState {
completed: true,
last_object_key: Some("obj1".to_string()),
objects_scanned: 3,
scan_timestamp: SystemTime::now(),
},
);
map
},
..Default::default()
};
let local_summary = StatsSummary {
node_id: "node-local".to_string(),
total_objects_scanned: 30,
total_healthy_objects: 30,
total_corrupted_objects: 0,
total_bytes_scanned: 1500,
total_scan_errors: 0,
total_heal_triggered: 0,
total_disks: 1,
total_buckets: 1,
last_update: SystemTime::now(),
scan_progress: local_progress,
data_usage: local_usage.clone(),
};
let mut remote_usage = DataUsageInfo::default();
let remote_bucket = BucketUsageInfo {
objects_count: 5,
versions_count: 5,
size: 250,
..Default::default()
};
remote_usage.buckets_usage.insert("remote-bucket".to_string(), remote_bucket);
remote_usage.calculate_totals();
remote_usage.buckets_count = remote_usage.buckets_usage.len() as u64;
remote_usage.last_update = Some(SystemTime::now());
let remote_progress = ScanProgress {
current_cycle: 2,
completed_disks: {
let mut set = std::collections::HashSet::new();
set.insert("disk-remote".to_string());
set
},
completed_buckets: {
let mut map = std::collections::HashMap::new();
map.insert(
"remote-bucket".to_string(),
BucketScanState {
completed: true,
last_object_key: Some("remote-obj".to_string()),
objects_scanned: 5,
scan_timestamp: SystemTime::now(),
},
);
map
},
..Default::default()
};
let remote_summary = StatsSummary {
node_id: "node-remote".to_string(),
total_objects_scanned: 50,
total_healthy_objects: 48,
total_corrupted_objects: 2,
total_bytes_scanned: 2048,
total_scan_errors: 1,
total_heal_triggered: 1,
total_disks: 2,
total_buckets: 1,
last_update: SystemTime::now(),
scan_progress: remote_progress,
data_usage: remote_usage.clone(),
};
let node_summaries: HashMap<_, _> = [
(local_summary.node_id.clone(), local_summary.clone()),
(remote_summary.node_id.clone(), remote_summary.clone()),
]
.into_iter()
.collect();
let aggregated = aggregator.aggregate_node_summaries(node_summaries, SystemTime::now()).await;
assert_eq!(aggregated.node_count, 2);
assert_eq!(aggregated.total_objects_scanned, 80);
assert_eq!(aggregated.total_corrupted_objects, 2);
assert_eq!(aggregated.total_disks, 3);
assert!(aggregated.node_summaries.contains_key("node-local"));
assert!(aggregated.node_summaries.contains_key("node-remote"));
assert_eq!(
aggregated.aggregated_data_usage.objects_total_count,
local_usage.objects_total_count + remote_usage.objects_total_count
);
assert_eq!(
aggregated.aggregated_data_usage.objects_total_size,
local_usage.objects_total_size + remote_usage.objects_total_size
);
let mut expected_buckets: HashSet<&str> = HashSet::new();
expected_buckets.insert("local-bucket");
expected_buckets.insert("remote-bucket");
let actual_buckets: HashSet<&str> = aggregated
.aggregated_data_usage
.buckets_usage
.keys()
.map(|s| s.as_str())
.collect();
assert_eq!(expected_buckets, actual_buckets);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/histogram.rs | crates/ahm/src/scanner/histogram.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
sync::atomic::{AtomicU64, Ordering},
time::{Duration, SystemTime},
};
use tracing::info;
/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
/// Total objects scanned since server start
pub objects_scanned: u64,
/// Total object versions scanned since server start
pub versions_scanned: u64,
/// Total directories scanned since server start
pub directories_scanned: u64,
/// Total bucket scans started since server start
pub bucket_scans_started: u64,
/// Total bucket scans finished since server start
pub bucket_scans_finished: u64,
/// Total objects with health issues found
pub objects_with_issues: u64,
/// Total heal tasks queued
pub heal_tasks_queued: u64,
/// Total heal tasks completed
pub heal_tasks_completed: u64,
/// Total heal tasks failed
pub heal_tasks_failed: u64,
/// Total healthy objects found
pub healthy_objects: u64,
/// Total corrupted objects found
pub corrupted_objects: u64,
/// Last scan activity time
pub last_activity: Option<SystemTime>,
/// Current scan cycle
pub current_cycle: u64,
/// Total scan cycles completed
pub total_cycles: u64,
/// Current scan duration
pub current_scan_duration: Option<Duration>,
/// Average scan duration
pub avg_scan_duration: Duration,
/// Objects scanned per second
pub objects_per_second: f64,
/// Buckets scanned per second
pub buckets_per_second: f64,
/// Storage metrics by bucket
pub bucket_metrics: HashMap<String, BucketMetrics>,
/// Disk metrics
pub disk_metrics: HashMap<String, DiskMetrics>,
}
/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
/// Bucket name
pub bucket: String,
/// Total objects in bucket
pub total_objects: u64,
/// Total size of objects in bucket (bytes)
pub total_size: u64,
/// Objects with health issues
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Scan duration
pub scan_duration: Option<Duration>,
/// Heal tasks queued for this bucket
pub heal_tasks_queued: u64,
/// Heal tasks completed for this bucket
pub heal_tasks_completed: u64,
/// Heal tasks failed for this bucket
pub heal_tasks_failed: u64,
}
/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
/// Disk path
pub disk_path: String,
/// Total disk space (bytes)
pub total_space: u64,
/// Used disk space (bytes)
pub used_space: u64,
/// Free disk space (bytes)
pub free_space: u64,
/// Objects scanned on this disk
pub objects_scanned: u64,
/// Objects with issues on this disk
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Whether disk is online
pub is_online: bool,
/// Whether disk is being scanned
pub is_scanning: bool,
}
/// Thread-safe metrics collector
pub struct MetricsCollector {
/// Atomic counters for real-time metrics
objects_scanned: AtomicU64,
versions_scanned: AtomicU64,
directories_scanned: AtomicU64,
bucket_scans_started: AtomicU64,
bucket_scans_finished: AtomicU64,
objects_with_issues: AtomicU64,
heal_tasks_queued: AtomicU64,
heal_tasks_completed: AtomicU64,
heal_tasks_failed: AtomicU64,
current_cycle: AtomicU64,
total_cycles: AtomicU64,
healthy_objects: AtomicU64,
corrupted_objects: AtomicU64,
}
impl MetricsCollector {
/// Create a new metrics collector
pub fn new() -> Self {
Self {
objects_scanned: AtomicU64::new(0),
versions_scanned: AtomicU64::new(0),
directories_scanned: AtomicU64::new(0),
bucket_scans_started: AtomicU64::new(0),
bucket_scans_finished: AtomicU64::new(0),
objects_with_issues: AtomicU64::new(0),
heal_tasks_queued: AtomicU64::new(0),
heal_tasks_completed: AtomicU64::new(0),
heal_tasks_failed: AtomicU64::new(0),
current_cycle: AtomicU64::new(0),
total_cycles: AtomicU64::new(0),
healthy_objects: AtomicU64::new(0),
corrupted_objects: AtomicU64::new(0),
}
}
/// Increment objects scanned count
pub fn increment_objects_scanned(&self, count: u64) {
self.objects_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment versions scanned count
pub fn increment_versions_scanned(&self, count: u64) {
self.versions_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment directories scanned count
pub fn increment_directories_scanned(&self, count: u64) {
self.directories_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans started count
pub fn increment_bucket_scans_started(&self, count: u64) {
self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans finished count
pub fn increment_bucket_scans_finished(&self, count: u64) {
self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
}
/// Increment objects with issues count
pub fn increment_objects_with_issues(&self, count: u64) {
self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks queued count
pub fn increment_heal_tasks_queued(&self, count: u64) {
self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks completed count
pub fn increment_heal_tasks_completed(&self, count: u64) {
self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks failed count
pub fn increment_heal_tasks_failed(&self, count: u64) {
self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
}
/// Set current cycle
pub fn set_current_cycle(&self, cycle: u64) {
self.current_cycle.store(cycle, Ordering::Relaxed);
}
/// Increment total cycles
pub fn increment_total_cycles(&self) {
self.total_cycles.fetch_add(1, Ordering::Relaxed);
}
/// Increment healthy objects count
pub fn increment_healthy_objects(&self) {
self.healthy_objects.fetch_add(1, Ordering::Relaxed);
}
/// Increment corrupted objects count
pub fn increment_corrupted_objects(&self) {
self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
}
/// Get current metrics snapshot
pub fn get_metrics(&self) -> ScannerMetrics {
ScannerMetrics {
objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
last_activity: Some(SystemTime::now()),
current_cycle: self.current_cycle.load(Ordering::Relaxed),
total_cycles: self.total_cycles.load(Ordering::Relaxed),
current_scan_duration: None, // Will be set by scanner
avg_scan_duration: Duration::ZERO, // Will be calculated
objects_per_second: 0.0, // Will be calculated
buckets_per_second: 0.0, // Will be calculated
bucket_metrics: HashMap::new(), // Will be populated by scanner
disk_metrics: HashMap::new(), // Will be populated by scanner
}
}
/// Reset all metrics
pub fn reset(&self) {
self.objects_scanned.store(0, Ordering::Relaxed);
self.versions_scanned.store(0, Ordering::Relaxed);
self.directories_scanned.store(0, Ordering::Relaxed);
self.bucket_scans_started.store(0, Ordering::Relaxed);
self.bucket_scans_finished.store(0, Ordering::Relaxed);
self.objects_with_issues.store(0, Ordering::Relaxed);
self.heal_tasks_queued.store(0, Ordering::Relaxed);
self.heal_tasks_completed.store(0, Ordering::Relaxed);
self.heal_tasks_failed.store(0, Ordering::Relaxed);
self.current_cycle.store(0, Ordering::Relaxed);
self.total_cycles.store(0, Ordering::Relaxed);
self.healthy_objects.store(0, Ordering::Relaxed);
self.corrupted_objects.store(0, Ordering::Relaxed);
info!("Scanner metrics reset");
}
}
impl Default for MetricsCollector {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_metrics_collector_creation() {
let collector = MetricsCollector::new();
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
assert_eq!(metrics.versions_scanned, 0);
}
#[test]
fn test_metrics_increment() {
let collector = MetricsCollector::new();
collector.increment_objects_scanned(10);
collector.increment_versions_scanned(5);
collector.increment_objects_with_issues(2);
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 10);
assert_eq!(metrics.versions_scanned, 5);
assert_eq!(metrics.objects_with_issues, 2);
}
#[test]
fn test_metrics_reset() {
let collector = MetricsCollector::new();
collector.increment_objects_scanned(10);
collector.reset();
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/local_stats.rs | crates/ahm/src/scanner/local_stats.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::node_scanner::{BucketStats, DiskStats, LocalScanStats};
use crate::{Error, Result};
use rustfs_common::data_usage::DataUsageInfo;
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
sync::Arc,
sync::atomic::{AtomicU64, Ordering},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
/// local stats manager
pub struct LocalStatsManager {
/// node id
node_id: String,
/// stats file path
stats_file: PathBuf,
/// backup file path
backup_file: PathBuf,
/// temp file path
temp_file: PathBuf,
/// local stats data
stats: Arc<RwLock<LocalScanStats>>,
/// save interval
save_interval: Duration,
/// last save time
last_save: Arc<RwLock<SystemTime>>,
/// stats counters
counters: Arc<StatsCounters>,
}
/// stats counters
pub struct StatsCounters {
/// total scanned objects
pub total_objects_scanned: AtomicU64,
/// total healthy objects
pub total_healthy_objects: AtomicU64,
/// total corrupted objects
pub total_corrupted_objects: AtomicU64,
/// total scanned bytes
pub total_bytes_scanned: AtomicU64,
/// total scan errors
pub total_scan_errors: AtomicU64,
/// total heal triggered
pub total_heal_triggered: AtomicU64,
}
impl Default for StatsCounters {
fn default() -> Self {
Self {
total_objects_scanned: AtomicU64::new(0),
total_healthy_objects: AtomicU64::new(0),
total_corrupted_objects: AtomicU64::new(0),
total_bytes_scanned: AtomicU64::new(0),
total_scan_errors: AtomicU64::new(0),
total_heal_triggered: AtomicU64::new(0),
}
}
}
/// scan result entry
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScanResultEntry {
/// object path
pub object_path: String,
/// bucket name
pub bucket_name: String,
/// object size
pub object_size: u64,
/// is healthy
pub is_healthy: bool,
/// error message (if any)
pub error_message: Option<String>,
/// scan time
pub scan_time: SystemTime,
/// disk id
pub disk_id: String,
}
/// batch scan result
#[derive(Debug, Clone)]
pub struct BatchScanResult {
/// disk id
pub disk_id: String,
/// scan result entries
pub entries: Vec<ScanResultEntry>,
/// scan start time
pub scan_start: SystemTime,
/// scan end time
pub scan_end: SystemTime,
/// scan duration
pub scan_duration: Duration,
}
impl LocalStatsManager {
/// create new local stats manager
pub fn new(node_id: &str, data_dir: &Path) -> Self {
// ensure data directory exists
if !data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(data_dir)
{
error!("create stats data directory failed {:?}: {}", data_dir, e);
}
let stats_file = data_dir.join(format!("scanner_stats_{node_id}.json"));
let backup_file = data_dir.join(format!("scanner_stats_{node_id}.backup"));
let temp_file = data_dir.join(format!("scanner_stats_{node_id}.tmp"));
Self {
node_id: node_id.to_string(),
stats_file,
backup_file,
temp_file,
stats: Arc::new(RwLock::new(LocalScanStats::default())),
save_interval: Duration::from_secs(60), // 60 seconds save once
last_save: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
counters: Arc::new(StatsCounters::default()),
}
}
/// load local stats data
pub async fn load_stats(&self) -> Result<()> {
if !self.stats_file.exists() {
info!("stats data file not exists, will create new stats data");
return Ok(());
}
match self.load_stats_from_file(&self.stats_file).await {
Ok(stats) => {
*self.stats.write().await = stats;
info!("success load local stats data");
Ok(())
}
Err(e) => {
warn!("load main stats file failed: {}, try backup file", e);
match self.load_stats_from_file(&self.backup_file).await {
Ok(stats) => {
*self.stats.write().await = stats;
warn!("restore stats data from backup file");
Ok(())
}
Err(backup_e) => {
warn!("backup file also cannot load: {}, will use default stats data", backup_e);
Ok(())
}
}
}
}
}
/// load stats data from file
async fn load_stats_from_file(&self, file_path: &Path) -> Result<LocalScanStats> {
let content = tokio::fs::read_to_string(file_path)
.await
.map_err(|e| Error::IO(format!("read stats file failed: {e}")))?;
let stats: LocalScanStats =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize stats data failed: {e}")))?;
Ok(stats)
}
/// save stats data to disk
pub async fn save_stats(&self) -> Result<()> {
let now = SystemTime::now();
let last_save = *self.last_save.read().await;
// frequency control
if now.duration_since(last_save).unwrap_or(Duration::ZERO) < self.save_interval {
return Ok(());
}
let stats = self.stats.read().await.clone();
// serialize
let json_data = serde_json::to_string_pretty(&stats)
.map_err(|e| Error::Serialization(format!("serialize stats data failed: {e}")))?;
// atomic write
tokio::fs::write(&self.temp_file, json_data)
.await
.map_err(|e| Error::IO(format!("write temp stats file failed: {e}")))?;
// backup existing file
if self.stats_file.exists() {
tokio::fs::copy(&self.stats_file, &self.backup_file)
.await
.map_err(|e| Error::IO(format!("backup stats file failed: {e}")))?;
}
// atomic replace
tokio::fs::rename(&self.temp_file, &self.stats_file)
.await
.map_err(|e| Error::IO(format!("replace stats file failed: {e}")))?;
*self.last_save.write().await = now;
debug!("save local stats data to {:?}", self.stats_file);
Ok(())
}
/// force save stats data
pub async fn force_save_stats(&self) -> Result<()> {
*self.last_save.write().await = SystemTime::UNIX_EPOCH;
self.save_stats().await
}
/// update disk scan result
pub async fn update_disk_scan_result(&self, result: &BatchScanResult) -> Result<()> {
let mut stats = self.stats.write().await;
// update disk stats
let disk_stat = stats.disks_stats.entry(result.disk_id.clone()).or_insert_with(|| DiskStats {
disk_id: result.disk_id.clone(),
..Default::default()
});
let healthy_count = result.entries.iter().filter(|e| e.is_healthy).count() as u64;
let error_count = result.entries.iter().filter(|e| !e.is_healthy).count() as u64;
disk_stat.objects_scanned += result.entries.len() as u64;
disk_stat.errors_count += error_count;
disk_stat.last_scan_time = result.scan_end;
disk_stat.scan_duration = result.scan_duration;
disk_stat.scan_completed = true;
// update overall stats
stats.objects_scanned += result.entries.len() as u64;
stats.healthy_objects += healthy_count;
stats.corrupted_objects += error_count;
stats.last_update = SystemTime::now();
// update bucket stats
for entry in &result.entries {
let _bucket_stat = stats
.buckets_stats
.entry(entry.bucket_name.clone())
.or_insert_with(BucketStats::default);
// TODO: update BucketStats
}
// update atomic counters
self.counters
.total_objects_scanned
.fetch_add(result.entries.len() as u64, Ordering::Relaxed);
self.counters
.total_healthy_objects
.fetch_add(healthy_count, Ordering::Relaxed);
self.counters
.total_corrupted_objects
.fetch_add(error_count, Ordering::Relaxed);
let total_bytes: u64 = result.entries.iter().map(|e| e.object_size).sum();
self.counters.total_bytes_scanned.fetch_add(total_bytes, Ordering::Relaxed);
if error_count > 0 {
self.counters.total_scan_errors.fetch_add(error_count, Ordering::Relaxed);
}
drop(stats);
debug!(
"update disk {} scan result: objects {}, healthy {}, error {}",
result.disk_id,
result.entries.len(),
healthy_count,
error_count
);
Ok(())
}
/// record single object scan result
pub async fn record_object_scan(&self, entry: ScanResultEntry) -> Result<()> {
let result = BatchScanResult {
disk_id: entry.disk_id.clone(),
entries: vec![entry],
scan_start: SystemTime::now(),
scan_end: SystemTime::now(),
scan_duration: Duration::from_millis(0),
};
self.update_disk_scan_result(&result).await
}
/// get local stats data copy
pub async fn get_stats(&self) -> LocalScanStats {
self.stats.read().await.clone()
}
/// get real-time counters
pub fn get_counters(&self) -> Arc<StatsCounters> {
self.counters.clone()
}
/// reset stats data
pub async fn reset_stats(&self) -> Result<()> {
{
let mut stats = self.stats.write().await;
*stats = LocalScanStats::default();
}
// reset counters
self.counters.total_objects_scanned.store(0, Ordering::Relaxed);
self.counters.total_healthy_objects.store(0, Ordering::Relaxed);
self.counters.total_corrupted_objects.store(0, Ordering::Relaxed);
self.counters.total_bytes_scanned.store(0, Ordering::Relaxed);
self.counters.total_scan_errors.store(0, Ordering::Relaxed);
self.counters.total_heal_triggered.store(0, Ordering::Relaxed);
info!("reset local stats data");
Ok(())
}
/// get stats summary
pub async fn get_stats_summary(&self) -> StatsSummary {
let stats = self.stats.read().await;
StatsSummary {
node_id: self.node_id.clone(),
total_objects_scanned: self.counters.total_objects_scanned.load(Ordering::Relaxed),
total_healthy_objects: self.counters.total_healthy_objects.load(Ordering::Relaxed),
total_corrupted_objects: self.counters.total_corrupted_objects.load(Ordering::Relaxed),
total_bytes_scanned: self.counters.total_bytes_scanned.load(Ordering::Relaxed),
total_scan_errors: self.counters.total_scan_errors.load(Ordering::Relaxed),
total_heal_triggered: self.counters.total_heal_triggered.load(Ordering::Relaxed),
total_disks: stats.disks_stats.len(),
total_buckets: stats.buckets_stats.len(),
last_update: stats.last_update,
scan_progress: stats.scan_progress.clone(),
data_usage: stats.data_usage.clone(),
}
}
/// record heal triggered
pub async fn record_heal_triggered(&self, object_path: &str, error_message: &str) {
self.counters.total_heal_triggered.fetch_add(1, Ordering::Relaxed);
info!("record heal triggered: object={}, error={}", object_path, error_message);
}
/// update data usage stats
pub async fn update_data_usage(&self, data_usage: DataUsageInfo) {
let mut stats = self.stats.write().await;
stats.data_usage = data_usage;
stats.last_update = SystemTime::now();
debug!("update data usage stats");
}
/// cleanup stats files
pub async fn cleanup_stats_files(&self) -> Result<()> {
// delete main file
if self.stats_file.exists() {
tokio::fs::remove_file(&self.stats_file)
.await
.map_err(|e| Error::IO(format!("delete stats file failed: {e}")))?;
}
// delete backup file
if self.backup_file.exists() {
tokio::fs::remove_file(&self.backup_file)
.await
.map_err(|e| Error::IO(format!("delete backup stats file failed: {e}")))?;
}
// delete temp file
if self.temp_file.exists() {
tokio::fs::remove_file(&self.temp_file)
.await
.map_err(|e| Error::IO(format!("delete temp stats file failed: {e}")))?;
}
info!("cleanup all stats files");
Ok(())
}
/// set save interval
pub fn set_save_interval(&mut self, interval: Duration) {
self.save_interval = interval;
info!("set stats data save interval to {:?}", interval);
}
}
/// stats summary
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatsSummary {
/// node id
pub node_id: String,
/// total scanned objects
pub total_objects_scanned: u64,
/// total healthy objects
pub total_healthy_objects: u64,
/// total corrupted objects
pub total_corrupted_objects: u64,
/// total scanned bytes
pub total_bytes_scanned: u64,
/// total scan errors
pub total_scan_errors: u64,
/// total heal triggered
pub total_heal_triggered: u64,
/// total disks
pub total_disks: usize,
/// total buckets
pub total_buckets: usize,
/// last update time
pub last_update: SystemTime,
/// scan progress
pub scan_progress: super::node_scanner::ScanProgress,
/// data usage snapshot for the node
pub data_usage: DataUsageInfo,
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/scanner/local_scan/mod.rs | crates/ahm/src/scanner/local_scan/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use rustfs_common::data_usage::DiskUsageStatus;
use rustfs_ecstore::data_usage::{
LocalUsageSnapshot, LocalUsageSnapshotMeta, data_usage_state_dir, ensure_data_usage_layout, snapshot_file_name,
write_local_snapshot,
};
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_filemeta::{FileInfo, FileMeta, FileMetaVersion, VersionType};
use serde::{Deserialize, Serialize};
use serde_json::{from_slice, to_vec};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::{fs, task};
use tracing::warn;
use walkdir::WalkDir;
const STATE_FILE_EXTENSION: &str = "";
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LocalObjectUsage {
pub bucket: String,
pub object: String,
pub last_modified_ns: Option<i128>,
pub versions_count: u64,
pub delete_markers_count: u64,
pub total_size: u64,
pub has_live_object: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct IncrementalScanState {
last_scan_ns: Option<i128>,
objects: HashMap<String, LocalObjectUsage>,
}
struct DiskScanResult {
snapshot: LocalUsageSnapshot,
state: IncrementalScanState,
objects_by_bucket: HashMap<String, Vec<LocalObjectRecord>>,
status: DiskUsageStatus,
}
#[derive(Debug, Clone)]
pub struct LocalObjectRecord {
pub usage: LocalObjectUsage,
pub object_info: Option<rustfs_ecstore::store_api::ObjectInfo>,
}
#[derive(Debug, Default)]
pub struct LocalScanOutcome {
pub snapshots: Vec<LocalUsageSnapshot>,
pub bucket_objects: HashMap<String, Vec<LocalObjectRecord>>,
pub disk_status: Vec<DiskUsageStatus>,
}
/// Scan all local primary disks and persist refreshed usage snapshots.
pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalScanOutcome> {
let mut snapshots = Vec::new();
let mut bucket_objects: HashMap<String, Vec<LocalObjectRecord>> = HashMap::new();
let mut disk_status = Vec::new();
for (pool_idx, pool) in store.pools.iter().enumerate() {
for set_disks in pool.disk_set.iter() {
let disks = {
let guard = set_disks.disks.read().await;
guard.clone()
};
// Use the first local online disk in the set to avoid missing stats when disk 0 is down
let mut picked = false;
for (disk_index, disk_opt) in disks.into_iter().enumerate() {
let Some(disk) = disk_opt else {
continue;
};
if !disk.is_local() {
continue;
}
if picked {
continue;
}
// Skip offline disks; keep looking for an online candidate
if !disk.is_online().await {
continue;
}
picked = true;
let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
Some(id) => id.to_string(),
None => {
warn!("Skipping disk without ID: {}", disk.to_string());
continue;
}
};
let root = disk.path();
ensure_data_usage_layout(root.as_path()).await.map_err(Error::from)?;
let meta = LocalUsageSnapshotMeta {
disk_id: disk_id.clone(),
pool_index: Some(pool_idx),
set_index: Some(set_disks.set_index),
disk_index: Some(disk_index),
};
let state_path = state_file_path(root.as_path(), &disk_id);
let state = read_scan_state(&state_path).await?;
let root_clone = root.clone();
let meta_clone = meta.clone();
let handle = task::spawn_blocking(move || scan_disk_blocking(root_clone, meta_clone, state));
match handle.await {
Ok(Ok(result)) => {
write_local_snapshot(root.as_path(), &disk_id, &result.snapshot)
.await
.map_err(Error::from)?;
write_scan_state(&state_path, &result.state).await?;
snapshots.push(result.snapshot);
for (bucket, records) in result.objects_by_bucket {
bucket_objects.entry(bucket).or_default().extend(records.into_iter());
}
disk_status.push(result.status);
}
Ok(Err(err)) => {
warn!("Failed to scan disk {}: {}", disk.to_string(), err);
}
Err(join_err) => {
warn!("Disk scan task panicked for disk {}: {}", disk.to_string(), join_err);
}
}
}
}
}
Ok(LocalScanOutcome {
snapshots,
bucket_objects,
disk_status,
})
}
fn scan_disk_blocking(root: PathBuf, meta: LocalUsageSnapshotMeta, mut state: IncrementalScanState) -> Result<DiskScanResult> {
let now = SystemTime::now();
let now_ns = system_time_to_ns(now);
let mut visited: HashSet<String> = HashSet::new();
let mut emitted: HashSet<String> = HashSet::new();
let mut objects_by_bucket: HashMap<String, Vec<LocalObjectRecord>> = HashMap::new();
let mut status = DiskUsageStatus {
disk_id: meta.disk_id.clone(),
pool_index: meta.pool_index,
set_index: meta.set_index,
disk_index: meta.disk_index,
last_update: None,
snapshot_exists: false,
};
for entry in WalkDir::new(&root).follow_links(false).into_iter().filter_map(|res| res.ok()) {
if !entry.file_type().is_file() {
continue;
}
if entry.file_name() != "xl.meta" {
continue;
}
let xl_path = entry.path().to_path_buf();
let Some(object_dir) = xl_path.parent() else {
continue;
};
let Some(rel_path) = object_dir.strip_prefix(&root).ok().map(normalize_path) else {
continue;
};
let mut components = rel_path.split('/');
let Some(bucket_name) = components.next() else {
continue;
};
if bucket_name.starts_with('.') {
continue;
}
let object_key = components.collect::<Vec<_>>().join("/");
visited.insert(rel_path.clone());
let metadata = match std::fs::metadata(&xl_path) {
Ok(meta) => meta,
Err(err) => {
warn!("Failed to read metadata for {xl_path:?}: {err}");
continue;
}
};
let mtime_ns = metadata.modified().ok().map(system_time_to_ns);
let should_parse = match state.objects.get(&rel_path) {
Some(existing) => existing.last_modified_ns != mtime_ns,
None => true,
};
if should_parse {
match std::fs::read(&xl_path) {
Ok(buf) => match FileMeta::load(&buf) {
Ok(file_meta) => match compute_object_usage(bucket_name, object_key.as_str(), &file_meta) {
Ok(Some(mut record)) => {
record.usage.last_modified_ns = mtime_ns;
state.objects.insert(rel_path.clone(), record.usage.clone());
emitted.insert(rel_path.clone());
objects_by_bucket.entry(record.usage.bucket.clone()).or_default().push(record);
}
Ok(None) => {
state.objects.remove(&rel_path);
}
Err(err) => {
warn!("Failed to parse usage from {:?}: {}", xl_path, err);
}
},
Err(err) => {
warn!("Failed to decode xl.meta {:?}: {}", xl_path, err);
}
},
Err(err) => {
warn!("Failed to read xl.meta {:?}: {}", xl_path, err);
}
}
}
}
state.objects.retain(|key, _| visited.contains(key));
state.last_scan_ns = Some(now_ns);
for (key, usage) in &state.objects {
if emitted.contains(key) {
continue;
}
objects_by_bucket
.entry(usage.bucket.clone())
.or_default()
.push(LocalObjectRecord {
usage: usage.clone(),
object_info: None,
});
}
let snapshot = build_snapshot(meta, &state.objects, now);
status.snapshot_exists = true;
status.last_update = Some(now);
Ok(DiskScanResult {
snapshot,
state,
objects_by_bucket,
status,
})
}
fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Result<Option<LocalObjectRecord>> {
let mut versions_count = 0u64;
let mut delete_markers_count = 0u64;
let mut total_size = 0u64;
let mut has_live_object = false;
let mut latest_file_info: Option<FileInfo> = None;
for shallow in &file_meta.versions {
match shallow.header.version_type {
VersionType::Object => {
let version = match FileMetaVersion::try_from(shallow.meta.as_slice()) {
Ok(version) => version,
Err(err) => {
warn!("Failed to parse file meta version: {}", err);
continue;
}
};
if let Some(obj) = version.object {
if !has_live_object {
total_size = obj.size.max(0) as u64;
}
has_live_object = true;
versions_count = versions_count.saturating_add(1);
if latest_file_info.is_none()
&& let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false)
{
latest_file_info = Some(info);
}
}
}
VersionType::Delete => {
delete_markers_count = delete_markers_count.saturating_add(1);
versions_count = versions_count.saturating_add(1);
}
_ => {}
}
}
if !has_live_object && delete_markers_count == 0 {
return Ok(None);
}
let object_info = latest_file_info.as_ref().map(|fi| {
let versioned = fi.version_id.is_some();
ObjectInfo::from_file_info(fi, bucket, object, versioned)
});
Ok(Some(LocalObjectRecord {
usage: LocalObjectUsage {
bucket: bucket.to_string(),
object: object.to_string(),
last_modified_ns: None,
versions_count,
delete_markers_count,
total_size,
has_live_object,
},
object_info,
}))
}
fn build_snapshot(
meta: LocalUsageSnapshotMeta,
objects: &HashMap<String, LocalObjectUsage>,
now: SystemTime,
) -> LocalUsageSnapshot {
let mut snapshot = LocalUsageSnapshot::new(meta);
for usage in objects.values() {
let bucket_entry = snapshot.buckets_usage.entry(usage.bucket.clone()).or_default();
if usage.has_live_object {
bucket_entry.objects_count = bucket_entry.objects_count.saturating_add(1);
}
bucket_entry.versions_count = bucket_entry.versions_count.saturating_add(usage.versions_count);
bucket_entry.delete_markers_count = bucket_entry.delete_markers_count.saturating_add(usage.delete_markers_count);
bucket_entry.size = bucket_entry.size.saturating_add(usage.total_size);
}
snapshot.last_update = Some(now);
snapshot.recompute_totals();
snapshot
}
fn normalize_path(path: &Path) -> String {
path.iter()
.map(|component| component.to_string_lossy())
.collect::<Vec<_>>()
.join("/")
}
fn system_time_to_ns(time: SystemTime) -> i128 {
match time.duration_since(UNIX_EPOCH) {
Ok(duration) => {
let secs = duration.as_secs() as i128;
let nanos = duration.subsec_nanos() as i128;
secs * 1_000_000_000 + nanos
}
Err(err) => {
let duration = err.duration();
let secs = duration.as_secs() as i128;
let nanos = duration.subsec_nanos() as i128;
-(secs * 1_000_000_000 + nanos)
}
}
}
fn state_file_path(root: &Path, disk_id: &str) -> PathBuf {
let mut path = data_usage_state_dir(root);
path.push(format!("{}{}", snapshot_file_name(disk_id), STATE_FILE_EXTENSION));
path
}
async fn read_scan_state(path: &Path) -> Result<IncrementalScanState> {
match fs::read(path).await {
Ok(bytes) => from_slice(&bytes).map_err(|err| Error::Serialization(err.to_string())),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(IncrementalScanState::default()),
Err(err) => Err(err.into()),
}
}
async fn write_scan_state(path: &Path, state: &IncrementalScanState) -> Result<()> {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).await?;
}
let data = to_vec(state).map_err(|err| Error::Serialization(err.to_string()))?;
fs::write(path, data).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use rustfs_filemeta::{ChecksumAlgo, ErasureAlgo, FileMetaShallowVersion, MetaDeleteMarker, MetaObject};
use std::collections::HashMap;
use std::fs;
use tempfile::TempDir;
use time::OffsetDateTime;
use uuid::Uuid;
fn build_file_meta_with_object(erasure_index: usize, size: i64) -> FileMeta {
let mut file_meta = FileMeta::default();
let meta_object = MetaObject {
version_id: Some(Uuid::new_v4()),
data_dir: Some(Uuid::new_v4()),
erasure_algorithm: ErasureAlgo::ReedSolomon,
erasure_m: 2,
erasure_n: 2,
erasure_block_size: 4096,
erasure_index,
erasure_dist: vec![0_u8, 1, 2, 3],
bitrot_checksum_algo: ChecksumAlgo::HighwayHash,
part_numbers: vec![1],
part_etags: vec!["etag".to_string()],
part_sizes: vec![size as usize],
part_actual_sizes: vec![size],
part_indices: Vec::new(),
size,
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: HashMap::new(),
meta_user: HashMap::new(),
};
let version = FileMetaVersion {
version_type: VersionType::Object,
object: Some(meta_object),
delete_marker: None,
write_version: 1,
};
let shallow = FileMetaShallowVersion::try_from(version).expect("convert version");
file_meta.versions.push(shallow);
file_meta
}
fn build_file_meta_with_delete_marker() -> FileMeta {
let mut file_meta = FileMeta::default();
let delete_marker = MetaDeleteMarker {
version_id: Some(Uuid::new_v4()),
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: HashMap::new(),
};
let version = FileMetaVersion {
version_type: VersionType::Delete,
object: None,
delete_marker: Some(delete_marker),
write_version: 2,
};
let shallow = FileMetaShallowVersion::try_from(version).expect("convert delete marker");
file_meta.versions.push(shallow);
file_meta
}
#[test]
fn compute_object_usage_primary_disk() {
let file_meta = build_file_meta_with_object(0, 1024);
let record = compute_object_usage("bucket", "foo/bar", &file_meta)
.expect("compute usage")
.expect("record should exist");
assert!(record.usage.has_live_object);
assert_eq!(record.usage.bucket, "bucket");
assert_eq!(record.usage.object, "foo/bar");
assert_eq!(record.usage.total_size, 1024);
assert!(record.object_info.is_some(), "object info should be synthesized");
}
#[test]
fn compute_object_usage_handles_non_primary_disk() {
let file_meta = build_file_meta_with_object(1, 2048);
let record = compute_object_usage("bucket", "obj", &file_meta)
.expect("compute usage")
.expect("record should exist for non-primary shard");
assert!(record.usage.has_live_object);
}
#[test]
fn compute_object_usage_reports_delete_marker() {
let file_meta = build_file_meta_with_delete_marker();
let record = compute_object_usage("bucket", "obj", &file_meta)
.expect("compute usage")
.expect("delete marker record");
assert!(!record.usage.has_live_object);
assert_eq!(record.usage.delete_markers_count, 1);
assert_eq!(record.usage.versions_count, 1);
}
#[test]
fn build_snapshot_accumulates_usage() {
let mut objects = HashMap::new();
objects.insert(
"bucket/a".to_string(),
LocalObjectUsage {
bucket: "bucket".to_string(),
object: "a".to_string(),
last_modified_ns: None,
versions_count: 2,
delete_markers_count: 1,
total_size: 512,
has_live_object: true,
},
);
let snapshot = build_snapshot(LocalUsageSnapshotMeta::default(), &objects, SystemTime::now());
let usage = snapshot.buckets_usage.get("bucket").expect("bucket entry should exist");
assert_eq!(usage.objects_count, 1);
assert_eq!(usage.versions_count, 2);
assert_eq!(usage.delete_markers_count, 1);
assert_eq!(usage.size, 512);
}
#[test]
fn scan_disk_blocking_handles_incremental_updates() {
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let bucket_dir = root.join("bench");
let object1_dir = bucket_dir.join("obj1");
fs::create_dir_all(&object1_dir).expect("create first object directory");
let file_meta = build_file_meta_with_object(0, 1024);
let bytes = file_meta.marshal_msg().expect("serialize first object");
fs::write(object1_dir.join("xl.meta"), bytes).expect("write first xl.meta");
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult {
snapshot: snapshot1,
state,
..
} = scan_disk_blocking(root.to_path_buf(), meta.clone(), IncrementalScanState::default()).expect("initial scan succeeds");
let usage1 = snapshot1.buckets_usage.get("bench").expect("bucket stats recorded");
assert_eq!(usage1.objects_count, 1);
assert_eq!(usage1.size, 1024);
assert_eq!(state.objects.len(), 1);
let object2_dir = bucket_dir.join("nested").join("obj2");
fs::create_dir_all(&object2_dir).expect("create second object directory");
let second_meta = build_file_meta_with_object(0, 2048);
let bytes = second_meta.marshal_msg().expect("serialize second object");
fs::write(object2_dir.join("xl.meta"), bytes).expect("write second xl.meta");
let DiskScanResult {
snapshot: snapshot2,
state: state_next,
..
} = scan_disk_blocking(root.to_path_buf(), meta.clone(), state).expect("incremental scan succeeds");
let usage2 = snapshot2
.buckets_usage
.get("bench")
.expect("bucket stats recorded after addition");
assert_eq!(usage2.objects_count, 2);
assert_eq!(usage2.size, 1024 + 2048);
assert_eq!(state_next.objects.len(), 2);
fs::remove_dir_all(&object1_dir).expect("remove first object");
let DiskScanResult {
snapshot: snapshot3,
state: state_final,
..
} = scan_disk_blocking(root.to_path_buf(), meta, state_next).expect("scan after deletion succeeds");
let usage3 = snapshot3
.buckets_usage
.get("bench")
.expect("bucket stats recorded after deletion");
assert_eq!(usage3.objects_count, 1);
assert_eq!(usage3.size, 2048);
assert_eq!(state_final.objects.len(), 1);
assert!(
state_final.objects.keys().all(|path| path.contains("nested")),
"state should only keep surviving object"
);
}
#[test]
fn scan_disk_blocking_recovers_from_stale_state_entries() {
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let mut stale_state = IncrementalScanState::default();
stale_state.objects.insert(
"bench/stale".to_string(),
LocalObjectUsage {
bucket: "bench".to_string(),
object: "stale".to_string(),
last_modified_ns: Some(42),
versions_count: 1,
delete_markers_count: 0,
total_size: 512,
has_live_object: true,
},
);
stale_state.last_scan_ns = Some(99);
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult {
snapshot, state, status, ..
} = scan_disk_blocking(root.to_path_buf(), meta, stale_state).expect("scan succeeds");
assert!(state.objects.is_empty(), "stale entries should be cleared when files disappear");
assert!(
snapshot.buckets_usage.is_empty(),
"no real xl.meta files means bucket usage should stay empty"
);
assert!(status.snapshot_exists, "snapshot status should indicate a refresh");
}
#[test]
fn scan_disk_blocking_handles_large_volume() {
const OBJECTS: usize = 256;
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let bucket_dir = root.join("bulk");
for idx in 0..OBJECTS {
let object_dir = bucket_dir.join(format!("obj-{idx:03}"));
fs::create_dir_all(&object_dir).expect("create object directory");
let size = 1024 + idx as i64;
let file_meta = build_file_meta_with_object(0, size);
let bytes = file_meta.marshal_msg().expect("serialize file meta");
fs::write(object_dir.join("xl.meta"), bytes).expect("write xl.meta");
}
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult { snapshot, state, .. } =
scan_disk_blocking(root.to_path_buf(), meta, IncrementalScanState::default()).expect("bulk scan succeeds");
let bucket_usage = snapshot
.buckets_usage
.get("bulk")
.expect("bucket usage present for bulk scan");
assert_eq!(bucket_usage.objects_count as usize, OBJECTS, "should count all objects once");
assert!(
bucket_usage.size >= (1024 * OBJECTS) as u64,
"aggregated size should grow with object count"
);
assert_eq!(state.objects.len(), OBJECTS, "incremental state tracks every object");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/event.rs | crates/ahm/src/heal/event.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{HealOptions, HealPriority, HealRequest, HealType};
use crate::{Error, Result};
use rustfs_ecstore::disk::endpoint::Endpoint;
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
/// Corruption type
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CorruptionType {
/// Data corruption
DataCorruption,
/// Metadata corruption
MetadataCorruption,
/// Partial corruption
PartialCorruption,
/// Complete corruption
CompleteCorruption,
}
/// Severity level
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Severity {
/// Low severity
Low = 0,
/// Medium severity
Medium = 1,
/// High severity
High = 2,
/// Critical severity
Critical = 3,
}
/// Heal event
#[derive(Debug, Clone)]
pub enum HealEvent {
/// Object corruption event
ObjectCorruption {
bucket: String,
object: String,
version_id: Option<String>,
corruption_type: CorruptionType,
severity: Severity,
},
/// Object missing event
ObjectMissing {
bucket: String,
object: String,
version_id: Option<String>,
expected_locations: Vec<usize>,
available_locations: Vec<usize>,
},
/// Metadata corruption event
MetadataCorruption {
bucket: String,
object: String,
corruption_type: CorruptionType,
},
/// Disk status change event
DiskStatusChange {
endpoint: Endpoint,
old_status: String,
new_status: String,
},
/// EC decode failure event
ECDecodeFailure {
bucket: String,
object: String,
version_id: Option<String>,
missing_shards: Vec<usize>,
available_shards: Vec<usize>,
},
/// Checksum mismatch event
ChecksumMismatch {
bucket: String,
object: String,
version_id: Option<String>,
expected_checksum: String,
actual_checksum: String,
},
/// Bucket metadata corruption event
BucketMetadataCorruption {
bucket: String,
corruption_type: CorruptionType,
},
/// MRF metadata corruption event
MRFMetadataCorruption {
meta_path: String,
corruption_type: CorruptionType,
},
}
impl HealEvent {
/// Convert HealEvent to HealRequest
pub fn to_heal_request(&self) -> Result<HealRequest> {
match self {
HealEvent::ObjectCorruption {
bucket,
object,
version_id,
severity,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
Self::severity_to_priority(severity),
)),
HealEvent::ObjectMissing {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::MetadataCorruption { bucket, object, .. } => Ok(HealRequest::new(
HealType::Metadata {
bucket: bucket.clone(),
object: object.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::DiskStatusChange { endpoint, .. } => {
// Convert disk status change to erasure set heal
// Note: This requires access to storage to get bucket list, which is not available here
// The actual bucket list will need to be provided by the caller or retrieved differently
let set_disk_id = crate::heal::utils::format_set_disk_id_from_i32(endpoint.pool_idx, endpoint.set_idx)
.ok_or_else(|| Error::InvalidHealType {
heal_type: format!("erasure-set(pool={}, set={})", endpoint.pool_idx, endpoint.set_idx),
})?;
Ok(HealRequest::new(
HealType::ErasureSet {
buckets: vec![], // Empty bucket list - caller should populate this
set_disk_id,
},
HealOptions::default(),
HealPriority::High,
))
}
HealEvent::ECDecodeFailure {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::ECDecode {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::Urgent,
)),
HealEvent::ChecksumMismatch {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::BucketMetadataCorruption { bucket, .. } => Ok(HealRequest::new(
HealType::Bucket { bucket: bucket.clone() },
HealOptions::default(),
HealPriority::High,
)),
HealEvent::MRFMetadataCorruption { meta_path, .. } => Ok(HealRequest::new(
HealType::MRF {
meta_path: meta_path.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
}
}
/// Convert severity to priority
fn severity_to_priority(severity: &Severity) -> HealPriority {
match severity {
Severity::Low => HealPriority::Low,
Severity::Medium => HealPriority::Normal,
Severity::High => HealPriority::High,
Severity::Critical => HealPriority::Urgent,
}
}
/// Get event description
pub fn description(&self) -> String {
match self {
HealEvent::ObjectCorruption {
bucket,
object,
corruption_type,
..
} => {
format!("Object corruption detected: {bucket}/{object} - {corruption_type:?}")
}
HealEvent::ObjectMissing { bucket, object, .. } => {
format!("Object missing: {bucket}/{object}")
}
HealEvent::MetadataCorruption {
bucket,
object,
corruption_type,
..
} => {
format!("Metadata corruption: {bucket}/{object} - {corruption_type:?}")
}
HealEvent::DiskStatusChange {
endpoint,
old_status,
new_status,
..
} => {
format!("Disk status changed: {endpoint:?} {old_status} -> {new_status}")
}
HealEvent::ECDecodeFailure {
bucket,
object,
missing_shards,
..
} => {
format!("EC decode failure: {bucket}/{object} - missing shards: {missing_shards:?}")
}
HealEvent::ChecksumMismatch {
bucket,
object,
expected_checksum,
actual_checksum,
..
} => {
format!("Checksum mismatch: {bucket}/{object} - expected: {expected_checksum}, actual: {actual_checksum}")
}
HealEvent::BucketMetadataCorruption {
bucket, corruption_type, ..
} => {
format!("Bucket metadata corruption: {bucket} - {corruption_type:?}")
}
HealEvent::MRFMetadataCorruption {
meta_path,
corruption_type,
..
} => {
format!("MRF metadata corruption: {meta_path} - {corruption_type:?}")
}
}
}
/// Get event severity
pub fn severity(&self) -> Severity {
match self {
HealEvent::ObjectCorruption { severity, .. } => severity.clone(),
HealEvent::ObjectMissing { .. } => Severity::High,
HealEvent::MetadataCorruption { .. } => Severity::High,
HealEvent::DiskStatusChange { .. } => Severity::High,
HealEvent::ECDecodeFailure { .. } => Severity::Critical,
HealEvent::ChecksumMismatch { .. } => Severity::High,
HealEvent::BucketMetadataCorruption { .. } => Severity::High,
HealEvent::MRFMetadataCorruption { .. } => Severity::High,
}
}
/// Get event timestamp
pub fn timestamp(&self) -> SystemTime {
SystemTime::now()
}
}
/// Heal event handler
pub struct HealEventHandler {
/// Event queue
events: Vec<HealEvent>,
/// Maximum number of events
max_events: usize,
}
impl HealEventHandler {
pub fn new(max_events: usize) -> Self {
Self {
events: Vec::new(),
max_events,
}
}
/// Add event
pub fn add_event(&mut self, event: HealEvent) {
if self.events.len() >= self.max_events {
// Remove oldest event
self.events.remove(0);
}
self.events.push(event);
}
/// Get all events
pub fn get_events(&self) -> &[HealEvent] {
&self.events
}
/// Clear events
pub fn clear_events(&mut self) {
self.events.clear();
}
/// Get event count
pub fn event_count(&self) -> usize {
self.events.len()
}
/// Filter events by severity
pub fn filter_by_severity(&self, min_severity: Severity) -> Vec<&HealEvent> {
self.events.iter().filter(|event| event.severity() >= min_severity).collect()
}
/// Filter events by type
pub fn filter_by_type(&self, event_type: &str) -> Vec<&HealEvent> {
self.events
.iter()
.filter(|event| match event {
HealEvent::ObjectCorruption { .. } => event_type == "ObjectCorruption",
HealEvent::ObjectMissing { .. } => event_type == "ObjectMissing",
HealEvent::MetadataCorruption { .. } => event_type == "MetadataCorruption",
HealEvent::DiskStatusChange { .. } => event_type == "DiskStatusChange",
HealEvent::ECDecodeFailure { .. } => event_type == "ECDecodeFailure",
HealEvent::ChecksumMismatch { .. } => event_type == "ChecksumMismatch",
HealEvent::BucketMetadataCorruption { .. } => event_type == "BucketMetadataCorruption",
HealEvent::MRFMetadataCorruption { .. } => event_type == "MRFMetadataCorruption",
})
.collect()
}
}
impl Default for HealEventHandler {
fn default() -> Self {
Self::new(1000)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::task::{HealPriority, HealType};
#[test]
fn test_heal_event_object_corruption_to_request() {
let event = HealEvent::ObjectCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_object_missing_to_request() {
let event = HealEvent::ObjectMissing {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: Some("v1".to_string()),
expected_locations: vec![0, 1],
available_locations: vec![2, 3],
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_metadata_corruption_to_request() {
let event = HealEvent::MetadataCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Metadata { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_ec_decode_failure_to_request() {
let event = HealEvent::ECDecodeFailure {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
missing_shards: vec![0, 1],
available_shards: vec![2, 3, 4],
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
assert_eq!(request.priority, HealPriority::Urgent);
}
#[test]
fn test_heal_event_checksum_mismatch_to_request() {
let event = HealEvent::ChecksumMismatch {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
expected_checksum: "abc123".to_string(),
actual_checksum: "def456".to_string(),
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_bucket_metadata_corruption_to_request() {
let event = HealEvent::BucketMetadataCorruption {
bucket: "test-bucket".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Bucket { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_mrf_metadata_corruption_to_request() {
let event = HealEvent::MRFMetadataCorruption {
meta_path: "test-bucket/test-object".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::MRF { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_severity_to_priority() {
let event_low = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Low,
};
let request = event_low.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Low);
let event_medium = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Medium,
};
let request = event_medium.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Normal);
let event_high = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let request = event_high.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::High);
let event_critical = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Critical,
};
let request = event_critical.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Urgent);
}
#[test]
fn test_heal_event_description() {
let event = HealEvent::ObjectCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let desc = event.description();
assert!(desc.contains("Object corruption detected"));
assert!(desc.contains("test-bucket/test-object"));
assert!(desc.contains("DataCorruption"));
}
#[test]
fn test_heal_event_severity() {
let event = HealEvent::ECDecodeFailure {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
missing_shards: vec![],
available_shards: vec![],
};
assert_eq!(event.severity(), Severity::Critical);
let event = HealEvent::ObjectMissing {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
expected_locations: vec![],
available_locations: vec![],
};
assert_eq!(event.severity(), Severity::High);
}
#[test]
fn test_heal_event_handler_new() {
let handler = HealEventHandler::new(10);
assert_eq!(handler.event_count(), 0);
assert_eq!(handler.max_events, 10);
}
#[test]
fn test_heal_event_handler_default() {
let handler = HealEventHandler::default();
assert_eq!(handler.max_events, 1000);
}
#[test]
fn test_heal_event_handler_add_event() {
let mut handler = HealEventHandler::new(3);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
assert_eq!(handler.event_count(), 1);
handler.add_event(event.clone());
handler.add_event(event.clone());
assert_eq!(handler.event_count(), 3);
}
#[test]
fn test_heal_event_handler_max_events() {
let mut handler = HealEventHandler::new(2);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
handler.add_event(event.clone());
handler.add_event(event.clone()); // Should remove oldest
assert_eq!(handler.event_count(), 2);
}
#[test]
fn test_heal_event_handler_get_events() {
let mut handler = HealEventHandler::new(10);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
handler.add_event(event.clone());
let events = handler.get_events();
assert_eq!(events.len(), 2);
}
#[test]
fn test_heal_event_handler_clear_events() {
let mut handler = HealEventHandler::new(10);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event);
assert_eq!(handler.event_count(), 1);
handler.clear_events();
assert_eq!(handler.event_count(), 0);
}
#[test]
fn test_heal_event_handler_filter_by_severity() {
let mut handler = HealEventHandler::new(10);
handler.add_event(HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Low,
});
handler.add_event(HealEvent::ECDecodeFailure {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
missing_shards: vec![],
available_shards: vec![],
});
let high_severity = handler.filter_by_severity(Severity::High);
assert_eq!(high_severity.len(), 1); // Only ECDecodeFailure is Critical >= High
}
#[test]
fn test_heal_event_handler_filter_by_type() {
let mut handler = HealEventHandler::new(10);
handler.add_event(HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
});
handler.add_event(HealEvent::ObjectMissing {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
expected_locations: vec![],
available_locations: vec![],
});
let corruption_events = handler.filter_by_type("ObjectCorruption");
assert_eq!(corruption_events.len(), 1);
let missing_events = handler.filter_by_type("ObjectMissing");
assert_eq!(missing_events.len(), 1);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/manager.rs | crates/ahm/src/heal/manager.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{
progress::{HealProgress, HealStatistics},
storage::HealStorageAPI,
task::{HealOptions, HealPriority, HealRequest, HealTask, HealTaskStatus, HealType},
};
use crate::{Error, Result};
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
use std::{
collections::{BinaryHeap, HashMap, HashSet},
sync::Arc,
time::{Duration, SystemTime},
};
use tokio::{
sync::{Mutex, RwLock},
time::interval,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
/// Priority queue wrapper for heal requests
/// Uses BinaryHeap for priority-based ordering while maintaining FIFO for same-priority items
#[derive(Debug)]
struct PriorityHealQueue {
/// Heap of (priority, sequence, request) tuples
heap: BinaryHeap<PriorityQueueItem>,
/// Sequence counter for FIFO ordering within same priority
sequence: u64,
/// Set of request keys to prevent duplicates
dedup_keys: HashSet<String>,
}
/// Wrapper for heap items to implement proper ordering
#[derive(Debug)]
struct PriorityQueueItem {
priority: HealPriority,
sequence: u64,
request: HealRequest,
}
impl Eq for PriorityQueueItem {}
impl PartialEq for PriorityQueueItem {
fn eq(&self, other: &Self) -> bool {
self.priority == other.priority && self.sequence == other.sequence
}
}
impl Ord for PriorityQueueItem {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// First compare by priority (higher priority first)
match self.priority.cmp(&other.priority) {
std::cmp::Ordering::Equal => {
// If priorities are equal, use sequence for FIFO (lower sequence first)
other.sequence.cmp(&self.sequence)
}
ordering => ordering,
}
}
}
impl PartialOrd for PriorityQueueItem {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PriorityHealQueue {
fn new() -> Self {
Self {
heap: BinaryHeap::new(),
sequence: 0,
dedup_keys: HashSet::new(),
}
}
fn len(&self) -> usize {
self.heap.len()
}
fn is_empty(&self) -> bool {
self.heap.is_empty()
}
fn push(&mut self, request: HealRequest) -> bool {
let key = Self::make_dedup_key(&request);
// Check for duplicates
if self.dedup_keys.contains(&key) {
return false; // Duplicate request, don't add
}
self.dedup_keys.insert(key);
self.sequence += 1;
self.heap.push(PriorityQueueItem {
priority: request.priority,
sequence: self.sequence,
request,
});
true
}
/// Get statistics about queue contents by priority
fn get_priority_stats(&self) -> HashMap<HealPriority, usize> {
let mut stats = HashMap::new();
for item in &self.heap {
*stats.entry(item.priority).or_insert(0) += 1;
}
stats
}
fn pop(&mut self) -> Option<HealRequest> {
self.heap.pop().map(|item| {
let key = Self::make_dedup_key(&item.request);
self.dedup_keys.remove(&key);
item.request
})
}
/// Create a deduplication key from a heal request
fn make_dedup_key(request: &HealRequest) -> String {
match &request.heal_type {
HealType::Object {
bucket,
object,
version_id,
} => {
format!("object:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
}
HealType::Bucket { bucket } => {
format!("bucket:{bucket}")
}
HealType::ErasureSet { set_disk_id, .. } => {
format!("erasure_set:{set_disk_id}")
}
HealType::Metadata { bucket, object } => {
format!("metadata:{bucket}:{object}")
}
HealType::MRF { meta_path } => {
format!("mrf:{meta_path}")
}
HealType::ECDecode {
bucket,
object,
version_id,
} => {
format!("ecdecode:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
}
}
}
/// Check if a request with the same key already exists in the queue
#[allow(dead_code)]
fn contains_key(&self, request: &HealRequest) -> bool {
let key = Self::make_dedup_key(request);
self.dedup_keys.contains(&key)
}
/// Check if an erasure set heal request for a specific set_disk_id exists
fn contains_erasure_set(&self, set_disk_id: &str) -> bool {
let key = format!("erasure_set:{set_disk_id}");
self.dedup_keys.contains(&key)
}
}
/// Heal config
#[derive(Debug, Clone)]
pub struct HealConfig {
/// Whether to enable auto heal
pub enable_auto_heal: bool,
/// Heal interval
pub heal_interval: Duration,
/// Maximum concurrent heal tasks
pub max_concurrent_heals: usize,
/// Task timeout
pub task_timeout: Duration,
/// Queue size
pub queue_size: usize,
}
impl Default for HealConfig {
fn default() -> Self {
let queue_size: usize =
rustfs_utils::get_env_usize(rustfs_config::ENV_HEAL_QUEUE_SIZE, rustfs_config::DEFAULT_HEAL_QUEUE_SIZE);
let heal_interval = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_INTERVAL_SECS,
rustfs_config::DEFAULT_HEAL_INTERVAL_SECS,
));
let enable_auto_heal =
rustfs_utils::get_env_bool(rustfs_config::ENV_HEAL_AUTO_HEAL_ENABLE, rustfs_config::DEFAULT_HEAL_AUTO_HEAL_ENABLE);
let task_timeout = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_TASK_TIMEOUT_SECS,
rustfs_config::DEFAULT_HEAL_TASK_TIMEOUT_SECS,
));
let max_concurrent_heals = rustfs_utils::get_env_usize(
rustfs_config::ENV_HEAL_MAX_CONCURRENT_HEALS,
rustfs_config::DEFAULT_HEAL_MAX_CONCURRENT_HEALS,
);
Self {
enable_auto_heal,
heal_interval, // 10 seconds
max_concurrent_heals, // max 4,
task_timeout, // 5 minutes
queue_size,
}
}
}
/// Heal state
#[derive(Debug, Default)]
pub struct HealState {
/// Whether running
pub is_running: bool,
/// Current heal cycle
pub current_cycle: u64,
/// Last heal time
pub last_heal_time: Option<SystemTime>,
/// Total healed objects
pub total_healed_objects: u64,
/// Total heal failures
pub total_heal_failures: u64,
/// Current active heal tasks
pub active_heal_count: usize,
}
/// Heal manager
pub struct HealManager {
/// Heal config
config: Arc<RwLock<HealConfig>>,
/// Heal state
state: Arc<RwLock<HealState>>,
/// Active heal tasks
active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
/// Heal queue (priority-based)
heal_queue: Arc<Mutex<PriorityHealQueue>>,
/// Storage layer interface
storage: Arc<dyn HealStorageAPI>,
/// Cancel token
cancel_token: CancellationToken,
/// Statistics
statistics: Arc<RwLock<HealStatistics>>,
}
impl HealManager {
/// Create new HealManager
pub fn new(storage: Arc<dyn HealStorageAPI>, config: Option<HealConfig>) -> Self {
let config = config.unwrap_or_default();
Self {
config: Arc::new(RwLock::new(config)),
state: Arc::new(RwLock::new(HealState::default())),
active_heals: Arc::new(Mutex::new(HashMap::new())),
heal_queue: Arc::new(Mutex::new(PriorityHealQueue::new())),
storage,
cancel_token: CancellationToken::new(),
statistics: Arc::new(RwLock::new(HealStatistics::new())),
}
}
/// Start HealManager
pub async fn start(&self) -> Result<()> {
let mut state = self.state.write().await;
if state.is_running {
warn!("HealManager is already running");
return Ok(());
}
state.is_running = true;
drop(state);
info!("Starting HealManager");
// start scheduler
self.start_scheduler().await?;
// start auto disk scanner to heal unformatted disks
self.start_auto_disk_scanner().await?;
info!("HealManager started successfully");
Ok(())
}
/// Stop HealManager
pub async fn stop(&self) -> Result<()> {
info!("Stopping HealManager");
// cancel all tasks
self.cancel_token.cancel();
// wait for all tasks to complete
let mut active_heals = self.active_heals.lock().await;
for task in active_heals.values() {
if let Err(e) = task.cancel().await {
warn!("Failed to cancel task {}: {}", task.id, e);
}
}
active_heals.clear();
// update state
let mut state = self.state.write().await;
state.is_running = false;
info!("HealManager stopped successfully");
Ok(())
}
/// Submit heal request
pub async fn submit_heal_request(&self, request: HealRequest) -> Result<String> {
let config = self.config.read().await;
let mut queue = self.heal_queue.lock().await;
let queue_len = queue.len();
let queue_capacity = config.queue_size;
if queue_len >= queue_capacity {
return Err(Error::ConfigurationError {
message: format!("Heal queue is full ({queue_len}/{queue_capacity})"),
});
}
// Warn when queue is getting full (>80% capacity)
let capacity_threshold = (queue_capacity as f64 * 0.8) as usize;
if queue_len >= capacity_threshold {
warn!(
"Heal queue is {}% full ({}/{}). Consider increasing queue size or processing capacity.",
(queue_len * 100) / queue_capacity,
queue_len,
queue_capacity
);
}
let request_id = request.id.clone();
let priority = request.priority;
// Try to push the request; if it's a duplicate, still return the request_id
let is_new = queue.push(request);
// Log queue statistics periodically (when adding high/urgent priority items)
if matches!(priority, HealPriority::High | HealPriority::Urgent) {
let stats = queue.get_priority_stats();
info!(
"Heal queue stats after adding {:?} priority request: total={}, urgent={}, high={}, normal={}, low={}",
priority,
queue_len + 1,
stats.get(&HealPriority::Urgent).unwrap_or(&0),
stats.get(&HealPriority::High).unwrap_or(&0),
stats.get(&HealPriority::Normal).unwrap_or(&0),
stats.get(&HealPriority::Low).unwrap_or(&0)
);
}
drop(queue);
if is_new {
info!("Submitted heal request: {} with priority: {:?}", request_id, priority);
} else {
info!("Heal request already queued (duplicate): {}", request_id);
}
Ok(request_id)
}
/// Get task status
pub async fn get_task_status(&self, task_id: &str) -> Result<HealTaskStatus> {
let active_heals = self.active_heals.lock().await;
if let Some(task) = active_heals.get(task_id) {
Ok(task.get_status().await)
} else {
Err(Error::TaskNotFound {
task_id: task_id.to_string(),
})
}
}
/// Get task progress
pub async fn get_active_tasks_count(&self) -> usize {
self.active_heals.lock().await.len()
}
pub async fn get_task_progress(&self, task_id: &str) -> Result<HealProgress> {
let active_heals = self.active_heals.lock().await;
if let Some(task) = active_heals.get(task_id) {
Ok(task.get_progress().await)
} else {
Err(Error::TaskNotFound {
task_id: task_id.to_string(),
})
}
}
/// Cancel task
pub async fn cancel_task(&self, task_id: &str) -> Result<()> {
let mut active_heals = self.active_heals.lock().await;
if let Some(task) = active_heals.get(task_id) {
task.cancel().await?;
active_heals.remove(task_id);
info!("Cancelled heal task: {}", task_id);
Ok(())
} else {
Err(Error::TaskNotFound {
task_id: task_id.to_string(),
})
}
}
/// Get statistics
pub async fn get_statistics(&self) -> HealStatistics {
self.statistics.read().await.clone()
}
/// Get active task count
pub async fn get_active_task_count(&self) -> usize {
let active_heals = self.active_heals.lock().await;
active_heals.len()
}
/// Get queue length
pub async fn get_queue_length(&self) -> usize {
let queue = self.heal_queue.lock().await;
queue.len()
}
/// Start scheduler
async fn start_scheduler(&self) -> Result<()> {
let config = self.config.clone();
let heal_queue = self.heal_queue.clone();
let active_heals = self.active_heals.clone();
let cancel_token = self.cancel_token.clone();
let statistics = self.statistics.clone();
let storage = self.storage.clone();
tokio::spawn(async move {
let mut interval = interval(config.read().await.heal_interval);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
info!("Heal scheduler received shutdown signal");
break;
}
_ = interval.tick() => {
Self::process_heal_queue(&heal_queue, &active_heals, &config, &statistics, &storage).await;
}
}
}
});
Ok(())
}
/// Start background task to auto scan local disks and enqueue erasure set heal requests
async fn start_auto_disk_scanner(&self) -> Result<()> {
let config = self.config.clone();
let heal_queue = self.heal_queue.clone();
let active_heals = self.active_heals.clone();
let cancel_token = self.cancel_token.clone();
let storage = self.storage.clone();
let mut duration = {
let config = config.read().await;
config.heal_interval
};
if duration < Duration::from_secs(1) {
duration = Duration::from_secs(1);
}
info!("start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}", duration);
tokio::spawn(async move {
let mut interval = interval(duration);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
info!("start_auto_disk_scanner: Auto disk scanner received shutdown signal");
break;
}
_ = interval.tick() => {
// Build list of endpoints that need healing
let mut endpoints = Vec::new();
for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
if let Some(disk) = disk_opt {
// detect unformatted disk via get_disk_id()
if let Err(err) = disk.get_disk_id().await
&& err == DiskError::UnformattedDisk {
endpoints.push(disk.endpoint());
continue;
}
}
}
if endpoints.is_empty() {
info!("start_auto_disk_scanner: No endpoints need healing");
continue;
}
// Get bucket list for erasure set healing
let buckets = match storage.list_buckets().await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("start_auto_disk_scanner: Failed to get bucket list for auto healing: {}", e);
continue;
}
};
// Create erasure set heal requests for each endpoint
for ep in endpoints {
let Some(set_disk_id) =
crate::heal::utils::format_set_disk_id_from_i32(ep.pool_idx, ep.set_idx)
else {
warn!("start_auto_disk_scanner: Skipping endpoint {} without valid pool/set index", ep);
continue;
};
// skip if already queued or healing
// Use consistent lock order: queue first, then active_heals to avoid deadlock
let mut skip = false;
{
let queue = heal_queue.lock().await;
if queue.contains_erasure_set(&set_disk_id) {
skip = true;
}
}
if !skip {
let active = active_heals.lock().await;
if active.values().any(|task| {
matches!(
&task.heal_type,
crate::heal::task::HealType::ErasureSet { set_disk_id: active_id, .. }
if active_id == &set_disk_id
)
}) {
skip = true;
}
}
if skip {
info!("start_auto_disk_scanner: Skipping auto erasure set heal for endpoint: {} (set_disk_id: {}) because it is already queued or healing", ep, set_disk_id);
continue;
}
// enqueue erasure set heal request for this disk
let req = HealRequest::new(
HealType::ErasureSet {
buckets: buckets.clone(),
set_disk_id: set_disk_id.clone(),
},
HealOptions::default(),
HealPriority::Normal,
);
let mut queue = heal_queue.lock().await;
queue.push(req);
info!("start_auto_disk_scanner: Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
}
}
}
}
});
Ok(())
}
/// Process heal queue
/// Processes multiple tasks per cycle when capacity allows and queue has high-priority items
async fn process_heal_queue(
heal_queue: &Arc<Mutex<PriorityHealQueue>>,
active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
config: &Arc<RwLock<HealConfig>>,
statistics: &Arc<RwLock<HealStatistics>>,
storage: &Arc<dyn HealStorageAPI>,
) {
let config = config.read().await;
let mut active_heals_guard = active_heals.lock().await;
// Check if new heal tasks can be started
let active_count = active_heals_guard.len();
if active_count >= config.max_concurrent_heals {
return;
}
// Calculate how many tasks we can start this cycle
let available_slots = config.max_concurrent_heals - active_count;
let mut queue = heal_queue.lock().await;
let queue_len = queue.len();
if queue_len == 0 {
return;
}
// Process multiple tasks if:
// 1. We have available slots
// 2. Queue is not empty
// Prioritize urgent/high priority tasks by processing up to 2 tasks per cycle if available
let tasks_to_process = if queue_len > 0 {
std::cmp::min(available_slots, std::cmp::min(2, queue_len))
} else {
0
};
for _ in 0..tasks_to_process {
if let Some(request) = queue.pop() {
let task_priority = request.priority;
let task = Arc::new(HealTask::from_request(request, storage.clone()));
let task_id = task.id.clone();
active_heals_guard.insert(task_id.clone(), task.clone());
let active_heals_clone = active_heals.clone();
let statistics_clone = statistics.clone();
// start heal task
tokio::spawn(async move {
info!("Starting heal task: {} with priority: {:?}", task_id, task_priority);
let result = task.execute().await;
match result {
Ok(_) => {
info!("Heal task completed successfully: {}", task_id);
}
Err(e) => {
error!("Heal task failed: {} - {}", task_id, e);
}
}
let mut active_heals_guard = active_heals_clone.lock().await;
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
// update statistics
let mut stats = statistics_clone.write().await;
match completed_task.get_status().await {
HealTaskStatus::Completed => {
stats.update_task_completion(true);
}
_ => {
stats.update_task_completion(false);
}
}
stats.update_running_tasks(active_heals_guard.len() as u64);
}
});
} else {
break;
}
}
// Update statistics for all started tasks
let mut stats = statistics.write().await;
stats.total_tasks += tasks_to_process as u64;
// Log queue status if items remain
if !queue.is_empty() {
let remaining = queue.len();
if remaining > 10 {
info!("Heal queue has {} pending requests, {} tasks active", remaining, active_heals_guard.len());
}
}
}
}
impl std::fmt::Debug for HealManager {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("HealManager")
.field("config", &"<config>")
.field("state", &"<state>")
.field("active_heals_count", &"<active_heals>")
.field("queue_length", &"<queue>")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
#[test]
fn test_priority_queue_ordering() {
let mut queue = PriorityHealQueue::new();
// Add requests with different priorities
let low_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Low,
);
let normal_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket2".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let high_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket3".to_string(),
},
HealOptions::default(),
HealPriority::High,
);
let urgent_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket4".to_string(),
},
HealOptions::default(),
HealPriority::Urgent,
);
// Add in random order: low, high, normal, urgent
assert!(queue.push(low_req));
assert!(queue.push(high_req));
assert!(queue.push(normal_req));
assert!(queue.push(urgent_req));
assert_eq!(queue.len(), 4);
// Should pop in priority order: urgent, high, normal, low
let popped1 = queue.pop().unwrap();
assert_eq!(popped1.priority, HealPriority::Urgent);
let popped2 = queue.pop().unwrap();
assert_eq!(popped2.priority, HealPriority::High);
let popped3 = queue.pop().unwrap();
assert_eq!(popped3.priority, HealPriority::Normal);
let popped4 = queue.pop().unwrap();
assert_eq!(popped4.priority, HealPriority::Low);
assert_eq!(queue.len(), 0);
}
#[test]
fn test_priority_queue_fifo_same_priority() {
let mut queue = PriorityHealQueue::new();
// Add multiple requests with same priority
let req1 = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let req2 = HealRequest::new(
HealType::Bucket {
bucket: "bucket2".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let req3 = HealRequest::new(
HealType::Bucket {
bucket: "bucket3".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let id1 = req1.id.clone();
let id2 = req2.id.clone();
let id3 = req3.id.clone();
assert!(queue.push(req1));
assert!(queue.push(req2));
assert!(queue.push(req3));
// Should maintain FIFO order for same priority
let popped1 = queue.pop().unwrap();
assert_eq!(popped1.id, id1);
let popped2 = queue.pop().unwrap();
assert_eq!(popped2.id, id2);
let popped3 = queue.pop().unwrap();
assert_eq!(popped3.id, id3);
}
#[test]
fn test_priority_queue_deduplication() {
let mut queue = PriorityHealQueue::new();
let req1 = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::Normal,
);
let req2 = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::High,
);
// First request should be added
assert!(queue.push(req1));
assert_eq!(queue.len(), 1);
// Second request with same object should be rejected (duplicate)
assert!(!queue.push(req2));
assert_eq!(queue.len(), 1);
}
#[test]
fn test_priority_queue_contains_erasure_set() {
let mut queue = PriorityHealQueue::new();
let req = HealRequest::new(
HealType::ErasureSet {
buckets: vec!["bucket1".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
assert!(queue.push(req));
assert!(queue.contains_erasure_set("pool_0_set_1"));
assert!(!queue.contains_erasure_set("pool_0_set_2"));
}
#[test]
fn test_priority_queue_dedup_key_generation() {
// Test different heal types generate different keys
let obj_req = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::Normal,
);
let bucket_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let erasure_req = HealRequest::new(
HealType::ErasureSet {
buckets: vec!["bucket1".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let obj_key = PriorityHealQueue::make_dedup_key(&obj_req);
let bucket_key = PriorityHealQueue::make_dedup_key(&bucket_req);
let erasure_key = PriorityHealQueue::make_dedup_key(&erasure_req);
// All keys should be different
assert_ne!(obj_key, bucket_key);
assert_ne!(obj_key, erasure_key);
assert_ne!(bucket_key, erasure_key);
assert!(obj_key.starts_with("object:"));
assert!(bucket_key.starts_with("bucket:"));
assert!(erasure_key.starts_with("erasure_set:"));
}
#[test]
fn test_priority_queue_mixed_priorities_and_types() {
let mut queue = PriorityHealQueue::new();
// Add various requests
let requests = vec![
(
HealType::Object {
bucket: "b1".to_string(),
object: "o1".to_string(),
version_id: None,
},
HealPriority::Low,
),
(
HealType::Bucket {
bucket: "b2".to_string(),
},
HealPriority::Urgent,
),
(
HealType::ErasureSet {
buckets: vec!["b3".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealPriority::Normal,
),
(
HealType::Object {
bucket: "b4".to_string(),
object: "o4".to_string(),
version_id: None,
},
HealPriority::High,
),
];
for (heal_type, priority) in requests {
let req = HealRequest::new(heal_type, HealOptions::default(), priority);
queue.push(req);
}
assert_eq!(queue.len(), 4);
// Check they come out in priority order
let priorities: Vec<HealPriority> = (0..4).filter_map(|_| queue.pop().map(|r| r.priority)).collect();
assert_eq!(
priorities,
vec![
HealPriority::Urgent,
HealPriority::High,
HealPriority::Normal,
HealPriority::Low,
]
);
}
#[test]
fn test_priority_queue_stats() {
let mut queue = PriorityHealQueue::new();
// Add requests with different priorities
for _ in 0..3 {
queue.push(HealRequest::new(
HealType::Bucket {
bucket: format!("bucket-low-{}", queue.len()),
},
HealOptions::default(),
HealPriority::Low,
));
}
for _ in 0..2 {
queue.push(HealRequest::new(
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/storage.rs | crates/ahm/src/heal/storage.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use async_trait::async_trait;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
disk::{DiskStore, endpoint::Endpoint},
store::ECStore,
store_api::{BucketInfo, ObjectIO, StorageAPI},
};
use rustfs_madmin::heal_commands::HealResultItem;
use std::sync::Arc;
use tracing::{debug, error, info, warn};
/// Disk status for heal operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiskStatus {
/// Ok
Ok,
/// Offline
Offline,
/// Corrupt
Corrupt,
/// Missing
Missing,
/// Permission denied
PermissionDenied,
/// Faulty
Faulty,
/// Root mount
RootMount,
/// Unknown
Unknown,
/// Unformatted
Unformatted,
}
/// Heal storage layer interface
#[async_trait]
pub trait HealStorageAPI: Send + Sync {
/// Get object meta
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>>;
/// Get object data
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>>;
/// Put object data
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()>;
/// Delete object
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()>;
/// Check object integrity
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool>;
/// EC decode rebuild
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>>;
/// Get disk status
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus>;
/// Format disk
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()>;
/// Get bucket info
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>>;
/// Fix bucket metadata
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()>;
/// Get all buckets
async fn list_buckets(&self) -> Result<Vec<BucketInfo>>;
/// Check object exists
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool>;
/// Get object size
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>>;
/// Get object checksum
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>>;
/// Heal object using ecstore
async fn heal_object(
&self,
bucket: &str,
object: &str,
version_id: Option<&str>,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)>;
/// Heal bucket using ecstore
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;
/// Heal format using ecstore
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
/// List objects for healing (returns all objects, may use significant memory for large buckets)
///
/// WARNING: This method loads all objects into memory at once. For buckets with many objects,
/// consider using `list_objects_for_heal_page` instead to process objects in pages.
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;
/// List objects for healing with pagination (returns one page and continuation token)
/// Returns (objects, next_continuation_token, is_truncated)
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)>;
/// Get disk for resume functionality
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}
/// ECStore Heal storage layer implementation
pub struct ECStoreHealStorage {
ecstore: Arc<ECStore>,
}
impl ECStoreHealStorage {
pub fn new(ecstore: Arc<ECStore>) -> Self {
Self { ecstore }
}
}
#[async_trait]
impl HealStorageAPI for ECStoreHealStorage {
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
debug!("Getting object meta: {}/{}", bucket, object);
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
Ok(info) => Ok(Some(info)),
Err(e) => {
// Map ObjectNotFound to None to align with Option return type
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
debug!("Object meta not found: {}/{}", bucket, object);
Ok(None)
} else {
error!("Failed to get object meta: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
}
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>> {
debug!("Getting object data: {}/{}", bucket, object);
let reader = match (*self.ecstore)
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
.await
{
Ok(reader) => reader,
Err(e) => {
error!("Failed to get object: {}/{} - {}", bucket, object, e);
return Err(Error::other(e));
}
};
// WARNING: Returning Vec<u8> for large objects is dangerous. To avoid OOM, cap the read size.
// If needed, refactor callers to stream instead of buffering entire object.
const MAX_READ_BYTES: usize = 16 * 1024 * 1024; // 16 MiB cap
let mut buf = Vec::with_capacity(1024 * 1024);
use tokio::io::AsyncReadExt as _;
let mut n_read: usize = 0;
let mut stream = reader.stream;
loop {
// Read in chunks
let mut chunk = vec![0u8; 1024 * 1024];
match stream.read(&mut chunk).await {
Ok(0) => break,
Ok(n) => {
buf.extend_from_slice(&chunk[..n]);
n_read += n;
if n_read > MAX_READ_BYTES {
warn!(
"Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
MAX_READ_BYTES, bucket, object
);
return Err(Error::other(format!(
"Object too large: {n_read} bytes (max: {MAX_READ_BYTES} bytes) for {bucket}/{object}"
)));
}
}
Err(e) => {
error!("Failed to read object data: {}/{} - {}", bucket, object, e);
return Err(Error::other(e));
}
}
}
Ok(Some(buf))
}
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()> {
debug!("Putting object data: {}/{} ({} bytes)", bucket, object, data.len());
let mut reader = rustfs_ecstore::store_api::PutObjReader::from_vec(data.to_vec());
match (*self.ecstore)
.put_object(bucket, object, &mut reader, &Default::default())
.await
{
Ok(_) => {
info!("Successfully put object: {}/{}", bucket, object);
Ok(())
}
Err(e) => {
error!("Failed to put object: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()> {
debug!("Deleting object: {}/{}", bucket, object);
match self.ecstore.delete_object(bucket, object, Default::default()).await {
Ok(_) => {
info!("Successfully deleted object: {}/{}", bucket, object);
Ok(())
}
Err(e) => {
error!("Failed to delete object: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool> {
debug!("Verifying object integrity: {}/{}", bucket, object);
// Check object metadata first
match self.get_object_meta(bucket, object).await? {
Some(obj_info) => {
if obj_info.size < 0 {
warn!("Object has invalid size: {}/{}", bucket, object);
return Ok(false);
}
// Stream-read the object to a sink to avoid loading into memory
match (*self.ecstore)
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
.await
{
Ok(reader) => {
let mut stream = reader.stream;
match tokio::io::copy(&mut stream, &mut tokio::io::sink()).await {
Ok(_) => {
info!("Object integrity check passed: {}/{}", bucket, object);
Ok(true)
}
Err(e) => {
warn!("Object stream read failed: {}/{} - {}", bucket, object, e);
Ok(false)
}
}
}
Err(e) => {
warn!("Failed to get object reader: {}/{} - {}", bucket, object, e);
Ok(false)
}
}
}
None => {
warn!("Object metadata not found: {}/{}", bucket, object);
Ok(false)
}
}
}
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>> {
debug!("EC decode rebuild: {}/{}", bucket, object);
// Use ecstore's heal_object to rebuild the object
let heal_opts = HealOpts {
recursive: false,
dry_run: false,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
match self.heal_object(bucket, object, None, &heal_opts).await {
Ok((_result, error)) => {
if error.is_some() {
return Err(Error::TaskExecutionFailed {
message: format!("Heal failed: {error:?}"),
});
}
// After healing, try to read the object data
match self.get_object_data(bucket, object).await? {
Some(data) => {
info!("EC decode rebuild successful: {}/{} ({} bytes)", bucket, object, data.len());
Ok(data)
}
None => {
error!("Object not found after heal: {}/{}", bucket, object);
Err(Error::TaskExecutionFailed {
message: format!("Object not found after heal: {bucket}/{object}"),
})
}
}
}
Err(e) => {
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
Err(e)
}
}
}
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus> {
debug!("Getting disk status: {:?}", endpoint);
// TODO: implement disk status check using ecstore
// For now, return Ok status
info!("Disk status check: {:?} - OK", endpoint);
Ok(DiskStatus::Ok)
}
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()> {
debug!("Formatting disk: {:?}", endpoint);
// Use ecstore's heal_format
match self.heal_format(false).await {
Ok((_, error)) => {
if error.is_some() {
return Err(Error::other(format!("Format failed: {error:?}")));
}
info!("Successfully formatted disk: {:?}", endpoint);
Ok(())
}
Err(e) => {
error!("Failed to format disk: {:?} - {}", endpoint, e);
Err(e)
}
}
}
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>> {
debug!("Getting bucket info: {}", bucket);
match self.ecstore.get_bucket_info(bucket, &Default::default()).await {
Ok(info) => Ok(Some(info)),
Err(e) => {
error!("Failed to get bucket info: {} - {}", bucket, e);
Err(Error::other(e))
}
}
}
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()> {
debug!("Healing bucket metadata: {}", bucket);
let heal_opts = HealOpts {
recursive: true,
dry_run: false,
remove: false,
recreate: false,
scan_mode: HealScanMode::Normal,
update_parity: false,
no_lock: false,
pool: None,
set: None,
};
match self.heal_bucket(bucket, &heal_opts).await {
Ok(_) => {
info!("Successfully healed bucket metadata: {}", bucket);
Ok(())
}
Err(e) => {
error!("Failed to heal bucket metadata: {} - {}", bucket, e);
Err(e)
}
}
}
async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
debug!("Listing buckets");
match self.ecstore.list_bucket(&Default::default()).await {
Ok(buckets) => Ok(buckets),
Err(e) => {
error!("Failed to list buckets: {}", e);
Err(Error::other(e))
}
}
}
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool> {
debug!("Checking object exists: {}/{}", bucket, object);
// Use get_object_info for efficient existence check without heavy heal operations
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
Ok(_) => Ok(true), // Object exists
Err(e) => {
// Map ObjectNotFound to false, other errors must be propagated!
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
debug!("Object not found: {}/{}", bucket, object);
Ok(false)
} else {
error!("Error checking object existence {}/{}: {}", bucket, object, e);
Err(Error::other(e))
}
}
}
}
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>> {
debug!("Getting object size: {}/{}", bucket, object);
match self.get_object_meta(bucket, object).await {
Ok(Some(obj_info)) => Ok(Some(obj_info.size as u64)),
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>> {
debug!("Getting object checksum: {}/{}", bucket, object);
match self.get_object_meta(bucket, object).await {
Ok(Some(obj_info)) => {
// Convert checksum bytes to hex string
let checksum = obj_info.checksum.iter().map(|b| format!("{b:02x}")).collect::<String>();
Ok(Some(checksum))
}
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
async fn heal_object(
&self,
bucket: &str,
object: &str,
version_id: Option<&str>,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)> {
debug!("Healing object: {}/{}", bucket, object);
let version_id_str = version_id.unwrap_or("");
match self.ecstore.heal_object(bucket, object, version_id_str, opts).await {
Ok((result, ecstore_error)) => {
let error = ecstore_error.map(Error::other);
info!("Heal object completed: {}/{} - result: {:?}, error: {:?}", bucket, object, result, error);
Ok((result, error))
}
Err(e) => {
error!("Heal object failed: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
debug!("Healing bucket: {}", bucket);
match self.ecstore.heal_bucket(bucket, opts).await {
Ok(result) => {
info!("Heal bucket completed: {} - result: {:?}", bucket, result);
Ok(result)
}
Err(e) => {
error!("Heal bucket failed: {} - {}", bucket, e);
Err(Error::other(e))
}
}
}
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
debug!("Healing format (dry_run: {})", dry_run);
match self.ecstore.heal_format(dry_run).await {
Ok((result, ecstore_error)) => {
let error = ecstore_error.map(Error::other);
info!("Heal format completed - result: {:?}, error: {:?}", result, error);
Ok((result, error))
}
Err(e) => {
error!("Heal format failed: {}", e);
Err(Error::other(e))
}
}
}
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
debug!("Listing objects for heal: {}/{}", bucket, prefix);
warn!(
"list_objects_for_heal loads all objects into memory. For large buckets, consider using list_objects_for_heal_page instead."
);
let mut all_objects = Vec::new();
let mut continuation_token: Option<String> = None;
loop {
let (page_objects, next_token, is_truncated) = self
.list_objects_for_heal_page(bucket, prefix, continuation_token.as_deref())
.await?;
all_objects.extend(page_objects);
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}/{}", bucket, prefix);
break;
}
}
info!("Found {} objects for heal in {}/{}", all_objects.len(), bucket, prefix);
Ok(all_objects)
}
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)> {
debug!("Listing objects for heal (page): {}/{}", bucket, prefix);
const MAX_KEYS: i32 = 1000;
let continuation_token_opt = continuation_token.map(|s| s.to_string());
// Use list_objects_v2 to get objects with pagination
let list_info = match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, continuation_token_opt, None, MAX_KEYS, false, None, false)
.await
{
Ok(info) => info,
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
return Err(Error::other(e));
}
};
// Collect objects from this page
let page_objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
let page_count = page_objects.len();
debug!("Listed {} objects (page) for heal in {}/{}", page_count, bucket, prefix);
Ok((page_objects, list_info.next_continuation_token, list_info.is_truncated))
}
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
debug!("Getting disk for resume: {}", set_disk_id);
// Parse set_disk_id to extract pool and set indices
let (pool_idx, set_idx) = crate::heal::utils::parse_set_disk_id(set_disk_id)?;
// Get the first available disk from the set
let disks = self
.ecstore
.get_disks(pool_idx, set_idx)
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to get disks for pool {pool_idx} set {set_idx}: {e}"),
})?;
// Find the first available disk
if let Some(disk_store) = disks.into_iter().flatten().next() {
info!("Found disk for resume: {:?}", disk_store);
return Ok(disk_store);
}
Err(Error::TaskExecutionFailed {
message: format!("No available disk found for set_disk_id: {set_disk_id}"),
})
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/progress.rs | crates/ahm/src/heal/progress.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct HealProgress {
/// Objects scanned
pub objects_scanned: u64,
/// Objects healed
pub objects_healed: u64,
/// Objects failed
pub objects_failed: u64,
/// Bytes processed
pub bytes_processed: u64,
/// Current object
pub current_object: Option<String>,
/// Progress percentage
pub progress_percentage: f64,
/// Start time
pub start_time: Option<SystemTime>,
/// Last update time
pub last_update_time: Option<SystemTime>,
/// Estimated completion time
pub estimated_completion_time: Option<SystemTime>,
}
impl HealProgress {
pub fn new() -> Self {
Self {
start_time: Some(SystemTime::now()),
last_update_time: Some(SystemTime::now()),
..Default::default()
}
}
pub fn update_progress(&mut self, scanned: u64, healed: u64, failed: u64, bytes: u64) {
self.objects_scanned = scanned;
self.objects_healed = healed;
self.objects_failed = failed;
self.bytes_processed = bytes;
self.last_update_time = Some(SystemTime::now());
// calculate progress percentage
let total = scanned + healed + failed;
if total > 0 {
self.progress_percentage = (healed as f64 / total as f64) * 100.0;
}
}
pub fn set_current_object(&mut self, object: Option<String>) {
self.current_object = object;
self.last_update_time = Some(SystemTime::now());
}
pub fn is_completed(&self) -> bool {
self.progress_percentage >= 100.0
|| self.objects_scanned > 0 && self.objects_healed + self.objects_failed >= self.objects_scanned
}
pub fn get_success_rate(&self) -> f64 {
let total = self.objects_healed + self.objects_failed;
if total > 0 {
(self.objects_healed as f64 / total as f64) * 100.0
} else {
0.0
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealStatistics {
/// Total heal tasks
pub total_tasks: u64,
/// Successful tasks
pub successful_tasks: u64,
/// Failed tasks
pub failed_tasks: u64,
/// Running tasks
pub running_tasks: u64,
/// Total healed objects
pub total_objects_healed: u64,
/// Total healed bytes
pub total_bytes_healed: u64,
/// Last update time
pub last_update_time: SystemTime,
}
impl Default for HealStatistics {
fn default() -> Self {
Self::new()
}
}
impl HealStatistics {
pub fn new() -> Self {
Self {
total_tasks: 0,
successful_tasks: 0,
failed_tasks: 0,
running_tasks: 0,
total_objects_healed: 0,
total_bytes_healed: 0,
last_update_time: SystemTime::now(),
}
}
pub fn update_task_completion(&mut self, success: bool) {
if success {
self.successful_tasks += 1;
} else {
self.failed_tasks += 1;
}
self.last_update_time = SystemTime::now();
}
pub fn update_running_tasks(&mut self, count: u64) {
self.running_tasks = count;
self.last_update_time = SystemTime::now();
}
pub fn add_healed_objects(&mut self, count: u64, bytes: u64) {
self.total_objects_healed += count;
self.total_bytes_healed += bytes;
self.last_update_time = SystemTime::now();
}
pub fn get_success_rate(&self) -> f64 {
let total = self.successful_tasks + self.failed_tasks;
if total > 0 {
(self.successful_tasks as f64 / total as f64) * 100.0
} else {
0.0
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_heal_progress_new() {
let progress = HealProgress::new();
assert_eq!(progress.objects_scanned, 0);
assert_eq!(progress.objects_healed, 0);
assert_eq!(progress.objects_failed, 0);
assert_eq!(progress.bytes_processed, 0);
assert_eq!(progress.progress_percentage, 0.0);
assert!(progress.start_time.is_some());
assert!(progress.last_update_time.is_some());
assert!(progress.current_object.is_none());
}
#[test]
fn test_heal_progress_update_progress() {
let mut progress = HealProgress::new();
progress.update_progress(10, 8, 2, 1024);
assert_eq!(progress.objects_scanned, 10);
assert_eq!(progress.objects_healed, 8);
assert_eq!(progress.objects_failed, 2);
assert_eq!(progress.bytes_processed, 1024);
// Progress percentage should be calculated based on healed/total
// total = scanned + healed + failed = 10 + 8 + 2 = 20
// healed/total = 8/20 = 0.4 = 40%
assert!((progress.progress_percentage - 40.0).abs() < 0.001);
assert!(progress.last_update_time.is_some());
}
#[test]
fn test_heal_progress_update_progress_zero_total() {
let mut progress = HealProgress::new();
progress.update_progress(0, 0, 0, 0);
assert_eq!(progress.progress_percentage, 0.0);
}
#[test]
fn test_heal_progress_update_progress_all_healed() {
let mut progress = HealProgress::new();
// When scanned=0, healed=10, failed=0: total=10, progress = 10/10 = 100%
progress.update_progress(0, 10, 0, 2048);
// All healed, should be 100%
assert!((progress.progress_percentage - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_progress_set_current_object() {
let mut progress = HealProgress::new();
let initial_time = progress.last_update_time;
// Small delay to ensure time difference
std::thread::sleep(std::time::Duration::from_millis(10));
progress.set_current_object(Some("test-bucket/test-object".to_string()));
assert_eq!(progress.current_object, Some("test-bucket/test-object".to_string()));
assert!(progress.last_update_time.is_some());
// last_update_time should be updated
assert_ne!(progress.last_update_time, initial_time);
}
#[test]
fn test_heal_progress_set_current_object_none() {
let mut progress = HealProgress::new();
progress.set_current_object(Some("test".to_string()));
progress.set_current_object(None);
assert!(progress.current_object.is_none());
}
#[test]
fn test_heal_progress_is_completed_by_percentage() {
let mut progress = HealProgress::new();
progress.update_progress(10, 10, 0, 1024);
assert!(progress.is_completed());
}
#[test]
fn test_heal_progress_is_completed_by_processed() {
let mut progress = HealProgress::new();
progress.objects_scanned = 10;
progress.objects_healed = 8;
progress.objects_failed = 2;
// healed + failed = 8 + 2 = 10 >= scanned = 10
assert!(progress.is_completed());
}
#[test]
fn test_heal_progress_is_not_completed() {
let mut progress = HealProgress::new();
progress.objects_scanned = 10;
progress.objects_healed = 5;
progress.objects_failed = 2;
// healed + failed = 5 + 2 = 7 < scanned = 10
assert!(!progress.is_completed());
}
#[test]
fn test_heal_progress_get_success_rate() {
let mut progress = HealProgress::new();
progress.objects_healed = 8;
progress.objects_failed = 2;
// success_rate = 8 / (8 + 2) * 100 = 80%
assert!((progress.get_success_rate() - 80.0).abs() < 0.001);
}
#[test]
fn test_heal_progress_get_success_rate_zero_total() {
let progress = HealProgress::new();
// No healed or failed objects
assert_eq!(progress.get_success_rate(), 0.0);
}
#[test]
fn test_heal_progress_get_success_rate_all_success() {
let mut progress = HealProgress::new();
progress.objects_healed = 10;
progress.objects_failed = 0;
assert!((progress.get_success_rate() - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_new() {
let stats = HealStatistics::new();
assert_eq!(stats.total_tasks, 0);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 0);
assert_eq!(stats.running_tasks, 0);
assert_eq!(stats.total_objects_healed, 0);
assert_eq!(stats.total_bytes_healed, 0);
}
#[test]
fn test_heal_statistics_default() {
let stats = HealStatistics::default();
assert_eq!(stats.total_tasks, 0);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 0);
}
#[test]
fn test_heal_statistics_update_task_completion_success() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.update_task_completion(true);
assert_eq!(stats.successful_tasks, 1);
assert_eq!(stats.failed_tasks, 0);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_update_task_completion_failure() {
let mut stats = HealStatistics::new();
stats.update_task_completion(false);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 1);
}
#[test]
fn test_heal_statistics_update_running_tasks() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.update_running_tasks(5);
assert_eq!(stats.running_tasks, 5);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_add_healed_objects() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.add_healed_objects(10, 10240);
assert_eq!(stats.total_objects_healed, 10);
assert_eq!(stats.total_bytes_healed, 10240);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_add_healed_objects_accumulative() {
let mut stats = HealStatistics::new();
stats.add_healed_objects(5, 5120);
stats.add_healed_objects(3, 3072);
assert_eq!(stats.total_objects_healed, 8);
assert_eq!(stats.total_bytes_healed, 8192);
}
#[test]
fn test_heal_statistics_get_success_rate() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 8;
stats.failed_tasks = 2;
// success_rate = 8 / (8 + 2) * 100 = 80%
assert!((stats.get_success_rate() - 80.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_get_success_rate_zero_total() {
let stats = HealStatistics::new();
assert_eq!(stats.get_success_rate(), 0.0);
}
#[test]
fn test_heal_statistics_get_success_rate_all_success() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 10;
stats.failed_tasks = 0;
assert!((stats.get_success_rate() - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_get_success_rate_all_failure() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 0;
stats.failed_tasks = 5;
assert_eq!(stats.get_success_rate(), 0.0);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/utils.rs | crates/ahm/src/heal/utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
/// Prefix for pool index in set disk identifiers.
const POOL_PREFIX: &str = "pool";
/// Prefix for set index in set disk identifiers.
const SET_PREFIX: &str = "set";
/// Format a set disk identifier using unsigned indices.
pub fn format_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
format!("{POOL_PREFIX}_{pool_idx}_{SET_PREFIX}_{set_idx}")
}
/// Format a set disk identifier from signed indices.
pub fn format_set_disk_id_from_i32(pool_idx: i32, set_idx: i32) -> Option<String> {
if pool_idx < 0 || set_idx < 0 {
None
} else {
Some(format_set_disk_id(pool_idx as usize, set_idx as usize))
}
}
/// Normalise external set disk identifiers into the canonical format.
pub fn normalize_set_disk_id(raw: &str) -> Option<String> {
if raw.starts_with(&format!("{POOL_PREFIX}_")) {
Some(raw.to_string())
} else {
parse_compact_set_disk_id(raw).map(|(pool, set)| format_set_disk_id(pool, set))
}
}
/// Parse a canonical set disk identifier into pool/set indices.
pub fn parse_set_disk_id(raw: &str) -> Result<(usize, usize)> {
let parts: Vec<&str> = raw.split('_').collect();
if parts.len() != 4 || parts[0] != POOL_PREFIX || parts[2] != SET_PREFIX {
return Err(Error::TaskExecutionFailed {
message: format!("Invalid set_disk_id format: {raw}"),
});
}
let pool_idx = parts[1].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
message: format!("Invalid pool index in set_disk_id: {raw}"),
})?;
let set_idx = parts[3].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
message: format!("Invalid set index in set_disk_id: {raw}"),
})?;
Ok((pool_idx, set_idx))
}
fn parse_compact_set_disk_id(raw: &str) -> Option<(usize, usize)> {
let (pool, set) = raw.split_once('_')?;
let pool_idx = pool.parse::<usize>().ok()?;
let set_idx = set.parse::<usize>().ok()?;
Some((pool_idx, set_idx))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn format_from_unsigned_indices() {
assert_eq!(format_set_disk_id(1, 2), "pool_1_set_2");
}
#[test]
fn format_from_signed_indices() {
assert_eq!(format_set_disk_id_from_i32(3, 4), Some("pool_3_set_4".into()));
assert_eq!(format_set_disk_id_from_i32(-1, 4), None);
}
#[test]
fn normalize_compact_identifier() {
assert_eq!(normalize_set_disk_id("3_5"), Some("pool_3_set_5".to_string()));
}
#[test]
fn normalize_prefixed_identifier() {
assert_eq!(normalize_set_disk_id("pool_7_set_1"), Some("pool_7_set_1".to_string()));
}
#[test]
fn normalize_invalid_identifier() {
assert_eq!(normalize_set_disk_id("invalid"), None);
}
#[test]
fn parse_prefixed_identifier() {
assert_eq!(parse_set_disk_id("pool_9_set_3").unwrap(), (9, 3));
}
#[test]
fn parse_invalid_identifier() {
assert!(parse_set_disk_id("bad").is_err());
assert!(parse_set_disk_id("pool_X_set_1").is_err());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/mod.rs | crates/ahm/src/heal/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod channel;
pub mod erasure_healer;
pub mod event;
pub mod manager;
pub mod progress;
pub mod resume;
pub mod storage;
pub mod task;
pub mod utils;
pub use erasure_healer::ErasureSetHealer;
pub use manager::HealManager;
pub use resume::{CheckpointManager, ResumeCheckpoint, ResumeManager, ResumeState, ResumeUtils};
pub use task::{HealOptions, HealPriority, HealRequest, HealTask, HealType};
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/task.rs | crates/ahm/src/heal/task.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{ErasureSetHealer, progress::HealProgress, storage::HealStorageAPI};
use crate::{Error, Result};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use serde::{Deserialize, Serialize};
use std::{
future::Future,
sync::Arc,
time::{Duration, Instant, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
use uuid::Uuid;
/// Heal type
#[derive(Debug, Clone)]
pub enum HealType {
/// Object heal
Object {
bucket: String,
object: String,
version_id: Option<String>,
},
/// Bucket heal
Bucket { bucket: String },
/// Erasure Set heal (includes disk format repair)
ErasureSet { buckets: Vec<String>, set_disk_id: String },
/// Metadata heal
Metadata { bucket: String, object: String },
/// MRF heal
MRF { meta_path: String },
/// EC decode heal
ECDecode {
bucket: String,
object: String,
version_id: Option<String>,
},
}
/// Heal priority
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum HealPriority {
/// Low priority
Low = 0,
/// Normal priority
#[default]
Normal = 1,
/// High priority
High = 2,
/// Urgent priority
Urgent = 3,
}
/// Heal options
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealOptions {
/// Scan mode
pub scan_mode: HealScanMode,
/// Whether to remove corrupted data
pub remove_corrupted: bool,
/// Whether to recreate
pub recreate_missing: bool,
/// Whether to update parity
pub update_parity: bool,
/// Whether to recursively process
pub recursive: bool,
/// Whether to dry run
pub dry_run: bool,
/// Timeout
pub timeout: Option<Duration>,
/// pool index
pub pool_index: Option<usize>,
/// set index
pub set_index: Option<usize>,
}
impl Default for HealOptions {
fn default() -> Self {
Self {
scan_mode: HealScanMode::Normal,
remove_corrupted: false,
recreate_missing: true,
update_parity: true,
recursive: false,
dry_run: false,
timeout: Some(Duration::from_secs(300)), // 5 minutes default timeout
pool_index: None,
set_index: None,
}
}
}
/// Heal task status
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealTaskStatus {
/// Pending
Pending,
/// Running
Running,
/// Completed
Completed,
/// Failed
Failed { error: String },
/// Cancelled
Cancelled,
/// Timeout
Timeout,
}
/// Heal request
#[derive(Debug, Clone)]
pub struct HealRequest {
/// Request ID
pub id: String,
/// Heal type
pub heal_type: HealType,
/// Heal options
pub options: HealOptions,
/// Priority
pub priority: HealPriority,
/// Created time
pub created_at: SystemTime,
}
impl HealRequest {
pub fn new(heal_type: HealType, options: HealOptions, priority: HealPriority) -> Self {
Self {
id: Uuid::new_v4().to_string(),
heal_type,
options,
priority,
created_at: SystemTime::now(),
}
}
pub fn object(bucket: String, object: String, version_id: Option<String>) -> Self {
Self::new(
HealType::Object {
bucket,
object,
version_id,
},
HealOptions::default(),
HealPriority::Normal,
)
}
pub fn bucket(bucket: String) -> Self {
Self::new(HealType::Bucket { bucket }, HealOptions::default(), HealPriority::Normal)
}
pub fn metadata(bucket: String, object: String) -> Self {
Self::new(HealType::Metadata { bucket, object }, HealOptions::default(), HealPriority::High)
}
pub fn ec_decode(bucket: String, object: String, version_id: Option<String>) -> Self {
Self::new(
HealType::ECDecode {
bucket,
object,
version_id,
},
HealOptions::default(),
HealPriority::Urgent,
)
}
}
/// Heal task
pub struct HealTask {
/// Task ID
pub id: String,
/// Heal type
pub heal_type: HealType,
/// Heal options
pub options: HealOptions,
/// Task status
pub status: Arc<RwLock<HealTaskStatus>>,
/// Progress tracking
pub progress: Arc<RwLock<HealProgress>>,
/// Created time
pub created_at: SystemTime,
/// Started time
pub started_at: Arc<RwLock<Option<SystemTime>>>,
/// Completed time
pub completed_at: Arc<RwLock<Option<SystemTime>>>,
/// Task start instant for timeout calculation (monotonic)
task_start_instant: Arc<RwLock<Option<Instant>>>,
/// Cancel token
pub cancel_token: tokio_util::sync::CancellationToken,
/// Storage layer interface
pub storage: Arc<dyn HealStorageAPI>,
}
impl HealTask {
pub fn from_request(request: HealRequest, storage: Arc<dyn HealStorageAPI>) -> Self {
Self {
id: request.id,
heal_type: request.heal_type,
options: request.options,
status: Arc::new(RwLock::new(HealTaskStatus::Pending)),
progress: Arc::new(RwLock::new(HealProgress::new())),
created_at: request.created_at,
started_at: Arc::new(RwLock::new(None)),
completed_at: Arc::new(RwLock::new(None)),
task_start_instant: Arc::new(RwLock::new(None)),
cancel_token: tokio_util::sync::CancellationToken::new(),
storage,
}
}
async fn remaining_timeout(&self) -> Result<Option<Duration>> {
if let Some(total) = self.options.timeout {
let start_instant = { *self.task_start_instant.read().await };
if let Some(started_at) = start_instant {
let elapsed = started_at.elapsed();
if elapsed >= total {
return Err(Error::TaskTimeout);
}
return Ok(Some(total - elapsed));
}
Ok(Some(total))
} else {
Ok(None)
}
}
async fn check_control_flags(&self) -> Result<()> {
if self.cancel_token.is_cancelled() {
return Err(Error::TaskCancelled);
}
// Only interested in propagating an error if the timeout has expired;
// the actual Duration value is not needed here
let _ = self.remaining_timeout().await?;
Ok(())
}
async fn await_with_control<F, T>(&self, fut: F) -> Result<T>
where
F: Future<Output = Result<T>> + Send,
T: Send,
{
let cancel_token = self.cancel_token.clone();
if let Some(remaining) = self.remaining_timeout().await? {
if remaining.is_zero() {
return Err(Error::TaskTimeout);
}
let mut fut = Box::pin(fut);
tokio::select! {
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
_ = tokio::time::sleep(remaining) => Err(Error::TaskTimeout),
result = &mut fut => result,
}
} else {
tokio::select! {
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
result = fut => result,
}
}
}
#[tracing::instrument(skip(self), fields(task_id = %self.id, heal_type = ?self.heal_type))]
pub async fn execute(&self) -> Result<()> {
// update status and timestamps atomically to avoid race conditions
let now = SystemTime::now();
let start_instant = Instant::now();
{
let mut status = self.status.write().await;
let mut started_at = self.started_at.write().await;
let mut task_start_instant = self.task_start_instant.write().await;
*status = HealTaskStatus::Running;
*started_at = Some(now);
*task_start_instant = Some(start_instant);
}
info!("Task started");
let result = match &self.heal_type {
HealType::Object {
bucket,
object,
version_id,
} => self.heal_object(bucket, object, version_id.as_deref()).await,
HealType::Bucket { bucket } => self.heal_bucket(bucket).await,
HealType::Metadata { bucket, object } => self.heal_metadata(bucket, object).await,
HealType::MRF { meta_path } => self.heal_mrf(meta_path).await,
HealType::ECDecode {
bucket,
object,
version_id,
} => self.heal_ec_decode(bucket, object, version_id.as_deref()).await,
HealType::ErasureSet { buckets, set_disk_id } => self.heal_erasure_set(buckets.clone(), set_disk_id.clone()).await,
};
// update completed time and status
{
let mut completed_at = self.completed_at.write().await;
*completed_at = Some(SystemTime::now());
}
match &result {
Ok(_) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Completed;
info!("Task completed successfully");
}
Err(Error::TaskCancelled) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Cancelled;
info!("Heal task was cancelled: {}", self.id);
}
Err(Error::TaskTimeout) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Timeout;
warn!("Heal task timed out: {}", self.id);
}
Err(e) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Failed { error: e.to_string() };
error!("Heal task failed: {} with error: {}", self.id, e);
}
}
result
}
pub async fn cancel(&self) -> Result<()> {
self.cancel_token.cancel();
let mut status = self.status.write().await;
*status = HealTaskStatus::Cancelled;
info!("Heal task cancelled: {}", self.id);
Ok(())
}
pub async fn get_status(&self) -> HealTaskStatus {
self.status.read().await.clone()
}
pub async fn get_progress(&self) -> HealProgress {
self.progress.read().await.clone()
}
// specific heal implementation method
#[tracing::instrument(skip(self), fields(bucket = %bucket, object = %object, version_id = ?version_id))]
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Starting object heal workflow");
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("{bucket}/{object}")));
progress.update_progress(0, 4, 0, 0);
}
// Step 1: Check if object exists and get metadata
warn!("Step 1: Checking object existence and metadata");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
if self.options.recreate_missing {
info!("Attempting to recreate missing object: {}/{}", bucket, object);
return self.recreate_missing_object(bucket, object, version_id).await;
} else {
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: directly call ecstore to perform heal
info!("Step 2: Performing heal using ecstore");
let heal_opts = HealOpts {
recursive: self.options.recursive,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: self.options.scan_mode,
update_parity: self.options.update_parity,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
// Check if this is a "File not found" error during delete operations
let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
bucket, object
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Ok(());
}
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
} else {
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal object {bucket}/{object}: {e}"),
});
}
// Step 3: Verify heal result
info!("Step 3: Verifying heal result");
let object_size = result.object_size as u64;
info!(
object_size = object_size,
drives_healed = result.after.drives.len(),
"Heal completed successfully"
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
// Check if this is a "File not found" error during delete operations
let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
bucket, object
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Ok(());
}
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
} else {
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal object {bucket}/{object}: {e}"),
})
}
}
}
/// Recreate missing object (for EC decode scenarios)
async fn recreate_missing_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Attempting to recreate missing object: {}/{}", bucket, object);
// Use ecstore's heal_object with recreate option
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
match self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await
{
Ok((result, error)) => {
if let Some(e) = error {
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
return Err(Error::TaskExecutionFailed {
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
});
}
let object_size = result.object_size as u64;
info!("Successfully recreated missing object: {}/{} ({} bytes)", bucket, object, object_size);
{
let mut progress = self.progress.write().await;
progress.update_progress(4, 4, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
Err(Error::TaskExecutionFailed {
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_bucket(&self, bucket: &str) -> Result<()> {
info!("Healing bucket: {}", bucket);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("bucket: {bucket}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if bucket exists
info!("Step 1: Checking bucket existence");
self.check_control_flags().await?;
let bucket_exists = self.await_with_control(self.storage.get_bucket_info(bucket)).await?.is_some();
if !bucket_exists {
warn!("Bucket does not exist: {}", bucket);
return Err(Error::TaskExecutionFailed {
message: format!("Bucket not found: {bucket}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform bucket heal using ecstore
info!("Step 2: Performing bucket heal using ecstore");
let heal_opts = HealOpts {
recursive: self.options.recursive,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: self.options.scan_mode,
update_parity: self.options.update_parity,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self.await_with_control(self.storage.heal_bucket(bucket, &heal_opts)).await;
match heal_result {
Ok(result) => {
info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Bucket heal failed: {} - {}", bucket, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal bucket {bucket}: {e}"),
})
}
}
}
async fn heal_metadata(&self, bucket: &str, object: &str) -> Result<()> {
info!("Healing metadata: {}/{}", bucket, object);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("metadata: {bucket}/{object}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if object exists
info!("Step 1: Checking object existence");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform metadata heal using ecstore
info!("Step 2: Performing metadata heal using ecstore");
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: false,
scan_mode: HealScanMode::Deep,
update_parity: false,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, None, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
});
}
info!(
"Metadata heal completed successfully: {}/{} ({} drives)",
bucket,
object,
result.after.drives.len()
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_mrf(&self, meta_path: &str) -> Result<()> {
info!("Healing MRF: {}", meta_path);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("mrf: {meta_path}")));
progress.update_progress(0, 2, 0, 0);
}
// Parse meta_path to extract bucket and object
let parts: Vec<&str> = meta_path.split('/').collect();
if parts.len() < 2 {
return Err(Error::TaskExecutionFailed {
message: format!("Invalid meta path format: {meta_path}"),
});
}
let bucket = parts[0];
let object = parts[1..].join("/");
// Step 1: Perform MRF heal using ecstore
info!("Step 1: Performing MRF heal using ecstore");
let heal_opts = HealOpts {
recursive: true,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, &object, None, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("MRF heal failed: {} - {}", meta_path, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal MRF {meta_path}: {e}"),
});
}
info!("MRF heal completed successfully: {} ({} drives)", meta_path, result.after.drives.len());
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("MRF heal failed: {} - {}", meta_path, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal MRF {meta_path}: {e}"),
})
}
}
}
async fn heal_ec_decode(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Healing EC decode: {}/{}", bucket, object);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("ec_decode: {bucket}/{object}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if object exists
info!("Step 1: Checking object existence");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform EC decode heal using ecstore
info!("Step 2: Performing EC decode heal using ecstore");
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
});
}
let object_size = result.object_size as u64;
info!(
"EC decode heal completed successfully: {}/{} ({} bytes, {} drives)",
bucket,
object,
object_size,
result.after.drives.len()
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_erasure_set(&self, buckets: Vec<String>, set_disk_id: String) -> Result<()> {
info!("Healing Erasure Set: {} ({} buckets)", set_disk_id, buckets.len());
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("erasure_set: {} ({} buckets)", set_disk_id, buckets.len())));
progress.update_progress(0, 4, 0, 0);
}
let buckets = if buckets.is_empty() {
info!("No buckets specified, listing all buckets");
let bucket_infos = self.await_with_control(self.storage.list_buckets()).await?;
bucket_infos.into_iter().map(|info| info.name).collect()
} else {
buckets
};
// Step 1: Perform disk format heal using ecstore
info!("Step 1: Performing disk format heal using ecstore");
let format_result = self.await_with_control(self.storage.heal_format(self.options.dry_run)).await;
match format_result {
Ok((result, error)) => {
if let Some(e) = error {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/resume.rs | crates/ahm/src/heal/resume.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
use uuid::Uuid;
/// resume state file constants
const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";
/// Helper function to convert Path to &str, returning an error if conversion fails
fn path_to_str(path: &Path) -> Result<&str> {
path.to_str()
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {path:?}")))
}
/// resume state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeState {
/// task id
pub task_id: String,
/// task type
pub task_type: String,
/// set disk identifier (for erasure set tasks)
#[serde(default)]
pub set_disk_id: String,
/// start time
pub start_time: u64,
/// last update time
pub last_update: u64,
/// completed
pub completed: bool,
/// total objects
pub total_objects: u64,
/// processed objects
pub processed_objects: u64,
/// successful objects
pub successful_objects: u64,
/// failed objects
pub failed_objects: u64,
/// skipped objects
pub skipped_objects: u64,
/// current bucket
pub current_bucket: Option<String>,
/// current object
pub current_object: Option<String>,
/// completed buckets
pub completed_buckets: Vec<String>,
/// pending buckets
pub pending_buckets: Vec<String>,
/// error message
pub error_message: Option<String>,
/// retry count
pub retry_count: u32,
/// max retries
pub max_retries: u32,
}
impl ResumeState {
pub fn new(task_id: String, task_type: String, set_disk_id: String, buckets: Vec<String>) -> Self {
Self {
task_id,
task_type,
set_disk_id,
start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
completed: false,
total_objects: 0,
processed_objects: 0,
successful_objects: 0,
failed_objects: 0,
skipped_objects: 0,
current_bucket: None,
current_object: None,
completed_buckets: Vec::new(),
pending_buckets: buckets,
error_message: None,
retry_count: 0,
max_retries: 3,
}
}
pub fn update_progress(&mut self, processed: u64, successful: u64, failed: u64, skipped: u64) {
self.processed_objects = processed;
self.successful_objects = successful;
self.failed_objects = failed;
self.skipped_objects = skipped;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
self.current_bucket = bucket;
self.current_object = object;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn complete_bucket(&mut self, bucket: &str) {
if !self.completed_buckets.contains(&bucket.to_string()) {
self.completed_buckets.push(bucket.to_string());
}
if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
self.pending_buckets.remove(pos);
}
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn mark_completed(&mut self) {
self.completed = true;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn set_error(&mut self, error: String) {
self.error_message = Some(error);
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn increment_retry(&mut self) {
self.retry_count += 1;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn can_retry(&self) -> bool {
self.retry_count < self.max_retries
}
pub fn get_progress_percentage(&self) -> f64 {
if self.total_objects == 0 {
return 0.0;
}
(self.processed_objects as f64 / self.total_objects as f64) * 100.0
}
pub fn get_success_rate(&self) -> f64 {
let total = self.successful_objects + self.failed_objects;
if total == 0 {
return 0.0;
}
(self.successful_objects as f64 / total as f64) * 100.0
}
}
/// resume manager
pub struct ResumeManager {
disk: DiskStore,
state: Arc<RwLock<ResumeState>>,
}
impl ResumeManager {
/// create new resume manager
pub async fn new(
disk: DiskStore,
task_id: String,
task_type: String,
set_disk_id: String,
buckets: Vec<String>,
) -> Result<Self> {
let state = ResumeState::new(task_id, task_type, set_disk_id, buckets);
let manager = Self {
disk,
state: Arc::new(RwLock::new(state)),
};
// save initial state
manager.save_state().await?;
Ok(manager)
}
/// load resume state from disk
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
let state_data = Self::read_state_file(&disk, task_id).await?;
let state: ResumeState = serde_json::from_slice(&state_data).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to deserialize resume state: {e}"),
})?;
Ok(Self {
disk,
state: Arc::new(RwLock::new(state)),
})
}
/// check if resume state exists
pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
match path_to_str(&file_path) {
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
Ok(data) => !data.is_empty(),
Err(_) => false,
},
Err(_) => false,
}
}
/// get current state
pub async fn get_state(&self) -> ResumeState {
self.state.read().await.clone()
}
/// update progress
pub async fn update_progress(&self, processed: u64, successful: u64, failed: u64, skipped: u64) -> Result<()> {
let mut state = self.state.write().await;
state.update_progress(processed, successful, failed, skipped);
drop(state);
self.save_state().await
}
/// set current item
pub async fn set_current_item(&self, bucket: Option<String>, object: Option<String>) -> Result<()> {
let mut state = self.state.write().await;
state.set_current_item(bucket, object);
drop(state);
self.save_state().await
}
/// complete bucket
pub async fn complete_bucket(&self, bucket: &str) -> Result<()> {
let mut state = self.state.write().await;
state.complete_bucket(bucket);
drop(state);
self.save_state().await
}
/// mark task completed
pub async fn mark_completed(&self) -> Result<()> {
let mut state = self.state.write().await;
state.mark_completed();
drop(state);
self.save_state().await
}
/// set error message
pub async fn set_error(&self, error: String) -> Result<()> {
let mut state = self.state.write().await;
state.set_error(error);
drop(state);
self.save_state().await
}
/// increment retry count
pub async fn increment_retry(&self) -> Result<()> {
let mut state = self.state.write().await;
state.increment_retry();
drop(state);
self.save_state().await
}
/// cleanup resume state
pub async fn cleanup(&self) -> Result<()> {
let state = self.state.read().await;
let task_id = &state.task_id;
// delete state files
let state_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
let progress_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_PROGRESS_FILE}"));
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
// ignore delete errors, files may not exist
if let Ok(path_str) = path_to_str(&state_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
if let Ok(path_str) = path_to_str(&progress_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
if let Ok(path_str) = path_to_str(&checkpoint_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
info!("Cleaned up resume state for task: {}", task_id);
Ok(())
}
/// save state to disk
async fn save_state(&self) -> Result<()> {
let state = self.state.read().await;
let state_data = serde_json::to_vec(&*state).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to serialize resume state: {e}"),
})?;
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));
let path_str = path_to_str(&file_path)?;
self.disk
.write_all(RUSTFS_META_BUCKET, path_str, state_data.into())
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to save resume state: {e}"),
})?;
debug!("Saved resume state for task: {}", state.task_id);
Ok(())
}
/// read state file from disk
async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
let path_str = path_to_str(&file_path)?;
disk.read_all(RUSTFS_META_BUCKET, path_str)
.await
.map(|bytes| bytes.to_vec())
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to read resume state file: {e}"),
})
}
}
/// resume checkpoint
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeCheckpoint {
/// task id
pub task_id: String,
/// checkpoint time
pub checkpoint_time: u64,
/// current bucket index
pub current_bucket_index: usize,
/// current object index
pub current_object_index: usize,
/// processed objects
pub processed_objects: Vec<String>,
/// failed objects
pub failed_objects: Vec<String>,
/// skipped objects
pub skipped_objects: Vec<String>,
}
impl ResumeCheckpoint {
pub fn new(task_id: String) -> Self {
Self {
task_id,
checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
current_bucket_index: 0,
current_object_index: 0,
processed_objects: Vec::new(),
failed_objects: Vec::new(),
skipped_objects: Vec::new(),
}
}
pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
self.current_bucket_index = bucket_index;
self.current_object_index = object_index;
self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn add_processed_object(&mut self, object: String) {
if !self.processed_objects.contains(&object) {
self.processed_objects.push(object);
}
}
pub fn add_failed_object(&mut self, object: String) {
if !self.failed_objects.contains(&object) {
self.failed_objects.push(object);
}
}
pub fn add_skipped_object(&mut self, object: String) {
if !self.skipped_objects.contains(&object) {
self.skipped_objects.push(object);
}
}
}
/// resume checkpoint manager
pub struct CheckpointManager {
disk: DiskStore,
checkpoint: Arc<RwLock<ResumeCheckpoint>>,
}
impl CheckpointManager {
/// create new checkpoint manager
pub async fn new(disk: DiskStore, task_id: String) -> Result<Self> {
let checkpoint = ResumeCheckpoint::new(task_id);
let manager = Self {
disk,
checkpoint: Arc::new(RwLock::new(checkpoint)),
};
// save initial checkpoint
manager.save_checkpoint().await?;
Ok(manager)
}
/// load checkpoint from disk
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
let checkpoint_data = Self::read_checkpoint_file(&disk, task_id).await?;
let checkpoint: ResumeCheckpoint = serde_json::from_slice(&checkpoint_data).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to deserialize checkpoint: {e}"),
})?;
Ok(Self {
disk,
checkpoint: Arc::new(RwLock::new(checkpoint)),
})
}
/// check if checkpoint exists
pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
match path_to_str(&file_path) {
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
Ok(data) => !data.is_empty(),
Err(_) => false,
},
Err(_) => false,
}
}
/// get current checkpoint
pub async fn get_checkpoint(&self) -> ResumeCheckpoint {
self.checkpoint.read().await.clone()
}
/// update position
pub async fn update_position(&self, bucket_index: usize, object_index: usize) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.update_position(bucket_index, object_index);
drop(checkpoint);
self.save_checkpoint().await
}
/// add processed object
pub async fn add_processed_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_processed_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// add failed object
pub async fn add_failed_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_failed_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// add skipped object
pub async fn add_skipped_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_skipped_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// cleanup checkpoint
pub async fn cleanup(&self) -> Result<()> {
let checkpoint = self.checkpoint.read().await;
let task_id = &checkpoint.task_id;
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
if let Ok(path_str) = path_to_str(&checkpoint_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
info!("Cleaned up checkpoint for task: {}", task_id);
Ok(())
}
/// save checkpoint to disk
async fn save_checkpoint(&self) -> Result<()> {
let checkpoint = self.checkpoint.read().await;
let checkpoint_data = serde_json::to_vec(&*checkpoint).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to serialize checkpoint: {e}"),
})?;
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));
let path_str = path_to_str(&file_path)?;
self.disk
.write_all(RUSTFS_META_BUCKET, path_str, checkpoint_data.into())
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to save checkpoint: {e}"),
})?;
debug!("Saved checkpoint for task: {}", checkpoint.task_id);
Ok(())
}
/// read checkpoint file from disk
async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
let path_str = path_to_str(&file_path)?;
disk.read_all(RUSTFS_META_BUCKET, path_str)
.await
.map(|bytes| bytes.to_vec())
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to read checkpoint file: {e}"),
})
}
}
/// resume utils
pub struct ResumeUtils;
impl ResumeUtils {
/// generate unique task id
pub fn generate_task_id() -> String {
Uuid::new_v4().to_string()
}
/// check if task can be resumed
pub async fn can_resume_task(disk: &DiskStore, task_id: &str) -> bool {
ResumeManager::has_resume_state(disk, task_id).await
}
/// get all resumable task ids
pub async fn get_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
// List all files in the buckets metadata directory
let entries = match disk.list_dir("", RUSTFS_META_BUCKET, BUCKET_META_PREFIX, -1).await {
Ok(entries) => entries,
Err(e) => {
debug!("Failed to list resume state files: {}", e);
return Ok(Vec::new());
}
};
let mut task_ids = Vec::new();
// Filter files that end with ahm_resume_state.json and extract task IDs
for entry in entries {
if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
// Extract task ID from filename: {task_id}_ahm_resume_state.json
if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}"))
&& !task_id.is_empty()
{
task_ids.push(task_id.to_string());
}
}
}
debug!("Found {} resumable tasks: {:?}", task_ids.len(), task_ids);
Ok(task_ids)
}
/// cleanup expired resume states
pub async fn cleanup_expired_states(disk: &DiskStore, max_age_hours: u64) -> Result<()> {
let task_ids = Self::get_resumable_tasks(disk).await?;
let current_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
for task_id in task_ids {
if let Ok(resume_manager) = ResumeManager::load_from_disk(disk.clone(), &task_id).await {
let state = resume_manager.get_state().await;
let age_hours = (current_time - state.last_update) / 3600;
if age_hours > max_age_hours {
info!("Cleaning up expired resume state for task: {} (age: {} hours)", task_id, age_hours);
if let Err(e) = resume_manager.cleanup().await {
warn!("Failed to cleanup expired resume state for task {}: {}", task_id, e);
}
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_resume_state_creation() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
assert_eq!(state.task_id, task_id);
assert_eq!(state.task_type, "erasure_set");
assert!(!state.completed);
assert_eq!(state.processed_objects, 0);
assert_eq!(state.pending_buckets.len(), 2);
}
#[tokio::test]
async fn test_resume_state_progress() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string()];
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
state.update_progress(10, 8, 1, 1);
assert_eq!(state.processed_objects, 10);
assert_eq!(state.successful_objects, 8);
assert_eq!(state.failed_objects, 1);
assert_eq!(state.skipped_objects, 1);
let progress = state.get_progress_percentage();
assert_eq!(progress, 0.0); // total_objects is 0
state.total_objects = 100;
let progress = state.get_progress_percentage();
assert_eq!(progress, 10.0);
}
#[tokio::test]
async fn test_resume_state_bucket_completion() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
assert_eq!(state.pending_buckets.len(), 2);
assert_eq!(state.completed_buckets.len(), 0);
state.complete_bucket("bucket1");
assert_eq!(state.pending_buckets.len(), 1);
assert_eq!(state.completed_buckets.len(), 1);
assert!(state.completed_buckets.contains(&"bucket1".to_string()));
}
#[tokio::test]
async fn test_resume_utils() {
let task_id1 = ResumeUtils::generate_task_id();
let task_id2 = ResumeUtils::generate_task_id();
assert_ne!(task_id1, task_id2);
assert_eq!(task_id1.len(), 36); // UUID length
assert_eq!(task_id2.len(), 36);
}
#[tokio::test]
async fn test_get_resumable_tasks_integration() {
use rustfs_ecstore::disk::{DiskOption, endpoint::Endpoint, new_disk};
use tempfile::TempDir;
// Create a temporary directory for testing
let temp_dir = TempDir::new().unwrap();
let disk_path = temp_dir.path().join("test_disk");
std::fs::create_dir_all(&disk_path).unwrap();
// Create a local disk for testing
let endpoint = Endpoint::try_from(disk_path.to_string_lossy().as_ref()).unwrap();
let disk_option = DiskOption {
cleanup: false,
health_check: false,
};
let disk = new_disk(&endpoint, &disk_option).await.unwrap();
// Create necessary directories first (ignore if already exist)
let _ = disk.make_volume(RUSTFS_META_BUCKET).await;
let _ = disk.make_volume(&format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}")).await;
// Create some test resume state files
let task_ids = vec![
"test-task-1".to_string(),
"test-task-2".to_string(),
"test-task-3".to_string(),
];
// Save resume state files for each task
for task_id in &task_ids {
let state = ResumeState::new(
task_id.clone(),
"erasure_set".to_string(),
"pool_0_set_0".to_string(),
vec!["bucket1".to_string(), "bucket2".to_string()],
);
let state_data = serde_json::to_vec(&state).unwrap();
let file_path = format!("{BUCKET_META_PREFIX}/{task_id}_{RESUME_STATE_FILE}");
disk.write_all(RUSTFS_META_BUCKET, &file_path, state_data.into())
.await
.unwrap();
}
// Also create some non-resume state files to test filtering
let non_resume_files = vec![
"other_file.txt",
"task4_ahm_checkpoint.json",
"task5_ahm_progress.json",
"_ahm_resume_state.json", // Invalid: empty task ID
];
for file_name in non_resume_files {
let file_path = format!("{BUCKET_META_PREFIX}/{file_name}");
disk.write_all(RUSTFS_META_BUCKET, &file_path, b"test data".to_vec().into())
.await
.unwrap();
}
// Now call get_resumable_tasks to see if it finds the correct files
let found_task_ids = ResumeUtils::get_resumable_tasks(&disk).await.unwrap();
// Verify that only the valid resume state files are found
assert_eq!(found_task_ids.len(), 3);
for task_id in &task_ids {
assert!(found_task_ids.contains(task_id), "Task ID {task_id} not found");
}
// Verify that invalid files are not included
assert!(!found_task_ids.contains(&"".to_string()));
assert!(!found_task_ids.contains(&"task4".to_string()));
assert!(!found_task_ids.contains(&"task5".to_string()));
// Clean up
temp_dir.close().unwrap();
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/erasure_healer.rs | crates/ahm/src/heal/erasure_healer.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{
progress::HealProgress,
resume::{CheckpointManager, ResumeManager, ResumeUtils},
storage::HealStorageAPI,
};
use crate::{Error, Result};
use futures::future::join_all;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::disk::DiskStore;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, warn};
/// Erasure Set Healer
pub struct ErasureSetHealer {
storage: Arc<dyn HealStorageAPI>,
progress: Arc<RwLock<HealProgress>>,
cancel_token: tokio_util::sync::CancellationToken,
disk: DiskStore,
}
impl ErasureSetHealer {
pub fn new(
storage: Arc<dyn HealStorageAPI>,
progress: Arc<RwLock<HealProgress>>,
cancel_token: tokio_util::sync::CancellationToken,
disk: DiskStore,
) -> Self {
Self {
storage,
progress,
cancel_token,
disk,
}
}
/// execute erasure set heal with resume
#[tracing::instrument(skip(self, buckets), fields(set_disk_id = %set_disk_id, bucket_count = buckets.len()))]
pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
info!("Starting erasure set heal");
// 1. generate or get task id
let task_id = self.get_or_create_task_id(set_disk_id).await?;
// 2. initialize or resume resume state
let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, set_disk_id, buckets).await?;
// 3. execute heal with resume
let result = self
.execute_heal_with_resume(buckets, &resume_manager, &checkpoint_manager)
.await;
// 4. cleanup resume state
if result.is_ok() {
if let Err(e) = resume_manager.cleanup().await {
warn!("Failed to cleanup resume state: {}", e);
}
if let Err(e) = checkpoint_manager.cleanup().await {
warn!("Failed to cleanup checkpoint: {}", e);
}
}
result
}
/// get or create task id
async fn get_or_create_task_id(&self, set_disk_id: &str) -> Result<String> {
// check if there are resumable tasks
let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;
for task_id in resumable_tasks {
match ResumeManager::load_from_disk(self.disk.clone(), &task_id).await {
Ok(manager) => {
let state = manager.get_state().await;
if state.set_disk_id == set_disk_id && ResumeUtils::can_resume_task(&self.disk, &task_id).await {
info!("Found resumable task: {} for set {}", task_id, set_disk_id);
return Ok(task_id);
}
}
Err(e) => {
warn!("Failed to load resume state for task {}: {}", task_id, e);
}
}
}
// create new task id
let task_id = format!("{}_{}", set_disk_id, ResumeUtils::generate_task_id());
info!("Created new heal task: {}", task_id);
Ok(task_id)
}
/// initialize or resume resume state
async fn initialize_resume_state(
&self,
task_id: &str,
set_disk_id: &str,
buckets: &[String],
) -> Result<(ResumeManager, CheckpointManager)> {
// check if resume state exists
if ResumeManager::has_resume_state(&self.disk, task_id).await {
info!("Loading existing resume state for task: {}", task_id);
let resume_manager = ResumeManager::load_from_disk(self.disk.clone(), task_id).await?;
let checkpoint_manager = if CheckpointManager::has_checkpoint(&self.disk, task_id).await {
CheckpointManager::load_from_disk(self.disk.clone(), task_id).await?
} else {
CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?
};
Ok((resume_manager, checkpoint_manager))
} else {
info!("Creating new resume state for task: {}", task_id);
let resume_manager = ResumeManager::new(
self.disk.clone(),
task_id.to_string(),
"erasure_set".to_string(),
set_disk_id.to_string(),
buckets.to_vec(),
)
.await?;
let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;
Ok((resume_manager, checkpoint_manager))
}
}
/// execute heal with resume
async fn execute_heal_with_resume(
&self,
buckets: &[String],
resume_manager: &ResumeManager,
checkpoint_manager: &CheckpointManager,
) -> Result<()> {
// 1. get current state
let state = resume_manager.get_state().await;
let checkpoint = checkpoint_manager.get_checkpoint().await;
info!(
"Resuming from bucket {} object {}",
checkpoint.current_bucket_index, checkpoint.current_object_index
);
// 2. initialize progress
self.initialize_progress(buckets, &state).await;
// 3. continue from checkpoint
let current_bucket_index = checkpoint.current_bucket_index;
let mut current_object_index = checkpoint.current_object_index;
let mut processed_objects = state.processed_objects;
let mut successful_objects = state.successful_objects;
let mut failed_objects = state.failed_objects;
let mut skipped_objects = state.skipped_objects;
// 4. process remaining buckets
for (bucket_idx, bucket) in buckets.iter().enumerate().skip(current_bucket_index) {
// check if completed
if state.completed_buckets.contains(bucket) {
continue;
}
// update current bucket
resume_manager.set_current_item(Some(bucket.clone()), None).await?;
// process objects in bucket
let bucket_result = self
.heal_bucket_with_resume(
bucket,
bucket_idx,
&mut current_object_index,
&mut processed_objects,
&mut successful_objects,
&mut failed_objects,
&mut skipped_objects,
resume_manager,
checkpoint_manager,
)
.await;
// update checkpoint position
checkpoint_manager.update_position(bucket_idx, current_object_index).await?;
// update progress
resume_manager
.update_progress(processed_objects, successful_objects, failed_objects, skipped_objects)
.await?;
// check cancel status
if self.cancel_token.is_cancelled() {
warn!("Heal task cancelled");
return Err(Error::TaskCancelled);
}
// process bucket result
match bucket_result {
Ok(_) => {
resume_manager.complete_bucket(bucket).await?;
info!("Completed heal for bucket: {}", bucket);
}
Err(e) => {
error!("Failed to heal bucket {}: {}", bucket, e);
// continue to next bucket, do not interrupt the whole process
}
}
// reset object index
current_object_index = 0;
}
// 5. mark task completed
resume_manager.mark_completed().await?;
info!("Erasure set heal completed successfully");
Ok(())
}
/// heal single bucket with resume
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip(self, current_object_index, processed_objects, successful_objects, failed_objects, _skipped_objects, resume_manager, checkpoint_manager), fields(bucket = %bucket, bucket_index = bucket_index))]
async fn heal_bucket_with_resume(
&self,
bucket: &str,
bucket_index: usize,
current_object_index: &mut usize,
processed_objects: &mut u64,
successful_objects: &mut u64,
failed_objects: &mut u64,
_skipped_objects: &mut u64,
resume_manager: &ResumeManager,
checkpoint_manager: &CheckpointManager,
) -> Result<()> {
info!(target: "rustfs:ahm:heal_bucket_with_resume" ,"Starting heal for bucket from object index {}", current_object_index);
// 1. get bucket info
let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
Some(info) => info,
None => {
warn!("Bucket {} not found, skipping", bucket);
return Ok(());
}
};
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut global_obj_idx = 0usize;
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = self
.storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
// Process objects in this page
for object in objects {
// Skip objects before the checkpoint
if global_obj_idx < *current_object_index {
global_obj_idx += 1;
continue;
}
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(&object) {
global_obj_idx += 1;
continue;
}
// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
.await?;
// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, &object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
};
if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};
match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}
*processed_objects += 1;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}
// save checkpoint periodically
if global_obj_idx.is_multiple_of(100) {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
.await?;
}
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
Ok(())
}
/// initialize progress tracking
async fn initialize_progress(&self, _buckets: &[String], state: &crate::heal::resume::ResumeState) {
let mut progress = self.progress.write().await;
progress.objects_scanned = state.total_objects;
progress.objects_healed = state.successful_objects;
progress.objects_failed = state.failed_objects;
progress.bytes_processed = 0; // set to 0 for now, can be extended later
progress.set_current_object(state.current_object.clone());
}
/// heal all buckets concurrently
#[allow(dead_code)]
async fn heal_buckets_concurrently(&self, buckets: &[String]) -> Vec<Result<()>> {
// use semaphore to control concurrency, avoid too many concurrent healings
let semaphore = Arc::new(tokio::sync::Semaphore::new(4)); // max 4 concurrent healings
let heal_futures = buckets.iter().map(|bucket| {
let bucket = bucket.clone();
let storage = self.storage.clone();
let progress = self.progress.clone();
let semaphore = semaphore.clone();
let cancel_token = self.cancel_token.clone();
async move {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {e}")))?;
if cancel_token.is_cancelled() {
return Err(Error::TaskCancelled);
}
Self::heal_single_bucket(&storage, &bucket, &progress).await
}
});
// use join_all to process concurrently
join_all(heal_futures).await
}
/// heal single bucket
#[allow(dead_code)]
async fn heal_single_bucket(
storage: &Arc<dyn HealStorageAPI>,
bucket: &str,
progress: &Arc<RwLock<HealProgress>>,
) -> Result<()> {
info!("Starting heal for bucket: {}", bucket);
// 1. get bucket info
let _bucket_info = match storage.get_bucket_info(bucket).await? {
Some(info) => info,
None => {
warn!("Bucket {} not found, skipping", bucket);
return Ok(());
}
};
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut total_scanned = 0u64;
let mut total_success = 0u64;
let mut total_failed = 0u64;
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true, // remove corrupted data
recreate: true, // recreate missing data
..Default::default()
};
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
let page_count = objects.len() as u64;
total_scanned += page_count;
// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned = total_scanned;
}
// 4. heal objects concurrently for this page
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
// 5. count results for this page
let (success_count, failure_count) =
object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
total_success += success_count;
total_failed += failure_count;
// 6. update progress
{
let mut p = progress.write().await;
p.objects_healed = total_success;
p.objects_failed = total_failed;
p.set_current_object(Some(format!("processing bucket: {bucket} (page)")));
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
// 7. final progress update
{
let mut p = progress.write().await;
p.set_current_object(Some(format!("completed bucket: {bucket}")));
}
info!(
"Completed heal for bucket {}: {} success, {} failures (total scanned: {})",
bucket, total_success, total_failed, total_scanned
);
Ok(())
}
/// heal objects concurrently
#[allow(dead_code)]
async fn heal_objects_concurrently(
storage: &Arc<dyn HealStorageAPI>,
bucket: &str,
objects: &[String],
heal_opts: &HealOpts,
_progress: &Arc<RwLock<HealProgress>>,
) -> Vec<Result<()>> {
// use semaphore to control object healing concurrency
let semaphore = Arc::new(tokio::sync::Semaphore::new(8)); // max 8 concurrent object healings
let heal_futures = objects.iter().map(|object| {
let object = object.clone();
let bucket = bucket.to_string();
let storage = storage.clone();
let heal_opts = *heal_opts;
let semaphore = semaphore.clone();
async move {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {e}")))?;
match storage.heal_object(&bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
info!("Successfully healed object {}/{}", bucket, object);
Ok(())
}
Ok((_, Some(err))) => {
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
Err(Error::other(err))
}
Err(err) => {
warn!("Error healing object {}/{}: {}", bucket, object, err);
Err(err)
}
}
}
});
join_all(heal_futures).await
}
/// process results
#[allow(dead_code)]
async fn process_results(&self, results: Vec<Result<()>>) -> Result<()> {
let (success_count, failure_count): (usize, usize) =
results.into_iter().fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
let total = success_count + failure_count;
info!("Erasure set heal completed: {}/{} buckets successful", success_count, total);
if failure_count > 0 {
warn!("{} buckets failed to heal", failure_count);
return Err(Error::other(format!("{failure_count} buckets failed to heal")));
}
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/src/heal/channel.rs | crates/ahm/src/heal/channel.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{
manager::HealManager,
task::{HealOptions, HealPriority, HealRequest, HealType},
utils,
};
use crate::{Error, Result};
use rustfs_common::heal_channel::{
HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
publish_heal_response,
};
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{debug, error, info};
/// Heal channel processor
pub struct HealChannelProcessor {
/// Heal manager
heal_manager: Arc<HealManager>,
/// Response sender
response_sender: mpsc::UnboundedSender<HealChannelResponse>,
/// Response receiver
response_receiver: mpsc::UnboundedReceiver<HealChannelResponse>,
}
impl HealChannelProcessor {
/// Create new HealChannelProcessor
pub fn new(heal_manager: Arc<HealManager>) -> Self {
let (response_tx, response_rx) = mpsc::unbounded_channel();
Self {
heal_manager,
response_sender: response_tx,
response_receiver: response_rx,
}
}
/// Start processing heal channel requests
pub async fn start(&mut self, mut receiver: HealChannelReceiver) -> Result<()> {
info!("Starting heal channel processor");
loop {
tokio::select! {
command = receiver.recv() => {
match command {
Some(command) => {
if let Err(e) = self.process_command(command).await {
error!("Failed to process heal command: {}", e);
}
}
None => {
debug!("Heal channel receiver closed, stopping processor");
break;
}
}
}
response = self.response_receiver.recv() => {
if let Some(response) = response {
// Handle response if needed
info!("Received heal response for request: {}", response.request_id);
}
}
}
}
info!("Heal channel processor stopped");
Ok(())
}
/// Process heal command
async fn process_command(&self, command: HealChannelCommand) -> Result<()> {
match command {
HealChannelCommand::Start(request) => self.process_start_request(request).await,
HealChannelCommand::Query { heal_path, client_token } => self.process_query_request(heal_path, client_token).await,
HealChannelCommand::Cancel { heal_path } => self.process_cancel_request(heal_path).await,
}
}
/// Process start request
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
info!(
"Processing heal start request: {} for bucket: {}/{}",
request.id,
request.bucket,
request.object_prefix.as_deref().unwrap_or("")
);
// Convert channel request to heal request
let heal_request = self.convert_to_heal_request(request.clone())?;
// Submit to heal manager
match self.heal_manager.submit_heal_request(heal_request).await {
Ok(task_id) => {
info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);
let response = HealChannelResponse {
request_id: request.id,
success: true,
data: Some(format!("Task ID: {task_id}").into_bytes()),
error: None,
};
self.publish_response(response);
}
Err(e) => {
error!("Failed to submit heal request: {} - {}", request.id, e);
// Send error response
let response = HealChannelResponse {
request_id: request.id,
success: false,
data: None,
error: Some(e.to_string()),
};
self.publish_response(response);
}
}
Ok(())
}
/// Process query request
async fn process_query_request(&self, heal_path: String, client_token: String) -> Result<()> {
info!("Processing heal query request for path: {}", heal_path);
// TODO: Implement query logic based on heal_path and client_token
// For now, return a placeholder response
let response = HealChannelResponse {
request_id: client_token,
success: true,
data: Some(format!("Query result for path: {heal_path}").into_bytes()),
error: None,
};
self.publish_response(response);
Ok(())
}
/// Process cancel request
async fn process_cancel_request(&self, heal_path: String) -> Result<()> {
info!("Processing heal cancel request for path: {}", heal_path);
// TODO: Implement cancel logic based on heal_path
// For now, return a placeholder response
let response = HealChannelResponse {
request_id: heal_path.clone(),
success: true,
data: Some(format!("Cancel request for path: {heal_path}").into_bytes()),
error: None,
};
self.publish_response(response);
Ok(())
}
/// Convert channel request to heal request
fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
let heal_type = if let Some(disk_id) = &request.disk {
let set_disk_id = utils::normalize_set_disk_id(disk_id).ok_or_else(|| Error::InvalidHealType {
heal_type: format!("erasure-set({disk_id})"),
})?;
HealType::ErasureSet {
buckets: vec![],
set_disk_id,
}
} else if let Some(prefix) = &request.object_prefix {
if !prefix.is_empty() {
HealType::Object {
bucket: request.bucket.clone(),
object: prefix.clone(),
version_id: None,
}
} else {
HealType::Bucket {
bucket: request.bucket.clone(),
}
}
} else {
HealType::Bucket {
bucket: request.bucket.clone(),
}
};
let priority = match request.priority {
HealChannelPriority::Low => HealPriority::Low,
HealChannelPriority::Normal => HealPriority::Normal,
HealChannelPriority::High => HealPriority::High,
HealChannelPriority::Critical => HealPriority::Urgent,
};
// Build HealOptions with all available fields
let mut options = HealOptions {
scan_mode: request.scan_mode.unwrap_or(HealScanMode::Normal),
remove_corrupted: request.remove_corrupted.unwrap_or(false),
recreate_missing: request.recreate_missing.unwrap_or(true),
update_parity: request.update_parity.unwrap_or(true),
recursive: request.recursive.unwrap_or(false),
dry_run: request.dry_run.unwrap_or(false),
timeout: request.timeout_seconds.map(std::time::Duration::from_secs),
pool_index: request.pool_index,
set_index: request.set_index,
};
// Apply force_start overrides
if request.force_start {
options.remove_corrupted = true;
options.recreate_missing = true;
options.update_parity = true;
}
Ok(HealRequest::new(heal_type, options, priority))
}
fn publish_response(&self, response: HealChannelResponse) {
// Try to send to local channel first, but don't block broadcast on failure
if let Err(e) = self.response_sender.send(response.clone()) {
error!("Failed to enqueue heal response locally: {}", e);
}
// Always attempt to broadcast, even if local send failed
// Use the original response for broadcast; local send uses a clone
if let Err(e) = publish_heal_response(response) {
error!("Failed to broadcast heal response: {}", e);
}
}
/// Get response sender for external use
pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
self.response_sender.clone()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::storage::HealStorageAPI;
use rustfs_common::heal_channel::{HealChannelPriority, HealChannelRequest, HealScanMode};
use std::sync::Arc;
// Mock storage for testing
struct MockStorage;
#[async_trait::async_trait]
impl HealStorageAPI for MockStorage {
async fn get_object_meta(
&self,
_bucket: &str,
_object: &str,
) -> crate::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
Ok(None)
}
async fn get_object_data(&self, _bucket: &str, _object: &str) -> crate::Result<Option<Vec<u8>>> {
Ok(None)
}
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> crate::Result<()> {
Ok(())
}
async fn delete_object(&self, _bucket: &str, _object: &str) -> crate::Result<()> {
Ok(())
}
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
Ok(true)
}
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> crate::Result<Vec<u8>> {
Ok(vec![])
}
async fn get_disk_status(
&self,
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
) -> crate::Result<crate::heal::storage::DiskStatus> {
Ok(crate::heal::storage::DiskStatus::Ok)
}
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> crate::Result<()> {
Ok(())
}
async fn get_bucket_info(&self, _bucket: &str) -> crate::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
Ok(None)
}
async fn heal_bucket_metadata(&self, _bucket: &str) -> crate::Result<()> {
Ok(())
}
async fn list_buckets(&self) -> crate::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
Ok(vec![])
}
async fn object_exists(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
Ok(false)
}
async fn get_object_size(&self, _bucket: &str, _object: &str) -> crate::Result<Option<u64>> {
Ok(None)
}
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> crate::Result<Option<String>> {
Ok(None)
}
async fn heal_object(
&self,
_bucket: &str,
_object: &str,
_version_id: Option<&str>,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn heal_bucket(
&self,
_bucket: &str,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> crate::Result<rustfs_madmin::heal_commands::HealResultItem> {
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
}
async fn heal_format(
&self,
_dry_run: bool,
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> crate::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
Err(crate::Error::other("Not implemented in mock"))
}
}
fn create_test_heal_manager() -> Arc<HealManager> {
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
Arc::new(HealManager::new(storage, None))
}
#[test]
fn test_heal_channel_processor_new() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
// Verify processor is created successfully
let _sender = processor.get_response_sender();
// If we can get the sender, processor was created correctly
}
#[tokio::test]
async fn test_convert_to_heal_request_bucket() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
assert_eq!(heal_request.priority, HealPriority::Normal);
}
#[tokio::test]
async fn test_convert_to_heal_request_object() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("test-object".to_string()),
disk: None,
priority: HealChannelPriority::High,
scan_mode: Some(HealScanMode::Deep),
remove_corrupted: Some(true),
recreate_missing: Some(true),
update_parity: Some(true),
recursive: Some(false),
dry_run: Some(false),
timeout_seconds: Some(300),
pool_index: Some(0),
set_index: Some(1),
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Object { .. }));
assert_eq!(heal_request.priority, HealPriority::High);
assert_eq!(heal_request.options.scan_mode, HealScanMode::Deep);
assert!(heal_request.options.remove_corrupted);
assert!(heal_request.options.recreate_missing);
}
#[tokio::test]
async fn test_convert_to_heal_request_erasure_set() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: Some("pool_0_set_1".to_string()),
priority: HealChannelPriority::Critical,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::ErasureSet { .. }));
assert_eq!(heal_request.priority, HealPriority::Urgent);
}
#[tokio::test]
async fn test_convert_to_heal_request_invalid_disk_id() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: Some("invalid-disk-id".to_string()),
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let result = processor.convert_to_heal_request(channel_request);
assert!(result.is_err());
}
#[tokio::test]
async fn test_convert_to_heal_request_priority_mapping() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let priorities = vec![
(HealChannelPriority::Low, HealPriority::Low),
(HealChannelPriority::Normal, HealPriority::Normal),
(HealChannelPriority::High, HealPriority::High),
(HealChannelPriority::Critical, HealPriority::Urgent),
];
for (channel_priority, expected_heal_priority) in priorities {
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: channel_priority,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert_eq!(heal_request.priority, expected_heal_priority);
}
}
#[tokio::test]
async fn test_convert_to_heal_request_force_start() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: Some(false),
recreate_missing: Some(false),
update_parity: Some(false),
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: true, // Should override the above false values
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(heal_request.options.remove_corrupted);
assert!(heal_request.options.recreate_missing);
assert!(heal_request.options.update_parity);
}
#[tokio::test]
async fn test_convert_to_heal_request_empty_object_prefix() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("".to_string()), // Empty prefix should be treated as bucket heal
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/heal_bug_fixes_test.rs | crates/ahm/tests/heal_bug_fixes_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::heal::{
event::{HealEvent, Severity},
task::{HealPriority, HealType},
utils,
};
#[test]
fn test_heal_event_to_heal_request_no_panic() {
use rustfs_ecstore::disk::endpoint::Endpoint;
// Test that invalid pool/set indices don't cause panic
// Create endpoint using try_from or similar method
let endpoint_result = Endpoint::try_from("http://localhost:9000");
if let Ok(mut endpoint) = endpoint_result {
endpoint.pool_idx = -1;
endpoint.set_idx = -1;
endpoint.disk_idx = 0;
let event = HealEvent::DiskStatusChange {
endpoint,
old_status: "ok".to_string(),
new_status: "offline".to_string(),
};
// Should return error instead of panicking
let result = event.to_heal_request();
assert!(result.is_err());
assert!(result.unwrap_err().to_string().contains("Invalid heal type"));
}
}
#[test]
fn test_heal_event_to_heal_request_valid_indices() {
use rustfs_ecstore::disk::endpoint::Endpoint;
// Test that valid indices work correctly
let endpoint_result = Endpoint::try_from("http://localhost:9000");
if let Ok(mut endpoint) = endpoint_result {
endpoint.pool_idx = 0;
endpoint.set_idx = 1;
endpoint.disk_idx = 0;
let event = HealEvent::DiskStatusChange {
endpoint,
old_status: "ok".to_string(),
new_status: "offline".to_string(),
};
let result = event.to_heal_request();
assert!(result.is_ok());
let request = result.unwrap();
assert!(matches!(request.heal_type, HealType::ErasureSet { .. }));
}
}
#[test]
fn test_heal_event_object_corruption() {
let event = HealEvent::ObjectCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
corruption_type: rustfs_ahm::heal::event::CorruptionType::DataCorruption,
severity: Severity::High,
};
let result = event.to_heal_request();
assert!(result.is_ok());
let request = result.unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_ec_decode_failure() {
let event = HealEvent::ECDecodeFailure {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
missing_shards: vec![0, 1],
available_shards: vec![2, 3],
};
let result = event.to_heal_request();
assert!(result.is_ok());
let request = result.unwrap();
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
assert_eq!(request.priority, HealPriority::Urgent);
}
#[test]
fn test_format_set_disk_id_from_i32_negative() {
// Test that negative indices return None
assert!(utils::format_set_disk_id_from_i32(-1, 0).is_none());
assert!(utils::format_set_disk_id_from_i32(0, -1).is_none());
assert!(utils::format_set_disk_id_from_i32(-1, -1).is_none());
}
#[test]
fn test_format_set_disk_id_from_i32_valid() {
// Test that valid indices return Some
let result = utils::format_set_disk_id_from_i32(0, 1);
assert!(result.is_some());
assert_eq!(result.unwrap(), "pool_0_set_1");
}
#[test]
fn test_resume_state_timestamp_handling() {
use rustfs_ahm::heal::resume::ResumeState;
// Test that ResumeState creation doesn't panic even if system time is before epoch
// This is a theoretical test - in practice, system time should never be before epoch
// But we want to ensure unwrap_or_default handles edge cases
let state = ResumeState::new(
"test-task".to_string(),
"test-type".to_string(),
"pool_0_set_1".to_string(),
vec!["bucket1".to_string()],
);
// Verify fields are initialized (u64 is always >= 0)
// The important thing is that unwrap_or_default prevents panic
let _ = state.start_time;
let _ = state.last_update;
}
#[test]
fn test_resume_checkpoint_timestamp_handling() {
use rustfs_ahm::heal::resume::ResumeCheckpoint;
// Test that ResumeCheckpoint creation doesn't panic
let checkpoint = ResumeCheckpoint::new("test-task".to_string());
// Verify field is initialized (u64 is always >= 0)
// The important thing is that unwrap_or_default prevents panic
let _ = checkpoint.checkpoint_time;
}
#[test]
fn test_path_to_str_helper() {
use std::path::Path;
// Test that path conversion handles non-UTF-8 paths gracefully
// Note: This is a compile-time test - actual non-UTF-8 paths are hard to construct in Rust
// The helper function should properly handle the conversion
let valid_path = Path::new("test/path");
assert!(valid_path.to_str().is_some());
}
#[test]
fn test_heal_task_status_atomic_update() {
use rustfs_ahm::heal::storage::HealStorageAPI;
use rustfs_ahm::heal::task::{HealOptions, HealRequest, HealTask, HealTaskStatus};
use std::sync::Arc;
// Mock storage for testing
struct MockStorage;
#[async_trait::async_trait]
impl HealStorageAPI for MockStorage {
async fn get_object_meta(
&self,
_bucket: &str,
_object: &str,
) -> rustfs_ahm::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
Ok(None)
}
async fn get_object_data(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<Vec<u8>>> {
Ok(None)
}
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> rustfs_ahm::Result<()> {
Ok(())
}
async fn delete_object(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<()> {
Ok(())
}
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<bool> {
Ok(true)
}
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Vec<u8>> {
Ok(vec![])
}
async fn get_disk_status(
&self,
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
) -> rustfs_ahm::Result<rustfs_ahm::heal::storage::DiskStatus> {
Ok(rustfs_ahm::heal::storage::DiskStatus::Ok)
}
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> rustfs_ahm::Result<()> {
Ok(())
}
async fn get_bucket_info(&self, _bucket: &str) -> rustfs_ahm::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
Ok(None)
}
async fn heal_bucket_metadata(&self, _bucket: &str) -> rustfs_ahm::Result<()> {
Ok(())
}
async fn list_buckets(&self) -> rustfs_ahm::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
Ok(vec![])
}
async fn object_exists(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<bool> {
Ok(false)
}
async fn get_object_size(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<u64>> {
Ok(None)
}
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<String>> {
Ok(None)
}
async fn heal_object(
&self,
_bucket: &str,
_object: &str,
_version_id: Option<&str>,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> rustfs_ahm::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<rustfs_ahm::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn heal_bucket(
&self,
_bucket: &str,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> rustfs_ahm::Result<rustfs_madmin::heal_commands::HealResultItem> {
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
}
async fn heal_format(
&self,
_dry_run: bool,
) -> rustfs_ahm::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<rustfs_ahm::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> rustfs_ahm::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> rustfs_ahm::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> rustfs_ahm::Result<rustfs_ecstore::disk::DiskStore> {
Err(rustfs_ahm::Error::other("Not implemented in mock"))
}
}
// Create a heal request and task
let request = HealRequest::new(
HealType::Object {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::Normal,
);
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
let task = HealTask::from_request(request, storage);
// Verify initial status
let status = tokio::runtime::Runtime::new().unwrap().block_on(task.get_status());
assert_eq!(status, HealTaskStatus::Pending);
// The task should have task_start_instant field initialized
// This is an internal detail, but we can verify it doesn't cause issues
// by checking that the task can be created successfully
// Note: We can't directly access private fields, but creation without panic
// confirms the fix works
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/endpoint_index_test.rs | crates/ahm/tests/endpoint_index_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! test endpoint index settings
use rustfs_ecstore::disk::endpoint::Endpoint;
use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
use std::net::SocketAddr;
use tempfile::TempDir;
use tokio_util::sync::CancellationToken;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_endpoint_index_settings() -> anyhow::Result<()> {
let temp_dir = TempDir::new()?;
// create test disk paths
let disk_paths: Vec<_> = (0..4).map(|i| temp_dir.path().join(format!("disk{i}"))).collect();
for path in &disk_paths {
tokio::fs::create_dir_all(path).await?;
}
// build endpoints
let mut endpoints: Vec<Endpoint> = disk_paths
.iter()
.map(|p| Endpoint::try_from(p.to_string_lossy().as_ref()).unwrap())
.collect();
// set endpoint indexes correctly
for (i, endpoint) in endpoints.iter_mut().enumerate() {
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(i); // note: disk_index is usize type
println!(
"Endpoint {}: pool_idx={}, set_idx={}, disk_idx={}",
i, endpoint.pool_idx, endpoint.set_idx, endpoint.disk_idx
);
}
let pool_endpoints = PoolEndpoints {
legacy: false,
set_count: 1,
drives_per_set: endpoints.len(),
endpoints: Endpoints::from(endpoints.clone()),
cmd_line: "test".to_string(),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
// validate all endpoint indexes are in valid range
for (i, ep) in endpoints.iter().enumerate() {
assert_eq!(ep.pool_idx, 0, "Endpoint {i} pool_idx should be 0");
assert_eq!(ep.set_idx, 0, "Endpoint {i} set_idx should be 0");
assert_eq!(ep.disk_idx, i as i32, "Endpoint {i} disk_idx should be {i}");
println!(
"Endpoint {} indices are valid: pool={}, set={}, disk={}",
i, ep.pool_idx, ep.set_idx, ep.disk_idx
);
}
// test ECStore initialization
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await?;
let server_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let ecstore = rustfs_ecstore::store::ECStore::new(server_addr, endpoint_pools, CancellationToken::new()).await?;
println!("ECStore initialized successfully with {} pools", ecstore.pools.len());
Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/scanner_optimization_tests.rs | crates/ahm/tests/scanner_optimization_tests.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::scanner::{
checkpoint::{CheckpointData, CheckpointManager},
io_monitor::{AdvancedIOMonitor, IOMonitorConfig},
io_throttler::{AdvancedIOThrottler, IOThrottlerConfig},
local_stats::LocalStatsManager,
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig, ScanProgress},
stats_aggregator::{DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig},
};
use std::time::Duration;
use tempfile::TempDir;
#[tokio::test]
async fn test_checkpoint_manager_save_and_load() {
let temp_dir = TempDir::new().unwrap();
let node_id = "test-node-1";
let checkpoint_manager = CheckpointManager::new(node_id, temp_dir.path());
// create checkpoint
let progress = ScanProgress {
current_cycle: 5,
current_disk_index: 2,
last_scan_key: Some("test-object-key".to_string()),
..Default::default()
};
// save checkpoint
checkpoint_manager
.force_save_checkpoint(&progress)
.await
.expect("Failed to save checkpoint");
// load checkpoint
let loaded_progress = checkpoint_manager
.load_checkpoint()
.await
.expect("Failed to load checkpoint")
.expect("No checkpoint found");
// verify data
assert_eq!(loaded_progress.current_cycle, 5);
assert_eq!(loaded_progress.current_disk_index, 2);
assert_eq!(loaded_progress.last_scan_key, Some("test-object-key".to_string()));
}
#[tokio::test]
async fn test_checkpoint_data_integrity() {
let temp_dir = TempDir::new().unwrap();
let node_id = "test-node-integrity";
let checkpoint_manager = CheckpointManager::new(node_id, temp_dir.path());
let progress = ScanProgress::default();
// create checkpoint data
let checkpoint_data = CheckpointData::new(progress.clone(), node_id.to_string());
// verify integrity
assert!(checkpoint_data.verify_integrity());
// save and load
checkpoint_manager
.force_save_checkpoint(&progress)
.await
.expect("Failed to save checkpoint");
let loaded = checkpoint_manager.load_checkpoint().await.expect("Failed to load checkpoint");
assert!(loaded.is_some());
}
#[tokio::test]
async fn test_local_stats_manager() {
let temp_dir = TempDir::new().unwrap();
let node_id = "test-stats-node";
let stats_manager = LocalStatsManager::new(node_id, temp_dir.path());
// load stats
stats_manager.load_stats().await.expect("Failed to load stats");
// get stats summary
let summary = stats_manager.get_stats_summary().await;
assert_eq!(summary.node_id, node_id);
assert_eq!(summary.total_objects_scanned, 0);
// record heal triggered
stats_manager
.record_heal_triggered("test-object", "corruption detected")
.await;
let counters = stats_manager.get_counters();
assert_eq!(counters.total_heal_triggered.load(std::sync::atomic::Ordering::Relaxed), 1);
}
#[tokio::test]
async fn test_io_monitor_load_level_calculation() {
let config = IOMonitorConfig {
enable_system_monitoring: false, // use mock data
..Default::default()
};
let io_monitor = AdvancedIOMonitor::new(config);
io_monitor.start().await.expect("Failed to start IO monitor");
// update business metrics to affect load calculation
io_monitor.update_business_metrics(50, 100, 0, 10).await;
// wait for a monitoring cycle
tokio::time::sleep(Duration::from_millis(1500)).await;
let load_level = io_monitor.get_business_load_level().await;
// load level should be in a reasonable range
assert!(matches!(
load_level,
LoadLevel::Low | LoadLevel::Medium | LoadLevel::High | LoadLevel::Critical
));
io_monitor.stop().await;
}
#[tokio::test]
async fn test_io_throttler_load_adjustment() {
let config = IOThrottlerConfig::default();
let throttler = AdvancedIOThrottler::new(config);
// test adjust for load level
let low_delay = throttler.adjust_for_load_level(LoadLevel::Low).await;
let medium_delay = throttler.adjust_for_load_level(LoadLevel::Medium).await;
let high_delay = throttler.adjust_for_load_level(LoadLevel::High).await;
let critical_delay = throttler.adjust_for_load_level(LoadLevel::Critical).await;
// verify delay increment
assert!(low_delay < medium_delay);
assert!(medium_delay < high_delay);
assert!(high_delay < critical_delay);
// verify pause logic
assert!(!throttler.should_pause_scanning(LoadLevel::Low).await);
assert!(!throttler.should_pause_scanning(LoadLevel::Medium).await);
assert!(!throttler.should_pause_scanning(LoadLevel::High).await);
assert!(throttler.should_pause_scanning(LoadLevel::Critical).await);
}
#[tokio::test]
async fn test_throttler_business_pressure_simulation() {
let throttler = AdvancedIOThrottler::default();
// run short time pressure test
let simulation_duration = Duration::from_millis(500);
let result = throttler.simulate_business_pressure(simulation_duration).await;
// verify simulation result
assert!(!result.simulation_records.is_empty());
assert!(result.total_duration >= simulation_duration);
assert!(result.final_stats.total_decisions > 0);
// verify all load levels are tested
let load_levels: std::collections::HashSet<_> = result.simulation_records.iter().map(|r| r.load_level).collect();
assert!(load_levels.contains(&LoadLevel::Low));
assert!(load_levels.contains(&LoadLevel::Critical));
}
#[tokio::test]
async fn test_node_scanner_creation_and_config() {
let temp_dir = TempDir::new().unwrap();
let node_id = "test-scanner-node".to_string();
let config = NodeScannerConfig {
scan_interval: Duration::from_secs(30),
disk_scan_delay: Duration::from_secs(5),
enable_smart_scheduling: true,
enable_checkpoint: true,
data_dir: temp_dir.path().to_path_buf(),
..Default::default()
};
let scanner = NodeScanner::new(node_id.clone(), config);
// verify node id
assert_eq!(scanner.node_id(), &node_id);
// initialize stats
scanner.initialize_stats().await.expect("Failed to initialize stats");
// get stats summary
let summary = scanner.get_stats_summary().await;
assert_eq!(summary.node_id, node_id);
}
#[tokio::test]
async fn test_decentralized_stats_aggregator() {
let config = DecentralizedStatsAggregatorConfig {
cache_ttl: Duration::from_millis(100), // short cache ttl for testing
..Default::default()
};
let aggregator = DecentralizedStatsAggregator::new(config);
// test cache mechanism
let _start_time = std::time::Instant::now();
// first get stats (should trigger aggregation)
let stats1 = aggregator
.get_aggregated_stats()
.await
.expect("Failed to get aggregated stats");
let first_call_duration = _start_time.elapsed();
// second get stats (should use cache)
let cache_start = std::time::Instant::now();
let stats2 = aggregator.get_aggregated_stats().await.expect("Failed to get cached stats");
let cache_call_duration = cache_start.elapsed();
// cache call should be faster
assert!(cache_call_duration < first_call_duration);
// data should be same
assert_eq!(stats1.aggregation_timestamp, stats2.aggregation_timestamp);
// wait for cache expiration
tokio::time::sleep(Duration::from_millis(150)).await;
// third get should refresh data
let stats3 = aggregator
.get_aggregated_stats()
.await
.expect("Failed to get refreshed stats");
// timestamp should be different
assert!(stats3.aggregation_timestamp > stats1.aggregation_timestamp);
}
#[tokio::test]
async fn test_scanner_performance_impact() {
let temp_dir = TempDir::new().unwrap();
let node_id = "performance-test-node".to_string();
let config = NodeScannerConfig {
scan_interval: Duration::from_millis(100), // fast scan for testing
disk_scan_delay: Duration::from_millis(10),
data_dir: temp_dir.path().to_path_buf(),
..Default::default()
};
let scanner = NodeScanner::new(node_id, config);
// simulate business workload
let _start_time = std::time::Instant::now();
// update business metrics for high load
scanner.update_business_metrics(1500, 3000, 500, 800).await;
// get io monitor and throttler
let io_monitor = scanner.get_io_monitor();
let throttler = scanner.get_io_throttler();
// start io monitor
io_monitor.start().await.expect("Failed to start IO monitor");
// wait for monitor system to stabilize and trigger throttling - increase wait time
tokio::time::sleep(Duration::from_millis(1000)).await;
// simulate some io operations to trigger throttling mechanism
for _ in 0..10 {
let _current_metrics = io_monitor.get_current_metrics().await;
let metrics_snapshot = rustfs_ahm::scanner::io_throttler::MetricsSnapshot {
iops: 1000,
latency: 100,
cpu_usage: 80,
memory_usage: 70,
};
let load_level = io_monitor.get_business_load_level().await;
let _decision = throttler.make_throttle_decision(load_level, Some(metrics_snapshot)).await;
tokio::time::sleep(Duration::from_millis(50)).await;
}
// check if load level is correctly responded
let load_level = io_monitor.get_business_load_level().await;
// in high load, scanner should automatically adjust
let throttle_stats = throttler.get_throttle_stats().await;
println!("Performance test results:");
println!(" Load level: {load_level:?}");
println!(" Throttle decisions: {}", throttle_stats.total_decisions);
println!(" Average delay: {:?}", throttle_stats.average_delay);
// verify performance impact control - if load is high enough, there should be throttling delay
if load_level != LoadLevel::Low {
assert!(throttle_stats.average_delay > Duration::from_millis(0));
} else {
// in low load, there should be no throttling delay
assert!(throttle_stats.average_delay >= Duration::from_millis(0));
}
io_monitor.stop().await;
}
#[tokio::test]
async fn test_checkpoint_recovery_resilience() {
let temp_dir = TempDir::new().unwrap();
let node_id = "resilience-test-node";
let checkpoint_manager = CheckpointManager::new(node_id, temp_dir.path());
// verify checkpoint manager
let result = checkpoint_manager.load_checkpoint().await.unwrap();
assert!(result.is_none());
// create and save checkpoint
let progress = ScanProgress {
current_cycle: 10,
current_disk_index: 3,
last_scan_key: Some("recovery-test-key".to_string()),
..Default::default()
};
checkpoint_manager
.force_save_checkpoint(&progress)
.await
.expect("Failed to save checkpoint");
// verify recovery
let recovered = checkpoint_manager
.load_checkpoint()
.await
.expect("Failed to load checkpoint")
.expect("No checkpoint recovered");
assert_eq!(recovered.current_cycle, 10);
assert_eq!(recovered.current_disk_index, 3);
// cleanup checkpoint
checkpoint_manager
.cleanup_checkpoint()
.await
.expect("Failed to cleanup checkpoint");
// verify cleanup
let after_cleanup = checkpoint_manager.load_checkpoint().await.unwrap();
assert!(after_cleanup.is_none());
}
pub async fn create_test_scanner(temp_dir: &TempDir) -> NodeScanner {
let config = NodeScannerConfig {
scan_interval: Duration::from_millis(50),
disk_scan_delay: Duration::from_millis(10),
data_dir: temp_dir.path().to_path_buf(),
..Default::default()
};
NodeScanner::new("integration-test-node".to_string(), config)
}
pub struct PerformanceBenchmark {
pub _scanner_overhead_ms: u64,
pub business_impact_percentage: f64,
pub _throttle_effectiveness: f64,
}
impl PerformanceBenchmark {
pub fn meets_optimization_goals(&self) -> bool {
self.business_impact_percentage < 10.0
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/heal_integration_test.rs | crates/ahm/tests/heal_integration_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::heal::{
manager::{HealConfig, HealManager},
storage::{ECStoreHealStorage, HealStorageAPI},
task::{HealOptions, HealPriority, HealRequest, HealTaskStatus, HealType},
};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
disk::endpoint::Endpoint,
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
store::ECStore,
store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::{
path::PathBuf,
sync::{Arc, Once, OnceLock},
time::Duration,
};
use tokio::fs;
use tokio_util::sync::CancellationToken;
use tracing::info;
use walkdir::WalkDir;
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
static INIT: Once = Once::new();
pub fn init_tracing() {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}
/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>) {
init_tracing();
// Fast path: already initialized, just clone and return
if let Some((paths, ecstore, heal_storage)) = GLOBAL_ENV.get() {
return (paths.clone(), ecstore.clone(), heal_storage.clone());
}
// create temp dir as 4 disks with unique base dir
let test_base_dir = format!("/tmp/rustfs_ahm_heal_test_{}", uuid::Uuid::new_v4());
let temp_dir = std::path::PathBuf::from(&test_base_dir);
if temp_dir.exists() {
fs::remove_dir_all(&temp_dir).await.ok();
}
fs::create_dir_all(&temp_dir).await.unwrap();
// create 4 disk dirs
let disk_paths = vec![
temp_dir.join("disk1"),
temp_dir.join("disk2"),
temp_dir.join("disk3"),
temp_dir.join("disk4"),
];
for disk_path in &disk_paths {
fs::create_dir_all(disk_path).await.unwrap();
}
// create EndpointServerPools
let mut endpoints = Vec::new();
for (i, disk_path) in disk_paths.iter().enumerate() {
let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
// set correct index
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(i);
endpoints.push(endpoint);
}
let pool_endpoints = PoolEndpoints {
legacy: false,
set_count: 1,
drives_per_set: 4,
endpoints: Endpoints::from(endpoints),
cmd_line: "test".to_string(),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
// format disks (only first time)
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();
// create ECStore with dynamic port 0 (let OS assign) or fixed 9001 if free
let port = 9001; // for simplicity
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions {
no_metadata: true,
..Default::default()
})
.await
.unwrap();
let buckets = buckets_list.into_iter().map(|v| v.name).collect();
rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;
// Create heal storage layer
let heal_storage = Arc::new(ECStoreHealStorage::new(ecstore.clone()));
// Store in global once lock
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone(), heal_storage.clone()));
(disk_paths, ecstore, heal_storage)
}
/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(bucket_name, &Default::default())
.await
.expect("Failed to create test bucket");
info!("Created test bucket: {}", bucket_name);
}
/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
let mut reader = PutObjReader::from_vec(data.to_vec());
let object_info = (**ecstore)
.put_object(bucket, object, &mut reader, &ObjectOptions::default())
.await
.expect("Failed to upload test object");
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}
mod serial_tests {
use super::*;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_object_basic() {
let (disk_paths, ecstore, heal_storage) = setup_test_env().await;
// Create test bucket and object
let bucket_name = "test-heal-object-basic";
let object_name = "test-object.txt";
let test_data = b"Hello, this is test data for healing!";
create_test_bucket(&ecstore, bucket_name).await;
upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
// ─── 1️⃣ delete single data shard file ─────────────────────────────────────
let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
// find part file at depth 2, e.g. .../<uuid>/part.1
let target_part = WalkDir::new(&obj_dir)
.min_depth(2)
.max_depth(2)
.into_iter()
.filter_map(Result::ok)
.find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
.map(|e| e.into_path())
.expect("Failed to locate part file to delete");
std::fs::remove_file(&target_part).expect("failed to delete part file");
assert!(!target_part.exists());
println!("✅ Deleted shard part file: {target_part:?}");
// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_millis(1),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
heal_manager.start().await.unwrap();
// Submit heal request for the object
let heal_request = HealRequest::new(
HealType::Object {
bucket: bucket_name.to_string(),
object: object_name.to_string(),
version_id: None,
},
HealOptions {
dry_run: false,
recursive: false,
remove_corrupted: false,
recreate_missing: true,
scan_mode: HealScanMode::Normal,
update_parity: true,
timeout: Some(Duration::from_secs(300)),
pool_index: None,
set_index: None,
},
HealPriority::Normal,
);
let task_id = heal_manager
.submit_heal_request(heal_request)
.await
.expect("Failed to submit heal request");
info!("Submitted heal request with task ID: {}", task_id);
// Wait for task completion
tokio::time::sleep(tokio::time::Duration::from_secs(8)).await;
// Attempt to fetch task status (might be removed if finished)
match heal_manager.get_task_status(&task_id).await {
Ok(status) => info!("Task status: {:?}", status),
Err(e) => info!("Task status not found (likely completed): {}", e),
}
// ─── 2️⃣ verify each part file is restored ───────
assert!(target_part.exists());
info!("Heal object basic test passed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_bucket_basic() {
let (disk_paths, ecstore, heal_storage) = setup_test_env().await;
// Create test bucket
let bucket_name = "test-heal-bucket-basic";
create_test_bucket(&ecstore, bucket_name).await;
// ─── 1️⃣ delete bucket dir on disk ──────────────
let broken_bucket_path = disk_paths[0].join(bucket_name);
assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");
std::fs::remove_dir_all(&broken_bucket_path).expect("failed to delete bucket dir on disk");
assert!(!broken_bucket_path.exists(), "bucket dir still exists after deletion");
println!("✅ Deleted bucket directory on disk: {broken_bucket_path:?}");
// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_millis(1),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
heal_manager.start().await.unwrap();
// Submit heal request for the bucket
let heal_request = HealRequest::new(
HealType::Bucket {
bucket: bucket_name.to_string(),
},
HealOptions {
dry_run: false,
recursive: true,
remove_corrupted: false,
recreate_missing: false,
scan_mode: HealScanMode::Normal,
update_parity: false,
timeout: Some(Duration::from_secs(300)),
pool_index: None,
set_index: None,
},
HealPriority::Normal,
);
let task_id = heal_manager
.submit_heal_request(heal_request)
.await
.expect("Failed to submit bucket heal request");
info!("Submitted bucket heal request with task ID: {}", task_id);
// Wait for task completion
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
// Attempt to fetch task status (optional)
if let Ok(status) = heal_manager.get_task_status(&task_id).await {
if status == HealTaskStatus::Completed {
info!("Bucket heal task status: {:?}", status);
} else {
panic!("Bucket heal task status: {status:?}");
}
}
// ─── 3️⃣ Verify bucket directory is restored on every disk ───────
assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");
info!("Heal bucket basic test passed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_basic() {
let (disk_paths, _ecstore, heal_storage) = setup_test_env().await;
// ─── 1️⃣ delete format.json on one disk ──────────────
let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
assert!(format_path.exists(), "format.json does not exist on disk");
std::fs::remove_file(&format_path).expect("failed to delete format.json on disk");
assert!(!format_path.exists(), "format.json still exists after deletion");
println!("✅ Deleted format.json on disk: {format_path:?}");
// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_secs(2),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
heal_manager.start().await.unwrap();
// Wait for task completion
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
// ─── 2️⃣ verify format.json is restored ───────
assert!(format_path.exists(), "format.json does not exist on disk after heal");
info!("Heal format basic test passed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_with_data() {
let (disk_paths, ecstore, heal_storage) = setup_test_env().await;
// Create test bucket and object
let bucket_name = "test-heal-format-with-data";
let object_name = "test-object.txt";
let test_data = b"Hello, this is test data for healing!";
create_test_bucket(&ecstore, bucket_name).await;
upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
let target_part = WalkDir::new(&obj_dir)
.min_depth(2)
.max_depth(2)
.into_iter()
.filter_map(Result::ok)
.find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
.map(|e| e.into_path())
.expect("Failed to locate part file to delete");
// ─── 1️⃣ delete format.json on one disk ──────────────
let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
std::fs::remove_dir_all(&disk_paths[0]).expect("failed to delete all contents under disk_paths[0]");
std::fs::create_dir_all(&disk_paths[0]).expect("failed to recreate disk_paths[0] directory");
println!("✅ Deleted format.json on disk: {:?}", disk_paths[0]);
// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_secs(1),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
heal_manager.start().await.unwrap();
// Wait for task completion
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
// ─── 2️⃣ verify format.json is restored ───────
assert!(format_path.exists(), "format.json does not exist on disk after heal");
// ─── 3 verify each part file is restored ───────
assert!(target_part.exists());
info!("Heal format basic test passed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_storage_api_direct() {
let (_disk_paths, ecstore, heal_storage) = setup_test_env().await;
// Test direct heal storage API calls
// Test heal_format
let format_result = heal_storage.heal_format(true).await; // dry run
assert!(format_result.is_ok());
info!("Direct heal_format test passed");
// Test heal_bucket
let bucket_name = "test-bucket-direct";
create_test_bucket(&ecstore, bucket_name).await;
let heal_opts = HealOpts {
recursive: true,
dry_run: true,
remove: false,
recreate: false,
scan_mode: HealScanMode::Normal,
update_parity: false,
no_lock: false,
pool: None,
set: None,
};
let bucket_result = heal_storage.heal_bucket(bucket_name, &heal_opts).await;
assert!(bucket_result.is_ok());
info!("Direct heal_bucket test passed");
// Test heal_object
let object_name = "test-object-direct.txt";
let test_data = b"Test data for direct heal API";
upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
let object_heal_opts = HealOpts {
recursive: false,
dry_run: true,
remove: false,
recreate: false,
scan_mode: HealScanMode::Normal,
update_parity: false,
no_lock: false,
pool: None,
set: None,
};
let object_result = heal_storage
.heal_object(bucket_name, object_name, None, &object_heal_opts)
.await;
assert!(object_result.is_ok());
info!("Direct heal_object test passed");
info!("Direct heal storage API test passed");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/lifecycle_cache_test.rs | crates/ahm/tests/lifecycle_cache_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use heed::byteorder::BigEndian;
use heed::types::*;
use heed::{BoxedError, BytesDecode, BytesEncode, Database, DatabaseFlags, Env, EnvOpenOptions};
use rustfs_ahm::scanner::local_scan::{self, LocalObjectRecord, LocalScanOutcome};
use rustfs_ecstore::{
disk::endpoint::Endpoint,
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
store::ECStore,
store_api::{MakeBucketOptions, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::{
borrow::Cow,
path::PathBuf,
sync::{Arc, Once, OnceLock},
};
//use heed_traits::Comparator;
use time::OffsetDateTime;
use tokio::fs;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use uuid::Uuid;
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
static INIT: Once = Once::new();
static _LIFECYCLE_EXPIRY_CURRENT_DAYS: i32 = 1;
static _LIFECYCLE_EXPIRY_NONCURRENT_DAYS: i32 = 1;
static _LIFECYCLE_TRANSITION_CURRENT_DAYS: i32 = 1;
static _LIFECYCLE_TRANSITION_NONCURRENT_DAYS: i32 = 1;
static GLOBAL_LMDB_ENV: OnceLock<Env> = OnceLock::new();
static GLOBAL_LMDB_DB: OnceLock<Database<I64<BigEndian>, LifecycleContentCodec>> = OnceLock::new();
fn init_tracing() {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt::try_init();
});
}
/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
init_tracing();
// Fast path: already initialized, just clone and return
if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
return (paths.clone(), ecstore.clone());
}
// create temp dir as 4 disks with unique base dir
let test_base_dir = format!("/tmp/rustfs_ahm_lifecyclecache_test_{}", uuid::Uuid::new_v4());
let temp_dir = std::path::PathBuf::from(&test_base_dir);
if temp_dir.exists() {
fs::remove_dir_all(&temp_dir).await.ok();
}
fs::create_dir_all(&temp_dir).await.unwrap();
// create 4 disk dirs
let disk_paths = vec![
temp_dir.join("disk1"),
temp_dir.join("disk2"),
temp_dir.join("disk3"),
temp_dir.join("disk4"),
];
for disk_path in &disk_paths {
fs::create_dir_all(disk_path).await.unwrap();
}
// create EndpointServerPools
let mut endpoints = Vec::new();
for (i, disk_path) in disk_paths.iter().enumerate() {
let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
// set correct index
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(i);
endpoints.push(endpoint);
}
let pool_endpoints = PoolEndpoints {
legacy: false,
set_count: 1,
drives_per_set: 4,
endpoints: Endpoints::from(endpoints),
cmd_line: "test".to_string(),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
// format disks (only first time)
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();
// create ECStore with dynamic port 0 (let OS assign) or fixed 9002 if free
let port = 9002; // for simplicity
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions {
no_metadata: true,
..Default::default()
})
.await
.unwrap();
let buckets = buckets_list.into_iter().map(|v| v.name).collect();
rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;
//lmdb env
// User home directory
/*if let Ok(home_dir) = env::var("HOME").or_else(|_| env::var("USERPROFILE")) {
let mut path = PathBuf::from(home_dir);
path.push(format!(".{DEFAULT_LOG_FILENAME}"));
path.push(DEFAULT_LOG_DIR);
if ensure_directory_writable(&path) {
//return path;
}
}*/
let test_lmdb_lifecycle_dir = "/tmp/lmdb_lifecycle".to_string();
let temp_dir = std::path::PathBuf::from(&test_lmdb_lifecycle_dir);
if temp_dir.exists() {
fs::remove_dir_all(&temp_dir).await.ok();
}
fs::create_dir_all(&temp_dir).await.unwrap();
let lmdb_env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&test_lmdb_lifecycle_dir).unwrap() };
let bucket_name = format!("test-lc-cache-{}", "00000");
let mut wtxn = lmdb_env.write_txn().unwrap();
let db = match lmdb_env
.database_options()
.name(&format!("bucket_{bucket_name}"))
.types::<I64<BigEndian>, LifecycleContentCodec>()
.flags(DatabaseFlags::DUP_SORT)
//.dup_sort_comparator::<>()
.create(&mut wtxn)
{
Ok(db) => db,
Err(err) => {
panic!("lmdb error: {err}");
}
};
let _ = wtxn.commit();
let _ = GLOBAL_LMDB_ENV.set(lmdb_env);
let _ = GLOBAL_LMDB_DB.set(db);
// Store in global once lock
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
(disk_paths, ecstore)
}
/// Test helper: Create a test bucket
#[allow(dead_code)]
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(bucket_name, &Default::default())
.await
.expect("Failed to create test bucket");
info!("Created test bucket: {}", bucket_name);
}
/// Test helper: Create a test lock bucket
async fn create_test_lock_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(
bucket_name,
&MakeBucketOptions {
lock_enabled: true,
versioning_enabled: true,
..Default::default()
},
)
.await
.expect("Failed to create test bucket");
info!("Created test bucket: {}", bucket_name);
}
/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
let mut reader = PutObjReader::from_vec(data.to_vec());
let object_info = (**ecstore)
.put_object(bucket, object, &mut reader, &ObjectOptions::default())
.await
.expect("Failed to upload test object");
println!("object_info1: {object_info:?}");
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}
/// Test helper: Check if object exists
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
match (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
Ok(info) => !info.delete_marker,
Err(_) => false,
}
}
fn ns_to_offset_datetime(ns: i128) -> Option<OffsetDateTime> {
OffsetDateTime::from_unix_timestamp_nanos(ns).ok()
}
fn convert_record_to_object_info(record: &LocalObjectRecord) -> ObjectInfo {
let usage = &record.usage;
ObjectInfo {
bucket: usage.bucket.clone(),
name: usage.object.clone(),
size: usage.total_size as i64,
delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
mod_time: usage.last_modified_ns.and_then(ns_to_offset_datetime),
..Default::default()
}
}
#[allow(dead_code)]
fn to_object_info(
bucket: &str,
object: &str,
total_size: i64,
delete_marker: bool,
mod_time: OffsetDateTime,
version_id: &str,
) -> ObjectInfo {
ObjectInfo {
bucket: bucket.to_string(),
name: object.to_string(),
size: total_size,
delete_marker,
mod_time: Some(mod_time),
version_id: Some(Uuid::parse_str(version_id).unwrap()),
..Default::default()
}
}
#[derive(Debug, PartialEq, Eq)]
enum LifecycleType {
ExpiryCurrent,
ExpiryNoncurrent,
TransitionCurrent,
TransitionNoncurrent,
}
#[derive(Debug, PartialEq, Eq)]
pub struct LifecycleContent {
ver_no: u8,
ver_id: String,
mod_time: OffsetDateTime,
type_: LifecycleType,
object_name: String,
}
pub struct LifecycleContentCodec;
impl BytesEncode<'_> for LifecycleContentCodec {
type EItem = LifecycleContent;
fn bytes_encode(lcc: &Self::EItem) -> Result<Cow<'_, [u8]>, BoxedError> {
let (ver_no_byte, ver_id_bytes, mod_timestamp_bytes, type_byte, object_name_bytes) = match lcc {
LifecycleContent {
ver_no,
ver_id,
mod_time,
type_: LifecycleType::ExpiryCurrent,
object_name,
} => (
ver_no,
ver_id.clone().into_bytes(),
mod_time.unix_timestamp().to_be_bytes(),
0,
object_name.clone().into_bytes(),
),
LifecycleContent {
ver_no,
ver_id,
mod_time,
type_: LifecycleType::ExpiryNoncurrent,
object_name,
} => (
ver_no,
ver_id.clone().into_bytes(),
mod_time.unix_timestamp().to_be_bytes(),
1,
object_name.clone().into_bytes(),
),
LifecycleContent {
ver_no,
ver_id,
mod_time,
type_: LifecycleType::TransitionCurrent,
object_name,
} => (
ver_no,
ver_id.clone().into_bytes(),
mod_time.unix_timestamp().to_be_bytes(),
2,
object_name.clone().into_bytes(),
),
LifecycleContent {
ver_no,
ver_id,
mod_time,
type_: LifecycleType::TransitionNoncurrent,
object_name,
} => (
ver_no,
ver_id.clone().into_bytes(),
mod_time.unix_timestamp().to_be_bytes(),
3,
object_name.clone().into_bytes(),
),
};
let mut output = Vec::<u8>::new();
output.push(*ver_no_byte);
output.extend_from_slice(&ver_id_bytes);
output.extend_from_slice(&mod_timestamp_bytes);
output.push(type_byte);
output.extend_from_slice(&object_name_bytes);
Ok(Cow::Owned(output))
}
}
impl<'a> BytesDecode<'a> for LifecycleContentCodec {
type DItem = LifecycleContent;
fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
use std::mem::size_of;
let ver_no = match bytes.get(..size_of::<u8>()) {
Some(bytes) => bytes.try_into().map(u8::from_be_bytes).unwrap(),
None => return Err("invalid LifecycleContent: cannot extract ver_no".into()),
};
let ver_id = match bytes.get(size_of::<u8>()..(36 + 1)) {
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
None => return Err("invalid LifecycleContent: cannot extract ver_id".into()),
};
let mod_timestamp = match bytes.get((36 + 1)..(size_of::<i64>() + 36 + 1)) {
Some(bytes) => bytes.try_into().map(i64::from_be_bytes).unwrap(),
None => return Err("invalid LifecycleContent: cannot extract mod_time timestamp".into()),
};
let type_ = match bytes.get(size_of::<i64>() + 36 + 1) {
Some(&0) => LifecycleType::ExpiryCurrent,
Some(&1) => LifecycleType::ExpiryNoncurrent,
Some(&2) => LifecycleType::TransitionCurrent,
Some(&3) => LifecycleType::TransitionNoncurrent,
Some(_) => return Err("invalid LifecycleContent: invalid LifecycleType".into()),
None => return Err("invalid LifecycleContent: cannot extract LifecycleType".into()),
};
let object_name = match bytes.get((size_of::<i64>() + 36 + 1 + 1)..) {
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
None => return Err("invalid LifecycleContent: cannot extract object_name".into()),
};
Ok(LifecycleContent {
ver_no,
ver_id,
mod_time: OffsetDateTime::from_unix_timestamp(mod_timestamp).unwrap(),
type_,
object_name,
})
}
}
mod serial_tests {
use super::*;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
//#[ignore]
async fn test_lifecycle_chche_build() {
let (_disk_paths, ecstore) = setup_test_env().await;
// Create test bucket and object
let suffix = uuid::Uuid::new_v4().simple().to_string();
let bucket_name = format!("test-lc-cache-{}", &suffix[..8]);
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
// Verify object exists initially
assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
println!("✅ Object exists before lifecycle processing");
let scan_outcome = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
Ok(outcome) => outcome,
Err(err) => {
warn!("Local usage scan failed: {}", err);
LocalScanOutcome::default()
}
};
let bucket_objects_map = &scan_outcome.bucket_objects;
let records = match bucket_objects_map.get(&bucket_name) {
Some(records) => records,
None => {
debug!("No local snapshot entries found for bucket {}; skipping lifecycle/integrity", bucket_name);
&vec![]
}
};
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get()
&& let Some(lmdb) = GLOBAL_LMDB_DB.get()
{
let mut wtxn = lmdb_env.write_txn().unwrap();
/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
if let Ok(object_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
}*/
for record in records {
if !record.usage.has_live_object {
continue;
}
let object_info = convert_record_to_object_info(record);
println!("object_info2: {object_info:?}");
let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);
let version_id = if let Some(version_id) = object_info.version_id {
version_id.to_string()
} else {
"zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
};
lmdb.put(
&mut wtxn,
&expiry_time.unix_timestamp(),
&LifecycleContent {
ver_no: 0,
ver_id: version_id,
mod_time,
type_: LifecycleType::TransitionNoncurrent,
object_name: object_info.name,
},
)
.unwrap();
}
wtxn.commit().unwrap();
let mut wtxn = lmdb_env.write_txn().unwrap();
let iter = lmdb.iter_mut(&mut wtxn).unwrap();
//let _ = unsafe { iter.del_current().unwrap() };
for row in iter {
if let Ok(ref elm) = row {
let LifecycleContent {
ver_no,
ver_id,
mod_time,
type_,
object_name,
} = &elm.1;
println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
}
println!("row:{row:?}");
}
//drop(iter);
wtxn.commit().unwrap();
}
println!("Lifecycle cache test completed");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/integration_tests.rs | crates/ahm/tests/integration_tests.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::scanner::{
io_throttler::MetricsSnapshot,
local_stats::StatsSummary,
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig},
stats_aggregator::{DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig, NodeInfo},
};
use scanner_optimization_tests::{PerformanceBenchmark, create_test_scanner};
use std::{sync::Arc, time::Duration};
use tempfile::TempDir;
mod scanner_optimization_tests;
#[tokio::test]
async fn test_end_to_end_scanner_lifecycle() {
let temp_dir = TempDir::new().unwrap();
let scanner = create_test_scanner(&temp_dir).await;
scanner.initialize_stats().await.expect("Failed to initialize stats");
let initial_progress = scanner.get_scan_progress().await;
assert_eq!(initial_progress.current_cycle, 0);
scanner.force_save_checkpoint().await.expect("Failed to save checkpoint");
let checkpoint_info = scanner.get_checkpoint_info().await.unwrap();
assert!(checkpoint_info.is_some());
}
#[tokio::test]
async fn test_load_balancing_and_throttling_integration() {
let temp_dir = TempDir::new().unwrap();
let scanner = create_test_scanner(&temp_dir).await;
let io_monitor = scanner.get_io_monitor();
let throttler = scanner.get_io_throttler();
// Start IO monitoring
io_monitor.start().await.expect("Failed to start IO monitor");
// Simulate load variation scenarios
let load_scenarios = vec![
(LoadLevel::Low, 10, 100, 0, 5), // (load level, latency, QPS, error rate, connections)
(LoadLevel::Medium, 30, 300, 10, 20),
(LoadLevel::High, 80, 800, 50, 50),
(LoadLevel::Critical, 200, 1200, 100, 100),
];
for (expected_level, latency, qps, error_rate, connections) in load_scenarios {
// Update business metrics
scanner.update_business_metrics(latency, qps, error_rate, connections).await;
// Wait for monitoring system response
tokio::time::sleep(Duration::from_millis(1200)).await;
// Get current load level
let current_level = io_monitor.get_business_load_level().await;
// Get throttling decision
let metrics_snapshot = MetricsSnapshot {
iops: 100 + qps / 10,
latency,
cpu_usage: std::cmp::min(50 + (qps / 20) as u8, 100),
memory_usage: 40,
};
let decision = throttler.make_throttle_decision(current_level, Some(metrics_snapshot)).await;
println!(
"Load scenario test: Expected={:?}, Actual={:?}, Should_pause={}, Delay={:?}",
expected_level, current_level, decision.should_pause, decision.suggested_delay
);
// Verify throttling effect under high load
if matches!(current_level, LoadLevel::High | LoadLevel::Critical) {
assert!(decision.suggested_delay > Duration::from_millis(1000));
}
if matches!(current_level, LoadLevel::Critical) {
assert!(decision.should_pause);
}
}
io_monitor.stop().await;
}
#[tokio::test]
async fn test_checkpoint_resume_functionality() {
let temp_dir = TempDir::new().unwrap();
// Create first scanner instance
let scanner1 = {
let config = NodeScannerConfig {
data_dir: temp_dir.path().to_path_buf(),
..Default::default()
};
NodeScanner::new("checkpoint-test-node".to_string(), config)
};
// Initialize and simulate some scan progress
scanner1.initialize_stats().await.unwrap();
// Simulate scan progress
scanner1
.update_scan_progress_for_test(3, 1, Some("checkpoint-test-key".to_string()))
.await;
// Save checkpoint
scanner1.force_save_checkpoint().await.unwrap();
// Stop first scanner
scanner1.stop().await.unwrap();
// Create second scanner instance (simulate restart)
let scanner2 = {
let config = NodeScannerConfig {
data_dir: temp_dir.path().to_path_buf(),
..Default::default()
};
NodeScanner::new("checkpoint-test-node".to_string(), config)
};
// Try to recover from checkpoint
scanner2.start_with_resume().await.unwrap();
// Verify recovered progress
let recovered_progress = scanner2.get_scan_progress().await;
assert_eq!(recovered_progress.current_cycle, 3);
assert_eq!(recovered_progress.current_disk_index, 1);
assert_eq!(recovered_progress.last_scan_key, Some("checkpoint-test-key".to_string()));
// Cleanup
scanner2.cleanup_checkpoint().await.unwrap();
}
#[tokio::test]
async fn test_distributed_stats_aggregation() {
// Create decentralized stats aggregator
let config = DecentralizedStatsAggregatorConfig {
cache_ttl: Duration::from_secs(10), // Increase cache TTL to ensure cache is valid during test
node_timeout: Duration::from_millis(500), // Reduce timeout
..Default::default()
};
let aggregator = DecentralizedStatsAggregator::new(config);
// Simulate multiple nodes (these nodes don't exist in test environment, will cause connection failures)
let node_infos = vec![
NodeInfo {
node_id: "node-1".to_string(),
address: "127.0.0.1".to_string(),
port: 9001,
is_online: true,
last_heartbeat: std::time::SystemTime::now(),
version: "1.0.0".to_string(),
},
NodeInfo {
node_id: "node-2".to_string(),
address: "127.0.0.1".to_string(),
port: 9002,
is_online: true,
last_heartbeat: std::time::SystemTime::now(),
version: "1.0.0".to_string(),
},
];
// Add nodes to aggregator
for node_info in node_infos {
aggregator.add_node(node_info).await;
}
// Set local statistics (simulate local node)
let local_stats = StatsSummary {
node_id: "local-node".to_string(),
total_objects_scanned: 1000,
total_healthy_objects: 950,
total_corrupted_objects: 50,
total_bytes_scanned: 1024 * 1024 * 100, // 100MB
total_scan_errors: 5,
total_heal_triggered: 10,
total_disks: 4,
total_buckets: 5,
last_update: std::time::SystemTime::now(),
scan_progress: Default::default(),
data_usage: rustfs_common::data_usage::DataUsageInfo::default(),
};
aggregator.set_local_stats(local_stats).await;
// Get aggregated statistics (remote nodes will fail, but local node should succeed)
let aggregated = aggregator.get_aggregated_stats().await.unwrap();
// Verify local node statistics are included
assert!(aggregated.node_summaries.contains_key("local-node"));
assert!(aggregated.total_objects_scanned >= 1000);
// Only local node data due to remote node connection failures
assert_eq!(aggregated.node_summaries.len(), 1);
// Test caching mechanism
let original_timestamp = aggregated.aggregation_timestamp;
let start_time = std::time::Instant::now();
let cached_result = aggregator.get_aggregated_stats().await.unwrap();
let cached_duration = start_time.elapsed();
// Verify cache is effective: timestamps should be the same
assert_eq!(original_timestamp, cached_result.aggregation_timestamp);
// Cached calls should be fast (relaxed to 200ms for test environment)
assert!(cached_duration < Duration::from_millis(200));
// Force refresh
let _refreshed = aggregator.force_refresh_aggregated_stats().await.unwrap();
// Clear cache
aggregator.clear_cache().await;
// Verify cache status
let cache_status = aggregator.get_cache_status().await;
assert!(!cache_status.has_cached_data);
}
#[tokio::test]
async fn test_performance_impact_measurement() {
let temp_dir = TempDir::new().unwrap();
let scanner = create_test_scanner(&temp_dir).await;
// Start performance monitoring
let io_monitor = scanner.get_io_monitor();
let _throttler = scanner.get_io_throttler();
io_monitor.start().await.unwrap();
// Baseline test: no scanner load - measure multiple times for stability
const MEASUREMENT_COUNT: usize = 5;
let mut baseline_measurements = Vec::new();
for _ in 0..MEASUREMENT_COUNT {
let duration = measure_workload(10_000, Duration::ZERO).await;
baseline_measurements.push(duration);
}
// Use median to reduce impact of outliers
baseline_measurements.sort();
let median_idx = baseline_measurements.len() / 2;
let baseline_duration = baseline_measurements[median_idx].max(Duration::from_millis(20));
// Simulate scanner activity
scanner.update_business_metrics(50, 500, 0, 25).await;
tokio::time::sleep(Duration::from_millis(200)).await;
// Performance test: with scanner load - measure multiple times for stability
let mut scanner_measurements = Vec::new();
for _ in 0..MEASUREMENT_COUNT {
let duration = measure_workload(10_000, Duration::ZERO).await;
scanner_measurements.push(duration);
}
scanner_measurements.sort();
let median_idx = scanner_measurements.len() / 2;
let with_scanner_duration = scanner_measurements[median_idx].max(baseline_duration);
// Calculate performance impact
let baseline_ns = baseline_duration.as_nanos().max(1) as f64;
let overhead_duration = with_scanner_duration.saturating_sub(baseline_duration);
let overhead_ns = overhead_duration.as_nanos() as f64;
let overhead_ms = (overhead_ns / 1_000_000.0).round() as u64;
let impact_percentage = (overhead_ns / baseline_ns) * 100.0;
let benchmark = PerformanceBenchmark {
_scanner_overhead_ms: overhead_ms,
business_impact_percentage: impact_percentage,
_throttle_effectiveness: 95.0, // Simulated value
};
println!("Performance impact measurement:");
println!(" Baseline duration: {baseline_duration:?}");
println!(" With scanner duration: {with_scanner_duration:?}");
println!(" Overhead: {overhead_ms} ms");
println!(" Impact percentage: {impact_percentage:.2}%");
println!(" Meets optimization goals: {}", benchmark.meets_optimization_goals());
// Verify optimization target (business impact < 50%)
// Note: In test environment, allow higher threshold due to system load variability
// In production, the actual impact should be much lower (< 10%)
assert!(impact_percentage < 50.0, "Performance impact too high: {impact_percentage:.2}%");
io_monitor.stop().await;
}
#[tokio::test]
async fn test_concurrent_scanner_operations() {
let temp_dir = TempDir::new().unwrap();
let scanner = Arc::new(create_test_scanner(&temp_dir).await);
scanner.initialize_stats().await.unwrap();
// Execute multiple scanner operations concurrently
let tasks = vec![
// Task 1: Periodically update business metrics
{
let scanner = scanner.clone();
tokio::spawn(async move {
for i in 0..10 {
scanner.update_business_metrics(10 + i * 5, 100 + i * 10, i, 5 + i).await;
tokio::time::sleep(Duration::from_millis(50)).await;
}
})
},
// Task 2: Periodically save checkpoints
{
let scanner = scanner.clone();
tokio::spawn(async move {
for _i in 0..5 {
if let Err(e) = scanner.force_save_checkpoint().await {
eprintln!("Checkpoint save failed: {e}");
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
})
},
// Task 3: Periodically get statistics
{
let scanner = scanner.clone();
tokio::spawn(async move {
for _i in 0..8 {
let _summary = scanner.get_stats_summary().await;
let _progress = scanner.get_scan_progress().await;
tokio::time::sleep(Duration::from_millis(75)).await;
}
})
},
];
// Wait for all tasks to complete
for task in tasks {
task.await.unwrap();
}
// Verify final state
let final_stats = scanner.get_stats_summary().await;
let _final_progress = scanner.get_scan_progress().await;
assert_eq!(final_stats.node_id, "integration-test-node");
assert!(final_stats.last_update > std::time::SystemTime::UNIX_EPOCH);
// Cleanup
scanner.cleanup_checkpoint().await.unwrap();
}
// Helper function to simulate business workload
async fn simulate_business_workload(operations: usize) {
for _i in 0..operations {
// Simulate some CPU-intensive operations
let _result: u64 = (0..100).map(|x| x * x).sum();
// Small delay to simulate IO operations
if _i % 100 == 0 {
tokio::task::yield_now().await;
}
}
}
async fn measure_workload(operations: usize, extra_delay: Duration) -> Duration {
let start = std::time::Instant::now();
simulate_business_workload(operations).await;
if !extra_delay.is_zero() {
tokio::time::sleep(extra_delay).await;
}
start.elapsed()
}
#[tokio::test]
async fn test_error_recovery_and_resilience() {
let temp_dir = TempDir::new().unwrap();
let scanner = create_test_scanner(&temp_dir).await;
// Test recovery from stats initialization failure
scanner.initialize_stats().await.unwrap();
// Test recovery from checkpoint corruption
scanner.force_save_checkpoint().await.unwrap();
// Artificially corrupt checkpoint file (by writing invalid data)
let checkpoint_file = temp_dir.path().join("scanner_checkpoint_integration-test-node.json");
if checkpoint_file.exists() {
tokio::fs::write(&checkpoint_file, "invalid json data").await.unwrap();
}
// Verify system can gracefully handle corrupted checkpoint
let checkpoint_info = scanner.get_checkpoint_info().await;
// Should return error or null value, not crash
assert!(checkpoint_info.is_err() || checkpoint_info.unwrap().is_none());
// Clean up corrupted checkpoint
scanner.cleanup_checkpoint().await.unwrap();
// Verify ability to recreate valid checkpoint
scanner.force_save_checkpoint().await.unwrap();
let new_checkpoint_info = scanner.get_checkpoint_info().await.unwrap();
assert!(new_checkpoint_info.is_some());
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/lifecycle_integration_test.rs | crates/ahm/tests/lifecycle_integration_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::scanner::{Scanner, data_scanner::ScannerConfig};
use rustfs_ecstore::{
bucket::metadata::BUCKET_LIFECYCLE_CONFIG,
bucket::metadata_sys,
disk::endpoint::Endpoint,
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
global::GLOBAL_TierConfigMgr,
store::ECStore,
store_api::{MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
tier::tier_config::{TierConfig, TierMinIO, TierType},
};
use serial_test::serial;
use std::{
path::PathBuf,
sync::{Arc, Once, OnceLock},
time::Duration,
};
use tokio::fs;
use tokio_util::sync::CancellationToken;
use tracing::info;
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
static INIT: Once = Once::new();
fn init_tracing() {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt::try_init();
});
}
/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
init_tracing();
// Fast path: already initialized, just clone and return
if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
return (paths.clone(), ecstore.clone());
}
// create temp dir as 4 disks with unique base dir
let test_base_dir = format!("/tmp/rustfs_ahm_lifecycle_test_{}", uuid::Uuid::new_v4());
let temp_dir = std::path::PathBuf::from(&test_base_dir);
if temp_dir.exists() {
fs::remove_dir_all(&temp_dir).await.ok();
}
fs::create_dir_all(&temp_dir).await.unwrap();
// create 4 disk dirs
let disk_paths = vec![
temp_dir.join("disk1"),
temp_dir.join("disk2"),
temp_dir.join("disk3"),
temp_dir.join("disk4"),
];
for disk_path in &disk_paths {
fs::create_dir_all(disk_path).await.unwrap();
}
// create EndpointServerPools
let mut endpoints = Vec::new();
for (i, disk_path) in disk_paths.iter().enumerate() {
let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
// set correct index
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(i);
endpoints.push(endpoint);
}
let pool_endpoints = PoolEndpoints {
legacy: false,
set_count: 1,
drives_per_set: 4,
endpoints: Endpoints::from(endpoints),
cmd_line: "test".to_string(),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
// format disks (only first time)
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();
// create ECStore with dynamic port 0 (let OS assign) or fixed 9002 if free
let port = 9002; // for simplicity
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions {
no_metadata: true,
..Default::default()
})
.await
.unwrap();
let buckets = buckets_list.into_iter().map(|v| v.name).collect();
rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;
// Initialize background expiry workers
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry(ecstore.clone()).await;
// Store in global once lock
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
(disk_paths, ecstore)
}
/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(bucket_name, &Default::default())
.await
.expect("Failed to create test bucket");
info!("Created test bucket: {}", bucket_name);
}
/// Test helper: Create a test lock bucket
async fn create_test_lock_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(
bucket_name,
&MakeBucketOptions {
lock_enabled: true,
versioning_enabled: true,
..Default::default()
},
)
.await
.expect("Failed to create test bucket");
info!("Created test bucket: {}", bucket_name);
}
/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
let mut reader = PutObjReader::from_vec(data.to_vec());
let object_info = (**ecstore)
.put_object(bucket, object, &mut reader, &ObjectOptions::default())
.await
.expect("Failed to upload test object");
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}
/// Test helper: Set bucket lifecycle configuration
async fn set_bucket_lifecycle(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
// Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<ID>test-rule</ID>
<Status>Enabled</Status>
<Filter>
<Prefix>test/</Prefix>
</Filter>
<Expiration>
<Days>0</Days>
</Expiration>
</Rule>
</LifecycleConfiguration>"#;
metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;
Ok(())
}
/// Test helper: Set bucket lifecycle configuration
async fn set_bucket_lifecycle_deletemarker(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
// Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<ID>test-rule</ID>
<Status>Enabled</Status>
<Filter>
<Prefix>test/</Prefix>
</Filter>
<Expiration>
<Days>0</Days>
<ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker>
</Expiration>
</Rule>
</LifecycleConfiguration>"#;
metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;
Ok(())
}
#[allow(dead_code)]
async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
// Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<ID>test-rule</ID>
<Status>Enabled</Status>
<Filter>
<Prefix>test/</Prefix>
</Filter>
<Transition>
<Days>0</Days>
<StorageClass>COLDTIER44</StorageClass>
</Transition>
</Rule>
<Rule>
<ID>test-rule2</ID>
<Status>Disabled</Status>
<Filter>
<Prefix>test/</Prefix>
</Filter>
<NoncurrentVersionTransition>
<NoncurrentDays>0</NoncurrentDays>
<StorageClass>COLDTIER44</StorageClass>
</NoncurrentVersionTransition>
</Rule>
</LifecycleConfiguration>"#;
metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;
Ok(())
}
/// Test helper: Create a test tier
#[allow(dead_code)]
async fn create_test_tier(server: u32) {
let args = TierConfig {
version: "v1".to_string(),
tier_type: TierType::MinIO,
name: "COLDTIER44".to_string(),
s3: None,
aliyun: None,
tencent: None,
huaweicloud: None,
azure: None,
gcs: None,
r2: None,
rustfs: None,
minio: if server == 1 {
Some(TierMinIO {
access_key: "minioadmin".to_string(),
secret_key: "minioadmin".to_string(),
bucket: "hello".to_string(),
endpoint: "http://39.105.198.204:9000".to_string(),
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
region: "".to_string(),
..Default::default()
})
} else {
Some(TierMinIO {
access_key: "minioadmin".to_string(),
secret_key: "minioadmin".to_string(),
bucket: "mblock2".to_string(),
endpoint: "http://127.0.0.1:9020".to_string(),
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
region: "".to_string(),
..Default::default()
})
},
};
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
if let Err(err) = tier_config_mgr.add(args, false).await {
println!("tier_config_mgr add failed, e: {err:?}");
panic!("tier add failed. {err}");
}
if let Err(e) = tier_config_mgr.save().await {
println!("tier_config_mgr save failed, e: {e:?}");
panic!("tier save failed");
}
println!("Created test tier: COLDTIER44");
}
/// Test helper: Check if object exists
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
match (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
Ok(info) => !info.delete_marker,
Err(_) => false,
}
}
/// Test helper: Check if object exists
#[allow(dead_code)]
async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
println!("oi: {oi:?}");
oi.delete_marker
} else {
println!("object_is_delete_marker is error");
panic!("object_is_delete_marker is error");
}
}
/// Test helper: Check if object exists
#[allow(dead_code)]
async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
println!("oi: {oi:?}");
!oi.transitioned_object.status.is_empty()
} else {
println!("object_is_transitioned is error");
panic!("object_is_transitioned is error");
}
}
async fn wait_for_object_absence(ecstore: &Arc<ECStore>, bucket: &str, object: &str, timeout: Duration) -> bool {
let deadline = tokio::time::Instant::now() + timeout;
loop {
if !object_exists(ecstore, bucket, object).await {
return true;
}
if tokio::time::Instant::now() >= deadline {
return false;
}
tokio::time::sleep(Duration::from_millis(200)).await;
}
}
mod serial_tests {
use super::*;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_lifecycle_expiry_basic() {
let (_disk_paths, ecstore) = setup_test_env().await;
// Create test bucket and object
let suffix = uuid::Uuid::new_v4().simple().to_string();
let bucket_name = format!("test-lc-expiry-basic-{}", &suffix[..8]);
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
// Verify object exists initially
assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
println!("✅ Object exists before lifecycle processing");
// Set lifecycle configuration with very short expiry (0 days = immediate expiry)
set_bucket_lifecycle(bucket_name.as_str())
.await
.expect("Failed to set lifecycle configuration");
println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
// Verify lifecycle configuration was set
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
Ok(bucket_meta) => {
assert!(bucket_meta.lifecycle_config.is_some());
println!("✅ Bucket metadata retrieved successfully");
}
Err(e) => {
println!("❌ Error retrieving bucket metadata: {e:?}");
}
}
// Create scanner with very short intervals for testing
let scanner_config = ScannerConfig {
scan_interval: Duration::from_millis(100),
deep_scan_interval: Duration::from_millis(500),
max_concurrent_scans: 1,
..Default::default()
};
let scanner = Scanner::new(Some(scanner_config), None);
// Start scanner
scanner.start().await.expect("Failed to start scanner");
println!("✅ Scanner started");
// Wait for scanner to process lifecycle rules
tokio::time::sleep(Duration::from_secs(2)).await;
// Manually trigger a scan cycle to ensure lifecycle processing
scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
println!("✅ Manual scan cycle completed");
let mut expired = false;
for attempt in 0..3 {
if attempt > 0 {
scanner.scan_cycle().await.expect("Failed to trigger scan cycle on retry");
}
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(5)).await;
if expired {
break;
}
}
println!("Object is_delete_marker after lifecycle processing: {}", !expired);
if !expired {
let pending = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::GLOBAL_ExpiryState
.read()
.await
.pending_tasks()
.await;
println!("Pending expiry tasks: {pending}");
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
&& let Ok(object_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
if !expired {
println!("❌ Object was not deleted by lifecycle processing");
}
} else {
println!("✅ Object was successfully deleted by lifecycle processing");
// Let's try to get object info to see its details
match ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
Ok(obj_info) => {
println!(
"Object info: name={}, size={}, mod_time={:?}",
obj_info.name, obj_info.size, obj_info.mod_time
);
}
Err(e) => {
println!("Error getting object info: {e:?}");
}
}
}
assert!(expired);
println!("✅ Object successfully expired");
// Stop scanner
let _ = scanner.stop().await;
println!("✅ Scanner stopped");
println!("Lifecycle expiry basic test completed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[serial]
//#[ignore]
async fn test_lifecycle_expiry_deletemarker() {
let (_disk_paths, ecstore) = setup_test_env().await;
// Create test bucket and object
let suffix = uuid::Uuid::new_v4().simple().to_string();
let bucket_name = format!("test-lc-expiry-marker-{}", &suffix[..8]);
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
// Verify object exists initially
assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
println!("✅ Object exists before lifecycle processing");
// Set lifecycle configuration with very short expiry (0 days = immediate expiry)
set_bucket_lifecycle_deletemarker(bucket_name.as_str())
.await
.expect("Failed to set lifecycle configuration");
println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
// Verify lifecycle configuration was set
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
Ok(bucket_meta) => {
assert!(bucket_meta.lifecycle_config.is_some());
println!("✅ Bucket metadata retrieved successfully");
}
Err(e) => {
println!("❌ Error retrieving bucket metadata: {e:?}");
}
}
// Create scanner with very short intervals for testing
let scanner_config = ScannerConfig {
scan_interval: Duration::from_millis(100),
deep_scan_interval: Duration::from_millis(500),
max_concurrent_scans: 1,
..Default::default()
};
let scanner = Scanner::new(Some(scanner_config), None);
// Start scanner
scanner.start().await.expect("Failed to start scanner");
println!("✅ Scanner started");
// Wait for scanner to process lifecycle rules
tokio::time::sleep(Duration::from_secs(2)).await;
// Manually trigger a scan cycle to ensure lifecycle processing
scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
println!("✅ Manual scan cycle completed");
let mut deleted = false;
for attempt in 0..3 {
if attempt > 0 {
scanner.scan_cycle().await.expect("Failed to trigger scan cycle on retry");
}
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(5)).await;
if deleted {
break;
}
}
println!("Object exists after lifecycle processing: {}", !deleted);
if !deleted {
let pending = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::GLOBAL_ExpiryState
.read()
.await
.pending_tasks()
.await;
println!("Pending expiry tasks: {pending}");
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
&& let Ok(obj_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config, None, None, &obj_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&obj_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
if !deleted {
println!(
"Object info: name={}, size={}, mod_time={:?}",
obj_info.name, obj_info.size, obj_info.mod_time
);
}
}
if !deleted {
println!("❌ Object was not deleted by lifecycle processing");
}
} else {
println!("✅ Object was successfully deleted by lifecycle processing");
}
assert!(deleted);
println!("✅ Object successfully expired");
// Stop scanner
let _ = scanner.stop().await;
println!("✅ Scanner stopped");
println!("Lifecycle expiry basic test completed");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[serial]
#[ignore]
async fn test_lifecycle_transition_basic() {
let (_disk_paths, ecstore) = setup_test_env().await;
create_test_tier(1).await;
// Create test bucket and object
let suffix = uuid::Uuid::new_v4().simple().to_string();
let bucket_name = format!("test-lc-transition-{}", &suffix[..8]);
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
//create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
create_test_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
// Verify object exists initially
assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
println!("✅ Object exists before lifecycle processing");
// Set lifecycle configuration with very short expiry (0 days = immediate expiry)
set_bucket_lifecycle_transition(bucket_name.as_str())
.await
.expect("Failed to set lifecycle configuration");
println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
// Verify lifecycle configuration was set
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
Ok(bucket_meta) => {
assert!(bucket_meta.lifecycle_config.is_some());
println!("✅ Bucket metadata retrieved successfully");
}
Err(e) => {
println!("❌ Error retrieving bucket metadata: {e:?}");
}
}
// Create scanner with very short intervals for testing
let scanner_config = ScannerConfig {
scan_interval: Duration::from_millis(100),
deep_scan_interval: Duration::from_millis(500),
max_concurrent_scans: 1,
..Default::default()
};
let scanner = Scanner::new(Some(scanner_config), None);
// Start scanner
scanner.start().await.expect("Failed to start scanner");
println!("✅ Scanner started");
// Wait for scanner to process lifecycle rules
tokio::time::sleep(Duration::from_secs(2)).await;
// Manually trigger a scan cycle to ensure lifecycle processing
scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
println!("✅ Manual scan cycle completed");
// Wait a bit more for background workers to process expiry tasks
tokio::time::sleep(Duration::from_secs(5)).await;
// Check if object has been expired (deleted)
let check_result = object_is_transitioned(&ecstore, &bucket_name, object_name).await;
println!("Object exists after lifecycle processing: {check_result}");
if check_result {
println!("✅ Object was transitioned by lifecycle processing");
// Let's try to get object info to see its details
match ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
Ok(obj_info) => {
println!(
"Object info: name={}, size={}, mod_time={:?}",
obj_info.name, obj_info.size, obj_info.mod_time
);
println!("Object info: transitioned_object={:?}", obj_info.transitioned_object);
}
Err(e) => {
println!("Error getting object info: {e:?}");
}
}
} else {
println!("❌ Object was not transitioned by lifecycle processing");
}
assert!(check_result);
println!("✅ Object successfully transitioned");
// Stop scanner
let _ = scanner.stop().await;
println!("✅ Scanner stopped");
println!("Lifecycle transition basic test completed");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/data_usage_fallback_test.rs | crates/ahm/tests/data_usage_fallback_test.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(test)]
use rustfs_ahm::scanner::data_scanner::Scanner;
use rustfs_common::data_usage::DataUsageInfo;
use rustfs_ecstore::GLOBAL_Endpoints;
use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetadataSys};
use rustfs_ecstore::endpoints::EndpointServerPools;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
use std::sync::{Arc, Once};
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::Level;
/// Build a minimal single-node ECStore over a temp directory and populate objects.
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
let temp_dir = TempDir::new().expect("temp dir");
let root = temp_dir.path().to_string_lossy().to_string();
// Create endpoints from the temp dir
let (endpoint_pools, _setup) = EndpointServerPools::from_volumes("127.0.0.1:0", vec![root])
.await
.expect("endpoint pools");
// Seed globals required by metadata sys if not already set
if GLOBAL_Endpoints.get().is_none() {
let _ = GLOBAL_Endpoints.set(endpoint_pools.clone());
}
let store = ECStore::new("127.0.0.1:0".parse().unwrap(), endpoint_pools, CancellationToken::new())
.await
.expect("create store");
if rustfs_ecstore::global::new_object_layer_fn().is_none() {
rustfs_ecstore::global::set_object_layer(store.clone()).await;
}
// Initialize metadata system before bucket operations
if GLOBAL_BucketMetadataSys.get().is_none() {
let mut sys = BucketMetadataSys::new(store.clone());
sys.init(Vec::new()).await;
let _ = GLOBAL_BucketMetadataSys.set(Arc::new(RwLock::new(sys)));
}
store
.make_bucket("fallback-bucket", &rustfs_ecstore::store_api::MakeBucketOptions::default())
.await
.expect("make bucket");
for i in 0..count {
let key = format!("obj-{i:04}");
let data = format!("payload-{i}");
let mut reader = PutObjReader::from_vec(data.into_bytes());
store
.put_object("fallback-bucket", &key, &mut reader, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
.expect("put object");
}
(temp_dir, store)
}
static INIT: Once = Once::new();
fn init_tracing(filter_level: Level) {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_max_level(filter_level)
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}
#[tokio::test]
async fn fallback_builds_full_counts_over_100_objects() {
init_tracing(Level::ERROR);
let (_tmp, store) = create_store_with_objects(1000).await;
let scanner = Scanner::new(None, None);
// Directly call the fallback builder to ensure pagination works.
let usage: DataUsageInfo = scanner.build_data_usage_from_ecstore(&store).await.expect("fallback usage");
let bucket = usage.buckets_usage.get("fallback-bucket").expect("bucket usage present");
assert!(
usage.objects_total_count >= 1000,
"total objects should be >=1000, got {}",
usage.objects_total_count
);
assert!(
bucket.objects_count >= 1000,
"bucket objects should be >=1000, got {}",
bucket.objects_count
);
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ahm/tests/optimized_scanner_tests.rs | crates/ahm/tests/optimized_scanner_tests.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ahm::heal::manager::HealConfig;
use rustfs_ahm::scanner::{
Scanner,
data_scanner::ScanMode,
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig},
};
use rustfs_ecstore::{
StorageAPI,
disk::endpoint::Endpoint,
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
store::ECStore,
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
};
use serial_test::serial;
use std::{fs, net::SocketAddr, sync::Arc, sync::OnceLock, time::Duration};
use tempfile::TempDir;
use tokio_util::sync::CancellationToken;
// Global test environment cache to avoid repeated initialization
static GLOBAL_TEST_ENV: OnceLock<(Vec<std::path::PathBuf>, Arc<ECStore>)> = OnceLock::new();
async fn prepare_test_env(test_dir: Option<&str>, port: Option<u16>) -> (Vec<std::path::PathBuf>, Arc<ECStore>) {
// Check if global environment is already initialized
if let Some((disk_paths, ecstore)) = GLOBAL_TEST_ENV.get() {
return (disk_paths.clone(), ecstore.clone());
}
// create temp dir as 4 disks
let test_base_dir = test_dir.unwrap_or("/tmp/rustfs_ahm_optimized_test");
let temp_dir = std::path::PathBuf::from(test_base_dir);
if temp_dir.exists() {
fs::remove_dir_all(&temp_dir).unwrap();
}
fs::create_dir_all(&temp_dir).unwrap();
// create 4 disk dirs
let disk_paths = vec![
temp_dir.join("disk1"),
temp_dir.join("disk2"),
temp_dir.join("disk3"),
temp_dir.join("disk4"),
];
for disk_path in &disk_paths {
fs::create_dir_all(disk_path).unwrap();
}
// create EndpointServerPools
let mut endpoints = Vec::new();
for (i, disk_path) in disk_paths.iter().enumerate() {
let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
// set correct index
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(i);
endpoints.push(endpoint);
}
let pool_endpoints = PoolEndpoints {
legacy: false,
set_count: 1,
drives_per_set: 4,
endpoints: Endpoints::from(endpoints),
cmd_line: "test".to_string(),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
// format disks
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();
// create ECStore with dynamic port
let port = port.unwrap_or(9000);
let server_addr: SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions {
no_metadata: true,
..Default::default()
})
.await
.unwrap();
let buckets = buckets_list.into_iter().map(|v| v.name).collect();
rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;
// Store in global cache
let _ = GLOBAL_TEST_ENV.set((disk_paths.clone(), ecstore.clone()));
(disk_paths, ecstore)
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_scanner_basic_functionality() {
const TEST_DIR_BASIC: &str = "/tmp/rustfs_ahm_optimized_test_basic";
let (disk_paths, ecstore) = prepare_test_env(Some(TEST_DIR_BASIC), Some(9101)).await;
// create some test data
let bucket_name = "test-bucket";
let object_name = "test-object";
let test_data = b"Hello, Optimized RustFS!";
// create bucket and verify
let bucket_opts = MakeBucketOptions::default();
ecstore
.make_bucket(bucket_name, &bucket_opts)
.await
.expect("make_bucket failed");
// check bucket really exists
let buckets = ecstore
.list_bucket(&rustfs_ecstore::store_api::BucketOptions::default())
.await
.unwrap();
assert!(buckets.iter().any(|b| b.name == bucket_name), "bucket not found after creation");
// write object
let mut put_reader = PutObjReader::from_vec(test_data.to_vec());
let object_opts = rustfs_ecstore::store_api::ObjectOptions::default();
ecstore
.put_object(bucket_name, object_name, &mut put_reader, &object_opts)
.await
.expect("put_object failed");
// create optimized Scanner and test basic functionality
let scanner = Scanner::new(None, None);
// Test 1: Normal scan - verify object is found
println!("=== Test 1: Optimized Normal scan ===");
let scan_result = scanner.scan_cycle().await;
assert!(scan_result.is_ok(), "Optimized normal scan should succeed");
let _metrics = scanner.get_metrics().await;
// Note: The optimized scanner may not immediately show scanned objects as it works differently
println!("Optimized normal scan completed successfully");
// Test 2: Simulate disk corruption - delete object data from disk1
println!("=== Test 2: Optimized corruption handling ===");
let disk1_bucket_path = disk_paths[0].join(bucket_name);
let disk1_object_path = disk1_bucket_path.join(object_name);
// Try to delete the object file from disk1 (simulate corruption)
// Note: This might fail if ECStore is actively using the file
match fs::remove_dir_all(&disk1_object_path) {
Ok(_) => {
println!("Successfully deleted object from disk1: {disk1_object_path:?}");
// Verify deletion by checking if the directory still exists
if disk1_object_path.exists() {
println!("WARNING: Directory still exists after deletion: {disk1_object_path:?}");
} else {
println!("Confirmed: Directory was successfully deleted");
}
}
Err(e) => {
println!("Could not delete object from disk1 (file may be in use): {disk1_object_path:?} - {e}");
// This is expected behavior - ECStore might be holding file handles
}
}
// Scan again - should still complete (even with missing data)
let scan_result_after_corruption = scanner.scan_cycle().await;
println!("Optimized scan after corruption result: {scan_result_after_corruption:?}");
// Scanner should handle missing data gracefully
assert!(
scan_result_after_corruption.is_ok(),
"Optimized scanner should handle missing data gracefully"
);
// Test 3: Test metrics collection
println!("=== Test 3: Optimized metrics collection ===");
let final_metrics = scanner.get_metrics().await;
println!("Optimized final metrics: {final_metrics:?}");
// Verify metrics are available (even if different from legacy scanner)
assert!(final_metrics.last_activity.is_some(), "Should have scan activity");
// clean up temp dir
let temp_dir = std::path::PathBuf::from(TEST_DIR_BASIC);
if let Err(e) = fs::remove_dir_all(&temp_dir) {
eprintln!("Warning: Failed to clean up temp directory {temp_dir:?}: {e}");
}
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_scanner_usage_stats() {
const TEST_DIR_USAGE_STATS: &str = "/tmp/rustfs_ahm_optimized_test_usage_stats";
let (_, ecstore) = prepare_test_env(Some(TEST_DIR_USAGE_STATS), Some(9102)).await;
// prepare test bucket and object
let bucket = "test-bucket-optimized";
ecstore.make_bucket(bucket, &Default::default()).await.unwrap();
let mut pr = PutObjReader::from_vec(b"hello optimized".to_vec());
ecstore
.put_object(bucket, "obj1", &mut pr, &Default::default())
.await
.unwrap();
let scanner = Scanner::new(None, None);
// enable statistics
scanner.set_config_enable_data_usage_stats(true).await;
// first scan and get statistics
scanner.scan_cycle().await.unwrap();
let du_initial = scanner.get_data_usage_info().await.unwrap();
// Note: Optimized scanner may work differently, so we're less strict about counts
println!("Initial data usage: {du_initial:?}");
// write 3 more objects and get statistics again
for size in [1024, 2048, 4096] {
let name = format!("obj_{size}");
let mut pr = PutObjReader::from_vec(vec![b'x'; size]);
ecstore.put_object(bucket, &name, &mut pr, &Default::default()).await.unwrap();
}
scanner.scan_cycle().await.unwrap();
let du_after = scanner.get_data_usage_info().await.unwrap();
println!("Data usage after adding objects: {du_after:?}");
// The optimized scanner should at least not crash and return valid data
// buckets_count is u64, so it's always >= 0
assert!(du_after.buckets_count == du_after.buckets_count);
// clean up temp dir
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_USAGE_STATS));
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_volume_healing_functionality() {
const TEST_DIR_VOLUME_HEAL: &str = "/tmp/rustfs_ahm_optimized_test_volume_heal";
let (disk_paths, ecstore) = prepare_test_env(Some(TEST_DIR_VOLUME_HEAL), Some(9103)).await;
// Create test buckets
let bucket1 = "test-bucket-1-opt";
let bucket2 = "test-bucket-2-opt";
ecstore.make_bucket(bucket1, &Default::default()).await.unwrap();
ecstore.make_bucket(bucket2, &Default::default()).await.unwrap();
// Add some test objects
let mut pr1 = PutObjReader::from_vec(b"test data 1 optimized".to_vec());
ecstore
.put_object(bucket1, "obj1", &mut pr1, &Default::default())
.await
.unwrap();
let mut pr2 = PutObjReader::from_vec(b"test data 2 optimized".to_vec());
ecstore
.put_object(bucket2, "obj2", &mut pr2, &Default::default())
.await
.unwrap();
// Simulate missing bucket on one disk by removing bucket directory
let disk1_bucket1_path = disk_paths[0].join(bucket1);
if disk1_bucket1_path.exists() {
println!("Removing bucket directory to simulate missing volume: {disk1_bucket1_path:?}");
match fs::remove_dir_all(&disk1_bucket1_path) {
Ok(_) => println!("Successfully removed bucket directory from disk 0"),
Err(e) => println!("Failed to remove bucket directory: {e}"),
}
}
// Create optimized scanner
let scanner = Scanner::new(None, None);
// Enable healing in config
scanner.set_config_enable_healing(true).await;
println!("=== Testing optimized volume healing functionality ===");
// Run scan cycle which should detect missing volume
let scan_result = scanner.scan_cycle().await;
assert!(scan_result.is_ok(), "Optimized scan cycle should succeed");
// Get metrics to verify scan completed
let metrics = scanner.get_metrics().await;
println!("Optimized volume healing detection test completed successfully");
println!("Optimized scan metrics: {metrics:?}");
// Clean up
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_VOLUME_HEAL));
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_performance_characteristics() {
const TEST_DIR_PERF: &str = "/tmp/rustfs_ahm_optimized_test_perf";
let (_, ecstore) = prepare_test_env(Some(TEST_DIR_PERF), Some(9104)).await;
// Create test bucket with multiple objects
let bucket_name = "performance-test-bucket";
ecstore.make_bucket(bucket_name, &Default::default()).await.unwrap();
// Create several test objects
for i in 0..10 {
let object_name = format!("perf-object-{i}");
let test_data = vec![b'A' + (i % 26) as u8; 1024 * (i + 1)]; // Variable size objects
let mut put_reader = PutObjReader::from_vec(test_data);
let object_opts = rustfs_ecstore::store_api::ObjectOptions::default();
ecstore
.put_object(bucket_name, &object_name, &mut put_reader, &object_opts)
.await
.unwrap_or_else(|_| panic!("Failed to create object {object_name}"));
}
// Create optimized scanner
let scanner = Scanner::new(None, None);
// Test performance characteristics
println!("=== Testing optimized scanner performance ===");
// Measure scan time
let start_time = std::time::Instant::now();
let scan_result = scanner.scan_cycle().await;
let scan_duration = start_time.elapsed();
println!("Optimized scan completed in: {scan_duration:?}");
assert!(scan_result.is_ok(), "Performance scan should succeed");
// Verify the scan was reasonably fast (should be faster than old concurrent scanner)
// Note: This is a rough check - in practice, optimized scanner should be much faster
assert!(
scan_duration < Duration::from_secs(30),
"Optimized scan should complete within 30 seconds"
);
// Test memory usage is reasonable (indirect test through successful completion)
let metrics = scanner.get_metrics().await;
println!("Performance test metrics: {metrics:?}");
// Test that multiple scans don't degrade performance significantly
let start_time2 = std::time::Instant::now();
let _scan_result2 = scanner.scan_cycle().await;
let scan_duration2 = start_time2.elapsed();
println!("Second optimized scan completed in: {scan_duration2:?}");
// Second scan should be similar or faster due to caching
let performance_ratio = scan_duration2.as_millis() as f64 / scan_duration.as_millis() as f64;
println!("Performance ratio (second/first): {performance_ratio:.2}");
// Clean up
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_PERF));
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_load_balancing_and_throttling() {
let temp_dir = TempDir::new().unwrap();
// Create a node scanner with optimized configuration
let config = NodeScannerConfig {
data_dir: temp_dir.path().to_path_buf(),
enable_smart_scheduling: true,
scan_interval: Duration::from_millis(100), // Fast for testing
disk_scan_delay: Duration::from_millis(50),
..Default::default()
};
let node_scanner = NodeScanner::new("test-optimized-node".to_string(), config);
// Initialize the scanner
node_scanner.initialize_stats().await.unwrap();
let io_monitor = node_scanner.get_io_monitor();
let throttler = node_scanner.get_io_throttler();
// Start IO monitoring
io_monitor.start().await.expect("Failed to start IO monitor");
// Test load balancing scenarios
let load_scenarios = vec![
(LoadLevel::Low, 10, 100, 0, 5), // (load level, latency, qps, error rate, connections)
(LoadLevel::Medium, 30, 300, 10, 20),
(LoadLevel::High, 80, 800, 50, 50),
(LoadLevel::Critical, 200, 1200, 100, 100),
];
for (expected_level, latency, qps, error_rate, connections) in load_scenarios {
println!("Testing load scenario: {expected_level:?}");
// Update business metrics to simulate load
node_scanner
.update_business_metrics(latency, qps, error_rate, connections)
.await;
// Wait for monitoring system to respond
tokio::time::sleep(Duration::from_millis(500)).await;
// Get current load level
let current_level = io_monitor.get_business_load_level().await;
println!("Detected load level: {current_level:?}");
// Get throttling decision
let _current_metrics = io_monitor.get_current_metrics().await;
let metrics_snapshot = rustfs_ahm::scanner::io_throttler::MetricsSnapshot {
iops: 100 + qps / 10,
latency,
cpu_usage: std::cmp::min(50 + (qps / 20) as u8, 100),
memory_usage: 40,
};
let decision = throttler.make_throttle_decision(current_level, Some(metrics_snapshot)).await;
println!(
"Throttle decision: should_pause={}, delay={:?}",
decision.should_pause, decision.suggested_delay
);
// Verify throttling behavior
match current_level {
LoadLevel::Critical => {
assert!(decision.should_pause, "Critical load should trigger pause");
}
LoadLevel::High => {
assert!(
decision.suggested_delay > Duration::from_millis(1000),
"High load should suggest significant delay"
);
}
_ => {
// Lower loads should have reasonable delays
assert!(
decision.suggested_delay < Duration::from_secs(5),
"Lower loads should not have excessive delays"
);
}
}
}
io_monitor.stop().await;
println!("Optimized load balancing and throttling test completed successfully");
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_scanner_detect_missing_data_parts() {
const TEST_DIR_MISSING_PARTS: &str = "/tmp/rustfs_ahm_optimized_test_missing_parts";
let (disk_paths, ecstore) = prepare_test_env(Some(TEST_DIR_MISSING_PARTS), Some(9105)).await;
// Create test bucket
let bucket_name = "test-bucket-parts-opt";
let object_name = "large-object-20mb-opt";
ecstore.make_bucket(bucket_name, &Default::default()).await.unwrap();
// Create a 20MB object to ensure it has multiple parts
let large_data = vec![b'A'; 20 * 1024 * 1024]; // 20MB of 'A' characters
let mut put_reader = PutObjReader::from_vec(large_data);
let object_opts = rustfs_ecstore::store_api::ObjectOptions::default();
println!("=== Creating 20MB object ===");
ecstore
.put_object(bucket_name, object_name, &mut put_reader, &object_opts)
.await
.expect("put_object failed for large object");
// Verify object was created and get its info
let obj_info = ecstore
.get_object_info(bucket_name, object_name, &object_opts)
.await
.expect("get_object_info failed");
println!(
"Object info: size={}, parts={}, inlined={}",
obj_info.size,
obj_info.parts.len(),
obj_info.inlined
);
assert!(!obj_info.inlined, "20MB object should not be inlined");
println!("Object has {} parts", obj_info.parts.len());
// Create HealManager and optimized Scanner
let heal_storage = Arc::new(rustfs_ahm::heal::storage::ECStoreHealStorage::new(ecstore.clone()));
let heal_config = HealConfig {
enable_auto_heal: true,
heal_interval: Duration::from_millis(100),
max_concurrent_heals: 4,
task_timeout: Duration::from_secs(300),
queue_size: 1000,
};
let heal_manager = Arc::new(rustfs_ahm::heal::HealManager::new(heal_storage, Some(heal_config)));
heal_manager.start().await.unwrap();
let scanner = Scanner::new(None, Some(heal_manager.clone()));
// Enable healing to detect missing parts
scanner.set_config_enable_healing(true).await;
scanner.set_config_scan_mode(ScanMode::Deep).await;
println!("=== Initial scan (all parts present) ===");
let initial_scan = scanner.scan_cycle().await;
assert!(initial_scan.is_ok(), "Initial scan should succeed");
let initial_metrics = scanner.get_metrics().await;
println!("Initial scan metrics: objects_scanned={}", initial_metrics.objects_scanned);
// Simulate data part loss by deleting part files from some disks
println!("=== Simulating data part loss ===");
let mut deleted_parts = 0;
let mut deleted_part_paths = Vec::new();
for (disk_idx, disk_path) in disk_paths.iter().enumerate() {
if disk_idx > 0 {
// Only delete from first disk
break;
}
let bucket_path = disk_path.join(bucket_name);
let object_path = bucket_path.join(object_name);
if !object_path.exists() {
continue;
}
// Find the data directory (UUID)
if let Ok(entries) = fs::read_dir(&object_path) {
for entry in entries.flatten() {
let entry_path = entry.path();
if entry_path.is_dir() {
// This is likely the data_dir, look for part files inside
let part_file_path = entry_path.join("part.1");
if part_file_path.exists() {
match fs::remove_file(&part_file_path) {
Ok(_) => {
println!("Deleted part file: {part_file_path:?}");
deleted_part_paths.push(part_file_path);
deleted_parts += 1;
}
Err(e) => {
println!("Failed to delete part file {part_file_path:?}: {e}");
}
}
}
}
}
}
}
println!("Deleted {deleted_parts} part files to simulate data loss");
// Scan again to detect missing parts
println!("=== Scan after data deletion (should detect missing data) ===");
let scan_after_deletion = scanner.scan_cycle().await;
// Wait a bit for the heal manager to process
tokio::time::sleep(Duration::from_millis(500)).await;
// Check heal statistics
let heal_stats = heal_manager.get_statistics().await;
println!("Heal statistics:");
println!(" - total_tasks: {}", heal_stats.total_tasks);
println!(" - successful_tasks: {}", heal_stats.successful_tasks);
println!(" - failed_tasks: {}", heal_stats.failed_tasks);
// Get scanner metrics
let final_metrics = scanner.get_metrics().await;
println!("Scanner metrics after deletion scan:");
println!(" - objects_scanned: {}", final_metrics.objects_scanned);
// The optimized scanner should handle missing data gracefully
match scan_after_deletion {
Ok(_) => {
println!("Optimized scanner completed successfully despite missing data");
}
Err(e) => {
println!("Optimized scanner detected errors (acceptable): {e}");
}
}
println!("=== Test completed ===");
println!("Optimized scanner successfully handled missing data scenario");
// Clean up
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_MISSING_PARTS));
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_scanner_detect_missing_xl_meta() {
const TEST_DIR_MISSING_META: &str = "/tmp/rustfs_ahm_optimized_test_missing_meta";
let (disk_paths, ecstore) = prepare_test_env(Some(TEST_DIR_MISSING_META), Some(9106)).await;
// Create test bucket
let bucket_name = "test-bucket-meta-opt";
let object_name = "test-object-meta-opt";
ecstore.make_bucket(bucket_name, &Default::default()).await.unwrap();
// Create a test object
let test_data = vec![b'B'; 5 * 1024 * 1024]; // 5MB of 'B' characters
let mut put_reader = PutObjReader::from_vec(test_data);
let object_opts = rustfs_ecstore::store_api::ObjectOptions::default();
println!("=== Creating test object ===");
ecstore
.put_object(bucket_name, object_name, &mut put_reader, &object_opts)
.await
.expect("put_object failed");
// Create HealManager and optimized Scanner
let heal_storage = Arc::new(rustfs_ahm::heal::storage::ECStoreHealStorage::new(ecstore.clone()));
let heal_config = HealConfig {
enable_auto_heal: true,
heal_interval: Duration::from_millis(100),
max_concurrent_heals: 4,
task_timeout: Duration::from_secs(300),
queue_size: 1000,
};
let heal_manager = Arc::new(rustfs_ahm::heal::HealManager::new(heal_storage, Some(heal_config)));
heal_manager.start().await.unwrap();
let scanner = Scanner::new(None, Some(heal_manager.clone()));
// Enable healing to detect missing metadata
scanner.set_config_enable_healing(true).await;
scanner.set_config_scan_mode(ScanMode::Deep).await;
println!("=== Initial scan (all metadata present) ===");
let initial_scan = scanner.scan_cycle().await;
assert!(initial_scan.is_ok(), "Initial scan should succeed");
// Simulate xl.meta file loss by deleting xl.meta files from some disks
println!("=== Simulating xl.meta file loss ===");
let mut deleted_meta_files = 0;
let mut deleted_meta_paths = Vec::new();
for (disk_idx, disk_path) in disk_paths.iter().enumerate() {
if disk_idx >= 2 {
// Only delete from first two disks to ensure some copies remain
break;
}
let bucket_path = disk_path.join(bucket_name);
let object_path = bucket_path.join(object_name);
if !object_path.exists() {
continue;
}
// Delete xl.meta file
let xl_meta_path = object_path.join("xl.meta");
if xl_meta_path.exists() {
match fs::remove_file(&xl_meta_path) {
Ok(_) => {
println!("Deleted xl.meta file: {xl_meta_path:?}");
deleted_meta_paths.push(xl_meta_path);
deleted_meta_files += 1;
}
Err(e) => {
println!("Failed to delete xl.meta file {xl_meta_path:?}: {e}");
}
}
}
}
println!("Deleted {deleted_meta_files} xl.meta files to simulate metadata loss");
// Scan again to detect missing metadata
println!("=== Scan after xl.meta deletion ===");
let scan_after_deletion = scanner.scan_cycle().await;
// Wait for heal manager to process
tokio::time::sleep(Duration::from_millis(1000)).await;
// Check heal statistics
let final_heal_stats = heal_manager.get_statistics().await;
println!("Final heal statistics:");
println!(" - total_tasks: {}", final_heal_stats.total_tasks);
println!(" - successful_tasks: {}", final_heal_stats.successful_tasks);
println!(" - failed_tasks: {}", final_heal_stats.failed_tasks);
let _ = final_heal_stats; // Use the variable to avoid unused warning
// The optimized scanner should handle missing metadata gracefully
match scan_after_deletion {
Ok(_) => {
println!("Optimized scanner completed successfully despite missing metadata");
}
Err(e) => {
println!("Optimized scanner detected errors (acceptable): {e}");
}
}
println!("=== Test completed ===");
println!("Optimized scanner successfully handled missing xl.meta scenario");
// Clean up
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_MISSING_META));
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Please run it manually."]
#[serial]
async fn test_optimized_scanner_healthy_objects_not_marked_corrupted() {
const TEST_DIR_HEALTHY: &str = "/tmp/rustfs_ahm_optimized_test_healthy_objects";
let (_, ecstore) = prepare_test_env(Some(TEST_DIR_HEALTHY), Some(9107)).await;
// Create heal manager for this test
let heal_config = HealConfig::default();
let heal_storage = Arc::new(rustfs_ahm::heal::storage::ECStoreHealStorage::new(ecstore.clone()));
let heal_manager = Arc::new(rustfs_ahm::heal::manager::HealManager::new(heal_storage, Some(heal_config)));
heal_manager.start().await.unwrap();
// Create optimized scanner with healing enabled
let scanner = Scanner::new(None, Some(heal_manager.clone()));
scanner.set_config_enable_healing(true).await;
scanner.set_config_scan_mode(ScanMode::Deep).await;
// Create test bucket and multiple healthy objects
let bucket_name = "healthy-test-bucket-opt";
let bucket_opts = MakeBucketOptions::default();
ecstore.make_bucket(bucket_name, &bucket_opts).await.unwrap();
// Create multiple test objects with different sizes
let test_objects = vec![
("small-object-opt", b"Small test data optimized".to_vec()),
("medium-object-opt", vec![42u8; 1024]), // 1KB
("large-object-opt", vec![123u8; 10240]), // 10KB
];
let object_opts = rustfs_ecstore::store_api::ObjectOptions::default();
// Write all test objects
for (object_name, test_data) in &test_objects {
let mut put_reader = PutObjReader::from_vec(test_data.clone());
ecstore
.put_object(bucket_name, object_name, &mut put_reader, &object_opts)
.await
.expect("Failed to put test object");
println!("Created test object: {object_name} (size: {} bytes)", test_data.len());
}
// Wait a moment for objects to be fully written
tokio::time::sleep(Duration::from_millis(100)).await;
// Get initial heal statistics
let initial_heal_stats = heal_manager.get_statistics().await;
println!("Initial heal statistics:");
println!(" - total_tasks: {}", initial_heal_stats.total_tasks);
// Perform initial scan on healthy objects
println!("=== Scanning healthy objects ===");
let scan_result = scanner.scan_cycle().await;
assert!(scan_result.is_ok(), "Scan of healthy objects should succeed");
// Wait for any potential heal tasks to be processed
tokio::time::sleep(Duration::from_millis(1000)).await;
// Get scanner metrics after scanning
let metrics = scanner.get_metrics().await;
println!("Optimized scanner metrics after scanning healthy objects:");
println!(" - objects_scanned: {}", metrics.objects_scanned);
println!(" - healthy_objects: {}", metrics.healthy_objects);
println!(" - corrupted_objects: {}", metrics.corrupted_objects);
// Get heal statistics after scanning
let post_scan_heal_stats = heal_manager.get_statistics().await;
println!("Heal statistics after scanning healthy objects:");
println!(" - total_tasks: {}", post_scan_heal_stats.total_tasks);
println!(" - successful_tasks: {}", post_scan_heal_stats.successful_tasks);
println!(" - failed_tasks: {}", post_scan_heal_stats.failed_tasks);
// Critical assertion: healthy objects should not trigger unnecessary heal tasks
let heal_tasks_created = post_scan_heal_stats.total_tasks - initial_heal_stats.total_tasks;
if heal_tasks_created > 0 {
println!("WARNING: {heal_tasks_created} heal tasks were created for healthy objects");
// For optimized scanner, we're more lenient as it may work differently
println!("Note: Optimized scanner may have different behavior than legacy scanner");
} else {
println!("✓ No heal tasks created for healthy objects - optimized scanner working correctly");
}
// Perform a second scan to ensure consistency
println!("=== Second scan to verify consistency ===");
let second_scan_result = scanner.scan_cycle().await;
assert!(second_scan_result.is_ok(), "Second scan should also succeed");
let second_metrics = scanner.get_metrics().await;
let _final_heal_stats = heal_manager.get_statistics().await;
println!("Second scan metrics:");
println!(" - objects_scanned: {}", second_metrics.objects_scanned);
println!("=== Test completed successfully ===");
println!("✓ Optimized scanner handled healthy objects correctly");
println!("✓ No false positive corruption detection");
println!("✓ Objects remain accessible after scanning");
// Clean up
let _ = std::fs::remove_dir_all(std::path::Path::new(TEST_DIR_HEALTHY));
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/build.rs | crates/ecstore/build.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
fn main() -> shadow_rs::SdResult<()> {
shadow_rs::ShadowBuilder::builder().build()?;
Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/store_list_objects.rs | crates/ecstore/src/store_list_objects.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::versioning::VersioningApi;
use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
use crate::disk::error::DiskError;
use crate::disk::{DiskInfo, DiskStore};
use crate::error::{
Error, Result, StorageError, is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err,
};
use crate::set_disk::SetDisks;
use crate::store::check_list_objs_args;
use crate::store_api::{
ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOptions, WalkOptions, WalkVersionsSortOrder,
};
use crate::store_utils::is_reserved_or_invalid_bucket;
use crate::{store::ECStore, store_api::ListObjectsV2Info};
use futures::future::join_all;
use rand::seq::SliceRandom;
use rustfs_filemeta::{
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
merge_file_meta_versions,
};
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast::{self};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;
const MAX_OBJECT_LIST: i32 = 1000;
// const MAX_DELETE_LIST: i32 = 1000;
// const MAX_UPLOADS_LIST: i32 = 10000;
// const MAX_PARTS_LIST: i32 = 10000;
const METACACHE_SHARE_PREFIX: bool = false;
pub fn max_keys_plus_one(max_keys: i32, add_one: bool) -> i32 {
let mut max_keys = max_keys;
if !(0..=MAX_OBJECT_LIST).contains(&max_keys) {
max_keys = MAX_OBJECT_LIST;
}
if add_one {
max_keys += 1;
}
max_keys
}
#[derive(Debug, Default, Clone)]
pub struct ListPathOptions {
pub id: Option<String>,
// Bucket of the listing.
pub bucket: String,
// Directory inside the bucket.
// When unset listPath will set this based on Prefix
pub base_dir: String,
// Scan/return only content with prefix.
pub prefix: String,
// FilterPrefix will return only results with this prefix when scanning.
// Should never contain a slash.
// Prefix should still be set.
pub filter_prefix: Option<String>,
// Marker to resume listing.
// The response will be the first entry >= this object name.
pub marker: Option<String>,
// Limit the number of results.
pub limit: i32,
// The number of disks to ask.
pub ask_disks: String,
// InclDeleted will keep all entries where latest version is a delete marker.
pub incl_deleted: bool,
// Scan recursively.
// If false only main directory will be scanned.
// Should always be true if Separator is n SlashSeparator.
pub recursive: bool,
// Separator to use.
pub separator: Option<String>,
// Create indicates that the lister should not attempt to load an existing cache.
pub create: bool,
// Include pure directories.
pub include_directories: bool,
// Transient is set if the cache is transient due to an error or being a reserved bucket.
// This means the cache metadata will not be persisted on disk.
// A transient result will never be returned from the cache so knowing the list id is required.
pub transient: bool,
// Versioned is this a ListObjectVersions call.
pub versioned: bool,
pub stop_disk_at_limit: bool,
pub pool_idx: Option<usize>,
pub set_idx: Option<usize>,
}
const MARKER_TAG_VERSION: &str = "v1";
impl ListPathOptions {
pub fn set_filter(&mut self) {
if METACACHE_SHARE_PREFIX {
return;
}
if self.prefix == self.base_dir {
return;
}
let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
self.filter_prefix = {
let fp = self.prefix.trim_start_matches(&self.base_dir).trim_matches(s);
if fp.contains(s) || fp.is_empty() {
None
} else {
Some(fp.to_owned())
}
}
}
pub fn parse_marker(&mut self) {
if let Some(marker) = &self.marker {
let s = marker.clone();
if !s.contains(format!("[rustfs_cache:{MARKER_TAG_VERSION}").as_str()) {
return;
}
if let (Some(start_idx), Some(end_idx)) = (s.find("["), s.find("]")) {
self.marker = Some(s[0..start_idx].to_owned());
let tags: Vec<_> = s[start_idx..end_idx].trim_matches(['[', ']']).split(",").collect();
for &tag in tags.iter() {
let kv: Vec<_> = tag.split(":").collect();
if kv.len() != 2 {
continue;
}
match kv[0] {
"rustfs_cache" => {
if kv[1] != MARKER_TAG_VERSION {
continue;
}
}
"id" => self.id = Some(kv[1].to_owned()),
"return" => {
self.id = Some(Uuid::new_v4().to_string());
self.create = true;
}
"p" => match kv[1].parse::<usize>() {
Ok(res) => self.pool_idx = Some(res),
Err(_) => {
self.id = Some(Uuid::new_v4().to_string());
self.create = true;
continue;
}
},
"s" => match kv[1].parse::<usize>() {
Ok(res) => self.set_idx = Some(res),
Err(_) => {
self.id = Some(Uuid::new_v4().to_string());
self.create = true;
continue;
}
},
_ => (),
}
}
}
}
}
pub fn encode_marker(&mut self, marker: &str) -> String {
if let Some(id) = &self.id {
format!(
"{}[rustfs_cache:{},id:{},p:{},s:{}]",
marker,
MARKER_TAG_VERSION,
id.to_owned(),
self.pool_idx.unwrap_or_default(),
self.pool_idx.unwrap_or_default(),
)
} else {
format!("{marker}[rustfs_cache:{MARKER_TAG_VERSION},return:]")
}
}
}
impl ECStore {
#[allow(clippy::too_many_arguments)]
// @continuation_token marker
// @start_after as marker when continuation_token empty
// @delimiter default="/", empty when recursive
// @max_keys limit
pub async fn inner_list_objects_v2(
self: Arc<Self>,
bucket: &str,
prefix: &str,
continuation_token: Option<String>,
delimiter: Option<String>,
max_keys: i32,
_fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
let marker = {
if continuation_token.is_none() {
start_after
} else {
continuation_token.clone()
}
};
let loi = self
.list_objects_generic(bucket, prefix, marker, delimiter, max_keys, incl_deleted)
.await?;
Ok(ListObjectsV2Info {
is_truncated: loi.is_truncated,
continuation_token,
next_continuation_token: loi.next_marker,
objects: loi.objects,
prefixes: loi.prefixes,
})
}
pub async fn list_objects_generic(
self: Arc<Self>,
bucket: &str,
prefix: &str,
marker: Option<String>,
delimiter: Option<String>,
max_keys: i32,
incl_deleted: bool,
) -> Result<ListObjectsInfo> {
let opts = ListPathOptions {
bucket: bucket.to_owned(),
prefix: prefix.to_owned(),
separator: delimiter.clone(),
limit: max_keys_plus_one(max_keys, marker.is_some()),
marker,
incl_deleted,
ask_disks: "strict".to_owned(), //TODO: from config
..Default::default()
};
// use get
if !opts.prefix.is_empty() && opts.limit == 1 && opts.marker.is_none() {
match self
.get_object_info(
&opts.bucket,
&opts.prefix,
&ObjectOptions {
no_lock: true,
..Default::default()
},
)
.await
{
Ok(res) => {
return Ok(ListObjectsInfo {
objects: vec![res],
..Default::default()
});
}
Err(err) => {
if is_err_bucket_not_found(&err) {
return Err(err);
}
}
};
};
let mut list_result = self
.list_path(&opts)
.await
.unwrap_or_else(|err| MetaCacheEntriesSortedResult {
err: Some(err.into()),
..Default::default()
});
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
result.forward_past(opts.marker);
}
// contextCanceled
let mut get_objects = ObjectInfo::from_meta_cache_entries_sorted_infos(
&list_result.entries.unwrap_or_default(),
bucket,
prefix,
delimiter.clone(),
)
.await;
let is_truncated = {
if max_keys > 0 && get_objects.len() > max_keys as usize {
get_objects.truncate(max_keys as usize);
true
} else {
list_result.err.is_none() && !get_objects.is_empty()
}
};
let next_marker = {
if is_truncated {
get_objects.last().map(|last| last.name.clone())
} else {
None
}
};
let mut prefixes: Vec<String> = Vec::new();
let mut objects = Vec::with_capacity(get_objects.len());
for obj in get_objects.into_iter() {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
for p in prefixes.iter() {
if found {
break;
}
found = p == &obj.name;
}
}
if !found {
prefixes.push(obj.name.clone());
}
} else {
objects.push(obj);
}
} else {
objects.push(obj);
}
}
Ok(ListObjectsInfo {
is_truncated,
next_marker,
objects,
prefixes,
})
}
pub async fn inner_list_object_versions(
self: Arc<Self>,
bucket: &str,
prefix: &str,
marker: Option<String>,
version_marker: Option<String>,
delimiter: Option<String>,
max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
if marker.is_none() && version_marker.is_some() {
return Err(StorageError::NotImplemented);
}
let version_marker = if let Some(marker) = version_marker {
// "null" is used for non-versioned objects in AWS S3 API
if marker == "null" {
None
} else {
Some(Uuid::parse_str(&marker)?)
}
} else {
None
};
// if marker set, limit +1
let opts = ListPathOptions {
bucket: bucket.to_owned(),
prefix: prefix.to_owned(),
separator: delimiter.clone(),
limit: max_keys_plus_one(max_keys, marker.is_some()),
marker,
incl_deleted: true,
ask_disks: "strict".to_owned(),
versioned: true,
..Default::default()
};
let mut list_result = match self.list_path(&opts).await {
Ok(res) => res,
Err(err) => MetaCacheEntriesSortedResult {
err: Some(err.into()),
..Default::default()
},
};
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
result.forward_past(opts.marker);
}
let mut get_objects = ObjectInfo::from_meta_cache_entries_sorted_versions(
&list_result.entries.unwrap_or_default(),
bucket,
prefix,
delimiter.clone(),
version_marker,
)
.await;
let is_truncated = {
if max_keys > 0 && get_objects.len() > max_keys as usize {
get_objects.truncate(max_keys as usize);
true
} else {
list_result.err.is_none() && !get_objects.is_empty()
}
};
let (next_marker, next_version_idmarker) = {
if is_truncated {
get_objects
.last()
.map(|last| {
(
Some(last.name.clone()),
// AWS S3 API returns "null" for non-versioned objects
Some(last.version_id.map(|v| v.to_string()).unwrap_or_else(|| "null".to_string())),
)
})
.unwrap_or_default()
} else {
(None, None)
}
};
let mut prefixes: Vec<String> = Vec::new();
let mut objects = Vec::with_capacity(get_objects.len());
for obj in get_objects.into_iter() {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
for p in prefixes.iter() {
if found {
break;
}
found = p == &obj.name;
}
}
if !found {
prefixes.push(obj.name.clone());
}
} else {
objects.push(obj);
}
} else {
objects.push(obj);
}
}
Ok(ListObjectVersionsInfo {
is_truncated,
next_marker,
next_version_idmarker,
objects,
prefixes,
})
}
pub async fn list_path(self: Arc<Self>, o: &ListPathOptions) -> Result<MetaCacheEntriesSortedResult> {
// warn!("list_path opt {:?}", &o);
check_list_objs_args(&o.bucket, &o.prefix, &o.marker)?;
// if opts.prefix.ends_with(SLASH_SEPARATOR) {
// return Err(Error::msg("eof"));
// }
let mut o = o.clone();
o.marker = o.marker.filter(|v| v >= &o.prefix);
if let Some(marker) = &o.marker
&& !o.prefix.is_empty()
&& !marker.starts_with(&o.prefix)
{
return Err(Error::Unexpected);
}
if o.limit == 0 {
return Err(Error::Unexpected);
}
if o.prefix.starts_with(SLASH_SEPARATOR) {
return Err(Error::Unexpected);
}
let slash_separator = Some(SLASH_SEPARATOR.to_owned());
o.include_directories = o.separator == slash_separator;
if (o.separator == slash_separator || o.separator.is_none()) && !o.recursive {
o.recursive = o.separator != slash_separator;
o.separator = slash_separator;
} else {
o.recursive = true
}
o.parse_marker();
if o.base_dir.is_empty() {
o.base_dir = base_dir_from_prefix(&o.prefix);
}
o.transient = o.transient || is_reserved_or_invalid_bucket(&o.bucket, false);
o.set_filter();
if o.transient {
o.create = false;
}
// cancel channel
let cancel = CancellationToken::new();
let (err_tx, mut err_rx) = broadcast::channel::<Arc<Error>>(1);
let (sender, recv) = mpsc::channel(o.limit as usize);
let store = self.clone();
let opts = o.clone();
let cancel_rx1 = cancel.clone();
let err_tx1 = err_tx.clone();
let job1 = tokio::spawn(async move {
let mut opts = opts;
opts.stop_disk_at_limit = true;
if let Err(err) = store.list_merged(cancel_rx1, opts, sender).await {
error!("list_merged err {:?}", err);
let _ = err_tx1.send(Arc::new(err));
}
});
let cancel_rx2 = cancel.clone();
let (result_tx, mut result_rx) = mpsc::channel(1);
let err_tx2 = err_tx.clone();
let opts = o.clone();
let job2 = tokio::spawn(async move {
if let Err(err) = gather_results(cancel_rx2, opts, recv, result_tx).await {
error!("gather_results err {:?}", err);
let _ = err_tx2.send(Arc::new(err));
}
// cancel call exit spawns
cancel.cancel();
});
let mut result = {
// receiver result
tokio::select! {
res = err_rx.recv() =>{
match res{
Ok(o) => {
error!("list_path err_rx.recv() ok {:?}", &o);
MetaCacheEntriesSortedResult{ entries: None, err: Some(o.as_ref().clone().into()) }
},
Err(err) => {
error!("list_path err_rx.recv() err {:?}", &err);
MetaCacheEntriesSortedResult{ entries: None, err: Some(rustfs_filemeta::Error::other(err)) }
},
}
},
Some(result) = result_rx.recv()=>{
result
}
}
};
// wait spawns exit
join_all(vec![job1, job2]).await;
if result.err.is_some() {
return Ok(result);
}
if let Some(entries) = result.entries.as_mut() {
entries.reuse = true;
let truncated = !entries.entries().is_empty() || result.err.is_none();
entries.o.0.truncate(o.limit as usize);
if !o.transient && truncated {
entries.list_id = if let Some(id) = o.id {
Some(id)
} else {
Some(Uuid::new_v4().to_string())
}
}
if !truncated {
result.err = Some(Error::Unexpected.into());
}
}
Ok(result)
}
// Read all
async fn list_merged(
&self,
rx: CancellationToken,
opts: ListPathOptions,
sender: Sender<MetaCacheEntry>,
) -> Result<Vec<ObjectInfo>> {
// warn!("list_merged ops {:?}", &opts);
let mut futures = Vec::new();
let mut inputs = Vec::new();
for sets in self.pools.iter() {
for set in sets.disk_set.iter() {
let (send, recv) = mpsc::channel(100);
inputs.push(recv);
let opts = opts.clone();
let rx_clone = rx.clone();
futures.push(set.list_path(rx_clone, opts, send));
}
}
tokio::spawn(async move {
if let Err(err) = merge_entry_channels(rx, inputs, sender.clone(), 1).await {
error!("merge_entry_channels err {:?}", err)
}
});
// let merge_res = merge_entry_channels(rx, inputs, sender.clone(), 1).await;
// TODO: cancelList
// let merge_res = merge_entry_channels(rx, inputs, sender.clone(), 1).await;
let results = join_all(futures).await;
let mut all_at_eof = true;
let mut errs = Vec::new();
for result in results {
if let Err(err) = result {
all_at_eof = false;
errs.push(Some(err));
} else {
errs.push(None);
}
}
if is_all_not_found(&errs) {
if is_all_volume_not_found(&errs) {
return Err(StorageError::VolumeNotFound);
}
return Ok(Vec::new());
}
// merge_res?;
// TODO check cancel
for err in errs.iter() {
if let Some(err) = err {
if err == &Error::Unexpected {
continue;
}
return Err(err.clone());
} else {
all_at_eof = false;
continue;
}
}
// check all_at_eof
_ = all_at_eof;
Ok(Vec::new())
}
#[allow(unused_assignments)]
pub async fn walk_internal(
self: Arc<Self>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: Sender<ObjectInfoOrErr>,
opts: WalkOptions,
) -> Result<()> {
check_list_objs_args(bucket, prefix, &None)?;
let mut futures = Vec::new();
let mut inputs = Vec::new();
for eset in self.pools.iter() {
for set in eset.disk_set.iter() {
let (mut disks, infos, _) = set.get_online_disks_with_healing_and_info(true).await;
let opts = opts.clone();
let (sender, list_out_rx) = mpsc::channel::<MetaCacheEntry>(1);
inputs.push(list_out_rx);
let rx_clone = rx.clone();
futures.push(async move {
let mut ask_disks = get_list_quorum(&opts.ask_disks, set.set_drive_count as i32);
if ask_disks == -1 {
let new_disks = get_quorum_disks(&disks, &infos, disks.len().div_ceil(2));
if !new_disks.is_empty() {
disks = new_disks;
} else {
ask_disks = get_list_quorum("strict", set.set_drive_count as i32);
}
}
if set.set_drive_count == 4 || ask_disks > disks.len() as i32 {
ask_disks = disks.len() as i32;
}
let fallback_disks = {
if ask_disks > 0 && disks.len() > ask_disks as usize {
let mut rand = rand::rng();
disks.shuffle(&mut rand);
disks.split_off(ask_disks as usize)
} else {
Vec::new()
}
};
let listing_quorum = ((ask_disks + 1) / 2) as usize;
let resolver = MetadataResolutionParams {
dir_quorum: listing_quorum,
obj_quorum: listing_quorum,
bucket: bucket.to_owned(),
..Default::default()
};
let path = base_dir_from_prefix(prefix);
let mut filter_prefix = {
prefix
.trim_start_matches(&path)
.trim_start_matches(SLASH_SEPARATOR)
.trim_end_matches(SLASH_SEPARATOR)
.to_owned()
};
if filter_prefix == path {
filter_prefix = "".to_owned();
}
// let (sender, rx1) = mpsc::channel(100);
let tx1 = sender.clone();
let tx2 = sender.clone();
list_path_raw(
rx_clone,
ListPathRawOptions {
disks: disks.iter().cloned().map(Some).collect(),
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
bucket: bucket.to_owned(),
path,
recursive: true,
filter_prefix: Some(filter_prefix),
forward_to: opts.marker.clone(),
min_disks: listing_quorum,
per_disk_limit: opts.limit as i32,
agreed: Some(Box::new(move |entry: MetaCacheEntry| {
Box::pin({
let value = tx1.clone();
async move {
if entry.is_dir() {
return;
}
if let Err(err) = value.send(entry).await {
error!("list_path send fail {:?}", err);
}
}
})
})),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
Box::pin({
let value = tx2.clone();
let resolver = resolver.clone();
async move {
if let Some(entry) = entries.resolve(resolver)
&& let Err(err) = value.send(entry).await
{
error!("list_path send fail {:?}", err);
}
}
})
})),
finished: None,
..Default::default()
},
)
.await
});
}
}
let (merge_tx, mut merge_rx) = mpsc::channel::<MetaCacheEntry>(100);
let bucket = bucket.to_owned();
let vcf = match get_versioning_config(&bucket).await {
Ok((res, _)) => Some(res),
Err(_) => None,
};
tokio::spawn(async move {
let mut sent_err = false;
while let Some(entry) = merge_rx.recv().await {
if opts.latest_only {
let fi = match entry.to_fileinfo(&bucket) {
Ok(res) => res,
Err(err) => {
if !sent_err {
let item = ObjectInfoOrErr {
item: None,
err: Some(err.into()),
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
sent_err = true;
return;
}
continue;
}
};
if let Some(filter) = opts.filter {
if filter(&fi) {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
})),
err: None,
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
}
} else {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
})),
err: None,
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
}
continue;
}
let fvs = match entry.file_info_versions(&bucket) {
Ok(res) => res,
Err(err) => {
let item = ObjectInfoOrErr {
item: None,
err: Some(err.into()),
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
return;
}
};
if opts.versions_sort == WalkVersionsSortOrder::Ascending {
//TODO: SORT
}
for fi in fvs.versions.iter() {
if let Some(filter) = opts.filter {
if filter(fi) {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
})),
err: None,
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
}
} else {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
})),
err: None,
};
if let Err(err) = result.send(item).await {
error!("walk result send err {:?}", err);
}
}
}
}
});
tokio::spawn(async move { merge_entry_channels(rx, inputs, merge_tx, 1).await });
join_all(futures).await;
Ok(())
}
}
async fn gather_results(
_rx: CancellationToken,
opts: ListPathOptions,
recv: Receiver<MetaCacheEntry>,
results_tx: Sender<MetaCacheEntriesSortedResult>,
) -> Result<()> {
let mut returned = false;
let mut sender = Some(results_tx);
let mut recv = recv;
let mut entries = Vec::new();
while let Some(mut entry) = recv.recv().await {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/set_disk.rs | crates/ecstore/src/set_disk.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
use crate::batch_processor::{AsyncBatchProcessor, get_global_processors};
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
use crate::bucket::replication::check_replicate_delete;
use crate::bucket::versioning::VersioningApi;
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::client::{object_api_utils::get_raw_etag, transition_api::ReaderImpl};
use crate::disk::STORAGE_FORMAT_FILE;
use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_read_quorum_errs, reduce_write_quorum_errs};
use crate::disk::{
self, CHECK_PART_DISK_NOT_FOUND, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN,
conv_part_err_to_int, has_part_err,
};
use crate::erasure_coding;
use crate::erasure_coding::bitrot_verify;
use crate::error::{Error, Result, is_err_version_not_found};
use crate::error::{GenericError, ObjectApiError, is_err_object_not_found};
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
use crate::store_api::ListObjectVersionsInfo;
use crate::store_api::{ListPartsInfo, ObjectOptions, ObjectToDelete};
use crate::store_api::{ObjectInfoOrErr, WalkOptions};
use crate::{
bucket::lifecycle::bucket_lifecycle_ops::{
LifecycleOps, gen_transition_objname, get_transitioned_object_reader, put_restore_opts,
},
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
config::{GLOBAL_STORAGE_CLASS, storageclass},
disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, DiskStore, FileInfoVersions,
RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions,
UpdateMetadataOpts, endpoint::Endpoint, error::DiskError, format::FormatV3, new_disk,
},
error::{StorageError, to_object_err},
event::name::EventName,
event_notification::{EventArgs, send_event},
global::{GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, get_global_deployment_id, is_dist_erasure},
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult, ObjectIO, ObjectInfo,
PartInfo, PutObjReader, StorageAPI,
},
store_init::load_format_erasure,
};
use bytes::Bytes;
use bytesize::ByteSize;
use chrono::Utc;
use futures::future::join_all;
use glob::Pattern;
use http::HeaderMap;
use md5::{Digest as Md5Digest, Md5};
use rand::{Rng, seq::SliceRandom};
use regex::Regex;
use rustfs_common::heal_channel::{DriveState, HealChannelPriority, HealItemType, HealOpts, HealScanMode, send_heal_disk};
use rustfs_config::MI_B;
use rustfs_filemeta::{
FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo,
RawFileInfo, ReplicationStatusType, VersionPurgeStatusType, file_info_from_raw, merge_file_meta_versions,
};
use rustfs_lock::fast_lock::types::LockResult;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_rio::{EtagResolvable, HashReader, HashReaderMut, TryGetIndex as _, WarpReader};
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER};
use rustfs_utils::{
HashAlgorithm,
crypto::hex,
path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf},
};
use rustfs_workers::workers::Workers;
use s3s::header::X_AMZ_RESTORE;
use sha2::{Digest, Sha256};
use std::hash::Hash;
use std::mem::{self};
use std::time::{Instant, SystemTime};
use std::{
collections::{HashMap, HashSet},
io::{Cursor, Write},
path::Path,
sync::Arc,
time::Duration,
};
use time::OffsetDateTime;
use tokio::{
io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
sync::{RwLock, broadcast},
};
use tokio::{
select,
sync::mpsc::{self, Sender},
time::{interval, timeout},
};
use tokio_util::sync::CancellationToken;
use tracing::error;
use tracing::{debug, info, warn};
use uuid::Uuid;
pub const DEFAULT_READ_BUFFER_SIZE: usize = MI_B; // 1 MiB = 1024 * 1024;
pub const MAX_PARTS_COUNT: usize = 10000;
const DISK_ONLINE_TIMEOUT: Duration = Duration::from_secs(1);
const DISK_HEALTH_CACHE_TTL: Duration = Duration::from_millis(750);
#[derive(Clone, Debug)]
pub struct SetDisks {
pub fast_lock_manager: Arc<rustfs_lock::FastObjectLockManager>,
pub locker_owner: String,
pub disks: Arc<RwLock<Vec<Option<DiskStore>>>>,
pub set_endpoints: Vec<Endpoint>,
pub set_drive_count: usize,
pub default_parity_count: usize,
pub set_index: usize,
pub pool_index: usize,
pub format: FormatV3,
disk_health_cache: Arc<RwLock<Vec<Option<DiskHealthEntry>>>>,
}
#[derive(Clone, Debug)]
struct DiskHealthEntry {
last_check: Instant,
online: bool,
}
impl DiskHealthEntry {
fn cached_value(&self) -> Option<bool> {
if self.last_check.elapsed() <= DISK_HEALTH_CACHE_TTL {
Some(self.online)
} else {
None
}
}
}
impl SetDisks {
#[allow(clippy::too_many_arguments)]
pub async fn new(
fast_lock_manager: Arc<rustfs_lock::FastObjectLockManager>,
locker_owner: String,
disks: Arc<RwLock<Vec<Option<DiskStore>>>>,
set_drive_count: usize,
default_parity_count: usize,
set_index: usize,
pool_index: usize,
set_endpoints: Vec<Endpoint>,
format: FormatV3,
) -> Arc<Self> {
Arc::new(SetDisks {
fast_lock_manager,
locker_owner,
disks,
set_drive_count,
default_parity_count,
set_index,
pool_index,
format,
set_endpoints,
disk_health_cache: Arc::new(RwLock::new(Vec::new())),
})
}
// async fn cached_disk_health(&self, index: usize) -> Option<bool> {
// let cache = self.disk_health_cache.read().await;
// cache
// .get(index)
// .and_then(|entry| entry.as_ref().and_then(|state| state.cached_value()))
// }
// async fn update_disk_health(&self, index: usize, online: bool) {
// let mut cache = self.disk_health_cache.write().await;
// if cache.len() <= index {
// cache.resize(index + 1, None);
// }
// cache[index] = Some(DiskHealthEntry {
// last_check: Instant::now(),
// online,
// });
// }
// async fn is_disk_online_cached(&self, index: usize, disk: &DiskStore) -> bool {
// if let Some(online) = self.cached_disk_health(index).await {
// return online;
// }
// let disk_clone = disk.clone();
// let online = timeout(DISK_ONLINE_TIMEOUT, async move { disk_clone.is_online().await })
// .await
// .unwrap_or(false);
// self.update_disk_health(index, online).await;
// online
// }
// async fn filter_online_disks(&self, disks: Vec<Option<DiskStore>>) -> (Vec<Option<DiskStore>>, usize) {
// let mut filtered = Vec::with_capacity(disks.len());
// let mut online_count = 0;
// for (idx, disk) in disks.into_iter().enumerate() {
// if let Some(disk_store) = disk {
// if self.is_disk_online_cached(idx, &disk_store).await {
// filtered.push(Some(disk_store));
// online_count += 1;
// } else {
// filtered.push(None);
// }
// } else {
// filtered.push(None);
// }
// }
// (filtered, online_count)
// }
fn format_lock_error(&self, bucket: &str, object: &str, mode: &str, err: &LockResult) -> String {
match err {
LockResult::Timeout => {
format!("{mode} lock acquisition timed out on {bucket}/{object} (owner={})", self.locker_owner)
}
LockResult::Conflict {
current_owner,
current_mode,
} => format!("{mode} lock conflicted on {bucket}/{object}: held by {current_owner} as {current_mode:?}"),
LockResult::Acquired => format!("unexpected lock state while acquiring {mode} lock on {bucket}/{object}"),
}
}
async fn get_disks_internal(&self) -> Vec<Option<DiskStore>> {
let rl = self.disks.read().await;
rl.clone()
}
pub async fn get_local_disks(&self) -> Vec<Option<DiskStore>> {
let rl = self.disks.read().await;
let mut disks: Vec<Option<DiskStore>> = rl
.clone()
.into_iter()
.filter(|v| v.as_ref().is_some_and(|d| d.is_local()))
.collect();
let mut rng = rand::rng();
disks.shuffle(&mut rng);
disks
}
async fn get_online_disks(&self) -> Vec<Option<DiskStore>> {
let mut disks = self.get_disks_internal().await;
// TODO: diskinfo filter online
let mut new_disk = Vec::with_capacity(disks.len());
for disk in disks.iter() {
if let Some(d) = disk
&& d.is_online().await
{
new_disk.push(disk.clone());
}
}
let mut rng = rand::rng();
disks.shuffle(&mut rng);
new_disk
// let disks = self.get_disks_internal().await;
// let (filtered, _) = self.filter_online_disks(disks).await;
// filtered.into_iter().filter(|disk| disk.is_some()).collect()
}
async fn get_online_local_disks(&self) -> Vec<Option<DiskStore>> {
let mut disks = self.get_online_disks().await;
let mut rng = rand::rng();
disks.shuffle(&mut rng);
disks
.into_iter()
.filter(|v| v.as_ref().is_some_and(|d| d.is_local()))
.collect()
}
pub async fn get_online_disks_with_healing(&self, incl_healing: bool) -> (Vec<DiskStore>, bool) {
let (disks, _, healing) = self.get_online_disks_with_healing_and_info(incl_healing).await;
(disks, healing > 0)
}
pub async fn get_online_disks_with_healing_and_info(&self, incl_healing: bool) -> (Vec<DiskStore>, Vec<DiskInfo>, usize) {
let mut disks = self.get_disks_internal().await;
let mut infos = Vec::with_capacity(disks.len());
let mut futures = Vec::with_capacity(disks.len());
let mut numbers: Vec<usize> = (0..disks.len()).collect();
{
let mut rng = rand::rng();
disks.shuffle(&mut rng);
numbers.shuffle(&mut rng);
}
for &i in numbers.iter() {
let disk = disks[i].clone();
futures.push(async move {
if let Some(disk) = disk {
disk.disk_info(&DiskInfoOptions::default()).await
} else {
Err(DiskError::DiskNotFound)
}
});
}
// Use optimized batch processor for disk info retrieval
let processor = get_global_processors().metadata_processor();
let results = processor.execute_batch(futures).await;
for result in results {
match result {
Ok(res) => {
infos.push(res);
}
Err(err) => {
infos.push(DiskInfo {
error: err.to_string(),
..Default::default()
});
}
}
}
let mut healing: usize = 0;
let mut scanning_disks = Vec::new();
let mut healing_disks = Vec::new();
let mut scanning_infos = Vec::new();
let mut healing_infos = Vec::new();
let mut new_disks = Vec::new();
let mut new_infos = Vec::new();
for &i in numbers.iter() {
let (info, disk) = (infos[i].clone(), disks[i].clone());
if !info.error.is_empty() || disk.is_none() {
continue;
}
if info.healing {
healing += 1;
if incl_healing {
healing_disks.push(disk.unwrap());
healing_infos.push(info);
}
continue;
}
if !info.healing {
new_disks.push(disk.unwrap());
new_infos.push(info);
} else {
scanning_disks.push(disk.unwrap());
scanning_infos.push(info);
}
}
new_disks.extend(scanning_disks);
new_infos.extend(scanning_infos);
new_disks.extend(healing_disks);
new_infos.extend(healing_infos);
(new_disks, new_infos, healing)
}
async fn _get_local_disks(&self) -> Vec<Option<DiskStore>> {
let mut disks = self.get_disks_internal().await;
let mut rng = rand::rng();
disks.shuffle(&mut rng);
disks
.into_iter()
.filter(|v| v.as_ref().is_some_and(|d| d.is_local()))
.collect()
}
fn default_read_quorum(&self) -> usize {
self.set_drive_count - self.default_parity_count
}
fn default_write_quorum(&self) -> usize {
let mut data_count = self.set_drive_count - self.default_parity_count;
if data_count == self.default_parity_count {
data_count += 1
}
data_count
}
#[tracing::instrument(level = "debug", skip(disks, file_infos))]
#[allow(clippy::type_complexity)]
async fn rename_data(
disks: &[Option<DiskStore>],
src_bucket: &str,
src_object: &str,
file_infos: &[FileInfo],
dst_bucket: &str,
dst_object: &str,
write_quorum: usize,
) -> disk::error::Result<(Vec<Option<DiskStore>>, Option<Vec<u8>>, Option<Uuid>)> {
let mut futures = Vec::with_capacity(disks.len());
// let mut ress = Vec::with_capacity(disks.len());
let mut errs = Vec::with_capacity(disks.len());
let src_bucket = Arc::new(src_bucket.to_string());
let src_object = Arc::new(src_object.to_string());
let dst_bucket = Arc::new(dst_bucket.to_string());
let dst_object = Arc::new(dst_object.to_string());
for (i, (disk, file_info)) in disks.iter().zip(file_infos.iter()).enumerate() {
let mut file_info = file_info.clone();
let disk = disk.clone();
let src_bucket = src_bucket.clone();
let src_object = src_object.clone();
let dst_object = dst_object.clone();
let dst_bucket = dst_bucket.clone();
futures.push(tokio::spawn(async move {
if file_info.erasure.index == 0 {
file_info.erasure.index = i + 1;
}
if !file_info.is_valid() {
return Err(DiskError::FileCorrupt);
}
if let Some(disk) = disk {
disk.rename_data(&src_bucket, &src_object, file_info, &dst_bucket, &dst_object)
.await
} else {
Err(DiskError::DiskNotFound)
}
}));
}
let mut disk_versions = vec![None; disks.len()];
let mut data_dirs = vec![None; disks.len()];
let results = join_all(futures).await;
for (idx, result) in results.iter().enumerate() {
match result.as_ref().map_err(|_| DiskError::Unexpected)? {
Ok(res) => {
data_dirs[idx] = res.old_data_dir;
disk_versions[idx].clone_from(&res.sign);
errs.push(None);
}
Err(e) => {
errs.push(Some(e.clone()));
}
}
}
let mut futures = Vec::with_capacity(disks.len());
if let Some(ret_err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
// TODO: add concurrency
for (i, err) in errs.iter().enumerate() {
if err.is_some() {
continue;
}
if let Some(disk) = disks[i].as_ref() {
let fi = file_infos[i].clone();
let old_data_dir = data_dirs[i];
let disk = disk.clone();
let src_bucket = src_bucket.clone();
let src_object = src_object.clone();
futures.push(tokio::spawn(async move {
let _ = disk
.delete_version(
&src_bucket,
&src_object,
fi,
false,
DeleteOptions {
undo_write: true,
old_data_dir,
..Default::default()
},
)
.await
.map_err(|e| {
debug!("rename_data delete_version err {:?}", e);
e
});
}));
}
}
let _ = join_all(futures).await;
return Err(ret_err);
}
let versions = None;
// TODO: reduceCommonVersions
let data_dir = Self::reduce_common_data_dir(&data_dirs, write_quorum);
// // TODO: reduce_common_data_dir
// if let Some(old_dir) = rename_ress
// .iter()
// .filter_map(|v| if v.is_some() { v.as_ref().unwrap().old_data_dir } else { None })
// .map(|v| v.to_string())
// .next()
// {
// let cm_errs = self.commit_rename_data_dir(&shuffle_disks, &bucket, &object, &old_dir).await;
// warn!("put_object commit_rename_data_dir:{:?}", &cm_errs);
// }
// self.delete_all(RUSTFS_META_TMP_BUCKET, &tmp_dir).await?;
Ok((Self::eval_disks(disks, &errs), versions, data_dir))
}
fn reduce_common_data_dir(data_dirs: &Vec<Option<Uuid>>, write_quorum: usize) -> Option<Uuid> {
let mut data_dirs_count = HashMap::new();
for ddir in data_dirs {
*data_dirs_count.entry(ddir).or_insert(0) += 1;
}
let mut max = 0;
let mut data_dir = None;
for (ddir, count) in data_dirs_count {
if count > max {
max = count;
data_dir = *ddir;
}
}
if max >= write_quorum { data_dir } else { None }
}
#[allow(dead_code)]
#[tracing::instrument(level = "debug", skip(self, disks))]
async fn commit_rename_data_dir(
&self,
disks: &[Option<DiskStore>],
bucket: &str,
object: &str,
data_dir: &str,
write_quorum: usize,
) -> disk::error::Result<()> {
let file_path = Arc::new(format!("{object}/{data_dir}"));
let bucket = Arc::new(bucket.to_string());
let futures = disks.iter().map(|disk| {
let file_path = file_path.clone();
let bucket = bucket.clone();
let disk = disk.clone();
tokio::spawn(async move {
if let Some(disk) = disk {
(disk
.delete(
&bucket,
&file_path,
DeleteOptions {
recursive: true,
..Default::default()
},
)
.await)
.err()
} else {
Some(DiskError::DiskNotFound)
}
})
});
let errs: Vec<Option<DiskError>> = join_all(futures)
.await
.into_iter()
.map(|e| e.unwrap_or(Some(DiskError::Unexpected)))
.collect();
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
return Err(err);
}
Ok(())
}
#[tracing::instrument(skip(disks))]
async fn cleanup_multipart_path(disks: &[Option<DiskStore>], paths: &[String]) {
let mut errs = Vec::with_capacity(disks.len());
// Use improved simple batch processor instead of join_all for better performance
let processor = get_global_processors().write_processor();
let tasks: Vec<_> = disks
.iter()
.map(|disk| {
let disk = disk.clone();
let paths = paths.to_vec();
async move {
if let Some(disk) = disk {
disk.delete_paths(RUSTFS_META_MULTIPART_BUCKET, &paths).await
} else {
Err(DiskError::DiskNotFound)
}
}
})
.collect();
let results = processor.execute_batch(tasks).await;
for result in results {
match result {
Ok(_) => {
errs.push(None);
}
Err(e) => {
errs.push(Some(e));
}
}
}
if errs.iter().any(|e| e.is_some()) {
warn!("cleanup_multipart_path errs {:?}", &errs);
}
}
async fn read_parts(
disks: &[Option<DiskStore>],
bucket: &str,
part_meta_paths: &[String],
part_numbers: &[usize],
read_quorum: usize,
) -> disk::error::Result<Vec<ObjectPartInfo>> {
let mut errs = Vec::with_capacity(disks.len());
let mut object_parts = Vec::with_capacity(disks.len());
// Use batch processor for better performance
let processor = get_global_processors().read_processor();
let bucket = bucket.to_string();
let part_meta_paths = part_meta_paths.to_vec();
let tasks: Vec<_> = disks
.iter()
.map(|disk| {
let disk = disk.clone();
let bucket = bucket.clone();
let part_meta_paths = part_meta_paths.clone();
async move {
if let Some(disk) = disk {
disk.read_parts(&bucket, &part_meta_paths).await
} else {
Err(DiskError::DiskNotFound)
}
}
})
.collect();
let results = processor.execute_batch(tasks).await;
for result in results {
match result {
Ok(res) => {
errs.push(None);
object_parts.push(res);
}
Err(e) => {
errs.push(Some(e));
object_parts.push(vec![]);
}
}
}
if let Some(err) = reduce_read_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, read_quorum) {
return Err(err);
}
let mut ret = vec![ObjectPartInfo::default(); part_meta_paths.len()];
for (part_idx, part_info) in part_meta_paths.iter().enumerate() {
let mut part_meta_quorum = HashMap::new();
let mut part_infos = Vec::new();
for (j, parts) in object_parts.iter().enumerate() {
if parts.len() != part_meta_paths.len() {
*part_meta_quorum.entry(part_info.clone()).or_insert(0) += 1;
continue;
}
if !parts[part_idx].etag.is_empty() {
*part_meta_quorum.entry(parts[part_idx].etag.clone()).or_insert(0) += 1;
part_infos.push(parts[part_idx].clone());
continue;
}
*part_meta_quorum.entry(part_info.clone()).or_insert(0) += 1;
}
let mut max_quorum = 0;
let mut max_etag = None;
let mut max_part_meta = None;
for (etag, quorum) in part_meta_quorum.iter() {
if quorum > &max_quorum {
max_quorum = *quorum;
max_etag = Some(etag);
max_part_meta = Some(etag);
}
}
let mut found = None;
for info in part_infos.iter() {
if let Some(etag) = max_etag
&& info.etag == *etag
{
found = Some(info.clone());
break;
}
if let Some(part_meta) = max_part_meta
&& info.etag.is_empty()
&& part_meta.ends_with(format!("part.{0}.meta", info.number).as_str())
{
found = Some(info.clone());
break;
}
}
if let (Some(found), Some(max_etag)) = (found, max_etag)
&& !found.etag.is_empty()
&& part_meta_quorum.get(max_etag).unwrap_or(&0) >= &read_quorum
{
ret[part_idx] = found.clone();
} else {
ret[part_idx] = ObjectPartInfo {
number: part_numbers[part_idx],
error: Some(format!("part.{} not found", part_numbers[part_idx])),
..Default::default()
};
}
}
Ok(ret)
}
async fn list_parts(disks: &[Option<DiskStore>], part_path: &str, read_quorum: usize) -> disk::error::Result<Vec<usize>> {
let mut futures = Vec::with_capacity(disks.len());
for (i, disk) in disks.iter().enumerate() {
futures.push(async move {
if let Some(disk) = disk {
disk.list_dir(RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_MULTIPART_BUCKET, part_path, -1)
.await
} else {
Err(DiskError::DiskNotFound)
}
});
}
let mut errs = Vec::with_capacity(disks.len());
let mut object_parts = Vec::with_capacity(disks.len());
let results = join_all(futures).await;
for result in results {
match result {
Ok(res) => {
errs.push(None);
object_parts.push(res);
}
Err(e) => {
errs.push(Some(e));
object_parts.push(vec![]);
}
}
}
if let Some(err) = reduce_read_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, read_quorum) {
return Err(err);
}
let mut part_quorum_map: HashMap<usize, usize> = HashMap::new();
for drive_parts in object_parts {
let mut parts_with_meta_count: HashMap<usize, usize> = HashMap::new();
// part files can be either part.N or part.N.meta
for part_path in drive_parts {
if let Some(num_str) = part_path.strip_prefix("part.") {
if let Some(meta_idx) = num_str.find(".meta") {
if let Ok(part_num) = num_str[..meta_idx].parse::<usize>() {
*parts_with_meta_count.entry(part_num).or_insert(0) += 1;
}
} else if let Ok(part_num) = num_str.parse::<usize>() {
*parts_with_meta_count.entry(part_num).or_insert(0) += 1;
}
}
}
// Include only part.N.meta files with corresponding part.N
for (&part_num, &cnt) in &parts_with_meta_count {
if cnt >= 2 {
*part_quorum_map.entry(part_num).or_insert(0) += 1;
}
}
}
let mut part_numbers = Vec::with_capacity(part_quorum_map.len());
for (part_num, count) in part_quorum_map {
if count >= read_quorum {
part_numbers.push(part_num);
}
}
part_numbers.sort();
Ok(part_numbers)
}
#[tracing::instrument(skip(disks, meta))]
async fn rename_part(
disks: &[Option<DiskStore>],
src_bucket: &str,
src_object: &str,
dst_bucket: &str,
dst_object: &str,
meta: Bytes,
write_quorum: usize,
) -> disk::error::Result<Vec<Option<DiskStore>>> {
let src_bucket = Arc::new(src_bucket.to_string());
let src_object = Arc::new(src_object.to_string());
let dst_bucket = Arc::new(dst_bucket.to_string());
let dst_object = Arc::new(dst_object.to_string());
let mut errs = Vec::with_capacity(disks.len());
let futures = disks.iter().map(|disk| {
let disk = disk.clone();
let meta = meta.clone();
let src_bucket = src_bucket.clone();
let src_object = src_object.clone();
let dst_bucket = dst_bucket.clone();
let dst_object = dst_object.clone();
tokio::spawn(async move {
if let Some(disk) = disk {
disk.rename_part(&src_bucket, &src_object, &dst_bucket, &dst_object, meta)
.await
} else {
Err(DiskError::DiskNotFound)
}
})
});
let results = join_all(futures).await;
for result in results {
match result? {
Ok(_) => {
errs.push(None);
}
Err(e) => {
errs.push(Some(e));
}
}
}
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
warn!("rename_part errs {:?}", &errs);
Self::cleanup_multipart_path(disks, &[dst_object.to_string(), format!("{dst_object}.meta")]).await;
return Err(err);
}
let disks = Self::eval_disks(disks, &errs);
Ok(disks)
}
fn eval_disks(disks: &[Option<DiskStore>], errs: &[Option<DiskError>]) -> Vec<Option<DiskStore>> {
if disks.len() != errs.len() {
return Vec::new();
}
let mut online_disks = vec![None; disks.len()];
for (i, err_op) in errs.iter().enumerate() {
if err_op.is_none() {
online_disks[i].clone_from(&disks[i]);
}
}
online_disks
}
// async fn write_all(disks: &[Option<DiskStore>], bucket: &str, object: &str, buff: Vec<u8>) -> Vec<Option<Error>> {
// let mut futures = Vec::with_capacity(disks.len());
// let mut errors = Vec::with_capacity(disks.len());
// for disk in disks.iter() {
// if disk.is_none() {
// errors.push(Some(Error::new(DiskError::DiskNotFound)));
// continue;
// }
// let disk = disk.as_ref().unwrap();
// futures.push(disk.write_all(bucket, object, buff.clone()));
// }
// let results = join_all(futures).await;
// for result in results {
// match result {
// Ok(_) => {
// errors.push(None);
// }
// Err(e) => {
// errors.push(Some(e));
// }
// }
// }
// errors
// }
#[tracing::instrument(skip(disks, files))]
async fn write_unique_file_info(
disks: &[Option<DiskStore>],
org_bucket: &str,
bucket: &str,
prefix: &str,
files: &[FileInfo],
write_quorum: usize,
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/store_api.rs | crates/ecstore/src/store_api.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::versioning::VersioningApi as _;
use crate::config::storageclass;
use crate::disk::DiskStore;
use crate::error::{Error, Result};
use crate::store_utils::clean_metadata;
use crate::{
bucket::lifecycle::bucket_lifecycle_audit::LcAuditEvent,
bucket::lifecycle::lifecycle::ExpirationOptions,
bucket::lifecycle::{bucket_lifecycle_ops::TransitionedObject, lifecycle::TransitionOptions},
};
use bytes::Bytes;
use http::{HeaderMap, HeaderValue};
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::{
FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, REPLICATION_RESET, REPLICATION_STATUS, ReplicateDecision, ReplicationState,
ReplicationStatusType, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
};
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_rio::Checksum;
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
use rustfs_utils::path::decode_dir_object;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Debug;
use std::io::Cursor;
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::task::{Context, Poll};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_util::sync::CancellationToken;
use tracing::warn;
use uuid::Uuid;
pub const ERASURE_ALGORITHM: &str = "rs-vandermonde";
pub const BLOCK_SIZE_V2: usize = 1024 * 1024; // 1M
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct MakeBucketOptions {
pub lock_enabled: bool,
pub versioning_enabled: bool,
pub force_create: bool, // Create buckets even if they are already created.
pub created_at: Option<OffsetDateTime>, // only for site replication
pub no_lock: bool,
}
#[derive(Debug, Default, Clone, PartialEq)]
pub enum SRBucketDeleteOp {
#[default]
NoOp,
MarkDelete,
Purge,
}
#[derive(Debug, Default, Clone)]
pub struct DeleteBucketOptions {
pub no_lock: bool,
pub no_recreate: bool,
pub force: bool, // Force deletion
pub srdelete_op: SRBucketDeleteOp,
}
pub struct PutObjReader {
pub stream: HashReader,
}
impl Debug for PutObjReader {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PutObjReader").finish()
}
}
impl PutObjReader {
pub fn new(stream: HashReader) -> Self {
PutObjReader { stream }
}
pub fn as_hash_reader(&self) -> &HashReader {
&self.stream
}
pub fn from_vec(data: Vec<u8>) -> Self {
use sha2::{Digest, Sha256};
let content_length = data.len() as i64;
let sha256hex = if content_length > 0 {
Some(hex_simd::encode_to_string(Sha256::digest(&data), hex_simd::AsciiCase::Lower))
} else {
None
};
PutObjReader {
stream: HashReader::new(
Box::new(WarpReader::new(Cursor::new(data))),
content_length,
content_length,
None,
sha256hex,
false,
)
.unwrap(),
}
}
pub fn size(&self) -> i64 {
self.stream.size()
}
pub fn actual_size(&self) -> i64 {
self.stream.actual_size()
}
}
pub struct GetObjectReader {
pub stream: Box<dyn AsyncRead + Unpin + Send + Sync>,
pub object_info: ObjectInfo,
}
impl GetObjectReader {
#[tracing::instrument(level = "debug", skip(reader, rs, opts, _h))]
pub fn new(
reader: Box<dyn AsyncRead + Unpin + Send + Sync>,
rs: Option<HTTPRangeSpec>,
oi: &ObjectInfo,
opts: &ObjectOptions,
_h: &HeaderMap<HeaderValue>,
) -> Result<(Self, usize, i64)> {
let mut rs = rs;
if let Some(part_number) = opts.part_number
&& rs.is_none()
{
rs = HTTPRangeSpec::from_object_info(oi, part_number);
}
// TODO:Encrypted
let (algo, is_compressed) = oi.is_compressed_ok()?;
// TODO: check TRANSITION
if is_compressed {
let actual_size = oi.get_actual_size()?;
let (off, length, dec_off, dec_length) = if let Some(rs) = rs {
// Support range requests for compressed objects
let (dec_off, dec_length) = rs.get_offset_length(actual_size)?;
(0, oi.size, dec_off, dec_length)
} else {
(0, oi.size, 0, actual_size)
};
let dec_reader = DecompressReader::new(reader, algo);
let actual_size_usize = if actual_size > 0 {
actual_size as usize
} else {
return Err(Error::other(format!("invalid decompressed size {actual_size}")));
};
let final_reader: Box<dyn AsyncRead + Unpin + Send + Sync> = if dec_off > 0 || dec_length != actual_size {
// Use RangedDecompressReader for streaming range processing
// The new implementation supports any offset size by streaming and skipping data
match RangedDecompressReader::new(dec_reader, dec_off, dec_length, actual_size_usize) {
Ok(ranged_reader) => {
tracing::debug!(
"Successfully created RangedDecompressReader for offset={}, length={}",
dec_off,
dec_length
);
Box::new(ranged_reader)
}
Err(e) => {
// Only fail if the range parameters are fundamentally invalid (e.g., offset >= file size)
tracing::error!("RangedDecompressReader failed with invalid range parameters: {}", e);
return Err(e);
}
}
} else {
Box::new(LimitReader::new(dec_reader, actual_size_usize))
};
let mut oi = oi.clone();
oi.size = dec_length;
return Ok((
GetObjectReader {
stream: final_reader,
object_info: oi,
},
off,
length,
));
}
if let Some(rs) = rs {
let (off, length) = rs.get_offset_length(oi.size)?;
Ok((
GetObjectReader {
stream: reader,
object_info: oi.clone(),
},
off,
length,
))
} else {
Ok((
GetObjectReader {
stream: reader,
object_info: oi.clone(),
},
0,
oi.size,
))
}
}
pub async fn read_all(&mut self) -> Result<Vec<u8>> {
let mut data = Vec::new();
self.stream.read_to_end(&mut data).await?;
// while let Some(x) = self.stream.next().await {
// let buf = match x {
// Ok(res) => res,
// Err(e) => return Err(Error::other(e.to_string())),
// };
// data.extend_from_slice(buf.as_ref());
// }
Ok(data)
}
}
impl AsyncRead for GetObjectReader {
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
}
#[derive(Debug, Clone)]
pub struct HTTPRangeSpec {
pub is_suffix_length: bool,
pub start: i64,
pub end: i64,
}
impl HTTPRangeSpec {
pub fn from_object_info(oi: &ObjectInfo, part_number: usize) -> Option<Self> {
if oi.size == 0 || oi.parts.is_empty() {
return None;
}
if part_number == 0 || part_number > oi.parts.len() {
return None;
}
let mut start = 0_i64;
let mut end = -1_i64;
for i in 0..part_number {
let part = &oi.parts[i];
start = end + 1;
end = start + (part.size as i64) - 1;
}
Some(HTTPRangeSpec {
is_suffix_length: false,
start,
end,
})
}
pub fn get_offset_length(&self, res_size: i64) -> Result<(usize, i64)> {
let len = self.get_length(res_size)?;
let mut start = self.start;
if self.is_suffix_length {
let suffix_len = if self.start < 0 {
self.start
.checked_neg()
.ok_or_else(|| Error::InvalidRangeSpec("range value invalid: suffix length overflow".to_string()))?
} else {
self.start
};
start = res_size - suffix_len;
if start < 0 {
start = 0;
}
}
Ok((start as usize, len))
}
pub fn get_length(&self, res_size: i64) -> Result<i64> {
if res_size < 0 {
return Err(Error::InvalidRangeSpec("The requested range is not satisfiable".to_string()));
}
if self.is_suffix_length {
let specified_len = if self.start < 0 {
self.start
.checked_neg()
.ok_or_else(|| Error::InvalidRangeSpec("range value invalid: suffix length overflow".to_string()))?
} else {
self.start
};
let mut range_length = specified_len;
if specified_len > res_size {
range_length = res_size;
}
return Ok(range_length);
}
if self.start >= res_size {
return Err(Error::InvalidRangeSpec("The requested range is not satisfiable".to_string()));
}
if self.end > -1 {
let mut end = self.end;
if res_size <= end {
end = res_size - 1;
}
let range_length = end - self.start + 1;
return Ok(range_length);
}
if self.end == -1 {
let range_length = res_size - self.start;
return Ok(range_length);
}
Err(Error::InvalidRangeSpec(format!(
"range value invalid: start={}, end={}, expected start <= end and end >= -1",
self.start, self.end
)))
}
}
#[derive(Debug, Default, Clone)]
pub struct HTTPPreconditions {
pub if_match: Option<String>,
pub if_none_match: Option<String>,
pub if_modified_since: Option<OffsetDateTime>,
pub if_unmodified_since: Option<OffsetDateTime>,
}
#[derive(Debug, Default, Clone)]
pub struct ObjectOptions {
// Use the maximum parity (N/2), used when saving server configuration files
pub max_parity: bool,
pub mod_time: Option<OffsetDateTime>,
pub part_number: Option<usize>,
pub delete_prefix: bool,
pub delete_prefix_object: bool,
pub version_id: Option<String>,
pub no_lock: bool,
pub versioned: bool,
pub version_suspended: bool,
pub skip_decommissioned: bool,
pub skip_rebalancing: bool,
pub skip_free_version: bool,
pub data_movement: bool,
pub src_pool_idx: usize,
pub user_defined: HashMap<String, String>,
pub preserve_etag: Option<String>,
pub metadata_chg: bool,
pub http_preconditions: Option<HTTPPreconditions>,
pub delete_replication: Option<ReplicationState>,
pub replication_request: bool,
pub delete_marker: bool,
pub transition: TransitionOptions,
pub expiration: ExpirationOptions,
pub lifecycle_audit_event: LcAuditEvent,
pub eval_metadata: Option<HashMap<String, String>>,
pub want_checksum: Option<Checksum>,
}
impl ObjectOptions {
pub fn set_delete_replication_state(&mut self, dsc: ReplicateDecision) {
let mut rs = ReplicationState {
replicate_decision_str: dsc.to_string(),
..Default::default()
};
if self.version_id.is_none() {
rs.replication_status_internal = dsc.pending_status();
rs.targets = replication_statuses_map(rs.replication_status_internal.as_deref().unwrap_or_default());
} else {
rs.version_purge_status_internal = dsc.pending_status();
rs.purge_targets = version_purge_statuses_map(rs.version_purge_status_internal.as_deref().unwrap_or_default());
}
self.delete_replication = Some(rs)
}
pub fn set_replica_status(&mut self, status: ReplicationStatusType) {
if let Some(rs) = self.delete_replication.as_mut() {
rs.replica_status = status;
rs.replica_timestamp = Some(OffsetDateTime::now_utc());
} else {
self.delete_replication = Some(ReplicationState {
replica_status: status,
replica_timestamp: Some(OffsetDateTime::now_utc()),
..Default::default()
});
}
}
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
self.delete_replication
.as_ref()
.map(|v| v.composite_version_purge_status())
.unwrap_or(VersionPurgeStatusType::Empty)
}
pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
self.delete_replication
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
}
pub fn put_replication_state(&self) -> ReplicationState {
let rs = match self
.user_defined
.get(format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_STATUS}").as_str())
{
Some(v) => v.to_string(),
None => return ReplicationState::default(),
};
ReplicationState {
replication_status_internal: Some(rs.to_string()),
targets: replication_statuses_map(rs.as_str()),
..Default::default()
}
}
pub fn precondition_check(&self, obj_info: &ObjectInfo) -> Result<()> {
let has_valid_mod_time = obj_info.mod_time.is_some_and(|t| t != OffsetDateTime::UNIX_EPOCH);
if let Some(part_number) = self.part_number
&& part_number > 1
&& !obj_info.parts.is_empty()
{
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
}
if let Some(pre) = &self.http_preconditions {
if let Some(if_none_match) = &pre.if_none_match
&& let Some(etag) = &obj_info.etag
&& is_etag_equal(etag, if_none_match)
{
return Err(Error::NotModified);
}
if has_valid_mod_time
&& let Some(if_modified_since) = &pre.if_modified_since
&& let Some(mod_time) = &obj_info.mod_time
&& !is_modified_since(mod_time, if_modified_since)
{
return Err(Error::NotModified);
}
if let Some(if_match) = &pre.if_match {
if let Some(etag) = &obj_info.etag {
if !is_etag_equal(etag, if_match) {
return Err(Error::PreconditionFailed);
}
} else {
return Err(Error::PreconditionFailed);
}
}
if has_valid_mod_time
&& pre.if_match.is_none()
&& let Some(if_unmodified_since) = &pre.if_unmodified_since
&& let Some(mod_time) = &obj_info.mod_time
&& is_modified_since(mod_time, if_unmodified_since)
{
return Err(Error::PreconditionFailed);
}
}
Ok(())
}
}
fn is_etag_equal(etag1: &str, etag2: &str) -> bool {
let e1 = etag1.trim_matches('"');
let e2 = etag2.trim_matches('"');
// Handle wildcard "*" - matches any ETag (per HTTP/1.1 RFC 7232)
if e2 == "*" {
return true;
}
e1 == e2
}
fn is_modified_since(mod_time: &OffsetDateTime, given_time: &OffsetDateTime) -> bool {
let mod_secs = mod_time.unix_timestamp();
let given_secs = given_time.unix_timestamp();
mod_secs > given_secs
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct BucketOptions {
pub deleted: bool, // true only when site replication is enabled
pub cached: bool, // true only when we are requesting a cached response instead of hitting the disk for example ListBuckets() call.
pub no_metadata: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BucketInfo {
pub name: String,
pub created: Option<OffsetDateTime>,
pub deleted: Option<OffsetDateTime>,
pub versioning: bool,
pub object_locking: bool,
}
#[derive(Debug, Default, Clone)]
pub struct MultipartUploadResult {
pub upload_id: String,
pub checksum_algo: Option<String>,
pub checksum_type: Option<String>,
}
#[derive(Debug, Default, Clone)]
pub struct PartInfo {
pub part_num: usize,
pub last_mod: Option<OffsetDateTime>,
pub size: usize,
pub etag: Option<String>,
pub actual_size: i64,
}
#[derive(Debug, Clone, Default)]
pub struct CompletePart {
pub part_num: usize,
pub etag: Option<String>,
// pub size: Option<usize>,
pub checksum_crc32: Option<String>,
pub checksum_crc32c: Option<String>,
pub checksum_sha1: Option<String>,
pub checksum_sha256: Option<String>,
pub checksum_crc64nvme: Option<String>,
}
impl From<s3s::dto::CompletedPart> for CompletePart {
fn from(value: s3s::dto::CompletedPart) -> Self {
Self {
part_num: value.part_number.unwrap_or_default() as usize,
etag: value.e_tag.map(|v| v.value().to_owned()),
checksum_crc32: value.checksum_crc32,
checksum_crc32c: value.checksum_crc32c,
checksum_sha1: value.checksum_sha1,
checksum_sha256: value.checksum_sha256,
checksum_crc64nvme: value.checksum_crc64nvme,
}
}
}
#[derive(Debug, Default)]
pub struct ObjectInfo {
pub bucket: String,
pub name: String,
pub storage_class: Option<String>,
pub mod_time: Option<OffsetDateTime>,
pub size: i64,
// Actual size is the real size of the object uploaded by client.
pub actual_size: i64,
pub is_dir: bool,
pub user_defined: HashMap<String, String>,
pub parity_blocks: usize,
pub data_blocks: usize,
pub version_id: Option<Uuid>,
pub delete_marker: bool,
pub transitioned_object: TransitionedObject,
pub restore_ongoing: bool,
pub restore_expires: Option<OffsetDateTime>,
pub user_tags: String,
pub parts: Vec<ObjectPartInfo>,
pub is_latest: bool,
pub content_type: Option<String>,
pub content_encoding: Option<String>,
pub expires: Option<OffsetDateTime>,
pub num_versions: usize,
pub successor_mod_time: Option<OffsetDateTime>,
pub put_object_reader: Option<PutObjReader>,
pub etag: Option<String>,
pub inlined: bool,
pub metadata_only: bool,
pub version_only: bool,
pub replication_status_internal: Option<String>,
pub replication_status: ReplicationStatusType,
pub version_purge_status_internal: Option<String>,
pub version_purge_status: VersionPurgeStatusType,
pub replication_decision: String,
pub checksum: Option<Bytes>,
}
impl Clone for ObjectInfo {
fn clone(&self) -> Self {
Self {
bucket: self.bucket.clone(),
name: self.name.clone(),
storage_class: self.storage_class.clone(),
mod_time: self.mod_time,
size: self.size,
actual_size: self.actual_size,
is_dir: self.is_dir,
user_defined: self.user_defined.clone(),
parity_blocks: self.parity_blocks,
data_blocks: self.data_blocks,
version_id: self.version_id,
delete_marker: self.delete_marker,
transitioned_object: self.transitioned_object.clone(),
restore_ongoing: self.restore_ongoing,
restore_expires: self.restore_expires,
user_tags: self.user_tags.clone(),
parts: self.parts.clone(),
is_latest: self.is_latest,
content_type: self.content_type.clone(),
content_encoding: self.content_encoding.clone(),
num_versions: self.num_versions,
successor_mod_time: self.successor_mod_time,
put_object_reader: None, // reader can not clone
etag: self.etag.clone(),
inlined: self.inlined,
metadata_only: self.metadata_only,
version_only: self.version_only,
replication_status_internal: self.replication_status_internal.clone(),
replication_status: self.replication_status.clone(),
version_purge_status_internal: self.version_purge_status_internal.clone(),
version_purge_status: self.version_purge_status.clone(),
replication_decision: self.replication_decision.clone(),
checksum: self.checksum.clone(),
expires: self.expires,
}
}
}
impl ObjectInfo {
pub fn is_compressed(&self) -> bool {
self.user_defined
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
}
pub fn is_compressed_ok(&self) -> Result<(CompressionAlgorithm, bool)> {
let scheme = self
.user_defined
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
.cloned();
if let Some(scheme) = scheme {
let algorithm = CompressionAlgorithm::from_str(&scheme)?;
Ok((algorithm, true))
} else {
Ok((CompressionAlgorithm::None, false))
}
}
pub fn is_multipart(&self) -> bool {
self.etag.as_ref().is_some_and(|v| v.len() != 32)
}
pub fn get_actual_size(&self) -> std::io::Result<i64> {
if self.actual_size > 0 {
return Ok(self.actual_size);
}
if self.is_compressed() {
if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
&& !size_str.is_empty()
{
// Todo: deal with error
let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
return Ok(size);
}
let mut actual_size = 0;
self.parts.iter().for_each(|part| {
actual_size += part.actual_size;
});
if actual_size == 0 && actual_size != self.size {
return Err(std::io::Error::other(format!("invalid decompressed size {} {}", actual_size, self.size)));
}
return Ok(actual_size);
}
// TODO: IsEncrypted
Ok(self.size)
}
pub fn from_file_info(fi: &FileInfo, bucket: &str, object: &str, versioned: bool) -> ObjectInfo {
let name = decode_dir_object(object);
let mut version_id = fi.version_id;
if versioned && version_id.is_none() {
version_id = Some(Uuid::nil())
}
// etag
let (content_type, content_encoding, etag) = {
let content_type = fi.metadata.get("content-type").cloned();
let content_encoding = fi.metadata.get("content-encoding").cloned();
let etag = fi.metadata.get("etag").cloned();
(content_type, content_encoding, etag)
};
// tags
let user_tags = fi.metadata.get(AMZ_OBJECT_TAGGING).cloned().unwrap_or_default();
let inlined = fi.inline_data();
// TODO:expires
// TODO:ReplicationState
let transitioned_object = TransitionedObject {
name: fi.transitioned_objname.clone(),
version_id: if let Some(transition_version_id) = fi.transition_version_id {
transition_version_id.to_string()
} else {
"".to_string()
},
status: fi.transition_status.clone(),
free_version: fi.tier_free_version(),
tier: fi.transition_tier.clone(),
};
let metadata = {
let mut v = fi.metadata.clone();
clean_metadata(&mut v);
v
};
// Extract storage class from metadata, default to STANDARD if not found
let storage_class = metadata
.get(AMZ_STORAGE_CLASS)
.cloned()
.or_else(|| Some(storageclass::STANDARD.to_string()));
// Convert parts from rustfs_filemeta::ObjectPartInfo to store_api::ObjectPartInfo
let parts = fi
.parts
.iter()
.map(|part| ObjectPartInfo {
etag: part.etag.clone(),
index: part.index.clone(),
size: part.size,
actual_size: part.actual_size,
mod_time: part.mod_time,
checksums: part.checksums.clone(),
number: part.number,
error: part.error.clone(),
})
.collect();
ObjectInfo {
bucket: bucket.to_string(),
name,
is_dir: object.starts_with('/'),
parity_blocks: fi.erasure.parity_blocks,
data_blocks: fi.erasure.data_blocks,
version_id,
delete_marker: fi.deleted,
mod_time: fi.mod_time,
size: fi.size,
parts,
is_latest: fi.is_latest,
user_tags,
content_type,
content_encoding,
num_versions: fi.num_versions,
successor_mod_time: fi.successor_mod_time,
etag,
inlined,
user_defined: metadata,
transitioned_object,
checksum: fi.checksum.clone(),
storage_class,
..Default::default()
}
}
pub async fn from_meta_cache_entries_sorted_versions(
entries: &MetaCacheEntriesSorted,
bucket: &str,
prefix: &str,
delimiter: Option<String>,
after_version_id: Option<Uuid>,
) -> Vec<ObjectInfo> {
let vcfg = get_versioning_config(bucket).await.ok();
let mut objects = Vec::with_capacity(entries.entries().len());
let mut prev_prefix = "";
for entry in entries.entries() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
if let Some(idx) = remaining.find(delimiter.as_str()) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
continue;
}
}
let file_infos = match entry.file_info_versions(bucket) {
Ok(res) => res,
Err(err) => {
warn!("file_info_versions err {:?}", err);
continue;
}
};
let versions = if let Some(vid) = after_version_id {
if let Some(idx) = file_infos.find_version_index(vid) {
&file_infos.versions[idx + 1..]
} else {
&file_infos.versions
}
} else {
&file_infos.versions
};
for fi in versions.iter() {
if !fi.version_purge_status().is_empty() {
continue;
}
let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
objects.push(ObjectInfo::from_file_info(fi, bucket, &entry.name, versioned));
}
continue;
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
&& let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
}
{
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
objects
}
pub async fn from_meta_cache_entries_sorted_infos(
entries: &MetaCacheEntriesSorted,
bucket: &str,
prefix: &str,
delimiter: Option<String>,
) -> Vec<ObjectInfo> {
let vcfg = get_versioning_config(bucket).await.ok();
let mut objects = Vec::with_capacity(entries.entries().len());
let mut prev_prefix = "";
for entry in entries.entries() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
if let Some(idx) = remaining.find(delimiter.as_str()) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
continue;
}
}
let fi = match entry.to_fileinfo(bucket) {
Ok(res) => res,
Err(err) => {
warn!("file_info_versions err {:?}", err);
continue;
}
};
// TODO:VersionPurgeStatus
let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
objects.push(ObjectInfo::from_file_info(&fi, bucket, &entry.name, versioned));
continue;
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/batch_processor.rs | crates/ecstore/src/batch_processor.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! High-performance batch processor using JoinSet
//!
//! This module provides optimized batching utilities to reduce async runtime overhead
//! and improve concurrent operation performance.
use crate::disk::error::{Error, Result};
use std::future::Future;
use std::sync::Arc;
use tokio::task::JoinSet;
/// Batch processor that executes tasks concurrently with a semaphore
pub struct AsyncBatchProcessor {
max_concurrent: usize,
}
impl AsyncBatchProcessor {
pub fn new(max_concurrent: usize) -> Self {
Self { max_concurrent }
}
/// Execute a batch of tasks concurrently with concurrency control
pub async fn execute_batch<T, F>(&self, tasks: Vec<F>) -> Vec<Result<T>>
where
T: Send + 'static,
F: Future<Output = Result<T>> + Send + 'static,
{
if tasks.is_empty() {
return Vec::new();
}
let semaphore = Arc::new(tokio::sync::Semaphore::new(self.max_concurrent));
let mut join_set = JoinSet::new();
let mut results = Vec::with_capacity(tasks.len());
for _ in 0..tasks.len() {
results.push(Err(Error::other("Not completed")));
}
// Spawn all tasks with semaphore control
for (i, task) in tasks.into_iter().enumerate() {
let sem = semaphore.clone();
join_set.spawn(async move {
let _permit = sem.acquire().await.map_err(|_| Error::other("Semaphore error"))?;
let result = task.await;
Ok::<(usize, Result<T>), Error>((i, result))
});
}
// Collect results
while let Some(join_result) = join_set.join_next().await {
match join_result {
Ok(Ok((index, task_result))) => {
if index < results.len() {
results[index] = task_result;
}
}
Ok(Err(e)) => {
// Semaphore or other system error - this is rare
tracing::warn!("Batch processor system error: {:?}", e);
}
Err(join_error) => {
// Task panicked - log but continue
tracing::warn!("Task panicked in batch processor: {:?}", join_error);
}
}
}
results
}
/// Execute batch with early termination when sufficient successful results are obtained
pub async fn execute_batch_with_quorum<T, F>(&self, tasks: Vec<F>, required_successes: usize) -> Result<Vec<T>>
where
T: Send + 'static,
F: Future<Output = Result<T>> + Send + 'static,
{
let results = self.execute_batch(tasks).await;
let mut successes = Vec::new();
for value in results.into_iter().flatten() {
successes.push(value);
if successes.len() >= required_successes {
return Ok(successes);
}
}
if successes.len() >= required_successes {
Ok(successes)
} else {
Err(Error::other(format!(
"Insufficient successful results: got {}, needed {}",
successes.len(),
required_successes
)))
}
}
}
/// Global batch processor instances
pub struct GlobalBatchProcessors {
read_processor: AsyncBatchProcessor,
write_processor: AsyncBatchProcessor,
metadata_processor: AsyncBatchProcessor,
}
impl GlobalBatchProcessors {
pub fn new() -> Self {
Self {
read_processor: AsyncBatchProcessor::new(16), // Higher concurrency for reads
write_processor: AsyncBatchProcessor::new(8), // Lower concurrency for writes
metadata_processor: AsyncBatchProcessor::new(12), // Medium concurrency for metadata
}
}
pub fn read_processor(&self) -> &AsyncBatchProcessor {
&self.read_processor
}
pub fn write_processor(&self) -> &AsyncBatchProcessor {
&self.write_processor
}
pub fn metadata_processor(&self) -> &AsyncBatchProcessor {
&self.metadata_processor
}
}
impl Default for GlobalBatchProcessors {
fn default() -> Self {
Self::new()
}
}
// Global instance
use std::sync::OnceLock;
static GLOBAL_PROCESSORS: OnceLock<GlobalBatchProcessors> = OnceLock::new();
pub fn get_global_processors() -> &'static GlobalBatchProcessors {
GLOBAL_PROCESSORS.get_or_init(GlobalBatchProcessors::new)
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[tokio::test]
async fn test_batch_processor_basic() {
let processor = AsyncBatchProcessor::new(4);
let tasks: Vec<_> = (0..10)
.map(|i| async move {
tokio::time::sleep(Duration::from_millis(10)).await;
Ok::<i32, Error>(i)
})
.collect();
let results = processor.execute_batch(tasks).await;
assert_eq!(results.len(), 10);
// All tasks should succeed
for (i, result) in results.iter().enumerate() {
assert!(result.is_ok());
assert_eq!(result.as_ref().unwrap(), &(i as i32));
}
}
#[tokio::test]
async fn test_batch_processor_with_errors() {
let processor = AsyncBatchProcessor::new(2);
let tasks: Vec<_> = (0..5)
.map(|i| async move {
tokio::time::sleep(Duration::from_millis(10)).await;
if i % 2 == 0 {
Ok::<i32, Error>(i)
} else {
Err(Error::other("Test error"))
}
})
.collect();
let results = processor.execute_batch(tasks).await;
assert_eq!(results.len(), 5);
// Check results pattern
for (i, result) in results.iter().enumerate() {
if i % 2 == 0 {
assert!(result.is_ok());
assert_eq!(result.as_ref().unwrap(), &(i as i32));
} else {
assert!(result.is_err());
}
}
}
#[tokio::test]
async fn test_batch_processor_quorum() {
let processor = AsyncBatchProcessor::new(4);
let tasks: Vec<_> = (0..10)
.map(|i| async move {
tokio::time::sleep(Duration::from_millis(10)).await;
if i < 3 {
Ok::<i32, Error>(i)
} else {
Err(Error::other("Test error"))
}
})
.collect();
let results = processor.execute_batch_with_quorum(tasks, 2).await;
assert!(results.is_ok());
let successes = results.unwrap();
assert!(successes.len() >= 2);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/compress.rs | crates/ecstore/src/compress.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_utils::string::has_pattern;
use rustfs_utils::string::has_string_suffix_in_slice;
use std::env;
use tracing::error;
pub const MIN_COMPRESSIBLE_SIZE: usize = 4096;
// Environment variable name to control whether compression is enabled
pub const ENV_COMPRESSION_ENABLED: &str = "RUSTFS_COMPRESSION_ENABLED";
// Some standard object extensions which we strictly dis-allow for compression.
pub const STANDARD_EXCLUDE_COMPRESS_EXTENSIONS: &[&str] = &[
".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov", ".jpg", ".png", ".gif",
];
// Some standard content-types which we strictly dis-allow for compression.
pub const STANDARD_EXCLUDE_COMPRESS_CONTENT_TYPES: &[&str] = &[
"video/*",
"audio/*",
"application/zip",
"application/x-gzip",
"application/x-zip-compressed",
"application/x-compress",
"application/x-spoon",
];
pub fn is_compressible(headers: &http::HeaderMap, object_name: &str) -> bool {
// Check if compression is enabled via environment variable, default disabled
if let Ok(compression_enabled) = env::var(ENV_COMPRESSION_ENABLED) {
if compression_enabled.to_lowercase() != "true" {
error!("Compression is disabled by environment variable");
return false;
}
} else {
// Default disabled when environment variable is not set
return false;
}
let content_type = headers.get("content-type").and_then(|s| s.to_str().ok()).unwrap_or("");
// TODO: crypto request return false
if has_string_suffix_in_slice(object_name, STANDARD_EXCLUDE_COMPRESS_EXTENSIONS) {
error!("object_name: {} is not compressible", object_name);
return false;
}
if !content_type.is_empty() && has_pattern(STANDARD_EXCLUDE_COMPRESS_CONTENT_TYPES, content_type) {
error!("content_type: {} is not compressible", content_type);
return false;
}
true
// TODO: check from config
}
#[cfg(test)]
mod tests {
use super::*;
use temp_env;
#[test]
fn test_is_compressible() {
use http::HeaderMap;
let headers = HeaderMap::new();
// Test environment variable control
temp_env::with_var(ENV_COMPRESSION_ENABLED, Some("false"), || {
assert!(!is_compressible(&headers, "file.txt"));
});
temp_env::with_var(ENV_COMPRESSION_ENABLED, Some("true"), || {
assert!(is_compressible(&headers, "file.txt"));
});
temp_env::with_var_unset(ENV_COMPRESSION_ENABLED, || {
assert!(!is_compressible(&headers, "file.txt"));
});
temp_env::with_var(ENV_COMPRESSION_ENABLED, Some("true"), || {
let mut headers = HeaderMap::new();
// Test non-compressible extensions
headers.insert("content-type", "text/plain".parse().unwrap());
assert!(!is_compressible(&headers, "file.gz"));
assert!(!is_compressible(&headers, "file.zip"));
assert!(!is_compressible(&headers, "file.mp4"));
assert!(!is_compressible(&headers, "file.jpg"));
// Test non-compressible content types
headers.insert("content-type", "video/mp4".parse().unwrap());
assert!(!is_compressible(&headers, "file.txt"));
headers.insert("content-type", "audio/mpeg".parse().unwrap());
assert!(!is_compressible(&headers, "file.txt"));
headers.insert("content-type", "application/zip".parse().unwrap());
assert!(!is_compressible(&headers, "file.txt"));
headers.insert("content-type", "application/x-gzip".parse().unwrap());
assert!(!is_compressible(&headers, "file.txt"));
// Test compressible cases
headers.insert("content-type", "text/plain".parse().unwrap());
assert!(is_compressible(&headers, "file.txt"));
assert!(is_compressible(&headers, "file.log"));
headers.insert("content-type", "text/html".parse().unwrap());
assert!(is_compressible(&headers, "file.html"));
headers.insert("content-type", "application/json".parse().unwrap());
assert!(is_compressible(&headers, "file.json"));
});
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/global.rs | crates/ecstore/src/global.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
bucket::lifecycle::bucket_lifecycle_ops::LifecycleSys,
disk::DiskStore,
endpoints::{EndpointServerPools, PoolEndpoints, SetupType},
event_notification::EventNotifier,
store::ECStore,
tier::tier::TierConfigMgr,
};
use lazy_static::lazy_static;
use std::{
collections::HashMap,
sync::{Arc, OnceLock},
time::SystemTime,
};
use tokio::sync::{OnceCell, RwLock};
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30;
pub const DISK_MIN_INODES: u64 = 1000;
pub const DISK_FILL_FRACTION: f64 = 0.99;
pub const DISK_RESERVE_FRACTION: f64 = 0.15;
lazy_static! {
static ref GLOBAL_RUSTFS_PORT: OnceLock<u16> = OnceLock::new();
static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
pub static ref GLOBAL_OBJECT_API: OnceLock<Arc<ECStore>> = OnceLock::new();
pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new()));
pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false);
pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false);
pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false);
pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new()));
pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<TypeLocalDiskSetDrives>> = Arc::new(RwLock::new(Vec::new()));
pub static ref GLOBAL_Endpoints: OnceLock<EndpointServerPools> = OnceLock::new();
pub static ref GLOBAL_RootDiskThreshold: RwLock<u64> = RwLock::new(0);
pub static ref GLOBAL_TierConfigMgr: Arc<RwLock<TierConfigMgr>> = TierConfigMgr::new();
pub static ref GLOBAL_LifecycleSys: Arc<LifecycleSys> = LifecycleSys::new();
pub static ref GLOBAL_EventNotifier: Arc<RwLock<EventNotifier>> = EventNotifier::new();
pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();
pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
}
/// Global cancellation token for background services (data scanner and auto heal)
static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
/// Get the global rustfs port
///
/// # Returns
/// * `u16` - The global rustfs port
///
pub fn global_rustfs_port() -> u16 {
if let Some(p) = GLOBAL_RUSTFS_PORT.get() {
*p
} else {
rustfs_config::DEFAULT_PORT
}
}
/// Set the global rustfs port
///
/// # Arguments
/// * `value` - The port value to set globally
///
/// # Returns
/// * None
pub fn set_global_rustfs_port(value: u16) {
GLOBAL_RUSTFS_PORT.set(value).expect("set_global_rustfs_port fail");
}
/// Set the global deployment id
///
/// # Arguments
/// * `id` - The Uuid to set as the global deployment id
///
/// # Returns
/// * None
///
pub fn set_global_deployment_id(id: Uuid) {
globalDeploymentIDPtr.set(id).unwrap();
}
/// Get the global deployment id
///
/// # Returns
/// * `Option<String>` - The global deployment id as a string, if set
///
pub fn get_global_deployment_id() -> Option<String> {
globalDeploymentIDPtr.get().map(|v| v.to_string())
}
/// Set the global endpoints
///
/// # Arguments
/// * `eps` - A vector of PoolEndpoints to set globally
///
/// # Returns
/// * None
///
pub fn set_global_endpoints(eps: Vec<PoolEndpoints>) {
GLOBAL_Endpoints
.set(EndpointServerPools::from(eps))
.expect("GLOBAL_Endpoints set failed")
}
/// Get the global endpoints
///
/// # Returns
/// * `EndpointServerPools` - The global endpoints
///
pub fn get_global_endpoints() -> EndpointServerPools {
if let Some(eps) = GLOBAL_Endpoints.get() {
eps.clone()
} else {
EndpointServerPools::default()
}
}
/// Create a new object layer instance
///
/// # Returns
/// * `Option<Arc<ECStore>>` - The global object layer instance, if set
///
pub fn new_object_layer_fn() -> Option<Arc<ECStore>> {
GLOBAL_OBJECT_API.get().cloned()
}
/// Set the global object layer
///
/// # Arguments
/// * `o` - The ECStore instance to set globally
///
/// # Returns
/// * None
pub async fn set_object_layer(o: Arc<ECStore>) {
GLOBAL_OBJECT_API.set(o).expect("set_object_layer fail ")
}
/// Check if the setup type is distributed erasure coding
///
/// # Returns
/// * `bool` - True if the setup type is distributed erasure coding, false otherwise
///
pub async fn is_dist_erasure() -> bool {
let lock = GLOBAL_IsDistErasure.read().await;
*lock
}
/// Check if the setup type is erasure coding with single data center
///
/// # Returns
/// * `bool` - True if the setup type is erasure coding with single data center, false otherwise
///
pub async fn is_erasure_sd() -> bool {
let lock = GLOBAL_IsErasureSD.read().await;
*lock
}
/// Check if the setup type is erasure coding
///
/// # Returns
/// * `bool` - True if the setup type is erasure coding, false otherwise
///
pub async fn is_erasure() -> bool {
let lock = GLOBAL_IsErasure.read().await;
*lock
}
/// Update the global erasure type based on the setup type
///
/// # Arguments
/// * `setup_type` - The SetupType to update the global erasure type
///
/// # Returns
/// * None
pub async fn update_erasure_type(setup_type: SetupType) {
let mut is_erasure = GLOBAL_IsErasure.write().await;
*is_erasure = setup_type == SetupType::Erasure;
let mut is_dist_erasure = GLOBAL_IsDistErasure.write().await;
*is_dist_erasure = setup_type == SetupType::DistErasure;
if *is_dist_erasure {
*is_erasure = true
}
let mut is_erasure_sd = GLOBAL_IsErasureSD.write().await;
*is_erasure_sd = setup_type == SetupType::ErasureSD;
}
// pub fn is_legacy() -> bool {
// if let Some(endpoints) = GLOBAL_Endpoints.get() {
// endpoints.as_ref().len() == 1 && endpoints.as_ref()[0].legacy
// } else {
// false
// }
// }
type TypeLocalDiskSetDrives = Vec<Vec<Vec<Option<DiskStore>>>>;
/// Set the global region
///
/// # Arguments
/// * `region` - The region string to set globally
///
/// # Returns
/// * None
pub fn set_global_region(region: String) {
GLOBAL_REGION.set(region).unwrap();
}
/// Get the global region
///
/// # Returns
/// * `Option<String>` - The global region string, if set
///
pub fn get_global_region() -> Option<String> {
GLOBAL_REGION.get().cloned()
}
/// Initialize the global background services cancellation token
///
/// # Arguments
/// * `cancel_token` - The CancellationToken instance to set globally
///
/// # Returns
/// * `Ok(())` if successful
/// * `Err(CancellationToken)` if setting fails
///
pub fn init_background_services_cancel_token(cancel_token: CancellationToken) -> Result<(), CancellationToken> {
GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.set(cancel_token)
}
/// Get the global background services cancellation token
///
/// # Returns
/// * `Option<&'static CancellationToken>` - The global cancellation token, if set
///
pub fn get_background_services_cancel_token() -> Option<&'static CancellationToken> {
GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get()
}
/// Create and initialize the global background services cancellation token
///
/// # Returns
/// * `CancellationToken` - The newly created global cancellation token
///
pub fn create_background_services_cancel_token() -> CancellationToken {
let cancel_token = CancellationToken::new();
init_background_services_cancel_token(cancel_token.clone()).expect("Background services cancel token already initialized");
cancel_token
}
/// Shutdown all background services gracefully
///
/// # Returns
/// * None
pub fn shutdown_background_services() {
if let Some(cancel_token) = GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get() {
cancel_token.cancel();
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/lib.rs | crates/ecstore/src/lib.rs | #![allow(dead_code)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate core;
pub mod admin_server_info;
pub mod batch_processor;
pub mod bitrot;
pub mod bucket;
pub mod cache_value;
pub mod compress;
pub mod config;
pub mod data_usage;
pub mod disk;
pub mod disks_layout;
pub mod endpoints;
pub mod erasure_coding;
pub mod error;
pub mod file_cache;
pub mod global;
pub mod metrics_realtime;
pub mod notification_sys;
pub mod pools;
pub mod rebalance;
pub mod rpc;
pub mod set_disk;
mod sets;
pub mod store;
pub mod store_api;
mod store_init;
pub mod store_list_objects;
pub mod store_utils;
// pub mod checksum;
pub mod client;
pub mod event;
pub mod event_notification;
pub mod tier;
pub use global::new_object_layer_fn;
pub use global::set_global_endpoints;
pub use global::update_erasure_type;
pub use global::GLOBAL_Endpoints;
pub use store_api::StorageAPI;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/endpoints.rs | crates/ecstore/src/endpoints.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
use tracing::{error, info, instrument, warn};
use crate::{
disk::endpoint::{Endpoint, EndpointType},
disks_layout::DisksLayout,
global::global_rustfs_port,
};
use std::io::{Error, Result};
use std::{
collections::{HashMap, HashSet, hash_map::Entry},
net::IpAddr,
};
/// enum for setup type.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum SetupType {
/// starts with unknown setup type.
Unknown,
/// FS setup type enum.
FS,
/// Erasure single drive setup enum.
ErasureSD,
/// Erasure setup type enum.
Erasure,
/// Distributed Erasure setup type enum.
DistErasure,
}
/// holds information about a node in this cluster
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Node {
pub url: url::Url,
pub pools: Vec<usize>,
pub is_local: bool,
pub grid_host: String,
}
/// list of same type of endpoint.
#[derive(Debug, Default, Clone)]
pub struct Endpoints(Vec<Endpoint>);
impl AsRef<Vec<Endpoint>> for Endpoints {
fn as_ref(&self) -> &Vec<Endpoint> {
&self.0
}
}
impl AsMut<Vec<Endpoint>> for Endpoints {
fn as_mut(&mut self) -> &mut Vec<Endpoint> {
&mut self.0
}
}
impl From<Vec<Endpoint>> for Endpoints {
fn from(v: Vec<Endpoint>) -> Self {
Self(v)
}
}
impl<T: AsRef<str>> TryFrom<&[T]> for Endpoints {
type Error = Error;
/// returns new endpoint list based on input args.
fn try_from(args: &[T]) -> Result<Self> {
let mut endpoint_type = None;
let mut schema = None;
let mut endpoints = Vec::with_capacity(args.len());
let mut uniq_set = HashSet::with_capacity(args.len());
// Loop through args and adds to endpoint list.
for (i, arg) in args.iter().enumerate() {
let endpoint = match Endpoint::try_from(arg.as_ref()) {
Ok(ep) => ep,
Err(e) => return Err(Error::other(format!("'{}': {}", arg.as_ref(), e))),
};
// All endpoints have to be same type and scheme if applicable.
if i == 0 {
endpoint_type = Some(endpoint.get_type());
schema = Some(endpoint.url.scheme().to_owned());
} else if Some(endpoint.get_type()) != endpoint_type {
return Err(Error::other("mixed style endpoints are not supported"));
} else if Some(endpoint.url.scheme()) != schema.as_deref() {
return Err(Error::other("mixed scheme is not supported"));
}
// Check for duplicate endpoints.
let endpoint_str = endpoint.to_string();
if uniq_set.contains(&endpoint_str) {
return Err(Error::other("duplicate endpoints found"));
}
uniq_set.insert(endpoint_str);
endpoints.push(endpoint);
}
Ok(Endpoints(endpoints))
}
}
impl Endpoints {
/// Converts `self` into its inner representation.
///
/// This method consumes the `self` object and returns its inner `Vec<Endpoint>`.
/// It is useful for when you need to take the endpoints out of their container
/// without needing a reference to the container itself.
pub fn into_inner(self) -> Vec<Endpoint> {
self.0
}
pub fn into_ref(&self) -> &Vec<Endpoint> {
&self.0
}
// GetString - returns endpoint string of i-th endpoint (0-based),
// and empty string for invalid indexes.
pub fn get_string(&self, i: usize) -> String {
if i >= self.0.len() {
return "".to_string();
}
self.0[i].to_string()
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
#[derive(Debug)]
/// a temporary type to holds the list of endpoints
struct PoolEndpointList {
inner: Vec<Endpoints>,
setup_type: SetupType,
}
impl AsRef<Vec<Endpoints>> for PoolEndpointList {
fn as_ref(&self) -> &Vec<Endpoints> {
&self.inner
}
}
impl AsMut<Vec<Endpoints>> for PoolEndpointList {
fn as_mut(&mut self) -> &mut Vec<Endpoints> {
&mut self.inner
}
}
impl PoolEndpointList {
/// creates a list of endpoints per pool, resolves their relevant
/// hostnames and discovers those are local or remote.
async fn create_pool_endpoints(server_addr: &str, disks_layout: &DisksLayout) -> Result<Self> {
if disks_layout.is_empty_layout() {
return Err(Error::other("invalid number of endpoints"));
}
let server_addr = check_local_server_addr(server_addr)?;
// For single arg, return single drive EC setup.
if disks_layout.is_single_drive_layout() {
let mut endpoint = Endpoint::try_from(disks_layout.get_single_drive_layout())?;
endpoint.update_is_local(server_addr.port())?;
if endpoint.get_type() != EndpointType::Path {
return Err(Error::other("use path style endpoint for single node setup"));
}
endpoint.set_pool_index(0);
endpoint.set_set_index(0);
endpoint.set_disk_index(0);
// TODO Check for cross device mounts if any.
return Ok(Self {
inner: vec![Endpoints::from(vec![endpoint])],
setup_type: SetupType::ErasureSD,
});
}
let mut pool_endpoints = Vec::<Endpoints>::with_capacity(disks_layout.pools.len());
for (pool_idx, pool) in disks_layout.pools.iter().enumerate() {
let mut endpoints = Endpoints::default();
for (set_idx, set_layout) in pool.iter().enumerate() {
// Convert args to endpoints
let mut eps = Endpoints::try_from(set_layout.as_slice())?;
// TODO Check for cross device mounts if any.
for (disk_idx, ep) in eps.as_mut().iter_mut().enumerate() {
ep.set_pool_index(pool_idx);
ep.set_set_index(set_idx);
ep.set_disk_index(disk_idx);
}
endpoints.as_mut().append(eps.as_mut());
}
if endpoints.as_ref().is_empty() {
return Err(Error::other("invalid number of endpoints"));
}
pool_endpoints.push(endpoints);
}
// setup type
let mut unique_args = HashSet::new();
let mut pool_endpoint_list = Self {
inner: pool_endpoints,
setup_type: SetupType::Unknown,
};
pool_endpoint_list.update_is_local(server_addr.port())?;
for endpoints in pool_endpoint_list.inner.iter_mut() {
// Check whether same path is not used in endpoints of a host on different port.
let mut path_ip_map: HashMap<String, HashSet<IpAddr>> = HashMap::new();
let mut host_ip_cache = HashMap::new();
for ep in endpoints.as_ref() {
if !ep.url.has_host() {
continue;
}
let host = ep.url.host().unwrap();
let host_ip_set = if let Some(set) = host_ip_cache.get(&host) {
info!(
target: "rustfs::ecstore::endpoints",
host = %host,
endpoint = %ep.to_string(),
from = "cache",
"Create pool endpoints host '{}' found in cache for endpoint '{}'", host, ep.to_string()
);
set
} else {
let ips = match get_host_ip(host.clone()).await {
Ok(ips) => ips,
Err(e) => {
error!("Create pool endpoints host {} not found, error:{}", host, e);
return Err(Error::other(format!("host '{host}' cannot resolve: {e}")));
}
};
info!(
target: "rustfs::ecstore::endpoints",
host = %host,
endpoint = %ep.to_string(),
from = "get_host_ip",
"Create pool endpoints host '{}' resolved to ips {:?} for endpoint '{}'",
host,
ips,
ep.to_string()
);
host_ip_cache.insert(host.clone(), ips);
host_ip_cache.get(&host).unwrap()
};
let path = ep.get_file_path();
match path_ip_map.entry(path) {
Entry::Occupied(mut e) => {
if e.get().intersection(host_ip_set).count() > 0 {
let path_key = e.key().clone();
return Err(Error::other(format!(
"same path '{path_key}' can not be served by different port on same address"
)));
}
e.get_mut().extend(host_ip_set.iter());
}
Entry::Vacant(e) => {
e.insert(host_ip_set.clone());
}
}
}
// Check whether same path is used for more than 1 local endpoints.
let mut local_path_set = HashSet::new();
for ep in endpoints.as_ref() {
if !ep.is_local {
continue;
}
let path = ep.get_file_path();
if local_path_set.contains(&path) {
return Err(Error::other(format!(
"path '{path}' cannot be served by different address on same server"
)));
}
local_path_set.insert(path);
}
// Here all endpoints are URL style.
let mut ep_path_set = HashSet::new();
let mut local_server_host_set = HashSet::new();
let mut local_port_set = HashSet::new();
let mut local_endpoint_count = 0;
for ep in endpoints.as_ref() {
ep_path_set.insert(ep.get_file_path());
if ep.is_local && ep.url.has_host() {
local_server_host_set.insert(ep.url.host());
local_port_set.insert(ep.url.port());
local_endpoint_count += 1;
}
}
// All endpoints are pointing to local host
if endpoints.as_ref().len() == local_endpoint_count {
// If all endpoints have same port number, Just treat it as local erasure setup
// using URL style endpoints.
if local_port_set.len() == 1 && local_server_host_set.len() > 1 {
return Err(Error::other("all local endpoints should not have different hostnames/ips"));
}
}
// Add missing port in all endpoints.
for ep in endpoints.as_mut() {
if !ep.url.has_host() {
unique_args.insert(format!("localhost:{}", server_addr.port()));
continue;
}
match ep.url.port() {
None => {
let _ = ep.url.set_port(Some(server_addr.port()));
}
Some(port) => {
// If endpoint is local, but port is different than serverAddrPort, then make it as remote.
if ep.is_local && server_addr.port() != port {
ep.is_local = false;
}
}
}
unique_args.insert(ep.host_port());
}
}
let setup_type = match pool_endpoint_list.as_ref()[0].as_ref()[0].get_type() {
EndpointType::Path => SetupType::Erasure,
EndpointType::Url => match unique_args.len() {
1 => SetupType::Erasure,
_ => SetupType::DistErasure,
},
};
pool_endpoint_list.setup_type = setup_type;
Ok(pool_endpoint_list)
}
/// resolves all hosts and discovers which are local
fn update_is_local(&mut self, local_port: u16) -> Result<()> {
for endpoints in self.inner.iter_mut() {
for ep in endpoints.as_mut() {
match ep.url.host() {
None => {
ep.is_local = true;
}
Some(host) => {
ep.is_local = is_local_host(host, ep.url.port().unwrap_or_default(), local_port)?;
}
}
}
}
Ok(())
}
/// resolves all hosts and discovers which are local
fn _update_is_local(&mut self, local_port: u16) -> Result<()> {
let mut eps_resolved = 0;
let mut found_local = false;
let mut resolved_set: HashSet<(usize, usize)> = HashSet::new();
let ep_count: usize = self.inner.iter().map(|v| v.as_ref().len()).sum();
loop {
// Break if the local endpoint is found already Or all the endpoints are resolved.
if found_local || eps_resolved == ep_count {
break;
}
for (i, endpoints) in self.inner.iter_mut().enumerate() {
for (j, ep) in endpoints.as_mut().iter_mut().enumerate() {
if resolved_set.contains(&(i, j)) {
// Continue if host is already resolved.
continue;
}
match ep.url.host() {
None => {
if !found_local {
found_local = true;
}
ep.is_local = true;
eps_resolved += 1;
resolved_set.insert((i, j));
continue;
}
Some(host) => match is_local_host(host, ep.url.port().unwrap_or_default(), local_port) {
Ok(is_local) => {
if !found_local {
found_local = is_local;
}
ep.is_local = is_local;
eps_resolved += 1;
resolved_set.insert((i, j));
}
Err(err) => {
// TODO Retry infinitely on Kubernetes and Docker swarm?
return Err(err);
}
},
}
}
}
}
unimplemented!()
}
}
/// represent endpoints in a given pool
/// along with its setCount and setDriveCount.
#[derive(Debug, Clone)]
pub struct PoolEndpoints {
// indicates if endpoints are provided in non-ellipses style
pub legacy: bool,
pub set_count: usize,
pub drives_per_set: usize,
pub endpoints: Endpoints,
pub cmd_line: String,
pub platform: String,
}
/// list of endpoints
#[derive(Debug, Clone, Default)]
pub struct EndpointServerPools(pub Vec<PoolEndpoints>);
impl From<Vec<PoolEndpoints>> for EndpointServerPools {
fn from(v: Vec<PoolEndpoints>) -> Self {
Self(v)
}
}
impl AsRef<Vec<PoolEndpoints>> for EndpointServerPools {
fn as_ref(&self) -> &Vec<PoolEndpoints> {
&self.0
}
}
impl AsMut<Vec<PoolEndpoints>> for EndpointServerPools {
fn as_mut(&mut self) -> &mut Vec<PoolEndpoints> {
&mut self.0
}
}
impl EndpointServerPools {
pub fn reset(&mut self, eps: Vec<PoolEndpoints>) {
self.0 = eps;
}
pub fn legacy(&self) -> bool {
self.0.len() == 1 && self.0[0].legacy
}
pub fn get_pool_idx(&self, cmd_line: &str) -> Option<usize> {
for (idx, eps) in self.0.iter().enumerate() {
if eps.cmd_line.as_str() == cmd_line {
return Some(idx);
}
}
None
}
pub async fn from_volumes(server_addr: &str, endpoints: Vec<String>) -> Result<(EndpointServerPools, SetupType)> {
let layouts = DisksLayout::from_volumes(endpoints.as_slice())?;
Self::create_server_endpoints(server_addr, &layouts).await
}
/// validates and creates new endpoints from input args, supports
/// both ellipses and without ellipses transparently.
pub async fn create_server_endpoints(
server_addr: &str,
disks_layout: &DisksLayout,
) -> Result<(EndpointServerPools, SetupType)> {
if disks_layout.pools.is_empty() {
return Err(Error::other("Invalid arguments specified"));
}
let pool_eps = PoolEndpointList::create_pool_endpoints(server_addr, disks_layout).await?;
let mut ret: EndpointServerPools = Vec::with_capacity(pool_eps.as_ref().len()).into();
for (i, eps) in pool_eps.inner.into_iter().enumerate() {
let ep = PoolEndpoints {
legacy: disks_layout.legacy,
set_count: disks_layout.get_set_count(i),
drives_per_set: disks_layout.get_drives_per_set(i),
endpoints: eps,
cmd_line: disks_layout.get_cmd_line(i),
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
};
ret.add(ep)?;
}
Ok((ret, pool_eps.setup_type))
}
pub fn es_count(&self) -> usize {
self.0.iter().map(|v| v.set_count).sum()
}
/// add pool endpoints
pub fn add(&mut self, eps: PoolEndpoints) -> Result<()> {
let mut exits = HashSet::new();
for peps in self.0.iter() {
for ep in peps.endpoints.as_ref() {
exits.insert(ep.to_string());
}
}
for ep in eps.endpoints.as_ref() {
if exits.contains(&ep.to_string()) {
return Err(Error::other("duplicate endpoints found"));
}
}
self.0.push(eps);
Ok(())
}
/// returns true if the first endpoint is local.
pub fn first_local(&self) -> bool {
self.0
.first()
.and_then(|v| v.endpoints.as_ref().first())
.is_some_and(|v| v.is_local)
}
/// returns a sorted list of nodes in this cluster
pub fn get_nodes(&self) -> Vec<Node> {
let mut node_map = HashMap::new();
for pool in self.0.iter() {
for ep in pool.endpoints.as_ref() {
let n = node_map.entry(ep.host_port()).or_insert(Node {
url: ep.url.clone(),
pools: vec![],
is_local: ep.is_local,
grid_host: ep.grid_host(),
});
if !n.pools.contains(&(ep.pool_idx as usize)) {
n.pools.push(ep.pool_idx as usize);
}
}
}
let mut nodes: Vec<Node> = node_map.into_values().collect();
nodes.sort_by(|a, b| a.grid_host.cmp(&b.grid_host));
nodes
}
#[instrument]
pub fn hosts_sorted(&self) -> Vec<Option<XHost>> {
let (mut peers, local) = self.peers();
let mut ret = vec![None; peers.len()];
peers.sort();
for (i, peer) in peers.iter().enumerate() {
if &local == peer {
continue;
}
let host = match XHost::try_from(peer.clone()) {
Ok(res) => res,
Err(err) => {
warn!("Xhost parse failed {:?}", err);
continue;
}
};
ret[i] = Some(host);
}
ret
}
pub fn peers(&self) -> (Vec<String>, String) {
let mut local = None;
let mut set = HashSet::new();
for ep in self.0.iter() {
for endpoint in ep.endpoints.0.iter() {
if endpoint.get_type() != EndpointType::Url {
continue;
}
let host = endpoint.host_port();
if endpoint.is_local && endpoint.url.port() == Some(global_rustfs_port()) && local.is_none() {
local = Some(host.clone());
}
set.insert(host);
}
}
let hosts: Vec<String> = set.iter().cloned().collect();
(hosts, local.unwrap_or_default())
}
pub fn find_grid_hosts_from_peer(&self, host: &XHost) -> Option<String> {
for ep in self.0.iter() {
for endpoint in ep.endpoints.0.iter() {
if endpoint.is_local {
continue;
}
let xhost = match XHost::try_from(endpoint.host_port()) {
Ok(res) => res,
Err(_) => {
continue;
}
};
if xhost.to_string() == host.to_string() {
return Some(endpoint.grid_host());
}
}
}
None
}
}
#[cfg(test)]
mod test {
use rustfs_utils::must_get_local_ips;
use super::*;
use std::path::Path;
#[test]
fn test_new_endpoints() {
let test_cases = [
(vec!["/d1", "/d2", "/d3", "/d4"], None, 1),
(
vec![
"http://localhost/d1",
"http://localhost/d2",
"http://localhost/d3",
"http://localhost/d4",
],
None,
2,
),
(
vec![
"http://example.org/d1",
"http://example.com/d1",
"http://example.net/d1",
"http://example.edu/d1",
],
None,
3,
),
(
vec![
"http://localhost/d1",
"http://localhost/d2",
"http://example.org/d1",
"http://example.org/d2",
],
None,
4,
),
(
vec![
"https://localhost:9000/d1",
"https://localhost:9001/d2",
"https://localhost:9002/d3",
"https://localhost:9003/d4",
],
None,
5,
),
// It is valid WRT endpoint list that same path is expected with different port on same server.
(
vec![
"https://127.0.0.1:9000/d1",
"https://127.0.0.1:9001/d1",
"https://127.0.0.1:9002/d1",
"https://127.0.0.1:9003/d1",
],
None,
6,
),
(vec!["d1", "d2", "d3", "d1"], Some(Error::other("duplicate endpoints found")), 7),
(vec!["d1", "d2", "d3", "./d1"], Some(Error::other("duplicate endpoints found")), 8),
(
vec![
"http://localhost/d1",
"http://localhost/d2",
"http://localhost/d1",
"http://localhost/d4",
],
Some(Error::other("duplicate endpoints found")),
9,
),
(
vec!["ftp://server/d1", "http://server/d2", "http://server/d3", "http://server/d4"],
Some(Error::other("'ftp://server/d1': io error invalid URL endpoint format")),
10,
),
(
vec!["d1", "http://localhost/d2", "d3", "d4"],
Some(Error::other("mixed style endpoints are not supported")),
11,
),
(
vec![
"http://example.org/d1",
"https://example.com/d1",
"http://example.net/d1",
"https://example.edut/d1",
],
Some(Error::other("mixed scheme is not supported")),
12,
),
(
vec![
"192.168.1.210:9000/tmp/dir0",
"192.168.1.210:9000/tmp/dir1",
"192.168.1.210:9000/tmp/dir2",
"192.168.110:9000/tmp/dir3",
],
Some(Error::other("'192.168.1.210:9000/tmp/dir0': io error")),
13,
),
];
for test_case in test_cases {
let args: Vec<String> = test_case.0.iter().map(|v| v.to_string()).collect();
let ret = Endpoints::try_from(args.as_slice());
match (test_case.1, ret) {
(None, Err(e)) => panic!("{}: error: expected = <nil>, got = {}", test_case.2, e),
(None, Ok(_)) => {}
(Some(e), Ok(_)) => panic!("{}: error: expected = {}, got = <nil>", test_case.2, e),
(Some(e), Err(e2)) => {
assert!(
e2.to_string().starts_with(&e.to_string()),
"{}: error: expected = {}, got = {}",
test_case.2,
e,
e2
)
}
}
}
}
#[tokio::test]
async fn test_create_pool_endpoints() {
#[derive(Default)]
struct TestCase<'a> {
num: usize,
server_addr: &'a str,
args: Vec<&'a str>,
expected_endpoints: Option<Endpoints>,
expected_setup_type: Option<SetupType>,
expected_err: Option<Error>,
}
// Filter ipList by IPs those do not start with '127.'.
let non_loop_back_i_ps =
must_get_local_ips().map_or(vec![], |v| v.into_iter().filter(|ip| ip.is_ipv4() && ip.is_loopback()).collect());
if non_loop_back_i_ps.is_empty() {
panic!("No non-loop back IP address found for this host");
}
let non_loop_back_ip = non_loop_back_i_ps[0];
let case1_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let case1_endpoint2 = format!("http://{non_loop_back_ip}/d2");
let args = vec![
format!("http://{}:10000/d1", non_loop_back_ip),
format!("http://{}:10000/d2", non_loop_back_ip),
"http://example.org:10000/d3".to_string(),
"http://example.com:10000/d4".to_string(),
];
let (case1_ur_ls, case1_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:10000/"));
let case2_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let case2_endpoint2 = format!("http://{non_loop_back_ip}:9000/d2");
let args = vec![
format!("http://{}:10000/d1", non_loop_back_ip),
format!("http://{}:9000/d2", non_loop_back_ip),
"http://example.org:10000/d3".to_string(),
"http://example.com:10000/d4".to_string(),
];
let (case2_ur_ls, case2_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:10000/"));
let case3_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let args = vec![
format!("http://{}:80/d1", non_loop_back_ip),
"http://example.org:9000/d2".to_string(),
"http://example.com:80/d3".to_string(),
"http://example.net:80/d4".to_string(),
];
let (case3_ur_ls, case3_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:80/"));
let case4_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let args = vec![
format!("http://{}:9000/d1", non_loop_back_ip),
"http://example.org:9000/d2".to_string(),
"http://example.com:9000/d3".to_string(),
"http://example.net:9000/d4".to_string(),
];
let (case4_ur_ls, case4_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9000/"));
let case5_endpoint1 = format!("http://{non_loop_back_ip}:9000/d1");
let case5_endpoint2 = format!("http://{non_loop_back_ip}:9001/d2");
let case5_endpoint3 = format!("http://{non_loop_back_ip}:9002/d3");
let case5_endpoint4 = format!("http://{non_loop_back_ip}:9003/d4");
let args = vec![
case5_endpoint1.clone(),
case5_endpoint2.clone(),
case5_endpoint3.clone(),
case5_endpoint4.clone(),
];
let (case5_ur_ls, case5_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9000/"));
let case6_endpoint1 = format!("http://{non_loop_back_ip}:9003/d4");
let args = vec![
"http://localhost:9000/d1".to_string(),
"http://localhost:9001/d2".to_string(),
"http://127.0.0.1:9002/d3".to_string(),
case6_endpoint1.clone(),
];
let (case6_ur_ls, case6_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9003/"));
let case7_endpoint1 = format!("http://{non_loop_back_ip}:9001/export");
let case7_endpoint2 = format!("http://{non_loop_back_ip}:9000/export");
let test_cases = [
TestCase {
num: 1,
server_addr: "localhost",
expected_err: Some(Error::other("address localhost: missing port in address")),
..Default::default()
},
// Erasure Single Drive
TestCase {
num: 2,
server_addr: "localhost:9000",
args: vec!["http://localhost/d1"],
expected_err: Some(Error::other("use path style endpoint for single node setup")),
..Default::default()
},
TestCase {
num: 3,
server_addr: "0.0.0.0:443",
args: vec!["/d1"],
expected_endpoints: Some(Endpoints(vec![Endpoint {
url: must_file_path("/d1"),
is_local: true,
pool_idx: 0,
set_idx: 0,
disk_idx: 0,
}])),
expected_setup_type: Some(SetupType::ErasureSD),
..Default::default()
},
TestCase {
num: 4,
server_addr: "localhost:10000",
args: vec!["/d1"],
expected_endpoints: Some(Endpoints(vec![Endpoint {
url: must_file_path("/d1"),
is_local: true,
pool_idx: 0,
set_idx: 0,
disk_idx: 0,
}])),
expected_setup_type: Some(SetupType::ErasureSD),
..Default::default()
},
TestCase {
num: 5,
server_addr: "localhost:9000",
args: vec![
"https://127.0.0.1:9000/d1",
"https://localhost:9001/d1",
"https://example.com/d1",
"https://example.com/d2",
],
expected_err: Some(Error::other("same path '/d1' can not be served by different port on same address")),
..Default::default()
},
// Erasure Setup with PathEndpointType
TestCase {
num: 6,
server_addr: "0.0.0.0:1234",
args: vec!["/d1", "/d2", "/d3", "/d4"],
expected_endpoints: Some(Endpoints(vec![
Endpoint {
url: must_file_path("/d1"),
is_local: true,
pool_idx: 0,
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/store.rs | crates/ecstore/src/store.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::map_entry)]
use crate::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry;
use crate::bucket::metadata_sys::{self, set_bucket_metadata};
use crate::bucket::utils::{check_valid_bucket_name, check_valid_bucket_name_strict, is_meta_bucketname};
use crate::config::GLOBAL_STORAGE_CLASS;
use crate::config::storageclass;
use crate::disk::endpoint::{Endpoint, EndpointType};
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
use crate::error::{Error, Result};
use crate::error::{
StorageError, is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum,
is_err_version_not_found, to_object_err,
};
use crate::global::{
DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,
GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, GLOBAL_TierConfigMgr, get_global_deployment_id, get_global_endpoints,
is_dist_erasure, is_erasure_sd, set_global_deployment_id, set_object_layer,
};
use crate::notification_sys::get_global_notification_sys;
use crate::pools::PoolMeta;
use crate::rebalance::RebalanceMeta;
use crate::store_api::{
ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO, ObjectInfoOrErr, WalkOptions,
};
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
use crate::{
bucket::{lifecycle::bucket_lifecycle_ops::TransitionState, metadata::BucketMetadata},
disk::{BUCKET_META_PREFIX, DiskOption, DiskStore, RUSTFS_META_BUCKET, new_disk},
endpoints::EndpointServerPools,
rpc::S3PeerSys,
sets::Sets,
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo,
PutObjReader, StorageAPI,
},
store_init,
};
use futures::future::join_all;
use http::HeaderMap;
use lazy_static::lazy_static;
use rand::Rng as _;
use rustfs_common::heal_channel::{HealItemType, HealOpts};
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_HOST, GLOBAL_RUSTFS_PORT};
use rustfs_filemeta::FileInfo;
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration};
use std::cmp::Ordering;
use std::net::SocketAddr;
use std::process::exit;
use std::slice::Iter;
use std::time::SystemTime;
use std::{collections::HashMap, sync::Arc, time::Duration};
use time::OffsetDateTime;
use tokio::select;
use tokio::sync::RwLock;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
use uuid::Uuid;
const MAX_UPLOADS_LIST: usize = 10000;
#[derive(Debug)]
pub struct ECStore {
pub id: Uuid,
// pub disks: Vec<DiskStore>,
pub disk_map: HashMap<usize, Vec<Option<DiskStore>>>,
pub pools: Vec<Arc<Sets>>,
pub peer_sys: S3PeerSys,
// pub local_disks: Vec<DiskStore>,
pub pool_meta: RwLock<PoolMeta>,
pub rebalance_meta: RwLock<Option<RebalanceMeta>>,
pub decommission_cancelers: Vec<Option<usize>>,
}
// impl Clone for ECStore {
// fn clone(&self) -> Self {
// let pool_meta = match self.pool_meta.read() {
// Ok(pool_meta) => pool_meta.clone(),
// Err(_) => PoolMeta::default(),
// };
// Self {
// id: self.id.clone(),
// disk_map: self.disk_map.clone(),
// pools: self.pools.clone(),
// peer_sys: self.peer_sys.clone(),
// pool_meta: std_RwLock::new(pool_meta),
// decommission_cancelers: self.decommission_cancelers.clone(),
// }
// }
// }
impl ECStore {
#[allow(clippy::new_ret_no_self)]
#[instrument(level = "debug", skip(endpoint_pools))]
pub async fn new(address: SocketAddr, endpoint_pools: EndpointServerPools, ctx: CancellationToken) -> Result<Arc<Self>> {
// let layouts = DisksLayout::from_volumes(endpoints.as_slice())?;
let mut deployment_id = None;
// let (endpoint_pools, _) = EndpointServerPools::create_server_endpoints(address.as_str(), &layouts)?;
let mut pools = Vec::with_capacity(endpoint_pools.as_ref().len());
let mut disk_map = HashMap::with_capacity(endpoint_pools.as_ref().len());
let first_is_local = endpoint_pools.first_local();
let mut local_disks = Vec::new();
info!("ECStore new address: {}", address.to_string());
let mut host = address.ip().to_string();
if host.is_empty() {
host = GLOBAL_RUSTFS_HOST.read().await.to_string()
}
let mut port = address.port().to_string();
if port.is_empty() {
port = GLOBAL_RUSTFS_PORT.read().await.to_string()
}
info!("ECStore new host: {}, port: {}", host, port);
init_local_peer(&endpoint_pools, &host, &port).await;
// debug!("endpoint_pools: {:?}", endpoint_pools);
let mut common_parity_drives = 0;
for (i, pool_eps) in endpoint_pools.as_ref().iter().enumerate() {
if common_parity_drives == 0 {
let parity_drives = ec_drives_no_config(pool_eps.drives_per_set)?;
storageclass::validate_parity(parity_drives, pool_eps.drives_per_set)?;
common_parity_drives = parity_drives;
}
// validate_parity(parity_count, pool_eps.drives_per_set)?;
let (disks, errs) = store_init::init_disks(
&pool_eps.endpoints,
&DiskOption {
cleanup: true,
health_check: true,
},
)
.await;
check_disk_fatal_errs(&errs)?;
let fm = {
let mut times = 0;
let mut interval = 1;
loop {
if let Ok(fm) = store_init::connect_load_init_formats(
first_is_local,
&disks,
pool_eps.set_count,
pool_eps.drives_per_set,
deployment_id,
)
.await
{
break fm;
}
times += 1;
if interval < 16 {
interval *= 2;
}
if times > 10 {
return Err(Error::other("can not get formats"));
}
info!("retrying get formats after {:?}", interval);
select! {
_ = tokio::signal::ctrl_c() => {
info!("got ctrl+c, exits");
exit(0);
}
_ = sleep(Duration::from_secs(interval)) => {
}
}
}
};
if deployment_id.is_none() {
deployment_id = Some(fm.id);
}
if deployment_id != Some(fm.id) {
return Err(Error::other("deployment_id not same in one pool"));
}
if deployment_id.is_some() && deployment_id.unwrap().is_nil() {
deployment_id = Some(Uuid::new_v4());
}
for disk in disks.iter() {
if disk.is_some() && disk.as_ref().unwrap().is_local() {
local_disks.push(disk.as_ref().unwrap().clone());
}
}
let sets = Sets::new(disks.clone(), pool_eps, &fm, i, common_parity_drives).await?;
pools.push(sets);
disk_map.insert(i, disks);
}
// Replace the local disk
if !is_dist_erasure().await {
let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await;
for disk in local_disks {
let path = disk.endpoint().to_string();
global_local_disk_map.insert(path, Some(disk.clone()));
}
}
let peer_sys = S3PeerSys::new(&endpoint_pools);
let mut pool_meta = PoolMeta::new(&pools, &PoolMeta::default());
pool_meta.dont_save = true;
let decommission_cancelers = vec![None; pools.len()];
let ec = Arc::new(ECStore {
id: deployment_id.unwrap(),
disk_map,
pools,
peer_sys,
pool_meta: RwLock::new(pool_meta),
rebalance_meta: RwLock::new(None),
decommission_cancelers,
});
// Only set it when the global deployment ID is not yet configured
if let Some(dep_id) = deployment_id
&& get_global_deployment_id().is_none()
{
set_global_deployment_id(dep_id);
}
let wait_sec = 5;
let mut exit_count = 0;
loop {
if let Err(err) = ec.init(ctx.clone()).await {
error!("init err: {}", err);
error!("retry after {} second", wait_sec);
sleep(Duration::from_secs(wait_sec)).await;
if exit_count > 10 {
return Err(Error::other("ec init failed"));
}
exit_count += 1;
continue;
}
break;
}
set_object_layer(ec.clone()).await;
Ok(ec)
}
#[instrument(level = "debug", skip(self, rx))]
pub async fn init(self: &Arc<Self>, rx: CancellationToken) -> Result<()> {
GLOBAL_BOOT_TIME.get_or_init(|| async { SystemTime::now() }).await;
if self.load_rebalance_meta().await.is_ok() {
self.start_rebalance().await;
}
let mut meta = PoolMeta::default();
meta.load(self.pools[0].clone(), self.pools.clone()).await?;
let update = meta.validate(self.pools.clone())?;
if !update {
{
let mut pool_meta = self.pool_meta.write().await;
*pool_meta = meta.clone();
}
} else {
let new_meta = PoolMeta::new(&self.pools, &meta);
new_meta.save(self.pools.clone()).await?;
{
let mut pool_meta = self.pool_meta.write().await;
*pool_meta = new_meta;
}
}
let pools = meta.return_resumable_pools();
let mut pool_indices = Vec::with_capacity(pools.len());
let endpoints = get_global_endpoints();
for p in pools.iter() {
if let Some(idx) = endpoints.get_pool_idx(&p.cmd_line) {
pool_indices.push(idx);
} else {
return Err(Error::other(format!(
"unexpected state present for decommission status pool({}) not found",
p.cmd_line
)));
}
}
if !pool_indices.is_empty() {
let idx = pool_indices[0];
if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
let store = self.clone();
tokio::spawn(async move {
// wait 3 minutes for cluster init
tokio::time::sleep(Duration::from_secs(60 * 3)).await;
if let Err(err) = store.decommission(rx.clone(), pool_indices.clone()).await {
if err == StorageError::DecommissionAlreadyRunning {
for i in pool_indices.iter() {
store.do_decommission_in_routine(rx.clone(), *i).await;
}
return;
}
error!("store init decommission err: {}", err);
// TODO: check config err
}
});
}
}
init_background_expiry(self.clone()).await;
TransitionState::init(self.clone()).await;
if let Err(err) = GLOBAL_TierConfigMgr.write().await.init(self.clone()).await {
info!("TierConfigMgr init error: {}", err);
}
Ok(())
}
pub fn init_local_disks() {}
// pub fn local_disks(&self) -> Vec<DiskStore> {
// self.local_disks.clone()
// }
pub fn single_pool(&self) -> bool {
self.pools.len() == 1
}
// define in store_list_objects.rs
// pub async fn list_path(&self, opts: &ListPathOptions, delimiter: &str) -> Result<ListObjectsInfo> {
// // if opts.prefix.ends_with(SLASH_SEPARATOR) {
// // return Err(Error::other("eof"));
// // }
// let mut opts = opts.clone();
// if opts.base_dir.is_empty() {
// opts.base_dir = base_dir_from_prefix(&opts.prefix);
// }
// let objects = self.list_merged(&opts, delimiter).await?;
// let info = ListObjectsInfo {
// objects,
// ..Default::default()
// };
// Ok(info)
// }
// Read all entries
// define in store_list_objects.rs
// async fn list_merged(&self, opts: &ListPathOptions, delimiter: &str) -> Result<Vec<ObjectInfo>> {
// let walk_opts = WalkDirOptions {
// bucket: opts.bucket.clone(),
// base_dir: opts.base_dir.clone(),
// ..Default::default()
// };
// // let (mut wr, mut rd) = tokio::io::duplex(1024);
// let mut futures = Vec::new();
// for sets in self.pools.iter() {
// for set in sets.disk_set.iter() {
// futures.push(set.walk_dir(&walk_opts));
// }
// }
// let results = join_all(futures).await;
// // let mut errs = Vec::new();
// let mut ress = Vec::new();
// let mut uniq = HashSet::new();
// for (disks_ress, _disks_errs) in results {
// for disks_res in disks_ress.iter() {
// if disks_res.is_none() {
// // TODO handle errs
// continue;
// }
// let entries = disks_res.as_ref().unwrap();
// for entry in entries {
// // warn!("lst_merged entry---- {}", &entry.name);
// if !opts.prefix.is_empty() && !entry.name.starts_with(&opts.prefix) {
// continue;
// }
// if !uniq.contains(&entry.name) {
// uniq.insert(entry.name.clone());
// // TODO: filter
// if opts.limit > 0 && ress.len() as i32 >= opts.limit {
// return Ok(ress);
// }
// if entry.is_object() {
// if !delimiter.is_empty() {
// // entry.name.trim_start_matches(pat)
// }
// let fi = entry.to_fileinfo(&opts.bucket)?;
// if let Some(f) = fi {
// ress.push(f.to_object_info(&opts.bucket, &entry.name, false));
// }
// continue;
// }
// if entry.is_dir() {
// ress.push(ObjectInfo {
// is_dir: true,
// bucket: opts.bucket.clone(),
// name: entry.name.clone(),
// ..Default::default()
// });
// }
// }
// }
// }
// }
// // warn!("list_merged errs {:?}", errs);
// Ok(ress)
// }
#[instrument(level = "debug", skip(self))]
async fn delete_all(&self, bucket: &str, prefix: &str) -> Result<()> {
let mut futures = Vec::new();
for sets in self.pools.iter() {
for set in sets.disk_set.iter() {
futures.push(set.delete_all(bucket, prefix));
// let disks = set.disks.read().await;
// let dd = disks.clone();
// for disk in dd {
// if disk.is_none() {
// continue;
// }
// // let disk = disk.as_ref().unwrap().clone();
// // futures.push(disk.delete(
// // bucket,
// // prefix,
// // DeleteOptions {
// // recursive: true,
// // immediate: false,
// // },
// // ));
// }
}
}
let results = join_all(futures).await;
let mut errs = Vec::new();
for res in results {
match res {
Ok(_) => errs.push(None),
Err(e) => errs.push(Some(e)),
}
}
debug!("store delete_all errs {:?}", errs);
Ok(())
}
async fn delete_prefix(&self, bucket: &str, object: &str) -> Result<()> {
for pool in self.pools.iter() {
pool.delete_object(
bucket,
object,
ObjectOptions {
delete_prefix: true,
..Default::default()
},
)
.await?;
}
Ok(())
}
async fn get_available_pool_idx(&self, bucket: &str, object: &str, size: i64) -> Option<usize> {
// // Return a random one first
let mut server_pools = self.get_server_pools_available_space(bucket, object, size).await;
server_pools.filter_max_used(100 - (100_f64 * DISK_RESERVE_FRACTION) as u64);
let total = server_pools.total_available();
if total == 0 {
return None;
}
let mut rng = rand::rng();
let random_u64: u64 = rng.random_range(0..total);
let choose = random_u64 % total;
let mut at_total = 0;
for pool in server_pools.iter() {
at_total += pool.available;
if at_total > choose && pool.available > 0 {
return Some(pool.index);
}
}
None
}
async fn get_server_pools_available_space(&self, bucket: &str, object: &str, size: i64) -> ServerPoolsAvailableSpace {
let mut n_sets = vec![0; self.pools.len()];
let mut infos = vec![Vec::new(); self.pools.len()];
// TODO: add concurrency
for (idx, pool) in self.pools.iter().enumerate() {
if self.is_suspended(idx).await || self.is_pool_rebalancing(idx).await {
continue;
}
n_sets[idx] = pool.set_count;
if let Ok(disks) = pool.get_disks_by_key(object).get_disks(0, 0).await {
let disk_infos = get_disk_infos(&disks).await;
infos[idx] = disk_infos;
}
}
let mut server_pools = vec![PoolAvailableSpace::default(); self.pools.len()];
for (i, zinfo) in infos.iter().enumerate() {
if zinfo.is_empty() {
server_pools[i] = PoolAvailableSpace {
index: i,
..Default::default()
};
continue;
}
if !is_meta_bucketname(bucket) && !has_space_for(zinfo, size).await.unwrap_or_default() {
server_pools[i] = PoolAvailableSpace {
index: i,
..Default::default()
};
continue;
}
let mut available = 0;
let mut max_used_pct = 0;
for disk in zinfo.iter().flatten() {
if disk.total == 0 {
continue;
}
available += disk.total - disk.used;
let pct_used = disk.used * 100 / disk.total;
if pct_used > max_used_pct {
max_used_pct = pct_used;
}
}
available *= n_sets[i] as u64;
server_pools[i] = PoolAvailableSpace {
index: i,
available,
max_used_pct,
}
}
ServerPoolsAvailableSpace(server_pools)
}
async fn is_suspended(&self, idx: usize) -> bool {
// TODO: LOCK
let pool_meta = self.pool_meta.read().await;
pool_meta.is_suspended(idx)
}
async fn get_pool_idx(&self, bucket: &str, object: &str, size: i64) -> Result<usize> {
let idx = match self
.get_pool_idx_existing_with_opts(
bucket,
object,
&ObjectOptions {
skip_decommissioned: true,
skip_rebalancing: true,
..Default::default()
},
)
.await
{
Ok(res) => res,
Err(err) => {
if !is_err_object_not_found(&err) {
return Err(err);
}
if let Some(hit_idx) = self.get_available_pool_idx(bucket, object, size).await {
hit_idx
} else {
return Err(Error::DiskFull);
}
}
};
Ok(idx)
}
async fn get_pool_idx_no_lock(&self, bucket: &str, object: &str, size: i64) -> Result<usize> {
let idx = match self.get_pool_idx_existing_no_lock(bucket, object).await {
Ok(res) => res,
Err(err) => {
if !is_err_object_not_found(&err) {
return Err(err);
}
if let Some(idx) = self.get_available_pool_idx(bucket, object, size).await {
idx
} else {
warn!("get_pool_idx_no_lock: disk full {}/{}", bucket, object);
return Err(Error::DiskFull);
}
}
};
Ok(idx)
}
async fn get_pool_idx_existing_no_lock(&self, bucket: &str, object: &str) -> Result<usize> {
self.get_pool_idx_existing_with_opts(
bucket,
object,
&ObjectOptions {
no_lock: true,
skip_decommissioned: true,
skip_rebalancing: true,
..Default::default()
},
)
.await
}
async fn get_pool_idx_existing_with_opts(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<usize> {
let (pinfo, _) = self.get_pool_info_existing_with_opts(bucket, object, opts).await?;
Ok(pinfo.index)
}
async fn get_pool_info_existing_with_opts(
&self,
bucket: &str,
object: &str,
opts: &ObjectOptions,
) -> Result<(PoolObjInfo, Vec<PoolErr>)> {
self.internal_get_pool_info_existing_with_opts(bucket, object, opts).await
}
async fn internal_get_pool_info_existing_with_opts(
&self,
bucket: &str,
object: &str,
opts: &ObjectOptions,
) -> Result<(PoolObjInfo, Vec<PoolErr>)> {
let mut futures = Vec::new();
for pool in self.pools.iter() {
let mut pool_opts = opts.clone();
if !pool_opts.metadata_chg {
pool_opts.version_id = None;
}
futures.push(async move { pool.get_object_info(bucket, object, &pool_opts).await });
}
let results = join_all(futures).await;
let mut ress = Vec::new();
// join_all preserves the input order
for (i, res) in results.into_iter().enumerate() {
let index = i;
match res {
Ok(r) => {
ress.push(PoolObjInfo {
index,
object_info: r,
err: None,
});
}
Err(e) => {
ress.push(PoolObjInfo {
index,
err: Some(e),
..Default::default()
});
}
}
}
ress.sort_by(|a, b| {
let at = a.object_info.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH);
let bt = b.object_info.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH);
bt.cmp(&at)
});
let mut def_pool = PoolObjInfo::default();
let mut has_def_pool = false;
for pinfo in ress.iter() {
if opts.skip_decommissioned && self.is_suspended(pinfo.index).await {
continue;
}
if opts.skip_rebalancing && self.is_pool_rebalancing(pinfo.index).await {
continue;
}
if pinfo.err.is_none() {
return Ok((pinfo.clone(), self.pools_with_object(&ress, opts).await));
}
let err = pinfo.err.as_ref().unwrap();
if err == &Error::ErasureReadQuorum && !opts.metadata_chg {
return Ok((pinfo.clone(), self.pools_with_object(&ress, opts).await));
}
def_pool = pinfo.clone();
has_def_pool = true;
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-deletes.html
if is_err_object_not_found(err)
&& let Err(err) = opts.precondition_check(&pinfo.object_info)
{
return Err(err.clone());
}
if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
return Err(err.clone());
}
if pinfo.object_info.delete_marker && !pinfo.object_info.name.is_empty() {
return Ok((pinfo.clone(), Vec::new()));
}
}
if opts.replication_request && opts.delete_marker && has_def_pool {
return Ok((def_pool, Vec::new()));
}
Err(Error::ObjectNotFound(bucket.to_owned(), object.to_owned()))
}
async fn pools_with_object(&self, pools: &[PoolObjInfo], opts: &ObjectOptions) -> Vec<PoolErr> {
let mut errs = Vec::new();
for pool in pools.iter() {
if opts.skip_decommissioned && self.is_suspended(pool.index).await {
continue;
}
if opts.skip_rebalancing && self.is_pool_rebalancing(pool.index).await {
continue;
}
if let Some(err) = &pool.err {
if err == &Error::ErasureReadQuorum {
errs.push(PoolErr {
index: Some(pool.index),
err: Some(Error::ErasureReadQuorum),
});
}
} else {
errs.push(PoolErr {
index: Some(pool.index),
err: None,
});
}
}
errs
}
async fn get_latest_object_info_with_idx(
&self,
bucket: &str,
object: &str,
opts: &ObjectOptions,
) -> Result<(ObjectInfo, usize)> {
let mut futures = Vec::with_capacity(self.pools.len());
for pool in self.pools.iter() {
futures.push(pool.get_object_info(bucket, object, opts));
}
let results = join_all(futures).await;
struct IndexRes {
res: Option<ObjectInfo>,
idx: usize,
err: Option<Error>,
}
let mut idx_res = Vec::with_capacity(self.pools.len());
for (idx, result) in results.into_iter().enumerate() {
match result {
Ok(res) => {
idx_res.push(IndexRes {
res: Some(res),
idx,
err: None,
});
}
Err(e) => {
idx_res.push(IndexRes {
res: None,
idx,
err: Some(e),
});
}
}
}
// TODO: test order
idx_res.sort_by(|a, b| {
let a_mod = if let Some(o1) = &a.res {
o1.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH)
} else {
OffsetDateTime::UNIX_EPOCH
};
let b_mod = if let Some(o2) = &b.res {
o2.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH)
} else {
OffsetDateTime::UNIX_EPOCH
};
if a_mod == b_mod {
return if a.idx < b.idx { Ordering::Greater } else { Ordering::Less };
}
b_mod.cmp(&a_mod)
});
for res in idx_res.into_iter() {
if let Some(obj) = res.res {
return Ok((obj, res.idx));
}
if let Some(err) = res.err
&& !is_err_object_not_found(&err)
&& !is_err_version_not_found(&err)
{
return Err(err);
}
// TODO: delete marker
}
let object = decode_dir_object(object);
if opts.version_id.is_none() {
Err(StorageError::ObjectNotFound(bucket.to_owned(), object.to_owned()))
} else {
Err(StorageError::VersionNotFound(
bucket.to_owned(),
object.to_owned(),
opts.version_id.clone().unwrap_or_default(),
))
}
}
async fn delete_object_from_all_pools(
&self,
bucket: &str,
object: &str,
opts: &ObjectOptions,
errs: Vec<PoolErr>,
) -> Result<ObjectInfo> {
let mut objs = Vec::new();
let mut derrs = Vec::new();
for pe in errs.iter() {
if let Some(err) = &pe.err
&& err == &StorageError::ErasureWriteQuorum
{
objs.push(None);
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
if let Some(idx) = pe.index {
match self.pools[idx].delete_object(bucket, object, opts.clone()).await {
Ok(res) => {
objs.push(Some(res));
derrs.push(None);
}
Err(err) => {
objs.push(None);
derrs.push(Some(err));
}
}
}
}
if let Some(e) = &derrs[0] {
return Err(e.clone());
}
Ok(objs[0].as_ref().unwrap().clone())
}
pub async fn reload_pool_meta(&self) -> Result<()> {
let mut meta = PoolMeta::default();
meta.load(self.pools[0].clone(), self.pools.clone()).await?;
let mut pool_meta = self.pool_meta.write().await;
*pool_meta = meta;
// *self.pool_meta.write().unwrap() = meta;
Ok(())
}
}
pub async fn find_local_disk(disk_path: &String) -> Option<DiskStore> {
let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await;
if let Some(disk) = disk_map.get(disk_path) {
disk.as_ref().cloned()
} else {
None
}
}
pub async fn get_disk_via_endpoint(endpoint: &Endpoint) -> Option<DiskStore> {
let global_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.read().await;
if global_set_drives.is_empty() {
return GLOBAL_LOCAL_DISK_MAP.read().await[&endpoint.to_string()].clone();
}
global_set_drives[endpoint.pool_idx as usize][endpoint.set_idx as usize][endpoint.disk_idx as usize].clone()
}
pub async fn all_local_disk_path() -> Vec<String> {
let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await;
disk_map.keys().cloned().collect()
}
pub async fn all_local_disk() -> Vec<DiskStore> {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/error.rs | crates/ecstore/src/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use s3s::{S3Error, S3ErrorCode};
use rustfs_utils::path::decode_dir_object;
use crate::bucket::error::BucketMetadataError;
use crate::disk::error::DiskError;
pub type Error = StorageError;
pub type Result<T> = core::result::Result<T, Error>;
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
#[error("Faulty disk")]
FaultyDisk,
#[error("Disk full")]
DiskFull,
#[error("Volume not found")]
VolumeNotFound,
#[error("Volume exists")]
VolumeExists,
#[error("File not found")]
FileNotFound,
#[error("File version not found")]
FileVersionNotFound,
#[error("File name too long")]
FileNameTooLong,
#[error("File access denied")]
FileAccessDenied,
#[error("File is corrupted")]
FileCorrupt,
#[error("Not a regular file")]
IsNotRegular,
#[error("Volume not empty")]
VolumeNotEmpty,
#[error("Volume access denied")]
VolumeAccessDenied,
#[error("Corrupted format")]
CorruptedFormat,
#[error("Corrupted backend")]
CorruptedBackend,
#[error("Unformatted disk")]
UnformattedDisk,
#[error("Disk not found")]
DiskNotFound,
#[error("Drive is root")]
DriveIsRoot,
#[error("Faulty remote disk")]
FaultyRemoteDisk,
#[error("Disk access denied")]
DiskAccessDenied,
#[error("Unexpected error")]
Unexpected,
#[error("Too many open files")]
TooManyOpenFiles,
#[error("No heal required")]
NoHealRequired,
#[error("Config not found")]
ConfigNotFound,
#[error("not implemented")]
NotImplemented,
#[error("Invalid arguments provided for {0}/{1}-{2}")]
InvalidArgument(String, String, String),
#[error("method not allowed")]
MethodNotAllowed,
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Bucket not empty: {0}")]
BucketNotEmpty(String),
#[error("Bucket name invalid: {0}")]
BucketNameInvalid(String),
#[error("Object name invalid: {0}/{1}")]
ObjectNameInvalid(String, String),
#[error("Bucket exists: {0}")]
BucketExists(String),
#[error("Storage reached its minimum free drive threshold.")]
StorageFull,
#[error("Please reduce your request rate")]
SlowDown,
#[error("Prefix access is denied:{0}/{1}")]
PrefixAccessDenied(String, String),
#[error("Invalid UploadID KeyCombination: {0}/{1}")]
InvalidUploadIDKeyCombination(String, String),
#[error("Malformed UploadID: {0}")]
MalformedUploadID(String),
#[error("Object name too long: {0}/{1}")]
ObjectNameTooLong(String, String),
#[error("Object name contains forward slash as prefix: {0}/{1}")]
ObjectNamePrefixAsSlash(String, String),
#[error("Object not found: {0}/{1}")]
ObjectNotFound(String, String),
#[error("Version not found: {0}/{1}-{2}")]
VersionNotFound(String, String, String),
#[error("Invalid upload id: {0}/{1}-{2}")]
InvalidUploadID(String, String, String),
#[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
InvalidPart(usize, String, String),
#[error("Your proposed upload is smaller than the minimum allowed size. Part {0} size {1} is less than minimum {2}")]
EntityTooSmall(usize, i64, i64),
#[error("Invalid version id: {0}/{1}-{2}")]
InvalidVersionID(String, String, String),
#[error("invalid data movement operation, source and destination pool are the same for : {0}/{1}-{2}")]
DataMovementOverwriteErr(String, String, String),
#[error("Object exists on :{0} as directory {1}")]
ObjectExistsAsDirectory(String, String),
#[error("Storage resources are insufficient for the read operation: {0}/{1}")]
InsufficientReadQuorum(String, String),
#[error("Storage resources are insufficient for the write operation: {0}/{1}")]
InsufficientWriteQuorum(String, String),
#[error("Decommission not started")]
DecommissionNotStarted,
#[error("Decommission already running")]
DecommissionAlreadyRunning,
#[error("DoneForNow")]
DoneForNow,
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("not first disk")]
NotFirstDisk,
#[error("first disk wait")]
FirstDiskWait,
#[error("Io error: {0}")]
Io(std::io::Error),
#[error("Lock error: {0}")]
Lock(#[from] rustfs_lock::LockError),
#[error("Precondition failed")]
PreconditionFailed,
#[error("Not modified")]
NotModified,
#[error("Invalid part number: {0}")]
InvalidPartNumber(usize),
#[error("Invalid range specified: {0}")]
InvalidRangeSpec(String),
}
impl StorageError {
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
StorageError::Io(std::io::Error::other(error))
}
}
impl From<DiskError> for StorageError {
fn from(e: DiskError) -> Self {
match e {
DiskError::Io(io_error) => StorageError::Io(io_error),
// DiskError::MaxVersionsExceeded => todo!(),
DiskError::Unexpected => StorageError::Unexpected,
DiskError::CorruptedFormat => StorageError::CorruptedFormat,
DiskError::CorruptedBackend => StorageError::CorruptedBackend,
DiskError::UnformattedDisk => StorageError::UnformattedDisk,
// DiskError::InconsistentDisk => StorageError::InconsistentDisk,
// DiskError::UnsupportedDisk => StorageError::UnsupportedDisk,
DiskError::DiskFull => StorageError::DiskFull,
// DiskError::DiskNotDir => StorageError::DiskNotDir,
DiskError::DiskNotFound => StorageError::DiskNotFound,
// DiskError::DiskOngoingReq => StorageError::DiskOngoingReq,
DiskError::DriveIsRoot => StorageError::DriveIsRoot,
DiskError::FaultyRemoteDisk => StorageError::FaultyRemoteDisk,
DiskError::FaultyDisk => StorageError::FaultyDisk,
DiskError::DiskAccessDenied => StorageError::DiskAccessDenied,
DiskError::FileNotFound => StorageError::FileNotFound,
DiskError::FileVersionNotFound => StorageError::FileVersionNotFound,
DiskError::TooManyOpenFiles => StorageError::TooManyOpenFiles,
DiskError::FileNameTooLong => StorageError::FileNameTooLong,
DiskError::VolumeExists => StorageError::VolumeExists,
DiskError::IsNotRegular => StorageError::IsNotRegular,
// DiskError::PathNotFound => StorageError::PathNotFound,
DiskError::VolumeNotFound => StorageError::VolumeNotFound,
DiskError::VolumeNotEmpty => StorageError::VolumeNotEmpty,
DiskError::VolumeAccessDenied => StorageError::VolumeAccessDenied,
DiskError::FileAccessDenied => StorageError::FileAccessDenied,
DiskError::FileCorrupt => StorageError::FileCorrupt,
// DiskError::BitrotHashAlgoInvalid => StorageError::BitrotHashAlgoInvalid,
// DiskError::CrossDeviceLink => StorageError::CrossDeviceLink,
// DiskError::LessData => StorageError::LessData,
// DiskError::MoreData => StorageError::MoreData,
// DiskError::OutdatedXLMeta => StorageError::OutdatedXLMeta,
// DiskError::PartMissingOrCorrupt => StorageError::PartMissingOrCorrupt,
DiskError::NoHealRequired => StorageError::NoHealRequired,
DiskError::MethodNotAllowed => StorageError::MethodNotAllowed,
DiskError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
DiskError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
_ => StorageError::Io(std::io::Error::other(e)),
}
}
}
impl From<StorageError> for DiskError {
fn from(val: StorageError) -> Self {
match val {
StorageError::Io(io_error) => io_error.into(),
StorageError::Unexpected => DiskError::Unexpected,
StorageError::FileNotFound => DiskError::FileNotFound,
StorageError::FileVersionNotFound => DiskError::FileVersionNotFound,
StorageError::FileCorrupt => DiskError::FileCorrupt,
StorageError::MethodNotAllowed => DiskError::MethodNotAllowed,
StorageError::StorageFull => DiskError::DiskFull,
StorageError::SlowDown => DiskError::TooManyOpenFiles,
StorageError::ErasureReadQuorum => DiskError::ErasureReadQuorum,
StorageError::ErasureWriteQuorum => DiskError::ErasureWriteQuorum,
StorageError::TooManyOpenFiles => DiskError::TooManyOpenFiles,
StorageError::NoHealRequired => DiskError::NoHealRequired,
StorageError::CorruptedFormat => DiskError::CorruptedFormat,
StorageError::CorruptedBackend => DiskError::CorruptedBackend,
StorageError::UnformattedDisk => DiskError::UnformattedDisk,
StorageError::DiskNotFound => DiskError::DiskNotFound,
StorageError::FaultyDisk => DiskError::FaultyDisk,
StorageError::DiskFull => DiskError::DiskFull,
StorageError::VolumeNotFound => DiskError::VolumeNotFound,
StorageError::VolumeExists => DiskError::VolumeExists,
StorageError::FileNameTooLong => DiskError::FileNameTooLong,
_ => DiskError::other(val),
}
}
}
impl From<BucketMetadataError> for Error {
fn from(e: BucketMetadataError) -> Self {
match e {
BucketMetadataError::TaggingNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketPolicyNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketObjectLockConfigNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketLifecycleNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketSSEConfigNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketQuotaConfigNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketReplicationConfigNotFound => Error::ConfigNotFound,
BucketMetadataError::BucketRemoteTargetNotFound => Error::ConfigNotFound,
_ => Error::other(e),
}
}
}
impl From<std::io::Error> for StorageError {
fn from(e: std::io::Error) -> Self {
match e.downcast::<StorageError>() {
Ok(storage_error) => storage_error,
Err(io_error) => match io_error.downcast::<DiskError>() {
Ok(disk_error) => disk_error.into(),
Err(io_error) => StorageError::Io(io_error),
},
}
}
}
impl From<StorageError> for std::io::Error {
fn from(e: StorageError) -> Self {
match e {
StorageError::Io(io_error) => io_error,
e => std::io::Error::other(e),
}
}
}
impl From<rustfs_filemeta::Error> for StorageError {
fn from(e: rustfs_filemeta::Error) -> Self {
match e {
rustfs_filemeta::Error::DoneForNow => StorageError::DoneForNow,
rustfs_filemeta::Error::MethodNotAllowed => StorageError::MethodNotAllowed,
rustfs_filemeta::Error::VolumeNotFound => StorageError::VolumeNotFound,
rustfs_filemeta::Error::FileNotFound => StorageError::FileNotFound,
rustfs_filemeta::Error::FileVersionNotFound => StorageError::FileVersionNotFound,
rustfs_filemeta::Error::FileCorrupt => StorageError::FileCorrupt,
rustfs_filemeta::Error::Unexpected => StorageError::Unexpected,
rustfs_filemeta::Error::Io(io_error) => io_error.into(),
_ => StorageError::Io(std::io::Error::other(e)),
}
}
}
impl From<StorageError> for rustfs_filemeta::Error {
fn from(val: StorageError) -> Self {
match val {
StorageError::Unexpected => rustfs_filemeta::Error::Unexpected,
StorageError::FileNotFound => rustfs_filemeta::Error::FileNotFound,
StorageError::FileVersionNotFound => rustfs_filemeta::Error::FileVersionNotFound,
StorageError::FileCorrupt => rustfs_filemeta::Error::FileCorrupt,
StorageError::DoneForNow => rustfs_filemeta::Error::DoneForNow,
StorageError::MethodNotAllowed => rustfs_filemeta::Error::MethodNotAllowed,
StorageError::VolumeNotFound => rustfs_filemeta::Error::VolumeNotFound,
StorageError::Io(io_error) => io_error.into(),
_ => rustfs_filemeta::Error::other(val),
}
}
}
impl PartialEq for StorageError {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(StorageError::Io(e1), StorageError::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
(e1, e2) => e1.to_u32() == e2.to_u32(),
}
}
}
impl Clone for StorageError {
fn clone(&self) -> Self {
match self {
StorageError::Io(e) => StorageError::Io(std::io::Error::new(e.kind(), e.to_string())),
StorageError::FaultyDisk => StorageError::FaultyDisk,
StorageError::DiskFull => StorageError::DiskFull,
StorageError::VolumeNotFound => StorageError::VolumeNotFound,
StorageError::VolumeExists => StorageError::VolumeExists,
StorageError::FileNotFound => StorageError::FileNotFound,
StorageError::FileVersionNotFound => StorageError::FileVersionNotFound,
StorageError::FileNameTooLong => StorageError::FileNameTooLong,
StorageError::FileAccessDenied => StorageError::FileAccessDenied,
StorageError::FileCorrupt => StorageError::FileCorrupt,
StorageError::IsNotRegular => StorageError::IsNotRegular,
StorageError::VolumeNotEmpty => StorageError::VolumeNotEmpty,
StorageError::VolumeAccessDenied => StorageError::VolumeAccessDenied,
StorageError::CorruptedFormat => StorageError::CorruptedFormat,
StorageError::CorruptedBackend => StorageError::CorruptedBackend,
StorageError::UnformattedDisk => StorageError::UnformattedDisk,
StorageError::DiskNotFound => StorageError::DiskNotFound,
StorageError::DriveIsRoot => StorageError::DriveIsRoot,
StorageError::FaultyRemoteDisk => StorageError::FaultyRemoteDisk,
StorageError::DiskAccessDenied => StorageError::DiskAccessDenied,
StorageError::Unexpected => StorageError::Unexpected,
StorageError::ConfigNotFound => StorageError::ConfigNotFound,
StorageError::NotImplemented => StorageError::NotImplemented,
StorageError::InvalidArgument(a, b, c) => StorageError::InvalidArgument(a.clone(), b.clone(), c.clone()),
StorageError::MethodNotAllowed => StorageError::MethodNotAllowed,
StorageError::BucketNotFound(a) => StorageError::BucketNotFound(a.clone()),
StorageError::BucketNotEmpty(a) => StorageError::BucketNotEmpty(a.clone()),
StorageError::BucketNameInvalid(a) => StorageError::BucketNameInvalid(a.clone()),
StorageError::ObjectNameInvalid(a, b) => StorageError::ObjectNameInvalid(a.clone(), b.clone()),
StorageError::BucketExists(a) => StorageError::BucketExists(a.clone()),
StorageError::StorageFull => StorageError::StorageFull,
StorageError::SlowDown => StorageError::SlowDown,
StorageError::PrefixAccessDenied(a, b) => StorageError::PrefixAccessDenied(a.clone(), b.clone()),
StorageError::InvalidUploadIDKeyCombination(a, b) => {
StorageError::InvalidUploadIDKeyCombination(a.clone(), b.clone())
}
StorageError::MalformedUploadID(a) => StorageError::MalformedUploadID(a.clone()),
StorageError::ObjectNameTooLong(a, b) => StorageError::ObjectNameTooLong(a.clone(), b.clone()),
StorageError::ObjectNamePrefixAsSlash(a, b) => StorageError::ObjectNamePrefixAsSlash(a.clone(), b.clone()),
StorageError::ObjectNotFound(a, b) => StorageError::ObjectNotFound(a.clone(), b.clone()),
StorageError::VersionNotFound(a, b, c) => StorageError::VersionNotFound(a.clone(), b.clone(), c.clone()),
StorageError::InvalidUploadID(a, b, c) => StorageError::InvalidUploadID(a.clone(), b.clone(), c.clone()),
StorageError::InvalidVersionID(a, b, c) => StorageError::InvalidVersionID(a.clone(), b.clone(), c.clone()),
StorageError::DataMovementOverwriteErr(a, b, c) => {
StorageError::DataMovementOverwriteErr(a.clone(), b.clone(), c.clone())
}
StorageError::ObjectExistsAsDirectory(a, b) => StorageError::ObjectExistsAsDirectory(a.clone(), b.clone()),
// StorageError::InsufficientReadQuorum => StorageError::InsufficientReadQuorum,
// StorageError::InsufficientWriteQuorum => StorageError::InsufficientWriteQuorum,
StorageError::DecommissionNotStarted => StorageError::DecommissionNotStarted,
StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(*a, b.clone(), c.clone()),
StorageError::EntityTooSmall(a, b, c) => StorageError::EntityTooSmall(*a, *b, *c),
StorageError::DoneForNow => StorageError::DoneForNow,
StorageError::DecommissionAlreadyRunning => StorageError::DecommissionAlreadyRunning,
StorageError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
StorageError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
StorageError::NotFirstDisk => StorageError::NotFirstDisk,
StorageError::FirstDiskWait => StorageError::FirstDiskWait,
StorageError::TooManyOpenFiles => StorageError::TooManyOpenFiles,
StorageError::NoHealRequired => StorageError::NoHealRequired,
StorageError::Lock(e) => StorageError::Lock(e.clone()),
StorageError::InsufficientReadQuorum(a, b) => StorageError::InsufficientReadQuorum(a.clone(), b.clone()),
StorageError::InsufficientWriteQuorum(a, b) => StorageError::InsufficientWriteQuorum(a.clone(), b.clone()),
StorageError::PreconditionFailed => StorageError::PreconditionFailed,
StorageError::NotModified => StorageError::NotModified,
StorageError::InvalidPartNumber(a) => StorageError::InvalidPartNumber(*a),
StorageError::InvalidRangeSpec(a) => StorageError::InvalidRangeSpec(a.clone()),
}
}
}
impl StorageError {
pub fn to_u32(&self) -> u32 {
match self {
StorageError::Io(_) => 0x01,
StorageError::FaultyDisk => 0x02,
StorageError::DiskFull => 0x03,
StorageError::VolumeNotFound => 0x04,
StorageError::VolumeExists => 0x05,
StorageError::FileNotFound => 0x06,
StorageError::FileVersionNotFound => 0x07,
StorageError::FileNameTooLong => 0x08,
StorageError::FileAccessDenied => 0x09,
StorageError::FileCorrupt => 0x0A,
StorageError::IsNotRegular => 0x0B,
StorageError::VolumeNotEmpty => 0x0C,
StorageError::VolumeAccessDenied => 0x0D,
StorageError::CorruptedFormat => 0x0E,
StorageError::CorruptedBackend => 0x0F,
StorageError::UnformattedDisk => 0x10,
StorageError::DiskNotFound => 0x11,
StorageError::DriveIsRoot => 0x12,
StorageError::FaultyRemoteDisk => 0x13,
StorageError::DiskAccessDenied => 0x14,
StorageError::Unexpected => 0x15,
StorageError::NotImplemented => 0x16,
StorageError::InvalidArgument(_, _, _) => 0x17,
StorageError::MethodNotAllowed => 0x18,
StorageError::BucketNotFound(_) => 0x19,
StorageError::BucketNotEmpty(_) => 0x1A,
StorageError::BucketNameInvalid(_) => 0x1B,
StorageError::ObjectNameInvalid(_, _) => 0x1C,
StorageError::BucketExists(_) => 0x1D,
StorageError::StorageFull => 0x1E,
StorageError::SlowDown => 0x1F,
StorageError::PrefixAccessDenied(_, _) => 0x20,
StorageError::InvalidUploadIDKeyCombination(_, _) => 0x21,
StorageError::MalformedUploadID(_) => 0x22,
StorageError::ObjectNameTooLong(_, _) => 0x23,
StorageError::ObjectNamePrefixAsSlash(_, _) => 0x24,
StorageError::ObjectNotFound(_, _) => 0x25,
StorageError::VersionNotFound(_, _, _) => 0x26,
StorageError::InvalidUploadID(_, _, _) => 0x27,
StorageError::InvalidVersionID(_, _, _) => 0x28,
StorageError::DataMovementOverwriteErr(_, _, _) => 0x29,
StorageError::ObjectExistsAsDirectory(_, _) => 0x2A,
// StorageError::InsufficientReadQuorum => 0x2B,
// StorageError::InsufficientWriteQuorum => 0x2C,
StorageError::DecommissionNotStarted => 0x2D,
StorageError::InvalidPart(_, _, _) => 0x2E,
StorageError::DoneForNow => 0x2F,
StorageError::DecommissionAlreadyRunning => 0x30,
StorageError::ErasureReadQuorum => 0x31,
StorageError::ErasureWriteQuorum => 0x32,
StorageError::NotFirstDisk => 0x33,
StorageError::FirstDiskWait => 0x34,
StorageError::ConfigNotFound => 0x35,
StorageError::TooManyOpenFiles => 0x36,
StorageError::NoHealRequired => 0x37,
StorageError::Lock(_) => 0x38,
StorageError::InsufficientReadQuorum(_, _) => 0x39,
StorageError::InsufficientWriteQuorum(_, _) => 0x3A,
StorageError::PreconditionFailed => 0x3B,
StorageError::EntityTooSmall(_, _, _) => 0x3C,
StorageError::InvalidRangeSpec(_) => 0x3D,
StorageError::NotModified => 0x3E,
StorageError::InvalidPartNumber(_) => 0x3F,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(StorageError::Io(std::io::Error::other("Io error"))),
0x02 => Some(StorageError::FaultyDisk),
0x03 => Some(StorageError::DiskFull),
0x04 => Some(StorageError::VolumeNotFound),
0x05 => Some(StorageError::VolumeExists),
0x06 => Some(StorageError::FileNotFound),
0x07 => Some(StorageError::FileVersionNotFound),
0x08 => Some(StorageError::FileNameTooLong),
0x09 => Some(StorageError::FileAccessDenied),
0x0A => Some(StorageError::FileCorrupt),
0x0B => Some(StorageError::IsNotRegular),
0x0C => Some(StorageError::VolumeNotEmpty),
0x0D => Some(StorageError::VolumeAccessDenied),
0x0E => Some(StorageError::CorruptedFormat),
0x0F => Some(StorageError::CorruptedBackend),
0x10 => Some(StorageError::UnformattedDisk),
0x11 => Some(StorageError::DiskNotFound),
0x12 => Some(StorageError::DriveIsRoot),
0x13 => Some(StorageError::FaultyRemoteDisk),
0x14 => Some(StorageError::DiskAccessDenied),
0x15 => Some(StorageError::Unexpected),
0x16 => Some(StorageError::NotImplemented),
0x17 => Some(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())),
0x18 => Some(StorageError::MethodNotAllowed),
0x19 => Some(StorageError::BucketNotFound(Default::default())),
0x1A => Some(StorageError::BucketNotEmpty(Default::default())),
0x1B => Some(StorageError::BucketNameInvalid(Default::default())),
0x1C => Some(StorageError::ObjectNameInvalid(Default::default(), Default::default())),
0x1D => Some(StorageError::BucketExists(Default::default())),
0x1E => Some(StorageError::StorageFull),
0x1F => Some(StorageError::SlowDown),
0x20 => Some(StorageError::PrefixAccessDenied(Default::default(), Default::default())),
0x21 => Some(StorageError::InvalidUploadIDKeyCombination(Default::default(), Default::default())),
0x22 => Some(StorageError::MalformedUploadID(Default::default())),
0x23 => Some(StorageError::ObjectNameTooLong(Default::default(), Default::default())),
0x24 => Some(StorageError::ObjectNamePrefixAsSlash(Default::default(), Default::default())),
0x25 => Some(StorageError::ObjectNotFound(Default::default(), Default::default())),
0x26 => Some(StorageError::VersionNotFound(Default::default(), Default::default(), Default::default())),
0x27 => Some(StorageError::InvalidUploadID(Default::default(), Default::default(), Default::default())),
0x28 => Some(StorageError::InvalidVersionID(Default::default(), Default::default(), Default::default())),
0x29 => Some(StorageError::DataMovementOverwriteErr(
Default::default(),
Default::default(),
Default::default(),
)),
0x2A => Some(StorageError::ObjectExistsAsDirectory(Default::default(), Default::default())),
// 0x2B => Some(StorageError::InsufficientReadQuorum),
// 0x2C => Some(StorageError::InsufficientWriteQuorum),
0x2D => Some(StorageError::DecommissionNotStarted),
0x2E => Some(StorageError::InvalidPart(Default::default(), Default::default(), Default::default())),
0x2F => Some(StorageError::DoneForNow),
0x30 => Some(StorageError::DecommissionAlreadyRunning),
0x31 => Some(StorageError::ErasureReadQuorum),
0x32 => Some(StorageError::ErasureWriteQuorum),
0x33 => Some(StorageError::NotFirstDisk),
0x34 => Some(StorageError::FirstDiskWait),
0x35 => Some(StorageError::ConfigNotFound),
0x36 => Some(StorageError::TooManyOpenFiles),
0x37 => Some(StorageError::NoHealRequired),
0x38 => Some(StorageError::Lock(rustfs_lock::LockError::internal("Generic lock error".to_string()))),
0x39 => Some(StorageError::InsufficientReadQuorum(Default::default(), Default::default())),
0x3A => Some(StorageError::InsufficientWriteQuorum(Default::default(), Default::default())),
0x3B => Some(StorageError::PreconditionFailed),
0x3C => Some(StorageError::EntityTooSmall(Default::default(), Default::default(), Default::default())),
0x3D => Some(StorageError::InvalidRangeSpec(Default::default())),
0x3E => Some(StorageError::NotModified),
0x3F => Some(StorageError::InvalidPartNumber(Default::default())),
_ => None,
}
}
}
impl From<tokio::task::JoinError> for StorageError {
fn from(e: tokio::task::JoinError) -> Self {
StorageError::other(e)
}
}
impl From<serde_json::Error> for StorageError {
fn from(e: serde_json::Error) -> Self {
StorageError::other(e)
}
}
impl From<rmp_serde::encode::Error> for Error {
fn from(e: rmp_serde::encode::Error) -> Self {
Error::other(e)
}
}
impl From<rmp::encode::ValueWriteError> for Error {
fn from(e: rmp::encode::ValueWriteError) -> Self {
Error::other(e)
}
}
impl From<rmp::decode::ValueReadError> for Error {
fn from(e: rmp::decode::ValueReadError) -> Self {
Error::other(e)
}
}
impl From<std::string::FromUtf8Error> for Error {
fn from(e: std::string::FromUtf8Error) -> Self {
Error::other(e)
}
}
impl From<rmp::decode::NumValueReadError> for Error {
fn from(e: rmp::decode::NumValueReadError) -> Self {
Error::other(e)
}
}
impl From<rmp_serde::decode::Error> for Error {
fn from(e: rmp_serde::decode::Error) -> Self {
Error::other(e)
}
}
impl From<s3s::xml::SerError> for Error {
fn from(e: s3s::xml::SerError) -> Self {
Error::other(e)
}
}
impl From<s3s::xml::DeError> for Error {
fn from(e: s3s::xml::DeError) -> Self {
Error::other(e)
}
}
impl From<tonic::Status> for Error {
fn from(e: tonic::Status) -> Self {
Error::other(e.to_string())
}
}
impl From<uuid::Error> for Error {
fn from(e: uuid::Error) -> Self {
Error::other(e)
}
}
impl From<time::error::ComponentRange> for Error {
fn from(e: time::error::ComponentRange) -> Self {
Error::other(e)
}
}
pub fn is_err_object_not_found(err: &Error) -> bool {
matches!(err, &Error::FileNotFound) || matches!(err, &Error::ObjectNotFound(_, _))
}
pub fn is_err_version_not_found(err: &Error) -> bool {
matches!(err, &Error::FileVersionNotFound) || matches!(err, &Error::VersionNotFound(_, _, _))
}
pub fn is_err_bucket_exists(err: &Error) -> bool {
matches!(err, &StorageError::BucketExists(_))
}
pub fn is_err_read_quorum(err: &Error) -> bool {
matches!(err, &StorageError::ErasureReadQuorum)
}
pub fn is_err_invalid_upload_id(err: &Error) -> bool {
matches!(err, &StorageError::InvalidUploadID(_, _, _))
}
pub fn is_err_bucket_not_found(err: &Error) -> bool {
matches!(err, &StorageError::VolumeNotFound)
| matches!(err, &StorageError::DiskNotFound)
| matches!(err, &StorageError::BucketNotFound(_))
}
pub fn is_err_data_movement_overwrite(err: &Error) -> bool {
matches!(err, &StorageError::DataMovementOverwriteErr(_, _, _))
}
pub fn is_err_io(err: &Error) -> bool {
matches!(err, &StorageError::Io(_))
}
pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if is_err_object_not_found(err) || is_err_version_not_found(err) || is_err_bucket_not_found(err) {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if is_err_bucket_not_found(err) {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
// pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
// for err in errs.iter() {
// if let Some(err) = err {
// if let Some(err) = err.downcast_ref::<DiskError>() {
// match err {
// DiskError::FileNotFound | DiskError::VolumeNotFound | &DiskError::FileVersionNotFound => {
// continue;
// }
// _ => return false,
// }
// }
// }
// return false;
// }
// !errs.is_empty()
// }
pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
match err {
StorageError::DiskFull => StorageError::StorageFull,
StorageError::FileNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::ObjectNotFound(bucket, object)
}
StorageError::FileVersionNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
let version = params.get(2).cloned().unwrap_or_default().to_owned();
StorageError::VersionNotFound(bucket, object, version)
}
StorageError::TooManyOpenFiles => StorageError::SlowDown,
StorageError::FileNameTooLong => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::ObjectNameInvalid(bucket, object)
}
StorageError::VolumeExists => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
StorageError::BucketExists(bucket)
}
StorageError::IsNotRegular => {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/admin_server_info.rs | crates/ecstore/src/admin_server_info.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT, load_data_usage_from_backend};
use crate::error::{Error, Result};
use crate::{
disk::endpoint::Endpoint,
global::{GLOBAL_BOOT_TIME, GLOBAL_Endpoints},
new_object_layer_fn,
notification_sys::get_global_notification_sys,
store_api::StorageAPI,
};
use crate::data_usage::load_data_usage_cache;
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, heal_channel::DriveState};
use rustfs_madmin::{
BackendDisks, Disk, ErasureSetInfo, ITEM_INITIALIZING, ITEM_OFFLINE, ITEM_ONLINE, InfoMessage, ServerProperties,
};
use rustfs_protos::{
models::{PingBody, PingBodyBuilder},
node_service_time_out_client,
proto_gen::node_service::{PingRequest, PingResponse},
};
use std::{
collections::{HashMap, HashSet},
time::{Duration, SystemTime},
};
use time::OffsetDateTime;
use tokio::time::timeout;
use tonic::Request;
use tracing::warn;
use shadow_rs::shadow;
shadow!(build);
const SERVER_PING_TIMEOUT: Duration = Duration::from_secs(1);
// pub const ITEM_OFFLINE: &str = "offline";
// pub const ITEM_INITIALIZING: &str = "initializing";
// pub const ITEM_ONLINE: &str = "online";
// #[derive(Debug, Default, Serialize, Deserialize)]
// pub struct MemStats {
// alloc: u64,
// total_alloc: u64,
// mallocs: u64,
// frees: u64,
// heap_alloc: u64,
// }
// #[derive(Debug, Default, Serialize, Deserialize)]
// pub struct ServerProperties {
// pub state: String,
// pub endpoint: String,
// pub scheme: String,
// pub uptime: u64,
// pub version: String,
// pub commit_id: String,
// pub network: HashMap<String, String>,
// pub disks: Vec<madmin::Disk>,
// pub pool_number: i32,
// pub pool_numbers: Vec<i32>,
// pub mem_stats: MemStats,
// pub max_procs: u64,
// pub num_cpu: u64,
// pub runtime_version: String,
// pub rustfs_env_vars: HashMap<String, String>,
// }
async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
let addr = format!(
"{}://{}:{}",
endpoint.url.scheme(),
endpoint.url.host_str().unwrap(),
endpoint.url.port().unwrap()
);
let ping_task = async {
let mut fbb = flatbuffers::FlatBufferBuilder::new();
let payload = fbb.create_vector(b"hello world");
let mut builder = PingBodyBuilder::new(&mut fbb);
builder.add_payload(payload);
let root = builder.finish();
fbb.finish(root, None);
let finished_data = fbb.finished_data();
let decoded_payload = flatbuffers::root::<PingBody>(finished_data);
assert!(decoded_payload.is_ok());
let mut client = node_service_time_out_client(&addr)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(PingRequest {
version: 1,
body: bytes::Bytes::copy_from_slice(finished_data),
});
let response: PingResponse = client.ping(request).await?.into_inner();
let ping_response_body = flatbuffers::root::<PingBody>(&response.body);
if let Err(e) = ping_response_body {
eprintln!("{e}");
} else {
println!("ping_resp:body(flatbuffer): {ping_response_body:?}");
}
Ok(())
};
timeout(SERVER_PING_TIMEOUT, ping_task)
.await
.map_err(|_| Error::other("server ping timeout"))?
}
pub async fn get_local_server_property() -> ServerProperties {
let addr = GLOBAL_LOCAL_NODE_NAME.read().await.clone();
let mut pool_numbers = HashSet::new();
let mut network = HashMap::new();
let endpoints = match GLOBAL_Endpoints.get() {
Some(eps) => eps,
None => return ServerProperties::default(),
};
for ep in endpoints.as_ref().iter() {
for endpoint in ep.endpoints.as_ref().iter() {
let node_name = match endpoint.url.host_str() {
Some(s) => s.to_string(),
None => addr.clone(),
};
if endpoint.is_local {
pool_numbers.insert(endpoint.pool_idx + 1);
network.insert(node_name, ITEM_ONLINE.to_string());
continue;
}
if let std::collections::hash_map::Entry::Vacant(e) = network.entry(node_name) {
if is_server_resolvable(endpoint).await.is_err() {
e.insert(ITEM_OFFLINE.to_string());
} else {
e.insert(ITEM_ONLINE.to_string());
}
}
}
}
// todo: mem collect
// let mem_stats =
let mut props = ServerProperties {
endpoint: addr,
uptime: SystemTime::now()
.duration_since(*GLOBAL_BOOT_TIME.get().unwrap())
.unwrap_or_default()
.as_secs(),
network,
version: get_commit_id(),
..Default::default()
};
for pool_num in pool_numbers.iter() {
props.pool_numbers.push(*pool_num);
}
props.pool_numbers.sort();
props.pool_number = if props.pool_numbers.len() == 1 {
props.pool_numbers[0]
} else {
i32::MAX
};
// let mut sensitive = HashSet::new();
// sensitive.insert(ENV_ACCESS_KEY.to_string());
// sensitive.insert(ENV_SECRET_KEY.to_string());
// sensitive.insert(ENV_ROOT_USER.to_string());
// sensitive.insert(ENV_ROOT_PASSWORD.to_string());
if let Some(store) = new_object_layer_fn() {
let storage_info = store.local_storage_info().await;
props.state = ITEM_ONLINE.to_string();
props.disks = storage_info.disks;
} else {
props.state = ITEM_INITIALIZING.to_string();
};
props
}
pub async fn get_server_info(get_pools: bool) -> InfoMessage {
let nowt: OffsetDateTime = OffsetDateTime::now_utc();
warn!("get_server_info start {:?}", nowt);
let local = get_local_server_property().await;
let after1 = OffsetDateTime::now_utc();
warn!("get_local_server_property end {:?}", after1 - nowt);
let mut servers = {
if let Some(sys) = get_global_notification_sys() {
sys.server_info().await
} else {
vec![]
}
};
let after2 = OffsetDateTime::now_utc();
warn!("server_info end {:?}", after2 - after1);
servers.push(local);
let mut buckets = rustfs_madmin::Buckets::default();
let mut objects = rustfs_madmin::Objects::default();
let mut versions = rustfs_madmin::Versions::default();
let mut delete_markers = rustfs_madmin::DeleteMarkers::default();
let mut usage = rustfs_madmin::Usage::default();
let mut mode = ITEM_INITIALIZING;
let mut backend = rustfs_madmin::ErasureBackend::default();
let mut pools: HashMap<i32, HashMap<i32, ErasureSetInfo>> = HashMap::new();
if let Some(store) = new_object_layer_fn() {
mode = ITEM_ONLINE;
match load_data_usage_from_backend(store.clone()).await {
Ok(res) => {
buckets.count = res.buckets_count;
objects.count = res.objects_total_count;
versions.count = res.versions_total_count;
delete_markers.count = res.delete_markers_total_count;
usage.size = res.objects_total_size;
}
Err(err) => {
buckets.error = Some(err.to_string());
objects.error = Some(err.to_string());
versions.error = Some(err.to_string());
delete_markers.error = Some(err.to_string());
usage.error = Some(err.to_string());
}
}
let after3 = OffsetDateTime::now_utc();
warn!("load_data_usage_from_backend end {:?}", after3 - after2);
let backend_info = store.clone().backend_info().await;
let after4 = OffsetDateTime::now_utc();
warn!("backend_info end {:?}", after4 - after3);
let mut all_disks: Vec<Disk> = Vec::new();
for server in servers.iter() {
all_disks.extend(server.disks.clone());
}
let (online_disks, offline_disks) = get_online_offline_disks_stats(&all_disks);
let after5 = OffsetDateTime::now_utc();
warn!("get_online_offline_disks_stats end {:?}", after5 - after4);
backend = rustfs_madmin::ErasureBackend {
backend_type: rustfs_madmin::BackendType::ErasureType,
online_disks: online_disks.sum(),
offline_disks: offline_disks.sum(),
standard_sc_parity: backend_info.standard_sc_parity,
rr_sc_parity: backend_info.rr_sc_parity,
total_sets: backend_info.total_sets,
drives_per_set: backend_info.drives_per_set,
};
if get_pools {
pools = get_pools_info(&all_disks).await.unwrap_or_default();
let after6 = OffsetDateTime::now_utc();
warn!("get_pools_info end {:?}", after6 - after5);
}
}
let services = rustfs_madmin::Services::default();
InfoMessage {
mode: Some(mode.to_string()),
domain: None,
region: None,
sqs_arn: None,
deployment_id: None,
buckets: Some(buckets),
objects: Some(objects),
versions: Some(versions),
delete_markers: Some(delete_markers),
usage: Some(usage),
backend: Some(backend),
services: Some(services),
servers: Some(servers),
pools: Some(pools),
}
}
fn get_online_offline_disks_stats(disks_info: &[Disk]) -> (BackendDisks, BackendDisks) {
let mut online_disks: HashMap<String, usize> = HashMap::new();
let mut offline_disks: HashMap<String, usize> = HashMap::new();
for disk in disks_info {
let ep = &disk.endpoint;
offline_disks.entry(ep.clone()).or_insert(0);
online_disks.entry(ep.clone()).or_insert(0);
}
for disk in disks_info {
let ep = &disk.endpoint;
let state = &disk.state;
if *state != DriveState::Ok.to_string() && *state != DriveState::Unformatted.to_string() {
*offline_disks.get_mut(ep).unwrap() += 1;
continue;
}
*online_disks.get_mut(ep).unwrap() += 1;
}
let mut root_disk_count = 0;
for di in disks_info {
if di.root_disk {
root_disk_count += 1;
}
}
if disks_info.len() == (root_disk_count + offline_disks.values().sum::<usize>()) {
return (BackendDisks(online_disks), BackendDisks(offline_disks));
}
for disk in disks_info {
let ep = &disk.endpoint;
if disk.root_disk {
*offline_disks.get_mut(ep).unwrap() += 1;
*online_disks.get_mut(ep).unwrap() -= 1;
}
}
(BackendDisks(online_disks), BackendDisks(offline_disks))
}
async fn get_pools_info(all_disks: &[Disk]) -> Result<HashMap<i32, HashMap<i32, ErasureSetInfo>>> {
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("ServerNotInitialized"));
};
let mut pools_info: HashMap<i32, HashMap<i32, ErasureSetInfo>> = HashMap::new();
for d in all_disks {
let pool_info = pools_info.entry(d.pool_index).or_default();
let erasure_set = pool_info.entry(d.set_index).or_default();
if erasure_set.id == 0 {
erasure_set.id = d.set_index;
if let Ok(cache) = load_data_usage_cache(
&store.pools[d.pool_index as usize].disk_set[d.set_index as usize].clone(),
DATA_USAGE_CACHE_NAME,
)
.await
{
let data_usage_info = cache.dui(DATA_USAGE_ROOT, &Vec::<String>::new());
erasure_set.objects_count = data_usage_info.objects_total_count;
erasure_set.versions_count = data_usage_info.versions_total_count;
erasure_set.delete_markers_count = data_usage_info.delete_markers_total_count;
erasure_set.usage = data_usage_info.objects_total_size;
};
}
erasure_set.raw_capacity += d.total_space;
erasure_set.raw_usage += d.used_space;
if d.healing {
erasure_set.heal_disks = 1;
}
}
Ok(pools_info)
}
#[allow(clippy::const_is_empty)]
pub fn get_commit_id() -> String {
let ver = if !build::TAG.is_empty() {
build::TAG.to_string()
} else if !build::SHORT_COMMIT.is_empty() {
build::SHORT_COMMIT.to_string()
} else {
build::PKG_VERSION.to_string()
};
format!("{}@{}", build::COMMIT_DATE_3339, ver)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/metrics_realtime.rs | crates/ecstore/src/metrics_realtime.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
admin_server_info::get_local_server_property,
new_object_layer_fn,
store_api::StorageAPI,
// utils::os::get_drive_stats,
};
use chrono::Utc;
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics};
use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics};
use rustfs_utils::os::get_drive_stats;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use tracing::{debug, info};
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CollectMetricsOpts {
pub hosts: HashSet<String>,
pub disks: HashSet<String>,
pub job_id: String,
pub dep_id: String,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct MetricType(u32);
impl MetricType {
// Define some constants
pub const NONE: MetricType = MetricType(0);
pub const SCANNER: MetricType = MetricType(1 << 0);
pub const DISK: MetricType = MetricType(1 << 1);
pub const OS: MetricType = MetricType(1 << 2);
pub const BATCH_JOBS: MetricType = MetricType(1 << 3);
pub const SITE_RESYNC: MetricType = MetricType(1 << 4);
pub const NET: MetricType = MetricType(1 << 5);
pub const MEM: MetricType = MetricType(1 << 6);
pub const CPU: MetricType = MetricType(1 << 7);
pub const RPC: MetricType = MetricType(1 << 8);
// MetricsAll must be last.
pub const ALL: MetricType = MetricType((1 << 9) - 1);
pub fn new(t: u32) -> Self {
Self(t)
}
}
impl MetricType {
fn contains(&self, x: &MetricType) -> bool {
(self.0 & x.0) == x.0
}
}
/// Collect local metrics based on the specified types and options.
///
/// # Arguments
///
/// * `types` - A `MetricType` specifying which types of metrics to collect.
/// * `opts` - A reference to `CollectMetricsOpts` containing additional options for metric collection.
///
/// # Returns
/// * A `RealtimeMetrics` struct containing the collected metrics.
///
pub async fn collect_local_metrics(types: MetricType, opts: &CollectMetricsOpts) -> RealtimeMetrics {
debug!("collect_local_metrics");
let mut real_time_metrics = RealtimeMetrics::default();
if types.0 == MetricType::NONE.0 {
info!("types is None, return");
return real_time_metrics;
}
let mut by_host_name = GLOBAL_RUSTFS_ADDR.read().await.clone();
if !opts.hosts.is_empty() {
let server = get_local_server_property().await;
if opts.hosts.contains(&server.endpoint) {
by_host_name = server.endpoint;
} else {
return real_time_metrics;
}
}
let local_node_name = GLOBAL_LOCAL_NODE_NAME.read().await.clone();
if by_host_name.starts_with(":") && !local_node_name.starts_with(":") {
by_host_name = local_node_name;
}
if types.contains(&MetricType::DISK) {
debug!("start get disk metrics");
let mut aggr = DiskMetric {
collected_at: Utc::now(),
..Default::default()
};
for (name, disk) in collect_local_disks_metrics(&opts.disks).await.into_iter() {
debug!("got disk metric, name: {name}, metric: {disk:?}");
real_time_metrics.by_disk.insert(name, disk.clone());
aggr.merge(&disk);
}
real_time_metrics.aggregated.disk = Some(aggr);
}
if types.contains(&MetricType::SCANNER) {
debug!("start get scanner metrics");
let mut metrics = global_metrics().report().await;
if let Some(init_time) = rustfs_common::get_global_init_time().await {
metrics.current_started = init_time;
}
real_time_metrics.aggregated.scanner = Some(metrics);
}
// if types.contains(&MetricType::OS) {}
// if types.contains(&MetricType::BATCH_JOBS) {}
// if types.contains(&MetricType::SITE_RESYNC) {}
// if types.contains(&MetricType::NET) {}
// if types.contains(&MetricType::MEM) {}
// if types.contains(&MetricType::CPU) {}
// if types.contains(&MetricType::RPC) {}
real_time_metrics
.by_host
.insert(by_host_name.clone(), real_time_metrics.aggregated.clone());
real_time_metrics.hosts.push(by_host_name);
real_time_metrics
}
async fn collect_local_disks_metrics(disks: &HashSet<String>) -> HashMap<String, DiskMetric> {
let store = match new_object_layer_fn() {
Some(store) => store,
None => return HashMap::new(),
};
let mut metrics = HashMap::new();
let storage_info = store.local_storage_info().await;
for d in storage_info.disks.iter() {
if !disks.is_empty() && !disks.contains(&d.endpoint) {
continue;
}
if d.state != DriveState::Ok.to_string() && d.state != DriveState::Unformatted.to_string() {
metrics.insert(
d.endpoint.clone(),
DiskMetric {
n_disks: 1,
offline: 1,
..Default::default()
},
);
continue;
}
let mut dm = DiskMetric {
n_disks: 1,
..Default::default()
};
if d.healing {
dm.healing += 1;
}
if let Some(m) = &d.metrics {
for (k, v) in m.api_calls.iter() {
if *v != 0 {
dm.life_time_ops.insert(k.clone(), *v);
}
}
for (k, v) in m.last_minute.iter() {
if v.count != 0 {
dm.last_minute.operations.insert(k.clone(), v.clone());
}
}
}
if let Ok(st) = get_drive_stats(d.major, d.minor) {
dm.io_stats = DiskIOStats {
read_ios: st.read_ios,
read_merges: st.read_merges,
read_sectors: st.read_sectors,
read_ticks: st.read_ticks,
write_ios: st.write_ios,
write_merges: st.write_merges,
write_sectors: st.write_sectors,
write_ticks: st.write_ticks,
current_ios: st.current_ios,
total_ticks: st.total_ticks,
req_ticks: st.req_ticks,
discard_ios: st.discard_ios,
discard_merges: st.discard_merges,
discard_sectors: st.discard_sectors,
discard_ticks: st.discard_ticks,
flush_ios: st.flush_ios,
flush_ticks: st.flush_ticks,
};
}
metrics.insert(d.endpoint.clone(), dm);
}
metrics
}
#[cfg(test)]
mod test {
use super::MetricType;
#[test]
fn tes_types() {
let t = MetricType::ALL;
assert!(t.contains(&MetricType::NONE));
assert!(t.contains(&MetricType::DISK));
assert!(t.contains(&MetricType::OS));
assert!(t.contains(&MetricType::BATCH_JOBS));
assert!(t.contains(&MetricType::SITE_RESYNC));
assert!(t.contains(&MetricType::NET));
assert!(t.contains(&MetricType::MEM));
assert!(t.contains(&MetricType::CPU));
assert!(t.contains(&MetricType::RPC));
let disk = MetricType::new(1 << 1);
assert!(disk.contains(&MetricType::DISK));
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/notification_sys.rs | crates/ecstore/src/notification_sys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::admin_server_info::get_commit_id;
use crate::error::{Error, Result};
use crate::global::{GLOBAL_BOOT_TIME, get_global_endpoints};
use crate::metrics_realtime::{CollectMetricsOpts, MetricType};
use crate::rpc::PeerRestClient;
use crate::{endpoints::EndpointServerPools, new_object_layer_fn};
use futures::future::join_all;
use lazy_static::lazy_static;
use rustfs_madmin::health::{Cpus, MemInfo, OsInfo, Partitions, ProcInfo, SysConfig, SysErrors, SysService};
use rustfs_madmin::metrics::RealtimeMetrics;
use rustfs_madmin::net::NetInfo;
use rustfs_madmin::{ItemState, ServerProperties};
use std::collections::hash_map::DefaultHasher;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::sync::OnceLock;
use std::time::{Duration, SystemTime};
use tokio::time::timeout;
use tracing::{error, warn};
lazy_static! {
pub static ref GLOBAL_NotificationSys: OnceLock<NotificationSys> = OnceLock::new();
}
pub async fn new_global_notification_sys(eps: EndpointServerPools) -> Result<()> {
let _ = GLOBAL_NotificationSys
.set(NotificationSys::new(eps).await)
.map_err(|_| Error::other("init notification_sys fail"));
Ok(())
}
pub fn get_global_notification_sys() -> Option<&'static NotificationSys> {
GLOBAL_NotificationSys.get()
}
pub struct NotificationSys {
pub peer_clients: Vec<Option<PeerRestClient>>,
#[allow(dead_code)]
pub all_peer_clients: Vec<Option<PeerRestClient>>,
}
impl NotificationSys {
pub async fn new(eps: EndpointServerPools) -> Self {
let (peer_clients, all_peer_clients) = PeerRestClient::new_clients(eps).await;
Self {
peer_clients,
all_peer_clients,
}
}
}
pub struct NotificationPeerErr {
pub host: String,
pub err: Option<Error>,
}
impl NotificationSys {
pub fn rest_client_from_hash(&self, s: &str) -> Option<PeerRestClient> {
if self.all_peer_clients.is_empty() {
return None;
}
let mut hasher = DefaultHasher::new();
s.hash(&mut hasher);
let idx = (hasher.finish() as usize) % self.all_peer_clients.len();
self.all_peer_clients[idx].clone()
}
pub async fn delete_policy(&self, policy_name: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let policy = policy_name.to_string();
futures.push(async move {
if let Some(client) = client {
match client.delete_policy(&policy).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn load_policy(&self, policy_name: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let policy = policy_name.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_policy(&policy).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let uog = user_or_group.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_policy_mapping(&uog, user_type, is_group).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn delete_user(&self, access_key: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let ak = access_key.to_string();
futures.push(async move {
if let Some(client) = client {
match client.delete_user(&ak).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn storage_info<S: StorageAPI>(&self, api: &S) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.peer_clients.len());
let endpoints = get_global_endpoints();
let peer_timeout = Duration::from_secs(2); // Same timeout as server_info
for client in self.peer_clients.iter() {
let endpoints = endpoints.clone();
futures.push(async move {
if let Some(client) = client {
let host = client.host.to_string();
// Wrap in timeout to ensure we don't hang on dead peers
match timeout(peer_timeout, client.local_storage_info()).await {
Ok(Ok(info)) => Some(info),
Ok(Err(err)) => {
warn!("peer {} storage_info failed: {}", host, err);
Some(rustfs_madmin::StorageInfo {
disks: get_offline_disks(&host, &endpoints),
..Default::default()
})
}
Err(_) => {
warn!("peer {} storage_info timed out after {:?}", host, peer_timeout);
client.evict_connection().await;
Some(rustfs_madmin::StorageInfo {
disks: get_offline_disks(&host, &endpoints),
..Default::default()
})
}
}
} else {
None
}
});
}
let mut replies = join_all(futures).await;
replies.push(Some(api.local_storage_info().await));
let mut disks = Vec::new();
for info in replies.into_iter().flatten() {
disks.extend(info.disks);
}
let backend = api.backend_info().await;
rustfs_madmin::StorageInfo { disks, backend }
}
pub async fn server_info(&self) -> Vec<ServerProperties> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
let endpoints = get_global_endpoints();
let peer_timeout = Duration::from_secs(2);
for client in self.peer_clients.iter() {
let endpoints = endpoints.clone();
futures.push(async move {
if let Some(client) = client {
let host = client.host.to_string();
match timeout(peer_timeout, client.server_info()).await {
Ok(Ok(info)) => info,
Ok(Err(err)) => {
warn!("peer {} server_info failed: {}", host, err);
// client.server_info handles eviction internally on error, but fallback needed
offline_server_properties(&host, &endpoints)
}
Err(_) => {
warn!("peer {} server_info timed out after {:?}", host, peer_timeout);
client.evict_connection().await;
offline_server_properties(&host, &endpoints)
}
}
} else {
ServerProperties::default()
}
});
}
join_all(futures).await
}
pub async fn load_user(&self, access_key: &str, temp: bool) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let ak = access_key.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_user(&ak, temp).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn load_group(&self, group: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let gname = group.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_group(&gname).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn delete_service_account(&self, access_key: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let ak = access_key.to_string();
futures.push(async move {
if let Some(client) = client {
match client.delete_service_account(&ak).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn load_service_account(&self, access_key: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let ak = access_key.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_service_account(&ak).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn reload_pool_meta(&self) {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().flatten() {
futures.push(client.reload_pool_meta());
}
let results = join_all(futures).await;
for result in results {
if let Err(err) = result {
error!("notification reload_pool_meta err {:?}", err);
}
}
}
#[tracing::instrument(skip(self))]
pub async fn load_rebalance_meta(&self, start: bool) {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for (i, client) in self.peer_clients.iter().flatten().enumerate() {
warn!(
"notification load_rebalance_meta start: {}, index: {}, client: {:?}",
start, i, client.host
);
futures.push(client.load_rebalance_meta(start));
}
let results = join_all(futures).await;
for result in results {
if let Err(err) = result {
error!("notification load_rebalance_meta err {:?}", err);
} else {
warn!("notification load_rebalance_meta success");
}
}
}
pub async fn stop_rebalance(&self) {
warn!("notification stop_rebalance start");
let Some(store) = new_object_layer_fn() else {
error!("stop_rebalance: not init");
return;
};
// warn!("notification stop_rebalance load_rebalance_meta");
// self.load_rebalance_meta(false).await;
// warn!("notification stop_rebalance load_rebalance_meta done");
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().flatten() {
futures.push(client.stop_rebalance());
}
let results = join_all(futures).await;
for result in results {
if let Err(err) = result {
error!("notification stop_rebalance err {:?}", err);
}
}
warn!("notification stop_rebalance stop_rebalance start");
let _ = store.stop_rebalance().await;
warn!("notification stop_rebalance stop_rebalance done");
}
pub async fn load_bucket_metadata(&self, bucket: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let b = bucket.to_string();
futures.push(async move {
if let Some(client) = client {
match client.load_bucket_metadata(&b).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn delete_bucket_metadata(&self, bucket: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let b = bucket.to_string();
futures.push(async move {
if let Some(client) = client {
match client.delete_bucket_metadata(&b).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn start_profiling(&self, profiler: &str) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
let pf = profiler.to_string();
futures.push(async move {
if let Some(client) = client {
match client.start_profiling(&pf).await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn get_cpus(&self) -> Vec<Cpus> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_cpus().await.unwrap_or_default()
} else {
Cpus::default()
}
});
}
join_all(futures).await
}
pub async fn get_net_info(&self) -> Vec<NetInfo> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_net_info().await.unwrap_or_default()
} else {
NetInfo::default()
}
});
}
join_all(futures).await
}
pub async fn get_partitions(&self) -> Vec<Partitions> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_partitions().await.unwrap_or_default()
} else {
Partitions::default()
}
});
}
join_all(futures).await
}
pub async fn get_os_info(&self) -> Vec<OsInfo> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_os_info().await.unwrap_or_default()
} else {
OsInfo::default()
}
});
}
join_all(futures).await
}
pub async fn get_sys_services(&self) -> Vec<SysService> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_se_linux_info().await.unwrap_or_default()
} else {
SysService::default()
}
});
}
join_all(futures).await
}
pub async fn get_sys_config(&self) -> Vec<SysConfig> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_sys_config().await.unwrap_or_default()
} else {
SysConfig::default()
}
});
}
join_all(futures).await
}
pub async fn get_sys_errors(&self) -> Vec<SysErrors> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_sys_errors().await.unwrap_or_default()
} else {
SysErrors::default()
}
});
}
join_all(futures).await
}
pub async fn get_mem_info(&self) -> Vec<MemInfo> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_mem_info().await.unwrap_or_default()
} else {
MemInfo::default()
}
});
}
join_all(futures).await
}
pub async fn get_proc_info(&self) -> Vec<ProcInfo> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
futures.push(async move {
if let Some(client) = client {
client.get_proc_info().await.unwrap_or_default()
} else {
ProcInfo::default()
}
});
}
join_all(futures).await
}
pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Vec<RealtimeMetrics> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter().cloned() {
let t_clone = t;
let opts_clone = opts;
futures.push(async move {
if let Some(client) = client {
client.get_metrics(t_clone, opts_clone).await.unwrap_or_default()
} else {
RealtimeMetrics::default()
}
});
}
join_all(futures).await
}
pub async fn reload_site_replication_config(&self) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
futures.push(async move {
if let Some(client) = client {
match client.reload_site_replication_config().await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
pub async fn load_transition_tier_config(&self) -> Vec<NotificationPeerErr> {
let mut futures = Vec::with_capacity(self.peer_clients.len());
for client in self.peer_clients.iter() {
futures.push(async move {
if let Some(client) = client {
match client.load_transition_tier_config().await {
Ok(_) => NotificationPeerErr {
host: client.host.to_string(),
err: None,
},
Err(e) => NotificationPeerErr {
host: client.host.to_string(),
err: Some(e),
},
}
} else {
NotificationPeerErr {
host: "".to_string(),
err: Some(Error::other("peer is not reachable")),
}
}
});
}
join_all(futures).await
}
}
async fn call_peer_with_timeout<F, Fut>(
timeout_dur: Duration,
host_label: &str,
op: F,
fallback: impl FnOnce() -> ServerProperties,
) -> ServerProperties
where
F: FnOnce() -> Fut,
Fut: Future<Output = Result<ServerProperties>> + Send,
{
match timeout(timeout_dur, op()).await {
Ok(Ok(info)) => info,
Ok(Err(err)) => {
warn!("peer {host_label} server_info failed: {err}");
fallback()
}
Err(_) => {
warn!("peer {host_label} server_info timed out after {:?}", timeout_dur);
fallback()
}
}
}
fn offline_server_properties(host: &str, endpoints: &EndpointServerPools) -> ServerProperties {
ServerProperties {
uptime: SystemTime::now()
.duration_since(*GLOBAL_BOOT_TIME.get().unwrap())
.unwrap_or_default()
.as_secs(),
version: get_commit_id(),
endpoint: host.to_string(),
state: ItemState::Offline.to_string().to_owned(),
disks: get_offline_disks(host, endpoints),
..Default::default()
}
}
fn get_offline_disks(offline_host: &str, endpoints: &EndpointServerPools) -> Vec<rustfs_madmin::Disk> {
let mut offline_disks = Vec::new();
for pool in endpoints.as_ref() {
for ep in pool.endpoints.as_ref() {
if (offline_host.is_empty() && ep.is_local) || offline_host == ep.host_port() {
offline_disks.push(rustfs_madmin::Disk {
endpoint: ep.to_string(),
state: ItemState::Offline.to_string().to_owned(),
pool_index: ep.pool_idx,
set_index: ep.set_idx,
disk_index: ep.disk_idx,
..Default::default()
});
}
}
}
offline_disks
}
#[cfg(test)]
mod tests {
use super::*;
fn build_props(endpoint: &str) -> ServerProperties {
ServerProperties {
endpoint: endpoint.to_string(),
..Default::default()
}
}
#[tokio::test]
async fn call_peer_with_timeout_returns_value_when_fast() {
let result = call_peer_with_timeout(
Duration::from_millis(50),
"peer-1",
|| async { Ok::<_, Error>(build_props("fast")) },
|| build_props("fallback"),
)
.await;
assert_eq!(result.endpoint, "fast");
}
#[tokio::test]
async fn call_peer_with_timeout_uses_fallback_on_error() {
let result = call_peer_with_timeout(
Duration::from_millis(50),
"peer-2",
|| async { Err::<ServerProperties, _>(Error::other("boom")) },
|| build_props("fallback"),
)
.await;
assert_eq!(result.endpoint, "fallback");
}
#[tokio::test]
async fn call_peer_with_timeout_uses_fallback_on_timeout() {
let result = call_peer_with_timeout(
Duration::from_millis(5),
"peer-3",
|| async {
tokio::time::sleep(Duration::from_millis(25)).await;
Ok::<_, Error>(build_props("slow"))
},
|| build_props("fallback"),
)
.await;
assert_eq!(result.endpoint, "fallback");
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/data_usage.rs | crates/ecstore/src/data_usage.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
collections::{HashMap, hash_map::Entry},
sync::Arc,
time::SystemTime,
};
pub mod local_snapshot;
pub use local_snapshot::{
DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta,
data_usage_dir, data_usage_state_dir, ensure_data_usage_layout, read_snapshot as read_local_snapshot, snapshot_file_name,
snapshot_object_path, snapshot_path, write_snapshot as write_local_snapshot,
};
use crate::{
bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, store::ECStore, store_api::StorageAPI,
};
use rustfs_common::data_usage::{
BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
};
use rustfs_utils::path::SLASH_SEPARATOR;
use tokio::fs;
use tracing::{error, info, warn};
use crate::error::Error;
// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";
// Data usage storage paths
lazy_static::lazy_static! {
pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
crate::disk::RUSTFS_META_BUCKET,
SLASH_SEPARATOR,
crate::disk::BUCKET_META_PREFIX
);
pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
crate::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
DATA_USAGE_OBJ_NAME
);
pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
crate::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
DATA_USAGE_BLOOM_NAME
);
}
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<(), Error> {
// Prevent older data from overwriting newer persisted stats
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await
&& let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf)
&& let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update)
&& new_ts <= existing_ts
{
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
let data =
serde_json::to_vec(&data_usage_info).map_err(|e| Error::other(format!("Failed to serialize data usage info: {e}")))?;
// Save to backend using the same mechanism as original code
crate::config::com::save_config(store, &DATA_USAGE_OBJ_NAME_PATH, data)
.await
.map_err(Error::other)?;
Ok(())
}
/// Load data usage info from backend storage
pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
let buf: Vec<u8> = match read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await {
Ok(data) => data,
Err(e) => {
error!("Failed to read data usage info from backend: {}", e);
if e == crate::error::Error::ConfigNotFound {
warn!("Data usage config not found, building basic statistics");
return build_basic_data_usage_info(store).await;
}
return Err(Error::other(e));
}
};
let mut data_usage_info: DataUsageInfo =
serde_json::from_slice(&buf).map_err(|e| Error::other(format!("Failed to deserialize data usage info: {e}")))?;
info!("Loaded data usage info from backend with {} buckets", data_usage_info.buckets_count);
// Validate data and supplement if empty
if data_usage_info.buckets_count == 0 || data_usage_info.buckets_usage.is_empty() {
warn!("Loaded data is empty, supplementing with basic statistics");
if let Ok(basic_info) = build_basic_data_usage_info(store.clone()).await {
data_usage_info.buckets_count = basic_info.buckets_count;
data_usage_info.buckets_usage = basic_info.buckets_usage;
data_usage_info.bucket_sizes = basic_info.bucket_sizes;
data_usage_info.objects_total_count = basic_info.objects_total_count;
data_usage_info.objects_total_size = basic_info.objects_total_size;
data_usage_info.last_update = basic_info.last_update;
}
}
// Handle backward compatibility
if data_usage_info.buckets_usage.is_empty() {
data_usage_info.buckets_usage = data_usage_info
.bucket_sizes
.iter()
.map(|(bucket, &size)| {
(
bucket.clone(),
rustfs_common::data_usage::BucketUsageInfo {
size,
..Default::default()
},
)
})
.collect();
}
if data_usage_info.bucket_sizes.is_empty() {
data_usage_info.bucket_sizes = data_usage_info
.buckets_usage
.iter()
.map(|(bucket, bui)| (bucket.clone(), bui.size))
.collect();
}
// Handle replication info
for (bucket, bui) in &data_usage_info.buckets_usage {
if (bui.replicated_size_v1 > 0
|| bui.replication_failed_count_v1 > 0
|| bui.replication_failed_size_v1 > 0
|| bui.replication_pending_count_v1 > 0)
&& let Ok((cfg, _)) = get_replication_config(bucket).await
&& !cfg.role.is_empty()
{
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}
Ok(data_usage_info)
}
/// Aggregate usage information from local disk snapshots.
fn merge_snapshot(aggregated: &mut DataUsageInfo, mut snapshot: LocalUsageSnapshot, latest_update: &mut Option<SystemTime>) {
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
*latest_update = Some(update);
}
snapshot.recompute_totals();
aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
aggregated.versions_total_count = aggregated.versions_total_count.saturating_add(snapshot.versions_total_count);
aggregated.delete_markers_total_count = aggregated
.delete_markers_total_count
.saturating_add(snapshot.delete_markers_total_count);
aggregated.objects_total_size = aggregated.objects_total_size.saturating_add(snapshot.objects_total_size);
for (bucket, usage) in snapshot.buckets_usage.into_iter() {
let bucket_size = usage.size;
match aggregated.buckets_usage.entry(bucket.clone()) {
Entry::Occupied(mut entry) => entry.get_mut().merge(&usage),
Entry::Vacant(entry) => {
entry.insert(usage.clone());
}
}
aggregated
.bucket_sizes
.entry(bucket)
.and_modify(|size| *size = size.saturating_add(bucket_size))
.or_insert(bucket_size);
}
}
pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskUsageStatus>, DataUsageInfo), Error> {
let mut aggregated = DataUsageInfo::default();
let mut latest_update: Option<SystemTime> = None;
let mut statuses: Vec<DiskUsageStatus> = Vec::new();
for (pool_idx, pool) in store.pools.iter().enumerate() {
for set_disks in pool.disk_set.iter() {
let disk_entries = {
let guard = set_disks.disks.read().await;
guard.clone()
};
for (disk_index, disk_opt) in disk_entries.into_iter().enumerate() {
let Some(disk) = disk_opt else {
continue;
};
if !disk.is_local() {
continue;
}
let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
Some(id) => id.to_string(),
None => continue,
};
let root = disk.path();
let mut status = DiskUsageStatus {
disk_id: disk_id.clone(),
pool_index: Some(pool_idx),
set_index: Some(set_disks.set_index),
disk_index: Some(disk_index),
last_update: None,
snapshot_exists: false,
};
let snapshot_result = read_local_snapshot(root.as_path(), &disk_id).await;
// If a snapshot is corrupted or unreadable, skip it but keep processing others
if let Err(err) = &snapshot_result {
warn!(
"Failed to read data usage snapshot for disk {} (pool {}, set {}, disk {}): {}",
disk_id, pool_idx, set_disks.set_index, disk_index, err
);
// Best-effort cleanup so next scan can rebuild a fresh snapshot instead of repeatedly failing
let snapshot_file = snapshot_path(root.as_path(), &disk_id);
if let Err(remove_err) = fs::remove_file(&snapshot_file).await
&& remove_err.kind() != std::io::ErrorKind::NotFound
{
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
}
if let Ok(Some(mut snapshot)) = snapshot_result {
status.last_update = snapshot.last_update;
status.snapshot_exists = true;
if snapshot.meta.disk_id.is_empty() {
snapshot.meta.disk_id = disk_id.clone();
}
if snapshot.meta.pool_index.is_none() {
snapshot.meta.pool_index = Some(pool_idx);
}
if snapshot.meta.set_index.is_none() {
snapshot.meta.set_index = Some(set_disks.set_index);
}
if snapshot.meta.disk_index.is_none() {
snapshot.meta.disk_index = Some(disk_index);
}
merge_snapshot(&mut aggregated, snapshot, &mut latest_update);
}
statuses.push(status);
}
}
}
aggregated.buckets_count = aggregated.buckets_usage.len() as u64;
aggregated.last_update = latest_update;
aggregated.disk_usage_status = statuses.clone();
Ok((statuses, aggregated))
}
/// Calculate accurate bucket usage statistics by enumerating objects through the object layer.
pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Result<BucketUsageInfo, Error> {
let mut continuation: Option<String> = None;
let mut objects_count: u64 = 0;
let mut versions_count: u64 = 0;
let mut total_size: u64 = 0;
let mut delete_markers: u64 = 0;
loop {
let result = store
.clone()
.list_objects_v2(
bucket_name,
"", // prefix
continuation.clone(),
None, // delimiter
1000, // max_keys
false, // fetch_owner
None, // start_after
false, // incl_deleted
)
.await?;
for object in result.objects.iter() {
if object.is_dir {
continue;
}
if object.delete_marker {
delete_markers = delete_markers.saturating_add(1);
continue;
}
let object_size = object.size.max(0) as u64;
objects_count = objects_count.saturating_add(1);
total_size = total_size.saturating_add(object_size);
let detected_versions = if object.num_versions > 0 {
object.num_versions as u64
} else {
1
};
versions_count = versions_count.saturating_add(detected_versions);
}
if !result.is_truncated {
break;
}
continuation = result.next_continuation_token.clone();
if continuation.is_none() {
warn!(
"Bucket {} listing marked truncated but no continuation token returned; stopping early",
bucket_name
);
break;
}
}
if versions_count == 0 {
versions_count = objects_count;
}
let usage = BucketUsageInfo {
size: total_size,
objects_count,
versions_count,
delete_markers_count: delete_markers,
..Default::default()
};
Ok(usage)
}
/// Build basic data usage info with real object counts
async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
let mut data_usage_info = DataUsageInfo::default();
// Get bucket list
match store.list_bucket(&crate::store_api::BucketOptions::default()).await {
Ok(buckets) => {
data_usage_info.buckets_count = buckets.len() as u64;
data_usage_info.last_update = Some(SystemTime::now());
let mut total_objects = 0u64;
let mut total_versions = 0u64;
let mut total_size = 0u64;
let mut total_delete_markers = 0u64;
for bucket_info in buckets {
if bucket_info.name.starts_with('.') {
continue; // Skip system buckets
}
match compute_bucket_usage(store.clone(), &bucket_info.name).await {
Ok(bucket_usage) => {
total_objects = total_objects.saturating_add(bucket_usage.objects_count);
total_versions = total_versions.saturating_add(bucket_usage.versions_count);
total_size = total_size.saturating_add(bucket_usage.size);
total_delete_markers = total_delete_markers.saturating_add(bucket_usage.delete_markers_count);
data_usage_info
.buckets_usage
.insert(bucket_info.name.clone(), bucket_usage.clone());
data_usage_info.bucket_sizes.insert(bucket_info.name, bucket_usage.size);
}
Err(e) => {
warn!("Failed to compute bucket usage for {}: {}", bucket_info.name, e);
}
}
}
data_usage_info.objects_total_count = total_objects;
data_usage_info.versions_total_count = total_versions;
data_usage_info.objects_total_size = total_size;
data_usage_info.delete_markers_total_count = total_delete_markers;
}
Err(e) => {
warn!("Failed to list buckets for basic data usage info: {}", e);
}
}
Ok(data_usage_info)
}
/// Create a data usage cache entry from size summary
pub fn create_cache_entry_from_summary(summary: &SizeSummary) -> DataUsageEntry {
let mut entry = DataUsageEntry::default();
entry.add_sizes(summary);
entry
}
/// Convert data usage cache to DataUsageInfo
pub fn cache_to_data_usage_info(cache: &DataUsageCache, path: &str, buckets: &[crate::store_api::BucketInfo]) -> DataUsageInfo {
let e = match cache.find(path) {
Some(e) => e,
None => return DataUsageInfo::default(),
};
let flat = cache.flatten(&e);
let mut buckets_usage = HashMap::new();
for bucket in buckets.iter() {
let e = match cache.find(&bucket.name) {
Some(e) => e,
None => continue,
};
let flat = cache.flatten(&e);
let mut bui = rustfs_common::data_usage::BucketUsageInfo {
size: flat.size as u64,
versions_count: flat.versions as u64,
objects_count: flat.objects as u64,
delete_markers_count: flat.delete_markers as u64,
object_size_histogram: flat.obj_sizes.to_map(),
object_versions_histogram: flat.obj_versions.to_map(),
..Default::default()
};
if let Some(rs) = &flat.replication_stats {
bui.replica_size = rs.replica_size;
bui.replica_count = rs.replica_count;
for (arn, stat) in rs.targets.iter() {
bui.replication_info.insert(
arn.clone(),
BucketTargetUsageInfo {
replication_pending_size: stat.pending_size,
replicated_size: stat.replicated_size,
replication_failed_size: stat.failed_size,
replication_pending_count: stat.pending_count,
replication_failed_count: stat.failed_count,
replicated_count: stat.replicated_count,
..Default::default()
},
);
}
}
buckets_usage.insert(bucket.name.clone(), bui);
}
DataUsageInfo {
last_update: cache.info.last_update,
objects_total_count: flat.objects as u64,
versions_total_count: flat.versions as u64,
delete_markers_total_count: flat.delete_markers as u64,
objects_total_size: flat.size as u64,
buckets_count: e.children.len() as u64,
buckets_usage,
..Default::default()
}
}
// Helper functions for DataUsageCache operations
pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str) -> crate::error::Result<DataUsageCache> {
use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use crate::store_api::{ObjectIO, ObjectOptions};
use http::HeaderMap;
use rand::Rng;
use std::path::Path;
use std::time::Duration;
use tokio::time::sleep;
let mut d = DataUsageCache::default();
let mut retries = 0;
while retries < 5 {
let path = Path::new(BUCKET_META_PREFIX).join(name);
match store
.get_object_reader(
RUSTFS_META_BUCKET,
path.to_str().unwrap(),
None,
HeaderMap::new(),
&ObjectOptions {
no_lock: true,
..Default::default()
},
)
.await
{
Ok(mut reader) => {
if let Ok(info) = DataUsageCache::unmarshal(&reader.read_all().await?) {
d = info
}
break;
}
Err(err) => match err {
crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
match store
.get_object_reader(
RUSTFS_META_BUCKET,
name,
None,
HeaderMap::new(),
&ObjectOptions {
no_lock: true,
..Default::default()
},
)
.await
{
Ok(mut reader) => {
if let Ok(info) = DataUsageCache::unmarshal(&reader.read_all().await?) {
d = info
}
break;
}
Err(_) => match err {
crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
break;
}
_ => {}
},
}
}
_ => {
break;
}
},
}
retries += 1;
let dur = {
let mut rng = rand::rng();
rng.random_range(0..1_000)
};
sleep(Duration::from_millis(dur)).await;
}
Ok(d)
}
pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate::error::Result<()> {
use crate::config::com::save_config;
use crate::disk::BUCKET_META_PREFIX;
use crate::new_object_layer_fn;
use std::path::Path;
let Some(store) = new_object_layer_fn() else {
return Err(crate::error::Error::other("errServerNotInitialized"));
};
let buf = cache.marshal_msg().map_err(crate::error::Error::other)?;
let buf_clone = buf.clone();
let store_clone = store.clone();
let name = Path::new(BUCKET_META_PREFIX).join(name).to_string_lossy().to_string();
let name_clone = name.clone();
tokio::spawn(async move {
let _ = save_config(store_clone, &format!("{}{}", &name_clone, ".bkp"), buf_clone).await;
});
save_config(store, &name, buf).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use rustfs_common::data_usage::BucketUsageInfo;
fn aggregate_for_test(
inputs: Vec<(DiskUsageStatus, Result<Option<LocalUsageSnapshot>, Error>)>,
) -> (Vec<DiskUsageStatus>, DataUsageInfo) {
let mut aggregated = DataUsageInfo::default();
let mut latest_update: Option<SystemTime> = None;
let mut statuses = Vec::new();
for (mut status, snapshot_result) in inputs {
if let Ok(Some(snapshot)) = snapshot_result {
status.snapshot_exists = true;
status.last_update = snapshot.last_update;
merge_snapshot(&mut aggregated, snapshot, &mut latest_update);
}
statuses.push(status);
}
aggregated.buckets_count = aggregated.buckets_usage.len() as u64;
aggregated.last_update = latest_update;
aggregated.disk_usage_status = statuses.clone();
(statuses, aggregated)
}
#[test]
fn aggregate_skips_corrupted_snapshot_and_preserves_other_disks() {
let mut good_snapshot = LocalUsageSnapshot::new(LocalUsageSnapshotMeta {
disk_id: "good-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(0),
});
good_snapshot.last_update = Some(SystemTime::now());
good_snapshot.buckets_usage.insert(
"bucket-a".to_string(),
BucketUsageInfo {
objects_count: 3,
versions_count: 3,
size: 42,
..Default::default()
},
);
good_snapshot.recompute_totals();
let bad_snapshot_err: Result<Option<LocalUsageSnapshot>, Error> = Err(Error::other("corrupted snapshot payload"));
let inputs = vec![
(
DiskUsageStatus {
disk_id: "bad-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(1),
last_update: None,
snapshot_exists: false,
},
bad_snapshot_err,
),
(
DiskUsageStatus {
disk_id: "good-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(0),
last_update: None,
snapshot_exists: false,
},
Ok(Some(good_snapshot)),
),
];
let (statuses, aggregated) = aggregate_for_test(inputs);
// Bad disk stays non-existent, good disk is marked present
let bad_status = statuses.iter().find(|s| s.disk_id == "bad-disk").unwrap();
assert!(!bad_status.snapshot_exists);
let good_status = statuses.iter().find(|s| s.disk_id == "good-disk").unwrap();
assert!(good_status.snapshot_exists);
// Aggregated data is from good snapshot only
assert_eq!(aggregated.objects_total_count, 3);
assert_eq!(aggregated.objects_total_size, 42);
assert_eq!(aggregated.buckets_count, 1);
assert_eq!(aggregated.buckets_usage.get("bucket-a").map(|b| (b.objects_count, b.size)), Some((3, 42)));
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/file_cache.rs | crates/ecstore/src/file_cache.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! High-performance file content and metadata caching using moka
//!
//! This module provides optimized caching for file operations to reduce
//! redundant I/O and improve overall system performance.
use super::disk::error::{Error, Result};
use bytes::Bytes;
use moka::future::Cache;
use rustfs_filemeta::FileMeta;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
pub struct OptimizedFileCache {
// Use moka as high-performance async cache
metadata_cache: Cache<PathBuf, Arc<FileMeta>>,
file_content_cache: Cache<PathBuf, Bytes>,
// Performance monitoring
cache_hits: std::sync::atomic::AtomicU64,
cache_misses: std::sync::atomic::AtomicU64,
}
impl OptimizedFileCache {
pub fn new() -> Self {
Self {
metadata_cache: Cache::builder()
.max_capacity(2048)
.time_to_live(Duration::from_secs(300)) // 5 minutes TTL
.time_to_idle(Duration::from_secs(60)) // 1 minute idle
.build(),
file_content_cache: Cache::builder()
.max_capacity(512) // Smaller file content cache
.time_to_live(Duration::from_secs(120))
.weigher(|_key: &PathBuf, value: &Bytes| value.len() as u32)
.build(),
cache_hits: std::sync::atomic::AtomicU64::new(0),
cache_misses: std::sync::atomic::AtomicU64::new(0),
}
}
pub async fn get_metadata(&self, path: PathBuf) -> Result<Arc<FileMeta>> {
if let Some(cached) = self.metadata_cache.get(&path).await {
self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
return Ok(cached);
}
self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
// Cache miss, read file
let data = tokio::fs::read(&path)
.await
.map_err(|e| Error::other(format!("Read metadata failed: {e}")))?;
let mut meta = FileMeta::default();
meta.unmarshal_msg(&data)?;
let arc_meta = Arc::new(meta);
self.metadata_cache.insert(path, arc_meta.clone()).await;
Ok(arc_meta)
}
pub async fn get_file_content(&self, path: PathBuf) -> Result<Bytes> {
if let Some(cached) = self.file_content_cache.get(&path).await {
self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
return Ok(cached);
}
self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let data = tokio::fs::read(&path)
.await
.map_err(|e| Error::other(format!("Read file failed: {e}")))?;
let bytes = Bytes::from(data);
self.file_content_cache.insert(path, bytes.clone()).await;
Ok(bytes)
}
// Prefetch related files
pub async fn prefetch_related(&self, base_path: &Path, patterns: &[&str]) {
let mut prefetch_tasks = Vec::new();
for pattern in patterns {
let path = base_path.join(pattern);
if tokio::fs::metadata(&path).await.is_ok() {
let cache = self.clone();
let path_clone = path.clone();
prefetch_tasks.push(async move {
let _ = cache.get_metadata(path_clone).await;
});
}
}
// Parallel prefetch, don't wait for completion
if !prefetch_tasks.is_empty() {
tokio::spawn(async move {
futures::future::join_all(prefetch_tasks).await;
});
}
}
// Batch metadata reading with deduplication
pub async fn get_metadata_batch(
&self,
paths: Vec<PathBuf>,
) -> Vec<std::result::Result<Arc<FileMeta>, rustfs_filemeta::Error>> {
let mut results = Vec::with_capacity(paths.len());
let mut cache_futures = Vec::new();
// First, attempt to get from cache
for (i, path) in paths.iter().enumerate() {
if let Some(cached) = self.metadata_cache.get(path).await {
results.push((i, Ok(cached)));
self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
} else {
cache_futures.push((i, path.clone()));
}
}
// For cache misses, read from filesystem
if !cache_futures.is_empty() {
let mut fs_results = Vec::new();
for (i, path) in cache_futures {
self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
match tokio::fs::read(&path).await {
Ok(data) => {
let mut meta = FileMeta::default();
match meta.unmarshal_msg(&data) {
Ok(_) => {
let arc_meta = Arc::new(meta);
self.metadata_cache.insert(path, arc_meta.clone()).await;
fs_results.push((i, Ok(arc_meta)));
}
Err(e) => {
fs_results.push((i, Err(e)));
}
}
}
Err(_e) => {
fs_results.push((i, Err(rustfs_filemeta::Error::Unexpected)));
}
}
}
results.extend(fs_results);
}
// Sort results back to original order
results.sort_by_key(|(i, _)| *i);
results.into_iter().map(|(_, result)| result).collect()
}
// Invalidate cache entries for a path
pub async fn invalidate(&self, path: &Path) {
self.metadata_cache.remove(path).await;
self.file_content_cache.remove(path).await;
}
// Get cache statistics
pub fn get_stats(&self) -> FileCacheStats {
let hits = self.cache_hits.load(std::sync::atomic::Ordering::Relaxed);
let misses = self.cache_misses.load(std::sync::atomic::Ordering::Relaxed);
let hit_rate = if hits + misses > 0 {
(hits as f64 / (hits + misses) as f64) * 100.0
} else {
0.0
};
FileCacheStats {
metadata_cache_size: self.metadata_cache.entry_count(),
content_cache_size: self.file_content_cache.entry_count(),
cache_hits: hits,
cache_misses: misses,
hit_rate,
total_weight: 0, // Simplified for compatibility
}
}
// Clear all caches
pub async fn clear(&self) {
self.metadata_cache.invalidate_all();
self.file_content_cache.invalidate_all();
// Wait for invalidation to complete
self.metadata_cache.run_pending_tasks().await;
self.file_content_cache.run_pending_tasks().await;
}
}
impl Clone for OptimizedFileCache {
fn clone(&self) -> Self {
Self {
metadata_cache: self.metadata_cache.clone(),
file_content_cache: self.file_content_cache.clone(),
cache_hits: std::sync::atomic::AtomicU64::new(self.cache_hits.load(std::sync::atomic::Ordering::Relaxed)),
cache_misses: std::sync::atomic::AtomicU64::new(self.cache_misses.load(std::sync::atomic::Ordering::Relaxed)),
}
}
}
#[derive(Debug)]
pub struct FileCacheStats {
pub metadata_cache_size: u64,
pub content_cache_size: u64,
pub cache_hits: u64,
pub cache_misses: u64,
pub hit_rate: f64,
pub total_weight: u64,
}
impl Default for OptimizedFileCache {
fn default() -> Self {
Self::new()
}
}
// Global cache instance
use std::sync::OnceLock;
static GLOBAL_FILE_CACHE: OnceLock<OptimizedFileCache> = OnceLock::new();
pub fn get_global_file_cache() -> &'static OptimizedFileCache {
GLOBAL_FILE_CACHE.get_or_init(OptimizedFileCache::new)
}
// Utility functions for common operations
pub async fn read_metadata_cached(path: PathBuf) -> Result<Arc<FileMeta>> {
get_global_file_cache().get_metadata(path).await
}
pub async fn read_file_content_cached(path: PathBuf) -> Result<Bytes> {
get_global_file_cache().get_file_content(path).await
}
pub async fn prefetch_metadata_patterns(base_path: &Path, patterns: &[&str]) {
get_global_file_cache().prefetch_related(base_path, patterns).await;
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
use tempfile::tempdir;
#[tokio::test]
async fn test_file_cache_basic() {
let cache = OptimizedFileCache::new();
// Create a temporary file
let dir = tempdir().unwrap();
let file_path = dir.path().join("test.txt");
let mut file = std::fs::File::create(&file_path).unwrap();
writeln!(file, "test content").unwrap();
drop(file);
// First read should be cache miss
let content1 = cache.get_file_content(file_path.clone()).await.unwrap();
assert_eq!(content1, Bytes::from("test content\n"));
// Second read should be cache hit
let content2 = cache.get_file_content(file_path.clone()).await.unwrap();
assert_eq!(content2, content1);
let stats = cache.get_stats();
assert!(stats.cache_hits > 0);
assert!(stats.cache_misses > 0);
}
#[tokio::test]
async fn test_metadata_batch_read() {
let cache = OptimizedFileCache::new();
// Create test files
let dir = tempdir().unwrap();
let mut paths = Vec::new();
for i in 0..5 {
let file_path = dir.path().join(format!("test_{i}.txt"));
let mut file = std::fs::File::create(&file_path).unwrap();
writeln!(file, "content {i}").unwrap();
paths.push(file_path);
}
// Note: This test would need actual FileMeta files to work properly
// For now, we just test that the function runs without errors
let results = cache.get_metadata_batch(paths).await;
assert_eq!(results.len(), 5);
}
#[tokio::test]
async fn test_cache_invalidation() {
let cache = OptimizedFileCache::new();
let dir = tempdir().unwrap();
let file_path = dir.path().join("test.txt");
let mut file = std::fs::File::create(&file_path).unwrap();
writeln!(file, "test content").unwrap();
drop(file);
// Read file to populate cache
let _ = cache.get_file_content(file_path.clone()).await.unwrap();
// Invalidate cache
cache.invalidate(&file_path).await;
// Next read should be cache miss again
let _ = cache.get_file_content(file_path.clone()).await.unwrap();
let stats = cache.get_stats();
assert!(stats.cache_misses >= 2);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/pools.rs | crates/ecstore/src/pools.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
use crate::config::com::{CONFIG_PREFIX, read_config, save_config};
use crate::data_usage::DATA_USAGE_CACHE_NAME;
use crate::disk::error::DiskError;
use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use crate::error::{Error, Result};
use crate::error::{
StorageError, is_err_bucket_exists, is_err_bucket_not_found, is_err_data_movement_overwrite, is_err_object_not_found,
is_err_version_not_found,
};
use crate::new_object_layer_fn;
use crate::notification_sys::get_global_notification_sys;
use crate::set_disk::SetDisks;
use crate::store_api::{
BucketOptions, CompletePart, GetObjectReader, MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI,
};
use crate::{sets::Sets, store::ECStore};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use futures::future::BoxFuture;
use http::HeaderMap;
use rmp_serde::{Deserializer, Serializer};
use rustfs_common::defer;
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::{HashReader, WarpReader};
use rustfs_utils::path::{SLASH_SEPARATOR, encode_dir_object, path_join};
use rustfs_workers::workers::Workers;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Display;
use std::io::{Cursor, Write};
use std::path::PathBuf;
use std::sync::Arc;
use time::{Duration, OffsetDateTime};
use tokio::io::{AsyncReadExt, BufReader};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
pub const POOL_META_NAME: &str = "pool.bin";
pub const POOL_META_FORMAT: u16 = 1;
pub const POOL_META_VERSION: u16 = 1;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolStatus {
#[serde(rename = "id")]
pub id: usize,
#[serde(rename = "cmdline")]
pub cmd_line: String,
#[serde(rename = "lastUpdate", with = "time::serde::rfc3339")]
pub last_update: OffsetDateTime,
#[serde(rename = "decommissionInfo")]
pub decommission: Option<PoolDecommissionInfo>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct PoolMeta {
pub version: u16,
pub pools: Vec<PoolStatus>,
pub dont_save: bool,
}
impl PoolMeta {
pub fn new(pools: &[Arc<Sets>], prev_meta: &PoolMeta) -> Self {
let mut new_meta = Self {
version: POOL_META_VERSION,
pools: Vec::new(),
..Default::default()
};
for (idx, pool) in pools.iter().enumerate() {
let mut skip = false;
for current_pool in prev_meta.pools.iter() {
if current_pool.cmd_line == pool.endpoints.cmd_line {
new_meta.pools.push(current_pool.clone());
skip = true;
break;
}
}
if skip {
continue;
}
new_meta.pools.push(PoolStatus {
cmd_line: pool.endpoints.cmd_line.clone(),
id: idx,
last_update: OffsetDateTime::now_utc(),
decommission: None,
});
}
new_meta
}
pub fn is_suspended(&self, idx: usize) -> bool {
if idx >= self.pools.len() {
return false;
}
self.pools[idx].decommission.is_some()
}
pub async fn load(&mut self, pool: Arc<Sets>, _pools: Vec<Arc<Sets>>) -> Result<()> {
let data = match read_config(pool, POOL_META_NAME).await {
Ok(data) => {
if data.is_empty() {
return Ok(());
} else if data.len() <= 4 {
return Err(Error::other("poolMeta: no data"));
}
data
}
Err(err) => {
if err == Error::ConfigNotFound {
return Ok(());
}
return Err(err);
}
};
let format = LittleEndian::read_u16(&data[0..2]);
if format != POOL_META_FORMAT {
return Err(Error::other(format!("PoolMeta: unknown format: {format}")));
}
let version = LittleEndian::read_u16(&data[2..4]);
if version != POOL_META_VERSION {
return Err(Error::other(format!("PoolMeta: unknown version: {version}")));
}
let mut buf = Deserializer::new(Cursor::new(&data[4..]));
let meta: PoolMeta = Deserialize::deserialize(&mut buf)?;
*self = meta;
if self.version != POOL_META_VERSION {
return Err(Error::other(format!("unexpected PoolMeta version: {}", self.version)));
}
Ok(())
}
pub async fn save(&self, pools: Vec<Arc<Sets>>) -> Result<()> {
if self.dont_save {
return Ok(());
}
let mut data = Vec::new();
data.write_u16::<LittleEndian>(POOL_META_FORMAT)?;
data.write_u16::<LittleEndian>(POOL_META_VERSION)?;
let mut buf = Vec::new();
self.serialize(&mut Serializer::new(&mut buf))?;
data.write_all(&buf)?;
for pool in pools {
save_config(pool, POOL_META_NAME, data.clone()).await?;
}
Ok(())
}
pub fn decommission_cancel(&mut self, idx: usize) -> bool {
if let Some(stats) = self.pools.get_mut(idx) {
if let Some(d) = &stats.decommission {
if !d.canceled {
stats.last_update = OffsetDateTime::now_utc();
let mut pd = d.clone();
pd.start_time = None;
pd.canceled = true;
pd.failed = false;
pd.complete = false;
stats.decommission = Some(pd);
true
} else {
false
}
} else {
false
}
} else {
false
}
}
pub fn decommission_failed(&mut self, idx: usize) -> bool {
if let Some(stats) = self.pools.get_mut(idx) {
if let Some(d) = &stats.decommission {
if !d.failed {
stats.last_update = OffsetDateTime::now_utc();
let mut pd = d.clone();
pd.start_time = None;
pd.canceled = false;
pd.failed = true;
pd.complete = false;
stats.decommission = Some(pd);
true
} else {
false
}
} else {
false
}
} else {
false
}
}
pub fn decommission_complete(&mut self, idx: usize) -> bool {
if let Some(stats) = self.pools.get_mut(idx) {
if let Some(d) = &stats.decommission {
if !d.complete {
stats.last_update = OffsetDateTime::now_utc();
let mut pd = d.clone();
pd.start_time = None;
pd.canceled = false;
pd.failed = false;
pd.complete = true;
stats.decommission = Some(pd);
true
} else {
false
}
} else {
false
}
} else {
false
}
}
pub fn decommission(&mut self, idx: usize, pi: PoolSpaceInfo) -> Result<()> {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return Err(StorageError::DecommissionAlreadyRunning);
}
let now = OffsetDateTime::now_utc();
pool.last_update = now;
pool.decommission = Some(PoolDecommissionInfo {
start_time: Some(now),
start_size: pi.free,
total_size: pi.total,
current_size: pi.free,
..Default::default()
});
}
Ok(())
}
pub fn queue_buckets(&mut self, idx: usize, bks: Vec<DecomBucketInfo>) {
for bk in bks.iter() {
if let Some(dec) = self.pools[idx].decommission.as_mut() {
dec.bucket_push(bk);
}
}
}
pub fn pending_buckets(&self, idx: usize) -> Vec<DecomBucketInfo> {
let mut list = Vec::new();
if let Some(pool) = self.pools.get(idx)
&& let Some(ref info) = pool.decommission
{
for bk in info.queued_buckets.iter() {
let (name, prefix) = path2_bucket_object(bk);
list.push(DecomBucketInfo { name, prefix });
}
}
list
}
pub fn is_bucket_decommissioned(&self, idx: usize, bucket: String) -> bool {
if let Some(ref info) = self.pools[idx].decommission {
info.is_bucket_decommissioned(&bucket)
} else {
false
}
}
pub fn bucket_done(&mut self, idx: usize, bucket: String) -> bool {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(info) = pool.decommission.as_mut() {
info.bucket_pop(&bucket)
} else {
false
}
} else {
false
}
}
pub fn count_item(&mut self, idx: usize, size: usize, failed: bool) {
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
if failed {
info.items_decommission_failed += 1;
info.bytes_failed += size;
} else {
info.items_decommissioned += 1;
info.bytes_done += size;
}
}
}
pub fn track_current_bucket_object(&mut self, idx: usize, bucket: String, object: String) {
if self.pools.get(idx).is_none_or(|v| v.decommission.is_none()) {
return;
}
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
info.object = object;
info.bucket = bucket;
}
}
pub async fn update_after(&mut self, idx: usize, pools: Vec<Arc<Sets>>, duration: Duration) -> Result<bool> {
if self.pools.get(idx).is_none_or(|v| v.decommission.is_none()) {
return Err(Error::other("InvalidArgument"));
}
let now = OffsetDateTime::now_utc();
if now.unix_timestamp() - self.pools[idx].last_update.unix_timestamp() > duration.whole_seconds() {
self.pools[idx].last_update = now;
self.save(pools).await?;
return Ok(true);
}
Ok(false)
}
#[allow(dead_code)]
pub fn validate(&self, pools: Vec<Arc<Sets>>) -> Result<bool> {
struct PoolInfo {
position: usize,
completed: bool,
decom_started: bool,
}
let mut remembered_pools = HashMap::new();
for (idx, pool) in self.pools.iter().enumerate() {
let mut complete = false;
let mut decom_started = false;
if let Some(decommission) = &pool.decommission {
if decommission.complete {
complete = true;
}
decom_started = true;
}
remembered_pools.insert(
pool.cmd_line.clone(),
PoolInfo {
position: idx,
completed: complete,
decom_started,
},
);
}
let mut specified_pools = HashMap::new();
for (idx, pool) in pools.iter().enumerate() {
specified_pools.insert(pool.endpoints.cmd_line.clone(), idx);
}
let mut update = false;
// Determine whether the selected pool should be removed from the retired list.
for k in specified_pools.keys() {
if let Some(pi) = remembered_pools.get(k) {
if pi.completed {
error!(
"pool({}) = {} is decommissioned, please remove from server command line",
pi.position + 1,
k
);
// return Err(Error::other(format!(
// "pool({}) = {} is decommissioned, please remove from server command line",
// pi.position + 1,
// k
// )));
}
} else {
// If the previous pool no longer exists, allow updates because a new pool may have been added.
update = true;
}
}
if specified_pools.len() == remembered_pools.len() {
for (k, pi) in remembered_pools.iter() {
if let Some(pos) = specified_pools.get(k)
&& *pos != pi.position
{
update = true; // Pool order changed, allow the update.
}
}
}
if !update {
update = specified_pools.len() != remembered_pools.len();
}
Ok(update)
}
pub fn return_resumable_pools(&self) -> Vec<PoolStatus> {
let mut new_pools = Vec::new();
for pool in &self.pools {
if let Some(decommission) = &pool.decommission {
if decommission.complete || decommission.canceled {
// Recovery is not required when:
// - Decommissioning completed
// - Decommissioning was cancelled
continue;
}
// All other scenarios require recovery
new_pools.push(pool.clone());
}
}
new_pools
}
}
fn path2_bucket_object(name: &str) -> (String, String) {
path2_bucket_object_with_base_path("", name)
}
fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) {
// Trim the base path and leading slash
let trimmed_path = path
.strip_prefix(base_path)
.unwrap_or(path)
.strip_prefix(SLASH_SEPARATOR)
.unwrap_or(path);
// Find the position of the first '/'
let pos = trimmed_path.find(SLASH_SEPARATOR).unwrap_or(trimmed_path.len());
// Split into bucket and prefix
let bucket = &trimmed_path[0..pos];
let prefix = &trimmed_path[pos + 1..]; // +1 to skip the '/' character if it exists
(bucket.to_string(), prefix.to_string())
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PoolDecommissionInfo {
#[serde(rename = "startTime", with = "time::serde::rfc3339::option")]
pub start_time: Option<OffsetDateTime>,
#[serde(rename = "startSize")]
pub start_size: usize,
#[serde(rename = "totalSize")]
pub total_size: usize,
#[serde(rename = "currentSize")]
pub current_size: usize,
#[serde(rename = "complete")]
pub complete: bool,
#[serde(rename = "failed")]
pub failed: bool,
#[serde(rename = "canceled")]
pub canceled: bool,
#[serde(skip)]
pub queued_buckets: Vec<String>,
#[serde(skip)]
pub decommissioned_buckets: Vec<String>,
#[serde(skip)]
pub bucket: String,
#[serde(skip)]
pub prefix: String,
#[serde(skip)]
pub object: String,
#[serde(rename = "objectsDecommissioned")]
pub items_decommissioned: usize,
#[serde(rename = "objectsDecommissionedFailed")]
pub items_decommission_failed: usize,
#[serde(rename = "bytesDecommissioned")]
pub bytes_done: usize,
#[serde(rename = "bytesDecommissionedFailed")]
pub bytes_failed: usize,
}
impl PoolDecommissionInfo {
pub fn bucket_push(&mut self, bucket: &DecomBucketInfo) {
for b in self.queued_buckets.iter() {
if self.is_bucket_decommissioned(b) {
return;
}
if b == &bucket.to_string() {
return;
}
}
self.queued_buckets.push(bucket.to_string());
self.bucket = bucket.name.clone();
self.prefix = bucket.prefix.clone();
}
pub fn is_bucket_decommissioned(&self, bucket: &String) -> bool {
for b in self.decommissioned_buckets.iter() {
if b == bucket {
return true;
}
}
false
}
pub fn bucket_pop(&mut self, bucket: &String) -> bool {
self.decommissioned_buckets.push(bucket.clone());
let mut found = None;
for (i, b) in self.queued_buckets.iter().enumerate() {
if b == bucket {
found = Some(i);
break;
}
}
if let Some(i) = found {
self.queued_buckets.remove(i);
if &self.bucket == bucket {
self.bucket = "".to_owned();
self.prefix = "".to_owned();
self.object = "".to_owned();
}
return true;
}
false
}
}
#[derive(Debug)]
pub struct PoolSpaceInfo {
pub free: usize,
pub total: usize,
pub used: usize,
}
#[derive(Debug, Default, Clone)]
pub struct DecomBucketInfo {
pub name: String,
pub prefix: String,
}
impl Display for DecomBucketInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
path_join(&[PathBuf::from(self.name.clone()), PathBuf::from(self.prefix.clone())]).to_string_lossy()
)
}
}
impl ECStore {
pub async fn status(&self, idx: usize) -> Result<PoolStatus> {
let space_info = self.get_decommission_pool_space_info(idx).await?;
let pool_meta = self.pool_meta.read().await;
let mut pool_info = pool_meta.pools[idx].clone();
if let Some(d) = pool_info.decommission.as_mut() {
d.total_size = space_info.total;
d.current_size = space_info.free;
} else {
pool_info.decommission = Some(PoolDecommissionInfo {
total_size: space_info.total,
current_size: space_info.free,
..Default::default()
});
}
Ok(pool_info)
}
async fn get_decommission_pool_space_info(&self, idx: usize) -> Result<PoolSpaceInfo> {
if let Some(sets) = self.pools.get(idx) {
let mut info = sets.storage_info().await;
info.backend = self.backend_info().await;
let total = get_total_usable_capacity(&info.disks, &info);
let free = get_total_usable_capacity_free(&info.disks, &info);
Ok(PoolSpaceInfo {
free,
total,
used: total - free,
})
} else {
Err(Error::other("InvalidArgument"))
}
}
#[tracing::instrument(skip(self))]
pub async fn decommission_cancel(&self, idx: usize) -> Result<()> {
if self.single_pool() {
return Err(Error::other("InvalidArgument"));
}
let Some(has_canceler) = self.decommission_cancelers.get(idx) else {
return Err(Error::other("InvalidArgument"));
};
if has_canceler.is_none() {
return Err(StorageError::DecommissionNotStarted);
}
let mut lock = self.pool_meta.write().await;
if lock.decommission_cancel(idx) {
lock.save(self.pools.clone()).await?;
drop(lock);
if let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
}
Ok(())
}
pub async fn is_decommission_running(&self) -> bool {
let pool_meta = self.pool_meta.read().await;
for pool in pool_meta.pools.iter() {
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return true;
}
}
false
}
#[tracing::instrument(skip(self, rx))]
pub async fn decommission(&self, rx: CancellationToken, indices: Vec<usize>) -> Result<()> {
warn!("decommission: {:?}", indices);
if indices.is_empty() {
return Err(Error::other("InvalidArgument"));
}
if self.single_pool() {
return Err(Error::other("InvalidArgument"));
}
self.start_decommission(indices.clone()).await?;
let rx_clone = rx.clone();
tokio::spawn(async move {
let Some(store) = new_object_layer_fn() else {
error!("store not init");
return;
};
for idx in indices.iter() {
store.do_decommission_in_routine(rx_clone.clone(), *idx).await;
}
});
Ok(())
}
#[allow(unused_assignments)]
#[tracing::instrument(skip(self, set, wk, rcfg))]
async fn decommission_entry(
self: &Arc<Self>,
idx: usize,
entry: MetaCacheEntry,
bucket: String,
set: Arc<SetDisks>,
wk: Arc<Workers>,
rcfg: Option<String>,
) {
warn!("decommission_entry: {} {}", &bucket, &entry.name);
wk.give().await;
if entry.is_dir() {
warn!("decommission_entry: skip dir {}", &entry.name);
return;
}
let mut fivs = match entry.file_info_versions(&bucket) {
Ok(f) => f,
Err(err) => {
error!("decommission_pool: file_info_versions err {:?}", &err);
return;
}
};
fivs.versions.sort_by(|a, b| b.mod_time.cmp(&a.mod_time));
let mut decommissioned: usize = 0;
let expired: usize = 0;
for version in fivs.versions.iter() {
// TODO: filterLifecycle
let remaining_versions = fivs.versions.len() - expired;
if version.deleted && remaining_versions == 1 && rcfg.is_none() {
//
decommissioned += 1;
info!("decommission_pool: DELETE marked object with no other non-current versions will be skipped");
continue;
}
let version_id = version.version_id.map(|v| v.to_string());
let mut ignore = false;
let mut failure = false;
let mut error = None;
if version.deleted {
// TODO: other params
if let Err(err) = self
.delete_object(
bucket.as_str(),
&version.name,
ObjectOptions {
versioned: true,
version_id: version_id.clone(),
mod_time: version.mod_time,
src_pool_idx: idx,
data_movement: true,
delete_marker: true,
skip_decommissioned: true,
..Default::default()
},
)
.await
{
if is_err_object_not_found(&err) || is_err_version_not_found(&err) || is_err_data_movement_overwrite(&err) {
ignore = true;
continue;
}
failure = true;
error = Some(err)
}
{
self.pool_meta.write().await.count_item(idx, 0, failure);
}
if !failure {
decommissioned += 1;
}
info!(
"decommission_pool: DecomCopyDeleteMarker {} {} {:?} {:?}",
&bucket, &version.name, &version_id, error
);
continue;
}
for _i in 0..3 {
if version.is_remote() {
// TODO: DecomTieredObject
}
let bucket = bucket.clone();
let rd = match set
.get_object_reader(
bucket.as_str(),
&encode_dir_object(&version.name),
None,
HeaderMap::new(),
&ObjectOptions {
version_id: version_id.clone(),
no_lock: true,
..Default::default()
},
)
.await
{
Ok(rd) => rd,
Err(err) => {
if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
ignore = true;
break;
}
if !ignore {
//
if bucket == RUSTFS_META_BUCKET && version.name.contains(DATA_USAGE_CACHE_NAME) {
ignore = true;
error!("decommission_pool: ignore data usage cache {}", &version.name);
break;
}
}
failure = true;
error!("decommission_pool: get_object_reader err {:?}", &err);
continue;
}
};
let bucket_name = bucket.clone();
let object_name = rd.object_info.name.clone();
if let Err(err) = self.clone().decommission_object(idx, bucket, rd).await {
if is_err_object_not_found(&err) || is_err_version_not_found(&err) || is_err_data_movement_overwrite(&err) {
ignore = true;
break;
}
failure = true;
error!("decommission_pool: decommission_object err {:?}", &err);
continue;
}
warn!(
"decommission_pool: decommission_object done {}/{} {}",
&bucket_name, &object_name, &version.name
);
failure = false;
break;
}
if ignore {
info!("decommission_pool: ignore {}", &version.name);
continue;
}
{
self.pool_meta.write().await.count_item(idx, decommissioned, failure);
}
if failure {
break;
}
decommissioned += 1;
}
if decommissioned == fivs.versions.len()
&& let Err(err) = set
.delete_object(
bucket.as_str(),
&encode_dir_object(&entry.name),
ObjectOptions {
delete_prefix: true,
delete_prefix_object: true,
..Default::default()
},
)
.await
{
error!("decommission_pool: delete_object err {:?}", &err);
}
{
let mut pool_meta = self.pool_meta.write().await;
pool_meta.track_current_bucket_object(idx, bucket.clone(), entry.name.clone());
let ok = pool_meta
.update_after(idx, self.pools.clone(), Duration::seconds(30))
.await
.unwrap_or_default();
drop(pool_meta);
if ok && let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
}
warn!("decommission_pool: decommission_entry done {} {}", &bucket, &entry.name);
}
#[tracing::instrument(skip(self, rx))]
async fn decommission_pool(
self: &Arc<Self>,
rx: CancellationToken,
idx: usize,
pool: Arc<Sets>,
bi: DecomBucketInfo,
) -> Result<()> {
let wk = Workers::new(pool.disk_set.len() * 2).map_err(Error::other)?;
// let mut vc = None;
// replication
let rcfg: Option<String> = None;
if bi.name != RUSTFS_META_BUCKET {
let _versioning = BucketVersioningSys::get(&bi.name).await?;
// vc = Some(versioning);
// TODO: LifecycleSys
// TODO: BucketObjectLockSys
// TODO: ReplicationConfig
}
for (set_idx, set) in pool.disk_set.iter().enumerate() {
wk.clone().take().await;
warn!("decommission_pool: decommission_pool {} {}", set_idx, &bi.name);
let decommission_entry: ListCallback = Arc::new({
let this = Arc::clone(self);
let bucket = bi.name.clone();
let wk = wk.clone();
let set = set.clone();
let rcfg = rcfg.clone();
move |entry: MetaCacheEntry| {
let this = this.clone();
let bucket = bucket.clone();
let wk = wk.clone();
let set = set.clone();
let rcfg = rcfg.clone();
Box::pin(async move {
wk.take().await;
this.decommission_entry(idx, entry, bucket, set, wk, rcfg).await
})
}
});
let set = set.clone();
let rx_clone = rx.clone();
let bi = bi.clone();
let set_id = set_idx;
let wk_clone = wk.clone();
tokio::spawn(async move {
loop {
if rx_clone.is_cancelled() {
warn!("decommission_pool: cancel {}", set_id);
break;
}
warn!("decommission_pool: list_objects_to_decommission {} {}", set_id, &bi.name);
match set
.list_objects_to_decommission(rx_clone.clone(), bi.clone(), decommission_entry.clone())
.await
{
Ok(_) => {
warn!("decommission_pool: list_objects_to_decommission {} done", set_id);
break;
}
Err(err) => {
error!("decommission_pool: list_objects_to_decommission {} err {:?}", set_id, &err);
if is_err_bucket_not_found(&err) {
warn!("decommission_pool: list_objects_to_decommission {} volume not found", set_id);
break;
}
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
}
}
}
wk_clone.give().await;
});
}
warn!("decommission_pool: decommission_pool wait {} {}", idx, &bi.name);
wk.wait().await;
warn!("decommission_pool: decommission_pool done {} {}", idx, &bi.name);
Ok(())
}
#[tracing::instrument(skip(self, rx))]
pub async fn do_decommission_in_routine(self: &Arc<Self>, rx: CancellationToken, idx: usize) {
if let Err(err) = self.decommission_in_background(rx, idx).await {
error!("decom err {:?}", &err);
if let Err(er) = self.decommission_failed(idx).await {
error!("decom failed err {:?}", &er);
} else {
warn!("decommission: decommission_failed {}", idx);
}
return;
}
warn!("decommission: decommission_in_background complete {}", idx);
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disks_layout.rs | crates/ecstore/src/disks_layout.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_utils::string::{ArgPattern, find_ellipses_patterns, has_ellipses};
use serde::Deserialize;
use std::collections::HashSet;
use std::env;
use std::io::{Error, Result};
use tracing::debug;
/// Supported set sizes this is used to find the optimal
/// single set size.
const SET_SIZES: [usize; 15] = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
const ENV_RUSTFS_ERASURE_SET_DRIVE_COUNT: &str = "RUSTFS_ERASURE_SET_DRIVE_COUNT";
#[derive(Deserialize, Debug, Default)]
pub struct PoolDisksLayout {
cmd_line: String,
layout: Vec<Vec<String>>,
}
impl PoolDisksLayout {
fn new(args: impl Into<String>, layout: Vec<Vec<String>>) -> Self {
PoolDisksLayout {
cmd_line: args.into(),
layout,
}
}
fn count(&self) -> usize {
self.layout.len()
}
fn get_cmd_line(&self) -> &str {
&self.cmd_line
}
pub fn iter(&self) -> impl Iterator<Item = &Vec<String>> {
self.layout.iter()
}
}
#[derive(Deserialize, Debug, Default)]
pub struct DisksLayout {
pub legacy: bool,
pub pools: Vec<PoolDisksLayout>,
}
// impl<T: AsRef<str>> TryFrom<&[T]> for DisksLayout {
// type Error = Error;
// fn try_from(args: &[T]) -> Result<Self, Self::Error> {
// if args.is_empty() {
// return Err(Error::from_string("Invalid argument"));
// }
// let is_ellipses = args.iter().any(|v| has_ellipses(&[v]));
// // None of the args have ellipses use the old style.
// if !is_ellipses {
// let set_args = get_all_sets(is_ellipses, args)?;
// return Ok(DisksLayout {
// legacy: true,
// pools: vec![PoolDisksLayout::new(
// args.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(" "),
// set_args,
// )],
// });
// }
// let mut layout = Vec::with_capacity(args.len());
// for arg in args.iter() {
// if !has_ellipses(&[arg]) && args.len() > 1 {
// return Err(Error::from_string(
// "all args must have ellipses for pool expansion (Invalid arguments specified)",
// ));
// }
// let set_args = get_all_sets(is_ellipses, &[arg])?;
// layout.push(PoolDisksLayout::new(arg.as_ref(), set_args));
// }
// Ok(DisksLayout {
// legacy: false,
// pools: layout,
// })
// }
// }
impl DisksLayout {
pub fn from_volumes<T: AsRef<str>>(args: &[T]) -> Result<Self> {
if args.is_empty() {
return Err(Error::other("Invalid argument"));
}
let is_ellipses = args.iter().any(|v| has_ellipses(&[v]));
let set_drive_count_env = env::var(ENV_RUSTFS_ERASURE_SET_DRIVE_COUNT).unwrap_or_else(|err| {
debug!("{} not set use default:0, {:?}", ENV_RUSTFS_ERASURE_SET_DRIVE_COUNT, err);
"0".to_string()
});
let set_drive_count: usize = set_drive_count_env.parse().map_err(Error::other)?;
// None of the args have ellipses use the old style.
if !is_ellipses {
let set_args = get_all_sets(set_drive_count, is_ellipses, args)?;
return Ok(DisksLayout {
legacy: true,
pools: vec![PoolDisksLayout::new(
args.iter().map(AsRef::as_ref).collect::<Vec<&str>>().join(" "),
set_args,
)],
});
}
let mut layout = Vec::with_capacity(args.len());
for arg in args.iter() {
if !has_ellipses(&[arg]) && args.len() > 1 {
return Err(Error::other(
"all args must have ellipses for pool expansion (Invalid arguments specified)",
));
}
let set_args = get_all_sets(set_drive_count, is_ellipses, &[arg])?;
layout.push(PoolDisksLayout::new(arg.as_ref(), set_args));
}
Ok(DisksLayout {
legacy: false,
pools: layout,
})
}
pub fn is_empty_layout(&self) -> bool {
self.pools.is_empty()
|| self.pools[0].layout.is_empty()
|| self.pools[0].layout[0].is_empty()
|| self.pools[0].layout[0][0].is_empty()
}
pub fn is_single_drive_layout(&self) -> bool {
self.pools.len() == 1 && self.pools[0].layout.len() == 1 && self.pools[0].layout[0].len() == 1
}
pub fn get_single_drive_layout(&self) -> &str {
&self.pools[0].layout[0][0]
}
/// returns the total number of sets in the layout.
pub fn get_set_count(&self, i: usize) -> usize {
self.pools.get(i).map_or(0, |v| v.count())
}
/// returns the total number of drives in the layout.
pub fn get_drives_per_set(&self, i: usize) -> usize {
self.pools.get(i).map_or(0, |v| v.layout.first().map_or(0, |v| v.len()))
}
/// returns the command line for the given index.
pub fn get_cmd_line(&self, i: usize) -> String {
self.pools.get(i).map_or(String::new(), |v| v.get_cmd_line().to_owned())
}
}
/// parses all ellipses input arguments, expands them into
/// corresponding list of endpoints chunked evenly in accordance with a
/// specific set size.
///
/// For example: {1...64} is divided into 4 sets each of size 16.
/// This applies to even distributed setup syntax as well.
fn get_all_sets<T: AsRef<str>>(set_drive_count: usize, is_ellipses: bool, args: &[T]) -> Result<Vec<Vec<String>>> {
let endpoint_set = if is_ellipses {
EndpointSet::from_volumes(args, set_drive_count)?
} else {
let set_indexes = if args.len() > 1 {
get_set_indexes(args, &[args.len()], set_drive_count, &[])?
} else {
vec![vec![args.len()]]
};
let endpoints = args.iter().map(|v| v.as_ref().to_string()).collect();
EndpointSet::new(endpoints, set_indexes)
};
let set_args = endpoint_set.get();
let mut unique_args = HashSet::with_capacity(set_args.len());
for args in set_args.iter() {
for arg in args {
if unique_args.contains(arg) {
return Err(Error::other(format!("Input args {arg} has duplicate ellipses")));
}
unique_args.insert(arg);
}
}
Ok(set_args)
}
/// represents parsed ellipses values, also provides
/// methods to get the sets of endpoints.
#[derive(Debug, Default)]
struct EndpointSet {
_arg_patterns: Vec<ArgPattern>,
endpoints: Vec<String>,
set_indexes: Vec<Vec<usize>>,
}
// impl<T: AsRef<str>> TryFrom<&[T]> for EndpointSet {
// type Error = Error;
// fn try_from(args: &[T]) -> Result<Self, Self::Error> {
// let mut arg_patterns = Vec::with_capacity(args.len());
// for arg in args {
// arg_patterns.push(find_ellipses_patterns(arg.as_ref())?);
// }
// let total_sizes = get_total_sizes(&arg_patterns);
// let set_indexes = get_set_indexes(args, &total_sizes, &arg_patterns)?;
// let mut endpoints = Vec::new();
// for ap in arg_patterns.iter() {
// let aps = ap.expand();
// for bs in aps {
// endpoints.push(bs.join(""));
// }
// }
// Ok(EndpointSet {
// set_indexes,
// _arg_patterns: arg_patterns,
// endpoints,
// })
// }
// }
impl EndpointSet {
/// Create a new EndpointSet with the given endpoints and set indexes.
pub fn new(endpoints: Vec<String>, set_indexes: Vec<Vec<usize>>) -> Self {
Self {
endpoints,
set_indexes,
..Default::default()
}
}
pub fn from_volumes<T: AsRef<str>>(args: &[T], set_drive_count: usize) -> Result<Self> {
let mut arg_patterns = Vec::with_capacity(args.len());
for arg in args {
arg_patterns.push(find_ellipses_patterns(arg.as_ref())?);
}
let total_sizes = get_total_sizes(&arg_patterns);
let set_indexes = get_set_indexes(args, &total_sizes, set_drive_count, &arg_patterns)?;
let mut endpoints = Vec::new();
for ap in arg_patterns.iter() {
let aps = ap.expand();
for bs in aps {
endpoints.push(bs.join(""));
}
}
Ok(EndpointSet {
set_indexes,
_arg_patterns: arg_patterns,
endpoints,
})
}
/// returns the sets representation of the endpoints
/// this function also intelligently decides on what will
/// be the right set size etc.
pub fn get(&self) -> Vec<Vec<String>> {
let mut sets: Vec<Vec<String>> = Vec::new();
let mut start = 0;
for set_idx in self.set_indexes.iter() {
for idx in set_idx {
let end = idx + start;
sets.push(self.endpoints[start..end].to_vec());
start = end;
}
}
sets
}
}
/// returns the greatest common divisor of all the ellipses sizes.
fn get_divisible_size(total_sizes: &[usize]) -> usize {
fn gcd(mut x: usize, mut y: usize) -> usize {
while y != 0 {
// be equivalent to: x, y = y, x%y
std::mem::swap(&mut x, &mut y);
y %= x;
}
x
}
total_sizes.iter().skip(1).fold(total_sizes[0], |acc, &y| gcd(acc, y))
}
fn possible_set_counts(set_size: usize) -> Vec<usize> {
let mut ss = Vec::new();
for s in SET_SIZES {
if set_size.is_multiple_of(s) {
ss.push(s);
}
}
ss
}
/// checks whether given count is a valid set size for erasure coding.
fn is_valid_set_size(count: usize) -> bool {
count >= SET_SIZES[0] && count <= SET_SIZES[SET_SIZES.len() - 1]
}
/// Final set size with all the symmetry accounted for.
fn common_set_drive_count(divisible_size: usize, set_counts: &[usize]) -> usize {
// prefers set_counts to be sorted for optimal behavior.
if divisible_size < set_counts[set_counts.len() - 1] {
return divisible_size;
}
let mut prev_d = divisible_size / set_counts[0];
let mut set_size = 0;
for &cnt in set_counts {
if divisible_size.is_multiple_of(cnt) {
let d = divisible_size / cnt;
if d <= prev_d {
prev_d = d;
set_size = cnt;
}
}
}
set_size
}
/// returns symmetrical setCounts based on the input argument patterns,
/// the symmetry calculation is to ensure that we also use uniform number
/// of drives common across all ellipses patterns.
fn possible_set_counts_with_symmetry(set_counts: &[usize], arg_patterns: &[ArgPattern]) -> Vec<usize> {
let mut new_set_counts: HashSet<usize> = HashSet::new();
for &ss in set_counts {
let mut symmetry = false;
for arg_pattern in arg_patterns {
for p in arg_pattern.as_ref().iter() {
if p.len() > ss {
symmetry = (p.len() % ss) == 0;
} else {
symmetry = (ss % p.len()) == 0;
}
}
}
if !new_set_counts.contains(&ss) && (symmetry || arg_patterns.is_empty()) {
new_set_counts.insert(ss);
}
}
let mut set_counts: Vec<usize> = new_set_counts.into_iter().collect();
set_counts.sort_unstable();
set_counts
}
/// returns list of indexes which provides the set size
/// on each index, this function also determines the final set size
/// The final set size has the affinity towards choosing smaller
/// indexes (total sets)
fn get_set_indexes<T: AsRef<str>>(
args: &[T],
total_sizes: &[usize],
set_drive_count: usize,
arg_patterns: &[ArgPattern],
) -> Result<Vec<Vec<usize>>> {
if args.is_empty() || total_sizes.is_empty() {
return Err(Error::other("Invalid argument"));
}
for &size in total_sizes {
// Check if total_sizes has minimum range upto set_size
if size < SET_SIZES[0] || size < set_drive_count {
return Err(Error::other(format!("Incorrect number of endpoints provided, size {size}")));
}
}
let common_size = get_divisible_size(total_sizes);
let mut set_counts = possible_set_counts(common_size);
if set_counts.is_empty() {
return Err(Error::other(format!(
"Incorrect number of endpoints provided, number of drives {} is not divisible by any supported erasure set sizes {}",
common_size, 0
)));
}
// Returns possible set counts with symmetry.
set_counts = possible_set_counts_with_symmetry(&set_counts, arg_patterns);
if set_counts.is_empty() {
return Err(Error::other("No symmetric distribution detected with input endpoints provided"));
}
let set_size = {
if set_drive_count > 0 {
let has_set_drive_count = set_counts.contains(&set_drive_count);
if !has_set_drive_count {
return Err(Error::other(format!(
"Invalid set drive count {}. Acceptable values for {:?} number drives are {:?}",
set_drive_count, common_size, &set_counts
)));
}
set_drive_count
} else {
set_counts = possible_set_counts_with_symmetry(&set_counts, arg_patterns);
if set_counts.is_empty() {
return Err(Error::other(format!(
"No symmetric distribution detected with input endpoints , drives {} cannot be spread symmetrically by any supported erasure set sizes {:?}",
common_size, &set_counts
)));
}
// Final set size with all the symmetry accounted for.
common_set_drive_count(common_size, &set_counts)
}
};
if !is_valid_set_size(set_size) {
return Err(Error::other("Incorrect number of endpoints provided3"));
}
Ok(total_sizes
.iter()
.map(|&size| (0..(size / set_size)).map(|_| set_size).collect())
.collect())
}
/// Return the total size for each argument patterns.
fn get_total_sizes(arg_patterns: &[ArgPattern]) -> Vec<usize> {
arg_patterns.iter().map(|v| v.total_sizes()).collect()
}
#[cfg(test)]
mod test {
use rustfs_utils::string::Pattern;
use super::*;
impl PartialEq for EndpointSet {
fn eq(&self, other: &Self) -> bool {
self._arg_patterns == other._arg_patterns && self.set_indexes == other.set_indexes
}
}
#[test]
fn test_get_divisible_size() {
struct TestCase {
total_sizes: Vec<usize>,
result: usize,
}
let test_cases = [
TestCase {
total_sizes: vec![24, 32, 16],
result: 8,
},
TestCase {
total_sizes: vec![32, 8, 4],
result: 4,
},
TestCase {
total_sizes: vec![8, 8, 8],
result: 8,
},
TestCase {
total_sizes: vec![24],
result: 24,
},
];
for (i, test_case) in test_cases.iter().enumerate() {
let ret = get_divisible_size(&test_case.total_sizes);
assert_eq!(ret, test_case.result, "Test{}: Expected {}, got {}", i + 1, test_case.result, ret);
}
}
#[test]
fn test_get_set_indexes() {
#[derive(Default)]
struct TestCase<'a> {
num: usize,
args: Vec<&'a str>,
total_sizes: Vec<usize>,
indexes: Vec<Vec<usize>>,
success: bool,
}
let test_cases = [
TestCase {
num: 1,
args: vec!["data{1...17}/export{1...52}"],
total_sizes: vec![14144],
..Default::default()
},
TestCase {
num: 2,
args: vec!["data{1...3}"],
total_sizes: vec![3],
indexes: vec![vec![3]],
success: true,
},
TestCase {
num: 3,
args: vec!["data/controller1/export{1...2}, data/controller2/export{1...4}, data/controller3/export{1...8}"],
total_sizes: vec![2, 4, 8],
indexes: vec![vec![2], vec![2, 2], vec![2, 2, 2, 2]],
success: true,
},
TestCase {
num: 4,
args: vec!["data{1...27}"],
total_sizes: vec![27],
indexes: vec![vec![9, 9, 9]],
success: true,
},
TestCase {
num: 5,
args: vec!["http://host{1...3}/data{1...180}"],
total_sizes: vec![540],
indexes: vec![vec![
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15,
]],
success: true,
},
TestCase {
num: 6,
args: vec!["http://host{1...2}.rack{1...4}/data{1...180}"],
total_sizes: vec![1440],
indexes: vec![vec![
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16,
]],
success: true,
},
TestCase {
num: 7,
args: vec!["http://host{1...2}/data{1...180}"],
total_sizes: vec![360],
indexes: vec![vec![
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12,
]],
success: true,
},
TestCase {
num: 8,
args: vec!["data/controller1/export{1...4}, data/controller2/export{1...8}, data/controller3/export{1...12}"],
total_sizes: vec![4, 8, 12],
indexes: vec![vec![4], vec![4, 4], vec![4, 4, 4]],
success: true,
},
TestCase {
num: 9,
args: vec!["data{1...64}"],
total_sizes: vec![64],
indexes: vec![vec![16, 16, 16, 16]],
success: true,
},
TestCase {
num: 10,
args: vec!["data{1...24}"],
total_sizes: vec![24],
indexes: vec![vec![12, 12]],
success: true,
},
TestCase {
num: 11,
args: vec!["data/controller{1...11}/export{1...8}"],
total_sizes: vec![88],
indexes: vec![vec![11, 11, 11, 11, 11, 11, 11, 11]],
success: true,
},
TestCase {
num: 12,
args: vec!["data{1...4}"],
total_sizes: vec![4],
indexes: vec![vec![4]],
success: true,
},
TestCase {
num: 13,
args: vec!["data/controller1/export{1...10}, data/controller2/export{1...10}, data/controller3/export{1...10}"],
total_sizes: vec![10, 10, 10],
indexes: vec![vec![10], vec![10], vec![10]],
success: true,
},
TestCase {
num: 14,
args: vec!["data{1...16}/export{1...52}"],
total_sizes: vec![832],
indexes: vec![vec![
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
]],
success: true,
},
TestCase {
num: 15,
args: vec!["https://node{1...3}.example.net/mnt/drive{1...8}"],
total_sizes: vec![24],
indexes: vec![vec![12, 12]],
success: true,
},
];
for test_case in test_cases {
let mut arg_patterns = Vec::new();
for v in test_case.args.iter() {
match find_ellipses_patterns(v) {
Ok(patterns) => {
arg_patterns.push(patterns);
}
Err(err) => {
panic!("Test{}: Unexpected failure {:?}", test_case.num, err);
}
}
}
match get_set_indexes(test_case.args.as_slice(), test_case.total_sizes.as_slice(), 0, arg_patterns.as_slice()) {
Ok(got_indexes) => {
if !test_case.success {
panic!("Test{}: Expected failure but passed instead", test_case.num);
}
assert_eq!(
test_case.indexes, got_indexes,
"Test{}: Expected {:?}, got {:?}",
test_case.num, test_case.indexes, got_indexes
)
}
Err(err) => {
if test_case.success {
panic!("Test{}: Expected success but failed instead {:?}", test_case.num, err);
}
}
}
}
}
fn get_sequences(start: usize, number: usize, padding_len: usize) -> Vec<String> {
let mut seq = Vec::new();
for i in start..=number {
if padding_len == 0 {
seq.push(format!("{i}"));
} else {
seq.push(format!("{i:0padding_len$}"));
}
}
seq
}
#[test]
fn test_into_endpoint_set() {
#[derive(Default)]
struct TestCase<'a> {
num: usize,
arg: &'a str,
es: EndpointSet,
success: bool,
}
let test_cases = [
// Tests invalid inputs.
TestCase {
num: 1,
arg: "...",
..Default::default()
},
// No range specified.
TestCase {
num: 2,
arg: "{...}",
..Default::default()
},
// Invalid range.
TestCase {
num: 3,
arg: "http://rustfs{2...3}/export/set{1...0}",
..Default::default()
},
// Range cannot be smaller than 4 minimum.
TestCase {
num: 4,
arg: "/export{1..2}",
..Default::default()
},
// Unsupported characters.
TestCase {
num: 5,
arg: "/export/test{1...2O}",
..Default::default()
},
// Tests valid inputs.
TestCase {
num: 6,
arg: "{1...27}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![Pattern {
seq: get_sequences(1, 27, 0),
..Default::default()
}])],
set_indexes: vec![vec![9, 9, 9]],
..Default::default()
},
success: true,
},
TestCase {
num: 7,
arg: "/export/set{1...64}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![Pattern {
seq: get_sequences(1, 64, 0),
prefix: "/export/set".to_owned(),
..Default::default()
}])],
set_indexes: vec![vec![16, 16, 16, 16]],
..Default::default()
},
success: true,
},
// Valid input for distributed setup.
TestCase {
num: 8,
arg: "http://rustfs{2...3}/export/set{1...64}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![
Pattern {
seq: get_sequences(1, 64, 0),
..Default::default()
},
Pattern {
seq: get_sequences(2, 3, 0),
prefix: "http://rustfs".to_owned(),
suffix: "/export/set".to_owned(),
},
])],
set_indexes: vec![vec![16, 16, 16, 16, 16, 16, 16, 16]],
..Default::default()
},
success: true,
},
// Supporting some advanced cases.
TestCase {
num: 9,
arg: "http://rustfs{1...64}.mydomain.net/data",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![Pattern {
seq: get_sequences(1, 64, 0),
prefix: "http://rustfs".to_owned(),
suffix: ".mydomain.net/data".to_owned(),
}])],
set_indexes: vec![vec![16, 16, 16, 16]],
..Default::default()
},
success: true,
},
TestCase {
num: 10,
arg: "http://rack{1...4}.mydomain.rustfs{1...16}/data",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![
Pattern {
seq: get_sequences(1, 16, 0),
suffix: "/data".to_owned(),
..Default::default()
},
Pattern {
seq: get_sequences(1, 4, 0),
prefix: "http://rack".to_owned(),
suffix: ".mydomain.rustfs".to_owned(),
},
])],
set_indexes: vec![vec![16, 16, 16, 16]],
..Default::default()
},
success: true,
},
// Supporting kubernetes cases.
TestCase {
num: 11,
arg: "http://rustfs{0...15}.mydomain.net/data{0...1}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![
Pattern {
seq: get_sequences(0, 1, 0),
..Default::default()
},
Pattern {
seq: get_sequences(0, 15, 0),
prefix: "http://rustfs".to_owned(),
suffix: ".mydomain.net/data".to_owned(),
},
])],
set_indexes: vec![vec![16, 16]],
..Default::default()
},
success: true,
},
// No host regex, just disks.
TestCase {
num: 12,
arg: "http://server1/data{1...32}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![Pattern {
seq: get_sequences(1, 32, 0),
prefix: "http://server1/data".to_owned(),
..Default::default()
}])],
set_indexes: vec![vec![16, 16]],
..Default::default()
},
success: true,
},
// No host regex, just disks with two position numerics.
TestCase {
num: 13,
arg: "http://server1/data{01...32}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![Pattern {
seq: get_sequences(1, 32, 2),
prefix: "http://server1/data".to_owned(),
..Default::default()
}])],
set_indexes: vec![vec![16, 16]],
..Default::default()
},
success: true,
},
// More than 2 ellipses are supported as well.
TestCase {
num: 14,
arg: "http://rustfs{2...3}/export/set{1...64}/test{1...2}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![
Pattern {
seq: get_sequences(1, 2, 0),
..Default::default()
},
Pattern {
seq: get_sequences(1, 64, 0),
suffix: "/test".to_owned(),
..Default::default()
},
Pattern {
seq: get_sequences(2, 3, 0),
prefix: "http://rustfs".to_owned(),
suffix: "/export/set".to_owned(),
},
])],
set_indexes: vec![vec![16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]],
..Default::default()
},
success: true,
},
// More than an ellipse per argument for standalone setup.
TestCase {
num: 15,
arg: "/export{1...10}/disk{1...10}",
es: EndpointSet {
_arg_patterns: vec![ArgPattern::new(vec![
Pattern {
seq: get_sequences(1, 10, 0),
..Default::default()
},
Pattern {
seq: get_sequences(1, 10, 0),
prefix: "/export".to_owned(),
suffix: "/disk".to_owned(),
},
])],
set_indexes: vec![vec![10, 10, 10, 10, 10, 10, 10, 10, 10, 10]],
..Default::default()
},
success: true,
},
];
for test_case in test_cases {
match EndpointSet::from_volumes([test_case.arg].as_slice(), 0) {
Ok(got_es) => {
if !test_case.success {
panic!("Test{}: Expected failure but passed instead", test_case.num);
}
assert_eq!(
test_case.es, got_es,
"Test{}: Expected {:?}, got {:?}",
test_case.num, test_case.es, got_es
)
}
Err(err) => {
if test_case.success {
panic!("Test{}: Expected success but failed instead {:?}", test_case.num, err);
}
}
}
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/store_utils.rs | crates/ecstore/src/store_utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::storageclass::STANDARD;
use crate::disk::RUSTFS_META_BUCKET;
use regex::Regex;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS};
use std::collections::HashMap;
use std::io::{Error, Result};
pub fn clean_metadata(metadata: &mut HashMap<String, String>) {
remove_standard_storage_class(metadata);
clean_metadata_keys(metadata, &["md5Sum", "etag", "expires", AMZ_OBJECT_TAGGING, "last-modified"]);
}
pub fn remove_standard_storage_class(metadata: &mut HashMap<String, String>) {
if metadata.get(AMZ_STORAGE_CLASS) == Some(&STANDARD.to_string()) {
metadata.remove(AMZ_STORAGE_CLASS);
}
}
pub fn clean_metadata_keys(metadata: &mut HashMap<String, String>, key_names: &[&str]) {
for key in key_names {
metadata.remove(key.to_owned());
}
}
// Check whether the bucket is the metadata bucket
fn is_meta_bucket(bucket_name: &str) -> bool {
bucket_name == RUSTFS_META_BUCKET
}
// Check whether the bucket is reserved
fn is_reserved_bucket(bucket_name: &str) -> bool {
bucket_name == "rustfs"
}
// Check whether the bucket name is reserved or invalid
pub fn is_reserved_or_invalid_bucket(bucket_entry: &str, strict: bool) -> bool {
if bucket_entry.is_empty() {
return true;
}
let bucket_entry = bucket_entry.trim_end_matches('/');
let result = check_bucket_name(bucket_entry, strict).is_err();
result || is_meta_bucket(bucket_entry) || is_reserved_bucket(bucket_entry)
}
// Check whether the bucket name is valid
fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> {
if bucket_name.trim().is_empty() {
return Err(Error::other("Bucket name cannot be empty"));
}
if bucket_name.len() < 3 {
return Err(Error::other("Bucket name cannot be shorter than 3 characters"));
}
if bucket_name.len() > 63 {
return Err(Error::other("Bucket name cannot be longer than 63 characters"));
}
let ip_address_regex = Regex::new(r"^(\d+\.){3}\d+$").unwrap();
if ip_address_regex.is_match(bucket_name) {
return Err(Error::other("Bucket name cannot be an IP address"));
}
let valid_bucket_name_regex = if strict {
Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap()
} else {
Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-_:]{1,61}[A-Za-z0-9]$").unwrap()
};
if !valid_bucket_name_regex.is_match(bucket_name) {
return Err(Error::other("Bucket name contains invalid characters"));
}
// Check for "..", ".-", "-."
if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") {
return Err(Error::other("Bucket name contains invalid characters"));
}
Ok(())
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/store_init.rs | crates/ecstore/src/store_init.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::{KVS, storageclass};
use crate::disk::error_reduce::{count_errs, reduce_write_quorum_errs};
use crate::disk::{self, DiskAPI};
use crate::error::{Error, Result};
use crate::{
disk::{
DiskInfoOptions, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET,
error::DiskError,
format::{FormatErasureVersion, FormatMetaVersion, FormatV3},
new_disk,
},
endpoints::Endpoints,
};
use futures::future::join_all;
use std::collections::{HashMap, hash_map::Entry};
use tracing::{info, warn};
use uuid::Uuid;
pub async fn init_disks(eps: &Endpoints, opt: &DiskOption) -> (Vec<Option<DiskStore>>, Vec<Option<DiskError>>) {
let mut futures = Vec::with_capacity(eps.as_ref().len());
for ep in eps.as_ref().iter() {
futures.push(new_disk(ep, opt));
}
let mut res = Vec::with_capacity(eps.as_ref().len());
let mut errors = Vec::with_capacity(eps.as_ref().len());
let results = join_all(futures).await;
for result in results {
match result {
Ok(s) => {
res.push(Some(s));
errors.push(None);
}
Err(e) => {
res.push(None);
errors.push(Some(e));
}
}
}
(res, errors)
}
pub async fn connect_load_init_formats(
first_disk: bool,
disks: &[Option<DiskStore>],
set_count: usize,
set_drive_count: usize,
deployment_id: Option<Uuid>,
) -> Result<FormatV3> {
let (formats, errs) = load_format_erasure_all(disks, false).await;
// debug!("load_format_erasure_all errs {:?}", &errs);
check_disk_fatal_errs(&errs)?;
check_format_erasure_values(&formats, set_drive_count)?;
if first_disk && should_init_erasure_disks(&errs) {
// UnformattedDisk, not format file create
info!("first_disk && should_init_erasure_disks");
// new format and save
let fm = init_format_erasure(disks, set_count, set_drive_count, deployment_id).await?;
return Ok(fm);
}
info!(
"first_disk: {}, should_init_erasure_disks: {}",
first_disk,
should_init_erasure_disks(&errs)
);
let unformatted = quorum_unformatted_disks(&errs);
if unformatted && !first_disk {
return Err(Error::NotFirstDisk);
}
if unformatted && first_disk {
return Err(Error::FirstDiskWait);
}
let fm = get_format_erasure_in_quorum(&formats)?;
Ok(fm)
}
pub fn quorum_unformatted_disks(errs: &[Option<DiskError>]) -> bool {
count_errs(errs, &DiskError::UnformattedDisk) > (errs.len() / 2)
}
pub fn should_init_erasure_disks(errs: &[Option<DiskError>]) -> bool {
count_errs(errs, &DiskError::UnformattedDisk) == errs.len()
}
pub fn check_disk_fatal_errs(errs: &[Option<DiskError>]) -> disk::error::Result<()> {
if count_errs(errs, &DiskError::UnsupportedDisk) == errs.len() {
return Err(DiskError::UnsupportedDisk);
}
if count_errs(errs, &DiskError::FileAccessDenied) == errs.len() {
return Err(DiskError::FileAccessDenied);
}
if count_errs(errs, &DiskError::DiskNotDir) == errs.len() {
return Err(DiskError::DiskNotDir);
}
Ok(())
}
async fn init_format_erasure(
disks: &[Option<DiskStore>],
set_count: usize,
set_drive_count: usize,
deployment_id: Option<Uuid>,
) -> Result<FormatV3> {
let fm = FormatV3::new(set_count, set_drive_count);
let mut fms = vec![None; disks.len()];
for i in 0..set_count {
for j in 0..set_drive_count {
let idx = i * set_drive_count + j;
let mut newfm = fm.clone();
newfm.erasure.this = fm.erasure.sets[i][j];
if let Some(id) = deployment_id {
newfm.id = id;
}
fms[idx] = Some(newfm);
}
}
save_format_file_all(disks, &fms).await?;
get_format_erasure_in_quorum(&fms)
}
pub fn get_format_erasure_in_quorum(formats: &[Option<FormatV3>]) -> Result<FormatV3> {
let mut countmap = HashMap::new();
for f in formats.iter() {
if f.is_some() {
let ds = f.as_ref().unwrap().drives();
let v = countmap.entry(ds);
match v {
Entry::Occupied(mut entry) => *entry.get_mut() += 1,
Entry::Vacant(vacant) => {
vacant.insert(1);
}
};
}
}
let (max_drives, max_count) = countmap.iter().max_by_key(|&(_, c)| c).unwrap_or((&0, &0));
if *max_drives == 0 || *max_count <= formats.len() / 2 {
warn!("get_format_erasure_in_quorum fi: {:?}", &formats);
return Err(Error::ErasureReadQuorum);
}
let format = formats
.iter()
.find(|f| f.as_ref().is_some_and(|v| v.drives().eq(max_drives)))
.ok_or(Error::ErasureReadQuorum)?;
let mut format = format.as_ref().unwrap().clone();
format.erasure.this = Uuid::nil();
Ok(format)
}
pub fn check_format_erasure_values(
formats: &[Option<FormatV3>],
// disks: &Vec<Option<DiskStore>>,
set_drive_count: usize,
) -> Result<()> {
for f in formats.iter() {
if f.is_none() {
continue;
}
let f = f.as_ref().unwrap();
check_format_erasure_value(f)?;
if formats.len() != f.erasure.sets.len() * f.erasure.sets[0].len() {
return Err(Error::other("formats length for erasure.sets not mtach"));
}
if f.erasure.sets[0].len() != set_drive_count {
return Err(Error::other("erasure set length not match set_drive_count"));
}
}
Ok(())
}
fn check_format_erasure_value(format: &FormatV3) -> Result<()> {
if format.version != FormatMetaVersion::V1 {
return Err(Error::other("invalid FormatMetaVersion"));
}
if format.erasure.version != FormatErasureVersion::V3 {
return Err(Error::other("invalid FormatErasureVersion"));
}
Ok(())
}
// load_format_erasure_all reads all format.json files
pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<DiskError>>) {
let mut futures = Vec::with_capacity(disks.len());
let mut datas = Vec::with_capacity(disks.len());
let mut errors = Vec::with_capacity(disks.len());
for disk in disks.iter() {
futures.push(async move {
if let Some(disk) = disk {
load_format_erasure(disk, heal).await
} else {
Err(DiskError::DiskNotFound)
}
});
}
let results = join_all(futures).await;
for (i, result) in results.into_iter().enumerate() {
match result {
Ok(s) => {
if !heal {
let _ = disks[i].as_ref().unwrap().set_disk_id(Some(s.erasure.this)).await;
}
datas.push(Some(s));
errors.push(None);
}
Err(e) => {
datas.push(None);
errors.push(Some(e));
}
}
}
(datas, errors)
}
pub async fn load_format_erasure(disk: &DiskStore, heal: bool) -> disk::error::Result<FormatV3> {
let data = disk
.read_all(RUSTFS_META_BUCKET, FORMAT_CONFIG_FILE)
.await
.map_err(|e| match e {
DiskError::FileNotFound => DiskError::UnformattedDisk,
DiskError::DiskNotFound => DiskError::UnformattedDisk,
_ => {
warn!("load_format_erasure err: {:?} {:?}", disk.to_string(), e);
e
}
})?;
let mut fm = FormatV3::try_from(data.as_ref())?;
if heal {
let info = disk
.disk_info(&DiskInfoOptions {
noop: heal,
..Default::default()
})
.await?;
fm.disk_info = Some(info);
}
Ok(fm)
}
async fn save_format_file_all(disks: &[Option<DiskStore>], formats: &[Option<FormatV3>]) -> disk::error::Result<()> {
let mut futures = Vec::with_capacity(disks.len());
for (i, disk) in disks.iter().enumerate() {
futures.push(save_format_file(disk, &formats[i]));
}
let mut errors = Vec::with_capacity(disks.len());
let results = join_all(futures).await;
for result in results {
match result {
Ok(_) => {
errors.push(None);
}
Err(e) => {
errors.push(Some(e));
}
}
}
if let Some(e) = reduce_write_quorum_errs(&errors, &[], disks.len()) {
return Err(e);
}
Ok(())
}
pub async fn save_format_file(disk: &Option<DiskStore>, format: &Option<FormatV3>) -> disk::error::Result<()> {
let Some(disk) = disk else {
return Err(DiskError::DiskNotFound);
};
let Some(format) = format else {
return Err(DiskError::other("format is none"));
};
let json_data = format.to_json()?;
let tmpfile = Uuid::new_v4().to_string();
disk.write_all(RUSTFS_META_BUCKET, tmpfile.as_str(), json_data.into_bytes().into())
.await?;
disk.rename_file(RUSTFS_META_BUCKET, tmpfile.as_str(), RUSTFS_META_BUCKET, FORMAT_CONFIG_FILE)
.await?;
disk.set_disk_id(Some(format.erasure.this)).await?;
Ok(())
}
pub fn ec_drives_no_config(set_drive_count: usize) -> Result<usize> {
let sc = storageclass::lookup_config(&KVS::new(), set_drive_count)?;
Ok(sc.get_parity_for_sc(storageclass::STANDARD).unwrap_or_default())
}
// #[derive(Debug, PartialEq, thiserror::Error)]
// pub enum ErasureError {
// #[error("erasure read quorum")]
// ErasureReadQuorum,
// #[error("erasure write quorum")]
// _ErasureWriteQuorum,
// #[error("not first disk")]
// NotFirstDisk,
// #[error("first disk wait")]
// FirstDiskWait,
// #[error("invalid part id {0}")]
// InvalidPart(usize),
// }
// impl ErasureError {
// pub fn is(&self, err: &Error) -> bool {
// if let Some(e) = err.downcast_ref::<ErasureError>() {
// return self == e;
// }
// false
// }
// }
// impl ErasureError {
// pub fn to_u32(&self) -> u32 {
// match self {
// ErasureError::ErasureReadQuorum => 0x01,
// ErasureError::_ErasureWriteQuorum => 0x02,
// ErasureError::NotFirstDisk => 0x03,
// ErasureError::FirstDiskWait => 0x04,
// ErasureError::InvalidPart(_) => 0x05,
// }
// }
// pub fn from_u32(error: u32) -> Option<Self> {
// match error {
// 0x01 => Some(ErasureError::ErasureReadQuorum),
// 0x02 => Some(ErasureError::_ErasureWriteQuorum),
// 0x03 => Some(ErasureError::NotFirstDisk),
// 0x04 => Some(ErasureError::FirstDiskWait),
// 0x05 => Some(ErasureError::InvalidPart(Default::default())),
// _ => None,
// }
// }
// }
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/event_notification.rs | crates/ecstore/src/event_notification.rs | #![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_variables)]
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use crate::bucket::metadata::BucketMetadata;
use crate::event::name::EventName;
use crate::event::targetlist::TargetList;
use crate::store::ECStore;
use crate::store_api::ObjectInfo;
pub struct EventNotifier {
target_list: TargetList,
//bucket_rules_map: HashMap<String , HashMap<EventName, Rules>>,
}
impl EventNotifier {
pub fn new() -> Arc<RwLock<Self>> {
Arc::new(RwLock::new(Self {
target_list: TargetList::new(),
//bucket_rules_map: HashMap::new(),
}))
}
fn get_arn_list(&self) -> Vec<String> {
todo!();
}
fn set(&self, bucket: &str, meta: BucketMetadata) {
todo!();
}
fn init_bucket_targets(&self, api: ECStore) -> Result<(), std::io::Error> {
/*if err := self.target_list.Add(globalNotifyTargetList.Targets()...); err != nil {
return err
}
self.target_list = self.target_list.Init(runtime.GOMAXPROCS(0)) // TODO: make this configurable (y4m4)
nil*/
todo!();
}
fn send(&self, args: EventArgs) {
todo!();
}
}
#[derive(Debug, Default)]
pub struct EventArgs {
pub event_name: String,
pub bucket_name: String,
pub object: ObjectInfo,
pub req_params: HashMap<String, String>,
pub resp_elements: HashMap<String, String>,
pub host: String,
pub user_agent: String,
}
impl EventArgs {}
pub fn send_event(args: EventArgs) {}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rebalance.rs | crates/ecstore/src/rebalance.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
use crate::config::com::{read_config_with_metadata, save_config_with_opts};
use crate::disk::error::DiskError;
use crate::error::{Error, Result};
use crate::error::{is_err_data_movement_overwrite, is_err_object_not_found, is_err_version_not_found};
use crate::global::get_global_endpoints;
use crate::pools::ListCallback;
use crate::set_disk::SetDisks;
use crate::store::ECStore;
use crate::store_api::{CompletePart, GetObjectReader, ObjectIO, ObjectOptions, PutObjReader};
use http::HeaderMap;
use rustfs_common::defer;
use rustfs_filemeta::{FileInfo, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::{HashReader, WarpReader};
use rustfs_utils::path::encode_dir_object;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::io::Cursor;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::io::{AsyncReadExt, BufReader};
use tokio::time::{Duration, Instant};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;
const REBAL_META_FMT: u16 = 1; // Replace with actual format value
const REBAL_META_VER: u16 = 1; // Replace with actual version value
const REBAL_META_NAME: &str = "rebalance.bin";
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct RebalanceStats {
#[serde(rename = "ifs")]
pub init_free_space: u64, // Pool free space at the start of rebalance
#[serde(rename = "ic")]
pub init_capacity: u64, // Pool capacity at the start of rebalance
#[serde(rename = "bus")]
pub buckets: Vec<String>, // Buckets being rebalanced or to be rebalanced
#[serde(rename = "rbs")]
pub rebalanced_buckets: Vec<String>, // Buckets rebalanced
#[serde(rename = "bu")]
pub bucket: String, // Last rebalanced bucket
#[serde(rename = "ob")]
pub object: String, // Last rebalanced object
#[serde(rename = "no")]
pub num_objects: u64, // Number of objects rebalanced
#[serde(rename = "nv")]
pub num_versions: u64, // Number of versions rebalanced
#[serde(rename = "bs")]
pub bytes: u64, // Number of bytes rebalanced
#[serde(rename = "par")]
pub participating: bool, // Whether the pool is participating in rebalance
#[serde(rename = "inf")]
pub info: RebalanceInfo, // Rebalance operation info
}
impl RebalanceStats {
pub fn update(&mut self, bucket: String, fi: &FileInfo) {
if fi.is_latest {
self.num_objects += 1;
}
self.num_versions += 1;
let on_disk_size = if !fi.deleted {
fi.size * (fi.erasure.data_blocks + fi.erasure.parity_blocks) as i64 / fi.erasure.data_blocks as i64
} else {
0
};
self.bytes += on_disk_size as u64;
self.bucket = bucket;
self.object = fi.name.clone();
}
}
pub type RStats = Vec<Arc<RebalanceStats>>;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
pub enum RebalStatus {
#[default]
None,
Started,
Completed,
Stopped,
Failed,
}
impl fmt::Display for RebalStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let status = match self {
RebalStatus::None => "None",
RebalStatus::Started => "Started",
RebalStatus::Completed => "Completed",
RebalStatus::Stopped => "Stopped",
RebalStatus::Failed => "Failed",
};
write!(f, "{status}")
}
}
impl From<u8> for RebalStatus {
fn from(value: u8) -> Self {
match value {
1 => RebalStatus::Started,
2 => RebalStatus::Completed,
3 => RebalStatus::Stopped,
4 => RebalStatus::Failed,
_ => RebalStatus::None,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
pub enum RebalSaveOpt {
#[default]
Stats,
StoppedAt,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct RebalanceInfo {
#[serde(rename = "startTs")]
pub start_time: Option<OffsetDateTime>, // Time at which rebalance-start was issued
#[serde(rename = "stopTs")]
pub end_time: Option<OffsetDateTime>, // Time at which rebalance operation completed or rebalance-stop was called
#[serde(rename = "status")]
pub status: RebalStatus, // Current state of rebalance operation
}
#[allow(dead_code)]
#[derive(Debug, Clone, Default)]
pub struct DiskStat {
pub total_space: u64,
pub available_space: u64,
}
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RebalanceMeta {
#[serde(skip)]
pub cancel: Option<CancellationToken>, // To be invoked on rebalance-stop
#[serde(skip)]
pub last_refreshed_at: Option<OffsetDateTime>,
#[serde(rename = "stopTs")]
pub stopped_at: Option<OffsetDateTime>, // Time when rebalance-stop was issued
#[serde(rename = "id")]
pub id: String, // ID of the ongoing rebalance operation
#[serde(rename = "pf")]
pub percent_free_goal: f64, // Computed from total free space and capacity at the start of rebalance
#[serde(rename = "rss")]
pub pool_stats: Vec<RebalanceStats>, // Per-pool rebalance stats keyed by pool index
}
impl RebalanceMeta {
pub fn new() -> Self {
Self::default()
}
pub async fn load<S: StorageAPI>(&mut self, store: Arc<S>) -> Result<()> {
self.load_with_opts(store, ObjectOptions::default()).await
}
pub async fn load_with_opts<S: StorageAPI>(&mut self, store: Arc<S>, opts: ObjectOptions) -> Result<()> {
let (data, _) = read_config_with_metadata(store, REBAL_META_NAME, &opts).await?;
if data.is_empty() {
info!("rebalanceMeta load_with_opts: no data");
return Ok(());
}
if data.len() <= 4 {
return Err(Error::other("rebalanceMeta load_with_opts: no data"));
}
// Read header
match u16::from_le_bytes([data[0], data[1]]) {
REBAL_META_FMT => {}
fmt => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown format: {fmt}"))),
}
match u16::from_le_bytes([data[2], data[3]]) {
REBAL_META_VER => {}
ver => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown version: {ver}"))),
}
let meta: Self = rmp_serde::from_read(Cursor::new(&data[4..]))?;
*self = meta;
self.last_refreshed_at = Some(OffsetDateTime::now_utc());
info!("rebalanceMeta load_with_opts: loaded meta done");
Ok(())
}
pub async fn save<S: StorageAPI>(&self, store: Arc<S>) -> Result<()> {
self.save_with_opts(store, ObjectOptions::default()).await
}
pub async fn save_with_opts<S: StorageAPI>(&self, store: Arc<S>, opts: ObjectOptions) -> Result<()> {
if self.pool_stats.is_empty() {
info!("rebalanceMeta save_with_opts: no pool stats");
return Ok(());
}
let mut data = Vec::new();
// Initialize the header
data.extend(&REBAL_META_FMT.to_le_bytes());
data.extend(&REBAL_META_VER.to_le_bytes());
let msg = rmp_serde::to_vec(self)?;
data.extend(msg);
save_config_with_opts(store, REBAL_META_NAME, data, &opts).await?;
Ok(())
}
}
impl ECStore {
#[tracing::instrument(skip_all)]
pub async fn load_rebalance_meta(&self) -> Result<()> {
let mut meta = RebalanceMeta::new();
info!("rebalanceMeta: store load rebalance meta");
match meta.load(self.pools[0].clone()).await {
Ok(_) => {
info!("rebalanceMeta: rebalance meta loaded0");
{
let mut rebalance_meta = self.rebalance_meta.write().await;
*rebalance_meta = Some(meta);
drop(rebalance_meta);
}
info!("rebalanceMeta: rebalance meta loaded1");
if let Err(err) = self.update_rebalance_stats().await {
error!("Failed to update rebalance stats: {}", err);
} else {
info!("rebalanceMeta: rebalance meta loaded2");
}
}
Err(err) => {
if err != Error::ConfigNotFound {
error!("rebalanceMeta: load rebalance meta err {:?}", &err);
return Err(err);
}
info!("rebalanceMeta: not found, rebalance not started");
}
}
Ok(())
}
#[tracing::instrument(skip_all)]
pub async fn update_rebalance_stats(&self) -> Result<()> {
let mut ok = false;
let pool_stats = {
let rebalance_meta = self.rebalance_meta.read().await;
rebalance_meta.as_ref().map(|v| v.pool_stats.clone()).unwrap_or_default()
};
info!("update_rebalance_stats: pool_stats: {:?}", &pool_stats);
for i in 0..self.pools.len() {
if pool_stats.get(i).is_none() {
info!("update_rebalance_stats: pool {} not found", i);
let mut rebalance_meta = self.rebalance_meta.write().await;
info!("update_rebalance_stats: pool {} not found, add", i);
if let Some(meta) = rebalance_meta.as_mut() {
meta.pool_stats.push(RebalanceStats::default());
}
ok = true;
drop(rebalance_meta);
}
}
if ok {
info!("update_rebalance_stats: save rebalance meta");
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(meta) = rebalance_meta.as_ref() {
meta.save(self.pools[0].clone()).await?;
}
}
Ok(())
}
// async fn find_index(&self, index: usize) -> Option<usize> {
// if let Some(meta) = self.rebalance_meta.read().await.as_ref() {
// return meta.pool_stats.get(index).map(|_v| index);
// }
// None
// }
#[tracing::instrument(skip(self))]
pub async fn init_rebalance_meta(&self, bucktes: Vec<String>) -> Result<String> {
info!("init_rebalance_meta: start rebalance");
let si = self.storage_info().await;
let mut disk_stats = vec![DiskStat::default(); self.pools.len()];
let mut total_cap = 0;
let mut total_free = 0;
for disk in si.disks.iter() {
if disk.pool_index < 0 || disk_stats.len() <= disk.pool_index as usize {
continue;
}
total_cap += disk.total_space;
total_free += disk.available_space;
disk_stats[disk.pool_index as usize].total_space += disk.total_space;
disk_stats[disk.pool_index as usize].available_space += disk.available_space;
}
let percent_free_goal = total_free as f64 / total_cap as f64;
let mut pool_stats = Vec::with_capacity(self.pools.len());
let now = OffsetDateTime::now_utc();
for disk_stat in disk_stats.iter() {
let mut pool_stat = RebalanceStats {
init_free_space: disk_stat.available_space,
init_capacity: disk_stat.total_space,
buckets: bucktes.clone(),
rebalanced_buckets: Vec::with_capacity(bucktes.len()),
..Default::default()
};
if (disk_stat.available_space as f64 / disk_stat.total_space as f64) < percent_free_goal {
pool_stat.participating = true;
pool_stat.info = RebalanceInfo {
start_time: Some(now),
status: RebalStatus::Started,
..Default::default()
};
}
pool_stats.push(pool_stat);
}
let meta = RebalanceMeta {
id: Uuid::new_v4().to_string(),
percent_free_goal,
pool_stats,
..Default::default()
};
meta.save(self.pools[0].clone()).await?;
info!("init_rebalance_meta: rebalance meta saved");
let id = meta.id.clone();
{
let mut rebalance_meta = self.rebalance_meta.write().await;
*rebalance_meta = Some(meta);
drop(rebalance_meta);
}
Ok(id)
}
#[tracing::instrument(skip(self, fi))]
pub async fn update_pool_stats(&self, pool_index: usize, bucket: String, fi: &FileInfo) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
pool_stat.update(bucket, fi);
}
Ok(())
}
#[tracing::instrument(skip(self))]
pub async fn next_rebal_bucket(&self, pool_index: usize) -> Result<Option<String>> {
info!("next_rebal_bucket: pool_index: {}", pool_index);
let rebalance_meta = self.rebalance_meta.read().await;
info!("next_rebal_bucket: rebalance_meta: {:?}", rebalance_meta);
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(pool_stat) = meta.pool_stats.get(pool_index)
{
if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
return Ok(None);
}
if pool_stat.buckets.is_empty() {
info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
return Ok(None);
}
info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
return Ok(Some(pool_stat.buckets[0].clone()));
}
info!("next_rebal_bucket: pool_index: {} None", pool_index);
Ok(None)
}
#[tracing::instrument(skip(self))]
pub async fn bucket_rebalance_done(&self, pool_index: usize, bucket: String) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
// Use retain to filter out buckets slated for removal
let mut found = false;
pool_stat.buckets.retain(|b| {
if b.as_str() == bucket.as_str() {
found = true;
pool_stat.rebalanced_buckets.push(b.clone());
false // Remove this element
} else {
true // Keep this element
}
});
if found {
info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
return Ok(());
} else {
info!("bucket_rebalance_done: bucket {} not found", bucket);
}
}
info!("bucket_rebalance_done: bucket {} not found", bucket);
Ok(())
}
pub async fn is_rebalance_started(&self) -> bool {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(ref meta) = *rebalance_meta {
if meta.stopped_at.is_some() {
info!("is_rebalance_started: rebalance stopped");
return false;
}
meta.pool_stats.iter().enumerate().for_each(|(i, v)| {
info!(
"is_rebalance_started: pool_index: {}, participating: {:?}, status: {:?}",
i, v.participating, v.info.status
);
});
if meta
.pool_stats
.iter()
.any(|v| v.participating && v.info.status != RebalStatus::Completed)
{
info!("is_rebalance_started: rebalance started");
return true;
}
}
info!("is_rebalance_started: rebalance not started");
false
}
pub async fn is_pool_rebalancing(&self, pool_index: usize) -> bool {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(ref meta) = *rebalance_meta {
if meta.stopped_at.is_some() {
return false;
}
if let Some(pool_stat) = meta.pool_stats.get(pool_index) {
return pool_stat.participating && pool_stat.info.status == RebalStatus::Started;
}
}
false
}
#[tracing::instrument(skip(self))]
pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(cancel_tx) = meta.cancel.as_ref()
{
cancel_tx.cancel();
}
Ok(())
}
#[tracing::instrument(skip_all)]
pub async fn start_rebalance(self: &Arc<Self>) {
info!("start_rebalance: start rebalance");
// let rebalance_meta = self.rebalance_meta.read().await;
let cancel_tx = CancellationToken::new();
let rx = cancel_tx.clone();
{
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
meta.cancel = Some(cancel_tx)
} else {
info!("start_rebalance: rebalance_meta is None exit");
return;
}
drop(rebalance_meta);
}
let participants = {
if let Some(ref meta) = *self.rebalance_meta.read().await {
// if meta.stopped_at.is_some() {
// warn!("start_rebalance: rebalance already stopped exit");
// return;
// }
let mut participants = vec![false; meta.pool_stats.len()];
for (i, pool_stat) in meta.pool_stats.iter().enumerate() {
info!("start_rebalance: pool {} status: {:?}", i, pool_stat.info.status);
if pool_stat.info.status != RebalStatus::Started {
info!("start_rebalance: pool {} not started, skipping", i);
continue;
}
info!("start_rebalance: pool {} participating: {:?}", i, pool_stat.participating);
participants[i] = pool_stat.participating;
}
participants
} else {
info!("start_rebalance:2 rebalance_meta is None exit");
Vec::new()
}
};
for (idx, participating) in participants.iter().enumerate() {
if !*participating {
info!("start_rebalance: pool {} is not participating, skipping", idx);
continue;
}
if !get_global_endpoints().as_ref().get(idx).is_some_and(|v| {
info!("start_rebalance: pool {} endpoints: {:?}", idx, v.endpoints);
v.endpoints.as_ref().first().is_some_and(|e| {
info!("start_rebalance: pool {} endpoint: {:?}, is_local: {}", idx, e, e.is_local);
e.is_local
})
}) {
info!("start_rebalance: pool {} is not local, skipping", idx);
continue;
}
let pool_idx = idx;
let store = self.clone();
let rx_clone = rx.clone();
tokio::spawn(async move {
if let Err(err) = store.rebalance_buckets(rx_clone, pool_idx).await {
error!("Rebalance failed for pool {}: {}", pool_idx, err);
} else {
info!("Rebalance completed for pool {}", pool_idx);
}
});
}
info!("start_rebalance: rebalance started done");
}
#[tracing::instrument(skip(self, rx))]
async fn rebalance_buckets(self: &Arc<Self>, rx: CancellationToken, pool_index: usize) -> Result<()> {
let (done_tx, mut done_rx) = tokio::sync::mpsc::channel::<Result<()>>(1);
// Save rebalance metadata periodically
let store = self.clone();
let save_task = tokio::spawn(async move {
let mut timer = tokio::time::interval_at(Instant::now() + Duration::from_secs(30), Duration::from_secs(10));
let mut msg: String;
let mut quit = false;
loop {
tokio::select! {
// TODO: cancel rebalance
Some(result) = done_rx.recv() => {
quit = true;
let now = OffsetDateTime::now_utc();
let state = match result {
Ok(_) => {
info!("rebalance_buckets: completed");
msg = format!("Rebalance completed at {now:?}");
RebalStatus::Completed},
Err(err) => {
info!("rebalance_buckets: error: {:?}", err);
// TODO: check stop
if err.to_string().contains("canceled") {
msg = format!("Rebalance stopped at {now:?}");
RebalStatus::Stopped
} else {
msg = format!("Rebalance stopped at {now:?} with err {err:?}");
RebalStatus::Failed
}
}
};
{
info!("rebalance_buckets: save rebalance meta, pool_index: {}, state: {:?}", pool_index, state);
let mut rebalance_meta = store.rebalance_meta.write().await;
if let Some(rbm) = rebalance_meta.as_mut() {
info!("rebalance_buckets: save rebalance meta2, pool_index: {}, state: {:?}", pool_index, state);
rbm.pool_stats[pool_index].info.status = state;
rbm.pool_stats[pool_index].info.end_time = Some(now);
}
}
}
_ = timer.tick() => {
let now = OffsetDateTime::now_utc();
msg = format!("Saving rebalance metadata at {now:?}");
}
}
if let Err(err) = store.save_rebalance_stats(pool_index, RebalSaveOpt::Stats).await {
error!("{} err: {:?}", msg, err);
} else {
info!(msg);
}
if quit {
info!("{}: exiting save_task", msg);
return;
}
timer.reset();
}
});
info!("Pool {} rebalancing is started", pool_index);
loop {
if rx.is_cancelled() {
info!("Pool {} rebalancing is stopped", pool_index);
done_tx.send(Err(Error::other("rebalance stopped canceled"))).await.ok();
break;
}
if let Some(bucket) = self.next_rebal_bucket(pool_index).await? {
info!("Rebalancing bucket: start {}", bucket);
if let Err(err) = self.rebalance_bucket(rx.clone(), bucket.clone(), pool_index).await {
if err.to_string().contains("not initialized") {
info!("rebalance_bucket: rebalance not initialized, continue");
continue;
}
error!("Error rebalancing bucket {}: {:?}", bucket, err);
done_tx.send(Err(err)).await.ok();
break;
}
info!("Rebalance bucket: done {} ", bucket);
self.bucket_rebalance_done(pool_index, bucket).await?;
} else {
info!("Rebalance bucket: no bucket to rebalance");
break;
}
}
info!("Pool {} rebalancing is done", pool_index);
done_tx.send(Ok(())).await.ok();
save_task.await.ok();
info!("Pool {} rebalancing is done2", pool_index);
Ok(())
}
async fn check_if_rebalance_done(&self, pool_index: usize) -> bool {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
// Check if the pool's rebalance status is already completed
if pool_stat.info.status == RebalStatus::Completed {
info!("check_if_rebalance_done: pool {} is already completed", pool_index);
return true;
}
// Calculate the percentage of free space improvement
let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
// Mark pool rebalance as done if within 5% of the PercentFreeGoal
if (pfi - meta.percent_free_goal).abs() <= 0.05 {
pool_stat.info.status = RebalStatus::Completed;
pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
return true;
}
}
false
}
#[allow(unused_assignments)]
#[tracing::instrument(skip(self, set))]
async fn rebalance_entry(
self: Arc<Self>,
bucket: String,
pool_index: usize,
entry: MetaCacheEntry,
set: Arc<SetDisks>,
// wk: Arc<Workers>,
) {
info!("rebalance_entry: start rebalance_entry");
// defer!(|| async {
// warn!("rebalance_entry: defer give worker start");
// wk.give().await;
// warn!("rebalance_entry: defer give worker done");
// });
if entry.is_dir() {
info!("rebalance_entry: entry is dir, skipping");
return;
}
if self.check_if_rebalance_done(pool_index).await {
info!("rebalance_entry: rebalance done, skipping pool {}", pool_index);
return;
}
let mut fivs = match entry.file_info_versions(&bucket) {
Ok(fivs) => fivs,
Err(err) => {
error!("rebalance_entry Error getting file info versions: {}", err);
info!("rebalance_entry: Error getting file info versions, skipping");
return;
}
};
fivs.versions.sort_by(|a, b| b.mod_time.cmp(&a.mod_time));
let mut rebalanced: usize = 0;
let expired: usize = 0;
for version in fivs.versions.iter() {
if version.is_remote() {
info!("rebalance_entry Entry {} is remote, skipping", version.name);
continue;
}
// TODO: filterLifecycle
let remaining_versions = fivs.versions.len() - expired;
if version.deleted && remaining_versions == 1 {
rebalanced += 1;
info!("rebalance_entry Entry {} is deleted and last version, skipping", version.name);
continue;
}
let version_id = version.version_id.map(|v| v.to_string());
let mut ignore = false;
let mut failure = false;
let mut error = None;
if version.deleted {
if let Err(err) = set
.delete_object(
&bucket,
&version.name,
ObjectOptions {
versioned: true,
version_id: version_id.clone(),
mod_time: version.mod_time,
src_pool_idx: pool_index,
data_movement: true,
delete_marker: true,
skip_decommissioned: true,
..Default::default()
},
)
.await
{
if is_err_object_not_found(&err) || is_err_version_not_found(&err) || is_err_data_movement_overwrite(&err) {
ignore = true;
info!("rebalance_entry {} Entry {} is already deleted, skipping", &bucket, version.name);
continue;
}
error = Some(err);
failure = true;
}
if !failure {
error!("rebalance_entry {} Entry {} deleted successfully", &bucket, &version.name);
let _ = self.update_pool_stats(pool_index, bucket.clone(), version).await;
rebalanced += 1;
} else {
error!(
"rebalance_entry {} Error deleting entry {}/{:?}: {:?}",
&bucket, &version.name, &version.version_id, error
);
}
continue;
}
for _i in 0..3 {
info!("rebalance_entry: get_object_reader, bucket: {}, version: {}", &bucket, &version.name);
let rd = match set
.get_object_reader(
bucket.as_str(),
&encode_dir_object(&version.name),
None,
HeaderMap::new(),
&ObjectOptions {
version_id: version_id.clone(),
no_lock: true, // NoDecryption
..Default::default()
},
)
.await
{
Ok(rd) => rd,
Err(err) => {
if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
ignore = true;
info!(
"rebalance_entry: get_object_reader, bucket: {}, version: {}, ignore",
&bucket, &version.name
);
break;
}
failure = true;
error!("rebalance_entry: get_object_reader err {:?}", &err);
continue;
}
};
if let Err(err) = self.clone().rebalance_object(pool_index, bucket.clone(), rd).await {
if is_err_object_not_found(&err) || is_err_version_not_found(&err) || is_err_data_movement_overwrite(&err) {
ignore = true;
info!("rebalance_entry {} Entry {} is already deleted, skipping", &bucket, version.name);
break;
}
failure = true;
error!("rebalance_entry: rebalance_object err {:?}", &err);
continue;
}
failure = false;
info!("rebalance_entry {} Entry {} rebalanced successfully", &bucket, &version.name);
break;
}
if ignore {
info!("rebalance_entry {} Entry {} is already deleted, skipping", &bucket, version.name);
continue;
}
if failure {
error!(
"rebalance_entry {} Error rebalancing entry {}/{:?}: {:?}",
&bucket, &version.name, &version.version_id, error
);
break;
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/sets.rs | crates/ecstore/src/sets.rs | #![allow(clippy::map_entry)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, sync::Arc};
use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
use crate::{
disk::{
DiskAPI, DiskInfo, DiskOption, DiskStore,
error::DiskError,
format::{DistributionAlgoVersion, FormatV3},
new_disk,
},
endpoints::{Endpoints, PoolEndpoints},
error::StorageError,
global::{GLOBAL_LOCAL_DISK_SET_DRIVES, is_dist_erasure},
set_disk::SetDisks,
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult,
ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
},
store_init::{check_format_erasure_values, get_format_erasure_in_quorum, load_format_erasure_all, save_format_file},
};
use futures::future::join_all;
use http::HeaderMap;
use rustfs_common::heal_channel::HealOpts;
use rustfs_common::{
GLOBAL_LOCAL_NODE_NAME,
heal_channel::{DriveState, HealItemType},
};
use rustfs_filemeta::FileInfo;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
use tokio::sync::broadcast::{Receiver, Sender};
use tokio::time::Duration;
use tracing::warn;
use tracing::{error, info};
#[derive(Debug, Clone)]
pub struct Sets {
pub id: Uuid,
// pub sets: Vec<Objects>,
// pub disk_set: Vec<Vec<Option<DiskStore>>>, // [set_count_idx][set_drive_count_idx] = disk_idx
pub disk_set: Vec<Arc<SetDisks>>, // [set_count_idx][set_drive_count_idx] = disk_idx
pub pool_idx: usize,
pub endpoints: PoolEndpoints,
pub format: FormatV3,
pub parity_count: usize,
pub set_count: usize,
pub set_drive_count: usize,
pub default_parity_count: usize,
pub distribution_algo: DistributionAlgoVersion,
exit_signal: Option<Sender<()>>,
}
impl Drop for Sets {
fn drop(&mut self) {
if let Some(exit_signal) = self.exit_signal.take() {
let _ = exit_signal.send(());
}
}
}
impl Sets {
#[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, parity_count))]
pub async fn new(
disks: Vec<Option<DiskStore>>,
endpoints: &PoolEndpoints,
fm: &FormatV3,
pool_idx: usize,
parity_count: usize,
) -> Result<Arc<Self>> {
let set_count = fm.erasure.sets.len();
let set_drive_count = fm.erasure.sets[0].len();
let mut unique: Vec<Vec<String>> = (0..set_count).map(|_| vec![]).collect();
for (idx, endpoint) in endpoints.endpoints.as_ref().iter().enumerate() {
let set_idx = idx / set_drive_count;
if endpoint.is_local && !unique[set_idx].contains(&"local".to_string()) {
unique[set_idx].push("local".to_string());
}
if !endpoint.is_local {
let host_port = format!("{}:{}", endpoint.url.host_str().unwrap(), endpoint.url.port().unwrap());
if !unique[set_idx].contains(&host_port) {
unique[set_idx].push(host_port);
}
}
}
let mut disk_set = Vec::with_capacity(set_count);
// Create fast lock manager for high performance
let fast_lock_manager = Arc::new(rustfs_lock::FastObjectLockManager::new());
for i in 0..set_count {
let mut set_drive = Vec::with_capacity(set_drive_count);
let mut set_endpoints = Vec::with_capacity(set_drive_count);
for j in 0..set_drive_count {
let idx = i * set_drive_count + j;
let mut disk = disks[idx].clone();
let endpoint = endpoints.endpoints.as_ref()[idx].clone();
set_endpoints.push(endpoint);
if disk.is_none() {
warn!("sets new set_drive {}-{} is none", i, j);
set_drive.push(None);
continue;
}
if disk.as_ref().unwrap().is_local() && is_dist_erasure().await {
let local_disk = {
let local_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.read().await;
local_set_drives[pool_idx][i][j].clone()
};
if local_disk.is_none() {
warn!("sets new set_drive {}-{} local_disk is none", i, j);
set_drive.push(None);
continue;
}
let _ = disk.as_ref().unwrap().close().await;
disk = local_disk;
}
let has_disk_id = disk.as_ref().unwrap().get_disk_id().await.unwrap_or_else(|err| {
if err == DiskError::UnformattedDisk {
error!("get_disk_id err {:?}", err);
} else {
warn!("get_disk_id err {:?}", err);
}
None
});
if let Some(_disk_id) = has_disk_id {
set_drive.push(disk);
} else {
error!("sets new set_drive {}-{} get_disk_id is none", i, j);
set_drive.push(None);
}
}
// Note: write_quorum was used for the old lock system, no longer needed with FastLock
let _write_quorum = set_drive_count - parity_count;
let set_disks = SetDisks::new(
fast_lock_manager.clone(),
GLOBAL_LOCAL_NODE_NAME.read().await.to_string(),
Arc::new(RwLock::new(set_drive)),
set_drive_count,
parity_count,
i,
pool_idx,
set_endpoints,
fm.clone(),
)
.await;
disk_set.push(set_disks);
}
let (tx, rx) = tokio::sync::broadcast::channel(1);
let sets = Arc::new(Self {
id: fm.id,
// sets: todo!(),
disk_set,
pool_idx,
endpoints: endpoints.clone(),
format: fm.clone(),
parity_count,
set_count,
set_drive_count,
default_parity_count: parity_count,
distribution_algo: fm.erasure.distribution_algo.clone(),
exit_signal: Some(tx),
});
let asets = sets.clone();
let rx1 = rx.resubscribe();
tokio::spawn(async move { asets.monitor_and_connect_endpoints(rx1).await });
// let sets2 = sets.clone();
// let rx2 = rx.resubscribe();
// tokio::spawn(async move { sets2.cleanup_deleted_objects_loop(rx2).await });
Ok(sets)
}
pub fn set_drive_count(&self) -> usize {
self.set_drive_count
}
// pub async fn cleanup_deleted_objects_loop(self: Arc<Self>, mut rx: Receiver<()>) {
// tokio::time::sleep(Duration::from_secs(5)).await;
// info!("start cleanup_deleted_objects_loop");
// // TODO: config interval
// let mut interval = tokio::time::interval(Duration::from_secs(15 * 3));
// loop {
// tokio::select! {
// _= interval.tick()=>{
// info!("cleanup_deleted_objects_loop tick");
// for set in self.disk_set.iter() {
// set.clone().cleanup_deleted_objects().await;
// }
// interval.reset();
// },
// _ = rx.recv() => {
// warn!("cleanup_deleted_objects_loop ctx cancelled");
// break;
// }
// }
// }
// warn!("cleanup_deleted_objects_loop exit");
// }
pub async fn monitor_and_connect_endpoints(&self, mut rx: Receiver<()>) {
tokio::time::sleep(Duration::from_secs(5)).await;
info!("start monitor_and_connect_endpoints");
self.connect_disks().await;
// TODO: config interval
let mut interval = tokio::time::interval(Duration::from_secs(15));
loop {
tokio::select! {
_= interval.tick()=>{
// debug!("tick...");
self.connect_disks().await;
interval.reset();
},
_ = rx.recv() => {
warn!("monitor_and_connect_endpoints ctx cancelled");
break;
}
}
}
warn!("monitor_and_connect_endpoints exit");
}
async fn connect_disks(&self) {
// debug!("start connect_disks ...");
for set in self.disk_set.iter() {
set.connect_disks().await;
}
// debug!("done connect_disks ...");
}
pub fn get_disks(&self, set_idx: usize) -> Arc<SetDisks> {
self.disk_set[set_idx].clone()
}
pub fn get_disks_by_key(&self, key: &str) -> Arc<SetDisks> {
self.get_disks(self.get_hashed_set_index(key))
}
fn get_hashed_set_index(&self, input: &str) -> usize {
match self.distribution_algo {
DistributionAlgoVersion::V1 => crc_hash(input, self.disk_set.len()),
DistributionAlgoVersion::V2 | DistributionAlgoVersion::V3 => sip_hash(input, self.disk_set.len(), self.id.as_bytes()),
}
}
// async fn commit_rename_data_dir(
// &self,
// disks: &Vec<Option<DiskStore>>,
// bucket: &str,
// object: &str,
// data_dir: &str,
// // write_quorum: usize,
// ) -> Vec<Option<Error>> {
// unimplemented!()
// }
async fn delete_prefix(&self, bucket: &str, object: &str) -> Result<()> {
let mut futures = Vec::new();
let opt = ObjectOptions {
delete_prefix: true,
..Default::default()
};
for set in self.disk_set.iter() {
futures.push(set.delete_object(bucket, object, opt.clone()));
}
let _results = join_all(futures).await;
Ok(())
}
}
// #[derive(Debug)]
// pub struct Objects {
// pub endpoints: Vec<Endpoint>,
// pub disks: Vec<usize>,
// pub set_index: usize,
// pub pool_index: usize,
// pub set_drive_count: usize,
// pub default_parity_count: usize,
// }
struct DelObj {
// set_idx: usize,
orig_idx: usize,
obj: ObjectToDelete,
}
#[async_trait::async_trait]
impl ObjectIO for Sets {
#[tracing::instrument(level = "debug", skip(self, object, h, opts))]
async fn get_object_reader(
&self,
bucket: &str,
object: &str,
range: Option<HTTPRangeSpec>,
h: HeaderMap,
opts: &ObjectOptions,
) -> Result<GetObjectReader> {
self.get_disks_by_key(object)
.get_object_reader(bucket, object, range, h, opts)
.await
}
#[tracing::instrument(level = "debug", skip(self, data))]
async fn put_object(&self, bucket: &str, object: &str, data: &mut PutObjReader, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).put_object(bucket, object, data, opts).await
}
}
#[async_trait::async_trait]
impl StorageAPI for Sets {
#[tracing::instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
#[tracing::instrument(skip(self))]
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.local_storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
#[tracing::instrument(skip(self))]
async fn make_bucket(&self, _bucket: &str, _opts: &MakeBucketOptions) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn get_bucket_info(&self, _bucket: &str, _opts: &BucketOptions) -> Result<BucketInfo> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_bucket(&self, _opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn delete_bucket(&self, _bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_objects_v2(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_object_versions(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_marker: Option<String>,
_version_marker: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).get_object_info(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn copy_object(
&self,
src_bucket: &str,
src_object: &str,
dst_bucket: &str,
dst_object: &str,
src_info: &mut ObjectInfo,
src_opts: &ObjectOptions,
dst_opts: &ObjectOptions,
) -> Result<ObjectInfo> {
let src_set = self.get_disks_by_key(src_object);
let dst_set = self.get_disks_by_key(dst_object);
let cp_src_dst_same = path_join_buf(&[src_bucket, src_object]) == path_join_buf(&[dst_bucket, dst_object]);
if cp_src_dst_same {
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
&& src_vid == dst_vid
{
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if !dst_opts.versioned && src_opts.version_id.is_none() {
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if dst_opts.versioned && src_opts.version_id != dst_opts.version_id {
src_info.version_only = true;
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
}
let put_opts = ObjectOptions {
user_defined: dst_opts.user_defined.clone(),
versioned: dst_opts.versioned,
version_id: dst_opts.version_id.clone(),
mod_time: dst_opts.mod_time,
..Default::default()
};
if let Some(put_object_reader) = src_info.put_object_reader.as_mut() {
return dst_set.put_object(dst_bucket, dst_object, put_object_reader, &put_opts).await;
}
Err(StorageError::InvalidArgument(
src_bucket.to_owned(),
src_object.to_owned(),
"put_object_reader2 is none".to_owned(),
))
}
#[tracing::instrument(skip(self))]
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, _force_del_marker: bool) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
if opts.delete_prefix && !opts.delete_prefix_object {
self.delete_prefix(bucket, object).await?;
return Ok(ObjectInfo::default());
}
self.get_disks_by_key(object).delete_object(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn delete_objects(
&self,
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
// Default return value
let mut del_objects = vec![DeletedObject::default(); objects.len()];
let mut del_errs = Vec::with_capacity(objects.len());
for _ in 0..objects.len() {
del_errs.push(None)
}
let mut set_obj_map = HashMap::new();
// hash key
for (i, obj) in objects.iter().enumerate() {
let idx = self.get_hashed_set_index(obj.object_name.as_str());
if !set_obj_map.contains_key(&idx) {
set_obj_map.insert(
idx,
vec![DelObj {
// set_idx: idx,
orig_idx: i,
obj: obj.clone(),
}],
);
} else if let Some(val) = set_obj_map.get_mut(&idx) {
val.push(DelObj {
// set_idx: idx,
orig_idx: i,
obj: obj.clone(),
});
}
}
// TODO: concurrency
for (k, v) in set_obj_map {
let disks = self.get_disks(k);
let objs: Vec<ObjectToDelete> = v.iter().map(|v| v.obj.clone()).collect();
let (dobjects, errs) = disks.delete_objects(bucket, objs, opts.clone()).await;
for (i, err) in errs.into_iter().enumerate() {
let obj = v.get(i).unwrap();
del_errs[obj.orig_idx] = err;
del_objects[obj.orig_idx] = dobjects.get(i).unwrap().clone();
}
}
(del_objects, del_errs)
}
async fn list_object_parts(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_number_marker: Option<usize>,
max_parts: usize,
opts: &ObjectOptions,
) -> Result<ListPartsInfo> {
self.get_disks_by_key(object)
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn list_multipart_uploads(
&self,
bucket: &str,
prefix: &str,
key_marker: Option<String>,
upload_id_marker: Option<String>,
delimiter: Option<String>,
max_uploads: usize,
) -> Result<ListMultipartsInfo> {
self.get_disks_by_key(prefix)
.list_multipart_uploads(bucket, prefix, key_marker, upload_id_marker, delimiter, max_uploads)
.await
}
#[tracing::instrument(skip(self))]
async fn new_multipart_upload(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<MultipartUploadResult> {
self.get_disks_by_key(object).new_multipart_upload(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object).transition_object(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
self.get_disks_by_key(object).add_partial(bucket, object, version_id).await
}
#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object)
.restore_transitioned_object(bucket, object, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn copy_object_part(
&self,
_src_bucket: &str,
_src_object: &str,
_dst_bucket: &str,
_dst_object: &str,
_upload_id: &str,
_part_id: usize,
_start_offset: i64,
_length: i64,
_src_info: &ObjectInfo,
_src_opts: &ObjectOptions,
_dst_opts: &ObjectOptions,
) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn put_object_part(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_id: usize,
data: &mut PutObjReader,
opts: &ObjectOptions,
) -> Result<PartInfo> {
self.get_disks_by_key(object)
.put_object_part(bucket, object, upload_id, part_id, data, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn get_multipart_info(
&self,
bucket: &str,
object: &str,
upload_id: &str,
opts: &ObjectOptions,
) -> Result<MultipartInfo> {
self.get_disks_by_key(object)
.get_multipart_info(bucket, object, upload_id, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn abort_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object)
.abort_multipart_upload(bucket, object, upload_id, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn complete_multipart_upload(
self: Arc<Self>,
bucket: &str,
object: &str,
upload_id: &str,
uploaded_parts: Vec<CompletePart>,
opts: &ObjectOptions,
) -> Result<ObjectInfo> {
self.get_disks_by_key(object)
.complete_multipart_upload(bucket, object, upload_id, uploaded_parts, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn get_disks(&self, _pool_idx: usize, _set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).put_object_metadata(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String> {
self.get_disks_by_key(object).get_object_tags(bucket, object, opts).await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object)
.put_object_tags(bucket, object, tags, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).delete_object_tags(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
let (disks, _) = init_storage_disks_with_errors(
&self.endpoints.endpoints,
&DiskOption {
cleanup: false,
health_check: false,
},
)
.await;
let (formats, errs) = load_format_erasure_all(&disks, true).await;
if let Err(err) = check_format_erasure_values(&formats, self.set_drive_count) {
info!("failed to check formats erasure values: {}", err);
return Ok((HealResultItem::default(), Some(err)));
}
let ref_format = match get_format_erasure_in_quorum(&formats) {
Ok(format) => format,
Err(err) => return Ok((HealResultItem::default(), Some(err))),
};
let mut res = HealResultItem {
heal_item_type: HealItemType::Metadata.to_string(),
detail: "disk-format".to_string(),
disk_count: self.set_count * self.set_drive_count,
set_count: self.set_count,
..Default::default()
};
let before_derives = formats_to_drives_info(&self.endpoints.endpoints, &formats, &errs);
res.before.drives = vec![HealDriveInfo::default(); before_derives.len()];
res.after.drives = vec![HealDriveInfo::default(); before_derives.len()];
for v in before_derives.iter() {
res.before.drives.push(v.clone());
res.after.drives.push(v.clone());
}
if count_errs(&errs, &DiskError::UnformattedDisk) == 0 {
info!("disk formats success, NoHealRequired, errs: {:?}", errs);
return Ok((res, Some(StorageError::NoHealRequired)));
}
// if !self.format.eq(&ref_format) {
// info!("format ({:?}) not eq ref_format ({:?})", self.format, ref_format);
// return Ok((res, Some(Error::new(DiskError::CorruptedFormat))));
// }
let (new_format_sets, _) = new_heal_format_sets(&ref_format, self.set_count, self.set_drive_count, &formats, &errs);
if !dry_run {
let mut tmp_new_formats = vec![None; self.set_count * self.set_drive_count];
for (i, set) in new_format_sets.iter().enumerate() {
for (j, fm) in set.iter().enumerate() {
if let Some(fm) = fm {
res.after.drives[i * self.set_drive_count + j].uuid = fm.erasure.this.to_string();
res.after.drives[i * self.set_drive_count + j].state = DriveState::Ok.to_string();
tmp_new_formats[i * self.set_drive_count + j] = Some(fm.clone());
}
}
}
// Save new formats `format.json` on unformatted disks.
for (fm, disk) in tmp_new_formats.iter_mut().zip(disks.iter()) {
if fm.is_some() && disk.is_some() && save_format_file(disk, fm).await.is_err() {
let _ = disk.as_ref().unwrap().close().await;
*fm = None;
}
}
for (index, fm) in tmp_new_formats.iter().enumerate() {
if let Some(fm) = fm {
let (m, n) = match ref_format.find_disk_index_by_disk_id(fm.erasure.this) {
Ok((m, n)) => (m, n),
Err(_) => continue,
};
if let Some(set) = self.disk_set.get(m)
&& let Some(Some(disk)) = set.disks.read().await.get(n)
{
let _ = disk.close().await;
}
if let Some(Some(disk)) = disks.get(index) {
self.disk_set[m].renew_disk(&disk.endpoint()).await;
}
}
}
}
Ok((res, None))
}
#[tracing::instrument(skip(self))]
async fn heal_bucket(&self, _bucket: &str, _opts: &HealOpts) -> Result<HealResultItem> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn heal_object(
&self,
bucket: &str,
object: &str,
version_id: &str,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)> {
self.get_disks_by_key(object)
.heal_object(bucket, object, version_id, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn get_pool_and_set(&self, _id: &str) -> Result<(Option<usize>, Option<usize>, Option<usize>)> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn check_abandoned_parts(&self, _bucket: &str, _object: &str, _opts: &HealOpts) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(level = "debug", skip(self))]
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
let gor = self.get_object_reader(bucket, object, None, HeaderMap::new(), opts).await?;
let mut reader = gor.stream;
// Stream data to sink instead of reading all into memory to prevent OOM
tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
Ok(())
}
}
async fn _close_storage_disks(disks: &[Option<DiskStore>]) {
let mut futures = Vec::with_capacity(disks.len());
for disk in disks.iter().flatten() {
let disk = disk.clone();
futures.push(tokio::spawn(async move {
let _ = disk.close().await;
}));
}
let _ = join_all(futures).await;
}
async fn init_storage_disks_with_errors(
endpoints: &Endpoints,
opts: &DiskOption,
) -> (Vec<Option<DiskStore>>, Vec<Option<DiskError>>) {
// Bootstrap disks.
// let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
// let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
let mut futures = Vec::with_capacity(endpoints.as_ref().len());
for endpoint in endpoints.as_ref().iter() {
futures.push(new_disk(endpoint, opts));
// let ep = endpoint.clone();
// let opt = opts.clone();
// let disks_clone = disks.clone();
// let errs_clone = errs.clone();
// futures.push(tokio::spawn(async move {
// match new_disk(&ep, &opt).await {
// Ok(disk) => {
// disks_clone.write().await[index] = Some(disk);
// errs_clone.write().await[index] = None;
// }
// Err(err) => {
// disks_clone.write().await[index] = None;
// errs_clone.write().await[index] = Some(err);
// }
// }
// }));
}
// let _ = join_all(futures).await;
// let disks = disks.read().await.clone();
// let errs = errs.read().await.clone();
let mut disks = Vec::with_capacity(endpoints.as_ref().len());
let mut errs = Vec::with_capacity(endpoints.as_ref().len());
let results = join_all(futures).await;
for result in results {
match result {
Ok(disk) => {
disks.push(Some(disk));
errs.push(None);
}
Err(err) => {
disks.push(None);
errs.push(Some(err));
}
}
}
(disks, errs)
}
fn formats_to_drives_info(endpoints: &Endpoints, formats: &[Option<FormatV3>], errs: &[Option<DiskError>]) -> Vec<HealDriveInfo> {
let mut before_drives = Vec::with_capacity(endpoints.as_ref().len());
for (index, format) in formats.iter().enumerate() {
let drive = endpoints.get_string(index);
let state = if format.is_some() {
DriveState::Ok.to_string()
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bitrot.rs | crates/ecstore/src/bitrot.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::disk::error::DiskError;
use crate::disk::{self, DiskAPI as _, DiskStore};
use crate::erasure_coding::{BitrotReader, BitrotWriterWrapper, CustomWriter};
use rustfs_utils::HashAlgorithm;
use std::io::Cursor;
use tokio::io::AsyncRead;
/// Create a BitrotReader from either inline data or disk file stream
///
/// # Parameters
/// * `inline_data` - Optional inline data, if present, will use Cursor to read from memory
/// * `disk` - Optional disk reference for file stream reading
/// * `bucket` - Bucket name for file path
/// * `path` - File path within the bucket
/// * `offset` - Starting offset for reading
/// * `length` - Length to read
/// * `shard_size` - Shard size for erasure coding
/// * `checksum_algo` - Hash algorithm for bitrot verification
#[allow(clippy::too_many_arguments)]
pub async fn create_bitrot_reader(
inline_data: Option<&[u8]>,
disk: Option<&DiskStore>,
bucket: &str,
path: &str,
offset: usize,
length: usize,
shard_size: usize,
checksum_algo: HashAlgorithm,
) -> disk::error::Result<Option<BitrotReader<Box<dyn AsyncRead + Send + Sync + Unpin>>>> {
// Calculate the total length to read, including the checksum overhead
let length = length.div_ceil(shard_size) * checksum_algo.size() + length;
let offset = offset.div_ceil(shard_size) * checksum_algo.size() + offset;
if let Some(data) = inline_data {
// Use inline data
let mut rd = Cursor::new(data.to_vec());
rd.set_position(offset as u64);
let reader = BitrotReader::new(Box::new(rd) as Box<dyn AsyncRead + Send + Sync + Unpin>, shard_size, checksum_algo);
Ok(Some(reader))
} else if let Some(disk) = disk {
// Read from disk
match disk.read_file_stream(bucket, path, offset, length - offset).await {
Ok(rd) => {
let reader = BitrotReader::new(rd, shard_size, checksum_algo);
Ok(Some(reader))
}
Err(e) => Err(e),
}
} else {
// Neither inline data nor disk available
Ok(None)
}
}
/// Create a new BitrotWriterWrapper based on the provided parameters
///
/// # Parameters
/// - `is_inline_buffer`: If true, creates an in-memory buffer writer; if false, uses disk storage
/// - `disk`: Optional disk instance for file creation (used when is_inline_buffer is false)
/// - `shard_size`: Size of each shard for bitrot calculation
/// - `checksum_algo`: Hash algorithm to use for bitrot verification
/// - `volume`: Volume/bucket name for disk storage
/// - `path`: File path for disk storage
/// - `length`: Expected file length for disk storage
///
/// # Returns
/// A Result containing the BitrotWriterWrapper or an error
pub async fn create_bitrot_writer(
is_inline_buffer: bool,
disk: Option<&DiskStore>,
volume: &str,
path: &str,
length: i64,
shard_size: usize,
checksum_algo: HashAlgorithm,
) -> disk::error::Result<BitrotWriterWrapper> {
let writer = if is_inline_buffer {
CustomWriter::new_inline_buffer()
} else if let Some(disk) = disk {
let length = if length > 0 {
let length = length as usize;
(length.div_ceil(shard_size) * checksum_algo.size() + length) as i64
} else {
0
};
let file = disk.create_file("", volume, path, length).await?;
CustomWriter::new_tokio_writer(file)
} else {
return Err(DiskError::DiskNotFound);
};
Ok(BitrotWriterWrapper::new(writer, shard_size, checksum_algo))
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_create_bitrot_reader_with_inline_data() {
let test_data = b"hello world test data";
let shard_size = 16;
let checksum_algo = HashAlgorithm::HighwayHash256;
let result =
create_bitrot_reader(Some(test_data), None, "test-bucket", "test-path", 0, 0, shard_size, checksum_algo).await;
assert!(result.is_ok());
assert!(result.unwrap().is_some());
}
#[tokio::test]
async fn test_create_bitrot_reader_without_data_or_disk() {
let shard_size = 16;
let checksum_algo = HashAlgorithm::HighwayHash256;
let result = create_bitrot_reader(None, None, "test-bucket", "test-path", 0, 1024, shard_size, checksum_algo).await;
assert!(result.is_ok());
assert!(result.unwrap().is_none());
}
#[tokio::test]
async fn test_create_bitrot_writer_inline() {
use rustfs_utils::HashAlgorithm;
let wrapper = create_bitrot_writer(
true, // is_inline_buffer
None, // disk not needed for inline buffer
"test-volume",
"test-path",
1024, // length
1024, // shard_size
HashAlgorithm::HighwayHash256,
)
.await;
assert!(wrapper.is_ok());
let mut wrapper = wrapper.unwrap();
// Test writing some data
let test_data = b"hello world";
let result = wrapper.write(test_data).await;
assert!(result.is_ok());
// Test getting inline data
let inline_data = wrapper.into_inline_data();
assert!(inline_data.is_some());
// The inline data should contain both hash and data
let data = inline_data.unwrap();
assert!(!data.is_empty());
}
#[tokio::test]
async fn test_create_bitrot_writer_disk_without_disk() {
use rustfs_utils::HashAlgorithm;
// Test error case: trying to create disk writer without providing disk instance
let wrapper = create_bitrot_writer(
false, // is_inline_buffer = false, so needs disk
None, // disk = None, should cause error
"test-volume",
"test-path",
1024, // length
1024, // shard_size
HashAlgorithm::HighwayHash256,
)
.await;
assert!(wrapper.is_err());
let error = wrapper.unwrap_err();
println!("error: {error:?}");
assert_eq!(error, DiskError::DiskNotFound);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/encode.rs | crates/ecstore/src/erasure_coding/encode.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::BitrotWriterWrapper;
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::count_errs;
use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_write_quorum_errs};
use bytes::Bytes;
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use std::sync::Arc;
use std::vec;
use tokio::io::AsyncRead;
use tokio::sync::mpsc;
use tracing::error;
pub(crate) struct MultiWriter<'a> {
writers: &'a mut [Option<BitrotWriterWrapper>],
write_quorum: usize,
errs: Vec<Option<Error>>,
}
impl<'a> MultiWriter<'a> {
pub fn new(writers: &'a mut [Option<BitrotWriterWrapper>], write_quorum: usize) -> Self {
let length = writers.len();
MultiWriter {
writers,
write_quorum,
errs: vec![None; length],
}
}
async fn write_shard(writer_opt: &mut Option<BitrotWriterWrapper>, err: &mut Option<Error>, shard: &Bytes) {
match writer_opt {
Some(writer) => {
match writer.write(shard).await {
Ok(n) => {
if n < shard.len() {
*err = Some(Error::ShortWrite);
*writer_opt = None; // Mark as failed
} else {
*err = None;
}
}
Err(e) => {
*err = Some(Error::from(e));
}
}
}
None => {
*err = Some(Error::DiskNotFound);
}
}
}
pub async fn write(&mut self, data: Vec<Bytes>) -> std::io::Result<()> {
assert_eq!(data.len(), self.writers.len());
{
let mut futures = FuturesUnordered::new();
for ((writer_opt, err), shard) in self.writers.iter_mut().zip(self.errs.iter_mut()).zip(data.iter()) {
if err.is_some() {
continue; // Skip if we already have an error for this writer
}
futures.push(Self::write_shard(writer_opt, err, shard));
}
while let Some(()) = futures.next().await {}
}
let nil_count = self.errs.iter().filter(|&e| e.is_none()).count();
if nil_count >= self.write_quorum {
return Ok(());
}
if let Some(write_err) = reduce_write_quorum_errs(&self.errs, OBJECT_OP_IGNORED_ERRS, self.write_quorum) {
error!(
"reduce_write_quorum_errs: {:?}, offline-disks={}/{}, errs={:?}",
write_err,
count_errs(&self.errs, &Error::DiskNotFound),
self.writers.len(),
self.errs
);
return Err(std::io::Error::other(format!(
"Failed to write data: {} (offline-disks={}/{})",
write_err,
count_errs(&self.errs, &Error::DiskNotFound),
self.writers.len()
)));
}
Err(std::io::Error::other(format!(
"Failed to write data: (offline-disks={}/{}): {}",
count_errs(&self.errs, &Error::DiskNotFound),
self.writers.len(),
self.errs
.iter()
.map(|e| e.as_ref().map_or("<nil>".to_string(), |e| e.to_string()))
.collect::<Vec<_>>()
.join(", ")
)))
}
pub async fn _shutdown(&mut self) -> std::io::Result<()> {
for writer in self.writers.iter_mut().flatten() {
writer.shutdown().await?;
}
Ok(())
}
}
impl Erasure {
pub async fn encode<R>(
self: Arc<Self>,
mut reader: R,
writers: &mut [Option<BitrotWriterWrapper>],
quorum: usize,
) -> std::io::Result<(R, usize)>
where
R: AsyncRead + Send + Sync + Unpin + 'static,
{
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let task = tokio::spawn(async move {
let block_size = self.block_size;
let mut total = 0;
let mut buf = vec![0u8; block_size];
loop {
match rustfs_utils::read_full(&mut reader, &mut buf).await {
Ok(n) if n > 0 => {
total += n;
let res = self.encode_data(&buf[..n])?;
if let Err(err) = tx.send(res).await {
return Err(std::io::Error::other(format!("Failed to send encoded data : {err}")));
}
}
Ok(_) => {
break;
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
// Check if the inner error is a checksum mismatch - if so, propagate it
if let Some(inner) = e.get_ref()
&& rustfs_rio::is_checksum_mismatch(inner)
{
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
break;
}
Err(e) => {
return Err(e);
}
}
}
Ok((reader, total))
});
let mut writers = MultiWriter::new(writers, quorum);
while let Some(block) = rx.recv().await {
if block.is_empty() {
break;
}
writers.write(block).await?;
}
let (reader, total) = task.await??;
// writers.shutdown().await?;
Ok((reader, total))
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/decode.rs | crates/ecstore/src/erasure_coding/decode.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::BitrotReader;
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::reduce_errs;
use futures::stream::{FuturesUnordered, StreamExt};
use pin_project_lite::pin_project;
use std::io;
use std::io::ErrorKind;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tracing::error;
pin_project! {
pub(crate) struct ParallelReader<R> {
#[pin]
readers: Vec<Option<BitrotReader<R>>>,
offset: usize,
shard_size: usize,
shard_file_size: usize,
data_shards: usize,
total_shards: usize,
}
}
impl<R> ParallelReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
// Readers should handle disk errors before being passed in, ensuring each reader reaches the available number of BitrotReaders
pub fn new(readers: Vec<Option<BitrotReader<R>>>, e: Erasure, offset: usize, total_length: usize) -> Self {
let shard_size = e.shard_size();
let shard_file_size = e.shard_file_size(total_length as i64) as usize;
let offset = (offset / e.block_size) * shard_size;
// Ensure offset does not exceed shard_file_size
ParallelReader {
readers,
offset,
shard_size,
shard_file_size,
data_shards: e.data_shards,
total_shards: e.data_shards + e.parity_shards,
}
}
}
impl<R> ParallelReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
pub async fn read(&mut self) -> (Vec<Option<Vec<u8>>>, Vec<Option<Error>>) {
// if self.readers.len() != self.total_shards {
// return Err(io::Error::new(ErrorKind::InvalidInput, "Invalid number of readers"));
// }
let num_readers = self.readers.len();
let shard_size = if self.offset + self.shard_size > self.shard_file_size {
self.shard_file_size - self.offset
} else {
self.shard_size
};
if shard_size == 0 {
return (vec![None; num_readers], vec![None; num_readers]);
}
let mut shards: Vec<Option<Vec<u8>>> = vec![None; num_readers];
let mut errs = vec![None; num_readers];
let mut futures = Vec::with_capacity(self.total_shards);
let reader_iter: std::slice::IterMut<'_, Option<BitrotReader<R>>> = self.readers.iter_mut();
for (i, reader) in reader_iter.enumerate() {
let future = if let Some(reader) = reader {
Box::pin(async move {
let mut buf = vec![0u8; shard_size];
match reader.read(&mut buf).await {
Ok(n) => {
buf.truncate(n);
(i, Ok(buf))
}
Err(e) => (i, Err(Error::from(e))),
}
}) as std::pin::Pin<Box<dyn std::future::Future<Output = (usize, Result<Vec<u8>, Error>)> + Send>>
} else {
// Return FileNotFound error when reader is None
Box::pin(async move { (i, Err(Error::FileNotFound)) })
as std::pin::Pin<Box<dyn std::future::Future<Output = (usize, Result<Vec<u8>, Error>)> + Send>>
};
futures.push(future);
}
if futures.len() >= self.data_shards {
let mut fut_iter = futures.into_iter();
let mut sets = FuturesUnordered::new();
for _ in 0..self.data_shards {
if let Some(future) = fut_iter.next() {
sets.push(future);
}
}
let mut success = 0;
while let Some((i, result)) = sets.next().await {
match result {
Ok(v) => {
shards[i] = Some(v);
success += 1;
}
Err(e) => {
errs[i] = Some(e);
if let Some(future) = fut_iter.next() {
sets.push(future);
}
}
}
if success >= self.data_shards {
break;
}
}
}
(shards, errs)
}
pub fn can_decode(&self, shards: &[Option<Vec<u8>>]) -> bool {
shards.iter().filter(|s| s.is_some()).count() >= self.data_shards
}
}
/// Get the total length of data blocks
fn get_data_block_len(shards: &[Option<Vec<u8>>], data_blocks: usize) -> usize {
let mut size = 0;
for shard in shards.iter().take(data_blocks).flatten() {
size += shard.len();
}
size
}
/// Write data blocks from encoded blocks to target, supporting offset and length
async fn write_data_blocks<W>(
writer: &mut W,
en_blocks: &[Option<Vec<u8>>],
data_blocks: usize,
mut offset: usize,
length: usize,
) -> std::io::Result<usize>
where
W: tokio::io::AsyncWrite + Send + Sync + Unpin,
{
if get_data_block_len(en_blocks, data_blocks) < length {
error!("write_data_blocks get_data_block_len < length");
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Not enough data blocks to write"));
}
let mut total_written = 0;
let mut write_left = length;
for block_op in &en_blocks[..data_blocks] {
let Some(block) = block_op else {
error!("write_data_blocks block_op.is_none()");
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Missing data block"));
};
if offset >= block.len() {
offset -= block.len();
continue;
}
let block_slice = &block[offset..];
offset = 0;
if write_left < block_slice.len() {
writer.write_all(&block_slice[..write_left]).await.map_err(|e| {
error!("write_data_blocks write_all err: {}", e);
e
})?;
total_written += write_left;
break;
}
let n = block_slice.len();
writer.write_all(block_slice).await.map_err(|e| {
error!("write_data_blocks write_all2 err: {}", e);
e
})?;
write_left -= n;
total_written += n;
}
Ok(total_written)
}
impl Erasure {
pub async fn decode<W, R>(
&self,
writer: &mut W,
readers: Vec<Option<BitrotReader<R>>>,
offset: usize,
length: usize,
total_length: usize,
) -> (usize, Option<std::io::Error>)
where
W: AsyncWrite + Send + Sync + Unpin,
R: AsyncRead + Unpin + Send + Sync,
{
if readers.len() != self.data_shards + self.parity_shards {
return (0, Some(io::Error::new(ErrorKind::InvalidInput, "Invalid number of readers")));
}
if offset + length > total_length {
return (0, Some(io::Error::new(ErrorKind::InvalidInput, "offset + length exceeds total length")));
}
let mut ret_err = None;
if length == 0 {
return (0, ret_err);
}
let mut written = 0;
let mut reader = ParallelReader::new(readers, self.clone(), offset, total_length);
let start = offset / self.block_size;
let end = (offset + length) / self.block_size;
for i in start..=end {
let (block_offset, block_length) = if start == end {
(offset % self.block_size, length)
} else if i == start {
(offset % self.block_size, self.block_size - (offset % self.block_size))
} else if i == end {
(0, (offset + length) % self.block_size)
} else {
(0, self.block_size)
};
if block_length == 0 {
// error!("erasure decode decode block_length == 0");
break;
}
let (mut shards, errs) = reader.read().await;
if ret_err.is_none()
&& let (_, Some(err)) = reduce_errs(&errs, &[])
&& (err == Error::FileNotFound || err == Error::FileCorrupt)
{
ret_err = Some(err.into());
}
if !reader.can_decode(&shards) {
error!("erasure decode can_decode errs: {:?}", &errs);
ret_err = Some(Error::ErasureReadQuorum.into());
break;
}
// Decode the shards
if let Err(e) = self.decode_data(&mut shards) {
error!("erasure decode decode_data err: {:?}", e);
ret_err = Some(e);
break;
}
let n = match write_data_blocks(writer, &shards, self.data_shards, block_offset, block_length).await {
Ok(n) => n,
Err(e) => {
error!("erasure decode write_data_blocks err: {:?}", e);
ret_err = Some(e);
break;
}
};
written += n;
}
if ret_err.is_some() {
return (written, ret_err);
}
if written < length {
ret_err = Some(Error::LessData.into());
}
(written, ret_err)
}
}
#[cfg(test)]
mod tests {
use rustfs_utils::HashAlgorithm;
use crate::{disk::error::DiskError, erasure_coding::BitrotWriter};
use super::*;
use std::io::Cursor;
#[tokio::test]
async fn test_parallel_reader_normal() {
const BLOCK_SIZE: usize = 64;
const NUM_SHARDS: usize = 2;
const DATA_SHARDS: usize = 8;
const PARITY_SHARDS: usize = 4;
const SHARD_SIZE: usize = BLOCK_SIZE / DATA_SHARDS;
let reader_offset = 0;
let mut readers = vec![];
for i in 0..(DATA_SHARDS + PARITY_SHARDS) {
readers.push(Some(
create_reader(SHARD_SIZE, NUM_SHARDS, (i % 256) as u8, &HashAlgorithm::HighwayHash256, false).await,
));
}
let erausre = Erasure::new(DATA_SHARDS, PARITY_SHARDS, BLOCK_SIZE);
let mut parallel_reader = ParallelReader::new(readers, erausre, reader_offset, NUM_SHARDS * BLOCK_SIZE);
for _ in 0..NUM_SHARDS {
let (bufs, errs) = parallel_reader.read().await;
bufs.into_iter().enumerate().for_each(|(index, buf)| {
if index < DATA_SHARDS {
assert!(buf.is_some());
let buf = buf.unwrap();
assert_eq!(SHARD_SIZE, buf.len());
assert_eq!(index as u8, buf[0]);
} else {
assert!(buf.is_none());
}
});
assert!(errs.iter().filter(|err| err.is_some()).count() == 0);
}
}
#[tokio::test]
async fn test_parallel_reader_with_offline_disks() {
const OFFLINE_DISKS: usize = 2;
const NUM_SHARDS: usize = 2;
const BLOCK_SIZE: usize = 64;
const DATA_SHARDS: usize = 8;
const PARITY_SHARDS: usize = 4;
const SHARD_SIZE: usize = BLOCK_SIZE / DATA_SHARDS;
let reader_offset = 0;
let mut readers = vec![];
for i in 0..(DATA_SHARDS + PARITY_SHARDS) {
if i < OFFLINE_DISKS {
// Two disks are offline
readers.push(None);
} else {
readers.push(Some(
create_reader(SHARD_SIZE, NUM_SHARDS, (i % 256) as u8, &HashAlgorithm::HighwayHash256, false).await,
));
}
}
let erausre = Erasure::new(DATA_SHARDS, PARITY_SHARDS, BLOCK_SIZE);
let mut parallel_reader = ParallelReader::new(readers, erausre, reader_offset, NUM_SHARDS * BLOCK_SIZE);
for _ in 0..NUM_SHARDS {
let (bufs, errs) = parallel_reader.read().await;
assert_eq!(DATA_SHARDS, bufs.iter().filter(|buf| buf.is_some()).count());
assert_eq!(OFFLINE_DISKS, errs.iter().filter(|err| err.is_some()).count());
}
}
#[tokio::test]
async fn test_parallel_reader_with_bitrots() {
const BITROT_DISKS: usize = 2;
const NUM_SHARDS: usize = 2;
const BLOCK_SIZE: usize = 64;
const DATA_SHARDS: usize = 8;
const PARITY_SHARDS: usize = 4;
const SHARD_SIZE: usize = BLOCK_SIZE / DATA_SHARDS;
let reader_offset = 0;
let mut readers = vec![];
for i in 0..(DATA_SHARDS + PARITY_SHARDS) {
readers.push(Some(
create_reader(SHARD_SIZE, NUM_SHARDS, (i % 256) as u8, &HashAlgorithm::HighwayHash256, i < BITROT_DISKS).await,
));
}
let erausre = Erasure::new(DATA_SHARDS, PARITY_SHARDS, BLOCK_SIZE);
let mut parallel_reader = ParallelReader::new(readers, erausre, reader_offset, NUM_SHARDS * BLOCK_SIZE);
for _ in 0..NUM_SHARDS {
let (bufs, errs) = parallel_reader.read().await;
assert_eq!(DATA_SHARDS, bufs.iter().filter(|buf| buf.is_some()).count());
assert_eq!(
BITROT_DISKS,
errs.iter()
.filter(|err| {
match err {
Some(DiskError::Io(err)) => {
err.kind() == std::io::ErrorKind::InvalidData && err.to_string().contains("bitrot")
}
_ => false,
}
})
.count()
);
}
}
async fn create_reader(
shard_size: usize,
num_shards: usize,
value: u8,
hash_algo: &HashAlgorithm,
bitrot: bool,
) -> BitrotReader<Cursor<Vec<u8>>> {
let len = (hash_algo.size() + shard_size) * num_shards;
let buf = Cursor::new(vec![0u8; len]);
let mut writer = BitrotWriter::new(buf, shard_size, hash_algo.clone());
for _ in 0..num_shards {
writer.write(vec![value; shard_size].as_slice()).await.unwrap();
}
let mut buf = writer.into_inner().into_inner();
if bitrot {
for i in 0..num_shards {
// Rot one bit for each shard
buf[i * (hash_algo.size() + shard_size)] ^= 1;
}
}
let reader_cursor = Cursor::new(buf);
BitrotReader::new(reader_cursor, shard_size, hash_algo.clone())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/heal.rs | crates/ecstore/src/erasure_coding/heal.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::BitrotReader;
use super::BitrotWriterWrapper;
use super::decode::ParallelReader;
use crate::disk::error::{Error, Result};
use crate::erasure_coding::encode::MultiWriter;
use bytes::Bytes;
use tokio::io::AsyncRead;
use tracing::info;
impl super::Erasure {
pub async fn heal<R>(
&self,
writers: &mut [Option<BitrotWriterWrapper>],
readers: Vec<Option<BitrotReader<R>>>,
total_length: usize,
_prefer: &[bool],
) -> Result<()>
where
R: AsyncRead + Unpin + Send + Sync,
{
info!(
"Erasure heal, writers len: {}, readers len: {}, total_length: {}",
writers.len(),
readers.len(),
total_length
);
if writers.len() != self.parity_shards + self.data_shards {
return Err(Error::other("invalid argument"));
}
let mut reader = ParallelReader::new(readers, self.clone(), 0, total_length);
let start_block = 0;
let mut end_block = total_length / self.block_size;
if !total_length.is_multiple_of(self.block_size) {
end_block += 1;
}
for _ in start_block..end_block {
let (mut shards, errs) = reader.read().await;
// Check if we have enough shards to reconstruct data
// We need at least data_shards available shards (data + parity combined)
let available_shards = errs.iter().filter(|e| e.is_none()).count();
if available_shards < self.data_shards {
return Err(Error::other(format!(
"can not reconstruct data: not enough available shards (need {}, have {}) {errs:?}",
self.data_shards, available_shards
)));
}
if self.parity_shards > 0 {
self.decode_data(&mut shards)?;
}
let shards = shards
.into_iter()
.map(|s| Bytes::from(s.unwrap_or_default()))
.collect::<Vec<_>>();
// Calculate proper write quorum for heal operation
// For heal, we only write to disks that need healing, so write quorum should be
// the number of available writers (disks that need healing)
let available_writers = writers.iter().filter(|w| w.is_some()).count();
let write_quorum = available_writers.max(1); // At least 1 writer must succeed
let mut writers = MultiWriter::new(writers, write_quorum);
writers.write(shards).await?;
}
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/mod.rs | crates/ecstore/src/erasure_coding/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod decode;
pub mod encode;
pub mod erasure;
pub mod heal;
mod bitrot;
pub use bitrot::*;
pub use erasure::{Erasure, ReedSolomonEncoder, calc_shard_size};
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/erasure.rs | crates/ecstore/src/erasure_coding/erasure.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Erasure coding implementation using Reed-Solomon SIMD backend.
//!
//! This module provides erasure coding functionality with high-performance SIMD
//! Reed-Solomon implementation:
//!
//! ## Reed-Solomon Implementation
//!
//! ### SIMD Mode (Only)
//! - **Performance**: Uses SIMD optimization for high-performance encoding/decoding
//! - **Compatibility**: Works with any shard size through SIMD implementation
//! - **Reliability**: High-performance SIMD implementation for large data processing
//! - **Use case**: Optimized for maximum performance in large data processing scenarios
//!
//! ## Example
//!
//! ```ignore
//! use rustfs_ecstore::erasure_coding::Erasure;
//!
//! let erasure = Erasure::new(4, 2, 1024); // 4 data shards, 2 parity shards, 1KB block size
//! let data = b"hello world";
//! let shards = erasure.encode_data(data).unwrap();
//! // Simulate loss and recovery...
//! ```
use bytes::{Bytes, BytesMut};
use reed_solomon_simd;
use smallvec::SmallVec;
use std::io;
use tokio::io::AsyncRead;
use tracing::warn;
use uuid::Uuid;
/// Reed-Solomon encoder using SIMD implementation.
pub struct ReedSolomonEncoder {
data_shards: usize,
parity_shards: usize,
// Use RwLock to ensure thread safety, implementing Send + Sync
encoder_cache: std::sync::RwLock<Option<reed_solomon_simd::ReedSolomonEncoder>>,
decoder_cache: std::sync::RwLock<Option<reed_solomon_simd::ReedSolomonDecoder>>,
}
impl Clone for ReedSolomonEncoder {
fn clone(&self) -> Self {
Self {
data_shards: self.data_shards,
parity_shards: self.parity_shards,
// Create an empty cache for the new instance instead of sharing one
encoder_cache: std::sync::RwLock::new(None),
decoder_cache: std::sync::RwLock::new(None),
}
}
}
impl ReedSolomonEncoder {
/// Create a new Reed-Solomon encoder with specified data and parity shards.
pub fn new(data_shards: usize, parity_shards: usize) -> io::Result<Self> {
Ok(ReedSolomonEncoder {
data_shards,
parity_shards,
encoder_cache: std::sync::RwLock::new(None),
decoder_cache: std::sync::RwLock::new(None),
})
}
/// Encode data shards with parity.
pub fn encode(&self, shards: SmallVec<[&mut [u8]; 16]>) -> io::Result<()> {
let mut shards_vec: Vec<&mut [u8]> = shards.into_vec();
if shards_vec.is_empty() {
return Ok(());
}
let simd_result = self.encode_with_simd(&mut shards_vec);
match simd_result {
Ok(()) => Ok(()),
Err(simd_error) => {
warn!("SIMD encoding failed: {}", simd_error);
Err(simd_error)
}
}
}
fn encode_with_simd(&self, shards_vec: &mut [&mut [u8]]) -> io::Result<()> {
let shard_len = shards_vec[0].len();
// Get or create encoder
let mut encoder = {
let mut cache_guard = self
.encoder_cache
.write()
.map_err(|_| io::Error::other("Failed to acquire encoder cache lock"))?;
match cache_guard.take() {
Some(mut cached_encoder) => {
// Use reset method to reset existing encoder to adapt to new parameters
if let Err(e) = cached_encoder.reset(self.data_shards, self.parity_shards, shard_len) {
warn!("Failed to reset SIMD encoder: {:?}, creating new one", e);
// If reset fails, create new encoder
reed_solomon_simd::ReedSolomonEncoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {e:?}")))?
} else {
cached_encoder
}
}
None => {
// First use, create new encoder
reed_solomon_simd::ReedSolomonEncoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {e:?}")))?
}
}
};
// Add original shards
for (i, shard) in shards_vec.iter().enumerate().take(self.data_shards) {
encoder
.add_original_shard(shard)
.map_err(|e| io::Error::other(format!("Failed to add shard {i}: {e:?}")))?;
}
// Encode and get recovery shards
let result = encoder
.encode()
.map_err(|e| io::Error::other(format!("SIMD encoding failed: {e:?}")))?;
// Copy recovery shards to output buffer
for (i, recovery_shard) in result.recovery_iter().enumerate() {
if i + self.data_shards < shards_vec.len() {
shards_vec[i + self.data_shards].copy_from_slice(recovery_shard);
}
}
// Return encoder to cache (encoder is automatically reset after result is dropped, can be reused)
drop(result); // Explicitly drop result to ensure encoder is reset
*self
.encoder_cache
.write()
.map_err(|_| io::Error::other("Failed to return encoder to cache"))? = Some(encoder);
Ok(())
}
/// Reconstruct missing shards.
pub fn reconstruct(&self, shards: &mut [Option<Vec<u8>>]) -> io::Result<()> {
// Use SIMD for reconstruction
let simd_result = self.reconstruct_with_simd(shards);
match simd_result {
Ok(()) => Ok(()),
Err(simd_error) => {
warn!("SIMD reconstruction failed: {}", simd_error);
Err(simd_error)
}
}
}
fn reconstruct_with_simd(&self, shards: &mut [Option<Vec<u8>>]) -> io::Result<()> {
// Find a valid shard to determine length
let shard_len = shards
.iter()
.find_map(|s| s.as_ref().map(|v| v.len()))
.ok_or_else(|| io::Error::other("No valid shards found for reconstruction"))?;
let mut decoder = {
let mut cache_guard = self
.decoder_cache
.write()
.map_err(|_| io::Error::other("Failed to acquire decoder cache lock"))?;
match cache_guard.take() {
Some(mut cached_decoder) => {
if let Err(e) = cached_decoder.reset(self.data_shards, self.parity_shards, shard_len) {
warn!("Failed to reset SIMD decoder: {:?}, creating new one", e);
reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?
} else {
cached_decoder
}
}
None => reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?,
}
};
// Add available shards (both data and parity)
for (i, shard_opt) in shards.iter().enumerate() {
if let Some(shard) = shard_opt {
if i < self.data_shards {
decoder
.add_original_shard(i, shard)
.map_err(|e| io::Error::other(format!("Failed to add original shard for reconstruction: {e:?}")))?;
} else {
let recovery_idx = i - self.data_shards;
decoder
.add_recovery_shard(recovery_idx, shard)
.map_err(|e| io::Error::other(format!("Failed to add recovery shard for reconstruction: {e:?}")))?;
}
}
}
let result = decoder
.decode()
.map_err(|e| io::Error::other(format!("SIMD decode error: {e:?}")))?;
// Fill in missing data shards from reconstruction result
for (i, shard_opt) in shards.iter_mut().enumerate() {
if shard_opt.is_none() && i < self.data_shards {
for (restored_index, restored_data) in result.restored_original_iter() {
if restored_index == i {
*shard_opt = Some(restored_data.to_vec());
break;
}
}
}
}
drop(result);
*self
.decoder_cache
.write()
.map_err(|_| io::Error::other("Failed to return decoder to cache"))? = Some(decoder);
Ok(())
}
}
/// Erasure coding utility for data reliability using Reed-Solomon codes.
///
/// This struct provides encoding and decoding of data into data and parity shards.
/// It supports splitting data into multiple shards, generating parity for fault tolerance,
/// and reconstructing lost shards.
///
/// # Fields
/// - `data_shards`: Number of data shards.
/// - `parity_shards`: Number of parity shards.
/// - `encoder`: Optional ReedSolomon encoder instance.
/// - `block_size`: Block size for each shard.
/// - `_id`: Unique identifier for the erasure instance.
/// - `_buf`: Internal buffer for block operations.
///
/// # Example
/// ```ignore
/// use rustfs_ecstore::erasure_coding::Erasure;
/// let erasure = Erasure::new(4, 2, 8);
/// let data = b"hello world";
/// let shards = erasure.encode_data(data).unwrap();
/// // Simulate loss and recovery...
/// ```
#[derive(Default)]
pub struct Erasure {
pub data_shards: usize,
pub parity_shards: usize,
encoder: Option<ReedSolomonEncoder>,
pub block_size: usize,
_id: Uuid,
_buf: Vec<u8>,
}
impl Clone for Erasure {
fn clone(&self) -> Self {
Self {
data_shards: self.data_shards,
parity_shards: self.parity_shards,
encoder: self.encoder.clone(),
block_size: self.block_size,
_id: Uuid::new_v4(), // Generate new ID for clone
_buf: vec![0u8; self.block_size],
}
}
}
pub fn calc_shard_size(block_size: usize, data_shards: usize) -> usize {
(block_size.div_ceil(data_shards) + 1) & !1
}
impl Erasure {
/// Create a new Erasure instance.
///
/// # Arguments
/// * `data_shards` - Number of data shards.
/// * `parity_shards` - Number of parity shards.
/// * `block_size` - Block size for each shard.
pub fn new(data_shards: usize, parity_shards: usize, block_size: usize) -> Self {
let encoder = if parity_shards > 0 {
Some(ReedSolomonEncoder::new(data_shards, parity_shards).unwrap())
} else {
None
};
Erasure {
data_shards,
parity_shards,
block_size,
encoder,
_id: Uuid::new_v4(),
_buf: vec![0u8; block_size],
}
}
/// Encode data into data and parity shards.
///
/// # Arguments
/// * `data` - The input data to encode.
///
/// # Returns
/// A vector of encoded shards as `Bytes`.
#[tracing::instrument(level = "info", skip_all, fields(data_len=data.len()))]
pub fn encode_data(&self, data: &[u8]) -> io::Result<Vec<Bytes>> {
// let shard_size = self.shard_size();
// let total_size = shard_size * self.total_shard_count();
// Data shard count
let per_shard_size = calc_shard_size(data.len(), self.data_shards);
// Total required size
let need_total_size = per_shard_size * self.total_shard_count();
// Create a new buffer with the required total length for all shards
let mut data_buffer = BytesMut::with_capacity(need_total_size);
// Copy source data
data_buffer.extend_from_slice(data);
data_buffer.resize(need_total_size, 0u8);
{
// EC encode, the result will be written into data_buffer
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(per_shard_size).collect();
// Only do EC if parity_shards > 0
if self.parity_shards > 0 {
if let Some(encoder) = self.encoder.as_ref() {
encoder.encode(data_slices)?;
} else {
warn!("parity_shards > 0, but encoder is None");
}
}
}
// Zero-copy split, all shards reference data_buffer
let mut data_buffer = data_buffer.freeze();
let mut shards = Vec::with_capacity(self.total_shard_count());
for _ in 0..self.total_shard_count() {
let shard = data_buffer.split_to(per_shard_size);
shards.push(shard);
}
Ok(shards)
}
/// Decode and reconstruct missing shards in-place.
///
/// # Arguments
/// * `shards` - Mutable slice of optional shard data. Missing shards should be `None`.
///
/// # Returns
/// Ok if reconstruction succeeds, error otherwise.
pub fn decode_data(&self, shards: &mut [Option<Vec<u8>>]) -> io::Result<()> {
if self.parity_shards > 0 {
if let Some(encoder) = self.encoder.as_ref() {
encoder.reconstruct(shards)?;
} else {
warn!("parity_shards > 0, but encoder is None");
}
}
Ok(())
}
/// Get the total number of shards (data + parity).
pub fn total_shard_count(&self) -> usize {
self.data_shards + self.parity_shards
}
// /// Calculate the shard size and total size for a given data size.
// // Returns (shard_size, total_size) for the given data size
// fn need_size(&self, data_size: usize) -> (usize, usize) {
// let shard_size = self.shard_size(data_size);
// (shard_size, shard_size * (self.total_shard_count()))
// }
/// Calculate the size of each shard.
pub fn shard_size(&self) -> usize {
calc_shard_size(self.block_size, self.data_shards)
}
/// Calculate the total erasure file size for a given original size.
// Returns the final erasure size from the original size
pub fn shard_file_size(&self, total_length: i64) -> i64 {
if total_length == 0 {
return 0;
}
if total_length < 0 {
return total_length;
}
let total_length = total_length as usize;
let num_shards = total_length / self.block_size;
let last_block_size = total_length % self.block_size;
let last_shard_size = calc_shard_size(last_block_size, self.data_shards);
(num_shards * self.shard_size() + last_shard_size) as i64
}
/// Calculate the offset in the erasure file where reading begins.
// Returns the offset in the erasure file where reading begins
pub fn shard_file_offset(&self, start_offset: usize, length: usize, total_length: usize) -> usize {
let shard_size = self.shard_size();
let shard_file_size = self.shard_file_size(total_length as i64) as usize;
let end_shard = (start_offset + length) / self.block_size;
let mut till_offset = end_shard * shard_size + shard_size;
if till_offset > shard_file_size {
till_offset = shard_file_size;
}
till_offset
}
/// Encode all data from a reader in blocks, calling an async callback for each encoded block.
/// This method is async and returns the total bytes read after all blocks are processed.
///
/// # Arguments
/// * `reader` - An async reader implementing AsyncRead + Send + Sync + Unpin
/// * `mut on_block` - Async callback that receives encoded blocks and returns a Result
/// * `F` - Callback type: FnMut(Result<Vec<Bytes>, std::io::Error>) -> Future<Output=Result<(), E>> + Send
/// * `Fut` - Future type returned by the callback
/// * `E` - Error type returned by the callback
/// * `R` - Reader type implementing AsyncRead + Send + Sync + Unpin
///
/// # Returns
/// Result<usize, E> containing total bytes read, or error from callback
///
/// # Errors
/// Returns error if reading from reader fails or if callback returns error
pub async fn encode_stream_callback_async<F, Fut, E, R>(
self: std::sync::Arc<Self>,
reader: &mut R,
mut on_block: F,
) -> Result<usize, E>
where
R: AsyncRead + Send + Sync + Unpin,
F: FnMut(std::io::Result<Vec<Bytes>>) -> Fut + Send,
Fut: std::future::Future<Output = Result<(), E>> + Send,
{
let block_size = self.block_size;
let mut total = 0;
loop {
let mut buf = vec![0u8; block_size];
match rustfs_utils::read_full(&mut *reader, &mut buf).await {
Ok(n) if n > 0 => {
warn!("encode_stream_callback_async read n={}", n);
total += n;
let res = self.encode_data(&buf[..n]);
on_block(res).await?
}
Ok(_) => {
warn!("encode_stream_callback_async read unexpected ok");
break;
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
warn!("encode_stream_callback_async read unexpected eof");
break;
}
Err(e) => {
warn!("encode_stream_callback_async read error={:?}", e);
on_block(Err(e)).await?;
break;
}
}
buf.clear();
}
Ok(total)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_shard_file_size_cases2() {
let erasure = Erasure::new(12, 4, 1024 * 1024);
assert_eq!(erasure.shard_file_size(1572864), 131074);
}
#[test]
fn test_shard_file_size_cases() {
let erasure = Erasure::new(4, 2, 8);
// Case 1: total_length == 0
assert_eq!(erasure.shard_file_size(0), 0);
// Case 2: total_length < block_size
assert_eq!(erasure.shard_file_size(5), 2); // 5 div_ceil 4 = 2
// Case 3: total_length == block_size
assert_eq!(erasure.shard_file_size(8), 2);
// Case 4: total_length > block_size, not aligned
assert_eq!(erasure.shard_file_size(13), 4); // 8/8=1, last=5, 5 div_ceil 4=2, 1*2+2=4
// Case 5: total_length > block_size, aligned
assert_eq!(erasure.shard_file_size(16), 4); // 16/8=2, last=0, 2*2+0=4
assert_eq!(erasure.shard_file_size(1248739), 312186); // 1248739/8=156092, last=3, 3 div_ceil 4=1, 156092*2+1=312185
assert_eq!(erasure.shard_file_size(43), 12); // 43/8=5, last=3, 3 div_ceil 4=1, 5*2+1=11
assert_eq!(erasure.shard_file_size(1572864), 393216); // 43/8=5, last=3, 3 div_ceil 4=1, 5*2+1=11
}
#[test]
fn test_encode_decode_roundtrip() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 1024; // SIMD mode
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Use sufficient test data for SIMD optimization
let test_data = b"SIMD mode test data for encoding and decoding roundtrip verification with sufficient length to ensure shard size requirements are met for proper SIMD optimization.".repeat(20); // ~3KB for SIMD
let data = &test_data;
let encoded_shards = erasure.encode_data(data).unwrap();
assert_eq!(encoded_shards.len(), data_shards + parity_shards);
// Create decode input with some shards missing, convert to the format expected by decode_data
let mut decode_input: Vec<Option<Vec<u8>>> = vec![None; data_shards + parity_shards];
for i in 0..data_shards {
decode_input[i] = Some(encoded_shards[i].to_vec());
}
erasure.decode_data(&mut decode_input).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in decode_input.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(&recovered, data);
}
#[test]
fn test_encode_decode_large_1m() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 512 * 3; // SIMD mode
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Generate 1MB test data
let data: Vec<u8> = (0..1048576).map(|i| (i % 256) as u8).collect();
let encoded_shards = erasure.encode_data(&data).unwrap();
assert_eq!(encoded_shards.len(), data_shards + parity_shards);
// Create decode input with some shards missing, convert to the format expected by decode_data
let mut decode_input: Vec<Option<Vec<u8>>> = vec![None; data_shards + parity_shards];
for i in 0..data_shards {
decode_input[i] = Some(encoded_shards[i].to_vec());
}
erasure.decode_data(&mut decode_input).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in decode_input.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(recovered, data);
}
#[test]
fn test_encode_all_zero_data() {
let data_shards = 3;
let parity_shards = 2;
let block_size = 6;
let erasure = Erasure::new(data_shards, parity_shards, block_size);
let data = vec![0u8; block_size];
let shards = erasure.encode_data(&data).unwrap();
assert_eq!(shards.len(), data_shards + parity_shards);
let total_len: usize = shards.iter().map(|b| b.len()).sum();
assert_eq!(total_len, erasure.shard_size() * (data_shards + parity_shards));
}
#[test]
fn test_shard_size_and_file_size() {
let erasure = Erasure::new(4, 2, 8);
assert_eq!(erasure.shard_file_size(33), 10);
assert_eq!(erasure.shard_file_size(0), 0);
}
#[test]
fn test_shard_file_offset() {
let erasure = Erasure::new(8, 8, 1024 * 1024);
let offset = erasure.shard_file_offset(0, 86, 86);
println!("offset={offset}");
assert!(offset > 0);
let total_length = erasure.shard_file_size(86);
println!("total_length={total_length}");
assert!(total_length > 0);
}
#[tokio::test]
async fn test_encode_stream_callback_async_error_propagation() {
use std::io::Cursor;
use std::sync::Arc;
use tokio::sync::mpsc;
let data_shards = 4;
let parity_shards = 2;
let block_size = 1024; // SIMD mode
let erasure = Arc::new(Erasure::new(data_shards, parity_shards, block_size));
// Use test data suitable for SIMD mode
let data =
b"Async error test data with sufficient length to meet requirements for proper testing and validation.".repeat(20); // ~2KB
let mut reader = Cursor::new(data);
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let erasure_clone = erasure.clone();
let handle = tokio::spawn(async move {
erasure_clone
.encode_stream_callback_async::<_, _, (), _>(&mut reader, move |res| {
let tx = tx.clone();
async move {
let shards = res.unwrap();
tx.send(shards).await.unwrap();
Ok(())
}
})
.await
.unwrap();
});
let result = handle.await;
assert!(result.is_ok());
let collected_shards = rx.recv().await.unwrap();
assert_eq!(collected_shards.len(), data_shards + parity_shards);
}
#[tokio::test]
async fn test_encode_stream_callback_async_channel_decode() {
use std::io::Cursor;
use std::sync::Arc;
use tokio::sync::mpsc;
let data_shards = 4;
let parity_shards = 2;
let block_size = 1024; // SIMD mode
let erasure = Arc::new(Erasure::new(data_shards, parity_shards, block_size));
// Use test data that fits in exactly one block to avoid multi-block complexity
let data =
b"Channel async callback test data with sufficient length to ensure proper operation and validation requirements."
.repeat(8); // ~1KB
let data_clone = data.clone(); // Clone for later comparison
let mut reader = Cursor::new(data);
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let erasure_clone = erasure.clone();
let handle = tokio::spawn(async move {
erasure_clone
.encode_stream_callback_async::<_, _, (), _>(&mut reader, move |res| {
let tx = tx.clone();
async move {
let shards = res.unwrap();
tx.send(shards).await.unwrap();
Ok(())
}
})
.await
.unwrap();
});
let result = handle.await;
assert!(result.is_ok());
let shards = rx.recv().await.unwrap();
assert_eq!(shards.len(), data_shards + parity_shards);
// Test decode using the old API that operates in-place
let mut decode_input: Vec<Option<Vec<u8>>> = vec![None; data_shards + parity_shards];
for i in 0..data_shards {
decode_input[i] = Some(shards[i].to_vec());
}
erasure.decode_data(&mut decode_input).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in decode_input.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data_clone.len());
assert_eq!(&recovered, &data_clone);
}
// SIMD mode specific tests
mod simd_tests {
use super::*;
#[test]
fn test_simd_encode_decode_roundtrip() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 1024; // Use larger block size for SIMD mode
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Use data that will create shards >= 512 bytes for SIMD optimization
let test_data = b"SIMD mode test data for encoding and decoding roundtrip verification with sufficient length to ensure shard size requirements are met for proper SIMD optimization and validation.";
let data = test_data.repeat(25); // Create much larger data: ~5KB total, ~1.25KB per shard
let encoded_shards = erasure.encode_data(&data).unwrap();
assert_eq!(encoded_shards.len(), data_shards + parity_shards);
// Create decode input with some shards missing
let mut shards_opt: Vec<Option<Vec<u8>>> = encoded_shards.iter().map(|shard| Some(shard.to_vec())).collect();
// Lose one data shard and one parity shard (should still be recoverable)
shards_opt[1] = None; // Lose second data shard
shards_opt[5] = None; // Lose second parity shard
erasure.decode_data(&mut shards_opt).unwrap();
// Verify recovered data
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(&recovered, &data);
}
#[test]
fn test_simd_all_zero_data() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 1024; // Use larger block size for SIMD mode
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Create all-zero data that ensures adequate shard size for SIMD optimization
let data = vec![0u8; 1024]; // 1KB of zeros, each shard will be 256 bytes
let encoded_shards = erasure.encode_data(&data).unwrap();
assert_eq!(encoded_shards.len(), data_shards + parity_shards);
// Verify that all data shards are zeros
for (i, shard) in encoded_shards.iter().enumerate().take(data_shards) {
assert!(shard.iter().all(|&x| x == 0), "Data shard {i} should be all zeros");
}
// Test recovery with some shards missing
let mut shards_opt: Vec<Option<Vec<u8>>> = encoded_shards.iter().map(|shard| Some(shard.to_vec())).collect();
// Lose maximum recoverable shards (equal to parity_shards)
shards_opt[0] = None; // Lose first data shard
shards_opt[4] = None; // Lose first parity shard
erasure.decode_data(&mut shards_opt).unwrap();
// Verify recovered data is still all zeros
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert!(recovered.iter().all(|&x| x == 0), "Recovered data should be all zeros");
}
#[test]
fn test_simd_large_data_1kb() {
let data_shards = 8;
let parity_shards = 4;
let block_size = 1024; // 1KB block size optimal for SIMD
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Create 1KB of test data
let mut data = Vec::with_capacity(1024);
for i in 0..1024 {
data.push((i % 256) as u8);
}
let shards = erasure.encode_data(&data).unwrap();
assert_eq!(shards.len(), data_shards + parity_shards);
// Simulate the loss of multiple shards
let mut shards_opt: Vec<Option<Vec<u8>>> = shards.iter().map(|b| Some(b.to_vec())).collect();
shards_opt[0] = None;
shards_opt[3] = None;
shards_opt[9] = None; // Parity shard
shards_opt[11] = None; // Parity shard
// Decode
erasure.decode_data(&mut shards_opt).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(&recovered, &data);
}
#[test]
fn test_simd_minimum_shard_size() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 256; // Use 256 bytes to ensure sufficient shard size
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// Create data that will result in 64+ byte shards
let data = vec![0x42u8; 200]; // 200 bytes, should create ~50 byte shards per data shard
let result = erasure.encode_data(&data);
// This might fail due to SIMD shard size requirements
match result {
Ok(shards) => {
println!("SIMD encoding succeeded with shard size: {}", shards[0].len());
// Test decoding
let mut shards_opt: Vec<Option<Vec<u8>>> = shards.iter().map(|b| Some(b.to_vec())).collect();
shards_opt[1] = None;
let decode_result = erasure.decode_data(&mut shards_opt);
match decode_result {
Ok(_) => {
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/erasure_coding/bitrot.rs | crates/ecstore/src/erasure_coding/bitrot.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use bytes::Bytes;
use pin_project_lite::pin_project;
use rustfs_utils::HashAlgorithm;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tracing::error;
use uuid::Uuid;
pin_project! {
/// BitrotReader reads (hash+data) blocks from an async reader and verifies hash integrity.
pub struct BitrotReader<R> {
#[pin]
inner: R,
hash_algo: HashAlgorithm,
shard_size: usize,
buf: Vec<u8>,
hash_buf: Vec<u8>,
// hash_read: usize,
// data_buf: Vec<u8>,
// data_read: usize,
// hash_checked: bool,
id: Uuid,
}
}
impl<R> BitrotReader<R>
where
R: AsyncRead + Unpin + Send + Sync,
{
/// Create a new BitrotReader.
pub fn new(inner: R, shard_size: usize, algo: HashAlgorithm) -> Self {
let hash_size = algo.size();
Self {
inner,
hash_algo: algo,
shard_size,
buf: Vec::new(),
hash_buf: vec![0u8; hash_size],
// hash_read: 0,
// data_buf: Vec::new(),
// data_read: 0,
// hash_checked: false,
id: Uuid::new_v4(),
}
}
/// Read a single (hash+data) block, verify hash, and return the number of bytes read into `out`.
/// Returns an error if hash verification fails or data exceeds shard_size.
pub async fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
if out.len() > self.shard_size {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("data size {} exceeds shard size {}", out.len(), self.shard_size),
));
}
let hash_size = self.hash_algo.size();
// Read hash
if hash_size > 0 {
self.inner.read_exact(&mut self.hash_buf).await.map_err(|e| {
error!("bitrot reader read hash error: {}", e);
e
})?;
}
// Read data
let mut data_len = 0;
while data_len < out.len() {
let n = self.inner.read(&mut out[data_len..]).await.map_err(|e| {
error!("bitrot reader read data error: {}", e);
e
})?;
if n == 0 {
break;
}
data_len += n;
}
if hash_size > 0 {
let actual_hash = self.hash_algo.hash_encode(&out[..data_len]);
if actual_hash.as_ref() != self.hash_buf.as_slice() {
error!("bitrot reader hash mismatch, id={} data_len={}, out_len={}", self.id, data_len, out.len());
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "bitrot hash mismatch"));
}
}
Ok(data_len)
}
}
pin_project! {
/// BitrotWriter writes (hash+data) blocks to an async writer.
pub struct BitrotWriter<W> {
#[pin]
inner: W,
hash_algo: HashAlgorithm,
shard_size: usize,
buf: Vec<u8>,
finished: bool,
}
}
impl<W> BitrotWriter<W>
where
W: AsyncWrite + Unpin + Send + Sync,
{
/// Create a new BitrotWriter.
pub fn new(inner: W, shard_size: usize, algo: HashAlgorithm) -> Self {
let hash_algo = algo;
Self {
inner,
hash_algo,
shard_size,
buf: Vec::new(),
finished: false,
}
}
pub fn into_inner(self) -> W {
self.inner
}
/// Write a (hash+data) block. Returns the number of data bytes written.
/// Returns an error if called after a short write or if data exceeds shard_size.
pub async fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
if buf.is_empty() {
return Ok(0);
}
if self.finished {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "bitrot writer already finished"));
}
if buf.len() > self.shard_size {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("data size {} exceeds shard size {}", buf.len(), self.shard_size),
));
}
if buf.len() < self.shard_size {
self.finished = true;
}
let hash_algo = &self.hash_algo;
if hash_algo.size() > 0 {
let hash = hash_algo.hash_encode(buf);
self.buf.extend_from_slice(hash.as_ref());
}
self.buf.extend_from_slice(buf);
self.inner.write_all(&self.buf).await?;
// self.inner.flush().await?;
let n = buf.len();
self.buf.clear();
Ok(n)
}
pub async fn shutdown(&mut self) -> std::io::Result<()> {
self.inner.shutdown().await
}
}
pub fn bitrot_shard_file_size(size: usize, shard_size: usize, algo: HashAlgorithm) -> usize {
if algo != HashAlgorithm::HighwayHash256S {
return size;
}
size.div_ceil(shard_size) * algo.size() + size
}
pub async fn bitrot_verify<R: AsyncRead + Unpin + Send>(
mut r: R,
want_size: usize,
part_size: usize,
algo: HashAlgorithm,
_want: Bytes, // FIXME: useless parameter?
mut shard_size: usize,
) -> std::io::Result<()> {
let mut hash_buf = vec![0; algo.size()];
let mut left = want_size;
if left != bitrot_shard_file_size(part_size, shard_size, algo.clone()) {
return Err(std::io::Error::other("bitrot shard file size mismatch"));
}
while left > 0 {
let n = r.read_exact(&mut hash_buf).await?;
left -= n;
if left < shard_size {
shard_size = left;
}
let mut buf = vec![0; shard_size];
let read = r.read_exact(&mut buf).await?;
let actual_hash = algo.hash_encode(&buf);
if actual_hash.as_ref() != &hash_buf[0..n] {
return Err(std::io::Error::other("bitrot hash mismatch"));
}
left -= read;
}
Ok(())
}
/// Custom writer enum that supports inline buffer storage
pub enum CustomWriter {
/// Inline buffer writer - stores data in memory
InlineBuffer(Vec<u8>),
/// Disk-based writer using tokio file
Other(Box<dyn AsyncWrite + Unpin + Send + Sync>),
}
impl CustomWriter {
/// Create a new inline buffer writer
pub fn new_inline_buffer() -> Self {
Self::InlineBuffer(Vec::new())
}
/// Create a new disk writer from any AsyncWrite implementation
pub fn new_tokio_writer<W>(writer: W) -> Self
where
W: AsyncWrite + Unpin + Send + Sync + 'static,
{
Self::Other(Box::new(writer))
}
/// Get the inline buffer data if this is an inline buffer writer
pub fn get_inline_data(&self) -> Option<&[u8]> {
match self {
Self::InlineBuffer(data) => Some(data),
Self::Other(_) => None,
}
}
/// Extract the inline buffer data, consuming the writer
pub fn into_inline_data(self) -> Option<Vec<u8>> {
match self {
Self::InlineBuffer(data) => Some(data),
Self::Other(_) => None,
}
}
}
impl AsyncWrite for CustomWriter {
fn poll_write(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<std::io::Result<usize>> {
match self.get_mut() {
Self::InlineBuffer(data) => {
data.extend_from_slice(buf);
std::task::Poll::Ready(Ok(buf.len()))
}
Self::Other(writer) => {
let pinned_writer = std::pin::Pin::new(writer.as_mut());
pinned_writer.poll_write(cx, buf)
}
}
}
fn poll_flush(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<std::io::Result<()>> {
match self.get_mut() {
Self::InlineBuffer(_) => std::task::Poll::Ready(Ok(())),
Self::Other(writer) => {
let pinned_writer = std::pin::Pin::new(writer.as_mut());
pinned_writer.poll_flush(cx)
}
}
}
fn poll_shutdown(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<std::io::Result<()>> {
match self.get_mut() {
Self::InlineBuffer(_) => std::task::Poll::Ready(Ok(())),
Self::Other(writer) => {
let pinned_writer = std::pin::Pin::new(writer.as_mut());
pinned_writer.poll_shutdown(cx)
}
}
}
}
/// Wrapper around BitrotWriter that uses our custom writer
pub struct BitrotWriterWrapper {
bitrot_writer: BitrotWriter<CustomWriter>,
writer_type: WriterType,
}
/// Enum to track the type of writer we're using
enum WriterType {
InlineBuffer,
Other,
}
impl std::fmt::Debug for BitrotWriterWrapper {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BitrotWriterWrapper")
.field(
"writer_type",
&match self.writer_type {
WriterType::InlineBuffer => "InlineBuffer",
WriterType::Other => "Other",
},
)
.finish()
}
}
impl BitrotWriterWrapper {
/// Create a new BitrotWriterWrapper with custom writer
pub fn new(writer: CustomWriter, shard_size: usize, checksum_algo: HashAlgorithm) -> Self {
let writer_type = match &writer {
CustomWriter::InlineBuffer(_) => WriterType::InlineBuffer,
CustomWriter::Other(_) => WriterType::Other,
};
Self {
bitrot_writer: BitrotWriter::new(writer, shard_size, checksum_algo),
writer_type,
}
}
/// Write data to the bitrot writer
pub async fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.bitrot_writer.write(buf).await
}
pub async fn shutdown(&mut self) -> std::io::Result<()> {
self.bitrot_writer.shutdown().await
}
/// Extract the inline buffer data, consuming the wrapper
pub fn into_inline_data(self) -> Option<Vec<u8>> {
match self.writer_type {
WriterType::InlineBuffer => {
let writer = self.bitrot_writer.into_inner();
writer.into_inline_data()
}
WriterType::Other => None,
}
}
}
#[cfg(test)]
mod tests {
use super::BitrotReader;
use super::BitrotWriter;
use rustfs_utils::HashAlgorithm;
use std::io::Cursor;
#[tokio::test]
async fn test_bitrot_read_write_ok() {
let data = b"hello world! this is a test shard.";
let data_size = data.len();
let shard_size = 8;
let buf: Vec<u8> = Vec::new();
let writer = Cursor::new(buf);
let mut bitrot_writer = BitrotWriter::new(writer, shard_size, HashAlgorithm::HighwayHash256);
let mut n = 0;
for chunk in data.chunks(shard_size) {
n += bitrot_writer.write(chunk).await.unwrap();
}
assert_eq!(n, data.len());
// Read
let reader = bitrot_writer.into_inner();
let reader = Cursor::new(reader.into_inner());
let mut bitrot_reader = BitrotReader::new(reader, shard_size, HashAlgorithm::HighwayHash256);
let mut out = Vec::new();
let mut n = 0;
while n < data_size {
let mut buf = vec![0u8; shard_size];
let m = bitrot_reader.read(&mut buf).await.unwrap();
assert_eq!(&buf[..m], &data[n..n + m]);
out.extend_from_slice(&buf[..m]);
n += m;
}
assert_eq!(n, data_size);
assert_eq!(data, &out[..]);
}
#[tokio::test]
async fn test_bitrot_read_hash_mismatch() {
let data = b"test data for bitrot";
let data_size = data.len();
let shard_size = 8;
let buf: Vec<u8> = Vec::new();
let writer = Cursor::new(buf);
let mut bitrot_writer = BitrotWriter::new(writer, shard_size, HashAlgorithm::HighwayHash256);
for chunk in data.chunks(shard_size) {
let _ = bitrot_writer.write(chunk).await.unwrap();
}
let mut written = bitrot_writer.into_inner().into_inner();
// change the last byte to make hash mismatch
let pos = written.len() - 1;
written[pos] ^= 0xFF;
let reader = Cursor::new(written);
let mut bitrot_reader = BitrotReader::new(reader, shard_size, HashAlgorithm::HighwayHash256);
let count = data_size.div_ceil(shard_size);
let mut idx = 0;
let mut n = 0;
while n < data_size {
let mut buf = vec![0u8; shard_size];
let res = bitrot_reader.read(&mut buf).await;
if idx == count - 1 {
// The last chunk should trigger an error
assert!(res.is_err());
assert_eq!(res.unwrap_err().kind(), std::io::ErrorKind::InvalidData);
break;
}
let m = res.unwrap();
assert_eq!(&buf[..m], &data[n..n + m]);
n += m;
idx += 1;
}
}
#[tokio::test]
async fn test_bitrot_read_write_none_hash() {
let data = b"bitrot none hash test data!";
let data_size = data.len();
let shard_size = 8;
let buf: Vec<u8> = Vec::new();
let writer = Cursor::new(buf);
let mut bitrot_writer = BitrotWriter::new(writer, shard_size, HashAlgorithm::None);
let mut n = 0;
for chunk in data.chunks(shard_size) {
n += bitrot_writer.write(chunk).await.unwrap();
}
assert_eq!(n, data.len());
let reader = bitrot_writer.into_inner();
let reader = Cursor::new(reader.into_inner());
let mut bitrot_reader = BitrotReader::new(reader, shard_size, HashAlgorithm::None);
let mut out = Vec::new();
let mut n = 0;
while n < data_size {
let mut buf = vec![0u8; shard_size];
let m = bitrot_reader.read(&mut buf).await.unwrap();
assert_eq!(&buf[..m], &data[n..n + m]);
out.extend_from_slice(&buf[..m]);
n += m;
}
assert_eq!(n, data_size);
assert_eq!(data, &out[..]);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rpc/remote_disk.rs | crates/ecstore/src/rpc/remote_disk.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
path::PathBuf,
sync::{Arc, atomic::Ordering},
time::Duration,
};
use bytes::Bytes;
use futures::lock::Mutex;
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
use rustfs_protos::{
node_service_time_out_client,
proto_gen::node_service::{
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequest,
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
},
};
use rustfs_utils::string::parse_bool_with_default;
use tokio::time;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use crate::disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
disk_store::{
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration,
},
endpoint::Endpoint,
};
use crate::disk::{FileReader, FileWriter};
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};
use crate::{
disk::error::{Error, Result},
rpc::build_auth_headers,
};
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
use rustfs_rio::{HttpReader, HttpWriter};
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
use tonic::Request;
use uuid::Uuid;
#[derive(Debug)]
pub struct RemoteDisk {
pub id: Mutex<Option<Uuid>>,
pub addr: String,
pub url: url::Url,
pub root: PathBuf,
endpoint: Endpoint,
/// Whether health checking is enabled
health_check: bool,
/// Health tracker for connection monitoring
health: Arc<DiskHealthTracker>,
/// Cancellation token for monitoring tasks
cancel_token: CancellationToken,
}
impl RemoteDisk {
pub async fn new(ep: &Endpoint, opt: &DiskOption) -> Result<Self> {
// let root = fs::canonicalize(ep.url.path()).await?;
let root = PathBuf::from(ep.get_file_path());
let addr = if let Some(port) = ep.url.port() {
format!("{}://{}:{}", ep.url.scheme(), ep.url.host_str().unwrap(), port)
} else {
format!("{}://{}", ep.url.scheme(), ep.url.host_str().unwrap())
};
let env_health_check = std::env::var(ENV_RUSTFS_DRIVE_ACTIVE_MONITORING)
.map(|v| parse_bool_with_default(&v, true))
.unwrap_or(true);
let disk = Self {
id: Mutex::new(None),
addr: addr.clone(),
url: ep.url.clone(),
root,
endpoint: ep.clone(),
health_check: opt.health_check && env_health_check,
health: Arc::new(DiskHealthTracker::new()),
cancel_token: CancellationToken::new(),
};
// Start health monitoring
disk.start_health_monitoring();
Ok(disk)
}
/// Start health monitoring for the remote disk
fn start_health_monitoring(&self) {
if self.health_check {
let health = Arc::clone(&self.health);
let cancel_token = self.cancel_token.clone();
let addr = self.addr.clone();
tokio::spawn(async move {
Self::monitor_remote_disk_health(addr, health, cancel_token).await;
});
}
}
/// Monitor remote disk health periodically
async fn monitor_remote_disk_health(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
let mut interval = time::interval(CHECK_EVERY);
// Perform basic connectivity check
if Self::perform_connectivity_check(&addr).await.is_err() && health.swap_ok_to_faulty() {
warn!("Remote disk health check failed for {}: marking as faulty", addr);
// Start recovery monitoring
let health_clone = Arc::clone(&health);
let addr_clone = addr.clone();
let cancel_clone = cancel_token.clone();
tokio::spawn(async move {
Self::monitor_remote_disk_recovery(addr_clone, health_clone, cancel_clone).await;
});
}
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
debug!("Health monitoring cancelled for remote disk: {}", addr);
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
// Skip health check if disk is already marked as faulty
if health.is_faulty() {
continue;
}
let last_success_nanos = health.last_success.load(Ordering::Relaxed);
let elapsed = Duration::from_nanos(
(std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64 - last_success_nanos) as u64
);
if elapsed < SKIP_IF_SUCCESS_BEFORE {
continue;
}
// Perform basic connectivity check
if Self::perform_connectivity_check(&addr).await.is_err() && health.swap_ok_to_faulty() {
warn!("Remote disk health check failed for {}: marking as faulty", addr);
// Start recovery monitoring
let health_clone = Arc::clone(&health);
let addr_clone = addr.clone();
let cancel_clone = cancel_token.clone();
tokio::spawn(async move {
Self::monitor_remote_disk_recovery(addr_clone, health_clone, cancel_clone).await;
});
}
}
}
}
}
/// Monitor remote disk recovery and mark as healthy when recovered
async fn monitor_remote_disk_recovery(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if Self::perform_connectivity_check(&addr).await.is_ok() {
info!("Remote disk recovered: {}", addr);
health.set_ok();
return;
}
}
}
}
}
/// Perform basic connectivity check for remote disk
async fn perform_connectivity_check(addr: &str) -> Result<()> {
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
let Some(host) = url.host_str() else {
return Err(Error::other("No host in URL".to_string()));
};
let port = url.port_or_known_default().unwrap_or(80);
// Try to establish TCP connection
match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
Ok(Ok(stream)) => {
drop(stream);
Ok(())
}
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
}
}
/// Execute operation with timeout and health tracking
async fn execute_with_timeout<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
where
F: FnOnce() -> Fut,
Fut: std::future::Future<Output = Result<T>>,
{
// Check if disk is faulty
if self.health.is_faulty() {
warn!("disk {} health is faulty, returning error", self.to_string());
return Err(DiskError::FaultyDisk);
}
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, std::sync::atomic::Ordering::Relaxed);
self.health.increment_waiting();
// Execute operation with timeout
let result = time::timeout(timeout_duration, operation()).await;
match result {
Ok(operation_result) => {
// Log success and decrement waiting counter
if operation_result.is_ok() {
self.health.log_success();
}
self.health.decrement_waiting();
operation_result
}
Err(_) => {
// Timeout occurred, mark disk as potentially faulty
self.health.decrement_waiting();
warn!("Remote disk operation timeout after {:?}", timeout_duration);
Err(Error::other(format!("Remote disk operation timeout after {:?}", timeout_duration)))
}
}
}
}
// TODO: all api need to handle errors
#[async_trait::async_trait]
impl DiskAPI for RemoteDisk {
#[tracing::instrument(skip(self))]
fn to_string(&self) -> String {
self.endpoint.to_string()
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
// If disk is marked as faulty, consider it offline
!self.health.is_faulty()
}
#[tracing::instrument(skip(self))]
fn is_local(&self) -> bool {
false
}
#[tracing::instrument(skip(self))]
fn host_name(&self) -> String {
self.endpoint.host_port()
}
#[tracing::instrument(skip(self))]
fn endpoint(&self) -> Endpoint {
self.endpoint.clone()
}
#[tracing::instrument(skip(self))]
async fn close(&self) -> Result<()> {
self.cancel_token.cancel();
Ok(())
}
#[tracing::instrument(skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
Ok(*self.id.lock().await)
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
let mut lock = self.id.lock().await;
*lock = id;
Ok(())
}
#[tracing::instrument(skip(self))]
fn path(&self) -> PathBuf {
self.root.clone()
}
#[tracing::instrument(skip(self))]
fn get_disk_location(&self) -> DiskLocation {
DiskLocation {
pool_idx: {
if self.endpoint.pool_idx < 0 {
None
} else {
Some(self.endpoint.pool_idx as usize)
}
},
set_idx: {
if self.endpoint.set_idx < 0 {
None
} else {
Some(self.endpoint.set_idx as usize)
}
},
disk_idx: {
if self.endpoint.disk_idx < 0 {
None
} else {
Some(self.endpoint.disk_idx as usize)
}
},
}
}
#[tracing::instrument(skip(self))]
async fn make_volume(&self, volume: &str) -> Result<()> {
info!("make_volume");
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.make_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
info!("make_volumes");
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeVolumesRequest {
disk: self.endpoint.to_string(),
volumes: volumes.iter().map(|s| (*s).to_string()).collect(),
});
let response = client.make_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
info!("list_volumes");
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListVolumesRequest {
disk: self.endpoint.to_string(),
});
let response = client.list_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let infos = response
.volume_infos
.into_iter()
.filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok())
.collect();
Ok(infos)
},
Duration::ZERO,
)
.await
}
#[tracing::instrument(skip(self))]
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
info!("stat_volume");
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(StatVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.stat_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let volume_info = serde_json::from_str::<VolumeInfo>(&response.volume_info)?;
Ok(volume_info)
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn delete_volume(&self, volume: &str) -> Result<()> {
info!("delete_volume {}/{}", self.endpoint.to_string(), volume);
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.delete_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
Duration::ZERO,
)
.await
}
// // FIXME: TODO: use writer
// #[tracing::instrument(skip(self, wr))]
// async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
// let now = std::time::SystemTime::now();
// info!("walk_dir {}/{}/{:?}", self.endpoint.to_string(), opts.bucket, opts.filter_prefix);
// let mut wr = wr;
// let mut out = MetacacheWriter::new(&mut wr);
// let mut buf = Vec::new();
// opts.serialize(&mut Serializer::new(&mut buf))?;
// let mut client = node_service_time_out_client(&self.addr)
// .await
// .map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
// let request = Request::new(WalkDirRequest {
// disk: self.endpoint.to_string(),
// walk_dir_options: buf.into(),
// });
// let mut response = client.walk_dir(request).await?.into_inner();
// loop {
// match response.next().await {
// Some(Ok(resp)) => {
// if !resp.success {
// if let Some(err) = resp.error_info {
// if err == "Unexpected EOF" {
// return Err(Error::Io(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)));
// } else {
// return Err(Error::other(err));
// }
// }
// return Err(Error::other("unknown error"));
// }
// let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
// .map_err(|_| Error::other(format!("Unexpected response: {:?}", response)))?;
// out.write_obj(&entry).await?;
// }
// None => break,
// _ => return Err(Error::other(format!("Unexpected response: {:?}", response))),
// }
// }
// info!(
// "walk_dir {}/{:?} done {:?}",
// opts.bucket,
// opts.filter_prefix,
// now.elapsed().unwrap_or_default()
// );
// Ok(())
// }
#[tracing::instrument(skip(self))]
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
info!("delete_version");
self.execute_with_timeout(
|| async {
let file_info = serde_json::to_string(&fi)?;
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
force_del_marker,
opts,
});
let response = client.delete_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
// let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
info!("delete_versions");
if self.health.is_faulty() {
return vec![Some(DiskError::FaultyDisk); versions.len()];
}
let opts = match serde_json::to_string(&opts) {
Ok(opts) => opts,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
};
let mut versions_str = Vec::with_capacity(versions.len());
for file_info_versions in versions.iter() {
versions_str.push(match serde_json::to_string(file_info_versions) {
Ok(versions_str) => versions_str,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
});
}
let mut client = match node_service_time_out_client(&self.addr).await {
Ok(client) => client,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
};
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
versions: versions_str,
opts,
});
// TODO: use Error not string
let result = self
.execute_with_timeout(
|| async {
client
.delete_versions(request)
.await
.map_err(|err| Error::other(format!("delete_versions failed: {err}")))
},
get_max_timeout_duration(),
)
.await;
let response = match result {
Ok(response) => response,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(err.clone()));
}
return errors;
}
};
let response = response.into_inner();
if !response.success {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(response.error.clone().map(|e| e.error_info).unwrap_or_default())));
}
return errors;
}
response
.errors
.iter()
.map(|error| {
if error.is_empty() {
None
} else {
Some(Error::other(error.to_string()))
}
})
.collect()
}
#[tracing::instrument(skip(self))]
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
info!("delete_paths");
let paths = paths.to_owned();
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeletePathsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
paths: paths.clone(),
});
let response = client.delete_paths(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
info!("write_metadata {}/{}", volume, path);
let file_info = serde_json::to_string(&fi)?;
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(WriteMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info: file_info.clone(),
});
let response = client.write_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
info!("update_metadata");
let file_info = serde_json::to_string(&fi)?;
let opts_str = serde_json::to_string(&opts)?;
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(UpdateMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info: file_info.clone(),
opts: opts_str.clone(),
});
let response = client.update_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn read_version(
&self,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
info!("read_version");
let opts_str = serde_json::to_string(opts)?;
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
version_id: version_id.to_string(),
opts: opts_str.clone(),
});
let response = client.read_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let file_info = serde_json::from_str::<FileInfo>(&response.file_info)?;
Ok(file_info)
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path);
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadXlRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
read_data,
});
let response = client.read_xl(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(raw_file_info)
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
info!("rename_data {}/{}/{}/{}", self.addr, self.endpoint.to_string(), dst_volume, dst_path);
self.execute_with_timeout(
|| async {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(RenameDataRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
file_info,
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
});
let response = client.rename_data(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let rename_data_resp = serde_json::from_str::<RenameDataResp>(&response.rename_data_resp)?;
Ok(rename_data_resp)
},
get_max_timeout_duration(),
)
.await
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
debug!("list_dir {}/{}", volume, dir_path);
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
dir_path: dir_path.to_string(),
count,
});
let response = client.list_dir(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(response.volumes)
}
#[tracing::instrument(skip(self, wr))]
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
info!("walk_dir {}", self.endpoint.to_string());
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
let url = format!(
"{}/rustfs/rpc/walk_dir?disk={}",
self.endpoint.grid_host(),
urlencoding::encode(self.endpoint.to_string().as_str()),
);
let opts = serde_json::to_vec(&opts)?;
let mut headers = HeaderMap::new();
headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
build_auth_headers(&url, &Method::GET, &mut headers);
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rpc/peer_s3_client.rs | crates/ecstore/src/rpc/peer_s3_client.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::metadata_sys;
use crate::disk::error::DiskError;
use crate::disk::error::{Error, Result};
use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs};
use crate::disk::{DiskAPI, DiskStore, disk_store::get_max_timeout_duration};
use crate::global::GLOBAL_LOCAL_DISK_MAP;
use crate::store::all_local_disk;
use crate::store_utils::is_reserved_or_invalid_bucket;
use crate::{
disk::{
self, VolumeInfo,
disk_store::{CHECK_EVERY, CHECK_TIMEOUT_DURATION, DiskHealthTracker},
},
endpoints::{EndpointServerPools, Node},
store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions},
};
use async_trait::async_trait;
use futures::future::join_all;
use rustfs_common::heal_channel::{DriveState, HealItemType, HealOpts, RUSTFS_RESERVED_BUCKET};
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_protos::node_service_time_out_client;
use rustfs_protos::proto_gen::node_service::{
DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
};
use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration};
use tokio::{net::TcpStream, sync::RwLock, time};
use tokio_util::sync::CancellationToken;
use tonic::Request;
use tracing::{debug, info, warn};
type Client = Arc<Box<dyn PeerS3Client>>;
#[async_trait]
pub trait PeerS3Client: Debug + Sync + Send + 'static {
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()>;
async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>>;
async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()>;
async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo>;
fn get_pools(&self) -> Option<Vec<usize>>;
}
#[derive(Debug, Clone)]
pub struct S3PeerSys {
pub clients: Vec<Client>,
pub pools_count: usize,
}
impl S3PeerSys {
pub fn new(eps: &EndpointServerPools) -> Self {
Self {
clients: Self::new_clients(eps),
pools_count: eps.as_ref().len(),
}
}
fn new_clients(eps: &EndpointServerPools) -> Vec<Client> {
let nodes = eps.get_nodes();
let v: Vec<Client> = nodes
.iter()
.map(|e| {
if e.is_local {
let cli: Box<dyn PeerS3Client> = Box::new(LocalPeerS3Client::new(Some(e.clone()), Some(e.pools.clone())));
Arc::new(cli)
} else {
let cli: Box<dyn PeerS3Client> = Box::new(RemotePeerS3Client::new(Some(e.clone()), Some(e.pools.clone())));
Arc::new(cli)
}
})
.collect();
v
}
}
impl S3PeerSys {
pub async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
let mut opts = *opts;
let mut futures = Vec::with_capacity(self.clients.len());
for client in self.clients.iter() {
// client_clon
futures.push(async move { (client.get_bucket_info(bucket, &BucketOptions::default()).await).err() });
}
let errs = join_all(futures).await;
let mut pool_errs = Vec::new();
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;
pool_errs.push(reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, qu));
}
if !opts.recreate {
opts.remove = is_all_buckets_not_found(&pool_errs);
opts.recreate = !opts.remove;
}
let mut futures = Vec::new();
let heal_bucket_results = Arc::new(RwLock::new(vec![HealResultItem::default(); self.clients.len()]));
for (idx, client) in self.clients.iter().enumerate() {
let opts_clone = opts;
let heal_bucket_results_clone = heal_bucket_results.clone();
futures.push(async move {
match client.heal_bucket(bucket, &opts_clone).await {
Ok(res) => {
heal_bucket_results_clone.write().await[idx] = res;
None
}
Err(err) => Some(err),
}
});
}
let errs = join_all(futures).await;
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, qu) {
tracing::error!("heal_bucket per_pool_errs: {per_pool_errs:?}");
tracing::error!("heal_bucket reduce_write_quorum_errs: {pool_err}");
return Err(pool_err);
}
}
if let Some(err) = reduce_write_quorum_errs(&errs, BUCKET_OP_IGNORED_ERRS, (errs.len() / 2) + 1) {
tracing::error!("heal_bucket errs: {errs:?}");
tracing::error!("heal_bucket reduce_write_quorum_errs: {err}");
return Err(err);
}
for (i, err) in errs.iter().enumerate() {
if err.is_none() {
return Ok(heal_bucket_results.read().await[i].clone());
}
}
Err(Error::VolumeNotFound)
}
pub async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
let mut futures = Vec::with_capacity(self.clients.len());
for cli in self.clients.iter() {
futures.push(cli.make_bucket(bucket, opts));
}
let mut errors = vec![None; self.clients.len()];
let results = join_all(futures).await;
for (i, result) in results.into_iter().enumerate() {
match result {
Ok(_) => {
errors[i] = None;
}
Err(e) => {
errors[i] = Some(e);
}
}
}
for i in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (j, cli) in self.clients.iter().enumerate() {
let pools = cli.get_pools();
let idx = i;
if pools.unwrap_or_default().contains(&idx) {
per_pool_errs[j] = errors[j].clone();
}
}
if let Some(pool_err) =
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
{
tracing::error!("make_bucket per_pool_errs: {per_pool_errs:?}");
tracing::error!("make_bucket reduce_write_quorum_errs: {pool_err}");
return Err(pool_err);
}
}
Ok(())
}
pub async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
let mut futures = Vec::with_capacity(self.clients.len());
for cli in self.clients.iter() {
futures.push(cli.list_bucket(opts));
}
let mut errors = vec![None; self.clients.len()];
let mut node_buckets = vec![None; self.clients.len()];
let results = join_all(futures).await;
for (i, result) in results.into_iter().enumerate() {
match result {
Ok(res) => {
node_buckets[i] = Some(res);
errors[i] = None;
}
Err(e) => {
node_buckets[i] = None;
errors[i] = Some(e);
}
}
}
let mut result_map: HashMap<&String, BucketInfo> = HashMap::new();
for i in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (j, cli) in self.clients.iter().enumerate() {
let pools = cli.get_pools();
let idx = i;
if pools.unwrap_or_default().contains(&idx) {
per_pool_errs[j] = errors[j].clone();
}
}
let quorum = per_pool_errs.len() / 2;
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, quorum) {
tracing::error!("list_bucket per_pool_errs: {per_pool_errs:?}");
tracing::error!("list_bucket reduce_write_quorum_errs: {pool_err}");
return Err(pool_err);
}
let mut bucket_map: HashMap<&String, usize> = HashMap::new();
for (j, node_bucket) in node_buckets.iter().enumerate() {
if let Some(buckets) = node_bucket.as_ref() {
if buckets.is_empty() {
continue;
}
if !self.clients[j].get_pools().unwrap_or_default().contains(&i) {
continue;
}
for bucket in buckets.iter() {
if result_map.contains_key(&bucket.name) {
continue;
}
// incr bucket_map count create if not exists
let count = bucket_map.entry(&bucket.name).or_insert(0usize);
*count += 1;
if *count >= quorum {
result_map.insert(&bucket.name, bucket.clone());
}
}
}
}
// TODO: MRF
}
let mut buckets: Vec<BucketInfo> = result_map.into_values().collect();
buckets.sort_by_key(|b| b.name.clone());
Ok(buckets)
}
pub async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()> {
let mut futures = Vec::with_capacity(self.clients.len());
for cli in self.clients.iter() {
futures.push(cli.delete_bucket(bucket, opts));
}
let mut errors = vec![None; self.clients.len()];
let results = join_all(futures).await;
for (i, result) in results.into_iter().enumerate() {
match result {
Ok(_) => {
errors[i] = None;
}
Err(e) => {
errors[i] = Some(e);
}
}
}
if let Some(err) = reduce_write_quorum_errs(&errors, BUCKET_OP_IGNORED_ERRS, (errors.len() / 2) + 1) {
if !Error::is_err_object_not_found(&err) && !opts.no_recreate {
let _ = self.make_bucket(bucket, &MakeBucketOptions::default()).await;
}
return Err(err);
}
Ok(())
}
pub async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo> {
let mut futures = Vec::with_capacity(self.clients.len());
for cli in self.clients.iter() {
futures.push(cli.get_bucket_info(bucket, opts));
}
let mut ress = vec![None; self.clients.len()];
let mut errors = vec![None; self.clients.len()];
let results = join_all(futures).await;
for (i, result) in results.into_iter().enumerate() {
match result {
Ok(res) => {
ress[i] = Some(res);
errors[i] = None;
}
Err(e) => {
ress[i] = None;
errors[i] = Some(e);
}
}
}
for i in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (j, cli) in self.clients.iter().enumerate() {
let pools = cli.get_pools();
let idx = i;
if pools.unwrap_or_default().contains(&idx) {
per_pool_errs[j] = errors[j].clone();
}
}
if let Some(pool_err) =
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
{
return Err(pool_err);
}
}
ress.into_iter()
.filter(|op| op.is_some())
.find_map(|op| op.clone())
.ok_or(Error::VolumeNotFound)
}
pub fn get_pools(&self) -> Option<Vec<usize>> {
unimplemented!()
}
}
#[derive(Debug)]
pub struct LocalPeerS3Client {
// pub local_disks: Vec<DiskStore>,
// pub node: Node,
pub pools: Option<Vec<usize>>,
}
impl LocalPeerS3Client {
pub fn new(_node: Option<Node>, pools: Option<Vec<usize>>) -> Self {
Self {
// local_disks,
// node,
pools,
}
}
}
#[async_trait]
impl PeerS3Client for LocalPeerS3Client {
fn get_pools(&self) -> Option<Vec<usize>> {
self.pools.clone()
}
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
heal_bucket_local(bucket, opts).await
}
async fn list_bucket(&self, _opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
let local_disks = all_local_disk().await;
let mut futures = Vec::with_capacity(local_disks.len());
for disk in local_disks.iter() {
futures.push(disk.list_volumes());
}
let results = join_all(futures).await;
let mut ress = Vec::new();
let mut errs = Vec::new();
for result in results {
match result {
Ok(res) => {
ress.push(res);
errs.push(None);
}
Err(e) => errs.push(Some(e)),
}
}
let mut uniq_map: HashMap<&String, &VolumeInfo> = HashMap::new();
for info_list in ress.iter() {
for info in info_list.iter() {
if is_reserved_or_invalid_bucket(&info.name, false) {
continue;
}
if !uniq_map.contains_key(&info.name) {
uniq_map.insert(&info.name, info);
}
}
}
let buckets: Vec<BucketInfo> = uniq_map
.values()
.map(|&v| BucketInfo {
name: v.name.clone(),
created: v.created,
..Default::default()
})
.collect();
Ok(buckets)
}
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
let local_disks = all_local_disk().await;
let mut futures = Vec::with_capacity(local_disks.len());
for disk in local_disks.iter() {
futures.push(async move {
match disk.make_volume(bucket).await {
Ok(_) => Ok(()),
Err(e) => {
if opts.force_create && matches!(e, Error::VolumeExists) {
return Ok(());
}
Err(e)
}
}
});
}
let results = join_all(futures).await;
let mut errs = Vec::new();
for res in results {
match res {
Ok(_) => errs.push(None),
Err(e) => errs.push(Some(e)),
}
}
if let Some(err) = reduce_write_quorum_errs(&errs, BUCKET_OP_IGNORED_ERRS, (local_disks.len() / 2) + 1) {
return Err(err);
}
Ok(())
}
async fn get_bucket_info(&self, bucket: &str, _opts: &BucketOptions) -> Result<BucketInfo> {
let local_disks = all_local_disk().await;
let mut futures = Vec::with_capacity(local_disks.len());
for disk in local_disks.iter() {
futures.push(disk.stat_volume(bucket));
}
let results = join_all(futures).await;
let mut ress = Vec::with_capacity(local_disks.len());
let mut errs = Vec::with_capacity(local_disks.len());
for res in results {
match res {
Ok(r) => {
errs.push(None);
ress.push(Some(r));
}
Err(e) => {
errs.push(Some(e));
ress.push(None);
}
}
}
// TODO: reduceWriteQuorumErrs
let mut versioned = false;
if let Ok(sys) = metadata_sys::get(bucket).await {
versioned = sys.versioning();
}
ress.iter()
.find_map(|op| {
op.as_ref().map(|v| BucketInfo {
name: v.name.clone(),
created: v.created,
versioning: versioned,
..Default::default()
})
})
.ok_or(Error::VolumeNotFound)
}
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
let local_disks = all_local_disk().await;
let mut futures = Vec::with_capacity(local_disks.len());
for disk in local_disks.iter() {
futures.push(disk.delete_volume(bucket));
}
let results = join_all(futures).await;
let mut errs = Vec::new();
let mut recreate = false;
for res in results {
match res {
Ok(_) => errs.push(None),
Err(e) => {
if matches!(e, Error::VolumeNotEmpty) {
recreate = true;
}
errs.push(Some(e))
}
}
}
// For errVolumeNotEmpty, do not delete; recreate only the entries already removed
for (idx, err) in errs.into_iter().enumerate() {
if err.is_none() && recreate {
let _ = local_disks[idx].make_volume(bucket).await;
}
}
if recreate {
return Err(Error::VolumeNotEmpty);
}
// TODO: reduceWriteQuorumErrs
Ok(())
}
}
#[derive(Debug)]
pub struct RemotePeerS3Client {
pub node: Option<Node>,
pub pools: Option<Vec<usize>>,
addr: String,
/// Health tracker for connection monitoring
health: Arc<DiskHealthTracker>,
/// Cancellation token for monitoring tasks
cancel_token: CancellationToken,
}
impl RemotePeerS3Client {
pub fn new(node: Option<Node>, pools: Option<Vec<usize>>) -> Self {
let addr = node.as_ref().map(|v| v.url.to_string()).unwrap_or_default().to_string();
let client = Self {
node,
pools,
addr,
health: Arc::new(DiskHealthTracker::new()),
cancel_token: CancellationToken::new(),
};
// Start health monitoring
client.start_health_monitoring();
client
}
pub fn get_addr(&self) -> String {
self.addr.clone()
}
/// Start health monitoring for the remote peer
fn start_health_monitoring(&self) {
let health = Arc::clone(&self.health);
let cancel_token = self.cancel_token.clone();
let addr = self.addr.clone();
tokio::spawn(async move {
Self::monitor_remote_peer_health(addr, health, cancel_token).await;
});
}
/// Monitor remote peer health periodically
async fn monitor_remote_peer_health(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
debug!("Health monitoring cancelled for remote peer: {}", addr);
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
// Skip health check if peer is already marked as faulty
if health.is_faulty() {
continue;
}
// Perform basic connectivity check
if Self::perform_connectivity_check(&addr).await.is_err() && health.swap_ok_to_faulty() {
warn!("Remote peer health check failed for {}: marking as faulty", addr);
// Start recovery monitoring
let health_clone = Arc::clone(&health);
let addr_clone = addr.clone();
let cancel_clone = cancel_token.clone();
tokio::spawn(async move {
Self::monitor_remote_peer_recovery(addr_clone, health_clone, cancel_clone).await;
});
}
}
}
}
}
/// Monitor remote peer recovery and mark as healthy when recovered
async fn monitor_remote_peer_recovery(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
let mut interval = time::interval(Duration::from_secs(5)); // Check every 5 seconds
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if Self::perform_connectivity_check(&addr).await.is_ok() {
info!("Remote peer recovered: {}", addr);
health.set_ok();
return;
}
}
}
}
}
/// Perform basic connectivity check for remote peer
async fn perform_connectivity_check(addr: &str) -> Result<()> {
use tokio::time::timeout;
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
let Some(host) = url.host_str() else {
return Err(Error::other("No host in URL".to_string()));
};
let port = url.port_or_known_default().unwrap_or(80);
// Try to establish TCP connection
match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
Ok(Ok(_)) => Ok(()),
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
}
}
/// Execute operation with timeout and health tracking
async fn execute_with_timeout<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
where
F: FnOnce() -> Fut,
Fut: std::future::Future<Output = Result<T>>,
{
// Check if peer is faulty
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, std::sync::atomic::Ordering::Relaxed);
self.health.increment_waiting();
// Execute operation with timeout
let result = time::timeout(timeout_duration, operation()).await;
match result {
Ok(operation_result) => {
// Log success and decrement waiting counter
if operation_result.is_ok() {
self.health.log_success();
}
self.health.decrement_waiting();
operation_result
}
Err(_) => {
// Timeout occurred, mark peer as potentially faulty
self.health.decrement_waiting();
warn!("Remote peer operation timeout after {:?}", timeout_duration);
Err(Error::other(format!("Remote peer operation timeout after {:?}", timeout_duration)))
}
}
}
}
#[async_trait]
impl PeerS3Client for RemotePeerS3Client {
fn get_pools(&self) -> Option<Vec<usize>> {
self.pools.clone()
}
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
self.execute_with_timeout(
|| async {
let options: String = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(HealBucketRequest {
bucket: bucket.to_string(),
options,
});
let response = client.heal_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::other(""))
};
}
Ok(HealResultItem {
heal_item_type: HealItemType::Bucket.to_string(),
bucket: bucket.to_string(),
set_count: 0,
..Default::default()
})
},
get_max_timeout_duration(),
)
.await
}
async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
self.execute_with_timeout(
|| async {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListBucketRequest { options });
let response = client.list_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::other(""))
};
}
let bucket_infos = response
.bucket_infos
.into_iter()
.filter_map(|json_str| serde_json::from_str::<BucketInfo>(&json_str).ok())
.collect();
Ok(bucket_infos)
},
get_max_timeout_duration(),
)
.await
}
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
self.execute_with_timeout(
|| async {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeBucketRequest {
name: bucket.to_string(),
options,
});
let response = client.make_bucket(request).await?.into_inner();
// TODO: deal with error
if !response.success {
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::other(""))
};
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo> {
self.execute_with_timeout(
|| async {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GetBucketInfoRequest {
bucket: bucket.to_string(),
options,
});
let response = client.get_bucket_info(request).await?.into_inner();
if !response.success {
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::other(""))
};
}
let bucket_info = serde_json::from_str::<BucketInfo>(&response.bucket_info)?;
Ok(bucket_info)
},
get_max_timeout_duration(),
)
.await
}
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
self.execute_with_timeout(
|| async {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteBucketRequest {
bucket: bucket.to_string(),
});
let response = client.delete_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::other(""))
};
}
Ok(())
},
get_max_timeout_duration(),
)
.await
}
}
pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
let disks = clone_drives().await;
let before_state = Arc::new(RwLock::new(vec![String::new(); disks.len()]));
let after_state = Arc::new(RwLock::new(vec![String::new(); disks.len()]));
let mut futures = Vec::new();
for (index, disk) in disks.iter().enumerate() {
let disk = disk.clone();
let bucket = bucket.to_string();
let bs_clone = before_state.clone();
let as_clone = after_state.clone();
futures.push(async move {
let disk = match disk {
Some(disk) => disk,
None => {
bs_clone.write().await[index] = DriveState::Offline.to_string();
as_clone.write().await[index] = DriveState::Offline.to_string();
return Some(Error::DiskNotFound);
}
};
bs_clone.write().await[index] = DriveState::Ok.to_string();
as_clone.write().await[index] = DriveState::Ok.to_string();
if bucket == RUSTFS_RESERVED_BUCKET {
return None;
}
match disk.stat_volume(&bucket).await {
Ok(_) => None,
Err(err) => match err {
Error::DiskNotFound => {
bs_clone.write().await[index] = DriveState::Offline.to_string();
as_clone.write().await[index] = DriveState::Offline.to_string();
Some(err)
}
Error::VolumeNotFound => {
bs_clone.write().await[index] = DriveState::Missing.to_string();
as_clone.write().await[index] = DriveState::Missing.to_string();
Some(err)
}
_ => {
bs_clone.write().await[index] = DriveState::Corrupt.to_string();
as_clone.write().await[index] = DriveState::Corrupt.to_string();
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rpc/peer_rest_client.rs | crates/ecstore/src/rpc/peer_rest_client.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, Result};
use crate::{
endpoints::EndpointServerPools,
global::is_dist_erasure,
metrics_realtime::{CollectMetricsOpts, MetricType},
};
use rmp_serde::{Deserializer, Serializer};
use rustfs_madmin::{
ServerProperties,
health::{Cpus, MemInfo, OsInfo, Partitions, ProcInfo, SysConfig, SysErrors, SysService},
metrics::RealtimeMetrics,
net::NetInfo,
};
use rustfs_protos::{
evict_failed_connection, node_service_time_out_client,
proto_gen::node_service::{
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
GetSeLinuxInfoRequest, GetSysConfigRequest, GetSysErrorsRequest, LoadBucketMetadataRequest, LoadGroupRequest,
LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
},
};
use rustfs_utils::XHost;
use serde::{Deserialize, Serialize as _};
use std::{collections::HashMap, io::Cursor, time::SystemTime};
use tonic::Request;
use tracing::warn;
pub const PEER_RESTSIGNAL: &str = "signal";
pub const PEER_RESTSUB_SYS: &str = "sub-sys";
pub const PEER_RESTDRY_RUN: &str = "dry-run";
#[derive(Clone, Debug)]
pub struct PeerRestClient {
pub host: XHost,
pub grid_host: String,
}
impl PeerRestClient {
pub fn new(host: XHost, grid_host: String) -> Self {
Self { host, grid_host }
}
pub async fn new_clients(eps: EndpointServerPools) -> (Vec<Option<Self>>, Vec<Option<Self>>) {
if !is_dist_erasure().await {
return (Vec::new(), Vec::new());
}
let eps = eps.clone();
let hosts = eps.hosts_sorted();
let mut remote = Vec::with_capacity(hosts.len());
let mut all = vec![None; hosts.len()];
for (i, hs_host) in hosts.iter().enumerate() {
if let Some(host) = hs_host
&& let Some(grid_host) = eps.find_grid_hosts_from_peer(host)
{
let client = PeerRestClient::new(host.clone(), grid_host);
all[i] = Some(client.clone());
remote.push(Some(client));
}
}
if all.len() != remote.len() + 1 {
warn!("Expected number of all hosts ({}) to be remote +1 ({})", all.len(), remote.len());
}
(remote, all)
}
/// Evict the connection to this peer from the global cache.
/// This should be called when communication with this peer fails.
pub async fn evict_connection(&self) {
evict_failed_connection(&self.grid_host).await;
}
}
impl PeerRestClient {
pub async fn local_storage_info(&self) -> Result<rustfs_madmin::StorageInfo> {
let result = self.local_storage_info_inner().await;
if result.is_err() {
// Evict stale connection on any error for cluster recovery
self.evict_connection().await;
}
result
}
async fn local_storage_info_inner(&self) -> Result<rustfs_madmin::StorageInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LocalStorageInfoRequest { metrics: true });
let response = client.local_storage_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.storage_info;
let mut buf = Deserializer::new(Cursor::new(data));
let storage_info: rustfs_madmin::StorageInfo = Deserialize::deserialize(&mut buf)?;
Ok(storage_info)
}
pub async fn server_info(&self) -> Result<ServerProperties> {
let result = self.server_info_inner().await;
if result.is_err() {
// Evict stale connection on any error for cluster recovery
self.evict_connection().await;
}
result
}
async fn server_info_inner(&self) -> Result<ServerProperties> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ServerInfoRequest { metrics: true });
let response = client.server_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.server_properties;
let mut buf = Deserializer::new(Cursor::new(data));
let storage_properties: ServerProperties = Deserialize::deserialize(&mut buf)?;
Ok(storage_properties)
}
pub async fn get_cpus(&self) -> Result<Cpus> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetCpusRequest {});
let response = client.get_cpus(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.cpus;
let mut buf = Deserializer::new(Cursor::new(data));
let cpus: Cpus = Deserialize::deserialize(&mut buf)?;
Ok(cpus)
}
pub async fn get_net_info(&self) -> Result<NetInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetNetInfoRequest {});
let response = client.get_net_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.net_info;
let mut buf = Deserializer::new(Cursor::new(data));
let net_info: NetInfo = Deserialize::deserialize(&mut buf)?;
Ok(net_info)
}
pub async fn get_partitions(&self) -> Result<Partitions> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetPartitionsRequest {});
let response = client.get_partitions(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.partitions;
let mut buf = Deserializer::new(Cursor::new(data));
let partitions: Partitions = Deserialize::deserialize(&mut buf)?;
Ok(partitions)
}
pub async fn get_os_info(&self) -> Result<OsInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetOsInfoRequest {});
let response = client.get_os_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.os_info;
let mut buf = Deserializer::new(Cursor::new(data));
let os_info: OsInfo = Deserialize::deserialize(&mut buf)?;
Ok(os_info)
}
pub async fn get_se_linux_info(&self) -> Result<SysService> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSeLinuxInfoRequest {});
let response = client.get_se_linux_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.sys_services;
let mut buf = Deserializer::new(Cursor::new(data));
let sys_services: SysService = Deserialize::deserialize(&mut buf)?;
Ok(sys_services)
}
pub async fn get_sys_config(&self) -> Result<SysConfig> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSysConfigRequest {});
let response = client.get_sys_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.sys_config;
let mut buf = Deserializer::new(Cursor::new(data));
let sys_config: SysConfig = Deserialize::deserialize(&mut buf)?;
Ok(sys_config)
}
pub async fn get_sys_errors(&self) -> Result<SysErrors> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSysErrorsRequest {});
let response = client.get_sys_errors(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.sys_errors;
let mut buf = Deserializer::new(Cursor::new(data));
let sys_errors: SysErrors = Deserialize::deserialize(&mut buf)?;
Ok(sys_errors)
}
pub async fn get_mem_info(&self) -> Result<MemInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetMemInfoRequest {});
let response = client.get_mem_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.mem_info;
let mut buf = Deserializer::new(Cursor::new(data));
let mem_info: MemInfo = Deserialize::deserialize(&mut buf)?;
Ok(mem_info)
}
pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Result<RealtimeMetrics> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let mut buf_t = Vec::new();
t.serialize(&mut Serializer::new(&mut buf_t))?;
let mut buf_o = Vec::new();
opts.serialize(&mut Serializer::new(&mut buf_o))?;
let request = Request::new(GetMetricsRequest {
metric_type: buf_t.into(),
opts: buf_o.into(),
});
let response = client.get_metrics(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.realtime_metrics;
let mut buf = Deserializer::new(Cursor::new(data));
let realtime_metrics: RealtimeMetrics = Deserialize::deserialize(&mut buf)?;
Ok(realtime_metrics)
}
pub async fn get_proc_info(&self) -> Result<ProcInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetProcInfoRequest {});
let response = client.get_proc_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
let data = response.proc_info;
let mut buf = Deserializer::new(Cursor::new(data));
let proc_info: ProcInfo = Deserialize::deserialize(&mut buf)?;
Ok(proc_info)
}
pub async fn start_profiling(&self, profiler: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(StartProfilingRequest {
profiler: profiler.to_string(),
});
let response = client.start_profiling(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn download_profile_data(&self) -> Result<()> {
todo!()
}
pub async fn get_bucket_stats(&self) -> Result<()> {
todo!()
}
pub async fn get_sr_metrics(&self) -> Result<()> {
todo!()
}
pub async fn get_all_bucket_stats(&self) -> Result<()> {
todo!()
}
pub async fn load_bucket_metadata(&self, bucket: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadBucketMetadataRequest {
bucket: bucket.to_string(),
});
let response = client.load_bucket_metadata(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn delete_bucket_metadata(&self, bucket: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteBucketMetadataRequest {
bucket: bucket.to_string(),
});
let response = client.delete_bucket_metadata(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn delete_policy(&self, policy: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeletePolicyRequest {
policy_name: policy.to_string(),
});
let response = client.delete_policy(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_policy(&self, policy: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadPolicyRequest {
policy_name: policy.to_string(),
});
let response = client.load_policy(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadPolicyMappingRequest {
user_or_group: user_or_group.to_string(),
user_type,
is_group,
});
let response = client.load_policy_mapping(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn delete_user(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteUserRequest {
access_key: access_key.to_string(),
});
let result = client.delete_user(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn delete_service_account(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteServiceAccountRequest {
access_key: access_key.to_string(),
});
let result = client.delete_service_account(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_user(&self, access_key: &str, temp: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadUserRequest {
access_key: access_key.to_string(),
temp,
});
let result = client.load_user(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_service_account(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadServiceAccountRequest {
access_key: access_key.to_string(),
});
let result = client.load_service_account(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_group(&self, group: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadGroupRequest {
group: group.to_string(),
});
let result = client.load_group(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn reload_site_replication_config(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ReloadSiteReplicationConfigRequest {});
let response = client.reload_site_replication_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn signal_service(&self, sig: u64, sub_sys: &str, dry_run: bool, _exec_at: SystemTime) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let mut vars = HashMap::new();
vars.insert(PEER_RESTSIGNAL.to_string(), sig.to_string());
vars.insert(PEER_RESTSUB_SYS.to_string(), sub_sys.to_string());
vars.insert(PEER_RESTDRY_RUN.to_string(), dry_run.to_string());
let request = Request::new(SignalServiceRequest {
vars: Some(Mss { value: vars }),
});
let response = client.signal_service(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn get_metacache_listing(&self) -> Result<()> {
let _client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
todo!()
}
pub async fn update_metacache_listing(&self) -> Result<()> {
let _client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
todo!()
}
pub async fn reload_pool_meta(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ReloadPoolMetaRequest {});
let response = client.reload_pool_meta(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn stop_rebalance(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(StopRebalanceRequest {});
let response = client.stop_rebalance(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_rebalance_meta(&self, start_rebalance: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadRebalanceMetaRequest { start_rebalance });
let response = client.load_rebalance_meta(request).await?.into_inner();
warn!("load_rebalance_meta response {:?}, grid_host: {:?}", response, &self.grid_host);
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
pub async fn load_transition_tier_config(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadTransitionTierConfigRequest {});
let response = client.load_transition_tier_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
}
return Err(Error::other(""));
}
Ok(())
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rpc/http_auth.rs | crates/ecstore/src/rpc/http_auth.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::Engine as _;
use base64::engine::general_purpose;
use hmac::{Hmac, KeyInit, Mac};
use http::HeaderMap;
use http::HeaderValue;
use http::Method;
use http::Uri;
use rustfs_credentials::get_global_action_cred;
use sha2::Sha256;
use time::OffsetDateTime;
use tracing::error;
type HmacSha256 = Hmac<Sha256>;
const SIGNATURE_HEADER: &str = "x-rustfs-signature";
const TIMESTAMP_HEADER: &str = "x-rustfs-timestamp";
const SIGNATURE_VALID_DURATION: i64 = 300; // 5 minutes
/// Get the shared secret for HMAC signing
fn get_shared_secret() -> String {
if let Some(cred) = get_global_action_cred() {
cred.secret_key
} else {
// Fallback to environment variable if global credentials are not available
std::env::var("RUSTFS_RPC_SECRET").unwrap_or_else(|_| "rustfs-default-secret".to_string())
}
}
/// Generate HMAC-SHA256 signature for the given data
fn generate_signature(secret: &str, url: &str, method: &Method, timestamp: i64) -> String {
let uri: Uri = url.parse().expect("Invalid URL");
let path_and_query = uri.path_and_query().unwrap();
let url = path_and_query.to_string();
let data = format!("{url}|{method}|{timestamp}");
let mut mac = HmacSha256::new_from_slice(secret.as_bytes()).expect("HMAC can take key of any size");
mac.update(data.as_bytes());
let result = mac.finalize();
general_purpose::STANDARD.encode(result.into_bytes())
}
/// Build headers with authentication signature
pub fn build_auth_headers(url: &str, method: &Method, headers: &mut HeaderMap) {
let secret = get_shared_secret();
let timestamp = OffsetDateTime::now_utc().unix_timestamp();
let signature = generate_signature(&secret, url, method, timestamp);
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(×tamp.to_string()).unwrap());
}
/// Verify the request signature for RPC requests
pub fn verify_rpc_signature(url: &str, method: &Method, headers: &HeaderMap) -> std::io::Result<()> {
let secret = get_shared_secret();
// Get signature from header
let signature = headers
.get(SIGNATURE_HEADER)
.and_then(|v| v.to_str().ok())
.ok_or_else(|| std::io::Error::other("Missing signature header"))?;
// Get timestamp from header
let timestamp_str = headers
.get(TIMESTAMP_HEADER)
.and_then(|v| v.to_str().ok())
.ok_or_else(|| std::io::Error::other("Missing timestamp header"))?;
let timestamp: i64 = timestamp_str
.parse()
.map_err(|_| std::io::Error::other("Invalid timestamp format"))?;
// Check timestamp validity (prevent replay attacks)
let current_time = OffsetDateTime::now_utc().unix_timestamp();
if current_time.saturating_sub(timestamp) > SIGNATURE_VALID_DURATION {
return Err(std::io::Error::other("Request timestamp expired"));
}
// Generate expected signature
let expected_signature = generate_signature(&secret, url, method, timestamp);
// Compare signatures
if signature != expected_signature {
error!(
"verify_rpc_signature: Invalid signature: secret {}, url {}, method {}, timestamp {}, signature {}, expected_signature {}",
secret, url, method, timestamp, signature, expected_signature
);
return Err(std::io::Error::other("Invalid signature"));
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use http::{HeaderMap, Method};
use time::OffsetDateTime;
#[test]
fn test_get_shared_secret() {
let secret = get_shared_secret();
assert!(!secret.is_empty(), "Secret should not be empty");
let url = "http://node1:7000/rustfs/rpc/read_file_stream?disk=http%3A%2F%2Fnode1%3A7000%2Fdata%2Frustfs3&volume=.rustfs.sys&path=pool.bin%2Fdd0fd773-a962-4265-b543-783ce83953e9%2Fpart.1&offset=0&length=44";
let method = Method::GET;
let mut headers = HeaderMap::new();
build_auth_headers(url, &method, &mut headers);
let url = "/rustfs/rpc/read_file_stream?disk=http%3A%2F%2Fnode1%3A7000%2Fdata%2Frustfs3&volume=.rustfs.sys&path=pool.bin%2Fdd0fd773-a962-4265-b543-783ce83953e9%2Fpart.1&offset=0&length=44";
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_ok(), "Valid signature should pass verification");
}
#[test]
fn test_generate_signature_deterministic() {
let secret = "test-secret";
let url = "http://example.com/api/test";
let method = Method::GET;
let timestamp = 1640995200; // Fixed timestamp
let signature1 = generate_signature(secret, url, &method, timestamp);
let signature2 = generate_signature(secret, url, &method, timestamp);
assert_eq!(signature1, signature2, "Same inputs should produce same signature");
assert!(!signature1.is_empty(), "Signature should not be empty");
}
#[test]
fn test_generate_signature_different_inputs() {
let secret = "test-secret";
let url = "http://example.com/api/test";
let method = Method::GET;
let timestamp = 1640995200;
let signature1 = generate_signature(secret, url, &method, timestamp);
let signature2 = generate_signature(secret, "http://different.com/api/test2", &method, timestamp);
let signature3 = generate_signature(secret, url, &Method::POST, timestamp);
let signature4 = generate_signature(secret, url, &method, timestamp + 1);
assert_ne!(signature1, signature2, "Different URLs should produce different signatures");
assert_ne!(signature1, signature3, "Different methods should produce different signatures");
assert_ne!(signature1, signature4, "Different timestamps should produce different signatures");
}
#[test]
fn test_build_auth_headers() {
let url = "http://example.com/api/test";
let method = Method::POST;
let mut headers = HeaderMap::new();
build_auth_headers(url, &method, &mut headers);
// Verify headers are present
assert!(headers.contains_key(SIGNATURE_HEADER), "Should contain signature header");
assert!(headers.contains_key(TIMESTAMP_HEADER), "Should contain timestamp header");
// Verify header values are not empty
let signature = headers.get(SIGNATURE_HEADER).unwrap().to_str().unwrap();
let timestamp_str = headers.get(TIMESTAMP_HEADER).unwrap().to_str().unwrap();
assert!(!signature.is_empty(), "Signature should not be empty");
assert!(!timestamp_str.is_empty(), "Timestamp should not be empty");
// Verify timestamp is a valid integer
let timestamp: i64 = timestamp_str.parse().expect("Timestamp should be valid integer");
let current_time = OffsetDateTime::now_utc().unix_timestamp();
// Should be within a reasonable range (within 1 second of current time)
assert!((current_time - timestamp).abs() <= 1, "Timestamp should be close to current time");
}
#[test]
fn test_verify_rpc_signature_success() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Build headers with valid signature
build_auth_headers(url, &method, &mut headers);
// Verify should succeed
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_ok(), "Valid signature should pass verification");
}
#[test]
fn test_verify_rpc_signature_invalid_signature() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Build headers with valid signature first
build_auth_headers(url, &method, &mut headers);
// Tamper with the signature
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str("invalid-signature").unwrap());
// Verify should fail
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Invalid signature should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Invalid signature");
}
#[test]
fn test_verify_rpc_signature_expired_timestamp() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Set expired timestamp (older than SIGNATURE_VALID_DURATION)
let expired_timestamp = OffsetDateTime::now_utc().unix_timestamp() - SIGNATURE_VALID_DURATION - 10;
let secret = get_shared_secret();
let signature = generate_signature(&secret, url, &method, expired_timestamp);
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(&expired_timestamp.to_string()).unwrap());
// Verify should fail due to expired timestamp
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Expired timestamp should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Request timestamp expired");
}
#[test]
fn test_verify_rpc_signature_missing_signature_header() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Add only timestamp header, missing signature
let timestamp = OffsetDateTime::now_utc().unix_timestamp();
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(×tamp.to_string()).unwrap());
// Verify should fail
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Missing signature header should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Missing signature header");
}
#[test]
fn test_verify_rpc_signature_missing_timestamp_header() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Add only signature header, missing timestamp
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str("some-signature").unwrap());
// Verify should fail
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Missing timestamp header should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Missing timestamp header");
}
#[test]
fn test_verify_rpc_signature_invalid_timestamp_format() {
let url = "http://example.com/api/test";
let method = Method::GET;
let mut headers = HeaderMap::new();
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str("some-signature").unwrap());
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str("invalid-timestamp").unwrap());
// Verify should fail
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Invalid timestamp format should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Invalid timestamp format");
}
#[test]
fn test_verify_rpc_signature_url_mismatch() {
let original_url = "http://example.com/api/test";
let different_url = "http://example.com/api/different";
let method = Method::GET;
let mut headers = HeaderMap::new();
// Build headers for one URL
build_auth_headers(original_url, &method, &mut headers);
// Try to verify with a different URL
let result = verify_rpc_signature(different_url, &method, &headers);
assert!(result.is_err(), "URL mismatch should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Invalid signature");
}
#[test]
fn test_verify_rpc_signature_method_mismatch() {
let url = "http://example.com/api/test";
let original_method = Method::GET;
let different_method = Method::POST;
let mut headers = HeaderMap::new();
// Build headers for one method
build_auth_headers(url, &original_method, &mut headers);
// Try to verify with a different method
let result = verify_rpc_signature(url, &different_method, &headers);
assert!(result.is_err(), "Method mismatch should fail verification");
let error = result.unwrap_err();
assert_eq!(error.to_string(), "Invalid signature");
}
#[test]
fn test_signature_valid_duration_boundary() {
let url = "http://example.com/api/test";
let method = Method::GET;
let secret = get_shared_secret();
let mut headers = HeaderMap::new();
let current_time = OffsetDateTime::now_utc().unix_timestamp();
// Test timestamp just within valid duration
let valid_timestamp = current_time - SIGNATURE_VALID_DURATION + 1;
let signature = generate_signature(&secret, url, &method, valid_timestamp);
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(&valid_timestamp.to_string()).unwrap());
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_ok(), "Timestamp within valid duration should pass");
// Test timestamp just outside valid duration
let mut headers = HeaderMap::new();
let invalid_timestamp = current_time - SIGNATURE_VALID_DURATION - 15;
let signature = generate_signature(&secret, url, &method, invalid_timestamp);
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(&invalid_timestamp.to_string()).unwrap());
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_err(), "Timestamp outside valid duration should fail");
}
#[test]
fn test_round_trip_authentication() {
let test_cases = vec![
("http://example.com/api/test", Method::GET),
("https://api.rustfs.com/v1/bucket", Method::POST),
("http://localhost:9000/admin/info", Method::PUT),
("https://storage.example.com/path/to/object?query=param", Method::DELETE),
];
for (url, method) in test_cases {
let mut headers = HeaderMap::new();
// Build authentication headers
build_auth_headers(url, &method, &mut headers);
// Verify the signature should succeed
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_ok(), "Round-trip test failed for {method} {url}");
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/rpc/mod.rs | crates/ecstore/src/rpc/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod http_auth;
mod peer_rest_client;
mod peer_s3_client;
mod remote_disk;
pub use http_auth::{build_auth_headers, verify_rpc_signature};
pub use peer_rest_client::PeerRestClient;
pub use peer_s3_client::{LocalPeerS3Client, PeerS3Client, RemotePeerS3Client, S3PeerSys};
pub use remote_disk::RemoteDisk;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/disk_store.rs | crates/ecstore/src/disk/disk_store.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskError, DiskInfo, DiskInfoOptions, DiskLocation, Endpoint, Error,
FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, Result, UpdateMetadataOpts, VolumeInfo,
WalkDirOptions, local::LocalDisk,
};
use bytes::Bytes;
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_utils::string::parse_bool_with_default;
use std::{
path::PathBuf,
sync::{
Arc,
atomic::{AtomicI64, AtomicU32, Ordering},
},
time::Duration,
};
use tokio::{sync::RwLock, time};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use uuid::Uuid;
/// Disk health status constants
const DISK_HEALTH_OK: u32 = 0;
const DISK_HEALTH_FAULTY: u32 = 1;
pub const ENV_RUSTFS_DRIVE_ACTIVE_MONITORING: &str = "RUSTFS_DRIVE_ACTIVE_MONITORING";
pub const ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION: &str = "RUSTFS_DRIVE_MAX_TIMEOUT_DURATION";
pub const CHECK_EVERY: Duration = Duration::from_secs(15);
pub const SKIP_IF_SUCCESS_BEFORE: Duration = Duration::from_secs(5);
pub const CHECK_TIMEOUT_DURATION: Duration = Duration::from_secs(5);
lazy_static::lazy_static! {
static ref TEST_OBJ: String = format!("health-check-{}", Uuid::new_v4());
static ref TEST_DATA: Bytes = Bytes::from(vec![42u8; 2048]);
static ref TEST_BUCKET: String = ".rustfs.sys/tmp".to_string();
}
pub fn get_max_timeout_duration() -> Duration {
std::env::var(ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION)
.map(|v| Duration::from_secs(v.parse::<u64>().unwrap_or(30)))
.unwrap_or(Duration::from_secs(30))
}
/// DiskHealthTracker tracks the health status of a disk.
/// Similar to Go's diskHealthTracker.
#[derive(Debug)]
pub struct DiskHealthTracker {
/// Atomic timestamp of last successful operation
pub last_success: AtomicI64,
/// Atomic timestamp of last operation start
pub last_started: AtomicI64,
/// Atomic disk status (OK or Faulty)
pub status: AtomicU32,
/// Atomic number of waiting operations
pub waiting: AtomicU32,
}
impl DiskHealthTracker {
/// Create a new disk health tracker
pub fn new() -> Self {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
Self {
last_success: AtomicI64::new(now),
last_started: AtomicI64::new(now),
status: AtomicU32::new(DISK_HEALTH_OK),
waiting: AtomicU32::new(0),
}
}
/// Log a successful operation
pub fn log_success(&self) {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.last_success.store(now, Ordering::Relaxed);
}
/// Check if disk is faulty
pub fn is_faulty(&self) -> bool {
self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
}
/// Set disk as faulty
pub fn set_faulty(&self) {
self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
}
/// Set disk as OK
pub fn set_ok(&self) {
self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
}
pub fn swap_ok_to_faulty(&self) -> bool {
self.status
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
}
/// Increment waiting operations counter
pub fn increment_waiting(&self) {
self.waiting.fetch_add(1, Ordering::Relaxed);
}
/// Decrement waiting operations counter
pub fn decrement_waiting(&self) {
self.waiting.fetch_sub(1, Ordering::Relaxed);
}
/// Get waiting operations count
pub fn waiting_count(&self) -> u32 {
self.waiting.load(Ordering::Relaxed)
}
/// Get last success timestamp
pub fn last_success(&self) -> i64 {
self.last_success.load(Ordering::Relaxed)
}
}
impl Default for DiskHealthTracker {
fn default() -> Self {
Self::new()
}
}
/// Health check context key for tracking disk operations
#[derive(Debug, Clone)]
struct HealthDiskCtxKey;
#[derive(Debug)]
struct HealthDiskCtxValue {
last_success: Arc<AtomicI64>,
}
impl HealthDiskCtxValue {
fn log_success(&self) {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.last_success.store(now, Ordering::Relaxed);
}
}
/// LocalDiskWrapper wraps a DiskStore with health tracking capabilities.
/// This is similar to Go's xlStorageDiskIDCheck.
#[derive(Debug, Clone)]
pub struct LocalDiskWrapper {
/// The underlying disk store
disk: Arc<LocalDisk>,
/// Health tracker
health: Arc<DiskHealthTracker>,
/// Whether health checking is enabled
health_check: bool,
/// Cancellation token for monitoring tasks
cancel_token: CancellationToken,
/// Disk ID for stale checking
disk_id: Arc<RwLock<Option<Uuid>>>,
}
impl LocalDiskWrapper {
/// Create a new LocalDiskWrapper
pub fn new(disk: Arc<LocalDisk>, health_check: bool) -> Self {
// Check environment variable for health check override
// Default to true if not set, but only enable if both param and env are true
let env_health_check = std::env::var(ENV_RUSTFS_DRIVE_ACTIVE_MONITORING)
.map(|v| parse_bool_with_default(&v, true))
.unwrap_or(true);
let ret = Self {
disk,
health: Arc::new(DiskHealthTracker::new()),
health_check: health_check && env_health_check,
cancel_token: CancellationToken::new(),
disk_id: Arc::new(RwLock::new(None)),
};
ret.start_monitoring();
ret
}
pub fn get_disk(&self) -> Arc<LocalDisk> {
self.disk.clone()
}
/// Start the disk monitoring if health_check is enabled
pub fn start_monitoring(&self) {
if self.health_check {
let health = Arc::clone(&self.health);
let cancel_token = self.cancel_token.clone();
let disk = Arc::clone(&self.disk);
tokio::spawn(async move {
Self::monitor_disk_writable(disk, health, cancel_token).await;
});
}
}
/// Stop the disk monitoring
pub async fn stop_monitoring(&self) {
self.cancel_token.cancel();
}
/// Monitor disk writability periodically
async fn monitor_disk_writable(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
// TODO: config interval
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
if health.status.load(Ordering::Relaxed) != DISK_HEALTH_OK {
continue;
}
let last_success_nanos = health.last_success.load(Ordering::Relaxed);
let elapsed = Duration::from_nanos(
(std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64 - last_success_nanos) as u64
);
if elapsed < SKIP_IF_SUCCESS_BEFORE {
continue;
}
tokio::time::sleep(Duration::from_secs(1)).await;
debug!("health check: performing health check");
if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {
// Health check failed, disk is considered faulty
health.increment_waiting(); // Balance the increment from failed operation
let health_clone = Arc::clone(&health);
let disk_clone = disk.clone();
let cancel_clone = cancel_token.clone();
tokio::spawn(async move {
Self::monitor_disk_status(disk_clone, health_clone, cancel_clone).await;
});
}
}
}
}
}
/// Perform a health check by writing and reading a test file
async fn perform_health_check(
disk: Arc<LocalDisk>,
test_bucket: &str,
test_filename: &str,
test_data: &Bytes,
check_faulty_only: bool,
timeout_duration: Duration,
) -> Result<()> {
// Perform health check with timeout
let health_check_result = tokio::time::timeout(timeout_duration, async {
// Try to write test data
disk.write_all(test_bucket, test_filename, test_data.clone()).await?;
// Try to read back the data
let read_data = disk.read_all(test_bucket, test_filename).await?;
// Verify data integrity
if read_data.len() != test_data.len() {
warn!(
"health check: test file data length mismatch: expected {} bytes, got {}",
test_data.len(),
read_data.len()
);
if check_faulty_only {
return Ok(());
}
return Err(DiskError::FaultyDisk);
}
// Clean up
disk.delete(
test_bucket,
test_filename,
DeleteOptions {
recursive: false,
immediate: false,
undo_write: false,
old_data_dir: None,
},
)
.await?;
Ok(())
})
.await;
match health_check_result {
Ok(result) => match result {
Ok(()) => Ok(()),
Err(e) => {
debug!("health check: failed: {:?}", e);
if e == DiskError::FaultyDisk {
return Err(e);
}
if check_faulty_only { Ok(()) } else { Err(e) }
}
},
Err(_) => {
// Timeout occurred
warn!("health check: timeout after {:?}", timeout_duration);
Err(DiskError::FaultyDisk)
}
}
}
/// Monitor disk status and try to bring it back online
async fn monitor_disk_status(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
const CHECK_EVERY: Duration = Duration::from_secs(5);
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
match Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, false, CHECK_TIMEOUT_DURATION).await {
Ok(_) => {
info!("Disk {} is back online", disk.to_string());
health.set_ok();
health.decrement_waiting();
return;
}
Err(e) => {
warn!("Disk {} still faulty: {:?}", disk.to_string(), e);
}
}
}
}
}
}
async fn check_id(&self, want_id: Option<Uuid>) -> Result<()> {
if want_id.is_none() {
return Ok(());
}
let stored_disk_id = self.disk.get_disk_id().await?;
if stored_disk_id != want_id {
return Err(Error::other(format!("Disk ID mismatch wanted {:?}, got {:?}", want_id, stored_disk_id)));
}
Ok(())
}
/// Check if disk ID is stale
async fn check_disk_stale(&self) -> Result<()> {
let Some(current_disk_id) = *self.disk_id.read().await else {
return Ok(());
};
let stored_disk_id = match self.disk.get_disk_id().await? {
Some(id) => id,
None => return Ok(()), // Empty disk ID is allowed during initialization
};
if current_disk_id != stored_disk_id {
return Err(DiskError::DiskNotFound);
}
Ok(())
}
/// Set the disk ID
pub async fn set_disk_id_internal(&self, id: Option<Uuid>) -> Result<()> {
let mut disk_id = self.disk_id.write().await;
*disk_id = id;
Ok(())
}
/// Get the current disk ID
pub async fn get_current_disk_id(&self) -> Option<Uuid> {
*self.disk_id.read().await
}
/// Track disk health for an operation.
/// This method should wrap disk operations to ensure health checking.
pub async fn track_disk_health<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
where
F: FnOnce() -> Fut,
Fut: std::future::Future<Output = Result<T>>,
{
// Check if disk is faulty
if self.health.is_faulty() {
warn!("disk {} health is faulty, returning error", self.to_string());
return Err(DiskError::FaultyDisk);
}
// Check if disk is stale
self.check_disk_stale().await?;
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, Ordering::Relaxed);
self.health.increment_waiting();
if timeout_duration == Duration::ZERO {
let result = operation().await;
self.health.decrement_waiting();
if result.is_ok() {
self.health.log_success();
}
return result;
}
// Execute the operation with timeout
let result = tokio::time::timeout(timeout_duration, operation()).await;
match result {
Ok(operation_result) => {
// Log success and decrement waiting counter
if operation_result.is_ok() {
self.health.log_success();
}
self.health.decrement_waiting();
operation_result
}
Err(_) => {
// Timeout occurred, mark disk as potentially faulty and decrement waiting counter
self.health.decrement_waiting();
warn!("disk operation timeout after {:?}", timeout_duration);
Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
}
}
}
}
#[async_trait::async_trait]
impl DiskAPI for LocalDiskWrapper {
fn to_string(&self) -> String {
self.disk.to_string()
}
async fn is_online(&self) -> bool {
let Ok(Some(disk_id)) = self.disk.get_disk_id().await else {
return false;
};
let Some(current_disk_id) = *self.disk_id.read().await else {
return false;
};
current_disk_id == disk_id
}
fn is_local(&self) -> bool {
self.disk.is_local()
}
fn host_name(&self) -> String {
self.disk.host_name()
}
fn endpoint(&self) -> Endpoint {
self.disk.endpoint()
}
async fn close(&self) -> Result<()> {
self.stop_monitoring().await;
self.disk.close().await
}
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
self.disk.get_disk_id().await
}
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
self.set_disk_id_internal(id).await
}
fn path(&self) -> PathBuf {
self.disk.path()
}
fn get_disk_location(&self) -> DiskLocation {
self.disk.get_disk_location()
}
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
if opts.noop && opts.metrics {
let mut info = DiskInfo::default();
// Add health metrics
info.metrics.total_waiting = self.health.waiting_count();
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
return Ok(info);
}
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
let result = self.disk.disk_info(opts).await?;
if let Some(current_disk_id) = *self.disk_id.read().await
&& Some(current_disk_id) != result.id
{
return Err(DiskError::DiskNotFound);
};
Ok(result)
}
async fn make_volume(&self, volume: &str) -> Result<()> {
self.track_disk_health(|| async { self.disk.make_volume(volume).await }, get_max_timeout_duration())
.await
}
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
self.track_disk_health(|| async { self.disk.make_volumes(volumes).await }, get_max_timeout_duration())
.await
}
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
self.track_disk_health(|| async { self.disk.list_volumes().await }, Duration::ZERO)
.await
}
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
self.track_disk_health(|| async { self.disk.stat_volume(volume).await }, get_max_timeout_duration())
.await
}
async fn delete_volume(&self, volume: &str) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete_volume(volume).await }, Duration::ZERO)
.await
}
async fn walk_dir<W: tokio::io::AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
self.track_disk_health(|| async { self.disk.walk_dir(opts, wr).await }, Duration::ZERO)
.await
}
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
self.track_disk_health(
|| async { self.disk.delete_version(volume, path, fi, force_del_marker, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
// Check if disk is faulty before proceeding
if self.health.is_faulty() {
return vec![Some(DiskError::FaultyDisk); versions.len()];
}
// Check if disk is stale
if let Err(e) = self.check_disk_stale().await {
return vec![Some(e); versions.len()];
}
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, Ordering::Relaxed);
self.health.increment_waiting();
// Execute the operation
let result = self.disk.delete_versions(volume, versions, opts).await;
self.health.decrement_waiting();
let has_err = result.iter().any(|e| e.is_some());
if !has_err {
// Log success and decrement waiting counter
self.health.log_success();
}
result
}
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete_paths(volume, paths).await }, get_max_timeout_duration())
.await
}
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
self.track_disk_health(
|| async { self.disk.write_metadata(org_volume, volume, path, fi).await },
get_max_timeout_duration(),
)
.await
}
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
self.track_disk_health(
|| async { self.disk.update_metadata(volume, path, fi, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn read_version(
&self,
org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
self.track_disk_health(
|| async { self.disk.read_version(org_volume, volume, path, version_id, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
self.track_disk_health(|| async { self.disk.read_xl(volume, path, read_data).await }, get_max_timeout_duration())
.await
}
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
self.track_disk_health(
|| async { self.disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await },
get_max_timeout_duration(),
)
.await
}
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
self.track_disk_health(
|| async { self.disk.list_dir(origvolume, volume, dir_path, count).await },
get_max_timeout_duration(),
)
.await
}
async fn read_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileReader> {
self.track_disk_health(|| async { self.disk.read_file(volume, path).await }, get_max_timeout_duration())
.await
}
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<crate::disk::FileReader> {
self.track_disk_health(
|| async { self.disk.read_file_stream(volume, path, offset, length).await },
get_max_timeout_duration(),
)
.await
}
async fn append_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileWriter> {
self.track_disk_health(|| async { self.disk.append_file(volume, path).await }, Duration::ZERO)
.await
}
async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: i64) -> Result<crate::disk::FileWriter> {
self.track_disk_health(
|| async { self.disk.create_file(origvolume, volume, path, file_size).await },
Duration::ZERO,
)
.await
}
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
self.track_disk_health(
|| async { self.disk.rename_file(src_volume, src_path, dst_volume, dst_path).await },
get_max_timeout_duration(),
)
.await
}
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()> {
self.track_disk_health(
|| async { self.disk.rename_part(src_volume, src_path, dst_volume, dst_path, meta).await },
get_max_timeout_duration(),
)
.await
}
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete(volume, path, opt).await }, get_max_timeout_duration())
.await
}
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
self.track_disk_health(|| async { self.disk.verify_file(volume, path, fi).await }, Duration::ZERO)
.await
}
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
self.track_disk_health(|| async { self.disk.check_parts(volume, path, fi).await }, Duration::ZERO)
.await
}
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
self.track_disk_health(|| async { self.disk.read_parts(bucket, paths).await }, Duration::ZERO)
.await
}
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
self.track_disk_health(|| async { self.disk.read_multiple(req).await }, Duration::ZERO)
.await
}
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()> {
self.track_disk_health(|| async { self.disk.write_all(volume, path, data).await }, get_max_timeout_duration())
.await
}
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes> {
self.track_disk_health(|| async { self.disk.read_all(volume, path).await }, get_max_timeout_duration())
.await
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/error_reduce.rs | crates/ecstore/src/disk/error_reduce.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::Error;
pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,
Error::FaultyDisk,
Error::FaultyRemoteDisk,
Error::DiskAccessDenied,
Error::DiskOngoingReq,
Error::UnformattedDisk,
];
pub static BUCKET_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,
Error::FaultyDisk,
Error::FaultyRemoteDisk,
Error::DiskAccessDenied,
Error::UnformattedDisk,
];
pub static BASE_IGNORED_ERRS: &[Error] = &[Error::DiskNotFound, Error::FaultyDisk, Error::FaultyRemoteDisk];
pub fn reduce_write_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorun: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorun, Error::ErasureWriteQuorum)
}
pub fn reduce_read_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorun: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorun, Error::ErasureReadQuorum)
}
pub fn reduce_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorun: usize, quorun_err: Error) -> Option<Error> {
let (max_count, err) = reduce_errs(errors, ignored_errs);
if max_count >= quorun { err } else { Some(quorun_err) }
}
pub fn reduce_errs(errors: &[Option<Error>], ignored_errs: &[Error]) -> (usize, Option<Error>) {
let nil_error = Error::other("nil".to_string());
// First count the number of None values (treated as nil errors)
let nil_count = errors.iter().filter(|e| e.is_none()).count();
let err_counts = errors
.iter()
.filter_map(|e| e.as_ref()) // Only process errors stored in Some
.fold(std::collections::HashMap::new(), |mut acc, e| {
if is_ignored_err(ignored_errs, e) {
return acc;
}
*acc.entry(e.clone()).or_insert(0) += 1;
acc
});
// Find the most frequent non-nil error
let (best_err, best_count) = err_counts
.into_iter()
.max_by(|(_, c1), (_, c2)| c1.cmp(c2))
.unwrap_or((nil_error.clone(), 0));
// Compare nil errors with the top non-nil error and prefer the nil error
if nil_count > best_count || (nil_count == best_count && nil_count > 0) {
(nil_count, None)
} else {
(best_count, Some(best_err))
}
}
pub fn is_ignored_err(ignored_errs: &[Error], err: &Error) -> bool {
ignored_errs.iter().any(|e| e == err)
}
pub fn count_errs(errors: &[Option<Error>], err: &Error) -> usize {
errors.iter().filter(|&e| e.as_ref() == Some(err)).count()
}
pub fn is_all_buckets_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if err == &Error::DiskNotFound || err == &Error::VolumeNotFound {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
#[cfg(test)]
mod tests {
use super::*;
fn err_io(msg: &str) -> Error {
Error::Io(std::io::Error::other(msg))
}
#[test]
fn test_reduce_errs_basic() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_errs_ignored() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![e2.clone()];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_quorum_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
let quorum_err = Error::FaultyDisk;
// quorum = 2, should return e1
let res = reduce_quorum_errs(&errors, &ignored, 2, quorum_err.clone());
assert_eq!(res, Some(e1));
// quorum = 3, should return quorum error
let res = reduce_quorum_errs(&errors, &ignored, 3, quorum_err.clone());
assert_eq!(res, Some(quorum_err));
}
#[test]
fn test_count_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), None];
assert_eq!(count_errs(&errors, &e1), 2);
assert_eq!(count_errs(&errors, &e2), 1);
}
#[test]
fn test_is_ignored_err() {
let e1 = err_io("a");
let e2 = err_io("b");
let ignored = vec![e1.clone()];
assert!(is_ignored_err(&ignored, &e1));
assert!(!is_ignored_err(&ignored, &e2));
}
#[test]
fn test_reduce_errs_nil_tiebreak() {
// Error::Nil and another error have the same count, should prefer Nil
let e1 = err_io("a");
let errors = vec![Some(e1.clone()), None, Some(e1.clone()), None]; // e1:2, Nil:2
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, None); // None means Error::Nil is preferred
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/local.rs | crates/ecstore/src/disk/local.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::{Error, Result};
use super::os::{is_root_disk, rename_all};
use super::{
BUCKET_META_PREFIX, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
FileInfoVersions, RUSTFS_META_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, os,
};
use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
use crate::data_usage::local_snapshot::ensure_data_usage_layout;
use crate::disk::error::FileAccessDeniedWithContext;
use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error};
use crate::disk::fs::{
O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename,
};
use crate::disk::os::{check_path_length, is_empty_dir};
use crate::disk::{
CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, CHECK_PART_VOLUME_NOT_FOUND,
FileReader, RUSTFS_META_TMP_DELETED_BUCKET, conv_part_err_to_int,
};
use crate::disk::{FileWriter, STORAGE_FORMAT_FILE};
use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
use rustfs_utils::path::{
GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, clean, decode_dir_object, encode_dir_object, has_suffix,
path_join, path_join_buf,
};
use tokio::time::interval;
use crate::erasure_coding::bitrot_verify;
use bytes::Bytes;
// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
use parking_lot::RwLock as ParkingLotRwLock;
use rustfs_filemeta::{
Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
get_file_info, read_xl_meta_no_data,
};
use rustfs_utils::HashAlgorithm;
use rustfs_utils::os::get_info;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
use std::io::SeekFrom;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, OnceLock};
use std::time::Duration;
use std::{
fs::Metadata,
path::{Path, PathBuf},
};
use time::OffsetDateTime;
use tokio::fs::{self, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
#[derive(Debug, Clone)]
pub struct FormatInfo {
pub id: Option<Uuid>,
pub data: Bytes,
pub file_info: Option<Metadata>,
pub last_check: Option<OffsetDateTime>,
}
/// A helper enum to handle internal buffer types for writing data.
pub enum InternalBuf<'a> {
Ref(&'a [u8]),
Owned(Bytes),
}
pub struct LocalDisk {
pub root: PathBuf,
pub format_path: PathBuf,
pub format_info: RwLock<FormatInfo>,
pub endpoint: Endpoint,
pub disk_info_cache: Arc<Cache<DiskInfo>>,
pub scanning: AtomicU32,
pub rotational: bool,
pub fstype: String,
pub major: u64,
pub minor: u64,
pub nrrequests: u64,
// Performance optimization fields
path_cache: Arc<ParkingLotRwLock<HashMap<String, PathBuf>>>,
current_dir: Arc<OnceLock<PathBuf>>,
// pub id: Mutex<Option<Uuid>>,
// pub format_data: Mutex<Vec<u8>>,
// pub format_file_info: Mutex<Option<Metadata>>,
// pub format_last_check: Mutex<Option<OffsetDateTime>>,
exit_signal: Option<tokio::sync::broadcast::Sender<()>>,
}
impl Drop for LocalDisk {
fn drop(&mut self) {
if let Some(exit_signal) = self.exit_signal.take() {
let _ = exit_signal.send(());
}
}
}
impl Debug for LocalDisk {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LocalDisk")
.field("root", &self.root)
.field("format_path", &self.format_path)
.field("format_info", &self.format_info)
.field("endpoint", &self.endpoint)
.finish()
}
}
impl LocalDisk {
pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
debug!("Creating local disk");
// Use optimized path resolution instead of absolutize() for better performance
let root = match std::fs::canonicalize(ep.get_file_path()) {
Ok(path) => path,
Err(e) => {
if e.kind() == ErrorKind::NotFound {
return Err(DiskError::VolumeNotFound);
}
return Err(to_file_error(e).into());
}
};
ensure_data_usage_layout(&root).await.map_err(DiskError::from)?;
if cleanup {
// TODO: remove temporary data
}
// Use optimized path resolution instead of absolutize_virtually
let format_path = root.join(RUSTFS_META_BUCKET).join(super::FORMAT_CONFIG_FILE);
debug!("format_path: {:?}", format_path);
let (format_data, format_meta) = read_file_exists(&format_path).await?;
let mut id = None;
// let mut format_legacy = false;
let mut format_last_check = None;
if !format_data.is_empty() {
let s = format_data.as_ref();
let fm = FormatV3::try_from(s).map_err(Error::other)?;
let (set_idx, disk_idx) = fm.find_disk_index_by_disk_id(fm.erasure.this)?;
if set_idx as i32 != ep.set_idx || disk_idx as i32 != ep.disk_idx {
return Err(DiskError::InconsistentDisk);
}
id = Some(fm.erasure.this);
// format_legacy = fm.erasure.distribution_algo == DistributionAlgoVersion::V1;
format_last_check = Some(OffsetDateTime::now_utc());
}
let format_info = FormatInfo {
id,
data: format_data,
file_info: format_meta,
last_check: format_last_check,
};
let root_clone = root.clone();
let update_fn: UpdateFn<DiskInfo> = Box::new(move || {
let disk_id = id;
let root = root_clone.clone();
Box::pin(async move {
match get_disk_info(root.clone()).await {
Ok((info, root)) => {
let disk_info = DiskInfo {
total: info.total,
free: info.free,
used: info.used,
used_inodes: info.files - info.ffree,
free_inodes: info.ffree,
major: info.major,
minor: info.minor,
fs_type: info.fstype,
root_disk: root,
id: disk_id,
..Default::default()
};
// if root {
// return Err(Error::new(DiskError::DriveIsRoot));
// }
// disk_info.healing =
Ok(disk_info)
}
Err(err) => Err(err.into()),
}
})
});
let cache = Cache::new(update_fn, Duration::from_secs(1), Opts::default());
// TODO: DIRECT support
// TODD: DiskInfo
let mut disk = Self {
root: root.clone(),
endpoint: ep.clone(),
format_path,
format_info: RwLock::new(format_info),
disk_info_cache: Arc::new(cache),
scanning: AtomicU32::new(0),
rotational: Default::default(),
fstype: Default::default(),
minor: Default::default(),
major: Default::default(),
nrrequests: Default::default(),
// // format_legacy,
// format_file_info: Mutex::new(format_meta),
// format_data: Mutex::new(format_data),
// format_last_check: Mutex::new(format_last_check),
path_cache: Arc::new(ParkingLotRwLock::new(HashMap::with_capacity(2048))),
current_dir: Arc::new(OnceLock::new()),
exit_signal: None,
};
let (info, _root) = get_disk_info(root).await?;
disk.major = info.major;
disk.minor = info.minor;
disk.fstype = info.fstype;
// if root {
// return Err(Error::new(DiskError::DriveIsRoot));
// }
if info.nrrequests > 0 {
disk.nrrequests = info.nrrequests;
}
if info.rotational {
disk.rotational = true;
}
disk.make_meta_volumes().await?;
let (exit_tx, exit_rx) = tokio::sync::broadcast::channel(1);
disk.exit_signal = Some(exit_tx);
let root = disk.root.clone();
tokio::spawn(Self::cleanup_deleted_objects_loop(root, exit_rx));
debug!("LocalDisk created: {:?}", disk);
Ok(disk)
}
async fn cleanup_deleted_objects_loop(root: PathBuf, mut exit_rx: tokio::sync::broadcast::Receiver<()>) {
let mut interval = interval(Duration::from_secs(60 * 5));
loop {
tokio::select! {
_ = interval.tick() => {
if let Err(err) = Self::cleanup_deleted_objects(root.clone()).await {
error!("cleanup_deleted_objects error: {:?}", err);
}
}
_ = exit_rx.recv() => {
info!("cleanup_deleted_objects_loop exit");
break;
}
}
}
}
async fn cleanup_deleted_objects(root: PathBuf) -> Result<()> {
let trash = path_join(&[root, RUSTFS_META_TMP_DELETED_BUCKET.into()]);
let mut entries = fs::read_dir(&trash).await?;
while let Some(entry) = entries.next_entry().await? {
let name = entry.file_name().to_string_lossy().to_string();
if name.is_empty() || name == "." || name == ".." {
continue;
}
let file_type = entry.file_type().await?;
let path = path_join(&[trash.clone(), name.into()]);
if file_type.is_dir() {
if let Err(e) = tokio::fs::remove_dir_all(path).await
&& e.kind() != ErrorKind::NotFound
{
return Err(e.into());
}
} else if let Err(e) = tokio::fs::remove_file(path).await
&& e.kind() != ErrorKind::NotFound
{
return Err(e.into());
}
}
Ok(())
}
fn is_valid_volname(volname: &str) -> bool {
if volname.len() < 3 {
return false;
}
if cfg!(target_os = "windows") {
// Windows volume names must not include reserved characters.
// This regular expression matches disallowed characters.
if volname.contains('|')
|| volname.contains('<')
|| volname.contains('>')
|| volname.contains('?')
|| volname.contains('*')
|| volname.contains(':')
|| volname.contains('"')
|| volname.contains('\\')
{
return false;
}
} else {
// Non-Windows systems may require additional validation rules.
}
true
}
#[tracing::instrument(level = "debug", skip(self))]
async fn check_format_json(&self) -> Result<Metadata> {
let md = std::fs::metadata(&self.format_path).map_err(to_unformatted_disk_error)?;
Ok(md)
}
async fn make_meta_volumes(&self) -> Result<()> {
let buckets = format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}");
let multipart = format!("{}/{}", RUSTFS_META_BUCKET, "multipart");
let config = format!("{}/{}", RUSTFS_META_BUCKET, "config");
let tmp = format!("{}/{}", RUSTFS_META_BUCKET, "tmp");
let defaults = vec![
buckets.as_str(),
multipart.as_str(),
config.as_str(),
tmp.as_str(),
RUSTFS_META_TMP_DELETED_BUCKET,
];
self.make_volumes(defaults).await
}
// Optimized path resolution with caching
pub fn resolve_abs_path(&self, path: impl AsRef<Path>) -> Result<PathBuf> {
let path_ref = path.as_ref();
let path_str = path_ref.to_string_lossy();
// Fast cache read
{
let cache = self.path_cache.read();
if let Some(cached_path) = cache.get(path_str.as_ref()) {
return Ok(cached_path.clone());
}
}
// Calculate absolute path without using path_absolutize for better performance
let abs_path = if path_ref.is_absolute() {
path_ref.to_path_buf()
} else {
self.root.join(path_ref)
};
// Normalize path components to avoid filesystem calls
let normalized = self.normalize_path_components(&abs_path);
// Cache the result
{
let mut cache = self.path_cache.write();
// Simple cache size control
if cache.len() >= 4096 {
// Clear half the cache - simple eviction strategy
let keys_to_remove: Vec<_> = cache.keys().take(cache.len() / 2).cloned().collect();
for key in keys_to_remove {
cache.remove(&key);
}
}
cache.insert(path_str.into_owned(), normalized.clone());
}
Ok(normalized)
}
// Lightweight path normalization without filesystem calls
fn normalize_path_components(&self, path: &Path) -> PathBuf {
let mut result = PathBuf::new();
for component in path.components() {
match component {
std::path::Component::Normal(name) => {
result.push(name);
}
std::path::Component::ParentDir => {
result.pop();
}
std::path::Component::CurDir => {
// Ignore current directory components
}
std::path::Component::RootDir => {
result.push(component);
}
std::path::Component::Prefix(_prefix) => {
result.push(component);
}
}
}
result
}
// Highly optimized object path generation
pub fn get_object_path(&self, bucket: &str, key: &str) -> Result<PathBuf> {
// For high-frequency paths, use faster string concatenation
let cache_key = if key.is_empty() {
bucket.to_string()
} else {
// Use with_capacity to pre-allocate, reducing memory reallocations
let mut path_str = String::with_capacity(bucket.len() + key.len() + 1);
path_str.push_str(bucket);
path_str.push('/');
path_str.push_str(key);
path_str
};
// Fast path: directly calculate based on root, avoiding cache lookup overhead for simple cases
Ok(self.root.join(&cache_key))
}
pub fn get_bucket_path(&self, bucket: &str) -> Result<PathBuf> {
Ok(self.root.join(bucket))
}
// Batch path generation with single lock acquisition
pub fn get_object_paths_batch(&self, requests: &[(String, String)]) -> Result<Vec<PathBuf>> {
let mut results = Vec::with_capacity(requests.len());
let mut cache_misses = Vec::new();
// First attempt to get all paths from cache
{
let cache = self.path_cache.read();
for (i, (bucket, key)) in requests.iter().enumerate() {
let cache_key = format!("{bucket}/{key}");
if let Some(cached_path) = cache.get(&cache_key) {
results.push((i, cached_path.clone()));
} else {
cache_misses.push((i, bucket, key, cache_key));
}
}
}
// Handle cache misses
if !cache_misses.is_empty() {
let mut new_entries = Vec::new();
for (i, _bucket, _key, cache_key) in cache_misses {
let path = self.root.join(&cache_key);
results.push((i, path.clone()));
new_entries.push((cache_key, path));
}
// Batch update cache
{
let mut cache = self.path_cache.write();
for (key, path) in new_entries {
cache.insert(key, path);
}
}
}
// Sort results back to original order
results.sort_by_key(|(i, _)| *i);
Ok(results.into_iter().map(|(_, path)| path).collect())
}
// Optimized metadata reading with caching
pub async fn read_metadata_cached(&self, path: PathBuf) -> Result<Arc<FileMeta>> {
read_metadata_cached(path).await
}
// Smart prefetching for related files
pub async fn read_version_with_prefetch(
&self,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
let file_path = self.get_object_path(volume, path)?;
// Async prefetch related files, don't block current read
if let Some(parent) = file_path.parent() {
prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
}
// Main read logic
let file_dir = self.get_bucket_path(volume)?;
let (data, _) = self.read_raw(volume, file_dir, file_path, opts.read_data).await?;
get_file_info(&data, volume, path, version_id, FileInfoOpts { data: opts.read_data })
.await
.map_err(|_e| DiskError::Unexpected)
}
// Batch metadata reading for multiple objects
pub async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result<Vec<Option<Arc<FileMeta>>>> {
let paths: Vec<PathBuf> = requests
.iter()
.map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE)))
.collect::<Result<Vec<_>>>()?;
let cache = get_global_file_cache();
let results = cache.get_metadata_batch(paths).await;
Ok(results.into_iter().map(|r| r.ok()).collect())
}
// /// Write to the filesystem atomically.
// /// This is done by first writing to a temporary location and then moving the file.
// pub(crate) async fn prepare_file_write<'a>(&self, path: &'a PathBuf) -> Result<FileWriter<'a>> {
// let tmp_path = self.get_object_path(RUSTFS_META_TMP_BUCKET, Uuid::new_v4().to_string().as_str())?;
// debug!("prepare_file_write tmp_path:{:?}, path:{:?}", &tmp_path, &path);
// let file = File::create(&tmp_path).await?;
// let writer = BufWriter::new(file);
// Ok(FileWriter {
// tmp_path,
// dest_path: path,
// writer,
// clean_tmp: true,
// })
// }
#[allow(unreachable_code)]
#[allow(unused_variables)]
pub async fn move_to_trash(&self, delete_path: &PathBuf, recursive: bool, immediate_purge: bool) -> Result<()> {
// if recursive {
// remove_all_std(delete_path).map_err(to_volume_error)?;
// } else {
// remove_std(delete_path).map_err(to_file_error)?;
// }
// return Ok(());
// TODO: async notifications for disk space checks and trash cleanup
let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
// if let Some(parent) = trash_path.parent() {
// if !parent.exists() {
// fs::create_dir_all(parent).await?;
// }
// }
let err = if recursive {
rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
.await
.err()
} else {
rename(&delete_path, &trash_path)
.await
.map_err(|e| to_file_error(e).into())
.err()
};
if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR) {
let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
let _ = rename_all(
encode_dir_object(delete_path.to_string_lossy().as_ref()),
trash_path2,
self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
)
.await;
}
if let Some(err) = err {
if err == Error::DiskFull {
if recursive {
remove_all_std(delete_path).map_err(to_volume_error)?;
} else {
remove_std(delete_path).map_err(to_file_error)?;
}
}
return Ok(());
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
#[async_recursion::async_recursion]
pub async fn delete_file(
&self,
base_path: &PathBuf,
delete_path: &PathBuf,
recursive: bool,
immediate_purge: bool,
) -> Result<()> {
// debug!("delete_file {:?}\n base_path:{:?}", &delete_path, &base_path);
if is_root_path(base_path) || is_root_path(delete_path) {
// debug!("delete_file skip {:?}", &delete_path);
return Ok(());
}
if !delete_path.starts_with(base_path) || base_path == delete_path {
// debug!("delete_file skip {:?}", &delete_path);
return Ok(());
}
if recursive {
self.move_to_trash(delete_path, recursive, immediate_purge).await?;
} else if delete_path.is_dir() {
// debug!("delete_file remove_dir {:?}", &delete_path);
if let Err(err) = fs::remove_dir(&delete_path).await {
// debug!("remove_dir err {:?} when {:?}", &err, &delete_path);
match err.kind() {
ErrorKind::NotFound => (),
ErrorKind::DirectoryNotEmpty => (),
kind => {
warn!("delete_file remove_dir {:?} err {}", &delete_path, kind.to_string());
return Err(Error::other(FileAccessDeniedWithContext {
path: delete_path.clone(),
source: err,
}));
}
}
}
// debug!("delete_file remove_dir done {:?}", &delete_path);
} else if let Err(err) = fs::remove_file(&delete_path).await {
// debug!("remove_file err {:?} when {:?}", &err, &delete_path);
match err.kind() {
ErrorKind::NotFound => (),
_ => {
warn!("delete_file remove_file {:?} err {:?}", &delete_path, &err);
return Err(Error::other(FileAccessDeniedWithContext {
path: delete_path.clone(),
source: err,
}));
}
}
}
if let Some(dir_path) = delete_path.parent() {
Box::pin(self.delete_file(base_path, &PathBuf::from(dir_path), false, false)).await?;
}
// debug!("delete_file done {:?}", &delete_path);
Ok(())
}
/// read xl.meta raw data
#[tracing::instrument(level = "debug", skip(self, volume_dir, file_path))]
async fn read_raw(
&self,
bucket: &str,
volume_dir: impl AsRef<Path>,
file_path: impl AsRef<Path>,
read_data: bool,
) -> Result<(Vec<u8>, Option<OffsetDateTime>)> {
if file_path.as_ref().as_os_str().is_empty() {
return Err(DiskError::FileNotFound);
}
let meta_path = file_path.as_ref().join(Path::new(STORAGE_FORMAT_FILE));
let res = {
if read_data {
self.read_all_data_with_dmtime(bucket, volume_dir, meta_path).await
} else {
match self.read_metadata_with_dmtime(meta_path).await {
Ok(res) => Ok(res),
Err(err) => {
if err == Error::FileNotFound
&& !skip_access_checks(volume_dir.as_ref().to_string_lossy().to_string().as_str())
&& let Err(e) = access(volume_dir.as_ref()).await
&& e.kind() == ErrorKind::NotFound
{
// warn!("read_metadata_with_dmtime os err {:?}", &aerr);
return Err(DiskError::VolumeNotFound);
}
Err(err)
}
}
}
};
let (buf, mtime) = res?;
if buf.is_empty() {
return Err(DiskError::FileNotFound);
}
Ok((buf, mtime))
}
async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
// Try to use cached file content reading for better performance, with safe fallback
let path = file_path.as_ref().to_path_buf();
// First, try the cache
if let Ok(bytes) = get_global_file_cache().get_file_content(path.clone()).await {
return Ok(bytes.to_vec());
}
// Fallback to direct read if cache fails
let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
Ok(data)
}
async fn read_metadata_with_dmtime(&self, file_path: impl AsRef<Path>) -> Result<(Vec<u8>, Option<OffsetDateTime>)> {
check_path_length(file_path.as_ref().to_string_lossy().as_ref())?;
let mut f = super::fs::open_file(file_path.as_ref(), O_RDONLY)
.await
.map_err(to_file_error)?;
let meta = f.metadata().await.map_err(to_file_error)?;
if meta.is_dir() {
// fix use io::Error
return Err(Error::FileNotFound);
}
let size = meta.len() as usize;
let data = read_xl_meta_no_data(&mut f, size).await?;
let modtime = match meta.modified() {
Ok(md) => Some(OffsetDateTime::from(md)),
Err(_) => None,
};
Ok((data, modtime))
}
async fn read_all_data(&self, volume: &str, volume_dir: impl AsRef<Path>, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
// TODO: timeout support
let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir, file_path).await?;
Ok(data)
}
#[tracing::instrument(level = "debug", skip(self, volume_dir, file_path))]
async fn read_all_data_with_dmtime(
&self,
volume: &str,
volume_dir: impl AsRef<Path>,
file_path: impl AsRef<Path>,
) -> Result<(Vec<u8>, Option<OffsetDateTime>)> {
let mut f = match super::fs::open_file(file_path.as_ref(), O_RDONLY).await {
Ok(f) => f,
Err(e) => {
if e.kind() == ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(er) = access(volume_dir.as_ref()).await
&& er.kind() == ErrorKind::NotFound
{
warn!("read_all_data_with_dmtime os err {:?}", &er);
return Err(DiskError::VolumeNotFound);
}
return Err(to_file_error(e).into());
}
};
let meta = f.metadata().await.map_err(to_file_error)?;
if meta.is_dir() {
return Err(DiskError::FileNotFound);
}
let size = meta.len() as usize;
let mut bytes = Vec::new();
bytes.try_reserve_exact(size).map_err(Error::other)?;
f.read_to_end(&mut bytes).await.map_err(to_file_error)?;
let modtime = match meta.modified() {
Ok(md) => Some(OffsetDateTime::from(md)),
Err(_) => None,
};
Ok((bytes, modtime))
}
async fn delete_versions_internal(&self, volume: &str, path: &str, fis: &[FileInfo]) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
let xlpath = self.get_object_path(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str())?;
let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir.as_path(), &xlpath).await?;
if data.is_empty() {
return Err(DiskError::FileNotFound);
}
let mut fm = FileMeta::default();
fm.unmarshal_msg(&data)?;
for fi in fis.iter() {
let data_dir = match fm.delete_version(fi) {
Ok(res) => res,
Err(err) => {
let err: DiskError = err.into();
if !fi.deleted && (err == DiskError::FileNotFound || err == DiskError::FileVersionNotFound) {
continue;
}
return Err(err);
}
};
if let Some(dir) = data_dir {
let vid = fi.version_id.unwrap_or_default();
let _ = fm.data.remove(vec![vid, dir]);
let dir_path = self.get_object_path(volume, format!("{path}/{dir}").as_str())?;
if let Err(err) = self.move_to_trash(&dir_path, true, false).await
&& !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound)
{
return Err(err);
};
}
}
// Remove xl.meta when no versions remain
if fm.versions.is_empty() {
self.delete_file(&volume_dir, &xlpath, true, false).await?;
return Ok(());
}
// Update xl.meta
let buf = fm.marshal_msg()?;
let volume_dir = self.get_bucket_path(volume)?;
self.write_all_private(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str(), buf.into(), true, &volume_dir)
.await?;
Ok(())
}
async fn write_all_meta(&self, volume: &str, path: &str, buf: &[u8], sync: bool) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
let file_path = volume_dir.join(Path::new(&path));
check_path_length(file_path.to_string_lossy().as_ref())?;
let tmp_volume_dir = self.get_bucket_path(super::RUSTFS_META_TMP_BUCKET)?;
let tmp_file_path = tmp_volume_dir.join(Path::new(Uuid::new_v4().to_string().as_str()));
self.write_all_internal(&tmp_file_path, InternalBuf::Ref(buf), sync, &tmp_volume_dir)
.await?;
rename_all(tmp_file_path, file_path, volume_dir).await
}
// write_all_public for trail
async fn write_all_public(&self, volume: &str, path: &str, data: Bytes) -> Result<()> {
if volume == RUSTFS_META_BUCKET && path == super::FORMAT_CONFIG_FILE {
let mut format_info = self.format_info.write().await;
format_info.data.clone_from(&data);
}
let volume_dir = self.get_bucket_path(volume)?;
self.write_all_private(volume, path, data, true, &volume_dir).await?;
Ok(())
}
// write_all_private with check_path_length
#[tracing::instrument(level = "debug", skip_all)]
pub async fn write_all_private(&self, volume: &str, path: &str, buf: Bytes, sync: bool, skip_parent: &Path) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
let file_path = volume_dir.join(Path::new(&path));
check_path_length(file_path.to_string_lossy().as_ref())?;
self.write_all_internal(&file_path, InternalBuf::Owned(buf), sync, skip_parent)
.await
}
// write_all_internal do write file
pub async fn write_all_internal(
&self,
file_path: &Path,
data: InternalBuf<'_>,
sync: bool,
skip_parent: &Path,
) -> Result<()> {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/fs.rs | crates/ecstore/src/disk/fs.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
fs::Metadata,
path::Path,
sync::{Arc, OnceLock},
};
use tokio::{
fs::{self, File},
io,
};
static READONLY_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();
static WRITEONLY_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();
static READWRITE_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();
fn get_readonly_options() -> &'static Arc<fs::OpenOptions> {
READONLY_OPTIONS.get_or_init(|| {
let mut opts = fs::OpenOptions::new();
opts.read(true);
Arc::new(opts)
})
}
fn get_writeonly_options() -> &'static Arc<fs::OpenOptions> {
WRITEONLY_OPTIONS.get_or_init(|| {
let mut opts = fs::OpenOptions::new();
opts.write(true);
Arc::new(opts)
})
}
fn get_readwrite_options() -> &'static Arc<fs::OpenOptions> {
READWRITE_OPTIONS.get_or_init(|| {
let mut opts = fs::OpenOptions::new();
opts.read(true).write(true);
Arc::new(opts)
})
}
#[cfg(not(windows))]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
use std::os::unix::fs::MetadataExt;
if f1.dev() != f2.dev() {
return false;
}
if f1.ino() != f2.ino() {
return false;
}
if f1.size() != f2.size() {
return false;
}
if f1.permissions() != f2.permissions() {
return false;
}
if f1.mtime() != f2.mtime() {
return false;
}
true
}
#[cfg(windows)]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
if f1.permissions() != f2.permissions() {
return false;
}
if f1.file_type() != f2.file_type() {
return false;
}
if f1.len() != f2.len() {
return false;
}
true
}
type FileMode = usize;
pub const O_RDONLY: FileMode = 0x00000;
pub const O_WRONLY: FileMode = 0x00001;
pub const O_RDWR: FileMode = 0x00002;
pub const O_CREATE: FileMode = 0x00040;
// pub const O_EXCL: FileMode = 0x00080;
// pub const O_NOCTTY: FileMode = 0x00100;
pub const O_TRUNC: FileMode = 0x00200;
// pub const O_NONBLOCK: FileMode = 0x00800;
pub const O_APPEND: FileMode = 0x00400;
// pub const O_SYNC: FileMode = 0x01000;
// pub const O_ASYNC: FileMode = 0x02000;
// pub const O_CLOEXEC: FileMode = 0x80000;
// read: bool,
// write: bool,
// append: bool,
// truncate: bool,
// create: bool,
// create_new: bool,
pub async fn open_file(path: impl AsRef<Path>, mode: FileMode) -> io::Result<File> {
let base_opts = match mode & (O_RDONLY | O_WRONLY | O_RDWR) {
O_RDONLY => get_readonly_options(),
O_WRONLY => get_writeonly_options(),
O_RDWR => get_readwrite_options(),
_ => get_readonly_options(),
};
if (mode & (O_CREATE | O_APPEND | O_TRUNC)) != 0 {
let mut opts = (**base_opts).clone();
if mode & O_CREATE != 0 {
opts.create(true);
}
if mode & O_APPEND != 0 {
opts.append(true);
}
if mode & O_TRUNC != 0 {
opts.truncate(true);
}
opts.open(path.as_ref()).await
} else {
base_opts.open(path.as_ref()).await
}
}
pub async fn access(path: impl AsRef<Path>) -> io::Result<()> {
fs::metadata(path).await?;
Ok(())
}
pub fn access_std(path: impl AsRef<Path>) -> io::Result<()> {
std::fs::metadata(path)?;
Ok(())
}
pub async fn lstat(path: impl AsRef<Path>) -> io::Result<Metadata> {
fs::metadata(path).await
}
pub fn lstat_std(path: impl AsRef<Path>) -> io::Result<Metadata> {
std::fs::metadata(path)
}
pub async fn make_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir_all(path.as_ref()).await
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn remove(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
pub async fn remove_all(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir_all(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
#[tracing::instrument(level = "debug", skip_all)]
pub fn remove_std(path: impl AsRef<Path>) -> io::Result<()> {
let path = path.as_ref();
let meta = std::fs::metadata(path)?;
if meta.is_dir() {
std::fs::remove_dir(path)
} else {
std::fs::remove_file(path)
}
}
pub fn remove_all_std(path: impl AsRef<Path>) -> io::Result<()> {
let path = path.as_ref();
let meta = std::fs::metadata(path)?;
if meta.is_dir() {
std::fs::remove_dir_all(path)
} else {
std::fs::remove_file(path)
}
}
pub async fn mkdir(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir(path.as_ref()).await
}
pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
fs::rename(from, to).await
}
pub fn rename_std(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
std::fs::rename(from, to)
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn read_file(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
fs::read(path.as_ref()).await
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;
use tokio::io::AsyncWriteExt;
#[tokio::test]
async fn test_file_mode_constants() {
assert_eq!(O_RDONLY, 0x00000);
assert_eq!(O_WRONLY, 0x00001);
assert_eq!(O_RDWR, 0x00002);
assert_eq!(O_CREATE, 0x00040);
assert_eq!(O_TRUNC, 0x00200);
assert_eq!(O_APPEND, 0x00400);
}
#[tokio::test]
async fn test_open_file_read_only() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_readonly.txt");
// Create a test file
tokio::fs::write(&file_path, b"test content").await.unwrap();
// Test opening in read-only mode
let file = open_file(&file_path, O_RDONLY).await;
assert!(file.is_ok());
}
#[tokio::test]
async fn test_open_file_write_only() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_writeonly.txt");
// Test opening in write-only mode with create flag
let mut file = open_file(&file_path, O_WRONLY | O_CREATE).await.unwrap();
// Should be able to write
file.write_all(b"write test").await.unwrap();
file.flush().await.unwrap();
}
#[tokio::test]
async fn test_open_file_read_write() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_readwrite.txt");
// Test opening in read-write mode with create flag
let mut file = open_file(&file_path, O_RDWR | O_CREATE).await.unwrap();
// Should be able to write and read
file.write_all(b"read-write test").await.unwrap();
file.flush().await.unwrap();
}
#[tokio::test]
async fn test_open_file_append() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_append.txt");
// Create initial content
tokio::fs::write(&file_path, b"initial").await.unwrap();
// Open in append mode
let mut file = open_file(&file_path, O_WRONLY | O_APPEND).await.unwrap();
file.write_all(b" appended").await.unwrap();
file.flush().await.unwrap();
// Verify content
let content = tokio::fs::read_to_string(&file_path).await.unwrap();
assert_eq!(content, "initial appended");
}
#[tokio::test]
async fn test_open_file_truncate() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_truncate.txt");
// Create initial content
tokio::fs::write(&file_path, b"initial content").await.unwrap();
// Open with truncate flag
let mut file = open_file(&file_path, O_WRONLY | O_TRUNC).await.unwrap();
file.write_all(b"new").await.unwrap();
file.flush().await.unwrap();
// Verify content was truncated
let content = tokio::fs::read_to_string(&file_path).await.unwrap();
assert_eq!(content, "new");
}
#[tokio::test]
async fn test_access() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_access.txt");
// Should fail for non-existent file
assert!(access(&file_path).await.is_err());
// Create file and test again
tokio::fs::write(&file_path, b"test").await.unwrap();
assert!(access(&file_path).await.is_ok());
}
#[test]
fn test_access_std() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_access_std.txt");
// Should fail for non-existent file
assert!(access_std(&file_path).is_err());
// Create file and test again
std::fs::write(&file_path, b"test").unwrap();
assert!(access_std(&file_path).is_ok());
}
#[tokio::test]
async fn test_lstat() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_lstat.txt");
// Create test file
tokio::fs::write(&file_path, b"test content").await.unwrap();
// Test lstat
let metadata = lstat(&file_path).await.unwrap();
assert!(metadata.is_file());
assert_eq!(metadata.len(), 12); // "test content" is 12 bytes
}
#[test]
fn test_lstat_std() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_lstat_std.txt");
// Create test file
std::fs::write(&file_path, b"test content").unwrap();
// Test lstat_std
let metadata = lstat_std(&file_path).unwrap();
assert!(metadata.is_file());
assert_eq!(metadata.len(), 12); // "test content" is 12 bytes
}
#[tokio::test]
async fn test_make_dir_all() {
let temp_dir = TempDir::new().unwrap();
let nested_path = temp_dir.path().join("level1").join("level2").join("level3");
// Should create nested directories
assert!(make_dir_all(&nested_path).await.is_ok());
assert!(nested_path.exists());
assert!(nested_path.is_dir());
}
#[tokio::test]
async fn test_remove_file() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_remove.txt");
// Create test file
tokio::fs::write(&file_path, b"test").await.unwrap();
assert!(file_path.exists());
// Remove file
assert!(remove(&file_path).await.is_ok());
assert!(!file_path.exists());
}
#[tokio::test]
async fn test_remove_directory() {
let temp_dir = TempDir::new().unwrap();
let dir_path = temp_dir.path().join("test_remove_dir");
// Create test directory
tokio::fs::create_dir(&dir_path).await.unwrap();
assert!(dir_path.exists());
// Remove directory
assert!(remove(&dir_path).await.is_ok());
assert!(!dir_path.exists());
}
#[tokio::test]
async fn test_remove_all() {
let temp_dir = TempDir::new().unwrap();
let dir_path = temp_dir.path().join("test_remove_all");
let file_path = dir_path.join("nested_file.txt");
// Create nested structure
tokio::fs::create_dir(&dir_path).await.unwrap();
tokio::fs::write(&file_path, b"nested content").await.unwrap();
// Remove all
assert!(remove_all(&dir_path).await.is_ok());
assert!(!dir_path.exists());
}
#[test]
fn test_remove_std() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_remove_std.txt");
// Create test file
std::fs::write(&file_path, b"test").unwrap();
assert!(file_path.exists());
// Remove file
assert!(remove_std(&file_path).is_ok());
assert!(!file_path.exists());
}
#[test]
fn test_remove_all_std() {
let temp_dir = TempDir::new().unwrap();
let dir_path = temp_dir.path().join("test_remove_all_std");
let file_path = dir_path.join("nested_file.txt");
// Create nested structure
std::fs::create_dir(&dir_path).unwrap();
std::fs::write(&file_path, b"nested content").unwrap();
// Remove all
assert!(remove_all_std(&dir_path).is_ok());
assert!(!dir_path.exists());
}
#[tokio::test]
async fn test_mkdir() {
let temp_dir = TempDir::new().unwrap();
let dir_path = temp_dir.path().join("test_mkdir");
// Create directory
assert!(mkdir(&dir_path).await.is_ok());
assert!(dir_path.exists());
assert!(dir_path.is_dir());
}
#[tokio::test]
async fn test_rename() {
let temp_dir = TempDir::new().unwrap();
let old_path = temp_dir.path().join("old_name.txt");
let new_path = temp_dir.path().join("new_name.txt");
// Create test file
tokio::fs::write(&old_path, b"test content").await.unwrap();
assert!(old_path.exists());
assert!(!new_path.exists());
// Rename file
assert!(rename(&old_path, &new_path).await.is_ok());
assert!(!old_path.exists());
assert!(new_path.exists());
// Verify content preserved
let content = tokio::fs::read_to_string(&new_path).await.unwrap();
assert_eq!(content, "test content");
}
#[test]
fn test_rename_std() {
let temp_dir = TempDir::new().unwrap();
let old_path = temp_dir.path().join("old_name_std.txt");
let new_path = temp_dir.path().join("new_name_std.txt");
// Create test file
std::fs::write(&old_path, b"test content").unwrap();
assert!(old_path.exists());
assert!(!new_path.exists());
// Rename file
assert!(rename_std(&old_path, &new_path).is_ok());
assert!(!old_path.exists());
assert!(new_path.exists());
// Verify content preserved
let content = std::fs::read_to_string(&new_path).unwrap();
assert_eq!(content, "test content");
}
#[tokio::test]
async fn test_read_file() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_read.txt");
let test_content = b"This is test content for reading";
tokio::fs::write(&file_path, test_content).await.unwrap();
// Read file
let read_content = read_file(&file_path).await.unwrap();
assert_eq!(read_content, test_content);
}
#[tokio::test]
async fn test_read_file_nonexistent() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("nonexistent.txt");
// Should fail for non-existent file
assert!(read_file(&file_path).await.is_err());
}
#[tokio::test]
async fn test_same_file() {
let temp_dir = TempDir::new().unwrap();
let file_path = temp_dir.path().join("test_same.txt");
// Create test file
tokio::fs::write(&file_path, b"test content").await.unwrap();
// Get metadata twice
let metadata1 = tokio::fs::metadata(&file_path).await.unwrap();
let metadata2 = tokio::fs::metadata(&file_path).await.unwrap();
// Should be the same file
assert!(same_file(&metadata1, &metadata2));
}
#[tokio::test]
async fn test_different_files() {
let temp_dir = TempDir::new().unwrap();
let file1_path = temp_dir.path().join("file1.txt");
let file2_path = temp_dir.path().join("file2.txt");
// Create two different files
tokio::fs::write(&file1_path, b"content1").await.unwrap();
tokio::fs::write(&file2_path, b"content2").await.unwrap();
// Get metadata
let metadata1 = tokio::fs::metadata(&file1_path).await.unwrap();
let metadata2 = tokio::fs::metadata(&file2_path).await.unwrap();
// Should be different files
assert!(!same_file(&metadata1, &metadata2));
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/os.rs | crates/ecstore/src/disk/os.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
io,
path::{Component, Path},
};
use super::error::Result;
use crate::disk::error_conv::to_file_error;
use rustfs_utils::path::SLASH_SEPARATOR;
use tokio::fs;
use tracing::warn;
use super::error::DiskError;
/// Check path length according to OS limits.
pub fn check_path_length(path_name: &str) -> Result<()> {
// Apple OS X path length is limited to 1016
if cfg!(target_os = "macos") && path_name.len() > 1016 {
return Err(DiskError::FileNameTooLong);
}
// Disallow more than 1024 characters on windows, there
// are no known name_max limits on Windows.
if cfg!(target_os = "windows") && path_name.len() > 1024 {
return Err(DiskError::FileNameTooLong);
}
// On Unix we reject paths if they are just '.', '..' or '/'
let invalid_paths = [".", "..", "/"];
if invalid_paths.contains(&path_name) {
return Err(DiskError::FileAccessDenied);
}
// Check each path segment length is > 255 on all Unix
// platforms, look for this value as NAME_MAX in
// /usr/include/linux/limits.h
let mut count = 0usize;
for c in path_name.chars() {
match c {
'/' => count = 0,
'\\' if cfg!(target_os = "windows") => count = 0, // Reset
_ => {
count += 1;
if count > 255 {
return Err(DiskError::FileNameTooLong);
}
}
}
}
// Success.
Ok(())
}
/// Check if the given disk path is the root disk.
/// On Windows, always return false.
/// On Unix, compare the disk paths.
#[tracing::instrument(level = "debug", skip_all)]
pub fn is_root_disk(disk_path: &str, root_disk: &str) -> Result<bool> {
if cfg!(target_os = "windows") {
return Ok(false);
}
rustfs_utils::os::same_disk(disk_path, root_disk).map_err(|e| to_file_error(e).into())
}
/// Create a directory and all its parent components if they are missing.
#[tracing::instrument(level = "debug", skip_all)]
pub async fn make_dir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> Result<()> {
check_path_length(path.as_ref().to_string_lossy().to_string().as_str())?;
reliable_mkdir_all(path.as_ref(), base_dir.as_ref())
.await
.map_err(to_file_error)?;
Ok(())
}
/// Check if a directory is empty.
/// Only reads one entry to determine if the directory is empty.
#[tracing::instrument(level = "debug", skip_all)]
pub async fn is_empty_dir(path: impl AsRef<Path>) -> bool {
read_dir(path.as_ref(), 1).await.is_ok_and(|v| v.is_empty())
}
// read_dir count read limit. when count == 0 unlimit.
/// Return file names in the directory.
#[tracing::instrument(level = "debug", skip_all)]
pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec<String>> {
let mut entries = fs::read_dir(path.as_ref()).await?;
let mut volumes = Vec::new();
let mut count = count;
while let Some(entry) = entries.next_entry().await? {
let name = entry.file_name().to_string_lossy().to_string();
if name.is_empty() || name == "." || name == ".." {
continue;
}
let file_type = entry.file_type().await?;
if file_type.is_file() {
volumes.push(name);
} else if file_type.is_dir() {
volumes.push(format!("{name}{SLASH_SEPARATOR}"));
}
count -= 1;
if count == 0 {
break;
}
}
Ok(volumes)
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn rename_all(
src_file_path: impl AsRef<Path>,
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> Result<()> {
reliable_rename(src_file_path, dst_file_path.as_ref(), base_dir)
.await
.map_err(to_file_error)?;
Ok(())
}
async fn reliable_rename(
src_file_path: impl AsRef<Path>,
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> io::Result<()> {
if let Some(parent) = dst_file_path.as_ref().parent()
&& !file_exists(parent)
{
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
let mut i = 0;
loop {
if let Err(e) = super::fs::rename_std(src_file_path.as_ref(), dst_file_path.as_ref()) {
if e.kind() == io::ErrorKind::NotFound {
break;
}
if i == 0 {
i += 1;
continue;
}
warn!(
"reliable_rename failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}",
src_file_path.as_ref(),
dst_file_path.as_ref(),
base_dir.as_ref(),
e
);
return Err(e);
}
break;
}
Ok(())
}
pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> io::Result<()> {
let mut i = 0;
let mut base_dir = base_dir.as_ref();
loop {
if let Err(e) = os_mkdir_all(path.as_ref(), base_dir).await {
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;
if let Some(base_parent) = base_dir.parent()
&& let Some(c) = base_parent.components().next()
&& c != Component::RootDir
{
base_dir = base_parent
}
continue;
}
return Err(e);
}
break;
}
Ok(())
}
/// Create a directory and all its parent components if they are missing.
/// Without recursion support, fall back to create_dir_all
/// This function will not create directories under base_dir.
#[tracing::instrument(level = "debug", skip_all)]
pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> io::Result<()> {
if !base_dir.as_ref().to_string_lossy().is_empty() && base_dir.as_ref().starts_with(dir_path.as_ref()) {
return Ok(());
}
if let Some(parent) = dir_path.as_ref().parent() {
// Without recursion support, fall back to create_dir_all
if let Err(e) = super::fs::make_dir_all(&parent).await {
if e.kind() == io::ErrorKind::AlreadyExists {
return Ok(());
}
return Err(e);
}
// Box::pin(os_mkdir_all(&parent, &base_dir)).await?;
}
if let Err(e) = super::fs::mkdir(dir_path.as_ref()).await {
if e.kind() == io::ErrorKind::AlreadyExists {
return Ok(());
}
return Err(e);
}
Ok(())
}
/// Check if a file exists.
/// Returns true if the file exists, false otherwise.
#[tracing::instrument(level = "debug", skip_all)]
pub fn file_exists(path: impl AsRef<Path>) -> bool {
std::fs::metadata(path.as_ref()).map(|_| true).unwrap_or(false)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/error_conv.rs | crates/ecstore/src/disk/error_conv.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::DiskError;
pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::FileNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::IsADirectory => DiskError::IsNotRegular.into(),
std::io::ErrorKind::NotADirectory => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::UnexpectedEof => DiskError::FaultyDisk.into(),
std::io::ErrorKind::TooManyLinks => DiskError::TooManyOpenFiles.into(),
std::io::ErrorKind::InvalidInput => DiskError::FileNotFound.into(),
std::io::ErrorKind::InvalidData => DiskError::FileCorrupt.into(),
std::io::ErrorKind::StorageFull => DiskError::DiskFull.into(),
_ => io_err,
}
}
pub fn to_volume_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::VolumeNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => DiskError::VolumeNotEmpty.into(),
std::io::ErrorKind::NotADirectory => DiskError::IsNotRegular.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::VolumeNotFound.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
err => err.into(),
},
Err(err) => to_file_error(err),
},
_ => to_file_error(io_err),
}
}
pub fn to_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::DiskNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::DiskNotFound.into(),
DiskError::VolumeNotFound => DiskError::DiskNotFound.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
DiskError::VolumeAccessDenied => DiskError::DiskAccessDenied.into(),
err => err.into(),
},
Err(err) => to_volume_error(err),
},
_ => to_volume_error(io_err),
}
}
// only errors from FileSystem operations
pub fn to_access_error(io_err: std::io::Error, per_err: DiskError) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::PermissionDenied => per_err.into(),
std::io::ErrorKind::NotADirectory => per_err.into(),
std::io::ErrorKind::NotFound => DiskError::VolumeNotFound.into(),
std::io::ErrorKind::UnexpectedEof => DiskError::FaultyDisk.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::DiskAccessDenied => per_err.into(),
DiskError::FileAccessDenied => per_err.into(),
DiskError::FileNotFound => DiskError::VolumeNotFound.into(),
err => err.into(),
},
Err(err) => to_volume_error(err),
},
_ => to_volume_error(io_err),
}
}
pub fn to_unformatted_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::UnformattedDisk.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::UnformattedDisk.into(),
DiskError::DiskNotFound => DiskError::UnformattedDisk.into(),
DiskError::VolumeNotFound => DiskError::UnformattedDisk.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
DiskError::DiskAccessDenied => DiskError::DiskAccessDenied.into(),
_ => DiskError::CorruptedBackend.into(),
},
Err(_err) => DiskError::CorruptedBackend.into(),
},
_ => DiskError::CorruptedBackend.into(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Error as IoError, ErrorKind};
// Helper function to create IO errors with specific kinds
fn create_io_error(kind: ErrorKind) -> IoError {
IoError::new(kind, "test error")
}
// Helper function to create IO errors with DiskError as the source
fn create_io_error_with_disk_error(disk_error: DiskError) -> IoError {
IoError::other(disk_error)
}
// Helper function to check if an IoError contains a specific DiskError
fn contains_disk_error(io_error: IoError, expected: DiskError) -> bool {
if let Ok(disk_error) = io_error.downcast::<DiskError>() {
std::mem::discriminant(&disk_error) == std::mem::discriminant(&expected)
} else {
false
}
}
#[test]
fn test_to_file_error_basic_conversions() {
// Test NotFound -> FileNotFound
let result = to_file_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::FileNotFound));
// Test PermissionDenied -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test IsADirectory -> IsNotRegular
let result = to_file_error(create_io_error(ErrorKind::IsADirectory));
assert!(contains_disk_error(result, DiskError::IsNotRegular));
// Test NotADirectory -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::NotADirectory));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test DirectoryNotEmpty -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::DirectoryNotEmpty));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test UnexpectedEof -> FaultyDisk
let result = to_file_error(create_io_error(ErrorKind::UnexpectedEof));
assert!(contains_disk_error(result, DiskError::FaultyDisk));
// Test TooManyLinks -> TooManyOpenFiles
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::TooManyLinks));
assert!(contains_disk_error(result, DiskError::TooManyOpenFiles));
}
// Test InvalidInput -> FileNotFound
let result = to_file_error(create_io_error(ErrorKind::InvalidInput));
assert!(contains_disk_error(result, DiskError::FileNotFound));
// Test InvalidData -> FileCorrupt
let result = to_file_error(create_io_error(ErrorKind::InvalidData));
assert!(contains_disk_error(result, DiskError::FileCorrupt));
// Test StorageFull -> DiskFull
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::StorageFull));
assert!(contains_disk_error(result, DiskError::DiskFull));
}
}
#[test]
fn test_to_file_error_passthrough_unknown() {
// Test that unknown error kinds are passed through unchanged
let original = create_io_error(ErrorKind::Interrupted);
let result = to_file_error(original);
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_volume_error_basic_conversions() {
// Test NotFound -> VolumeNotFound
let result = to_volume_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test PermissionDenied -> DiskAccessDenied
let result = to_volume_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test DirectoryNotEmpty -> VolumeNotEmpty
let result = to_volume_error(create_io_error(ErrorKind::DirectoryNotEmpty));
assert!(contains_disk_error(result, DiskError::VolumeNotEmpty));
// Test NotADirectory -> IsNotRegular
let result = to_volume_error(create_io_error(ErrorKind::NotADirectory));
assert!(contains_disk_error(result, DiskError::IsNotRegular));
}
#[test]
fn test_to_volume_error_other_with_disk_error() {
// Test Other error kind with FileNotFound DiskError -> VolumeNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test Other error kind with FileAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_volume_error_fallback_to_file_error() {
// Test fallback to to_file_error for unknown error kinds
let result = to_volume_error(create_io_error(ErrorKind::Interrupted));
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_disk_error_basic_conversions() {
// Test NotFound -> DiskNotFound
let result = to_disk_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test PermissionDenied -> DiskAccessDenied
let result = to_disk_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
}
#[test]
fn test_to_disk_error_other_with_disk_error() {
// Test Other error kind with FileNotFound DiskError -> DiskNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test Other error kind with VolumeNotFound DiskError -> DiskNotFound
let io_error = create_io_error_with_disk_error(DiskError::VolumeNotFound);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test Other error kind with FileAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with VolumeAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::VolumeAccessDenied);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_disk_error_fallback_to_volume_error() {
// Test fallback to to_volume_error for unknown error kinds
let result = to_disk_error(create_io_error(ErrorKind::Interrupted));
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_access_error_basic_conversions() {
let permission_error = DiskError::FileAccessDenied;
// Test PermissionDenied -> specified permission error
let result = to_access_error(create_io_error(ErrorKind::PermissionDenied), permission_error);
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test NotADirectory -> specified permission error
let result = to_access_error(create_io_error(ErrorKind::NotADirectory), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test NotFound -> VolumeNotFound
let result = to_access_error(create_io_error(ErrorKind::NotFound), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test UnexpectedEof -> FaultyDisk
let result = to_access_error(create_io_error(ErrorKind::UnexpectedEof), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::FaultyDisk));
}
#[test]
fn test_to_access_error_other_with_disk_error() {
let permission_error = DiskError::VolumeAccessDenied;
// Test Other error kind with DiskAccessDenied -> specified permission error
let io_error = create_io_error_with_disk_error(DiskError::DiskAccessDenied);
let result = to_access_error(io_error, permission_error);
assert!(contains_disk_error(result, DiskError::VolumeAccessDenied));
// Test Other error kind with FileAccessDenied -> specified permission error
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeAccessDenied));
// Test Other error kind with FileNotFound -> VolumeNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_access_error_fallback_to_volume_error() {
let permission_error = DiskError::FileAccessDenied;
// Test fallback to to_volume_error for unknown error kinds
let result = to_access_error(create_io_error(ErrorKind::Interrupted), permission_error);
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_unformatted_disk_error_basic_conversions() {
// Test NotFound -> UnformattedDisk
let result = to_unformatted_disk_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test PermissionDenied -> DiskAccessDenied
let result = to_unformatted_disk_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
}
#[test]
fn test_to_unformatted_disk_error_other_with_disk_error() {
// Test Other error kind with FileNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with DiskNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::DiskNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with VolumeNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::VolumeNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with FileAccessDenied -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with DiskAccessDenied -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::DiskAccessDenied);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> CorruptedBackend
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::CorruptedBackend));
}
#[test]
fn test_to_unformatted_disk_error_recursive_behavior() {
// Test with non-Other error kind that should be handled without infinite recursion
let result = to_unformatted_disk_error(create_io_error(ErrorKind::Interrupted));
// This should not cause infinite recursion and should produce CorruptedBackend
assert!(contains_disk_error(result, DiskError::CorruptedBackend));
}
#[test]
fn test_error_chain_conversions() {
// Test complex error conversion chains
let original_error = create_io_error(ErrorKind::NotFound);
// Chain: NotFound -> FileNotFound (via to_file_error) -> VolumeNotFound (via to_volume_error)
let file_error = to_file_error(original_error);
let volume_error = to_volume_error(file_error);
assert!(contains_disk_error(volume_error, DiskError::VolumeNotFound));
}
#[test]
fn test_cross_platform_error_kinds() {
// Test error kinds that may not be available on all platforms
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::TooManyLinks));
assert!(contains_disk_error(result, DiskError::TooManyOpenFiles));
}
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::StorageFull));
assert!(contains_disk_error(result, DiskError::DiskFull));
}
}
#[test]
fn test_error_conversion_with_different_kinds() {
// Test multiple error kinds to ensure comprehensive coverage
let test_cases = vec![
(ErrorKind::NotFound, DiskError::FileNotFound),
(ErrorKind::PermissionDenied, DiskError::FileAccessDenied),
(ErrorKind::IsADirectory, DiskError::IsNotRegular),
(ErrorKind::InvalidData, DiskError::FileCorrupt),
];
for (kind, expected_disk_error) in test_cases {
let result = to_file_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{kind:?} -> DiskError::{expected_disk_error:?}"
);
}
}
#[test]
fn test_volume_error_conversion_chain() {
// Test volume error conversion with different input types
let test_cases = vec![
(ErrorKind::NotFound, DiskError::VolumeNotFound),
(ErrorKind::PermissionDenied, DiskError::DiskAccessDenied),
(ErrorKind::DirectoryNotEmpty, DiskError::VolumeNotEmpty),
];
for (kind, expected_disk_error) in test_cases {
let result = to_volume_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{kind:?} -> DiskError::{expected_disk_error:?}"
);
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/error.rs | crates/ecstore/src/disk/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// use crate::quorum::CheckErrorFn;
use std::hash::{Hash, Hasher};
use std::io::{self};
use std::path::PathBuf;
pub type Error = DiskError;
pub type Result<T> = core::result::Result<T, Error>;
// DiskError == StorageErr
#[derive(Debug, thiserror::Error)]
pub enum DiskError {
#[error("maximum versions exceeded, please delete few versions to proceed")]
MaxVersionsExceeded,
#[error("unexpected error")]
Unexpected,
#[error("corrupted format")]
CorruptedFormat,
#[error("corrupted backend")]
CorruptedBackend,
#[error("unformatted disk error")]
UnformattedDisk,
#[error("inconsistent drive found")]
InconsistentDisk,
#[error("drive does not support O_DIRECT")]
UnsupportedDisk,
#[error("drive path full")]
DiskFull,
#[error("disk not a dir")]
DiskNotDir,
#[error("disk not found")]
DiskNotFound,
#[error("drive still did not complete the request")]
DiskOngoingReq,
#[error("drive is part of root drive, will not be used")]
DriveIsRoot,
#[error("remote drive is faulty")]
FaultyRemoteDisk,
#[error("drive is faulty")]
FaultyDisk,
#[error("drive access denied")]
DiskAccessDenied,
#[error("file not found")]
FileNotFound,
#[error("file version not found")]
FileVersionNotFound,
#[error("too many open files, please increase 'ulimit -n'")]
TooManyOpenFiles,
#[error("file name too long")]
FileNameTooLong,
#[error("volume already exists")]
VolumeExists,
#[error("not of regular file type")]
IsNotRegular,
#[error("path not found")]
PathNotFound,
#[error("volume not found")]
VolumeNotFound,
#[error("volume is not empty")]
VolumeNotEmpty,
#[error("volume access denied")]
VolumeAccessDenied,
#[error("disk access denied")]
FileAccessDenied,
#[error("file is corrupted")]
FileCorrupt,
#[error("short write")]
ShortWrite,
#[error("bit-rot hash algorithm is invalid")]
BitrotHashAlgoInvalid,
#[error("Rename across devices not allowed, please fix your backend configuration")]
CrossDeviceLink,
#[error("less data available than what was requested")]
LessData,
#[error("more data was sent than what was advertised")]
MoreData,
#[error("outdated XL meta")]
OutdatedXLMeta,
#[error("part missing or corrupt")]
PartMissingOrCorrupt,
#[error("No healing is required")]
NoHealRequired,
#[error("method not allowed")]
MethodNotAllowed,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("io error {0}")]
Io(io::Error),
#[error("source stalled")]
SourceStalled,
#[error("timeout")]
Timeout,
}
impl DiskError {
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
DiskError::Io(std::io::Error::other(error))
}
pub fn is_all_not_found(errs: &[Option<DiskError>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
pub fn is_err_object_not_found(err: &DiskError) -> bool {
matches!(err, &DiskError::FileNotFound) || matches!(err, &DiskError::VolumeNotFound)
}
pub fn is_err_version_not_found(err: &DiskError) -> bool {
matches!(err, &DiskError::FileVersionNotFound)
}
// /// If all errors are of the same fatal disk error type, returns the corresponding error.
// /// Otherwise, returns Ok.
// pub fn check_disk_fatal_errs(errs: &[Option<Error>]) -> Result<()> {
// if DiskError::UnsupportedDisk.count_errs(errs) == errs.len() {
// return Err(DiskError::UnsupportedDisk.into());
// }
// if DiskError::FileAccessDenied.count_errs(errs) == errs.len() {
// return Err(DiskError::FileAccessDenied.into());
// }
// if DiskError::DiskNotDir.count_errs(errs) == errs.len() {
// return Err(DiskError::DiskNotDir.into());
// }
// Ok(())
// }
// pub fn count_errs(&self, errs: &[Option<Error>]) -> usize {
// errs.iter()
// .filter(|&err| match err {
// None => false,
// Some(e) => self.is(e),
// })
// .count()
// }
// pub fn quorum_unformatted_disks(errs: &[Option<Error>]) -> bool {
// DiskError::UnformattedDisk.count_errs(errs) > (errs.len() / 2)
// }
// pub fn should_init_erasure_disks(errs: &[Option<Error>]) -> bool {
// DiskError::UnformattedDisk.count_errs(errs) == errs.len()
// }
// // Check if the error is a disk error
// pub fn is(&self, err: &DiskError) -> bool {
// if let Some(e) = err.downcast_ref::<DiskError>() {
// e == self
// } else {
// false
// }
// }
}
impl From<rustfs_filemeta::Error> for DiskError {
fn from(e: rustfs_filemeta::Error) -> Self {
match e {
rustfs_filemeta::Error::Io(e) => DiskError::other(e),
rustfs_filemeta::Error::FileNotFound => DiskError::FileNotFound,
rustfs_filemeta::Error::FileVersionNotFound => DiskError::FileVersionNotFound,
rustfs_filemeta::Error::FileCorrupt => DiskError::FileCorrupt,
rustfs_filemeta::Error::MethodNotAllowed => DiskError::MethodNotAllowed,
e => DiskError::other(e),
}
}
}
impl From<std::io::Error> for DiskError {
fn from(e: std::io::Error) -> Self {
e.downcast::<DiskError>().unwrap_or_else(DiskError::Io)
}
}
impl From<DiskError> for std::io::Error {
fn from(e: DiskError) -> Self {
match e {
DiskError::Io(io_error) => io_error,
e => std::io::Error::other(e),
}
}
}
impl From<tonic::Status> for DiskError {
fn from(e: tonic::Status) -> Self {
DiskError::other(e.message().to_string())
}
}
impl From<rustfs_protos::proto_gen::node_service::Error> for DiskError {
fn from(e: rustfs_protos::proto_gen::node_service::Error) -> Self {
if let Some(err) = DiskError::from_u32(e.code) {
if matches!(err, DiskError::Io(_)) {
DiskError::other(e.error_info)
} else {
err
}
} else {
DiskError::other(e.error_info)
}
}
}
impl From<DiskError> for rustfs_protos::proto_gen::node_service::Error {
fn from(e: DiskError) -> Self {
rustfs_protos::proto_gen::node_service::Error {
code: e.to_u32(),
error_info: e.to_string(),
}
}
}
impl From<serde_json::Error> for DiskError {
fn from(e: serde_json::Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp_serde::encode::Error> for DiskError {
fn from(e: rmp_serde::encode::Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp_serde::decode::Error> for DiskError {
fn from(e: rmp_serde::decode::Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp::encode::ValueWriteError> for DiskError {
fn from(e: rmp::encode::ValueWriteError) -> Self {
DiskError::other(e)
}
}
impl From<rmp::decode::ValueReadError> for DiskError {
fn from(e: rmp::decode::ValueReadError) -> Self {
DiskError::other(e)
}
}
impl From<std::string::FromUtf8Error> for DiskError {
fn from(e: std::string::FromUtf8Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp::decode::NumValueReadError> for DiskError {
fn from(e: rmp::decode::NumValueReadError) -> Self {
DiskError::other(e)
}
}
impl From<tokio::task::JoinError> for DiskError {
fn from(e: tokio::task::JoinError) -> Self {
DiskError::other(e)
}
}
impl Clone for DiskError {
fn clone(&self) -> Self {
match self {
DiskError::Io(io_error) => DiskError::Io(std::io::Error::new(io_error.kind(), io_error.to_string())),
DiskError::MaxVersionsExceeded => DiskError::MaxVersionsExceeded,
DiskError::Unexpected => DiskError::Unexpected,
DiskError::CorruptedFormat => DiskError::CorruptedFormat,
DiskError::CorruptedBackend => DiskError::CorruptedBackend,
DiskError::UnformattedDisk => DiskError::UnformattedDisk,
DiskError::InconsistentDisk => DiskError::InconsistentDisk,
DiskError::UnsupportedDisk => DiskError::UnsupportedDisk,
DiskError::DiskFull => DiskError::DiskFull,
DiskError::DiskNotDir => DiskError::DiskNotDir,
DiskError::DiskNotFound => DiskError::DiskNotFound,
DiskError::DiskOngoingReq => DiskError::DiskOngoingReq,
DiskError::DriveIsRoot => DiskError::DriveIsRoot,
DiskError::FaultyRemoteDisk => DiskError::FaultyRemoteDisk,
DiskError::FaultyDisk => DiskError::FaultyDisk,
DiskError::DiskAccessDenied => DiskError::DiskAccessDenied,
DiskError::FileNotFound => DiskError::FileNotFound,
DiskError::FileVersionNotFound => DiskError::FileVersionNotFound,
DiskError::TooManyOpenFiles => DiskError::TooManyOpenFiles,
DiskError::FileNameTooLong => DiskError::FileNameTooLong,
DiskError::VolumeExists => DiskError::VolumeExists,
DiskError::IsNotRegular => DiskError::IsNotRegular,
DiskError::PathNotFound => DiskError::PathNotFound,
DiskError::VolumeNotFound => DiskError::VolumeNotFound,
DiskError::VolumeNotEmpty => DiskError::VolumeNotEmpty,
DiskError::VolumeAccessDenied => DiskError::VolumeAccessDenied,
DiskError::FileAccessDenied => DiskError::FileAccessDenied,
DiskError::FileCorrupt => DiskError::FileCorrupt,
DiskError::BitrotHashAlgoInvalid => DiskError::BitrotHashAlgoInvalid,
DiskError::CrossDeviceLink => DiskError::CrossDeviceLink,
DiskError::LessData => DiskError::LessData,
DiskError::MoreData => DiskError::MoreData,
DiskError::OutdatedXLMeta => DiskError::OutdatedXLMeta,
DiskError::PartMissingOrCorrupt => DiskError::PartMissingOrCorrupt,
DiskError::NoHealRequired => DiskError::NoHealRequired,
DiskError::MethodNotAllowed => DiskError::MethodNotAllowed,
DiskError::ErasureWriteQuorum => DiskError::ErasureWriteQuorum,
DiskError::ErasureReadQuorum => DiskError::ErasureReadQuorum,
DiskError::ShortWrite => DiskError::ShortWrite,
DiskError::SourceStalled => DiskError::SourceStalled,
DiskError::Timeout => DiskError::Timeout,
}
}
}
impl DiskError {
pub fn to_u32(&self) -> u32 {
match self {
DiskError::MaxVersionsExceeded => 0x01,
DiskError::Unexpected => 0x02,
DiskError::CorruptedFormat => 0x03,
DiskError::CorruptedBackend => 0x04,
DiskError::UnformattedDisk => 0x05,
DiskError::InconsistentDisk => 0x06,
DiskError::UnsupportedDisk => 0x07,
DiskError::DiskFull => 0x08,
DiskError::DiskNotDir => 0x09,
DiskError::DiskNotFound => 0x0A,
DiskError::DiskOngoingReq => 0x0B,
DiskError::DriveIsRoot => 0x0C,
DiskError::FaultyRemoteDisk => 0x0D,
DiskError::FaultyDisk => 0x0E,
DiskError::DiskAccessDenied => 0x0F,
DiskError::FileNotFound => 0x10,
DiskError::FileVersionNotFound => 0x11,
DiskError::TooManyOpenFiles => 0x12,
DiskError::FileNameTooLong => 0x13,
DiskError::VolumeExists => 0x14,
DiskError::IsNotRegular => 0x15,
DiskError::PathNotFound => 0x16,
DiskError::VolumeNotFound => 0x17,
DiskError::VolumeNotEmpty => 0x18,
DiskError::VolumeAccessDenied => 0x19,
DiskError::FileAccessDenied => 0x1A,
DiskError::FileCorrupt => 0x1B,
DiskError::BitrotHashAlgoInvalid => 0x1C,
DiskError::CrossDeviceLink => 0x1D,
DiskError::LessData => 0x1E,
DiskError::MoreData => 0x1F,
DiskError::OutdatedXLMeta => 0x20,
DiskError::PartMissingOrCorrupt => 0x21,
DiskError::NoHealRequired => 0x22,
DiskError::MethodNotAllowed => 0x23,
DiskError::Io(_) => 0x24,
DiskError::ErasureWriteQuorum => 0x25,
DiskError::ErasureReadQuorum => 0x26,
DiskError::ShortWrite => 0x27,
DiskError::SourceStalled => 0x28,
DiskError::Timeout => 0x29,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(DiskError::MaxVersionsExceeded),
0x02 => Some(DiskError::Unexpected),
0x03 => Some(DiskError::CorruptedFormat),
0x04 => Some(DiskError::CorruptedBackend),
0x05 => Some(DiskError::UnformattedDisk),
0x06 => Some(DiskError::InconsistentDisk),
0x07 => Some(DiskError::UnsupportedDisk),
0x08 => Some(DiskError::DiskFull),
0x09 => Some(DiskError::DiskNotDir),
0x0A => Some(DiskError::DiskNotFound),
0x0B => Some(DiskError::DiskOngoingReq),
0x0C => Some(DiskError::DriveIsRoot),
0x0D => Some(DiskError::FaultyRemoteDisk),
0x0E => Some(DiskError::FaultyDisk),
0x0F => Some(DiskError::DiskAccessDenied),
0x10 => Some(DiskError::FileNotFound),
0x11 => Some(DiskError::FileVersionNotFound),
0x12 => Some(DiskError::TooManyOpenFiles),
0x13 => Some(DiskError::FileNameTooLong),
0x14 => Some(DiskError::VolumeExists),
0x15 => Some(DiskError::IsNotRegular),
0x16 => Some(DiskError::PathNotFound),
0x17 => Some(DiskError::VolumeNotFound),
0x18 => Some(DiskError::VolumeNotEmpty),
0x19 => Some(DiskError::VolumeAccessDenied),
0x1A => Some(DiskError::FileAccessDenied),
0x1B => Some(DiskError::FileCorrupt),
0x1C => Some(DiskError::BitrotHashAlgoInvalid),
0x1D => Some(DiskError::CrossDeviceLink),
0x1E => Some(DiskError::LessData),
0x1F => Some(DiskError::MoreData),
0x20 => Some(DiskError::OutdatedXLMeta),
0x21 => Some(DiskError::PartMissingOrCorrupt),
0x22 => Some(DiskError::NoHealRequired),
0x23 => Some(DiskError::MethodNotAllowed),
0x24 => Some(DiskError::Io(std::io::Error::other(String::new()))),
0x25 => Some(DiskError::ErasureWriteQuorum),
0x26 => Some(DiskError::ErasureReadQuorum),
0x27 => Some(DiskError::ShortWrite),
0x28 => Some(DiskError::SourceStalled),
0x29 => Some(DiskError::Timeout),
_ => None,
}
}
}
impl PartialEq for DiskError {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(DiskError::Io(e1), DiskError::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
_ => self.to_u32() == other.to_u32(),
}
}
}
impl Eq for DiskError {}
impl Hash for DiskError {
fn hash<H: Hasher>(&self, state: &mut H) {
self.to_u32().hash(state);
}
}
// NOTE: Remove commented out code later if not needed
// Some error-related helper functions and complex error handling logic
// is currently commented out to avoid complexity. These can be re-enabled
// when needed for specific disk quorum checking and error aggregation logic.
/// Bitrot errors
#[derive(Debug, thiserror::Error)]
pub enum BitrotErrorType {
#[error("bitrot checksum verification failed")]
BitrotChecksumMismatch { expected: String, got: String },
}
impl From<BitrotErrorType> for DiskError {
fn from(e: BitrotErrorType) -> Self {
DiskError::other(e)
}
}
/// Context wrapper for file access errors
#[derive(Debug, thiserror::Error)]
pub struct FileAccessDeniedWithContext {
pub path: PathBuf,
#[source]
pub source: io::Error,
}
impl std::fmt::Display for FileAccessDeniedWithContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "file access denied for path: {}", self.path.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[test]
fn test_disk_error_variants() {
let errors = vec![
DiskError::MaxVersionsExceeded,
DiskError::Unexpected,
DiskError::CorruptedFormat,
DiskError::CorruptedBackend,
DiskError::UnformattedDisk,
DiskError::InconsistentDisk,
DiskError::UnsupportedDisk,
DiskError::DiskFull,
DiskError::DiskNotDir,
DiskError::DiskNotFound,
DiskError::DiskOngoingReq,
DiskError::DriveIsRoot,
DiskError::FaultyRemoteDisk,
DiskError::FaultyDisk,
DiskError::DiskAccessDenied,
DiskError::FileNotFound,
DiskError::FileVersionNotFound,
DiskError::TooManyOpenFiles,
DiskError::FileNameTooLong,
DiskError::VolumeExists,
DiskError::IsNotRegular,
DiskError::PathNotFound,
DiskError::VolumeNotFound,
DiskError::VolumeNotEmpty,
DiskError::VolumeAccessDenied,
DiskError::FileAccessDenied,
DiskError::FileCorrupt,
DiskError::ShortWrite,
DiskError::BitrotHashAlgoInvalid,
DiskError::CrossDeviceLink,
DiskError::LessData,
DiskError::MoreData,
DiskError::OutdatedXLMeta,
DiskError::PartMissingOrCorrupt,
DiskError::NoHealRequired,
DiskError::MethodNotAllowed,
DiskError::ErasureWriteQuorum,
DiskError::ErasureReadQuorum,
];
for error in errors {
// Test error display
assert!(!error.to_string().is_empty());
// Test error conversion to u32 and back
let code = error.to_u32();
let converted_back = DiskError::from_u32(code);
assert!(converted_back.is_some());
}
}
#[test]
fn test_disk_error_other() {
let custom_error = DiskError::other("custom error message");
assert!(matches!(custom_error, DiskError::Io(_)));
// The error message format might vary, so just check it's not empty
assert!(!custom_error.to_string().is_empty());
}
#[test]
fn test_disk_error_from_io_error() {
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "file not found");
let disk_error = DiskError::from(io_error);
assert!(matches!(disk_error, DiskError::Io(_)));
}
#[test]
fn test_is_all_not_found() {
// Empty slice
assert!(!DiskError::is_all_not_found(&[]));
// All file not found
let all_not_found = vec![
Some(DiskError::FileNotFound),
Some(DiskError::FileVersionNotFound),
Some(DiskError::FileNotFound),
];
assert!(DiskError::is_all_not_found(&all_not_found));
// Mixed errors
let mixed_errors = vec![
Some(DiskError::FileNotFound),
Some(DiskError::DiskNotFound),
Some(DiskError::FileNotFound),
];
assert!(!DiskError::is_all_not_found(&mixed_errors));
// Contains None
let with_none = vec![Some(DiskError::FileNotFound), None, Some(DiskError::FileNotFound)];
assert!(!DiskError::is_all_not_found(&with_none));
}
#[test]
fn test_is_err_object_not_found() {
assert!(DiskError::is_err_object_not_found(&DiskError::FileNotFound));
assert!(DiskError::is_err_object_not_found(&DiskError::VolumeNotFound));
assert!(!DiskError::is_err_object_not_found(&DiskError::DiskNotFound));
assert!(!DiskError::is_err_object_not_found(&DiskError::FileCorrupt));
}
#[test]
fn test_is_err_version_not_found() {
assert!(DiskError::is_err_version_not_found(&DiskError::FileVersionNotFound));
assert!(!DiskError::is_err_version_not_found(&DiskError::FileNotFound));
assert!(!DiskError::is_err_version_not_found(&DiskError::VolumeNotFound));
}
#[test]
fn test_disk_error_to_u32_from_u32() {
let test_cases = vec![
(DiskError::MaxVersionsExceeded, 1),
(DiskError::Unexpected, 2),
(DiskError::CorruptedFormat, 3),
(DiskError::UnformattedDisk, 5),
(DiskError::DiskNotFound, 10),
(DiskError::FileNotFound, 16),
(DiskError::VolumeNotFound, 23),
];
for (error, expected_code) in test_cases {
assert_eq!(error.to_u32(), expected_code);
assert_eq!(DiskError::from_u32(expected_code), Some(error));
}
// Test unknown error code
assert_eq!(DiskError::from_u32(999), None);
}
#[test]
fn test_disk_error_equality() {
assert_eq!(DiskError::FileNotFound, DiskError::FileNotFound);
assert_ne!(DiskError::FileNotFound, DiskError::VolumeNotFound);
let error1 = DiskError::other("test");
let error2 = DiskError::other("test");
// IO errors with the same message should be equal
assert_eq!(error1, error2);
}
#[test]
fn test_disk_error_clone() {
let original = DiskError::FileNotFound;
let cloned = original.clone();
assert_eq!(original, cloned);
let io_error = DiskError::other("test error");
let cloned_io = io_error.clone();
assert_eq!(io_error, cloned_io);
}
#[test]
fn test_disk_error_hash() {
let mut map = HashMap::new();
map.insert(DiskError::FileNotFound, "file not found");
map.insert(DiskError::VolumeNotFound, "volume not found");
assert_eq!(map.get(&DiskError::FileNotFound), Some(&"file not found"));
assert_eq!(map.get(&DiskError::VolumeNotFound), Some(&"volume not found"));
assert_eq!(map.get(&DiskError::DiskNotFound), None);
}
#[test]
fn test_error_conversions() {
// Test From implementations
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "test");
let _disk_error: DiskError = io_error.into();
let json_str = r#"{"invalid": json}"#; // Invalid JSON
let json_error = serde_json::from_str::<serde_json::Value>(json_str).unwrap_err();
let _disk_error: DiskError = json_error.into();
}
#[test]
fn test_bitrot_error_type() {
let bitrot_error = BitrotErrorType::BitrotChecksumMismatch {
expected: "abc123".to_string(),
got: "def456".to_string(),
};
assert!(bitrot_error.to_string().contains("bitrot checksum verification failed"));
let disk_error: DiskError = bitrot_error.into();
assert!(matches!(disk_error, DiskError::Io(_)));
}
#[test]
fn test_file_access_denied_with_context() {
let path = PathBuf::from("/test/path");
let io_error = std::io::Error::new(std::io::ErrorKind::PermissionDenied, "permission denied");
let context_error = FileAccessDeniedWithContext {
path: path.clone(),
source: io_error,
};
let display_str = format!("{context_error}");
assert!(display_str.contains("/test/path"));
assert!(display_str.contains("file access denied"));
}
#[test]
fn test_error_debug_format() {
let error = DiskError::FileNotFound;
let debug_str = format!("{error:?}");
assert_eq!(debug_str, "FileNotFound");
let io_error = DiskError::other("test error");
let debug_str = format!("{io_error:?}");
assert!(debug_str.contains("Io"));
}
#[test]
fn test_error_source() {
use std::error::Error;
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "test");
let disk_error = DiskError::Io(io_error);
// DiskError should have a source
if let DiskError::Io(ref inner) = disk_error {
assert!(inner.source().is_none()); // std::io::Error typically doesn't have a source
}
}
#[test]
fn test_io_error_roundtrip_conversion() {
// Test DiskError -> std::io::Error -> DiskError roundtrip
let original_disk_errors = vec![
DiskError::FileNotFound,
DiskError::VolumeNotFound,
DiskError::DiskFull,
DiskError::FileCorrupt,
DiskError::MethodNotAllowed,
];
for original_error in original_disk_errors {
// Convert to io::Error and back
let io_error: std::io::Error = original_error.clone().into();
let recovered_error: DiskError = io_error.into();
// For non-Io variants, they become Io(ErrorKind::Other) and then back to the original
match &original_error {
DiskError::Io(_) => {
// Io errors should maintain their kind
assert!(matches!(recovered_error, DiskError::Io(_)));
}
_ => {
// Other errors become Io(Other) and then are recovered via downcast
// The recovered error should be functionally equivalent
assert_eq!(original_error.to_u32(), recovered_error.to_u32());
}
}
}
}
#[test]
fn test_io_error_with_disk_error_inside() {
// Test that io::Error containing DiskError can be properly converted back
let original_disk_error = DiskError::FileNotFound;
let io_with_disk_error = std::io::Error::other(original_disk_error.clone());
// Convert io::Error back to DiskError
let recovered_disk_error: DiskError = io_with_disk_error.into();
assert_eq!(original_disk_error, recovered_disk_error);
}
#[test]
fn test_io_error_different_kinds() {
use std::io::ErrorKind;
let test_cases = vec![
(ErrorKind::NotFound, "file not found"),
(ErrorKind::PermissionDenied, "permission denied"),
(ErrorKind::ConnectionRefused, "connection refused"),
(ErrorKind::TimedOut, "timed out"),
(ErrorKind::InvalidInput, "invalid input"),
];
for (kind, message) in test_cases {
let io_error = std::io::Error::new(kind, message);
let disk_error: DiskError = io_error.into();
// Should become DiskError::Io with the same kind and message
match disk_error {
DiskError::Io(inner_io) => {
assert_eq!(inner_io.kind(), kind);
assert!(inner_io.to_string().contains(message));
}
_ => panic!("Expected DiskError::Io variant"),
}
}
}
#[test]
fn test_disk_error_to_io_error_preserves_information() {
let test_cases = vec![
DiskError::FileNotFound,
DiskError::VolumeNotFound,
DiskError::DiskFull,
DiskError::FileCorrupt,
DiskError::MethodNotAllowed,
DiskError::ErasureReadQuorum,
DiskError::ErasureWriteQuorum,
];
for disk_error in test_cases {
let io_error: std::io::Error = disk_error.clone().into();
// Error message should be preserved
assert!(io_error.to_string().contains(&disk_error.to_string()));
// Should be able to downcast back to DiskError
let recovered_error = io_error.downcast::<DiskError>();
assert!(recovered_error.is_ok());
assert_eq!(recovered_error.unwrap(), disk_error);
}
}
#[test]
fn test_io_error_downcast_chain() {
// Test nested error downcasting chain
let original_disk_error = DiskError::FileNotFound;
// Create a chain: DiskError -> io::Error -> DiskError -> io::Error
let io_error1: std::io::Error = original_disk_error.clone().into();
let disk_error2: DiskError = io_error1.into();
let io_error2: std::io::Error = disk_error2.into();
// Final io::Error should still contain the original DiskError
let final_disk_error = io_error2.downcast::<DiskError>();
assert!(final_disk_error.is_ok());
assert_eq!(final_disk_error.unwrap(), original_disk_error);
}
#[test]
fn test_io_error_with_original_io_content() {
// Test DiskError::Io variant preserves original io::Error
let original_io = std::io::Error::new(std::io::ErrorKind::BrokenPipe, "broken pipe");
let disk_error = DiskError::Io(original_io);
let converted_io: std::io::Error = disk_error.into();
assert_eq!(converted_io.kind(), std::io::ErrorKind::BrokenPipe);
assert!(converted_io.to_string().contains("broken pipe"));
}
#[test]
fn test_error_display_preservation() {
let disk_errors = vec![
DiskError::MaxVersionsExceeded,
DiskError::CorruptedFormat,
DiskError::UnformattedDisk,
DiskError::DiskNotFound,
DiskError::FileAccessDenied,
];
for disk_error in disk_errors {
let original_message = disk_error.to_string();
let io_error: std::io::Error = disk_error.clone().into();
// The io::Error should contain the original error message
assert!(io_error.to_string().contains(&original_message));
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/mod.rs | crates/ecstore/src/disk/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod disk_store;
pub mod endpoint;
pub mod error;
pub mod error_conv;
pub mod error_reduce;
pub mod format;
pub mod fs;
pub mod local;
pub mod os;
pub const RUSTFS_META_BUCKET: &str = ".rustfs.sys";
pub const RUSTFS_META_MULTIPART_BUCKET: &str = ".rustfs.sys/multipart";
pub const RUSTFS_META_TMP_BUCKET: &str = ".rustfs.sys/tmp";
pub const RUSTFS_META_TMP_DELETED_BUCKET: &str = ".rustfs.sys/tmp/.trash";
pub const BUCKET_META_PREFIX: &str = "buckets";
pub const FORMAT_CONFIG_FILE: &str = "format.json";
pub const STORAGE_FORMAT_FILE: &str = "xl.meta";
pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";
use crate::disk::disk_store::LocalDiskWrapper;
use crate::rpc::RemoteDisk;
use bytes::Bytes;
use endpoint::Endpoint;
use error::DiskError;
use error::{Error, Result};
use local::LocalDisk;
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_madmin::info_commands::DiskMetrics;
use serde::{Deserialize, Serialize};
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncWrite};
use uuid::Uuid;
pub type DiskStore = Arc<Disk>;
pub type FileReader = Box<dyn AsyncRead + Send + Sync + Unpin>;
pub type FileWriter = Box<dyn AsyncWrite + Send + Sync + Unpin>;
#[derive(Debug)]
pub enum Disk {
Local(Box<LocalDiskWrapper>),
Remote(Box<RemoteDisk>),
}
#[async_trait::async_trait]
impl DiskAPI for Disk {
#[tracing::instrument(skip(self))]
fn to_string(&self) -> String {
match self {
Disk::Local(local_disk) => local_disk.to_string(),
Disk::Remote(remote_disk) => remote_disk.to_string(),
}
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
match self {
Disk::Local(local_disk) => local_disk.is_online().await,
Disk::Remote(remote_disk) => remote_disk.is_online().await,
}
}
#[tracing::instrument(skip(self))]
fn is_local(&self) -> bool {
match self {
Disk::Local(local_disk) => local_disk.is_local(),
Disk::Remote(remote_disk) => remote_disk.is_local(),
}
}
#[tracing::instrument(skip(self))]
fn host_name(&self) -> String {
match self {
Disk::Local(local_disk) => local_disk.host_name(),
Disk::Remote(remote_disk) => remote_disk.host_name(),
}
}
#[tracing::instrument(skip(self))]
fn endpoint(&self) -> Endpoint {
match self {
Disk::Local(local_disk) => local_disk.endpoint(),
Disk::Remote(remote_disk) => remote_disk.endpoint(),
}
}
#[tracing::instrument(skip(self))]
async fn close(&self) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.close().await,
Disk::Remote(remote_disk) => remote_disk.close().await,
}
}
#[tracing::instrument(skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
match self {
Disk::Local(local_disk) => local_disk.get_disk_id().await,
Disk::Remote(remote_disk) => remote_disk.get_disk_id().await,
}
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.set_disk_id(id).await,
Disk::Remote(remote_disk) => remote_disk.set_disk_id(id).await,
}
}
#[tracing::instrument(skip(self))]
fn path(&self) -> PathBuf {
match self {
Disk::Local(local_disk) => local_disk.path(),
Disk::Remote(remote_disk) => remote_disk.path(),
}
}
#[tracing::instrument(skip(self))]
fn get_disk_location(&self) -> DiskLocation {
match self {
Disk::Local(local_disk) => local_disk.get_disk_location(),
Disk::Remote(remote_disk) => remote_disk.get_disk_location(),
}
}
#[tracing::instrument(skip(self))]
async fn make_volume(&self, volume: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.make_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.make_volume(volume).await,
}
}
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.make_volumes(volumes).await,
Disk::Remote(remote_disk) => remote_disk.make_volumes(volumes).await,
}
}
#[tracing::instrument(skip(self))]
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
match self {
Disk::Local(local_disk) => local_disk.list_volumes().await,
Disk::Remote(remote_disk) => remote_disk.list_volumes().await,
}
}
#[tracing::instrument(skip(self))]
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
match self {
Disk::Local(local_disk) => local_disk.stat_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.stat_volume(volume).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_volume(&self, volume: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.delete_volume(volume).await,
}
}
#[tracing::instrument(skip(self, wr))]
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.walk_dir(opts, wr).await,
Disk::Remote(remote_disk) => remote_disk.walk_dir(opts, wr).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_version(volume, path, fi, force_del_marker, opts).await,
Disk::Remote(remote_disk) => remote_disk.delete_version(volume, path, fi, force_del_marker, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
match self {
Disk::Local(local_disk) => local_disk.delete_versions(volume, versions, opts).await,
Disk::Remote(remote_disk) => remote_disk.delete_versions(volume, versions, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_paths(volume, paths).await,
Disk::Remote(remote_disk) => remote_disk.delete_paths(volume, paths).await,
}
}
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.write_metadata(_org_volume, volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.write_metadata(_org_volume, volume, path, fi).await,
}
}
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.update_metadata(volume, path, fi, opts).await,
Disk::Remote(remote_disk) => remote_disk.update_metadata(volume, path, fi, opts).await,
}
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_version(
&self,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
match self {
Disk::Local(local_disk) => local_disk.read_version(_org_volume, volume, path, version_id, opts).await,
Disk::Remote(remote_disk) => remote_disk.read_version(_org_volume, volume, path, version_id, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
match self {
Disk::Local(local_disk) => local_disk.read_xl(volume, path, read_data).await,
Disk::Remote(remote_disk) => remote_disk.read_xl(volume, path, read_data).await,
}
}
#[tracing::instrument(skip(self, fi))]
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
match self {
Disk::Local(local_disk) => local_disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await,
Disk::Remote(remote_disk) => remote_disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await,
}
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
match self {
Disk::Local(local_disk) => local_disk.list_dir(_origvolume, volume, dir_path, count).await,
Disk::Remote(remote_disk) => remote_disk.list_dir(_origvolume, volume, dir_path, count).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader> {
match self {
Disk::Local(local_disk) => local_disk.read_file(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.read_file(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader> {
match self {
Disk::Local(local_disk) => local_disk.read_file_stream(volume, path, offset, length).await,
Disk::Remote(remote_disk) => remote_disk.read_file_stream(volume, path, offset, length).await,
}
}
#[tracing::instrument(skip(self))]
async fn append_file(&self, volume: &str, path: &str) -> Result<FileWriter> {
match self {
Disk::Local(local_disk) => local_disk.append_file(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.append_file(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, _file_size: i64) -> Result<FileWriter> {
match self {
Disk::Local(local_disk) => local_disk.create_file(_origvolume, volume, path, _file_size).await,
Disk::Remote(remote_disk) => remote_disk.create_file(_origvolume, volume, path, _file_size).await,
}
}
#[tracing::instrument(skip(self))]
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.rename_file(src_volume, src_path, dst_volume, dst_path).await,
Disk::Remote(remote_disk) => remote_disk.rename_file(src_volume, src_path, dst_volume, dst_path).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
match self {
Disk::Local(local_disk) => local_disk.read_parts(bucket, paths).await,
Disk::Remote(remote_disk) => remote_disk.read_parts(bucket, paths).await,
}
}
#[tracing::instrument(skip(self))]
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.rename_part(src_volume, src_path, dst_volume, dst_path, meta).await,
Disk::Remote(remote_disk) => {
remote_disk
.rename_part(src_volume, src_path, dst_volume, dst_path, meta)
.await
}
}
}
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete(volume, path, opt).await,
Disk::Remote(remote_disk) => remote_disk.delete(volume, path, opt).await,
}
}
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
match self {
Disk::Local(local_disk) => local_disk.verify_file(volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.verify_file(volume, path, fi).await,
}
}
#[tracing::instrument(skip(self))]
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
match self {
Disk::Local(local_disk) => local_disk.check_parts(volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.check_parts(volume, path, fi).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
match self {
Disk::Local(local_disk) => local_disk.read_multiple(req).await,
Disk::Remote(remote_disk) => remote_disk.read_multiple(req).await,
}
}
#[tracing::instrument(skip(self))]
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.write_all(volume, path, data).await,
Disk::Remote(remote_disk) => remote_disk.write_all(volume, path, data).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes> {
match self {
Disk::Local(local_disk) => local_disk.read_all(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.read_all(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
match self {
Disk::Local(local_disk) => local_disk.disk_info(opts).await,
Disk::Remote(remote_disk) => remote_disk.disk_info(opts).await,
}
}
}
pub async fn new_disk(ep: &Endpoint, opt: &DiskOption) -> Result<DiskStore> {
if ep.is_local {
let s = LocalDisk::new(ep, opt.cleanup).await?;
Ok(Arc::new(Disk::Local(Box::new(LocalDiskWrapper::new(Arc::new(s), opt.health_check)))))
} else {
let remote_disk = RemoteDisk::new(ep, opt).await?;
Ok(Arc::new(Disk::Remote(Box::new(remote_disk))))
}
}
#[async_trait::async_trait]
pub trait DiskAPI: Debug + Send + Sync + 'static {
fn to_string(&self) -> String;
async fn is_online(&self) -> bool;
fn is_local(&self) -> bool;
// LastConn
fn host_name(&self) -> String;
fn endpoint(&self) -> Endpoint;
async fn close(&self) -> Result<()>;
async fn get_disk_id(&self) -> Result<Option<Uuid>>;
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()>;
fn path(&self) -> PathBuf;
fn get_disk_location(&self) -> DiskLocation;
// Healing
// DiskInfo
// NSScanner
// Volume operations.
async fn make_volume(&self, volume: &str) -> Result<()>;
async fn make_volumes(&self, volume: Vec<&str>) -> Result<()>;
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>>;
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo>;
async fn delete_volume(&self, volume: &str) -> Result<()>;
// Concurrent read/write pipeline w <- MetaCacheEntry
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()>;
// Metadata operations
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()>;
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>>;
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>;
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>;
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>;
async fn read_version(
&self,
org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo>;
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo>;
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
file_info: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp>;
// File operations.
// Read every file and directory within the folder
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>>;
async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader>;
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader>;
async fn append_file(&self, volume: &str, path: &str) -> Result<FileWriter>;
async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: i64) -> Result<FileWriter>;
// ReadFileStream
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()>;
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()>;
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()>;
// VerifyFile
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp>;
// CheckParts
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp>;
// StatInfoFile
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>>;
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>>;
// CleanAbandonedData
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()>;
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes>;
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo>;
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CheckPartsResp {
pub results: Vec<usize>,
}
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct UpdateMetadataOpts {
pub no_persistence: bool,
}
pub struct DiskLocation {
pub pool_idx: Option<usize>,
pub set_idx: Option<usize>,
pub disk_idx: Option<usize>,
}
impl DiskLocation {
pub fn valid(&self) -> bool {
self.pool_idx.is_some() && self.set_idx.is_some() && self.disk_idx.is_some()
}
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct DiskInfoOptions {
pub disk_id: String,
pub metrics: bool,
pub noop: bool,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct DiskInfo {
pub total: u64,
pub free: u64,
pub used: u64,
pub used_inodes: u64,
pub free_inodes: u64,
pub major: u64,
pub minor: u64,
pub nr_requests: u64,
pub fs_type: String,
pub root_disk: bool,
pub healing: bool,
pub scanning: bool,
pub endpoint: String,
pub mount_path: String,
pub id: Option<Uuid>,
pub rotational: bool,
pub metrics: DiskMetrics,
pub error: String,
}
#[derive(Clone, Debug, Default)]
pub struct Info {
pub total: u64,
pub free: u64,
pub used: u64,
pub files: u64,
pub ffree: u64,
pub fstype: String,
pub major: u64,
pub minor: u64,
pub name: String,
pub rotational: bool,
pub nrrequests: u64,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct FileInfoVersions {
// Name of the volume.
pub volume: String,
// Name of the file.
pub name: String,
// Represents the latest mod time of the
// latest version.
pub latest_mod_time: Option<OffsetDateTime>,
pub versions: Vec<FileInfo>,
pub free_versions: Vec<FileInfo>,
}
impl FileInfoVersions {
pub fn find_version_index(&self, v: &str) -> Option<usize> {
if v.is_empty() {
return None;
}
let vid = Uuid::parse_str(v).unwrap_or_default();
self.versions.iter().position(|v| v.version_id == Some(vid))
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct WalkDirOptions {
// Bucket to scanner
pub bucket: String,
// Directory inside the bucket.
pub base_dir: String,
// Do a full recursive scan.
pub recursive: bool,
// ReportNotFound will return errFileNotFound if all disks reports the BaseDir cannot be found.
pub report_notfound: bool,
// FilterPrefix will only return results with given prefix within folder.
// Should never contain a slash.
pub filter_prefix: Option<String>,
// ForwardTo will forward to the given object path.
pub forward_to: Option<String>,
// Limit the number of returned objects if > 0.
pub limit: i32,
// DiskID contains the disk ID of the disk.
// Leave empty to not check disk ID.
pub disk_id: String,
}
#[derive(Clone, Debug, Default)]
pub struct DiskOption {
pub cleanup: bool,
pub health_check: bool,
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct RenameDataResp {
pub old_data_dir: Option<Uuid>,
pub sign: Option<Vec<u8>>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DeleteOptions {
pub recursive: bool,
pub immediate: bool,
pub undo_write: bool,
pub old_data_dir: Option<Uuid>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReadMultipleReq {
pub bucket: String,
pub prefix: String,
pub files: Vec<String>,
pub max_size: usize,
pub metadata_only: bool,
pub abort404: bool,
pub max_results: usize,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReadMultipleResp {
pub bucket: String,
pub prefix: String,
pub file: String,
pub exists: bool,
pub error: String,
pub data: Vec<u8>,
pub mod_time: Option<OffsetDateTime>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct VolumeInfo {
pub name: String,
pub created: Option<OffsetDateTime>,
}
#[derive(Deserialize, Serialize, Debug, Default, Clone)]
pub struct ReadOptions {
pub incl_free_versions: bool,
pub read_data: bool,
pub healing: bool,
}
pub const CHECK_PART_UNKNOWN: usize = 0;
// Changing the order can cause a data loss
// when running two nodes with incompatible versions
pub const CHECK_PART_SUCCESS: usize = 1;
pub const CHECK_PART_DISK_NOT_FOUND: usize = 2;
pub const CHECK_PART_VOLUME_NOT_FOUND: usize = 3;
pub const CHECK_PART_FILE_NOT_FOUND: usize = 4;
pub const CHECK_PART_FILE_CORRUPT: usize = 5;
pub fn conv_part_err_to_int(err: &Option<Error>) -> usize {
match err {
Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => CHECK_PART_FILE_NOT_FOUND,
Some(DiskError::FileCorrupt) => CHECK_PART_FILE_CORRUPT,
Some(DiskError::VolumeNotFound) => CHECK_PART_VOLUME_NOT_FOUND,
Some(DiskError::DiskNotFound) => CHECK_PART_DISK_NOT_FOUND,
None => CHECK_PART_SUCCESS,
_ => {
tracing::warn!("conv_part_err_to_int: unknown error: {err:?}");
CHECK_PART_UNKNOWN
}
}
}
pub fn has_part_err(part_errs: &[usize]) -> bool {
part_errs.iter().any(|err| *err != CHECK_PART_SUCCESS)
}
#[cfg(test)]
mod tests {
use super::*;
use endpoint::Endpoint;
use local::LocalDisk;
use std::path::PathBuf;
use tokio::fs;
use uuid::Uuid;
/// Test DiskLocation validation
#[test]
fn test_disk_location_valid() {
let valid_location = DiskLocation {
pool_idx: Some(0),
set_idx: Some(1),
disk_idx: Some(2),
};
assert!(valid_location.valid());
let invalid_location = DiskLocation {
pool_idx: None,
set_idx: None,
disk_idx: None,
};
assert!(!invalid_location.valid());
let partial_valid_location = DiskLocation {
pool_idx: Some(0),
set_idx: None,
disk_idx: Some(2),
};
assert!(!partial_valid_location.valid());
}
/// Test FileInfoVersions find_version_index
#[test]
fn test_file_info_versions_find_version_index() {
let mut versions = Vec::new();
let v1_uuid = Uuid::new_v4();
let v2_uuid = Uuid::new_v4();
let fi1 = FileInfo {
version_id: Some(v1_uuid),
..Default::default()
};
let fi2 = FileInfo {
version_id: Some(v2_uuid),
..Default::default()
};
versions.push(fi1);
versions.push(fi2);
let fiv = FileInfoVersions {
volume: "test-bucket".to_string(),
name: "test-object".to_string(),
latest_mod_time: None,
versions,
free_versions: Vec::new(),
};
assert_eq!(fiv.find_version_index(&v1_uuid.to_string()), Some(0));
assert_eq!(fiv.find_version_index(&v2_uuid.to_string()), Some(1));
assert_eq!(fiv.find_version_index("non-existent"), None);
assert_eq!(fiv.find_version_index(""), None);
}
/// Test part error conversion functions
#[test]
fn test_conv_part_err_to_int() {
assert_eq!(conv_part_err_to_int(&None), CHECK_PART_SUCCESS);
assert_eq!(
conv_part_err_to_int(&Some(Error::from(DiskError::DiskNotFound))),
CHECK_PART_DISK_NOT_FOUND
);
assert_eq!(
conv_part_err_to_int(&Some(Error::from(DiskError::VolumeNotFound))),
CHECK_PART_VOLUME_NOT_FOUND
);
assert_eq!(
conv_part_err_to_int(&Some(Error::from(DiskError::FileNotFound))),
CHECK_PART_FILE_NOT_FOUND
);
assert_eq!(conv_part_err_to_int(&Some(Error::from(DiskError::FileCorrupt))), CHECK_PART_FILE_CORRUPT);
assert_eq!(conv_part_err_to_int(&Some(Error::from(DiskError::Unexpected))), CHECK_PART_UNKNOWN);
}
/// Test has_part_err function
#[test]
fn test_has_part_err() {
assert!(!has_part_err(&[]));
assert!(!has_part_err(&[CHECK_PART_SUCCESS]));
assert!(!has_part_err(&[CHECK_PART_SUCCESS, CHECK_PART_SUCCESS]));
assert!(has_part_err(&[CHECK_PART_FILE_NOT_FOUND]));
assert!(has_part_err(&[CHECK_PART_SUCCESS, CHECK_PART_FILE_CORRUPT]));
assert!(has_part_err(&[CHECK_PART_DISK_NOT_FOUND, CHECK_PART_VOLUME_NOT_FOUND]));
}
/// Test WalkDirOptions structure
#[test]
fn test_walk_dir_options() {
let opts = WalkDirOptions {
bucket: "test-bucket".to_string(),
base_dir: "/path/to/dir".to_string(),
recursive: true,
report_notfound: false,
filter_prefix: Some("prefix_".to_string()),
forward_to: Some("object/path".to_string()),
limit: 100,
disk_id: "disk-123".to_string(),
};
assert_eq!(opts.bucket, "test-bucket");
assert_eq!(opts.base_dir, "/path/to/dir");
assert!(opts.recursive);
assert!(!opts.report_notfound);
assert_eq!(opts.filter_prefix, Some("prefix_".to_string()));
assert_eq!(opts.forward_to, Some("object/path".to_string()));
assert_eq!(opts.limit, 100);
assert_eq!(opts.disk_id, "disk-123");
}
/// Test DeleteOptions structure
#[test]
fn test_delete_options() {
let opts = DeleteOptions {
recursive: true,
immediate: false,
undo_write: true,
old_data_dir: Some(Uuid::new_v4()),
};
assert!(opts.recursive);
assert!(!opts.immediate);
assert!(opts.undo_write);
assert!(opts.old_data_dir.is_some());
}
/// Test ReadOptions structure
#[test]
fn test_read_options() {
let opts = ReadOptions {
incl_free_versions: true,
read_data: false,
healing: true,
};
assert!(opts.incl_free_versions);
assert!(!opts.read_data);
assert!(opts.healing);
}
/// Test UpdateMetadataOpts structure
#[test]
fn test_update_metadata_opts() {
let opts = UpdateMetadataOpts { no_persistence: true };
assert!(opts.no_persistence);
}
/// Test DiskOption structure
#[test]
fn test_disk_option() {
let opt = DiskOption {
cleanup: true,
health_check: false,
};
assert!(opt.cleanup);
assert!(!opt.health_check);
}
/// Test DiskInfoOptions structure
#[test]
fn test_disk_info_options() {
let opts = DiskInfoOptions {
disk_id: "test-disk-id".to_string(),
metrics: true,
noop: false,
};
assert_eq!(opts.disk_id, "test-disk-id");
assert!(opts.metrics);
assert!(!opts.noop);
}
/// Test ReadMultipleReq structure
#[test]
fn test_read_multiple_req() {
let req = ReadMultipleReq {
bucket: "test-bucket".to_string(),
prefix: "prefix/".to_string(),
files: vec!["file1.txt".to_string(), "file2.txt".to_string()],
max_size: 1024,
metadata_only: false,
abort404: true,
max_results: 10,
};
assert_eq!(req.bucket, "test-bucket");
assert_eq!(req.prefix, "prefix/");
assert_eq!(req.files.len(), 2);
assert_eq!(req.max_size, 1024);
assert!(!req.metadata_only);
assert!(req.abort404);
assert_eq!(req.max_results, 10);
}
/// Test ReadMultipleResp structure
#[test]
fn test_read_multiple_resp() {
let resp = ReadMultipleResp {
bucket: "test-bucket".to_string(),
prefix: "prefix/".to_string(),
file: "test-file.txt".to_string(),
exists: true,
error: "".to_string(),
data: vec![1, 2, 3, 4],
mod_time: Some(time::OffsetDateTime::now_utc()),
};
assert_eq!(resp.bucket, "test-bucket");
assert_eq!(resp.prefix, "prefix/");
assert_eq!(resp.file, "test-file.txt");
assert!(resp.exists);
assert!(resp.error.is_empty());
assert_eq!(resp.data, vec![1, 2, 3, 4]);
assert!(resp.mod_time.is_some());
}
/// Test VolumeInfo structure
#[test]
fn test_volume_info() {
let now = time::OffsetDateTime::now_utc();
let vol_info = VolumeInfo {
name: "test-volume".to_string(),
created: Some(now),
};
assert_eq!(vol_info.name, "test-volume");
assert_eq!(vol_info.created, Some(now));
}
/// Test CheckPartsResp structure
#[test]
fn test_check_parts_resp() {
let resp = CheckPartsResp {
results: vec![CHECK_PART_SUCCESS, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_FILE_CORRUPT],
};
assert_eq!(resp.results.len(), 3);
assert_eq!(resp.results[0], CHECK_PART_SUCCESS);
assert_eq!(resp.results[1], CHECK_PART_FILE_NOT_FOUND);
assert_eq!(resp.results[2], CHECK_PART_FILE_CORRUPT);
}
/// Test RenameDataResp structure
#[test]
fn test_rename_data_resp() {
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/format.rs | crates/ecstore/src/disk/format.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::{Error, Result};
use super::{DiskInfo, error::DiskError};
use serde::{Deserialize, Serialize};
use serde_json::Error as JsonError;
use uuid::Uuid;
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatMetaVersion {
#[serde(rename = "1")]
V1,
#[serde(other)]
Unknown,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatBackend {
#[serde(rename = "xl")]
Erasure,
#[serde(rename = "xl-single")]
ErasureSingle,
#[serde(other)]
Unknown,
}
/// Represents the V3 backend disk structure version
/// under `.rustfs.sys` and actual data namespace.
///
/// FormatErasureV3 - structure holds format config version '3'.
///
/// The V3 format to support "large bucket" support where a bucket
/// can span multiple erasure sets.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct FormatErasureV3 {
/// Version of 'xl' format.
pub version: FormatErasureVersion,
/// This field carries assigned disk uuid.
pub this: Uuid,
/// Sets field carries the input disk order generated the first
/// time when fresh disks were supplied, it is a two-dimensional
/// array second dimension represents list of disks used per set.
pub sets: Vec<Vec<Uuid>>,
/// Distribution algorithm represents the hashing algorithm
/// to pick the right set index for an object.
#[serde(rename = "distributionAlgo")]
pub distribution_algo: DistributionAlgoVersion,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatErasureVersion {
#[serde(rename = "1")]
V1,
#[serde(rename = "2")]
V2,
#[serde(rename = "3")]
V3,
#[serde(other)]
Unknown,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum DistributionAlgoVersion {
#[serde(rename = "CRCMOD")]
V1,
#[serde(rename = "SIPMOD")]
V2,
#[serde(rename = "SIPMOD+PARITY")]
V3,
}
/// format.json currently has the format:
///
/// ```json
/// {
/// "version": "1",
/// "format": "XXXXX",
/// "id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
/// "XXXXX": {
//
/// }
/// }
/// ```
///
/// Ideally we will never have a situation where we will have to change the
/// fields of this struct and deal with related migration.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct FormatV3 {
/// Version of the format config.
pub version: FormatMetaVersion,
/// Format indicates the backend format type, supports two values 'xl' and 'xl-single'.
pub format: FormatBackend,
/// ID is the identifier for the rustfs deployment
pub id: Uuid,
#[serde(rename = "xl")]
pub erasure: FormatErasureV3,
// /// DiskInfo is an extended type which returns current
// /// disk usage per path.
#[serde(skip)]
pub disk_info: Option<DiskInfo>,
}
impl TryFrom<&[u8]> for FormatV3 {
type Error = JsonError;
fn try_from(data: &[u8]) -> std::result::Result<Self, Self::Error> {
serde_json::from_slice(data)
}
}
impl TryFrom<&str> for FormatV3 {
type Error = JsonError;
fn try_from(data: &str) -> std::result::Result<Self, Self::Error> {
serde_json::from_str(data)
}
}
impl FormatV3 {
/// Create a new format config with the given number of sets and set length.
pub fn new(num_sets: usize, set_len: usize) -> Self {
let format = if set_len == 1 {
FormatBackend::ErasureSingle
} else {
FormatBackend::Erasure
};
let erasure = FormatErasureV3 {
version: FormatErasureVersion::V3,
this: Uuid::nil(),
sets: (0..num_sets)
.map(|_| (0..set_len).map(|_| Uuid::new_v4()).collect())
.collect(),
distribution_algo: DistributionAlgoVersion::V3,
};
Self {
version: FormatMetaVersion::V1,
format,
id: Uuid::new_v4(),
erasure,
disk_info: None,
}
}
/// Returns the number of drives in the erasure set.
pub fn drives(&self) -> usize {
self.erasure.sets.iter().map(|v| v.len()).sum()
}
pub fn to_json(&self) -> std::result::Result<String, JsonError> {
serde_json::to_string(self)
}
/// returns the i,j'th position of the input `diskID` against the reference
///
/// format, after successful validation.
/// - i'th position is the set index
/// - j'th position is the disk index in the current set
pub fn find_disk_index_by_disk_id(&self, disk_id: Uuid) -> Result<(usize, usize)> {
if disk_id == Uuid::nil() {
return Err(Error::from(DiskError::DiskNotFound));
}
if disk_id == Uuid::max() {
return Err(Error::other("disk offline"));
}
for (i, set) in self.erasure.sets.iter().enumerate() {
for (j, d) in set.iter().enumerate() {
if disk_id.eq(d) {
return Ok((i, j));
}
}
}
Err(Error::other(format!("disk id not found {disk_id}")))
}
pub fn check_other(&self, other: &FormatV3) -> Result<()> {
let mut tmp = other.clone();
let this = tmp.erasure.this;
tmp.erasure.this = Uuid::nil();
if self.erasure.sets.len() != other.erasure.sets.len() {
return Err(Error::other(format!(
"Expected number of sets {}, got {}",
self.erasure.sets.len(),
other.erasure.sets.len()
)));
}
for i in 0..self.erasure.sets.len() {
if self.erasure.sets[i].len() != other.erasure.sets[i].len() {
return Err(Error::other(format!(
"Each set should be of same size, expected {}, got {}",
self.erasure.sets[i].len(),
other.erasure.sets[i].len()
)));
}
for j in 0..self.erasure.sets[i].len() {
if self.erasure.sets[i][j] != other.erasure.sets[i][j] {
return Err(Error::other(format!(
"UUID on positions {}:{} do not match with, expected {:?} got {:?}: (%w)",
i,
j,
self.erasure.sets[i][j].to_string(),
other.erasure.sets[i][j].to_string(),
)));
}
}
}
for i in 0..tmp.erasure.sets.len() {
for j in 0..tmp.erasure.sets[i].len() {
if this == tmp.erasure.sets[i][j] {
return Ok(());
}
}
}
Err(Error::other(format!(
"DriveID {:?} not found in any drive sets {:?}",
this, other.erasure.sets
)))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_format_v1() {
let format = FormatV3::new(1, 4);
let str = serde_json::to_string(&format);
println!("{str:?}");
let data = r#"
{
"version": "1",
"format": "xl",
"id": "321b3874-987d-4c15-8fa5-757c956b1243",
"xl": {
"version": "1",
"this": null,
"sets": [
[
"8ab9a908-f869-4f1f-8e42-eb067ffa7eb5",
"c26315da-05cf-4778-a9ea-b44ea09f58c5",
"fb87a891-18d3-44cf-a46f-bcc15093a038",
"356a925c-57b9-4313-88b3-053edf1104dc"
]
],
"distributionAlgo": "CRCMOD"
}
}"#;
let p = FormatV3::try_from(data);
println!("{p:?}");
}
#[test]
fn test_format_v3_new_single_disk() {
let format = FormatV3::new(1, 1);
assert_eq!(format.version, FormatMetaVersion::V1);
assert_eq!(format.format, FormatBackend::ErasureSingle);
assert_eq!(format.erasure.version, FormatErasureVersion::V3);
assert_eq!(format.erasure.sets.len(), 1);
assert_eq!(format.erasure.sets[0].len(), 1);
assert_eq!(format.erasure.distribution_algo, DistributionAlgoVersion::V3);
assert_eq!(format.erasure.this, Uuid::nil());
}
#[test]
fn test_format_v3_new_multiple_sets() {
let format = FormatV3::new(2, 4);
assert_eq!(format.version, FormatMetaVersion::V1);
assert_eq!(format.format, FormatBackend::Erasure);
assert_eq!(format.erasure.version, FormatErasureVersion::V3);
assert_eq!(format.erasure.sets.len(), 2);
assert_eq!(format.erasure.sets[0].len(), 4);
assert_eq!(format.erasure.sets[1].len(), 4);
assert_eq!(format.erasure.distribution_algo, DistributionAlgoVersion::V3);
}
#[test]
fn test_format_v3_drives() {
let format = FormatV3::new(2, 4);
assert_eq!(format.drives(), 8); // 2 sets * 4 drives each
let format_single = FormatV3::new(1, 1);
assert_eq!(format_single.drives(), 1); // 1 set * 1 drive
}
#[test]
fn test_format_v3_to_json() {
let format = FormatV3::new(1, 2);
let json_result = format.to_json();
assert!(json_result.is_ok());
let json_str = json_result.unwrap();
assert!(json_str.contains("\"version\":\"1\""));
assert!(json_str.contains("\"format\":\"xl\""));
}
#[test]
fn test_format_v3_from_json() {
let json_data = r#"{
"version": "1",
"format": "xl-single",
"id": "321b3874-987d-4c15-8fa5-757c956b1243",
"xl": {
"version": "3",
"this": "8ab9a908-f869-4f1f-8e42-eb067ffa7eb5",
"sets": [
[
"8ab9a908-f869-4f1f-8e42-eb067ffa7eb5"
]
],
"distributionAlgo": "SIPMOD+PARITY"
}
}"#;
let format = FormatV3::try_from(json_data);
assert!(format.is_ok());
let format = format.unwrap();
assert_eq!(format.format, FormatBackend::ErasureSingle);
assert_eq!(format.erasure.version, FormatErasureVersion::V3);
assert_eq!(format.erasure.distribution_algo, DistributionAlgoVersion::V3);
assert_eq!(format.erasure.sets.len(), 1);
assert_eq!(format.erasure.sets[0].len(), 1);
}
#[test]
fn test_format_v3_from_bytes() {
let json_data = r#"{
"version": "1",
"format": "xl",
"id": "321b3874-987d-4c15-8fa5-757c956b1243",
"xl": {
"version": "2",
"this": "00000000-0000-0000-0000-000000000000",
"sets": [
[
"8ab9a908-f869-4f1f-8e42-eb067ffa7eb5",
"c26315da-05cf-4778-a9ea-b44ea09f58c5"
]
],
"distributionAlgo": "SIPMOD"
}
}"#;
let format = FormatV3::try_from(json_data.as_bytes());
assert!(format.is_ok());
let format = format.unwrap();
assert_eq!(format.erasure.version, FormatErasureVersion::V2);
assert_eq!(format.erasure.distribution_algo, DistributionAlgoVersion::V2);
assert_eq!(format.erasure.sets[0].len(), 2);
}
#[test]
fn test_format_v3_invalid_json() {
let invalid_json = r#"{"invalid": "json"}"#;
let format = FormatV3::try_from(invalid_json);
assert!(format.is_err());
}
#[test]
fn test_find_disk_index_by_disk_id() {
let mut format = FormatV3::new(2, 2);
let target_disk_id = Uuid::new_v4();
format.erasure.sets[1][0] = target_disk_id;
let result = format.find_disk_index_by_disk_id(target_disk_id);
assert!(result.is_ok());
assert_eq!(result.unwrap(), (1, 0));
}
#[test]
fn test_find_disk_index_nil_uuid() {
let format = FormatV3::new(1, 2);
let result = format.find_disk_index_by_disk_id(Uuid::nil());
assert!(result.is_err());
assert!(matches!(result.unwrap_err(), Error::DiskNotFound));
}
#[test]
fn test_find_disk_index_max_uuid() {
let format = FormatV3::new(1, 2);
let result = format.find_disk_index_by_disk_id(Uuid::max());
assert!(result.is_err());
}
#[test]
fn test_find_disk_index_not_found() {
let format = FormatV3::new(1, 2);
let non_existent_id = Uuid::new_v4();
let result = format.find_disk_index_by_disk_id(non_existent_id);
assert!(result.is_err());
}
#[test]
fn test_check_other_identical() {
let format1 = FormatV3::new(2, 4);
let mut format2 = format1.clone();
format2.erasure.this = format1.erasure.sets[0][0];
let result = format1.check_other(&format2);
assert!(result.is_ok());
}
#[test]
fn test_check_other_different_set_count() {
let format1 = FormatV3::new(2, 4);
let format2 = FormatV3::new(3, 4);
let result = format1.check_other(&format2);
assert!(result.is_err());
}
#[test]
fn test_check_other_different_set_size() {
let format1 = FormatV3::new(2, 4);
let format2 = FormatV3::new(2, 6);
let result = format1.check_other(&format2);
assert!(result.is_err());
}
#[test]
fn test_check_other_different_disk_id() {
let format1 = FormatV3::new(1, 2);
let mut format2 = format1.clone();
format2.erasure.sets[0][0] = Uuid::new_v4();
let result = format1.check_other(&format2);
assert!(result.is_err());
}
#[test]
fn test_check_other_disk_not_in_sets() {
let format1 = FormatV3::new(1, 2);
let mut format2 = format1.clone();
format2.erasure.this = Uuid::new_v4(); // Set to a UUID not in any set
let result = format1.check_other(&format2);
assert!(result.is_err());
}
#[test]
fn test_format_meta_version_serialization() {
let v1 = FormatMetaVersion::V1;
let json = serde_json::to_string(&v1).unwrap();
assert_eq!(json, "\"1\"");
let unknown = FormatMetaVersion::Unknown;
let deserialized: FormatMetaVersion = serde_json::from_str("\"unknown\"").unwrap();
assert_eq!(deserialized, unknown);
}
#[test]
fn test_format_backend_serialization() {
let erasure = FormatBackend::Erasure;
let json = serde_json::to_string(&erasure).unwrap();
assert_eq!(json, "\"xl\"");
let single = FormatBackend::ErasureSingle;
let json = serde_json::to_string(&single).unwrap();
assert_eq!(json, "\"xl-single\"");
let unknown = FormatBackend::Unknown;
let deserialized: FormatBackend = serde_json::from_str("\"unknown\"").unwrap();
assert_eq!(deserialized, unknown);
}
#[test]
fn test_format_erasure_version_serialization() {
let v1 = FormatErasureVersion::V1;
let json = serde_json::to_string(&v1).unwrap();
assert_eq!(json, "\"1\"");
let v2 = FormatErasureVersion::V2;
let json = serde_json::to_string(&v2).unwrap();
assert_eq!(json, "\"2\"");
let v3 = FormatErasureVersion::V3;
let json = serde_json::to_string(&v3).unwrap();
assert_eq!(json, "\"3\"");
}
#[test]
fn test_distribution_algo_version_serialization() {
let v1 = DistributionAlgoVersion::V1;
let json = serde_json::to_string(&v1).unwrap();
assert_eq!(json, "\"CRCMOD\"");
let v2 = DistributionAlgoVersion::V2;
let json = serde_json::to_string(&v2).unwrap();
assert_eq!(json, "\"SIPMOD\"");
let v3 = DistributionAlgoVersion::V3;
let json = serde_json::to_string(&v3).unwrap();
assert_eq!(json, "\"SIPMOD+PARITY\"");
}
#[test]
fn test_format_v3_round_trip_serialization() {
let original = FormatV3::new(2, 3);
let json = original.to_json().unwrap();
let deserialized = FormatV3::try_from(json.as_str()).unwrap();
assert_eq!(original.version, deserialized.version);
assert_eq!(original.format, deserialized.format);
assert_eq!(original.erasure.version, deserialized.erasure.version);
assert_eq!(original.erasure.sets.len(), deserialized.erasure.sets.len());
assert_eq!(original.erasure.distribution_algo, deserialized.erasure.distribution_algo);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/disk/endpoint.rs | crates/ecstore/src/disk/endpoint.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::{Error, Result};
use path_absolutize::Absolutize;
use rustfs_utils::{is_local_host, is_socket_addr};
use std::{fmt::Display, path::Path};
use tracing::debug;
use url::{ParseError, Url};
/// enum for endpoint type.
#[derive(PartialEq, Eq, Debug)]
pub enum EndpointType {
/// path style endpoint type enum.
Path,
/// URL style endpoint type enum.
Url,
}
/// any type of endpoint.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub struct Endpoint {
pub url: Url,
pub is_local: bool,
pub pool_idx: i32,
pub set_idx: i32,
pub disk_idx: i32,
}
impl Display for Endpoint {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.url.scheme() == "file" {
write!(f, "{}", self.get_file_path())
} else {
write!(f, "{}", self.url)
}
}
}
impl TryFrom<&str> for Endpoint {
/// The type returned in the event of a conversion error.
type Error = Error;
/// Performs the conversion.
fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
// check whether given path is not empty.
if ["", "/", "\\"].iter().any(|&v| v.eq(value)) {
return Err(Error::other("empty or root endpoint is not supported"));
}
let mut is_local = false;
let url = match Url::parse(value) {
#[allow(unused_mut)]
Ok(mut url) if url.has_host() => {
// URL style of endpoint.
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All field should be empty except Host and Path.
if !((url.scheme() == "http" || url.scheme() == "https")
&& url.username().is_empty()
&& url.fragment().is_none()
&& url.query().is_none())
{
return Err(Error::other("invalid URL endpoint format"));
}
let path = url.path().to_string();
#[cfg(not(windows))]
let path = Path::new(&path).absolutize()?;
// On windows having a preceding SlashSeparator will cause problems, if the
// command line already has C:/<export-folder/ in it. Final resulting
// path on windows might become C:/C:/ this will cause problems
// of starting rustfs server properly in distributed mode on windows.
// As a special case make sure to trim the separator.
#[cfg(windows)]
let path = Path::new(&path[1..]).absolutize()?;
debug!("endpoint try_from: path={}", path.display());
if path.parent().is_none() || Path::new("").eq(&path) {
return Err(Error::other("empty or root path is not supported in URL endpoint"));
}
match path.to_str() {
Some(v) => url.set_path(v),
None => return Err(Error::other("invalid path")),
}
url
}
Ok(_) => {
// like d:/foo
is_local = true;
url_parse_from_file_path(value)?
}
Err(e) => match e {
ParseError::InvalidPort => {
return Err(Error::other("invalid URL endpoint format: port number must be between 1 to 65535"));
}
ParseError::EmptyHost => return Err(Error::other("invalid URL endpoint format: empty host name")),
ParseError::RelativeUrlWithoutBase => {
// like /foo
is_local = true;
url_parse_from_file_path(value)?
}
_ => return Err(Error::other(format!("invalid URL endpoint format: {e}"))),
},
};
Ok(Endpoint {
url,
is_local,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
})
}
}
impl Endpoint {
/// returns type of endpoint.
pub fn get_type(&self) -> EndpointType {
if self.url.scheme() == "file" {
EndpointType::Path
} else {
EndpointType::Url
}
}
/// sets a specific pool number to this node
pub fn set_pool_index(&mut self, idx: usize) {
self.pool_idx = idx as i32
}
/// sets a specific set number to this node
pub fn set_set_index(&mut self, idx: usize) {
self.set_idx = idx as i32
}
/// sets a specific disk number to this node
pub fn set_disk_index(&mut self, idx: usize) {
self.disk_idx = idx as i32
}
/// resolves the host and updates if it is local or not.
pub fn update_is_local(&mut self, local_port: u16) -> Result<()> {
match (self.url.scheme(), self.url.host()) {
(v, Some(host)) if v != "file" => {
self.is_local = is_local_host(host, self.url.port().unwrap_or_default(), local_port)?;
}
_ => {}
}
Ok(())
}
/// returns the host to be used for grid connections.
pub fn grid_host(&self) -> String {
match (self.url.host(), self.url.port()) {
(Some(host), Some(port)) => {
debug!("grid_host scheme={}: host={}, port={}", self.url.scheme(), host, port);
format!("{}://{}:{}", self.url.scheme(), host, port)
}
(Some(host), None) => {
debug!("grid_host scheme={}: host={}", self.url.scheme(), host);
format!("{}://{}", self.url.scheme(), host)
}
_ => String::new(),
}
}
pub fn host_port(&self) -> String {
match (self.url.host(), self.url.port()) {
(Some(host), Some(port)) => {
debug!("host_port host={}, port={}", host, port);
format!("{host}:{port}")
}
(Some(host), None) => {
debug!("host_port host={}, port={}", host, self.url.port().unwrap_or(0));
format!("{host}")
}
_ => String::new(),
}
}
pub fn get_file_path(&self) -> String {
let path: &str = self.url.path();
let decoded: std::borrow::Cow<'_, str> = match urlencoding::decode(path) {
Ok(decoded) => decoded,
Err(e) => {
debug!("Failed to decode path '{}': {}, using original path", path, e);
std::borrow::Cow::Borrowed(path)
}
};
#[cfg(windows)]
if self.url.scheme() == "file" {
let stripped: &str = decoded.strip_prefix('/').unwrap_or(&decoded);
debug!("get_file_path windows: path={}", stripped);
return stripped.to_string();
}
decoded.into_owned()
}
}
/// parse a file path into a URL.
fn url_parse_from_file_path(value: &str) -> Result<Url> {
// Only check if the arg is an ip address and ask for scheme since its absent.
// localhost, example.com, any FQDN cannot be disambiguated from a regular file path such as
// /mnt/export1. So we go ahead and start the rustfs server in FS modes in these cases.
let addr: Vec<&str> = value.splitn(2, '/').collect();
if is_socket_addr(addr[0]) {
return Err(Error::other("invalid URL endpoint format: missing scheme http or https"));
}
let file_path = match Path::new(value).absolutize() {
Ok(path) => path,
Err(err) => return Err(Error::other(format!("absolute path failed: {err}"))),
};
match Url::from_file_path(file_path) {
Ok(url) => Ok(url),
Err(_) => Err(Error::other("Convert a file path into an URL failed")),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_new_endpoint() {
#[derive(Default)]
struct TestCase<'a> {
arg: &'a str,
expected_endpoint: Option<Endpoint>,
expected_type: Option<EndpointType>,
expected_err: Option<Error>,
}
let u2 = Url::parse("https://example.org/path").unwrap();
let u4 = Url::parse("http://192.168.253.200/path").unwrap();
let u6 = Url::parse("http://server:/path").unwrap();
let root_slash_foo = Url::from_file_path("/foo").unwrap();
let test_cases = [
TestCase {
arg: "/foo",
expected_endpoint: Some(Endpoint {
url: root_slash_foo,
is_local: true,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Path),
expected_err: None,
},
TestCase {
arg: "https://example.org/path",
expected_endpoint: Some(Endpoint {
url: u2,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "http://192.168.253.200/path",
expected_endpoint: Some(Endpoint {
url: u4,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "\\",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "c://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "ftp://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://server/path?location",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://:/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://:8080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://server:/path",
expected_endpoint: Some(Endpoint {
url: u6,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "https://93.184.216.34:808080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format: port number must be between 1 to 65535")),
},
TestCase {
arg: "http://server:8080//",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "http://server:8080/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "192.168.1.210:9000",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::other("invalid URL endpoint format: missing scheme http or https")),
},
];
for test_case in test_cases {
let ret = Endpoint::try_from(test_case.arg);
if test_case.expected_err.is_none() && ret.is_err() {
panic!("{}: error: expected = <nil>, got = {:?}", test_case.arg, ret);
}
if test_case.expected_err.is_some() && ret.is_ok() {
panic!("{}: error: expected = {:?}, got = <nil>", test_case.arg, test_case.expected_err);
}
match (test_case.expected_err, ret) {
(None, Err(e)) => panic!("{}: error: expected = <nil>, got = {}", test_case.arg, e),
(None, Ok(mut ep)) => {
let _ = ep.update_is_local(9000);
if test_case.expected_type != Some(ep.get_type()) {
panic!(
"{}: type: expected = {:?}, got = {:?}",
test_case.arg,
test_case.expected_type,
ep.get_type()
);
}
assert_eq!(test_case.expected_endpoint, Some(ep), "{}: endpoint", test_case.arg);
}
(Some(e), Ok(_)) => panic!("{}: error: expected = {}, got = <nil>", test_case.arg, e),
(Some(e), Err(e2)) => {
assert_eq!(e.to_string(), e2.to_string(), "{}: error: expected = {}, got = {}", test_case.arg, e, e2)
}
}
}
}
#[test]
fn test_endpoint_display() {
// Test file path display
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
let display_str = format!("{file_endpoint}");
assert_eq!(display_str, "/tmp/data");
// Test URL display
let url_endpoint = Endpoint::try_from("http://example.com:9000/path").unwrap();
let display_str = format!("{url_endpoint}");
assert_eq!(display_str, "http://example.com:9000/path");
}
#[test]
fn test_endpoint_type() {
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
assert_eq!(file_endpoint.get_type(), EndpointType::Path);
let url_endpoint = Endpoint::try_from("http://example.com:9000/path").unwrap();
assert_eq!(url_endpoint.get_type(), EndpointType::Url);
}
#[test]
fn test_endpoint_indexes() {
let mut endpoint = Endpoint::try_from("/tmp/data").unwrap();
// Test initial values
assert_eq!(endpoint.pool_idx, -1);
assert_eq!(endpoint.set_idx, -1);
assert_eq!(endpoint.disk_idx, -1);
// Test setting indexes
endpoint.set_pool_index(2);
endpoint.set_set_index(3);
endpoint.set_disk_index(4);
assert_eq!(endpoint.pool_idx, 2);
assert_eq!(endpoint.set_idx, 3);
assert_eq!(endpoint.disk_idx, 4);
}
#[test]
fn test_endpoint_grid_host() {
let endpoint = Endpoint::try_from("http://example.com:9000/path").unwrap();
assert_eq!(endpoint.grid_host(), "http://example.com:9000");
let endpoint_no_port = Endpoint::try_from("https://example.com/path").unwrap();
assert_eq!(endpoint_no_port.grid_host(), "https://example.com");
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
assert_eq!(file_endpoint.grid_host(), "");
}
#[test]
fn test_endpoint_host_port() {
let endpoint = Endpoint::try_from("http://example.com:9000/path").unwrap();
assert_eq!(endpoint.host_port(), "example.com:9000");
let endpoint_no_port = Endpoint::try_from("https://example.com/path").unwrap();
assert_eq!(endpoint_no_port.host_port(), "example.com");
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
assert_eq!(file_endpoint.host_port(), "");
}
#[test]
fn test_endpoint_get_file_path() {
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
assert_eq!(file_endpoint.get_file_path(), "/tmp/data");
let url_endpoint = Endpoint::try_from("http://example.com:9000/path/to/data").unwrap();
assert_eq!(url_endpoint.get_file_path(), "/path/to/data");
}
#[test]
fn test_endpoint_clone_and_equality() {
let endpoint1 = Endpoint::try_from("/tmp/data").unwrap();
let endpoint2 = endpoint1.clone();
assert_eq!(endpoint1, endpoint2);
assert_eq!(endpoint1.url, endpoint2.url);
assert_eq!(endpoint1.is_local, endpoint2.is_local);
assert_eq!(endpoint1.pool_idx, endpoint2.pool_idx);
assert_eq!(endpoint1.set_idx, endpoint2.set_idx);
assert_eq!(endpoint1.disk_idx, endpoint2.disk_idx);
}
#[test]
fn test_endpoint_with_special_paths() {
// Test with complex paths
let complex_path = "/var/lib/rustfs/data/bucket1";
let endpoint = Endpoint::try_from(complex_path).unwrap();
assert_eq!(endpoint.get_file_path(), complex_path);
assert!(endpoint.is_local);
assert_eq!(endpoint.get_type(), EndpointType::Path);
}
#[test]
fn test_endpoint_with_spaces_in_path() {
let path_with_spaces = "/Users/test/Library/Application Support/rustfs/data";
let endpoint = Endpoint::try_from(path_with_spaces).unwrap();
assert_eq!(endpoint.get_file_path(), path_with_spaces);
assert!(endpoint.is_local);
assert_eq!(endpoint.get_type(), EndpointType::Path);
}
#[test]
fn test_endpoint_percent_encoding_roundtrip() {
let path_with_spaces = "/Users/test/Library/Application Support/rustfs/data";
let endpoint = Endpoint::try_from(path_with_spaces).unwrap();
// Verify that the URL internally stores percent-encoded path
assert!(
endpoint.url.path().contains("%20"),
"URL path should contain percent-encoded spaces: {}",
endpoint.url.path()
);
// Verify that get_file_path() decodes the percent-encoded path correctly
assert_eq!(
endpoint.get_file_path(),
"/Users/test/Library/Application Support/rustfs/data",
"get_file_path() should decode percent-encoded spaces"
);
}
#[test]
fn test_endpoint_with_various_special_characters() {
// Test path with multiple special characters that get percent-encoded
let path_with_special = "/tmp/test path/data[1]/file+name&more";
let endpoint = Endpoint::try_from(path_with_special).unwrap();
// get_file_path() should return the original path with decoded characters
assert_eq!(endpoint.get_file_path(), path_with_special);
}
#[test]
fn test_endpoint_update_is_local() {
let mut endpoint = Endpoint::try_from("http://localhost:9000/path").unwrap();
let result = endpoint.update_is_local(9000);
assert!(result.is_ok());
let mut file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
let result = file_endpoint.update_is_local(9000);
assert!(result.is_ok());
}
#[test]
fn test_url_parse_from_file_path() {
let result = url_parse_from_file_path("/tmp/test");
assert!(result.is_ok());
let url = result.unwrap();
assert_eq!(url.scheme(), "file");
}
#[test]
fn test_endpoint_hash() {
use std::collections::HashSet;
let endpoint1 = Endpoint::try_from("/tmp/data1").unwrap();
let endpoint2 = Endpoint::try_from("/tmp/data2").unwrap();
let endpoint3 = endpoint1.clone();
let mut set = HashSet::new();
set.insert(endpoint1);
set.insert(endpoint2);
set.insert(endpoint3); // Should not be added as it's equal to endpoint1
assert_eq!(set.len(), 2);
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/metadata_sys.rs | crates/ecstore/src/bucket/metadata_sys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI as _;
use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
use crate::bucket::utils::{deserialize, is_meta_bucketname};
use crate::error::{Error, Result, is_err_bucket_not_found};
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
use crate::store::ECStore;
use futures::future::join_all;
use rustfs_common::heal_channel::HealOpts;
use rustfs_policy::policy::BucketPolicy;
use s3s::dto::ReplicationConfiguration;
use s3s::dto::{
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
VersioningConfiguration,
};
use std::collections::HashSet;
use std::sync::OnceLock;
use std::time::Duration;
use std::{collections::HashMap, sync::Arc};
use time::OffsetDateTime;
use tokio::sync::RwLock;
use tokio::time::sleep;
use tracing::error;
use super::metadata::{BucketMetadata, load_bucket_metadata};
use super::quota::BucketQuota;
use super::target::BucketTargets;
use lazy_static::lazy_static;
lazy_static! {
pub static ref GLOBAL_BucketMetadataSys: OnceLock<Arc<RwLock<BucketMetadataSys>>> = OnceLock::new();
}
pub async fn init_bucket_metadata_sys(api: Arc<ECStore>, buckets: Vec<String>) {
let mut sys = BucketMetadataSys::new(api);
sys.init(buckets).await;
let sys = Arc::new(RwLock::new(sys));
GLOBAL_BucketMetadataSys.set(sys).unwrap();
}
// panic if not init
pub(super) fn get_bucket_metadata_sys() -> Result<Arc<RwLock<BucketMetadataSys>>> {
if let Some(sys) = GLOBAL_BucketMetadataSys.get() {
Ok(sys.clone())
} else {
Err(Error::other("GLOBAL_BucketMetadataSys not init"))
}
}
pub async fn set_bucket_metadata(bucket: String, bm: BucketMetadata) -> Result<()> {
let sys = get_bucket_metadata_sys()?;
let lock = sys.write().await;
lock.set(bucket, Arc::new(bm)).await;
Ok(())
}
pub async fn get(bucket: &str) -> Result<Arc<BucketMetadata>> {
let sys = get_bucket_metadata_sys()?;
let lock = sys.read().await;
lock.get(bucket).await
}
pub async fn update(bucket: &str, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;
bucket_meta_sys.update(bucket, config_file, data).await
}
pub async fn delete(bucket: &str, config_file: &str) -> Result<OffsetDateTime> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;
bucket_meta_sys.delete(bucket, config_file).await
}
pub async fn get_bucket_policy(bucket: &str) -> Result<(BucketPolicy, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_bucket_policy(bucket).await
}
pub async fn get_quota_config(bucket: &str) -> Result<(BucketQuota, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_quota_config(bucket).await
}
pub async fn get_bucket_targets_config(bucket: &str) -> Result<BucketTargets> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_bucket_targets_config(bucket).await
}
pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_tagging_config(bucket).await
}
pub async fn get_lifecycle_config(bucket: &str) -> Result<(BucketLifecycleConfiguration, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_lifecycle_config(bucket).await
}
pub async fn get_sse_config(bucket: &str) -> Result<(ServerSideEncryptionConfiguration, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_sse_config(bucket).await
}
pub async fn get_object_lock_config(bucket: &str) -> Result<(ObjectLockConfiguration, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_object_lock_config(bucket).await
}
pub async fn get_replication_config(bucket: &str) -> Result<(ReplicationConfiguration, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_replication_config(bucket).await
}
pub async fn get_notification_config(bucket: &str) -> Result<Option<NotificationConfiguration>> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_notification_config(bucket).await
}
pub async fn get_versioning_config(bucket: &str) -> Result<(VersioningConfiguration, OffsetDateTime)> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_versioning_config(bucket).await
}
pub async fn get_config_from_disk(bucket: &str) -> Result<BucketMetadata> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.get_config_from_disk(bucket).await
}
pub async fn created_at(bucket: &str) -> Result<OffsetDateTime> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
bucket_meta_sys.created_at(bucket).await
}
#[derive(Debug)]
pub struct BucketMetadataSys {
metadata_map: RwLock<HashMap<String, Arc<BucketMetadata>>>,
api: Arc<ECStore>,
initialized: RwLock<bool>,
}
impl BucketMetadataSys {
pub fn new(api: Arc<ECStore>) -> Self {
Self {
metadata_map: RwLock::new(HashMap::new()),
api,
initialized: RwLock::new(false),
}
}
pub async fn init(&mut self, buckets: Vec<String>) {
let _ = self.init_internal(buckets).await;
}
async fn init_internal(&self, buckets: Vec<String>) -> Result<()> {
let count = {
if let Some(endpoints) = GLOBAL_Endpoints.get() {
endpoints.es_count() * 10
} else {
return Err(Error::other("GLOBAL_Endpoints not init"));
}
};
let mut failed_buckets: HashSet<String> = HashSet::new();
let mut buckets = buckets.as_slice();
loop {
if buckets.len() < count {
self.concurrent_load(buckets, &mut failed_buckets).await;
break;
}
self.concurrent_load(&buckets[..count], &mut failed_buckets).await;
buckets = &buckets[count..]
}
let mut initialized = self.initialized.write().await;
*initialized = true;
if is_dist_erasure().await {
// TODO: refresh_buckets_metadata_loop
}
Ok(())
}
async fn concurrent_load(&self, buckets: &[String], failed_buckets: &mut HashSet<String>) {
let mut futures = Vec::new();
for bucket in buckets.iter() {
// TODO: HealBucket
let api = self.api.clone();
let bucket = bucket.clone();
futures.push(async move {
sleep(Duration::from_millis(30)).await;
let _ = api
.heal_bucket(
&bucket,
&HealOpts {
recreate: true,
..Default::default()
},
)
.await;
load_bucket_metadata(self.api.clone(), bucket.as_str()).await
});
}
let results = join_all(futures).await;
let mut idx = 0;
let mut mp = self.metadata_map.write().await;
// TODO:EventNotifier,BucketTargetSys
for res in results {
match res {
Ok(res) => {
if let Some(bucket) = buckets.get(idx) {
let x = Arc::new(res);
mp.insert(bucket.clone(), x.clone());
// TODO:EventNotifier,BucketTargetSys
BucketTargetSys::get().set(bucket, &x).await;
}
}
Err(e) => {
error!("Unable to load bucket metadata, will be retried: {:?}", e);
if let Some(bucket) = buckets.get(idx) {
failed_buckets.insert(bucket.clone());
}
}
}
idx += 1;
}
}
pub async fn get(&self, bucket: &str) -> Result<Arc<BucketMetadata>> {
if is_meta_bucketname(bucket) {
return Err(Error::ConfigNotFound);
}
let map = self.metadata_map.read().await;
if let Some(bm) = map.get(bucket) {
Ok(bm.clone())
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn set(&self, bucket: String, bm: Arc<BucketMetadata>) {
if !is_meta_bucketname(&bucket) {
let mut map = self.metadata_map.write().await;
map.insert(bucket, bm);
}
}
async fn _reset(&mut self) {
let mut map = self.metadata_map.write().await;
map.clear();
}
pub async fn update(&mut self, bucket: &str, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
self.update_and_parse(bucket, config_file, data, true).await
}
pub async fn delete(&mut self, bucket: &str, config_file: &str) -> Result<OffsetDateTime> {
if config_file == BUCKET_LIFECYCLE_CONFIG {
let meta = match self.get_config_from_disk(bucket).await {
Ok(res) => res,
Err(err) => {
if err != Error::ConfigNotFound {
return Err(err);
} else {
BucketMetadata::new(bucket)
}
}
};
if !meta.lifecycle_config_xml.is_empty() {
let cfg = deserialize::<BucketLifecycleConfiguration>(&meta.lifecycle_config_xml)?;
// TODO: FIXME:
// for _v in cfg.rules.iter() {
// break;
// }
if let Some(_v) = cfg.rules.first() {}
}
// TODO: other lifecycle handle
}
self.update_and_parse(bucket, config_file, Vec::new(), false).await
}
async fn update_and_parse(&mut self, bucket: &str, config_file: &str, data: Vec<u8>, parse: bool) -> Result<OffsetDateTime> {
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
if is_meta_bucketname(bucket) {
return Err(Error::other("errInvalidArgument"));
}
let mut bm = match load_bucket_metadata_parse(store, bucket, parse).await {
Ok(res) => res,
Err(err) => {
if !is_erasure().await && !is_dist_erasure().await && is_err_bucket_not_found(&err) {
BucketMetadata::new(bucket)
} else {
error!("load bucket metadata failed: {}", err);
return Err(err);
}
}
};
let updated = bm.update_config(config_file, data)?;
self.save(bm).await?;
Ok(updated)
}
async fn save(&self, bm: BucketMetadata) -> Result<()> {
if is_meta_bucketname(&bm.name) {
return Err(Error::other("errInvalidArgument"));
}
let mut bm = bm;
bm.save().await?;
self.set(bm.name.clone(), Arc::new(bm)).await;
Ok(())
}
pub async fn get_config_from_disk(&self, bucket: &str) -> Result<BucketMetadata> {
if is_meta_bucketname(bucket) {
return Err(Error::other("errInvalidArgument"));
}
load_bucket_metadata(self.api.clone(), bucket).await
}
pub async fn get_config(&self, bucket: &str) -> Result<(Arc<BucketMetadata>, bool)> {
let has_bm = {
let map = self.metadata_map.read().await;
map.get(&bucket.to_string()).cloned()
};
if let Some(bm) = has_bm {
Ok((bm, false))
} else {
let bm = match load_bucket_metadata(self.api.clone(), bucket).await {
Ok(res) => res,
Err(err) => {
return if *self.initialized.read().await {
Err(Error::other("errBucketMetadataNotInitialized"))
} else {
Err(err)
};
}
};
let mut map = self.metadata_map.write().await;
let bm = Arc::new(bm);
map.insert(bucket.to_string(), bm.clone());
Ok((bm, true))
}
}
pub async fn get_versioning_config(&self, bucket: &str) -> Result<(VersioningConfiguration, OffsetDateTime)> {
let bm = match self.get_config(bucket).await {
Ok((res, _)) => res,
Err(err) => {
return if err == Error::ConfigNotFound {
Ok((VersioningConfiguration::default(), OffsetDateTime::UNIX_EPOCH))
} else {
Err(err)
};
}
};
if let Some(config) = &bm.versioning_config {
Ok((config.clone(), bm.versioning_config_updated_at))
} else {
Ok((VersioningConfiguration::default(), bm.versioning_config_updated_at))
}
}
pub async fn get_bucket_policy(&self, bucket: &str) -> Result<(BucketPolicy, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.policy_config {
Ok((config.clone(), bm.policy_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_tagging_config(&self, bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.tagging_config {
Ok((config.clone(), bm.tagging_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_object_lock_config(&self, bucket: &str) -> Result<(ObjectLockConfiguration, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.object_lock_config {
Ok((config.clone(), bm.object_lock_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_lifecycle_config(&self, bucket: &str) -> Result<(BucketLifecycleConfiguration, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.lifecycle_config {
if config.rules.is_empty() {
Err(Error::ConfigNotFound)
} else {
Ok((config.clone(), bm.lifecycle_config_updated_at))
}
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_notification_config(&self, bucket: &str) -> Result<Option<NotificationConfiguration>> {
let bm = match self.get_config(bucket).await {
Ok((bm, _)) => bm.notification_config.clone(),
Err(err) => {
if err == Error::ConfigNotFound {
None
} else {
return Err(err);
}
}
};
Ok(bm)
}
pub async fn get_sse_config(&self, bucket: &str) -> Result<(ServerSideEncryptionConfiguration, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.sse_config {
Ok((config.clone(), bm.encryption_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn created_at(&self, bucket: &str) -> Result<OffsetDateTime> {
let bm = match self.get_config(bucket).await {
Ok((bm, _)) => bm.created,
Err(err) => {
return Err(err);
}
};
Ok(bm)
}
pub async fn get_quota_config(&self, bucket: &str) -> Result<(BucketQuota, OffsetDateTime)> {
let (bm, _) = self.get_config(bucket).await?;
if let Some(config) = &bm.quota_config {
Ok((config.clone(), bm.quota_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_replication_config(&self, bucket: &str) -> Result<(ReplicationConfiguration, OffsetDateTime)> {
let (bm, reload) = self.get_config(bucket).await?;
if let Some(config) = &bm.replication_config {
if reload {
// TODO: globalBucketTargetSys
}
//println!("549 {:?}", config.clone());
Ok((config.clone(), bm.replication_config_updated_at))
} else {
Err(Error::ConfigNotFound)
}
}
pub async fn get_bucket_targets_config(&self, bucket: &str) -> Result<BucketTargets> {
let (bm, reload) = self.get_config(bucket).await?;
if let Some(config) = &bm.bucket_target_config {
if reload {
// TODO: globalBucketTargetSys
//config.
}
Ok(config.clone())
} else {
Err(Error::ConfigNotFound)
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/bucket_target_sys.rs | crates/ecstore/src/bucket/bucket_target_sys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::metadata::BucketMetadata;
use aws_credential_types::Credentials as SdkCredentials;
use aws_sdk_s3::config::Region as SdkRegion;
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::operation::complete_multipart_upload::CompleteMultipartUploadOutput;
use aws_sdk_s3::operation::head_bucket::HeadBucketError;
use aws_sdk_s3::operation::head_object::HeadObjectError;
use aws_sdk_s3::operation::upload_part::UploadPartOutput;
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{
ChecksumMode, CompletedMultipartUpload, CompletedPart, ObjectLockLegalHoldStatus, ObjectLockRetentionMode,
};
use aws_sdk_s3::{Client as S3Client, Config as S3Config, operation::head_object::HeadObjectOutput};
use aws_sdk_s3::{config::SharedCredentialsProvider, types::BucketVersioningStatus};
use http::{HeaderMap, HeaderName, HeaderValue, StatusCode};
use reqwest::Client as HttpClient;
use rustfs_filemeta::{ReplicationStatusType, ReplicationType};
use rustfs_utils::http::{
AMZ_BUCKET_REPLICATION_STATUS, AMZ_OBJECT_LOCK_BYPASS_GOVERNANCE, AMZ_OBJECT_LOCK_LEGAL_HOLD, AMZ_OBJECT_LOCK_MODE,
AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, AMZ_STORAGE_CLASS, AMZ_WEBSITE_REDIRECT_LOCATION, RUSTFS_BUCKET_REPLICATION_CHECK,
RUSTFS_BUCKET_REPLICATION_DELETE_MARKER, RUSTFS_BUCKET_REPLICATION_REQUEST, RUSTFS_BUCKET_SOURCE_ETAG,
RUSTFS_BUCKET_SOURCE_MTIME, RUSTFS_BUCKET_SOURCE_VERSION_ID, RUSTFS_FORCE_DELETE, is_amz_header, is_minio_header,
is_rustfs_header, is_standard_header, is_storageclass_header,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::str::FromStr as _;
use std::sync::Arc;
use std::sync::OnceLock;
use std::time::{Duration, Instant};
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use tracing::error;
use tracing::warn;
use url::Url;
use uuid::Uuid;
use crate::bucket::metadata_sys::get_bucket_targets_config;
use crate::bucket::metadata_sys::get_replication_config;
use crate::bucket::replication::ObjectOpts;
use crate::bucket::replication::ReplicationConfigurationExt;
use crate::bucket::target::ARN;
use crate::bucket::target::BucketTargetType;
use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
use crate::bucket::versioning_sys::BucketVersioningSys;
const DEFAULT_HEALTH_CHECK_DURATION: Duration = Duration::from_secs(5);
const DEFAULT_HEALTH_CHECK_RELOAD_DURATION: Duration = Duration::from_secs(30 * 60);
pub static GLOBAL_BUCKET_TARGET_SYS: OnceLock<BucketTargetSys> = OnceLock::new();
#[derive(Debug, Clone)]
pub struct ArnTarget {
pub client: Option<Arc<TargetClient>>,
pub last_refresh: OffsetDateTime,
}
impl Default for ArnTarget {
fn default() -> Self {
Self {
client: None,
last_refresh: OffsetDateTime::UNIX_EPOCH,
}
}
}
impl ArnTarget {
pub fn with_client(client: Arc<TargetClient>) -> Self {
Self {
client: Some(client),
last_refresh: OffsetDateTime::now_utc(),
}
}
}
#[derive(Debug, Clone, Default)]
pub struct ArnErrs {
pub count: i64,
pub update_in_progress: bool,
pub bucket: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LastMinuteLatency {
times: Vec<Duration>,
#[serde(skip, default = "instant_now")]
start_time: Instant,
}
fn instant_now() -> Instant {
Instant::now()
}
impl Default for LastMinuteLatency {
fn default() -> Self {
Self {
times: Vec::new(),
start_time: Instant::now(),
}
}
}
impl LastMinuteLatency {
pub fn new() -> Self {
Self::default()
}
pub fn add(&mut self, duration: Duration) {
let now = Instant::now();
// Remove entries older than 1 minute
self.times
.retain(|_| now.duration_since(self.start_time) < Duration::from_secs(60));
self.times.push(duration);
}
pub fn get_total(&self) -> LatencyAverage {
if self.times.is_empty() {
return LatencyAverage {
avg: Duration::from_secs(0),
};
}
let total: Duration = self.times.iter().sum();
LatencyAverage {
avg: total / self.times.len() as u32,
}
}
}
#[derive(Debug, Clone)]
pub struct LatencyAverage {
pub avg: Duration,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LatencyStat {
pub lastmin: LastMinuteLatency,
pub curr: Duration,
pub avg: Duration,
pub peak: Duration,
pub n: i64,
}
impl LatencyStat {
pub fn new() -> Self {
Self::default()
}
pub fn update(&mut self, duration: Duration) {
self.lastmin.add(duration);
self.n += 1;
if duration > self.peak {
self.peak = duration;
}
self.curr = self.lastmin.get_total().avg;
self.avg = Duration::from_nanos(
(self.avg.as_nanos() as i64 * (self.n - 1) + self.curr.as_nanos() as i64) as u64 / self.n as u64,
);
}
}
#[derive(Debug, Clone)]
pub struct EpHealth {
pub endpoint: String,
pub scheme: String,
pub online: bool,
pub last_online: Option<OffsetDateTime>,
pub last_hc_at: Option<OffsetDateTime>,
pub offline_duration: Duration,
pub latency: LatencyStat,
}
impl Default for EpHealth {
fn default() -> Self {
Self {
endpoint: String::new(),
scheme: String::new(),
online: true,
last_online: None,
last_hc_at: None,
offline_duration: Duration::from_secs(0),
latency: LatencyStat::new(),
}
}
}
#[derive(Debug, Default)]
pub struct BucketTargetSys {
pub arn_remotes_map: Arc<RwLock<HashMap<String, ArnTarget>>>,
pub targets_map: Arc<RwLock<HashMap<String, Vec<BucketTarget>>>>,
pub h_mutex: Arc<RwLock<HashMap<String, EpHealth>>>,
pub hc_client: Arc<HttpClient>,
pub a_mutex: Arc<Mutex<HashMap<String, ArnErrs>>>,
pub arn_errs_map: Arc<RwLock<HashMap<String, ArnErrs>>>,
}
impl BucketTargetSys {
pub fn get() -> &'static Self {
GLOBAL_BUCKET_TARGET_SYS.get_or_init(Self::new)
}
fn new() -> Self {
Self {
arn_remotes_map: Arc::new(RwLock::new(HashMap::new())),
targets_map: Arc::new(RwLock::new(HashMap::new())),
h_mutex: Arc::new(RwLock::new(HashMap::new())),
hc_client: Arc::new(HttpClient::new()),
a_mutex: Arc::new(Mutex::new(HashMap::new())),
arn_errs_map: Arc::new(RwLock::new(HashMap::new())),
}
}
pub async fn is_offline(&self, url: &Url) -> bool {
{
let health_map = self.h_mutex.read().await;
if let Some(health) = health_map.get(url.host_str().unwrap_or("")) {
return !health.online;
}
}
// Initialize health check if not exists
self.init_hc(url).await;
false
}
pub async fn mark_offline(&self, url: &Url) {
let mut health_map = self.h_mutex.write().await;
if let Some(health) = health_map.get_mut(url.host_str().unwrap_or("")) {
health.online = false;
}
}
pub async fn init_hc(&self, url: &Url) {
let mut health_map = self.h_mutex.write().await;
let host = url.host_str().unwrap_or("").to_string();
health_map.insert(
host.clone(),
EpHealth {
endpoint: host,
scheme: url.scheme().to_string(),
online: true,
..Default::default()
},
);
}
pub async fn heartbeat(&self) {
let mut interval = tokio::time::interval(DEFAULT_HEALTH_CHECK_DURATION);
loop {
interval.tick().await;
let endpoints = {
let health_map = self.h_mutex.read().await;
health_map.keys().cloned().collect::<Vec<_>>()
};
for endpoint in endpoints {
// Perform health check
let start = Instant::now();
let online = self.check_endpoint_health(&endpoint).await;
let duration = start.elapsed();
{
let mut health_map = self.h_mutex.write().await;
if let Some(health) = health_map.get_mut(&endpoint) {
let prev_online = health.online;
health.online = online;
health.last_hc_at = Some(OffsetDateTime::now_utc());
health.latency.update(duration);
if online {
health.last_online = Some(OffsetDateTime::now_utc());
} else if prev_online {
// Just went offline
health.offline_duration += duration;
}
}
}
}
}
}
async fn check_endpoint_health(&self, _endpoint: &str) -> bool {
true
// TODO: Health check
// // Simple health check implementation
// // In a real implementation, you would make actual HTTP requests
// match self
// .hc_client
// .get(format!("https://{}/rustfs/health/ready", endpoint))
// .timeout(Duration::from_secs(3))
// .send()
// .await
// {
// Ok(response) => response.status().is_success(),
// Err(_) => false,
// }
}
pub async fn health_stats(&self) -> HashMap<String, EpHealth> {
let health_map = self.h_mutex.read().await;
health_map.clone()
}
pub async fn list_targets(&self, bucket: &str, arn_type: &str) -> Vec<BucketTarget> {
let health_stats = self.health_stats().await;
let mut targets = Vec::new();
if !bucket.is_empty() {
if let Ok(bucket_targets) = self.list_bucket_targets(bucket).await {
for mut target in bucket_targets.targets {
if arn_type.is_empty() || target.target_type.to_string() == arn_type {
if let Some(health) = health_stats.get(&target.endpoint) {
target.total_downtime = health.offline_duration;
target.online = health.online;
target.last_online = health.last_online;
target.latency = target::LatencyStat {
curr: health.latency.curr,
avg: health.latency.avg,
max: health.latency.peak,
};
}
targets.push(target);
}
}
}
return targets;
}
let targets_map = self.targets_map.read().await;
for bucket_targets in targets_map.values() {
for mut target in bucket_targets.iter().cloned() {
if arn_type.is_empty() || target.target_type.to_string() == arn_type {
if let Some(health) = health_stats.get(&target.endpoint) {
target.total_downtime = health.offline_duration;
target.online = health.online;
target.last_online = health.last_online;
target.latency = target::LatencyStat {
curr: health.latency.curr,
avg: health.latency.avg,
max: health.latency.peak,
};
}
targets.push(target);
}
}
}
targets
}
pub async fn list_bucket_targets(&self, bucket: &str) -> Result<BucketTargets, BucketTargetError> {
let targets_map = self.targets_map.read().await;
if let Some(targets) = targets_map.get(bucket) {
Ok(BucketTargets {
targets: targets.clone(),
})
} else {
Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: bucket.to_string(),
})
}
}
pub async fn delete(&self, bucket: &str) {
let mut targets_map = self.targets_map.write().await;
let mut arn_remotes_map = self.arn_remotes_map.write().await;
if let Some(targets) = targets_map.remove(bucket) {
for target in targets {
arn_remotes_map.remove(&target.arn);
}
}
}
pub async fn set_target(&self, bucket: &str, target: &BucketTarget, update: bool) -> Result<(), BucketTargetError> {
if !target.target_type.is_valid() && !update {
return Err(BucketTargetError::BucketRemoteArnTypeInvalid {
bucket: bucket.to_string(),
});
}
let target_client = self.get_remote_target_client_internal(target).await?;
// Validate target credentials
if !self.validate_target_credentials(target).await? {
return Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: target.target_bucket.clone(),
});
}
match target_client.bucket_exists(&target.target_bucket).await {
Ok(false) => {
return Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: target.target_bucket.clone(),
});
}
Err(e) => {
return Err(BucketTargetError::RemoteTargetConnectionErr {
bucket: target.target_bucket.clone(),
access_key: target.credentials.as_ref().map(|c| c.access_key.clone()).unwrap_or_default(),
error: e.to_string(),
});
}
Ok(true) => {}
}
if target.target_type == BucketTargetType::ReplicationService {
if !BucketVersioningSys::enabled(bucket).await {
return Err(BucketTargetError::BucketReplicationSourceNotVersioned {
bucket: bucket.to_string(),
});
}
let versioning = target_client
.get_bucket_versioning(&target.target_bucket)
.await
.map_err(|_e| BucketTargetError::BucketReplicationSourceNotVersioned {
bucket: bucket.to_string(),
})?;
if versioning.is_none() {
return Err(BucketTargetError::BucketReplicationSourceNotVersioned {
bucket: bucket.to_string(),
});
}
}
{
let mut targets_map = self.targets_map.write().await;
let bucket_targets = targets_map.entry(bucket.to_string()).or_insert_with(Vec::new);
let mut found = false;
for (idx, existing_target) in bucket_targets.iter().enumerate() {
if existing_target.target_type.to_string() == target.target_type.to_string() {
if existing_target.arn == target.arn {
if !update {
return Err(BucketTargetError::BucketRemoteAlreadyExists {
bucket: existing_target.target_bucket.clone(),
});
}
bucket_targets[idx] = target.clone();
found = true;
break;
}
if existing_target.endpoint == target.endpoint {
return Err(BucketTargetError::BucketRemoteAlreadyExists {
bucket: existing_target.target_bucket.clone(),
});
}
}
}
if !found && !update {
bucket_targets.push(target.clone());
}
}
{
let mut arn_remotes_map = self.arn_remotes_map.write().await;
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(target_client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
}
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
Ok(())
}
pub async fn remove_target(&self, bucket: &str, arn_str: &str) -> Result<(), BucketTargetError> {
if arn_str.is_empty() {
return Err(BucketTargetError::BucketRemoteArnInvalid {
bucket: bucket.to_string(),
});
}
let arn = ARN::from_str(arn_str).map_err(|_e| BucketTargetError::BucketRemoteArnInvalid {
bucket: bucket.to_string(),
})?;
if arn.arn_type == BucketTargetType::ReplicationService
&& let Ok((config, _)) = get_replication_config(bucket).await
{
for rule in config.filter_target_arns(&ObjectOpts {
op_type: ReplicationType::All,
..Default::default()
}) {
if rule == arn_str || config.role == arn_str {
let arn_remotes_map = self.arn_remotes_map.read().await;
if arn_remotes_map.get(arn_str).is_some() {
return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
bucket: bucket.to_string(),
});
}
}
}
}
{
let mut targets_map = self.targets_map.write().await;
let Some(targets) = targets_map.get(bucket) else {
return Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: bucket.to_string(),
});
};
let new_targets: Vec<BucketTarget> = targets.iter().filter(|t| t.arn != arn_str).cloned().collect();
if new_targets.len() == targets.len() {
return Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: bucket.to_string(),
});
}
targets_map.insert(bucket.to_string(), new_targets);
}
{
self.arn_remotes_map.write().await.remove(arn_str);
}
self.update_bandwidth_limit(bucket, arn_str, 0);
Ok(())
}
pub async fn mark_refresh_in_progress(&self, bucket: &str, arn: &str) {
let mut arn_errs = self.arn_errs_map.write().await;
arn_errs.entry(arn.to_string()).or_insert_with(|| ArnErrs {
bucket: bucket.to_string(),
update_in_progress: true,
count: 1,
});
}
pub async fn mark_refresh_done(&self, bucket: &str, arn: &str) {
let mut arn_errs = self.arn_errs_map.write().await;
if let Some(err) = arn_errs.get_mut(arn) {
err.update_in_progress = false;
err.bucket = bucket.to_string();
}
}
pub async fn is_reloading_target(&self, _bucket: &str, arn: &str) -> bool {
let arn_errs = self.arn_errs_map.read().await;
arn_errs.get(arn).map(|err| err.update_in_progress).unwrap_or(false)
}
pub async fn inc_arn_errs(&self, _bucket: &str, arn: &str) {
let mut arn_errs = self.arn_errs_map.write().await;
if let Some(err) = arn_errs.get_mut(arn) {
err.count += 1;
}
}
pub async fn get_remote_target_client(&self, bucket: &str, arn: &str) -> Option<Arc<TargetClient>> {
let (cli, last_refresh) = {
self.arn_remotes_map
.read()
.await
.get(arn)
.map(|target| (target.client.clone(), Some(target.last_refresh)))
.unwrap_or((None, None))
};
if let Some(cli) = cli {
return Some(cli.clone());
}
// TODO: spawn a task to reload the target
if self.is_reloading_target(bucket, arn).await {
return None;
}
if let Some(last_refresh) = last_refresh {
let now = OffsetDateTime::now_utc();
if now - last_refresh > Duration::from_secs(60 * 5) {
return None;
}
}
match get_bucket_targets_config(bucket).await {
Ok(bucket_targets) => {
self.mark_refresh_in_progress(bucket, arn).await;
self.update_all_targets(bucket, Some(&bucket_targets)).await;
self.mark_refresh_done(bucket, arn).await;
}
Err(e) => {
error!("get bucket targets config error:{}", e);
}
};
self.inc_arn_errs(bucket, arn).await;
None
}
pub async fn get_remote_target_client_internal(&self, target: &BucketTarget) -> Result<TargetClient, BucketTargetError> {
let Some(credentials) = &target.credentials else {
return Err(BucketTargetError::BucketRemoteTargetNotFound {
bucket: target.target_bucket.clone(),
});
};
let creds = SdkCredentials::builder()
.access_key_id(credentials.access_key.clone())
.secret_access_key(credentials.secret_key.clone())
.account_id(target.reset_id.clone())
.provider_name("bucket_target_sys")
.build();
let endpoint = if target.secure {
format!("https://{}", target.endpoint)
} else {
format!("http://{}", target.endpoint)
};
let config = S3Config::builder()
.endpoint_url(endpoint.clone())
.credentials_provider(SharedCredentialsProvider::new(creds))
.region(SdkRegion::new(target.region.clone()))
.behavior_version(aws_sdk_s3::config::BehaviorVersion::latest())
.build();
Ok(TargetClient {
endpoint,
credentials: target.credentials.clone(),
bucket: target.target_bucket.clone(),
storage_class: target.storage_class.clone(),
disable_proxy: target.disable_proxy,
arn: target.arn.clone(),
reset_id: target.reset_id.clone(),
secure: target.secure,
health_check_duration: target.health_check_duration,
replicate_sync: target.replication_sync,
client: Arc::new(S3Client::from_conf(config)),
})
}
async fn validate_target_credentials(&self, _target: &BucketTarget) -> Result<bool, BucketTargetError> {
// In a real implementation, you would validate the credentials
// by making actual API calls to the target
Ok(true)
}
fn update_bandwidth_limit(&self, _bucket: &str, _arn: &str, _limit: i64) {
// Implementation for bandwidth limit update
// This would interact with the global bucket monitor
}
pub async fn get_remote_target_client_by_arn(&self, _bucket: &str, arn: &str) -> Option<Arc<TargetClient>> {
let arn_remotes_map = self.arn_remotes_map.read().await;
arn_remotes_map.get(arn).and_then(|target| target.client.clone())
}
pub async fn get_remote_bucket_target_by_arn(&self, bucket: &str, arn: &str) -> Option<BucketTarget> {
let targets_map = self.targets_map.read().await;
targets_map
.get(bucket)
.and_then(|targets| targets.iter().find(|t| t.arn == arn).cloned())
}
pub async fn update_all_targets(&self, bucket: &str, targets: Option<&BucketTargets>) {
let mut targets_map = self.targets_map.write().await;
let mut arn_remotes_map = self.arn_remotes_map.write().await;
// Remove existing targets
if let Some(existing_targets) = targets_map.remove(bucket) {
for target in existing_targets {
arn_remotes_map.remove(&target.arn);
}
}
// Add new targets
if let Some(new_targets) = targets
&& !new_targets.is_empty()
{
for target in &new_targets.targets {
if let Ok(client) = self.get_remote_target_client_internal(target).await {
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
}
targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}
}
pub async fn set(&self, bucket: &str, meta: &BucketMetadata) {
let Some(config) = &meta.bucket_target_config else {
return;
};
if config.is_empty() {
return;
}
for target in config.targets.iter() {
let cli = match self.get_remote_target_client_internal(target).await {
Ok(cli) => cli,
Err(e) => {
warn!("get_remote_target_client_internal error:{}", e);
continue;
}
};
{
let arn_target = ArnTarget::with_client(Arc::new(cli));
let mut arn_remotes_map = self.arn_remotes_map.write().await;
arn_remotes_map.insert(target.arn.clone(), arn_target);
}
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
let mut targets_map = self.targets_map.write().await;
targets_map.insert(bucket.to_string(), config.targets.clone());
}
// getRemoteARN gets existing ARN for an endpoint or generates a new one.
pub async fn get_remote_arn(&self, bucket: &str, target: Option<&BucketTarget>, depl_id: &str) -> (String, bool) {
let Some(target) = target else {
return (String::new(), false);
};
{
let targets_map = self.targets_map.read().await;
if let Some(targets) = targets_map.get(bucket) {
for tgt in targets {
if tgt.target_type == target.target_type
&& tgt.target_bucket == target.target_bucket
&& target.endpoint == tgt.endpoint
&& tgt
.credentials
.as_ref()
.map(|c| c.access_key == target.credentials.as_ref().unwrap_or(&Credentials::default()).access_key)
.unwrap_or(false)
{
return (tgt.arn.clone(), true);
}
}
}
}
if !target.target_type.is_valid() {
return (String::new(), false);
}
let arn = generate_arn(target, depl_id);
(arn, false)
}
}
// generate ARN that is unique to this target type
fn generate_arn(t: &BucketTarget, depl_id: &str) -> String {
let uuid = if depl_id.is_empty() {
Uuid::new_v4().to_string()
} else {
depl_id.to_string()
};
let arn = ARN {
arn_type: t.target_type.clone(),
id: uuid,
region: t.region.clone(),
bucket: t.target_bucket.clone(),
};
arn.to_string()
}
pub struct RemoveObjectOptions {
pub force_delete: bool,
pub governance_bypass: bool,
pub replication_delete_marker: bool,
pub replication_mtime: Option<OffsetDateTime>,
pub replication_status: ReplicationStatusType,
pub replication_request: bool,
pub replication_validity_check: bool,
}
#[derive(Debug, Clone)]
pub struct AdvancedPutOptions {
pub source_version_id: String,
pub source_etag: String,
pub replication_status: ReplicationStatusType,
pub source_mtime: OffsetDateTime,
pub replication_request: bool,
pub retention_timestamp: OffsetDateTime,
pub tagging_timestamp: OffsetDateTime,
pub legalhold_timestamp: OffsetDateTime,
pub replication_validity_check: bool,
}
impl Default for AdvancedPutOptions {
fn default() -> Self {
Self {
source_version_id: "".to_string(),
source_etag: "".to_string(),
replication_status: ReplicationStatusType::Pending,
source_mtime: OffsetDateTime::now_utc(),
replication_request: false,
retention_timestamp: OffsetDateTime::now_utc(),
tagging_timestamp: OffsetDateTime::now_utc(),
legalhold_timestamp: OffsetDateTime::now_utc(),
replication_validity_check: false,
}
}
}
#[derive(Clone)]
pub struct PutObjectOptions {
pub user_metadata: HashMap<String, String>,
pub user_tags: HashMap<String, String>,
//pub progress: ReaderImpl,
pub content_type: String,
pub content_encoding: String,
pub content_disposition: String,
pub content_language: String,
pub cache_control: String,
pub expires: OffsetDateTime,
pub mode: Option<ObjectLockRetentionMode>,
pub retain_until_date: OffsetDateTime,
//pub server_side_encryption: encrypt::ServerSide,
pub num_threads: u64,
pub storage_class: String,
pub website_redirect_location: String,
pub part_size: u64,
pub legalhold: Option<ObjectLockLegalHoldStatus>,
pub send_content_md5: bool,
pub disable_content_sha256: bool,
pub disable_multipart: bool,
pub auto_checksum: Option<ChecksumMode>,
pub checksum: Option<ChecksumMode>,
pub concurrent_stream_parts: bool,
pub internal: AdvancedPutOptions,
pub custom_header: HeaderMap,
}
impl Default for PutObjectOptions {
fn default() -> Self {
Self {
user_metadata: HashMap::new(),
user_tags: HashMap::new(),
//progress: ReaderImpl::Body(Bytes::new()),
content_type: "".to_string(),
content_encoding: "".to_string(),
content_disposition: "".to_string(),
content_language: "".to_string(),
cache_control: "".to_string(),
expires: OffsetDateTime::UNIX_EPOCH,
mode: None,
retain_until_date: OffsetDateTime::UNIX_EPOCH,
//server_side_encryption: encrypt.ServerSide::default(),
num_threads: 0,
storage_class: "".to_string(),
website_redirect_location: "".to_string(),
part_size: 0,
legalhold: None,
send_content_md5: false,
disable_content_sha256: false,
disable_multipart: false,
auto_checksum: None,
checksum: None,
concurrent_stream_parts: false,
internal: AdvancedPutOptions::default(),
custom_header: HeaderMap::new(),
}
}
}
#[allow(dead_code)]
impl PutObjectOptions {
fn set_match_etag(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-Match", HeaderValue::from_str("*").expect("err"));
} else {
self.custom_header
.insert("If-Match", HeaderValue::from_str(&format!("\"{etag}\"")).expect("err"));
}
}
fn set_match_etag_except(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
} else {
self.custom_header
.insert("If-None-Match", HeaderValue::from_str(&format!("\"{etag}\"")).expect("err"));
}
}
pub fn header(&self) -> HeaderMap {
let mut header = HeaderMap::new();
let mut content_type = self.content_type.clone();
if content_type.is_empty() {
content_type = "application/octet-stream".to_string();
}
header.insert("Content-Type", HeaderValue::from_str(&content_type).expect("err"));
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | true |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/policy_sys.rs | crates/ecstore/src/bucket/policy_sys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::metadata_sys::get_bucket_metadata_sys;
use crate::error::{Result, StorageError};
use rustfs_policy::policy::{BucketPolicy, BucketPolicyArgs};
use tracing::info;
pub struct PolicySys {}
impl PolicySys {
pub async fn is_allowed(args: &BucketPolicyArgs<'_>) -> bool {
match Self::get(args.bucket).await {
Ok(cfg) => return cfg.is_allowed(args).await,
Err(err) => {
if err != StorageError::ConfigNotFound {
info!("config get err {:?}", err);
}
}
}
args.is_owner
}
pub async fn get(bucket: &str) -> Result<BucketPolicy> {
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
let (cfg, _) = bucket_meta_sys.get_bucket_policy(bucket).await?;
Ok(cfg)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/error.rs | crates/ecstore/src/bucket/error.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::Error;
#[derive(Debug, thiserror::Error)]
pub enum BucketMetadataError {
#[error("tagging not found")]
TaggingNotFound,
#[error("bucket policy not found")]
BucketPolicyNotFound,
#[error("bucket object lock not found")]
BucketObjectLockConfigNotFound,
#[error("bucket lifecycle not found")]
BucketLifecycleNotFound,
#[error("bucket SSE config not found")]
BucketSSEConfigNotFound,
#[error("bucket quota config not found")]
BucketQuotaConfigNotFound,
#[error("bucket replication config not found")]
BucketReplicationConfigNotFound,
#[error("bucket remote target not found")]
BucketRemoteTargetNotFound,
#[error("Io error: {0}")]
Io(std::io::Error),
}
impl BucketMetadataError {
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
BucketMetadataError::Io(std::io::Error::other(error))
}
}
impl From<Error> for BucketMetadataError {
fn from(e: Error) -> Self {
match e {
Error::Io(e) => e.into(),
_ => BucketMetadataError::other(e),
}
}
}
impl From<std::io::Error> for BucketMetadataError {
fn from(e: std::io::Error) -> Self {
e.downcast::<BucketMetadataError>().unwrap_or_else(BucketMetadataError::other)
}
}
impl PartialEq for BucketMetadataError {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(BucketMetadataError::Io(e1), BucketMetadataError::Io(e2)) => {
e1.kind() == e2.kind() && e1.to_string() == e2.to_string()
}
(e1, e2) => e1.to_u32() == e2.to_u32(),
}
}
}
impl Eq for BucketMetadataError {}
impl BucketMetadataError {
pub fn to_u32(&self) -> u32 {
match self {
BucketMetadataError::TaggingNotFound => 0x01,
BucketMetadataError::BucketPolicyNotFound => 0x02,
BucketMetadataError::BucketObjectLockConfigNotFound => 0x03,
BucketMetadataError::BucketLifecycleNotFound => 0x04,
BucketMetadataError::BucketSSEConfigNotFound => 0x05,
BucketMetadataError::BucketQuotaConfigNotFound => 0x06,
BucketMetadataError::BucketReplicationConfigNotFound => 0x07,
BucketMetadataError::BucketRemoteTargetNotFound => 0x08,
BucketMetadataError::Io(_) => 0x09,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(BucketMetadataError::TaggingNotFound),
0x02 => Some(BucketMetadataError::BucketPolicyNotFound),
0x03 => Some(BucketMetadataError::BucketObjectLockConfigNotFound),
0x04 => Some(BucketMetadataError::BucketLifecycleNotFound),
0x05 => Some(BucketMetadataError::BucketSSEConfigNotFound),
0x06 => Some(BucketMetadataError::BucketQuotaConfigNotFound),
0x07 => Some(BucketMetadataError::BucketReplicationConfigNotFound),
0x08 => Some(BucketMetadataError::BucketRemoteTargetNotFound),
0x09 => Some(BucketMetadataError::Io(std::io::Error::other("Io error"))),
_ => None,
}
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/utils.rs | crates/ecstore/src/bucket/utils.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result};
use s3s::xml;
pub fn is_meta_bucketname(name: &str) -> bool {
name.starts_with(RUSTFS_META_BUCKET)
}
use regex::Regex;
lazy_static::lazy_static! {
static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap();
static ref VALID_BUCKET_NAME_STRICT: Regex = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap();
static ref IP_ADDRESS: Regex = Regex::new(r"^(\d+\.){3}\d+$").unwrap();
}
pub fn check_bucket_name_common(bucket_name: &str, strict: bool) -> Result<()> {
let bucket_name_trimmed = bucket_name.trim();
if bucket_name_trimmed.is_empty() {
return Err(Error::other("Bucket name cannot be empty"));
}
if bucket_name_trimmed.len() < 3 {
return Err(Error::other("Bucket name cannot be shorter than 3 characters"));
}
if bucket_name_trimmed.len() > 63 {
return Err(Error::other("Bucket name cannot be longer than 63 characters"));
}
if bucket_name_trimmed == "rustfs" {
return Err(Error::other("Bucket name cannot be rustfs"));
}
if IP_ADDRESS.is_match(bucket_name_trimmed) {
return Err(Error::other("Bucket name cannot be an IP address"));
}
if bucket_name_trimmed.contains("..") || bucket_name_trimmed.contains(".-") || bucket_name_trimmed.contains("-.") {
return Err(Error::other("Bucket name contains invalid characters"));
}
if strict {
if !VALID_BUCKET_NAME_STRICT.is_match(bucket_name_trimmed) {
return Err(Error::other("Bucket name contains invalid characters"));
}
} else if !VALID_BUCKET_NAME.is_match(bucket_name_trimmed) {
return Err(Error::other("Bucket name contains invalid characters"));
}
Ok(())
}
pub fn check_valid_bucket_name(bucket_name: &str) -> Result<()> {
check_bucket_name_common(bucket_name, false)
}
pub fn check_valid_bucket_name_strict(bucket_name: &str) -> Result<()> {
check_bucket_name_common(bucket_name, true)
}
pub fn check_valid_object_name_prefix(object_name: &str) -> Result<()> {
if object_name.len() > 1024 {
return Err(Error::other("Object name cannot be longer than 1024 characters"));
}
if !object_name.is_ascii() {
return Err(Error::other("Object name with non-UTF-8 strings are not supported"));
}
Ok(())
}
pub fn check_valid_object_name(object_name: &str) -> Result<()> {
if object_name.trim().is_empty() {
return Err(Error::other("Object name cannot be empty"));
}
check_valid_object_name_prefix(object_name)
}
pub fn deserialize<T>(input: &[u8]) -> xml::DeResult<T>
where
T: for<'xml> xml::Deserialize<'xml>,
{
let mut d = xml::Deserializer::new(input);
let ans = T::deserialize(&mut d)?;
d.expect_eof()?;
Ok(ans)
}
pub fn serialize_content<T: xml::SerializeContent>(val: &T) -> xml::SerResult<String> {
let mut buf = Vec::with_capacity(256);
{
let mut ser = xml::Serializer::new(&mut buf);
val.serialize_content(&mut ser)?;
}
Ok(String::from_utf8(buf).unwrap())
}
pub fn serialize<T: xml::Serialize>(val: &T) -> xml::SerResult<Vec<u8>> {
let mut buf = Vec::with_capacity(256);
{
let mut ser = xml::Serializer::new(&mut buf);
val.serialize(&mut ser)?;
}
Ok(buf)
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/mod.rs | crates/ecstore/src/bucket/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod bucket_target_sys;
pub mod error;
pub mod lifecycle;
pub mod metadata;
pub mod metadata_sys;
pub mod object_lock;
pub mod policy_sys;
pub mod quota;
pub mod replication;
pub mod tagging;
pub mod target;
pub mod utils;
pub mod versioning;
pub mod versioning_sys;
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/versioning_sys.rs | crates/ecstore/src/bucket/versioning_sys.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{metadata_sys::get_bucket_metadata_sys, versioning::VersioningApi};
use crate::disk::RUSTFS_META_BUCKET;
use crate::error::Result;
use s3s::dto::VersioningConfiguration;
use tracing::warn;
pub struct BucketVersioningSys {}
impl Default for BucketVersioningSys {
fn default() -> Self {
Self::new()
}
}
impl BucketVersioningSys {
pub fn new() -> Self {
Self {}
}
pub async fn enabled(bucket: &str) -> bool {
match Self::get(bucket).await {
Ok(res) => res.enabled(),
Err(err) => {
warn!("{:?}", err);
false
}
}
}
pub async fn prefix_enabled(bucket: &str, prefix: &str) -> bool {
match Self::get(bucket).await {
Ok(res) => res.prefix_enabled(prefix),
Err(err) => {
warn!("{:?}", err);
false
}
}
}
pub async fn suspended(bucket: &str) -> bool {
match Self::get(bucket).await {
Ok(res) => res.suspended(),
Err(err) => {
warn!("{:?}", err);
false
}
}
}
pub async fn prefix_suspended(bucket: &str, prefix: &str) -> bool {
match Self::get(bucket).await {
Ok(res) => res.prefix_suspended(prefix),
Err(err) => {
warn!("{:?}", err);
false
}
}
}
pub async fn get(bucket: &str) -> Result<VersioningConfiguration> {
if bucket == RUSTFS_META_BUCKET || bucket.starts_with(RUSTFS_META_BUCKET) {
return Ok(VersioningConfiguration::default());
}
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
let bucket_meta_sys = bucket_meta_sys_lock.write().await;
let (cfg, _) = bucket_meta_sys.get_versioning_config(bucket).await?;
Ok(cfg)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/metadata.rs | crates/ecstore/src/bucket/metadata.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{quota::BucketQuota, target::BucketTargets};
use super::object_lock::ObjectLockApi;
use super::versioning::VersioningApi;
use crate::bucket::utils::deserialize;
use crate::config::com::{read_config, save_config};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use rmp_serde::Serializer as rmpSerializer;
use rustfs_policy::policy::BucketPolicy;
use s3s::dto::{
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
};
use serde::Serializer;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use time::OffsetDateTime;
use tracing::error;
use crate::disk::BUCKET_META_PREFIX;
use crate::store::ECStore;
pub const BUCKET_METADATA_FILE: &str = ".metadata.bin";
pub const BUCKET_METADATA_FORMAT: u16 = 1;
pub const BUCKET_METADATA_VERSION: u16 = 1;
pub const BUCKET_POLICY_CONFIG: &str = "policy.json";
pub const BUCKET_NOTIFICATION_CONFIG: &str = "notification.xml";
pub const BUCKET_LIFECYCLE_CONFIG: &str = "lifecycle.xml";
pub const BUCKET_SSECONFIG: &str = "bucket-encryption.xml";
pub const BUCKET_TAGGING_CONFIG: &str = "tagging.xml";
pub const BUCKET_QUOTA_CONFIG_FILE: &str = "quota.json";
pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml";
pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml";
pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml";
pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json";
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase", default)]
pub struct BucketMetadata {
pub name: String,
pub created: OffsetDateTime,
pub lock_enabled: bool, // While marked as unused, it may need to be retained
pub policy_config_json: Vec<u8>,
pub notification_config_xml: Vec<u8>,
pub lifecycle_config_xml: Vec<u8>,
pub object_lock_config_xml: Vec<u8>,
pub versioning_config_xml: Vec<u8>,
pub encryption_config_xml: Vec<u8>,
pub tagging_config_xml: Vec<u8>,
pub quota_config_json: Vec<u8>,
pub replication_config_xml: Vec<u8>,
pub bucket_targets_config_json: Vec<u8>,
pub bucket_targets_config_meta_json: Vec<u8>,
pub policy_config_updated_at: OffsetDateTime,
pub object_lock_config_updated_at: OffsetDateTime,
pub encryption_config_updated_at: OffsetDateTime,
pub tagging_config_updated_at: OffsetDateTime,
pub quota_config_updated_at: OffsetDateTime,
pub replication_config_updated_at: OffsetDateTime,
pub versioning_config_updated_at: OffsetDateTime,
pub lifecycle_config_updated_at: OffsetDateTime,
pub notification_config_updated_at: OffsetDateTime,
pub bucket_targets_config_updated_at: OffsetDateTime,
pub bucket_targets_config_meta_updated_at: OffsetDateTime,
#[serde(skip)]
pub new_field_updated_at: OffsetDateTime,
#[serde(skip)]
pub policy_config: Option<BucketPolicy>,
#[serde(skip)]
pub notification_config: Option<NotificationConfiguration>,
#[serde(skip)]
pub lifecycle_config: Option<BucketLifecycleConfiguration>,
#[serde(skip)]
pub object_lock_config: Option<ObjectLockConfiguration>,
#[serde(skip)]
pub versioning_config: Option<VersioningConfiguration>,
#[serde(skip)]
pub sse_config: Option<ServerSideEncryptionConfiguration>,
#[serde(skip)]
pub tagging_config: Option<Tagging>,
#[serde(skip)]
pub quota_config: Option<BucketQuota>,
#[serde(skip)]
pub replication_config: Option<ReplicationConfiguration>,
#[serde(skip)]
pub bucket_target_config: Option<BucketTargets>,
#[serde(skip)]
pub bucket_target_config_meta: Option<HashMap<String, String>>,
}
impl Default for BucketMetadata {
fn default() -> Self {
Self {
name: Default::default(),
created: OffsetDateTime::UNIX_EPOCH,
lock_enabled: Default::default(),
policy_config_json: Default::default(),
notification_config_xml: Default::default(),
lifecycle_config_xml: Default::default(),
object_lock_config_xml: Default::default(),
versioning_config_xml: Default::default(),
encryption_config_xml: Default::default(),
tagging_config_xml: Default::default(),
quota_config_json: Default::default(),
replication_config_xml: Default::default(),
bucket_targets_config_json: Default::default(),
bucket_targets_config_meta_json: Default::default(),
policy_config_updated_at: OffsetDateTime::UNIX_EPOCH,
object_lock_config_updated_at: OffsetDateTime::UNIX_EPOCH,
encryption_config_updated_at: OffsetDateTime::UNIX_EPOCH,
tagging_config_updated_at: OffsetDateTime::UNIX_EPOCH,
quota_config_updated_at: OffsetDateTime::UNIX_EPOCH,
replication_config_updated_at: OffsetDateTime::UNIX_EPOCH,
versioning_config_updated_at: OffsetDateTime::UNIX_EPOCH,
lifecycle_config_updated_at: OffsetDateTime::UNIX_EPOCH,
notification_config_updated_at: OffsetDateTime::UNIX_EPOCH,
bucket_targets_config_updated_at: OffsetDateTime::UNIX_EPOCH,
bucket_targets_config_meta_updated_at: OffsetDateTime::UNIX_EPOCH,
new_field_updated_at: OffsetDateTime::UNIX_EPOCH,
policy_config: Default::default(),
notification_config: Default::default(),
lifecycle_config: Default::default(),
object_lock_config: Default::default(),
versioning_config: Default::default(),
sse_config: Default::default(),
tagging_config: Default::default(),
quota_config: Default::default(),
replication_config: Default::default(),
bucket_target_config: Default::default(),
bucket_target_config_meta: Default::default(),
}
}
}
impl BucketMetadata {
pub fn new(name: &str) -> Self {
BucketMetadata {
name: name.to_string(),
..Default::default()
}
}
pub fn save_file_path(&self) -> String {
format!("{}/{}/{}", BUCKET_META_PREFIX, self.name.as_str(), BUCKET_METADATA_FILE)
}
pub fn versioning(&self) -> bool {
self.lock_enabled
|| (self.object_lock_config.as_ref().is_some_and(|v| v.enabled())
|| self.versioning_config.as_ref().is_some_and(|v| v.enabled()))
}
pub fn object_locking(&self) -> bool {
self.lock_enabled || (self.versioning_config.as_ref().is_some_and(|v| v.enabled()))
}
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: BucketMetadata = rmp_serde::from_slice(buf)?;
Ok(t)
}
pub fn check_header(buf: &[u8]) -> Result<()> {
if buf.len() <= 4 {
return Err(Error::other("read_bucket_metadata: data invalid"));
}
let format = LittleEndian::read_u16(&buf[0..2]);
let version = LittleEndian::read_u16(&buf[2..4]);
match format {
BUCKET_METADATA_FORMAT => {}
_ => return Err(Error::other("read_bucket_metadata: format invalid")),
}
match version {
BUCKET_METADATA_VERSION => {}
_ => return Err(Error::other("read_bucket_metadata: version invalid")),
}
Ok(())
}
fn default_timestamps(&mut self) {
if self.policy_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.policy_config_updated_at = self.created
}
if self.encryption_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.encryption_config_updated_at = self.created
}
if self.tagging_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.tagging_config_updated_at = self.created
}
if self.object_lock_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.object_lock_config_updated_at = self.created
}
if self.quota_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.quota_config_updated_at = self.created
}
if self.replication_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.replication_config_updated_at = self.created
}
if self.versioning_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.versioning_config_updated_at = self.created
}
if self.lifecycle_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.lifecycle_config_updated_at = self.created
}
if self.notification_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.notification_config_updated_at = self.created
}
if self.bucket_targets_config_updated_at == OffsetDateTime::UNIX_EPOCH {
self.bucket_targets_config_updated_at = self.created
}
if self.bucket_targets_config_meta_updated_at == OffsetDateTime::UNIX_EPOCH {
self.bucket_targets_config_meta_updated_at = self.created
}
}
pub fn update_config(&mut self, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
let updated = OffsetDateTime::now_utc();
match config_file {
BUCKET_POLICY_CONFIG => {
self.policy_config_json = data;
self.policy_config_updated_at = updated;
}
BUCKET_NOTIFICATION_CONFIG => {
self.notification_config_xml = data;
self.notification_config_updated_at = updated;
}
BUCKET_LIFECYCLE_CONFIG => {
self.lifecycle_config_xml = data;
self.lifecycle_config_updated_at = updated;
}
BUCKET_SSECONFIG => {
self.encryption_config_xml = data;
self.encryption_config_updated_at = updated;
}
BUCKET_TAGGING_CONFIG => {
self.tagging_config_xml = data;
self.tagging_config_updated_at = updated;
}
BUCKET_QUOTA_CONFIG_FILE => {
self.quota_config_json = data;
self.quota_config_updated_at = updated;
}
OBJECT_LOCK_CONFIG => {
self.object_lock_config_xml = data;
self.object_lock_config_updated_at = updated;
}
BUCKET_VERSIONING_CONFIG => {
self.versioning_config_xml = data;
self.versioning_config_updated_at = updated;
}
BUCKET_REPLICATION_CONFIG => {
self.replication_config_xml = data;
self.replication_config_updated_at = updated;
}
BUCKET_TARGETS_FILE => {
// let x = data.clone();
// let str = std::str::from_utf8(&x).expect("Invalid UTF-8");
// println!("update config:{}", str);
self.bucket_targets_config_json = data.clone();
self.bucket_targets_config_updated_at = updated;
}
_ => return Err(Error::other(format!("config file not found : {config_file}"))),
}
Ok(updated)
}
pub fn set_created(&mut self, created: Option<OffsetDateTime>) {
self.created = created.unwrap_or_else(OffsetDateTime::now_utc)
}
pub async fn save(&mut self) -> Result<()> {
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
self.parse_all_configs(store.clone())?;
let mut buf: Vec<u8> = vec![0; 4];
LittleEndian::write_u16(&mut buf[0..2], BUCKET_METADATA_FORMAT);
LittleEndian::write_u16(&mut buf[2..4], BUCKET_METADATA_VERSION);
let data = self
.marshal_msg()
.map_err(|e| Error::other(format!("save bucket metadata failed: {e}")))?;
buf.extend_from_slice(&data);
save_config(store, self.save_file_path().as_str(), buf).await?;
Ok(())
}
fn parse_all_configs(&mut self, _api: Arc<ECStore>) -> Result<()> {
if !self.policy_config_json.is_empty() {
self.policy_config = Some(serde_json::from_slice(&self.policy_config_json)?);
}
if !self.notification_config_xml.is_empty() {
self.notification_config = Some(deserialize::<NotificationConfiguration>(&self.notification_config_xml)?);
}
if !self.lifecycle_config_xml.is_empty() {
self.lifecycle_config = Some(deserialize::<BucketLifecycleConfiguration>(&self.lifecycle_config_xml)?);
}
if !self.object_lock_config_xml.is_empty() {
self.object_lock_config = Some(deserialize::<ObjectLockConfiguration>(&self.object_lock_config_xml)?);
}
if !self.versioning_config_xml.is_empty() {
self.versioning_config = Some(deserialize::<VersioningConfiguration>(&self.versioning_config_xml)?);
}
if !self.encryption_config_xml.is_empty() {
self.sse_config = Some(deserialize::<ServerSideEncryptionConfiguration>(&self.encryption_config_xml)?);
}
if !self.tagging_config_xml.is_empty() {
self.tagging_config = Some(deserialize::<Tagging>(&self.tagging_config_xml)?);
}
if !self.quota_config_json.is_empty() {
self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?);
}
if !self.replication_config_xml.is_empty() {
self.replication_config = Some(deserialize::<ReplicationConfiguration>(&self.replication_config_xml)?);
}
//let temp = self.bucket_targets_config_json.clone();
if !self.bucket_targets_config_json.is_empty() {
let bucket_targets: BucketTargets = serde_json::from_slice(&self.bucket_targets_config_json)?;
self.bucket_target_config = Some(bucket_targets);
} else {
self.bucket_target_config = Some(BucketTargets::default())
}
Ok(())
}
}
pub async fn load_bucket_metadata(api: Arc<ECStore>, bucket: &str) -> Result<BucketMetadata> {
load_bucket_metadata_parse(api, bucket, true).await
}
pub async fn load_bucket_metadata_parse(api: Arc<ECStore>, bucket: &str, parse: bool) -> Result<BucketMetadata> {
let mut bm = match read_bucket_metadata(api.clone(), bucket).await {
Ok(res) => res,
Err(err) => {
if err != Error::ConfigNotFound {
return Err(err);
}
// info!("bucketmeta {} not found with err {:?}, start to init ", bucket, &err);
BucketMetadata::new(bucket)
}
};
bm.default_timestamps();
if parse {
bm.parse_all_configs(api)?;
}
// TODO: parse_all_configs
Ok(bm)
}
async fn read_bucket_metadata(api: Arc<ECStore>, bucket: &str) -> Result<BucketMetadata> {
if bucket.is_empty() {
error!("bucket name empty");
return Err(Error::other("invalid argument"));
}
let bm = BucketMetadata::new(bucket);
let file_path = bm.save_file_path();
let data = read_config(api, &file_path).await?;
BucketMetadata::check_header(&data)?;
let bm = BucketMetadata::unmarshal(&data[4..])?;
Ok(bm)
}
fn _write_time<S>(t: &OffsetDateTime, s: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut buf = vec![0x0; 15];
let sec = t.unix_timestamp() - 62135596800;
let nsec = t.nanosecond();
buf[0] = 0xc7; // mext8
buf[1] = 0x0c; // Length
buf[2] = 0x05; // Time extension type
BigEndian::write_u64(&mut buf[3..], sec as u64);
BigEndian::write_u32(&mut buf[11..], nsec);
s.serialize_bytes(&buf)
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn marshal_msg() {
// write_time(OffsetDateTime::UNIX_EPOCH).unwrap();
let bm = BucketMetadata::new("dada");
let buf = bm.marshal_msg().unwrap();
let new = BucketMetadata::unmarshal(&buf).unwrap();
assert_eq!(bm.name, new.name);
}
#[tokio::test]
async fn marshal_msg_complete_example() {
// Create a complete BucketMetadata with various configurations
let mut bm = BucketMetadata::new("test-bucket");
// Set creation time to current time
bm.created = OffsetDateTime::now_utc();
bm.lock_enabled = true;
// Add policy configuration
let policy_json = r#"{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"s3:GetObject","Resource":"arn:aws:s3:::test-bucket/*"}]}"#;
bm.policy_config_json = policy_json.as_bytes().to_vec();
bm.policy_config_updated_at = OffsetDateTime::now_utc();
// Add lifecycle configuration
let lifecycle_xml = r#"<LifecycleConfiguration><Rule><ID>rule1</ID><Status>Enabled</Status><Expiration><Days>30</Days></Expiration></Rule></LifecycleConfiguration>"#;
bm.lifecycle_config_xml = lifecycle_xml.as_bytes().to_vec();
bm.lifecycle_config_updated_at = OffsetDateTime::now_utc();
// Add versioning configuration
let versioning_xml = r#"<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>"#;
bm.versioning_config_xml = versioning_xml.as_bytes().to_vec();
bm.versioning_config_updated_at = OffsetDateTime::now_utc();
// Add encryption configuration
let encryption_xml = r#"<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>"#;
bm.encryption_config_xml = encryption_xml.as_bytes().to_vec();
bm.encryption_config_updated_at = OffsetDateTime::now_utc();
// Add tagging configuration
let tagging_xml = r#"<Tagging><TagSet><Tag><Key>Environment</Key><Value>Test</Value></Tag><Tag><Key>Owner</Key><Value>RustFS</Value></Tag></TagSet></Tagging>"#;
bm.tagging_config_xml = tagging_xml.as_bytes().to_vec();
bm.tagging_config_updated_at = OffsetDateTime::now_utc();
// Add quota configuration
let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
bm.quota_config_json = quota_json.as_bytes().to_vec();
bm.quota_config_updated_at = OffsetDateTime::now_utc();
// Add object lock configuration
let object_lock_xml = r#"<ObjectLockConfiguration><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>GOVERNANCE</Mode><Days>7</Days></DefaultRetention></Rule></ObjectLockConfiguration>"#;
bm.object_lock_config_xml = object_lock_xml.as_bytes().to_vec();
bm.object_lock_config_updated_at = OffsetDateTime::now_utc();
// Add notification configuration
let notification_xml = r#"<NotificationConfiguration><CloudWatchConfiguration><Id>notification1</Id><Event>s3:ObjectCreated:*</Event><CloudWatchConfiguration><LogGroupName>test-log-group</LogGroupName></CloudWatchConfiguration></CloudWatchConfiguration></NotificationConfiguration>"#;
bm.notification_config_xml = notification_xml.as_bytes().to_vec();
bm.notification_config_updated_at = OffsetDateTime::now_utc();
// Add replication configuration
let replication_xml = r#"<ReplicationConfiguration><Role>arn:aws:iam::123456789012:role/replication-role</Role><Rule><ID>rule1</ID><Status>Enabled</Status><Prefix>documents/</Prefix><Destination><Bucket>arn:aws:s3:::destination-bucket</Bucket></Destination></Rule></ReplicationConfiguration>"#;
bm.replication_config_xml = replication_xml.as_bytes().to_vec();
bm.replication_config_updated_at = OffsetDateTime::now_utc();
// Add bucket targets configuration
let bucket_targets_json = r#"[{"endpoint":"http://target1.example.com","credentials":{"accessKey":"key1","secretKey":"secret1"},"targetBucket":"target-bucket-1","region":"us-east-1"},{"endpoint":"http://target2.example.com","credentials":{"accessKey":"key2","secretKey":"secret2"},"targetBucket":"target-bucket-2","region":"us-west-2"}]"#;
bm.bucket_targets_config_json = bucket_targets_json.as_bytes().to_vec();
bm.bucket_targets_config_updated_at = OffsetDateTime::now_utc();
// Add bucket targets meta configuration
let bucket_targets_meta_json = r#"{"replicationId":"repl-123","syncMode":"async","bandwidth":"100MB"}"#;
bm.bucket_targets_config_meta_json = bucket_targets_meta_json.as_bytes().to_vec();
bm.bucket_targets_config_meta_updated_at = OffsetDateTime::now_utc();
// Test serialization
let buf = bm.marshal_msg().unwrap();
assert!(!buf.is_empty(), "Serialized buffer should not be empty");
// Test deserialization
let deserialized_bm = BucketMetadata::unmarshal(&buf).unwrap();
// Verify all fields are correctly serialized and deserialized
assert_eq!(bm.name, deserialized_bm.name);
assert_eq!(bm.created.unix_timestamp(), deserialized_bm.created.unix_timestamp());
assert_eq!(bm.lock_enabled, deserialized_bm.lock_enabled);
// Verify configuration data
assert_eq!(bm.policy_config_json, deserialized_bm.policy_config_json);
assert_eq!(bm.lifecycle_config_xml, deserialized_bm.lifecycle_config_xml);
assert_eq!(bm.versioning_config_xml, deserialized_bm.versioning_config_xml);
assert_eq!(bm.encryption_config_xml, deserialized_bm.encryption_config_xml);
assert_eq!(bm.tagging_config_xml, deserialized_bm.tagging_config_xml);
assert_eq!(bm.quota_config_json, deserialized_bm.quota_config_json);
assert_eq!(bm.object_lock_config_xml, deserialized_bm.object_lock_config_xml);
assert_eq!(bm.notification_config_xml, deserialized_bm.notification_config_xml);
assert_eq!(bm.replication_config_xml, deserialized_bm.replication_config_xml);
assert_eq!(bm.bucket_targets_config_json, deserialized_bm.bucket_targets_config_json);
assert_eq!(bm.bucket_targets_config_meta_json, deserialized_bm.bucket_targets_config_meta_json);
// Verify timestamps (comparing unix timestamps to avoid precision issues)
assert_eq!(
bm.policy_config_updated_at.unix_timestamp(),
deserialized_bm.policy_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.lifecycle_config_updated_at.unix_timestamp(),
deserialized_bm.lifecycle_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.versioning_config_updated_at.unix_timestamp(),
deserialized_bm.versioning_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.encryption_config_updated_at.unix_timestamp(),
deserialized_bm.encryption_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.tagging_config_updated_at.unix_timestamp(),
deserialized_bm.tagging_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.quota_config_updated_at.unix_timestamp(),
deserialized_bm.quota_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.object_lock_config_updated_at.unix_timestamp(),
deserialized_bm.object_lock_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.notification_config_updated_at.unix_timestamp(),
deserialized_bm.notification_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.replication_config_updated_at.unix_timestamp(),
deserialized_bm.replication_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.bucket_targets_config_updated_at.unix_timestamp(),
deserialized_bm.bucket_targets_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.bucket_targets_config_meta_updated_at.unix_timestamp(),
deserialized_bm.bucket_targets_config_meta_updated_at.unix_timestamp()
);
// Test that the serialized data contains expected content
let buf_str = String::from_utf8_lossy(&buf);
assert!(buf_str.contains("test-bucket"), "Serialized data should contain bucket name");
// Verify the buffer size is reasonable (should be larger due to all the config data)
assert!(buf.len() > 1000, "Buffer should be substantial in size due to all configurations");
println!("✅ Complete BucketMetadata serialization test passed");
println!(" - Bucket name: {}", deserialized_bm.name);
println!(" - Lock enabled: {}", deserialized_bm.lock_enabled);
println!(" - Policy config size: {} bytes", deserialized_bm.policy_config_json.len());
println!(" - Lifecycle config size: {} bytes", deserialized_bm.lifecycle_config_xml.len());
println!(" - Serialized buffer size: {} bytes", buf.len());
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/quota/mod.rs | crates/ecstore/src/bucket/quota/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
// Define the QuotaType enum
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum QuotaType {
Hard,
}
// Define the BucketQuota structure
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketQuota {
quota: Option<u64>, // Use Option to represent optional fields
size: u64,
rate: u64,
requests: u64,
quota_type: Option<QuotaType>,
}
impl BucketQuota {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: BucketQuota = rmp_serde::from_slice(buf)?;
Ok(t)
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
rustfs/rustfs | https://github.com/rustfs/rustfs/blob/666c0a9a38636eb6653dff7d9c98ff7122601ce2/crates/ecstore/src/bucket/versioning/mod.rs | crates/ecstore/src/bucket/versioning/mod.rs | // Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
use rustfs_utils::string::match_simple;
pub trait VersioningApi {
fn enabled(&self) -> bool;
fn prefix_enabled(&self, prefix: &str) -> bool;
fn prefix_suspended(&self, prefix: &str) -> bool;
fn versioned(&self, prefix: &str) -> bool;
fn suspended(&self) -> bool;
}
impl VersioningApi for VersioningConfiguration {
fn enabled(&self) -> bool {
self.status == Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
}
fn prefix_enabled(&self, prefix: &str) -> bool {
if self.status != Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)) {
return false;
}
if prefix.is_empty() {
return true;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return false;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
for p in excluded_prefixes.iter() {
if let Some(ref sprefix) = p.prefix {
let pattern = format!("{sprefix}*");
if match_simple(&pattern, prefix) {
return false;
}
}
}
}
true
}
fn prefix_suspended(&self, prefix: &str) -> bool {
if self.status == Some(BucketVersioningStatus::from_static(BucketVersioningStatus::SUSPENDED)) {
return true;
}
if self.status == Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)) {
if prefix.is_empty() {
return false;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return true;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
for p in excluded_prefixes.iter() {
if let Some(ref sprefix) = p.prefix {
let pattern = format!("{sprefix}*");
if match_simple(&pattern, prefix) {
return true;
}
}
}
}
}
false
}
fn versioned(&self, prefix: &str) -> bool {
self.prefix_enabled(prefix) || self.prefix_suspended(prefix)
}
fn suspended(&self) -> bool {
self.status == Some(BucketVersioningStatus::from_static(BucketVersioningStatus::SUSPENDED))
}
}
| rust | Apache-2.0 | 666c0a9a38636eb6653dff7d9c98ff7122601ce2 | 2026-01-04T15:42:12.458416Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.